From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- drivers/bluetooth/Kconfig | 482 ++++ drivers/bluetooth/Makefile | 53 + drivers/bluetooth/ath3k.c | 540 ++++ drivers/bluetooth/bcm203x.c | 266 ++ drivers/bluetooth/bfusb.c | 725 ++++++ drivers/bluetooth/bluecard_cs.c | 908 +++++++ drivers/bluetooth/bpa10x.c | 445 ++++ drivers/bluetooth/bt3c_cs.c | 749 ++++++ drivers/bluetooth/btbcm.c | 786 ++++++ drivers/bluetooth/btbcm.h | 117 + drivers/bluetooth/btintel.c | 3074 +++++++++++++++++++++++ drivers/bluetooth/btintel.h | 325 +++ drivers/bluetooth/btmrvl_debugfs.c | 193 ++ drivers/bluetooth/btmrvl_drv.h | 173 ++ drivers/bluetooth/btmrvl_main.c | 793 ++++++ drivers/bluetooth/btmrvl_sdio.c | 1781 +++++++++++++ drivers/bluetooth/btmrvl_sdio.h | 114 + drivers/bluetooth/btmtk.c | 424 ++++ drivers/bluetooth/btmtk.h | 196 ++ drivers/bluetooth/btmtksdio.c | 1503 +++++++++++ drivers/bluetooth/btmtkuart.c | 994 ++++++++ drivers/bluetooth/btnxpuart.c | 1412 +++++++++++ drivers/bluetooth/btqca.c | 782 ++++++ drivers/bluetooth/btqca.h | 196 ++ drivers/bluetooth/btqcomsmd.c | 232 ++ drivers/bluetooth/btrsi.c | 196 ++ drivers/bluetooth/btrtl.c | 1510 +++++++++++ drivers/bluetooth/btrtl.h | 198 ++ drivers/bluetooth/btsdio.c | 382 +++ drivers/bluetooth/btusb.c | 4814 ++++++++++++++++++++++++++++++++++++ drivers/bluetooth/dtl1_cs.c | 614 +++++ drivers/bluetooth/h4_recv.h | 146 ++ drivers/bluetooth/hci_ag6xx.c | 321 +++ drivers/bluetooth/hci_ath.c | 268 ++ drivers/bluetooth/hci_bcm.c | 1645 ++++++++++++ drivers/bluetooth/hci_bcm4377.c | 2519 +++++++++++++++++++ drivers/bluetooth/hci_bcsp.c | 784 ++++++ drivers/bluetooth/hci_h4.c | 274 ++ drivers/bluetooth/hci_h5.c | 1137 +++++++++ drivers/bluetooth/hci_intel.c | 1234 +++++++++ drivers/bluetooth/hci_ldisc.c | 926 +++++++ drivers/bluetooth/hci_ll.c | 822 ++++++ drivers/bluetooth/hci_mrvl.c | 516 ++++ drivers/bluetooth/hci_nokia.c | 811 ++++++ drivers/bluetooth/hci_qca.c | 2628 ++++++++++++++++++++ drivers/bluetooth/hci_serdev.c | 418 ++++ drivers/bluetooth/hci_uart.h | 202 ++ drivers/bluetooth/hci_vhci.c | 710 ++++++ drivers/bluetooth/virtio_bt.c | 433 ++++ 49 files changed, 40771 insertions(+) create mode 100644 drivers/bluetooth/Kconfig create mode 100644 drivers/bluetooth/Makefile create mode 100644 drivers/bluetooth/ath3k.c create mode 100644 drivers/bluetooth/bcm203x.c create mode 100644 drivers/bluetooth/bfusb.c create mode 100644 drivers/bluetooth/bluecard_cs.c create mode 100644 drivers/bluetooth/bpa10x.c create mode 100644 drivers/bluetooth/bt3c_cs.c create mode 100644 drivers/bluetooth/btbcm.c create mode 100644 drivers/bluetooth/btbcm.h create mode 100644 drivers/bluetooth/btintel.c create mode 100644 drivers/bluetooth/btintel.h create mode 100644 drivers/bluetooth/btmrvl_debugfs.c create mode 100644 drivers/bluetooth/btmrvl_drv.h create mode 100644 drivers/bluetooth/btmrvl_main.c create mode 100644 drivers/bluetooth/btmrvl_sdio.c create mode 100644 drivers/bluetooth/btmrvl_sdio.h create mode 100644 drivers/bluetooth/btmtk.c create mode 100644 drivers/bluetooth/btmtk.h create mode 100644 drivers/bluetooth/btmtksdio.c create mode 100644 drivers/bluetooth/btmtkuart.c create mode 100644 drivers/bluetooth/btnxpuart.c create mode 100644 drivers/bluetooth/btqca.c create mode 100644 drivers/bluetooth/btqca.h create mode 100644 drivers/bluetooth/btqcomsmd.c create mode 100644 drivers/bluetooth/btrsi.c create 
mode 100644 drivers/bluetooth/btrtl.c create mode 100644 drivers/bluetooth/btrtl.h create mode 100644 drivers/bluetooth/btsdio.c create mode 100644 drivers/bluetooth/btusb.c create mode 100644 drivers/bluetooth/dtl1_cs.c create mode 100644 drivers/bluetooth/h4_recv.h create mode 100644 drivers/bluetooth/hci_ag6xx.c create mode 100644 drivers/bluetooth/hci_ath.c create mode 100644 drivers/bluetooth/hci_bcm.c create mode 100644 drivers/bluetooth/hci_bcm4377.c create mode 100644 drivers/bluetooth/hci_bcsp.c create mode 100644 drivers/bluetooth/hci_h4.c create mode 100644 drivers/bluetooth/hci_h5.c create mode 100644 drivers/bluetooth/hci_intel.c create mode 100644 drivers/bluetooth/hci_ldisc.c create mode 100644 drivers/bluetooth/hci_ll.c create mode 100644 drivers/bluetooth/hci_mrvl.c create mode 100644 drivers/bluetooth/hci_nokia.c create mode 100644 drivers/bluetooth/hci_qca.c create mode 100644 drivers/bluetooth/hci_serdev.c create mode 100644 drivers/bluetooth/hci_uart.h create mode 100644 drivers/bluetooth/hci_vhci.c create mode 100644 drivers/bluetooth/virtio_bt.c (limited to 'drivers/bluetooth') diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig new file mode 100644 index 000000000..bc211c324 --- /dev/null +++ b/drivers/bluetooth/Kconfig @@ -0,0 +1,482 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "Bluetooth device drivers" + depends on BT + +config BT_INTEL + tristate + select REGMAP + +config BT_BCM + tristate + select FW_LOADER + +config BT_RTL + tristate + select FW_LOADER + +config BT_QCA + tristate + select FW_LOADER + +config BT_MTK + tristate + select FW_LOADER + +config BT_HCIBTUSB + tristate "HCI USB driver" + depends on USB + select BT_INTEL + help + Bluetooth HCI USB driver. + This driver is required if you want to use Bluetooth devices with + USB interface. + + Say Y here to compile support for Bluetooth USB devices into the + kernel or say M to compile it as module (btusb). + +config BT_HCIBTUSB_AUTOSUSPEND + bool "Enable USB autosuspend for Bluetooth USB devices by default" + depends on BT_HCIBTUSB + help + Say Y here to enable USB autosuspend for Bluetooth USB devices by + default. + + This can be overridden by passing btusb.enable_autosuspend=[y|n] + on the kernel commandline. + +config BT_HCIBTUSB_POLL_SYNC + bool "Enable USB poll_sync for Bluetooth USB devices by default" + depends on BT_HCIBTUSB + default y + help + poll_sync synchronizes the USB data and event endpoints by + prioritizing the later. + + Say Y here to enable USB poll_sync for Bluetooth USB devices by + default. + +config BT_HCIBTUSB_BCM + bool "Broadcom protocol support" + depends on BT_HCIBTUSB + select BT_BCM + default y + help + The Broadcom protocol support enables firmware and patchram + download support for Broadcom Bluetooth controllers. + + Say Y here to compile support for Broadcom protocol. + +config BT_HCIBTUSB_MTK + bool "MediaTek protocol support" + depends on BT_HCIBTUSB + select BT_MTK + default n + help + The MediaTek protocol support enables firmware download + support and chip initialization for MediaTek Bluetooth + USB controllers. + + Say Y here to compile support for MediaTek protocol. + +config BT_HCIBTUSB_RTL + bool "Realtek protocol support" + depends on BT_HCIBTUSB + select BT_RTL + default y + help + The Realtek protocol support enables firmware and configuration + download support for Realtek Bluetooth controllers. + + Say Y here to compile support for Realtek protocol. 
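The BT_HCIBTUSB_AUTOSUSPEND help text above notes that the Kconfig default can be overridden with btusb.enable_autosuspend=[y|n] on the kernel command line. As a minimal sketch (not quoted from btusb.c; the variable name and permissions are illustrative), a Kconfig default is typically wired to a writable module parameter like this:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative only: take the default from the Kconfig symbol, but let
 * the user flip it at load time or on the kernel command line, e.g.
 * btusb.enable_autosuspend=n.
 */
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
module_param(enable_autosuspend, bool, 0644);
MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");

With such a parameter in place, the Kconfig option only chooses the built-in default; the runtime value remains visible and writable under /sys/module/<module>/parameters/.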
+ +config BT_HCIBTSDIO + tristate "HCI SDIO driver" + depends on MMC + help + Bluetooth HCI SDIO driver. + This driver is required if you want to use Bluetooth device with + SDIO interface. + + Say Y here to compile support for Bluetooth SDIO devices into the + kernel or say M to compile it as module (btsdio). + +config BT_HCIUART + tristate "HCI UART driver" + depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS + depends on NVMEM || !NVMEM + depends on TTY + help + Bluetooth HCI UART driver. + This driver is required if you want to use Bluetooth devices with + serial port interface. You will also need this driver if you have + UART based Bluetooth PCMCIA and CF devices like Xircom Credit Card + adapter and BrainBoxes Bluetooth PC Card. + + Say Y here to compile support for Bluetooth UART devices into the + kernel or say M to compile it as module (hci_uart). + +config BT_HCIUART_SERDEV + bool + depends on SERIAL_DEV_BUS && BT_HCIUART + default y + +config BT_HCIUART_H4 + bool "UART (H4) protocol support" + depends on BT_HCIUART + help + UART (H4) is serial protocol for communication between Bluetooth + device and host. This protocol is required for most Bluetooth devices + with UART interface, including PCMCIA and CF cards. + + Say Y here to compile support for HCI UART (H4) protocol. + +config BT_HCIUART_NOKIA + tristate "UART Nokia H4+ protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + depends on GPIOLIB + depends on PM + select BT_HCIUART_H4 + select BT_BCM + help + Nokia H4+ is serial protocol for communication between Bluetooth + device and host. This protocol is required for Bluetooth devices + with UART interface in Nokia devices. + + Say Y here to compile support for Nokia's H4+ protocol. + +config BT_HCIUART_BCSP + bool "BCSP protocol support" + depends on BT_HCIUART + select BITREVERSE + help + BCSP (BlueCore Serial Protocol) is serial protocol for communication + between Bluetooth device and host. This protocol is required for non + USB Bluetooth devices based on CSR BlueCore chip, including PCMCIA and + CF cards. + + Say Y here to compile support for HCI BCSP protocol. + +config BT_HCIUART_ATH3K + bool "Atheros AR300x serial support" + depends on BT_HCIUART + select BT_HCIUART_H4 + help + HCIATH3K (HCI Atheros AR300x) is a serial protocol for + communication between host and Atheros AR300x Bluetooth devices. + This protocol enables AR300x chips to be enabled with + power management support. + Enable this if you have Atheros AR300x serial Bluetooth device. + + Say Y here to compile support for HCI UART ATH3K protocol. + +config BT_HCIUART_LL + bool "HCILL protocol support" + depends on BT_HCIUART_SERDEV + select BT_HCIUART_H4 + help + HCILL (HCI Low Level) is a serial protocol for communication + between Bluetooth device and host. This protocol is required for + serial Bluetooth devices that are based on Texas Instruments' + BRF chips. + + Say Y here to compile support for HCILL protocol. + +config BT_HCIUART_3WIRE + bool "Three-wire UART (H5) protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + help + The HCI Three-wire UART Transport Layer makes it possible to + user the Bluetooth HCI over a serial port interface. The HCI + Three-wire UART Transport Layer assumes that the UART + communication may have bit errors, overrun errors or burst + errors and thereby making CTS/RTS lines unnecessary. + + Say Y here to compile support for Three-wire UART protocol. 
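The UART protocol options above (H4, BCSP, HCILL, three-wire H5) all describe ways of framing HCI packets over a serial link. In the simplest case, H4, each HCI packet is merely prefixed with a one-byte packet type indicator. The sketch below shows that framing in isolation; the indicator values follow the Bluetooth HCI UART transport layer specification, but the helper function itself is illustrative and is not part of this patch.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* H4 packet type indicators (HCI UART transport layer) */
#define H4_CMD   0x01  /* HCI command (host -> controller) */
#define H4_ACL   0x02  /* ACL data    (both directions)    */
#define H4_SCO   0x03  /* SCO data    (both directions)    */
#define H4_EVENT 0x04  /* HCI event   (controller -> host) */

/*
 * Illustrative helper: frame one HCI packet for an H4 UART link by
 * prepending the packet type indicator. Returns the framed length,
 * or 0 if the output buffer is too small.
 */
static size_t h4_frame(uint8_t type, const uint8_t *hci, size_t hci_len,
                       uint8_t *out, size_t out_len)
{
	if (out_len < hci_len + 1)
		return 0;
	out[0] = type;
	memcpy(out + 1, hci, hci_len);
	return hci_len + 1;
}

On the receive side the same indicator byte selects which HCI header to parse next, which is roughly what the H4 receive state machine in this patch (hci_h4.c / h4_recv.h) does; BCSP and H5 add sequence numbers, checksums and retransmission on top because they assume an unreliable UART.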
+ +config BT_HCIUART_INTEL + bool "Intel protocol support" + depends on BT_HCIUART + depends on GPIOLIB + select BT_HCIUART_H4 + select BT_INTEL + help + The Intel protocol support enables Bluetooth HCI over serial + port interface for Intel Bluetooth controllers. + + Say Y here to compile support for Intel protocol. + +config BT_HCIUART_BCM + bool "Broadcom protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT) + depends on GPIOLIB + select BT_HCIUART_H4 + select BT_BCM + help + The Broadcom protocol support enables Bluetooth HCI over serial + port interface for Broadcom Bluetooth controllers. + + Say Y here to compile support for Broadcom protocol. + +config BT_HCIUART_RTL + bool "Realtek protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + depends on GPIOLIB + depends on (ACPI || SERIAL_DEV_CTRL_TTYPORT) + select BT_HCIUART_3WIRE + select BT_RTL + help + The Realtek protocol support enables Bluetooth HCI over 3-Wire + serial port interface for Realtek Bluetooth controllers. + + Say Y here to compile support for Realtek protocol. + +config BT_HCIUART_QCA + bool "Qualcomm Atheros protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + select BT_HCIUART_H4 + select BT_QCA + help + The Qualcomm Atheros protocol supports HCI In-Band Sleep feature + over serial port interface(H4) between controller and host. + This protocol is required for UART clock control for QCA Bluetooth + devices. + + Say Y here to compile support for QCA protocol. + +config BT_HCIUART_AG6XX + bool "Intel AG6XX protocol support" + depends on BT_HCIUART + select BT_HCIUART_H4 + select BT_INTEL + help + The Intel/AG6XX protocol support enables Bluetooth HCI over serial + port interface for Intel ibt 2.1 Bluetooth controllers. + + Say Y here to compile support for Intel AG6XX protocol. + +config BT_HCIUART_MRVL + bool "Marvell protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + select BT_HCIUART_H4 + help + Marvell is serial protocol for communication between Bluetooth + device and host. This protocol is required for most Marvell Bluetooth + devices with UART interface. + + Say Y here to compile support for HCI MRVL protocol. + +config BT_HCIBCM203X + tristate "HCI BCM203x USB driver" + depends on USB + select FW_LOADER + help + Bluetooth HCI BCM203x USB driver. + This driver provides the firmware loading mechanism for the Broadcom + Blutonium based devices. + + Say Y here to compile support for HCI BCM203x devices into the + kernel or say M to compile it as module (bcm203x). + + +config BT_HCIBCM4377 + tristate "HCI BCM4377/4378/4387 PCIe driver" + depends on PCI + select FW_LOADER + help + Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via + PCIe. These are usually found in Apple machines. + + Say Y here to compile support for HCI BCM4377 family devices into the + kernel or say M to compile it as module (hci_bcm4377). + +config BT_HCIBPA10X + tristate "HCI BPA10x USB driver" + depends on USB + help + Bluetooth HCI BPA10x USB driver. + This driver provides support for the Digianswer BPA 100/105 Bluetooth + sniffer devices. + + Say Y here to compile support for HCI BPA10x devices into the + kernel or say M to compile it as module (bpa10x). + +config BT_HCIBFUSB + tristate "HCI BlueFRITZ! USB driver" + depends on USB + select FW_LOADER + help + Bluetooth HCI BlueFRITZ! USB driver. + This driver provides support for Bluetooth USB devices with AVM + interface: + AVM BlueFRITZ! 
USB + + Say Y here to compile support for HCI BFUSB devices into the + kernel or say M to compile it as module (bfusb). + +config BT_HCIDTL1 + tristate "HCI DTL1 (PC Card) driver" + depends on PCMCIA + help + Bluetooth HCI DTL1 (PC Card) driver. + This driver provides support for Bluetooth PCMCIA devices with + Nokia DTL1 interface: + Nokia Bluetooth Card + Socket Bluetooth CF Card + + Say Y here to compile support for HCI DTL1 devices into the + kernel or say M to compile it as module (dtl1_cs). + +config BT_HCIBT3C + tristate "HCI BT3C (PC Card) driver" + depends on PCMCIA + select FW_LOADER + help + Bluetooth HCI BT3C (PC Card) driver. + This driver provides support for Bluetooth PCMCIA devices with + 3Com BT3C interface: + 3Com Bluetooth Card (3CRWB6096) + HP Bluetooth Card + + Say Y here to compile support for HCI BT3C devices into the + kernel or say M to compile it as module (bt3c_cs). + +config BT_HCIBLUECARD + tristate "HCI BlueCard (PC Card) driver" + depends on PCMCIA + help + Bluetooth HCI BlueCard (PC Card) driver. + This driver provides support for Bluetooth PCMCIA devices with + Anycom BlueCard interface: + Anycom Bluetooth PC Card + Anycom Bluetooth CF Card + + Say Y here to compile support for HCI BlueCard devices into the + kernel or say M to compile it as module (bluecard_cs). + +config BT_HCIVHCI + tristate "HCI VHCI (Virtual HCI device) driver" + select WANT_DEV_COREDUMP + help + Bluetooth Virtual HCI device driver. + This driver is required if you want to use HCI Emulation software. + + Say Y here to compile support for virtual HCI devices into the + kernel or say M to compile it as module (hci_vhci). + +config BT_MRVL + tristate "Marvell Bluetooth driver support" + help + The core driver to support Marvell Bluetooth devices. + + This driver is required if you want to support + Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8987/8997. + + Say Y here to compile Marvell Bluetooth driver + into the kernel or say M to compile it as module. + +config BT_MRVL_SDIO + tristate "Marvell BT-over-SDIO driver" + depends on BT_MRVL && MMC + select FW_LOADER + select WANT_DEV_COREDUMP + help + The driver for Marvell Bluetooth chipsets with SDIO interface. + + This driver is required if you want to use Marvell Bluetooth + devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997 + chipsets are supported. + + Say Y here to compile support for Marvell BT-over-SDIO driver + into the kernel or say M to compile it as module. + +config BT_ATH3K + tristate "Atheros firmware download driver" + depends on BT_HCIBTUSB + select FW_LOADER + help + Bluetooth firmware download driver. + This driver loads the firmware into the Atheros Bluetooth + chipset. + + Say Y here to compile support for "Atheros firmware download driver" + into the kernel or say M to compile it as module (ath3k). + +config BT_MTKSDIO + tristate "MediaTek HCI SDIO driver" + depends on MMC + select BT_MTK + help + MediaTek Bluetooth HCI SDIO driver. + This driver is required if you want to use MediaTek Bluetooth + with SDIO interface. + + Say Y here to compile support for MediaTek Bluetooth SDIO devices + into the kernel or say M to compile it as module (btmtksdio). + +config BT_MTKUART + tristate "MediaTek HCI UART driver" + depends on SERIAL_DEV_BUS + select BT_MTK + help + MediaTek Bluetooth HCI UART driver. + This driver is required if you want to use MediaTek Bluetooth + with serial interface. 
+ + Say Y here to compile support for MediaTek Bluetooth UART devices + into the kernel or say M to compile it as module (btmtkuart). + +config BT_QCOMSMD + tristate "Qualcomm SMD based HCI support" + depends on RPMSG || (COMPILE_TEST && RPMSG=n) + depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) + select BT_QCA + help + Qualcomm SMD based HCI driver. + This driver is used to bridge HCI data onto the shared memory + channels to the WCNSS core. + + Say Y here to compile support for HCI over Qualcomm SMD into the + kernel or say M to compile as a module. + +config BT_HCIRSI + tristate + help + Redpine BT driver. + This driver handles BT traffic from upper layers and pass + to the RSI_91x coex module for further scheduling to device + + Say Y here to compile support for HCI over Redpine into the + kernel or say M to compile as a module. + +config BT_VIRTIO + tristate "Virtio Bluetooth driver" + depends on VIRTIO + help + Virtio Bluetooth support driver. + This driver supports Virtio Bluetooth devices. + + Say Y here to compile support for HCI over Virtio into the + kernel or say M to compile as a module. + +config BT_NXPUART + tristate "NXP protocol support" + depends on SERIAL_DEV_BUS + select CRC32 + select CRC8 + help + NXP is serial driver required for NXP Bluetooth + devices with UART interface. + + Say Y here to compile support for NXP Bluetooth UART device into + the kernel, or say M here to compile as a module (btnxpuart). + + +endmenu diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile new file mode 100644 index 000000000..7a5967e9a --- /dev/null +++ b/drivers/bluetooth/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux Bluetooth HCI device drivers. +# + +obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o +obj-$(CONFIG_BT_HCIUART) += hci_uart.o +obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o +obj-$(CONFIG_BT_HCIBCM4377) += hci_bcm4377.o +obj-$(CONFIG_BT_HCIBPA10X) += bpa10x.o +obj-$(CONFIG_BT_HCIBFUSB) += bfusb.o +obj-$(CONFIG_BT_HCIDTL1) += dtl1_cs.o +obj-$(CONFIG_BT_HCIBT3C) += bt3c_cs.o +obj-$(CONFIG_BT_HCIBLUECARD) += bluecard_cs.o + +obj-$(CONFIG_BT_HCIBTUSB) += btusb.o +obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o + +obj-$(CONFIG_BT_INTEL) += btintel.o +obj-$(CONFIG_BT_ATH3K) += ath3k.o +obj-$(CONFIG_BT_MRVL) += btmrvl.o +obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o +obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o +obj-$(CONFIG_BT_MTKUART) += btmtkuart.o +obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o +obj-$(CONFIG_BT_BCM) += btbcm.o +obj-$(CONFIG_BT_RTL) += btrtl.o +obj-$(CONFIG_BT_QCA) += btqca.o +obj-$(CONFIG_BT_MTK) += btmtk.o + +obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o +obj-$(CONFIG_BT_NXPUART) += btnxpuart.o + +obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o + +obj-$(CONFIG_BT_HCIRSI) += btrsi.o + +btmrvl-y := btmrvl_main.o +btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o + +hci_uart-y := hci_ldisc.o +hci_uart-$(CONFIG_BT_HCIUART_SERDEV) += hci_serdev.o +hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o +hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o +hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o +hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o +hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o +hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o +hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o +hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o +hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o +hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o +hci_uart-objs := $(hci_uart-y) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c new file 
mode 100644 index 000000000..88262d3a9 --- /dev/null +++ b/drivers/bluetooth/ath3k.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VERSION "1.0" +#define ATH3K_FIRMWARE "ath3k-1.fw" + +#define ATH3K_DNLOAD 0x01 +#define ATH3K_GETSTATE 0x05 +#define ATH3K_SET_NORMAL_MODE 0x07 +#define ATH3K_GETVERSION 0x09 +#define USB_REG_SWITCH_VID_PID 0x0a + +#define ATH3K_MODE_MASK 0x3F +#define ATH3K_NORMAL_MODE 0x0E + +#define ATH3K_PATCH_UPDATE 0x80 +#define ATH3K_SYSCFG_UPDATE 0x40 + +#define ATH3K_XTAL_FREQ_26M 0x00 +#define ATH3K_XTAL_FREQ_40M 0x01 +#define ATH3K_XTAL_FREQ_19P2 0x02 +#define ATH3K_NAME_LEN 0xFF + +struct ath3k_version { + __le32 rom_version; + __le32 build_version; + __le32 ram_version; + __u8 ref_clock; + __u8 reserved[7]; +} __packed; + +static const struct usb_device_id ath3k_table[] = { + /* Atheros AR3011 */ + { USB_DEVICE(0x0CF3, 0x3000) }, + + /* Atheros AR3011 with sflash firmware*/ + { USB_DEVICE(0x0489, 0xE027) }, + { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x04F2, 0xAFF1) }, + { USB_DEVICE(0x0930, 0x0215) }, + { USB_DEVICE(0x0CF3, 0x3002) }, + { USB_DEVICE(0x0CF3, 0xE019) }, + { USB_DEVICE(0x13d3, 0x3304) }, + + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03F0, 0x311D) }, + + /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0489, 0xe04d) }, + { USB_DEVICE(0x0489, 0xe04e) }, + { USB_DEVICE(0x0489, 0xe057) }, + { USB_DEVICE(0x0489, 0xe056) }, + { USB_DEVICE(0x0489, 0xe05f) }, + { USB_DEVICE(0x0489, 0xe076) }, + { USB_DEVICE(0x0489, 0xe078) }, + { USB_DEVICE(0x0489, 0xe095) }, + { USB_DEVICE(0x04c5, 0x1330) }, + { USB_DEVICE(0x04CA, 0x3004) }, + { USB_DEVICE(0x04CA, 0x3005) }, + { USB_DEVICE(0x04CA, 0x3006) }, + { USB_DEVICE(0x04CA, 0x3007) }, + { USB_DEVICE(0x04CA, 0x3008) }, + { USB_DEVICE(0x04CA, 0x300b) }, + { USB_DEVICE(0x04CA, 0x300d) }, + { USB_DEVICE(0x04CA, 0x300f) }, + { USB_DEVICE(0x04CA, 0x3010) }, + { USB_DEVICE(0x04CA, 0x3014) }, + { USB_DEVICE(0x04CA, 0x3018) }, + { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0930, 0x021c) }, + { USB_DEVICE(0x0930, 0x0220) }, + { USB_DEVICE(0x0930, 0x0227) }, + { USB_DEVICE(0x0b05, 0x17d0) }, + { USB_DEVICE(0x0CF3, 0x0036) }, + { USB_DEVICE(0x0CF3, 0x3004) }, + { USB_DEVICE(0x0CF3, 0x3008) }, + { USB_DEVICE(0x0CF3, 0x311D) }, + { USB_DEVICE(0x0CF3, 0x311E) }, + { USB_DEVICE(0x0CF3, 0x311F) }, + { USB_DEVICE(0x0cf3, 0x3121) }, + { USB_DEVICE(0x0CF3, 0x817a) }, + { USB_DEVICE(0x0CF3, 0x817b) }, + { USB_DEVICE(0x0cf3, 0xe003) }, + { USB_DEVICE(0x0CF3, 0xE004) }, + { USB_DEVICE(0x0CF3, 0xE005) }, + { USB_DEVICE(0x0CF3, 0xE006) }, + { USB_DEVICE(0x13d3, 0x3362) }, + { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x13d3, 0x3393) }, + { USB_DEVICE(0x13d3, 0x3395) }, + { USB_DEVICE(0x13d3, 0x3402) }, + { USB_DEVICE(0x13d3, 0x3408) }, + { USB_DEVICE(0x13d3, 0x3423) }, + { USB_DEVICE(0x13d3, 0x3432) }, + { USB_DEVICE(0x13d3, 0x3472) }, + { USB_DEVICE(0x13d3, 0x3474) }, + { USB_DEVICE(0x13d3, 0x3487) }, + { USB_DEVICE(0x13d3, 0x3490) }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE02C) }, + + /* Atheros AR5BBU22 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE036) }, + { USB_DEVICE(0x0489, 0xE03C) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, ath3k_table); + +#define BTUSB_ATH3012 0x80 +/* This table is to load patch and sysconfig files + * for AR3012 
+ */ +static const struct usb_device_id ath3k_blist_tbl[] = { + + /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU22 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, + + { } /* Terminating entry */ +}; + +static inline void 
ath3k_log_failed_loading(int err, int len, int size, + int count) +{ + BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d", + err, len, size, count); +} + +#define USB_REQ_DFU_DNLOAD 1 +#define BULK_SIZE 4096 +#define FW_HDR_SIZE 20 +#define TIMEGAP_USEC_MIN 50 +#define TIMEGAP_USEC_MAX 100 + +static int ath3k_load_firmware(struct usb_device *udev, + const struct firmware *firmware) +{ + u8 *send_buf; + int len = 0; + int err, pipe, size, sent = 0; + int count = firmware->size; + + BT_DBG("udev %p", udev); + + send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); + if (!send_buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + + err = usb_control_msg_send(udev, 0, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, + 0, 0, firmware->data, FW_HDR_SIZE, + USB_CTRL_SET_TIMEOUT, GFP_KERNEL); + if (err) { + BT_ERR("Can't change to loading configuration err"); + goto error; + } + sent += FW_HDR_SIZE; + count -= FW_HDR_SIZE; + + pipe = usb_sndbulkpipe(udev, 0x02); + + while (count) { + /* workaround the compatibility issue with xHCI controller*/ + usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); + + size = min_t(uint, count, BULK_SIZE); + memcpy(send_buf, firmware->data + sent, size); + + err = usb_bulk_msg(udev, pipe, send_buf, size, + &len, 3000); + + if (err || (len != size)) { + ath3k_log_failed_loading(err, len, size, count); + goto error; + } + + sent += size; + count -= size; + } + +error: + kfree(send_buf); + return err; +} + +static int ath3k_get_state(struct usb_device *udev, unsigned char *state) +{ + return usb_control_msg_recv(udev, 0, ATH3K_GETSTATE, + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, + state, 1, USB_CTRL_SET_TIMEOUT, + GFP_KERNEL); +} + +static int ath3k_get_version(struct usb_device *udev, + struct ath3k_version *version) +{ + return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION, + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, + version, sizeof(*version), USB_CTRL_SET_TIMEOUT, + GFP_KERNEL); +} + +static int ath3k_load_fwfile(struct usb_device *udev, + const struct firmware *firmware) +{ + u8 *send_buf; + int len = 0; + int err, pipe, size, count, sent = 0; + int ret; + + count = firmware->size; + + send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); + if (!send_buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + + size = min_t(uint, count, FW_HDR_SIZE); + + ret = usb_control_msg_send(udev, 0, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, + firmware->data, size, USB_CTRL_SET_TIMEOUT, + GFP_KERNEL); + if (ret) { + BT_ERR("Can't change to loading configuration err"); + kfree(send_buf); + return ret; + } + + sent += size; + count -= size; + + pipe = usb_sndbulkpipe(udev, 0x02); + + while (count) { + /* workaround the compatibility issue with xHCI controller*/ + usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); + + size = min_t(uint, count, BULK_SIZE); + memcpy(send_buf, firmware->data + sent, size); + + err = usb_bulk_msg(udev, pipe, send_buf, size, + &len, 3000); + if (err || (len != size)) { + ath3k_log_failed_loading(err, len, size, count); + kfree(send_buf); + return err; + } + sent += size; + count -= size; + } + + kfree(send_buf); + return 0; +} + +static void ath3k_switch_pid(struct usb_device *udev) +{ + usb_control_msg_send(udev, 0, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); +} + +static int ath3k_set_normal_mode(struct usb_device *udev) +{ + unsigned char fw_state; + int ret; + + ret = ath3k_get_state(udev, &fw_state); + if (ret) { + BT_ERR("Can't get state to change to normal mode err"); + 
return ret; + } + + if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { + BT_DBG("firmware was already in normal mode"); + return 0; + } + + return usb_control_msg_send(udev, 0, ATH3K_SET_NORMAL_MODE, + USB_TYPE_VENDOR, 0, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT, GFP_KERNEL); +} + +static int ath3k_load_patch(struct usb_device *udev) +{ + unsigned char fw_state; + char filename[ATH3K_NAME_LEN]; + const struct firmware *firmware; + struct ath3k_version fw_version; + __u32 pt_rom_version, pt_build_version; + int ret; + + ret = ath3k_get_state(udev, &fw_state); + if (ret) { + BT_ERR("Can't get state to change to load ram patch err"); + return ret; + } + + if (fw_state & ATH3K_PATCH_UPDATE) { + BT_DBG("Patch was already downloaded"); + return 0; + } + + ret = ath3k_get_version(udev, &fw_version); + if (ret) { + BT_ERR("Can't get version to change to load ram patch err"); + return ret; + } + + snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", + le32_to_cpu(fw_version.rom_version)); + + ret = request_firmware(&firmware, filename, &udev->dev); + if (ret < 0) { + BT_ERR("Patch file not found %s", filename); + return ret; + } + + pt_rom_version = get_unaligned_le32(firmware->data + + firmware->size - 8); + pt_build_version = get_unaligned_le32(firmware->data + + firmware->size - 4); + + if (pt_rom_version != le32_to_cpu(fw_version.rom_version) || + pt_build_version <= le32_to_cpu(fw_version.build_version)) { + BT_ERR("Patch file version did not match with firmware"); + release_firmware(firmware); + return -EINVAL; + } + + ret = ath3k_load_fwfile(udev, firmware); + release_firmware(firmware); + + return ret; +} + +static int ath3k_load_syscfg(struct usb_device *udev) +{ + unsigned char fw_state; + char filename[ATH3K_NAME_LEN]; + const struct firmware *firmware; + struct ath3k_version fw_version; + int clk_value, ret; + + ret = ath3k_get_state(udev, &fw_state); + if (ret) { + BT_ERR("Can't get state to change to load configuration err"); + return -EBUSY; + } + + ret = ath3k_get_version(udev, &fw_version); + if (ret) { + BT_ERR("Can't get version to change to load ram patch err"); + return ret; + } + + switch (fw_version.ref_clock) { + + case ATH3K_XTAL_FREQ_26M: + clk_value = 26; + break; + case ATH3K_XTAL_FREQ_40M: + clk_value = 40; + break; + case ATH3K_XTAL_FREQ_19P2: + clk_value = 19; + break; + default: + clk_value = 0; + break; + } + + snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", + le32_to_cpu(fw_version.rom_version), clk_value, ".dfu"); + + ret = request_firmware(&firmware, filename, &udev->dev); + if (ret < 0) { + BT_ERR("Configuration file not found %s", filename); + return ret; + } + + ret = ath3k_load_fwfile(udev, firmware); + release_firmware(firmware); + + return ret; +} + +static int ath3k_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + const struct firmware *firmware; + struct usb_device *udev = interface_to_usbdev(intf); + int ret; + + BT_DBG("intf %p id %p", intf, id); + + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + /* match device ID in ath3k blacklist table */ + if (!id->driver_info) { + const struct usb_device_id *match; + + match = usb_match_id(intf, ath3k_blist_tbl); + if (match) + id = match; + } + + /* load patch and sysconfig files for AR3012 */ + if (id->driver_info & BTUSB_ATH3012) { + /* New firmware with patch and sysconfig files already loaded */ + if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001) + return -ENODEV; + + ret = ath3k_load_patch(udev); + if (ret < 0) { + BT_ERR("Loading 
patch file failed"); + return ret; + } + ret = ath3k_load_syscfg(udev); + if (ret < 0) { + BT_ERR("Loading sysconfig file failed"); + return ret; + } + ret = ath3k_set_normal_mode(udev); + if (ret) { + BT_ERR("Set normal mode failed"); + return ret; + } + ath3k_switch_pid(udev); + return 0; + } + + ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev); + if (ret < 0) { + if (ret == -ENOENT) + BT_ERR("Firmware file \"%s\" not found", + ATH3K_FIRMWARE); + else + BT_ERR("Firmware file \"%s\" request failed (err=%d)", + ATH3K_FIRMWARE, ret); + return ret; + } + + ret = ath3k_load_firmware(udev, firmware); + release_firmware(firmware); + + return ret; +} + +static void ath3k_disconnect(struct usb_interface *intf) +{ + BT_DBG("%s intf %p", __func__, intf); +} + +static struct usb_driver ath3k_driver = { + .name = "ath3k", + .probe = ath3k_probe, + .disconnect = ath3k_disconnect, + .id_table = ath3k_table, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(ath3k_driver); + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(ATH3K_FIRMWARE); diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c new file mode 100644 index 000000000..c738ad040 --- /dev/null +++ b/drivers/bluetooth/bcm203x.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Broadcom Blutonium firmware driver + * + * Copyright (C) 2003 Maxim Krasnyansky + * Copyright (C) 2003 Marcel Holtmann + */ + +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +#define VERSION "1.2" + +static const struct usb_device_id bcm203x_table[] = { + /* Broadcom Blutonium (BCM2033) */ + { USB_DEVICE(0x0a5c, 0x2033) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, bcm203x_table); + +#define BCM203X_ERROR 0 +#define BCM203X_RESET 1 +#define BCM203X_LOAD_MINIDRV 2 +#define BCM203X_SELECT_MEMORY 3 +#define BCM203X_CHECK_MEMORY 4 +#define BCM203X_LOAD_FIRMWARE 5 +#define BCM203X_CHECK_FIRMWARE 6 + +#define BCM203X_IN_EP 0x81 +#define BCM203X_OUT_EP 0x02 + +struct bcm203x_data { + struct usb_device *udev; + + unsigned long state; + + struct work_struct work; + atomic_t shutdown; + + struct urb *urb; + unsigned char *buffer; + + unsigned char *fw_data; + unsigned int fw_size; + unsigned int fw_sent; +}; + +static void bcm203x_complete(struct urb *urb) +{ + struct bcm203x_data *data = urb->context; + struct usb_device *udev = urb->dev; + int len; + + BT_DBG("udev %p urb %p", udev, urb); + + if (urb->status) { + BT_ERR("URB failed with status %d", urb->status); + data->state = BCM203X_ERROR; + return; + } + + switch (data->state) { + case BCM203X_LOAD_MINIDRV: + memcpy(data->buffer, "#", 1); + + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), + data->buffer, 1, bcm203x_complete, data); + + data->state = BCM203X_SELECT_MEMORY; + + /* use workqueue to have a small delay */ + schedule_work(&data->work); + break; + + case BCM203X_SELECT_MEMORY: + usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), + data->buffer, 32, bcm203x_complete, data, 1); + + data->state = BCM203X_CHECK_MEMORY; + + if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) + BT_ERR("Can't submit URB"); + break; + + case BCM203X_CHECK_MEMORY: + if (data->buffer[0] != '#') { + BT_ERR("Memory select failed"); + data->state = BCM203X_ERROR; + break; + } + + data->state = BCM203X_LOAD_FIRMWARE; + fallthrough; + case 
BCM203X_LOAD_FIRMWARE: + if (data->fw_sent == data->fw_size) { + usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), + data->buffer, 32, bcm203x_complete, data, 1); + + data->state = BCM203X_CHECK_FIRMWARE; + } else { + len = min_t(uint, data->fw_size - data->fw_sent, 4096); + + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), + data->fw_data + data->fw_sent, len, bcm203x_complete, data); + + data->fw_sent += len; + } + + if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) + BT_ERR("Can't submit URB"); + break; + + case BCM203X_CHECK_FIRMWARE: + if (data->buffer[0] != '.') { + BT_ERR("Firmware loading failed"); + data->state = BCM203X_ERROR; + break; + } + + data->state = BCM203X_RESET; + break; + } +} + +static void bcm203x_work(struct work_struct *work) +{ + struct bcm203x_data *data = + container_of(work, struct bcm203x_data, work); + + if (atomic_read(&data->shutdown)) + return; + + if (usb_submit_urb(data->urb, GFP_KERNEL) < 0) + BT_ERR("Can't submit URB"); +} + +static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + const struct firmware *firmware; + struct usb_device *udev = interface_to_usbdev(intf); + struct bcm203x_data *data; + int size; + + BT_DBG("intf %p id %p", intf, id); + + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->udev = udev; + data->state = BCM203X_LOAD_MINIDRV; + + data->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!data->urb) + return -ENOMEM; + + if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) { + BT_ERR("Mini driver request failed"); + usb_free_urb(data->urb); + return -EIO; + } + + BT_DBG("minidrv data %p size %zu", firmware->data, firmware->size); + + size = max_t(uint, firmware->size, 4096); + + data->buffer = kmalloc(size, GFP_KERNEL); + if (!data->buffer) { + BT_ERR("Can't allocate memory for mini driver"); + release_firmware(firmware); + usb_free_urb(data->urb); + return -ENOMEM; + } + + memcpy(data->buffer, firmware->data, firmware->size); + + usb_fill_bulk_urb(data->urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), + data->buffer, firmware->size, bcm203x_complete, data); + + release_firmware(firmware); + + if (request_firmware(&firmware, "BCM2033-FW.bin", &udev->dev) < 0) { + BT_ERR("Firmware request failed"); + usb_free_urb(data->urb); + kfree(data->buffer); + return -EIO; + } + + BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); + + data->fw_data = kmemdup(firmware->data, firmware->size, GFP_KERNEL); + if (!data->fw_data) { + BT_ERR("Can't allocate memory for firmware image"); + release_firmware(firmware); + usb_free_urb(data->urb); + kfree(data->buffer); + return -ENOMEM; + } + + data->fw_size = firmware->size; + data->fw_sent = 0; + + release_firmware(firmware); + + INIT_WORK(&data->work, bcm203x_work); + + usb_set_intfdata(intf, data); + + /* use workqueue to have a small delay */ + schedule_work(&data->work); + + return 0; +} + +static void bcm203x_disconnect(struct usb_interface *intf) +{ + struct bcm203x_data *data = usb_get_intfdata(intf); + + BT_DBG("intf %p", intf); + + atomic_inc(&data->shutdown); + cancel_work_sync(&data->work); + + usb_kill_urb(data->urb); + + usb_set_intfdata(intf, NULL); + + usb_free_urb(data->urb); + kfree(data->fw_data); + kfree(data->buffer); +} + +static struct usb_driver bcm203x_driver = { + .name = "bcm203x", + .probe = bcm203x_probe, + .disconnect = bcm203x_disconnect, + .id_table = 
bcm203x_table, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(bcm203x_driver); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("BCM2033-MD.hex"); +MODULE_FIRMWARE("BCM2033-FW.bin"); diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c new file mode 100644 index 000000000..cab93935c --- /dev/null +++ b/drivers/bluetooth/bfusb.c @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * AVM BlueFRITZ! USB driver + * + * Copyright (C) 2003-2006 Marcel Holtmann + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +#define VERSION "1.2" + +static struct usb_driver bfusb_driver; + +static const struct usb_device_id bfusb_table[] = { + /* AVM BlueFRITZ! USB */ + { USB_DEVICE(0x057c, 0x2200) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, bfusb_table); + +#define BFUSB_MAX_BLOCK_SIZE 256 + +#define BFUSB_BLOCK_TIMEOUT 3000 + +#define BFUSB_TX_PROCESS 1 +#define BFUSB_TX_WAKEUP 2 + +#define BFUSB_MAX_BULK_TX 2 +#define BFUSB_MAX_BULK_RX 2 + +struct bfusb_data { + struct hci_dev *hdev; + + unsigned long state; + + struct usb_device *udev; + + unsigned int bulk_in_ep; + unsigned int bulk_out_ep; + unsigned int bulk_pkt_size; + + rwlock_t lock; + + struct sk_buff_head transmit_q; + + struct sk_buff *reassembly; + + atomic_t pending_tx; + struct sk_buff_head pending_q; + struct sk_buff_head completed_q; +}; + +struct bfusb_data_scb { + struct urb *urb; +}; + +static void bfusb_tx_complete(struct urb *urb); +static void bfusb_rx_complete(struct urb *urb); + +static struct urb *bfusb_get_completed(struct bfusb_data *data) +{ + struct sk_buff *skb; + struct urb *urb = NULL; + + BT_DBG("bfusb %p", data); + + skb = skb_dequeue(&data->completed_q); + if (skb) { + urb = ((struct bfusb_data_scb *) skb->cb)->urb; + kfree_skb(skb); + } + + return urb; +} + +static void bfusb_unlink_urbs(struct bfusb_data *data) +{ + struct sk_buff *skb; + struct urb *urb; + + BT_DBG("bfusb %p", data); + + while ((skb = skb_dequeue(&data->pending_q))) { + urb = ((struct bfusb_data_scb *) skb->cb)->urb; + usb_kill_urb(urb); + skb_queue_tail(&data->completed_q, skb); + } + + while ((urb = bfusb_get_completed(data))) + usb_free_urb(urb); +} + +static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb) +{ + struct bfusb_data_scb *scb = (void *) skb->cb; + struct urb *urb = bfusb_get_completed(data); + int err, pipe; + + BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len); + + if (!urb) { + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + } + + pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); + + usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, + bfusb_tx_complete, skb); + + scb->urb = urb; + + skb_queue_tail(&data->pending_q, skb); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + bt_dev_err(data->hdev, "bulk tx submit failed urb %p err %d", + urb, err); + skb_unlink(skb, &data->pending_q); + usb_free_urb(urb); + } else + atomic_inc(&data->pending_tx); + + return err; +} + +static void bfusb_tx_wakeup(struct bfusb_data *data) +{ + struct sk_buff *skb; + + BT_DBG("bfusb %p", data); + + if (test_and_set_bit(BFUSB_TX_PROCESS, &data->state)) { + set_bit(BFUSB_TX_WAKEUP, &data->state); + return; + } + + do { + clear_bit(BFUSB_TX_WAKEUP, &data->state); + + while ((atomic_read(&data->pending_tx) < 
BFUSB_MAX_BULK_TX) && + (skb = skb_dequeue(&data->transmit_q))) { + if (bfusb_send_bulk(data, skb) < 0) { + skb_queue_head(&data->transmit_q, skb); + break; + } + } + + } while (test_bit(BFUSB_TX_WAKEUP, &data->state)); + + clear_bit(BFUSB_TX_PROCESS, &data->state); +} + +static void bfusb_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct bfusb_data *data = (struct bfusb_data *) skb->dev; + + BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); + + atomic_dec(&data->pending_tx); + + if (!test_bit(HCI_RUNNING, &data->hdev->flags)) + return; + + if (!urb->status) + data->hdev->stat.byte_tx += skb->len; + else + data->hdev->stat.err_tx++; + + read_lock(&data->lock); + + skb_unlink(skb, &data->pending_q); + skb_queue_tail(&data->completed_q, skb); + + bfusb_tx_wakeup(data); + + read_unlock(&data->lock); +} + + +static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb) +{ + struct bfusb_data_scb *scb; + struct sk_buff *skb; + int err, pipe, size = HCI_MAX_FRAME_SIZE + 32; + + BT_DBG("bfusb %p urb %p", data, urb); + + if (!urb) { + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + } + + skb = bt_skb_alloc(size, GFP_ATOMIC); + if (!skb) { + usb_free_urb(urb); + return -ENOMEM; + } + + skb->dev = (void *) data; + + scb = (struct bfusb_data_scb *) skb->cb; + scb->urb = urb; + + pipe = usb_rcvbulkpipe(data->udev, data->bulk_in_ep); + + usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size, + bfusb_rx_complete, skb); + + skb_queue_tail(&data->pending_q, skb); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + bt_dev_err(data->hdev, "bulk rx submit failed urb %p err %d", + urb, err); + skb_unlink(skb, &data->pending_q); + kfree_skb(skb); + usb_free_urb(urb); + } + + return err; +} + +static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned char *buf, int len) +{ + BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len); + + if (hdr & 0x10) { + bt_dev_err(data->hdev, "error in block"); + kfree_skb(data->reassembly); + data->reassembly = NULL; + return -EIO; + } + + if (hdr & 0x04) { + struct sk_buff *skb; + unsigned char pkt_type; + int pkt_len = 0; + + if (data->reassembly) { + bt_dev_err(data->hdev, "unexpected start block"); + kfree_skb(data->reassembly); + data->reassembly = NULL; + } + + if (len < 1) { + bt_dev_err(data->hdev, "no packet type found"); + return -EPROTO; + } + + pkt_type = *buf++; len--; + + switch (pkt_type) { + case HCI_EVENT_PKT: + if (len >= HCI_EVENT_HDR_SIZE) { + struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf; + pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen; + } else { + bt_dev_err(data->hdev, "event block is too short"); + return -EILSEQ; + } + break; + + case HCI_ACLDATA_PKT: + if (len >= HCI_ACL_HDR_SIZE) { + struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf; + pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen); + } else { + bt_dev_err(data->hdev, "data block is too short"); + return -EILSEQ; + } + break; + + case HCI_SCODATA_PKT: + if (len >= HCI_SCO_HDR_SIZE) { + struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf; + pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen; + } else { + bt_dev_err(data->hdev, "audio block is too short"); + return -EILSEQ; + } + break; + } + + skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); + if (!skb) { + bt_dev_err(data->hdev, "no memory for the packet"); + return -ENOMEM; + } + + hci_skb_pkt_type(skb) = pkt_type; + + data->reassembly = skb; + } else { + if (!data->reassembly) { + bt_dev_err(data->hdev, "unexpected 
continuation block"); + return -EIO; + } + } + + if (len > 0) + skb_put_data(data->reassembly, buf, len); + + if (hdr & 0x08) { + hci_recv_frame(data->hdev, data->reassembly); + data->reassembly = NULL; + } + + return 0; +} + +static void bfusb_rx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct bfusb_data *data = (struct bfusb_data *) skb->dev; + unsigned char *buf = urb->transfer_buffer; + int count = urb->actual_length; + int err, hdr, len; + + BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); + + read_lock(&data->lock); + + if (!test_bit(HCI_RUNNING, &data->hdev->flags)) + goto unlock; + + if (urb->status || !count) + goto resubmit; + + data->hdev->stat.byte_rx += count; + + skb_put(skb, count); + + while (count) { + hdr = buf[0] | (buf[1] << 8); + + if (hdr & 0x4000) { + len = 0; + count -= 2; + buf += 2; + } else { + len = (buf[2] == 0) ? 256 : buf[2]; + count -= 3; + buf += 3; + } + + if (count < len) { + bt_dev_err(data->hdev, "block extends over URB buffer ranges"); + } + + if ((hdr & 0xe1) == 0xc1) + bfusb_recv_block(data, hdr, buf, len); + + count -= len; + buf += len; + } + + skb_unlink(skb, &data->pending_q); + kfree_skb(skb); + + bfusb_rx_submit(data, urb); + + read_unlock(&data->lock); + + return; + +resubmit: + urb->dev = data->udev; + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + bt_dev_err(data->hdev, "bulk resubmit failed urb %p err %d", + urb, err); + } + +unlock: + read_unlock(&data->lock); +} + +static int bfusb_open(struct hci_dev *hdev) +{ + struct bfusb_data *data = hci_get_drvdata(hdev); + unsigned long flags; + int i, err; + + BT_DBG("hdev %p bfusb %p", hdev, data); + + write_lock_irqsave(&data->lock, flags); + + err = bfusb_rx_submit(data, NULL); + if (!err) { + for (i = 1; i < BFUSB_MAX_BULK_RX; i++) + bfusb_rx_submit(data, NULL); + } + + write_unlock_irqrestore(&data->lock, flags); + + return err; +} + +static int bfusb_flush(struct hci_dev *hdev) +{ + struct bfusb_data *data = hci_get_drvdata(hdev); + + BT_DBG("hdev %p bfusb %p", hdev, data); + + skb_queue_purge(&data->transmit_q); + + return 0; +} + +static int bfusb_close(struct hci_dev *hdev) +{ + struct bfusb_data *data = hci_get_drvdata(hdev); + unsigned long flags; + + BT_DBG("hdev %p bfusb %p", hdev, data); + + write_lock_irqsave(&data->lock, flags); + write_unlock_irqrestore(&data->lock, flags); + + bfusb_unlink_urbs(data); + bfusb_flush(hdev); + + return 0; +} + +static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bfusb_data *data = hci_get_drvdata(hdev); + struct sk_buff *nskb; + unsigned char buf[3]; + int sent = 0, size, count; + + BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, + hci_skb_pkt_type(skb), skb->len); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + count = skb->len; + + /* Max HCI frame size seems to be 1511 + 1 */ + nskb = bt_skb_alloc(count + 32, GFP_KERNEL); + if (!nskb) { + bt_dev_err(hdev, "Can't allocate memory for new packet"); + return -ENOMEM; + } + + nskb->dev = (void *) data; + + while (count) { + size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE); + + buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 0x08 : 0); + buf[1] = 0x00; + buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 
0 : size; + + skb_put_data(nskb, buf, 3); + skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); + + sent += size; + count -= size; + } + + /* Don't send frame with multiple size of bulk max packet */ + if ((nskb->len % data->bulk_pkt_size) == 0) { + buf[0] = 0xdd; + buf[1] = 0x00; + skb_put_data(nskb, buf, 2); + } + + read_lock(&data->lock); + + skb_queue_tail(&data->transmit_q, nskb); + bfusb_tx_wakeup(data); + + read_unlock(&data->lock); + + kfree_skb(skb); + + return 0; +} + +static int bfusb_load_firmware(struct bfusb_data *data, + const unsigned char *firmware, int count) +{ + unsigned char *buf; + int err, pipe, len, size, sent = 0; + + BT_DBG("bfusb %p udev %p", data, data->udev); + + BT_INFO("BlueFRITZ! USB loading firmware"); + + buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL); + if (!buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + + pipe = usb_sndctrlpipe(data->udev, 0); + + if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, + 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { + BT_ERR("Can't change to loading configuration"); + kfree(buf); + return -EBUSY; + } + + data->udev->toggle[0] = data->udev->toggle[1] = 0; + + pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); + + while (count) { + size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE + 3); + + memcpy(buf, firmware + sent, size); + + err = usb_bulk_msg(data->udev, pipe, buf, size, + &len, BFUSB_BLOCK_TIMEOUT); + + if (err || (len != size)) { + BT_ERR("Error in firmware loading"); + goto error; + } + + sent += size; + count -= size; + } + + err = usb_bulk_msg(data->udev, pipe, NULL, 0, + &len, BFUSB_BLOCK_TIMEOUT); + if (err < 0) { + BT_ERR("Error in null packet request"); + goto error; + } + + pipe = usb_sndctrlpipe(data->udev, 0); + + err = usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, + 0, 2, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + BT_ERR("Can't change to running configuration"); + goto error; + } + + data->udev->toggle[0] = data->udev->toggle[1] = 0; + + BT_INFO("BlueFRITZ! 
USB device ready"); + + kfree(buf); + return 0; + +error: + kfree(buf); + + pipe = usb_sndctrlpipe(data->udev, 0); + + usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, + 0, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + + return err; +} + +static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + const struct firmware *firmware; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_host_endpoint *bulk_out_ep; + struct usb_host_endpoint *bulk_in_ep; + struct hci_dev *hdev; + struct bfusb_data *data; + + BT_DBG("intf %p id %p", intf, id); + + /* Check number of endpoints */ + if (intf->cur_altsetting->desc.bNumEndpoints < 2) + return -EIO; + + bulk_out_ep = &intf->cur_altsetting->endpoint[0]; + bulk_in_ep = &intf->cur_altsetting->endpoint[1]; + + if (!bulk_out_ep || !bulk_in_ep) { + BT_ERR("Bulk endpoints not found"); + goto done; + } + + /* Initialize control structure and load firmware */ + data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->udev = udev; + data->bulk_in_ep = bulk_in_ep->desc.bEndpointAddress; + data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress; + data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize); + + if (!data->bulk_pkt_size) + goto done; + + rwlock_init(&data->lock); + + data->reassembly = NULL; + + skb_queue_head_init(&data->transmit_q); + skb_queue_head_init(&data->pending_q); + skb_queue_head_init(&data->completed_q); + + if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) { + BT_ERR("Firmware request failed"); + goto done; + } + + BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); + + if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) { + BT_ERR("Firmware loading failed"); + goto release; + } + + release_firmware(firmware); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + goto done; + } + + data->hdev = hdev; + + hdev->bus = HCI_USB; + hci_set_drvdata(hdev, data); + SET_HCIDEV_DEV(hdev, &intf->dev); + + hdev->open = bfusb_open; + hdev->close = bfusb_close; + hdev->flush = bfusb_flush; + hdev->send = bfusb_send_frame; + + set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks); + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + hci_free_dev(hdev); + goto done; + } + + usb_set_intfdata(intf, data); + + return 0; + +release: + release_firmware(firmware); + +done: + return -EIO; +} + +static void bfusb_disconnect(struct usb_interface *intf) +{ + struct bfusb_data *data = usb_get_intfdata(intf); + struct hci_dev *hdev = data->hdev; + + BT_DBG("intf %p", intf); + + if (!hdev) + return; + + usb_set_intfdata(intf, NULL); + + bfusb_close(hdev); + + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +static struct usb_driver bfusb_driver = { + .name = "bfusb", + .probe = bfusb_probe, + .disconnect = bfusb_disconnect, + .id_table = bfusb_table, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(bfusb_driver); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("BlueFRITZ! 
USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("bfubase.frm"); diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c new file mode 100644 index 000000000..36eabf617 --- /dev/null +++ b/drivers/bluetooth/bluecard_cs.c @@ -0,0 +1,908 @@ +/* + * + * Bluetooth driver for the Anycom BlueCard (LSE039/LSE041) + * + * Copyright (C) 2001-2002 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for the Anycom BlueCard (LSE039/LSE041)"); +MODULE_LICENSE("GPL"); + + + +/* ======================== Local structures ======================== */ + + +struct bluecard_info { + struct pcmcia_device *p_dev; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + struct timer_list timer; /* For LED control */ + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; + + unsigned char ctrl_reg; + unsigned long hw_state; /* Status of the hardware and LED control */ +}; + + +static int bluecard_config(struct pcmcia_device *link); +static void bluecard_release(struct pcmcia_device *link); + +static void bluecard_detach(struct pcmcia_device *p_dev); + + +/* Default baud rate: 57600, 115200, 230400 or 460800 */ +#define DEFAULT_BAUD_RATE 230400 + + +/* Hardware states */ +#define CARD_READY 1 +#define CARD_ACTIVITY 2 +#define CARD_HAS_PCCARD_ID 4 +#define CARD_HAS_POWER_LED 5 +#define CARD_HAS_ACTIVITY_LED 6 + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_BUFFER_NUMBER 5 /* unset = buffer one, set = buffer two */ +#define XMIT_BUF_ONE_READY 6 +#define XMIT_BUF_TWO_READY 7 +#define XMIT_SENDING_READY 8 + +/* Receiver states */ +#define RECV_WAIT_PACKET_TYPE 0 +#define RECV_WAIT_EVENT_HEADER 1 +#define RECV_WAIT_ACL_HEADER 2 +#define RECV_WAIT_SCO_HEADER 3 +#define RECV_WAIT_DATA 4 + +/* Special packet types */ +#define PKT_BAUD_RATE_57600 0x80 +#define PKT_BAUD_RATE_115200 0x81 +#define PKT_BAUD_RATE_230400 0x82 +#define PKT_BAUD_RATE_460800 0x83 + + +/* These are the register offsets */ +#define REG_COMMAND 0x20 +#define REG_INTERRUPT 0x21 +#define REG_CONTROL 0x22 +#define REG_RX_CONTROL 0x24 +#define REG_CARD_RESET 0x30 +#define REG_LED_CTRL 0x30 + +/* REG_COMMAND */ +#define REG_COMMAND_TX_BUF_ONE 0x01 +#define REG_COMMAND_TX_BUF_TWO 0x02 +#define REG_COMMAND_RX_BUF_ONE 0x04 +#define REG_COMMAND_RX_BUF_TWO 0x08 +#define REG_COMMAND_RX_WIN_ONE 0x00 +#define REG_COMMAND_RX_WIN_TWO 0x10 + +/* REG_CONTROL */ +#define REG_CONTROL_BAUD_RATE_57600 
0x00 +#define REG_CONTROL_BAUD_RATE_115200 0x01 +#define REG_CONTROL_BAUD_RATE_230400 0x02 +#define REG_CONTROL_BAUD_RATE_460800 0x03 +#define REG_CONTROL_RTS 0x04 +#define REG_CONTROL_BT_ON 0x08 +#define REG_CONTROL_BT_RESET 0x10 +#define REG_CONTROL_BT_RES_PU 0x20 +#define REG_CONTROL_INTERRUPT 0x40 +#define REG_CONTROL_CARD_RESET 0x80 + +/* REG_RX_CONTROL */ +#define RTS_LEVEL_SHIFT_BITS 0x02 + + + +/* ======================== LED handling routines ======================== */ + + +static void bluecard_activity_led_timeout(struct timer_list *t) +{ + struct bluecard_info *info = from_timer(info, t, timer); + unsigned int iobase = info->p_dev->resource[0]->start; + + if (test_bit(CARD_ACTIVITY, &(info->hw_state))) { + /* leave LED in inactive state for HZ/10 for blink effect */ + clear_bit(CARD_ACTIVITY, &(info->hw_state)); + mod_timer(&(info->timer), jiffies + HZ / 10); + } + + /* Disable activity LED, enable power LED */ + outb(0x08 | 0x20, iobase + 0x30); +} + + +static void bluecard_enable_activity_led(struct bluecard_info *info) +{ + unsigned int iobase = info->p_dev->resource[0]->start; + + /* don't disturb running blink timer */ + if (timer_pending(&(info->timer))) + return; + + set_bit(CARD_ACTIVITY, &(info->hw_state)); + + if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { + /* Enable activity LED, keep power LED enabled */ + outb(0x18 | 0x60, iobase + 0x30); + } else { + /* Disable power LED */ + outb(0x00, iobase + 0x30); + } + + /* Stop the LED after HZ/10 */ + mod_timer(&(info->timer), jiffies + HZ / 10); +} + + + +/* ======================== Interrupt handling ======================== */ + + +static int bluecard_write(unsigned int iobase, unsigned int offset, __u8 *buf, int len) +{ + int i, actual; + + actual = (len > 15) ? 15 : len; + + outb_p(actual, iobase + offset); + + for (i = 0; i < actual; i++) + outb_p(buf[i], iobase + offset + i + 1); + + return actual; +} + + +static void bluecard_write_wakeup(struct bluecard_info *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if (!test_bit(XMIT_SENDING_READY, &(info->tx_state))) + return; + + if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + do { + unsigned int iobase = info->p_dev->resource[0]->start; + unsigned int offset; + unsigned char command; + unsigned long ready_bit; + register struct sk_buff *skb; + int len; + + clear_bit(XMIT_WAKEUP, &(info->tx_state)); + + if (!pcmcia_dev_present(info->p_dev)) + return; + + if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) { + if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state))) + break; + offset = 0x10; + command = REG_COMMAND_TX_BUF_TWO; + ready_bit = XMIT_BUF_TWO_READY; + } else { + if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state))) + break; + offset = 0x00; + command = REG_COMMAND_TX_BUF_ONE; + ready_bit = XMIT_BUF_ONE_READY; + } + + skb = skb_dequeue(&(info->txq)); + if (!skb) + break; + + if (hci_skb_pkt_type(skb) & 0x80) { + /* Disable RTS */ + info->ctrl_reg |= REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + } + + /* Activate LED */ + bluecard_enable_activity_led(info); + + /* Send frame */ + len = bluecard_write(iobase, offset, skb->data, skb->len); + + /* Tell the FPGA to send the data */ + outb_p(command, iobase + REG_COMMAND); + + /* Mark the buffer as dirty */ + clear_bit(ready_bit, &(info->tx_state)); + + if (hci_skb_pkt_type(skb) & 0x80) { + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DEFINE_WAIT(wait); + + unsigned char baud_reg; + + switch 
(hci_skb_pkt_type(skb)) { + case PKT_BAUD_RATE_460800: + baud_reg = REG_CONTROL_BAUD_RATE_460800; + break; + case PKT_BAUD_RATE_230400: + baud_reg = REG_CONTROL_BAUD_RATE_230400; + break; + case PKT_BAUD_RATE_115200: + baud_reg = REG_CONTROL_BAUD_RATE_115200; + break; + case PKT_BAUD_RATE_57600: + default: + baud_reg = REG_CONTROL_BAUD_RATE_57600; + break; + } + + /* Wait until the command reaches the baseband */ + mdelay(100); + + /* Set baud on baseband */ + info->ctrl_reg &= ~0x03; + info->ctrl_reg |= baud_reg; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Enable RTS */ + info->ctrl_reg &= ~REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Wait before the next HCI packet can be send */ + mdelay(1000); + } + + if (len == skb->len) { + kfree_skb(skb); + } else { + skb_pull(skb, len); + skb_queue_head(&(info->txq), skb); + } + + info->hdev->stat.byte_tx += len; + + /* Change buffer */ + change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state)); + + } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); + + clear_bit(XMIT_SENDING, &(info->tx_state)); +} + + +static int bluecard_read(unsigned int iobase, unsigned int offset, __u8 *buf, int size) +{ + int i, n, len; + + outb(REG_COMMAND_RX_WIN_ONE, iobase + REG_COMMAND); + + len = inb(iobase + offset); + n = 0; + i = 1; + + while (n < len) { + + if (i == 16) { + outb(REG_COMMAND_RX_WIN_TWO, iobase + REG_COMMAND); + i = 0; + } + + buf[n] = inb(iobase + offset + i); + + n++; + i++; + + } + + return len; +} + + +static void bluecard_receive(struct bluecard_info *info, + unsigned int offset) +{ + unsigned int iobase; + unsigned char buf[31]; + int i, len; + + if (!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->resource[0]->start; + + if (test_bit(XMIT_SENDING_READY, &(info->tx_state))) + bluecard_enable_activity_led(info); + + len = bluecard_read(iobase, offset, buf, sizeof(buf)); + + for (i = 0; i < len; i++) { + + /* Allocate packet */ + if (!info->rx_skb) { + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!info->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + return; + } + } + + if (info->rx_state == RECV_WAIT_PACKET_TYPE) { + + hci_skb_pkt_type(info->rx_skb) = buf[i]; + + switch (hci_skb_pkt_type(info->rx_skb)) { + + case 0x00: + /* init packet */ + if (offset != 0x00) { + set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); + set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); + set_bit(XMIT_SENDING_READY, &(info->tx_state)); + bluecard_write_wakeup(info); + } + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + case HCI_EVENT_PKT: + info->rx_state = RECV_WAIT_EVENT_HEADER; + info->rx_count = HCI_EVENT_HDR_SIZE; + break; + + case HCI_ACLDATA_PKT: + info->rx_state = RECV_WAIT_ACL_HEADER; + info->rx_count = HCI_ACL_HDR_SIZE; + break; + + case HCI_SCODATA_PKT: + info->rx_state = RECV_WAIT_SCO_HEADER; + info->rx_count = HCI_SCO_HDR_SIZE; + break; + + default: + /* unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); + info->hdev->stat.err_rx++; + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } else { + + skb_put_u8(info->rx_skb, buf[i]); + info->rx_count--; + + if (info->rx_count == 0) { + + int dlen; + struct hci_event_hdr *eh; + struct hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + + switch (info->rx_state) { + + case RECV_WAIT_EVENT_HEADER: + eh = hci_event_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + 
info->rx_count = eh->plen; + break; + + case RECV_WAIT_ACL_HEADER: + ah = hci_acl_hdr(info->rx_skb); + dlen = __le16_to_cpu(ah->dlen); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = dlen; + break; + + case RECV_WAIT_SCO_HEADER: + sh = hci_sco_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = sh->dlen; + break; + + case RECV_WAIT_DATA: + hci_recv_frame(info->hdev, info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } + + } + + + } + + info->hdev->stat.byte_rx += len; +} + + +static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) +{ + struct bluecard_info *info = dev_inst; + unsigned int iobase; + unsigned char reg; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + if (!test_bit(CARD_READY, &(info->hw_state))) + return IRQ_HANDLED; + + iobase = info->p_dev->resource[0]->start; + + spin_lock(&(info->lock)); + + /* Disable interrupt */ + info->ctrl_reg &= ~REG_CONTROL_INTERRUPT; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + reg = inb(iobase + REG_INTERRUPT); + + if ((reg != 0x00) && (reg != 0xff)) { + + if (reg & 0x04) { + bluecard_receive(info, 0x00); + outb(0x04, iobase + REG_INTERRUPT); + outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND); + } + + if (reg & 0x08) { + bluecard_receive(info, 0x10); + outb(0x08, iobase + REG_INTERRUPT); + outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND); + } + + if (reg & 0x01) { + set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); + outb(0x01, iobase + REG_INTERRUPT); + bluecard_write_wakeup(info); + } + + if (reg & 0x02) { + set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); + outb(0x02, iobase + REG_INTERRUPT); + bluecard_write_wakeup(info); + } + + } + + /* Enable interrupt */ + info->ctrl_reg |= REG_CONTROL_INTERRUPT; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + spin_unlock(&(info->lock)); + + return IRQ_HANDLED; +} + + + +/* ======================== Device specific HCI commands ======================== */ + + +static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) +{ + struct bluecard_info *info = hci_get_drvdata(hdev); + struct sk_buff *skb; + + /* Ericsson baud rate command */ + unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 }; + + skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL); + if (!skb) { + BT_ERR("Can't allocate mem for new packet"); + return -1; + } + + switch (baud) { + case 460800: + cmd[4] = 0x00; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_460800; + break; + case 230400: + cmd[4] = 0x01; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_230400; + break; + case 115200: + cmd[4] = 0x02; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200; + break; + case 57600: + default: + cmd[4] = 0x03; + hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600; + break; + } + + skb_put_data(skb, cmd, sizeof(cmd)); + + skb_queue_tail(&(info->txq), skb); + + bluecard_write_wakeup(info); + + return 0; +} + + + +/* ======================== HCI interface ======================== */ + + +static int bluecard_hci_flush(struct hci_dev *hdev) +{ + struct bluecard_info *info = hci_get_drvdata(hdev); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int bluecard_hci_open(struct hci_dev *hdev) +{ + struct bluecard_info *info = hci_get_drvdata(hdev); + unsigned int iobase = info->p_dev->resource[0]->start; + + if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) + bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); + + /* Enable power LED */ + outb(0x08 | 0x20, iobase + 0x30); + + return 0; +} + + +static int bluecard_hci_close(struct hci_dev 
*hdev) +{ + struct bluecard_info *info = hci_get_drvdata(hdev); + unsigned int iobase = info->p_dev->resource[0]->start; + + bluecard_hci_flush(hdev); + + /* Stop LED timer */ + del_timer_sync(&(info->timer)); + + /* Disable power LED */ + outb(0x00, iobase + 0x30); + + return 0; +} + + +static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bluecard_info *info = hci_get_drvdata(hdev); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + skb_queue_tail(&(info->txq), skb); + + bluecard_write_wakeup(info); + + return 0; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int bluecard_open(struct bluecard_info *info) +{ + unsigned int iobase = info->p_dev->resource[0]->start; + struct hci_dev *hdev; + unsigned char id; + + spin_lock_init(&(info->lock)); + + timer_setup(&info->timer, bluecard_activity_led_timeout, 0); + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = NULL; + + /* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hci_set_drvdata(hdev, info); + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = bluecard_hci_open; + hdev->close = bluecard_hci_close; + hdev->flush = bluecard_hci_flush; + hdev->send = bluecard_hci_send_frame; + + id = inb(iobase + 0x30); + + if ((id & 0x0f) == 0x02) + set_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)); + + if (id & 0x10) + set_bit(CARD_HAS_POWER_LED, &(info->hw_state)); + + if (id & 0x20) + set_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state)); + + /* Reset card */ + info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Turn FPGA off */ + outb(0x80, iobase + 0x30); + + /* Wait some time */ + msleep(10); + + /* Turn FPGA on */ + outb(0x00, iobase + 0x30); + + /* Activate card */ + info->ctrl_reg = REG_CONTROL_BT_ON | REG_CONTROL_BT_RES_PU; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Enable interrupt */ + outb(0xff, iobase + REG_INTERRUPT); + info->ctrl_reg |= REG_CONTROL_INTERRUPT; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + if ((id & 0x0f) == 0x03) { + /* Disable RTS */ + info->ctrl_reg |= REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Set baud rate */ + info->ctrl_reg |= 0x03; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Enable RTS */ + info->ctrl_reg &= ~REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); + set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); + set_bit(XMIT_SENDING_READY, &(info->tx_state)); + } + + /* Start the RX buffers */ + outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND); + outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND); + + /* Signal that the hardware is ready */ + set_bit(CARD_READY, &(info->hw_state)); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + /* Control the point at which RTS is enabled */ + outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL); + + /* Timeout before it is safe to send the first HCI packet */ + msleep(1250); + + /* Register HCI device */ + if (hci_register_dev(hdev) < 0) 
{ + BT_ERR("Can't register HCI device"); + info->hdev = NULL; + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + + +static int bluecard_close(struct bluecard_info *info) +{ + unsigned int iobase = info->p_dev->resource[0]->start; + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + bluecard_hci_close(hdev); + + clear_bit(CARD_READY, &(info->hw_state)); + + /* Reset card */ + info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Turn FPGA off */ + outb(0x80, iobase + 0x30); + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + return 0; +} + +static int bluecard_probe(struct pcmcia_device *link) +{ + struct bluecard_info *info; + + /* Create new info device */ + info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->config_flags |= CONF_ENABLE_IRQ; + + return bluecard_config(link); +} + + +static void bluecard_detach(struct pcmcia_device *link) +{ + bluecard_release(link); +} + + +static int bluecard_config(struct pcmcia_device *link) +{ + struct bluecard_info *info = link->priv; + int i, n; + + link->config_index = 0x20; + + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[0]->end = 64; + link->io_lines = 6; + + for (n = 0; n < 0x400; n += 0x40) { + link->resource[0]->start = n ^ 0x300; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + + if (i != 0) + goto failed; + + i = pcmcia_request_irq(link, bluecard_interrupt); + if (i != 0) + goto failed; + + i = pcmcia_enable_device(link); + if (i != 0) + goto failed; + + if (bluecard_open(info) != 0) + goto failed; + + return 0; + +failed: + bluecard_release(link); + return -ENODEV; +} + + +static void bluecard_release(struct pcmcia_device *link) +{ + struct bluecard_info *info = link->priv; + + bluecard_close(info); + + del_timer_sync(&(info->timer)); + + pcmcia_disable_device(link); +} + +static const struct pcmcia_device_id bluecard_ids[] = { + PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e), + PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c), + PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, bluecard_ids); + +static struct pcmcia_driver bluecard_driver = { + .owner = THIS_MODULE, + .name = "bluecard_cs", + .probe = bluecard_probe, + .remove = bluecard_detach, + .id_table = bluecard_ids, +}; +module_pcmcia_driver(bluecard_driver); diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c new file mode 100644 index 000000000..1fa58c059 --- /dev/null +++ b/drivers/bluetooth/bpa10x.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Digianswer Bluetooth USB driver + * + * Copyright (C) 2004-2007 Marcel Holtmann + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "h4_recv.h" + +#define VERSION "0.11" + +static const struct usb_device_id bpa10x_table[] = { + /* Tektronix BPA 100/105 (Digianswer) */ + { USB_DEVICE(0x08fd, 0x0002) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, bpa10x_table); + +struct bpa10x_data { + struct hci_dev *hdev; + struct usb_device *udev; + + struct usb_anchor tx_anchor; + struct usb_anchor rx_anchor; + + struct sk_buff *rx_skb[2]; +}; + +static void bpa10x_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct hci_dev *hdev = (struct 
hci_dev *) skb->dev; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (!urb->status) + hdev->stat.byte_tx += urb->transfer_buffer_length; + else + hdev->stat.err_tx++; + +done: + kfree(urb->setup_packet); + + kfree_skb(skb); +} + +#define HCI_VENDOR_HDR_SIZE 5 + +#define HCI_RECV_VENDOR \ + .type = HCI_VENDOR_PKT, \ + .hlen = HCI_VENDOR_HDR_SIZE, \ + .loff = 3, \ + .lsize = 2, \ + .maxlen = HCI_MAX_FRAME_SIZE + +static const struct h4_recv_pkt bpa10x_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { HCI_RECV_VENDOR, .recv = hci_recv_diag }, +}; + +static void bpa10x_rx_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct bpa10x_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + bool idx = usb_pipebulk(urb->pipe); + + data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx], + urb->transfer_buffer, + urb->actual_length, + bpa10x_recv_pkts, + ARRAY_SIZE(bpa10x_recv_pkts)); + if (IS_ERR(data->rx_skb[idx])) { + bt_dev_err(hdev, "corrupted event packet"); + hdev->stat.err_rx++; + data->rx_skb[idx] = NULL; + } + } + + usb_anchor_urb(urb, &data->rx_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err); + usb_unanchor_urb(urb); + } +} + +static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = 16; + + BT_DBG("%s", hdev->name); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvintpipe(data->udev, 0x81); + + usb_fill_int_urb(urb, data->udev, pipe, buf, size, + bpa10x_rx_complete, hdev, 1); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->rx_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = 64; + + BT_DBG("%s", hdev->name); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvbulkpipe(data->udev, 0x82); + + usb_fill_bulk_urb(urb, data->udev, pipe, + buf, size, bpa10x_rx_complete, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->rx_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static int bpa10x_open(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s", hdev->name); + + err = bpa10x_submit_intr_urb(hdev); + if (err < 0) + goto error; + + err = bpa10x_submit_bulk_urb(hdev); + if (err < 0) + goto error; + + 
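+ /* Both receive paths are now armed: the interrupt URB on endpoint 0x81 carries HCI events and the bulk URB on endpoint 0x82 carries ACL/SCO data. Both are held on rx_anchor, so the error path below and bpa10x_close() can cancel them with a single usb_kill_anchored_urbs() call. */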
return 0; + +error: + usb_kill_anchored_urbs(&data->rx_anchor); + + return err; +} + +static int bpa10x_close(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s", hdev->name); + + usb_kill_anchored_urbs(&data->rx_anchor); + + return 0; +} + +static int bpa10x_flush(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s", hdev->name); + + usb_kill_anchored_urbs(&data->tx_anchor); + + return 0; +} + +static int bpa10x_setup(struct hci_dev *hdev) +{ + static const u8 req[] = { 0x07 }; + struct sk_buff *skb; + + BT_DBG("%s", hdev->name); + + /* Read revision string */ + skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); + + hci_set_fw_info(hdev, "%s", skb->data + 1); + + kfree_skb(skb); + return 0; +} + +static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bpa10x_data *data = hci_get_drvdata(hdev); + struct usb_ctrlrequest *dr; + struct urb *urb; + unsigned int pipe; + int err; + + BT_DBG("%s", hdev->name); + + skb->dev = (void *) hdev; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + /* Prepend skb with frame type */ + *(u8 *)skb_push(skb, 1) = hci_skb_pkt_type(skb); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + dr = kmalloc(sizeof(*dr), GFP_KERNEL); + if (!dr) { + usb_free_urb(urb); + return -ENOMEM; + } + + dr->bRequestType = USB_TYPE_VENDOR; + dr->bRequest = 0; + dr->wIndex = 0; + dr->wValue = 0; + dr->wLength = __cpu_to_le16(skb->len); + + pipe = usb_sndctrlpipe(data->udev, 0x00); + + usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, + skb->data, skb->len, bpa10x_tx_complete, skb); + + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + pipe = usb_sndbulkpipe(data->udev, 0x02); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, bpa10x_tx_complete, skb); + + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + pipe = usb_sndbulkpipe(data->udev, 0x02); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, bpa10x_tx_complete, skb); + + hdev->stat.sco_tx++; + break; + + default: + usb_free_urb(urb); + return -EILSEQ; + } + + usb_anchor_urb(urb, &data->tx_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + bt_dev_err(hdev, "urb %p submission failed", urb); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static int bpa10x_set_diag(struct hci_dev *hdev, bool enable) +{ + const u8 req[] = { 0x00, enable }; + struct sk_buff *skb; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + /* Enable sniffer operation */ + skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int bpa10x_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct bpa10x_data *data; + struct hci_dev *hdev; + int err; + + BT_DBG("intf %p id %p", intf, id); + + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->udev = interface_to_usbdev(intf); + + init_usb_anchor(&data->tx_anchor); + init_usb_anchor(&data->rx_anchor); + + hdev = hci_alloc_dev(); + if (!hdev) + return -ENOMEM; + + hdev->bus = HCI_USB; + hci_set_drvdata(hdev, data); + + data->hdev = 
hdev; + + SET_HCIDEV_DEV(hdev, &intf->dev); + + hdev->open = bpa10x_open; + hdev->close = bpa10x_close; + hdev->flush = bpa10x_flush; + hdev->setup = bpa10x_setup; + hdev->send = bpa10x_send_frame; + hdev->set_diag = bpa10x_set_diag; + + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + err = hci_register_dev(hdev); + if (err < 0) { + hci_free_dev(hdev); + return err; + } + + usb_set_intfdata(intf, data); + + return 0; +} + +static void bpa10x_disconnect(struct usb_interface *intf) +{ + struct bpa10x_data *data = usb_get_intfdata(intf); + + BT_DBG("intf %p", intf); + + if (!data) + return; + + usb_set_intfdata(intf, NULL); + + hci_unregister_dev(data->hdev); + + hci_free_dev(data->hdev); + kfree_skb(data->rx_skb[0]); + kfree_skb(data->rx_skb[1]); +} + +static struct usb_driver bpa10x_driver = { + .name = "bpa10x", + .probe = bpa10x_probe, + .disconnect = bpa10x_disconnect, + .id_table = bpa10x_table, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(bpa10x_driver); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c new file mode 100644 index 000000000..547138339 --- /dev/null +++ b/drivers/bluetooth/bt3c_cs.c @@ -0,0 +1,749 @@ +/* + * + * Driver for the 3Com Bluetooth PCMCIA card + * + * Copyright (C) 2001-2002 Marcel Holtmann + * Jose Orlando Pereira + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. 
+ * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("BT3CPCC.bin"); + + + +/* ======================== Local structures ======================== */ + + +struct bt3c_info { + struct pcmcia_device *p_dev; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; +}; + + +static int bt3c_config(struct pcmcia_device *link); +static void bt3c_release(struct pcmcia_device *link); + +static void bt3c_detach(struct pcmcia_device *p_dev); + + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_WAITING 8 + +/* Receiver states */ +#define RECV_WAIT_PACKET_TYPE 0 +#define RECV_WAIT_EVENT_HEADER 1 +#define RECV_WAIT_ACL_HEADER 2 +#define RECV_WAIT_SCO_HEADER 3 +#define RECV_WAIT_DATA 4 + + + +/* ======================== Special I/O functions ======================== */ + + +#define DATA_L 0 +#define DATA_H 1 +#define ADDR_L 2 +#define ADDR_H 3 +#define CONTROL 4 + + +static inline void bt3c_address(unsigned int iobase, unsigned short addr) +{ + outb(addr & 0xff, iobase + ADDR_L); + outb((addr >> 8) & 0xff, iobase + ADDR_H); +} + + +static inline void bt3c_put(unsigned int iobase, unsigned short value) +{ + outb(value & 0xff, iobase + DATA_L); + outb((value >> 8) & 0xff, iobase + DATA_H); +} + + +static inline void bt3c_io_write(unsigned int iobase, unsigned short addr, unsigned short value) +{ + bt3c_address(iobase, addr); + bt3c_put(iobase, value); +} + + +static inline unsigned short bt3c_get(unsigned int iobase) +{ + unsigned short value = inb(iobase + DATA_L); + + value |= inb(iobase + DATA_H) << 8; + + return value; +} + + +static inline unsigned short bt3c_read(unsigned int iobase, unsigned short addr) +{ + bt3c_address(iobase, addr); + + return bt3c_get(iobase); +} + + + +/* ======================== Interrupt handling ======================== */ + + +static int bt3c_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) +{ + int actual = 0; + + bt3c_address(iobase, 0x7080); + + /* Fill FIFO with current frame */ + while (actual < len) { + /* Transmit next byte */ + bt3c_put(iobase, buf[actual]); + actual++; + } + + bt3c_io_write(iobase, 0x7005, actual); + + return actual; +} + + +static void bt3c_write_wakeup(struct bt3c_info *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) + return; + + do { + unsigned int iobase = info->p_dev->resource[0]->start; + register struct sk_buff *skb; + int len; + + if (!pcmcia_dev_present(info->p_dev)) + break; + + skb = skb_dequeue(&(info->txq)); + if (!skb) { + clear_bit(XMIT_SENDING, &(info->tx_state)); + break; + } + + /* Send frame */ + len = bt3c_write(iobase, 256, skb->data, skb->len); + + if (len != skb->len) + BT_ERR("Very strange"); + + kfree_skb(skb); + + info->hdev->stat.byte_tx += len; + + } while (0); +} + + +static void bt3c_receive(struct bt3c_info *info) +{ + unsigned int iobase; + int size = 0, avail; + + if (!info) { + 
BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->resource[0]->start; + + avail = bt3c_read(iobase, 0x7006); + + bt3c_address(iobase, 0x7480); + while (size < avail) { + size++; + info->hdev->stat.byte_rx++; + + /* Allocate packet */ + if (!info->rx_skb) { + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!info->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + return; + } + } + + + if (info->rx_state == RECV_WAIT_PACKET_TYPE) { + + hci_skb_pkt_type(info->rx_skb) = inb(iobase + DATA_L); + inb(iobase + DATA_H); + + switch (hci_skb_pkt_type(info->rx_skb)) { + + case HCI_EVENT_PKT: + info->rx_state = RECV_WAIT_EVENT_HEADER; + info->rx_count = HCI_EVENT_HDR_SIZE; + break; + + case HCI_ACLDATA_PKT: + info->rx_state = RECV_WAIT_ACL_HEADER; + info->rx_count = HCI_ACL_HDR_SIZE; + break; + + case HCI_SCODATA_PKT: + info->rx_state = RECV_WAIT_SCO_HEADER; + info->rx_count = HCI_SCO_HDR_SIZE; + break; + + default: + /* Unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); + info->hdev->stat.err_rx++; + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } else { + + __u8 x = inb(iobase + DATA_L); + + skb_put_u8(info->rx_skb, x); + inb(iobase + DATA_H); + info->rx_count--; + + if (info->rx_count == 0) { + + int dlen; + struct hci_event_hdr *eh; + struct hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + + switch (info->rx_state) { + + case RECV_WAIT_EVENT_HEADER: + eh = hci_event_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = eh->plen; + break; + + case RECV_WAIT_ACL_HEADER: + ah = hci_acl_hdr(info->rx_skb); + dlen = __le16_to_cpu(ah->dlen); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = dlen; + break; + + case RECV_WAIT_SCO_HEADER: + sh = hci_sco_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = sh->dlen; + break; + + case RECV_WAIT_DATA: + hci_recv_frame(info->hdev, info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } + + } + + } + + bt3c_io_write(iobase, 0x7006, 0x0000); +} + + +static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) +{ + struct bt3c_info *info = dev_inst; + unsigned int iobase; + int iir; + irqreturn_t r = IRQ_NONE; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + iobase = info->p_dev->resource[0]->start; + + spin_lock(&(info->lock)); + + iir = inb(iobase + CONTROL); + if (iir & 0x80) { + int stat = bt3c_read(iobase, 0x7001); + + if ((stat & 0xff) == 0x7f) { + BT_ERR("Very strange (stat=0x%04x)", stat); + } else if ((stat & 0xff) != 0xff) { + if (stat & 0x0020) { + int status = bt3c_read(iobase, 0x7002) & 0x10; + bt_dev_info(info->hdev, "Antenna %s", + status ? 
"out" : "in"); + } + if (stat & 0x0001) + bt3c_receive(info); + if (stat & 0x0002) { + clear_bit(XMIT_SENDING, &(info->tx_state)); + bt3c_write_wakeup(info); + } + + bt3c_io_write(iobase, 0x7001, 0x0000); + + outb(iir, iobase + CONTROL); + } + r = IRQ_HANDLED; + } + + spin_unlock(&(info->lock)); + + return r; +} + + + +/* ======================== HCI interface ======================== */ + + +static int bt3c_hci_flush(struct hci_dev *hdev) +{ + struct bt3c_info *info = hci_get_drvdata(hdev); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int bt3c_hci_open(struct hci_dev *hdev) +{ + return 0; +} + + +static int bt3c_hci_close(struct hci_dev *hdev) +{ + bt3c_hci_flush(hdev); + + return 0; +} + + +static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bt3c_info *info = hci_get_drvdata(hdev); + unsigned long flags; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + skb_queue_tail(&(info->txq), skb); + + spin_lock_irqsave(&(info->lock), flags); + + bt3c_write_wakeup(info); + + spin_unlock_irqrestore(&(info->lock), flags); + + return 0; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int bt3c_load_firmware(struct bt3c_info *info, + const unsigned char *firmware, + int count) +{ + char *ptr = (char *) firmware; + char b[9]; + unsigned int iobase, tmp, tn; + unsigned long size, addr, fcs; + int i, err = 0; + + iobase = info->p_dev->resource[0]->start; + + /* Reset */ + bt3c_io_write(iobase, 0x8040, 0x0404); + bt3c_io_write(iobase, 0x8040, 0x0400); + + udelay(1); + + bt3c_io_write(iobase, 0x8040, 0x0404); + + udelay(17); + + /* Load */ + while (count) { + if (ptr[0] != 'S') { + BT_ERR("Bad address in firmware"); + err = -EFAULT; + goto error; + } + + memset(b, 0, sizeof(b)); + memcpy(b, ptr + 2, 2); + if (kstrtoul(b, 16, &size) < 0) + return -EINVAL; + + memset(b, 0, sizeof(b)); + memcpy(b, ptr + 4, 8); + if (kstrtoul(b, 16, &addr) < 0) + return -EINVAL; + + memset(b, 0, sizeof(b)); + memcpy(b, ptr + (size * 2) + 2, 2); + if (kstrtoul(b, 16, &fcs) < 0) + return -EINVAL; + + memset(b, 0, sizeof(b)); + for (tmp = 0, i = 0; i < size; i++) { + memcpy(b, ptr + (i * 2) + 2, 2); + if (kstrtouint(b, 16, &tn)) + return -EINVAL; + tmp += tn; + } + + if (((tmp + fcs) & 0xff) != 0xff) { + BT_ERR("Checksum error in firmware"); + err = -EILSEQ; + goto error; + } + + if (ptr[1] == '3') { + bt3c_address(iobase, addr); + + memset(b, 0, sizeof(b)); + for (i = 0; i < (size - 4) / 2; i++) { + memcpy(b, ptr + (i * 4) + 12, 4); + if (kstrtouint(b, 16, &tmp)) + return -EINVAL; + bt3c_put(iobase, tmp); + } + } + + ptr += (size * 2) + 6; + count -= (size * 2) + 6; + } + + udelay(17); + + /* Boot */ + bt3c_address(iobase, 0x3000); + outb(inb(iobase + CONTROL) | 0x40, iobase + CONTROL); + +error: + udelay(17); + + /* Clear */ + bt3c_io_write(iobase, 0x7006, 0x0000); + bt3c_io_write(iobase, 0x7005, 0x0000); + bt3c_io_write(iobase, 0x7001, 0x0000); + + return err; +} + + +static int bt3c_open(struct bt3c_info *info) +{ + const struct firmware *firmware; + struct hci_dev *hdev; + int err; + + spin_lock_init(&(info->lock)); + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = NULL; + + 
/* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hci_set_drvdata(hdev, info); + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = bt3c_hci_open; + hdev->close = bt3c_hci_close; + hdev->flush = bt3c_hci_flush; + hdev->send = bt3c_hci_send_frame; + + /* Load firmware */ + err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev); + if (err < 0) { + BT_ERR("Firmware request failed"); + goto error; + } + + err = bt3c_load_firmware(info, firmware->data, firmware->size); + + release_firmware(firmware); + + if (err < 0) { + BT_ERR("Firmware loading failed"); + goto error; + } + + /* Timeout before it is safe to send the first HCI packet */ + msleep(1000); + + /* Register HCI device */ + err = hci_register_dev(hdev); + if (err < 0) { + BT_ERR("Can't register HCI device"); + goto error; + } + + return 0; + +error: + info->hdev = NULL; + hci_free_dev(hdev); + return err; +} + + +static int bt3c_close(struct bt3c_info *info) +{ + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + bt3c_hci_close(hdev); + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + return 0; +} + +static int bt3c_probe(struct pcmcia_device *link) +{ + struct bt3c_info *info; + + /* Create new info device */ + info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | + CONF_AUTO_SET_IO; + + return bt3c_config(link); +} + + +static void bt3c_detach(struct pcmcia_device *link) +{ + bt3c_release(link); +} + +static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data) +{ + int *try = priv_data; + + if (!try) + p_dev->io_lines = 16; + + if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0)) + return -EINVAL; + + p_dev->resource[0]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); +} + +static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev, + void *priv_data) +{ + static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; + int j; + + if (p_dev->io_lines > 3) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 8; + + for (j = 0; j < 5; j++) { + p_dev->resource[0]->start = base[j]; + p_dev->io_lines = base[j] ? 16 : 3; + if (!pcmcia_request_io(p_dev)) + return 0; + } + return -ENODEV; +} + +static int bt3c_config(struct pcmcia_device *link) +{ + struct bt3c_info *info = link->priv; + int i; + unsigned long try; + + /* First pass: look for a config entry that looks normal. + * Two tries: without IO aliases, then with aliases + */ + for (try = 0; try < 2; try++) + if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try)) + goto found_port; + + /* Second pass: try to find an entry that isn't picky about + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. 
+ */ + if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL)) + goto found_port; + + BT_ERR("No usable port range found"); + goto failed; + +found_port: + i = pcmcia_request_irq(link, &bt3c_interrupt); + if (i != 0) + goto failed; + + i = pcmcia_enable_device(link); + if (i != 0) + goto failed; + + if (bt3c_open(info) != 0) + goto failed; + + return 0; + +failed: + bt3c_release(link); + return -ENODEV; +} + + +static void bt3c_release(struct pcmcia_device *link) +{ + struct bt3c_info *info = link->priv; + + bt3c_close(info); + + pcmcia_disable_device(link); +} + + +static const struct pcmcia_device_id bt3c_ids[] = { + PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, bt3c_ids); + +static struct pcmcia_driver bt3c_driver = { + .owner = THIS_MODULE, + .name = "bt3c_cs", + .probe = bt3c_probe, + .remove = bt3c_detach, + .id_table = bt3c_ids, +}; +module_pcmcia_driver(bt3c_driver); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c new file mode 100644 index 000000000..0a5445ac5 --- /dev/null +++ b/drivers/bluetooth/btbcm.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth support for Broadcom devices + * + * Copyright (C) 2015 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "btbcm.h" + +#define VERSION "0.1" + +#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) +#define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}}) +#define BDADDR_BCM2076B1 (&(bdaddr_t) {{0x79, 0x56, 0x00, 0xa0, 0x76, 0x20}}) +#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}}) +#define BDADDR_BCM43430A1 (&(bdaddr_t) {{0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa}}) +#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}}) +#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}}) +#define BDADDR_BCM4334B0 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb0, 0x34, 0x43}}) +#define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}}) +#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}}) + +#define BCM_FW_NAME_LEN 64 +#define BCM_FW_NAME_COUNT_MAX 4 +/* For kmalloc-ing the fw-name array instead of putting it on the stack */ +typedef char bcm_fw_name[BCM_FW_NAME_LEN]; + +#ifdef CONFIG_EFI +static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) +{ + efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f, + 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13); + bdaddr_t efi_bdaddr, bdaddr; + efi_status_t status; + unsigned long len; + int ret; + + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return -EOPNOTSUPP; + + len = sizeof(efi_bdaddr); + status = efi.get_variable(L"BDADDR", &guid, NULL, &len, &efi_bdaddr); + if (status != EFI_SUCCESS) + return -ENXIO; + + if (len != sizeof(efi_bdaddr)) + return -EIO; + + baswap(&bdaddr, &efi_bdaddr); + + ret = btbcm_set_bdaddr(hdev, &bdaddr); + if (ret) + return ret; + + bt_dev_info(hdev, "BCM: Using EFI device address (%pMR)", &bdaddr); + return 0; +} +#else +static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} +#endif + +int btbcm_check_bdaddr(struct hci_dev *hdev) +{ + struct hci_rp_read_bd_addr *bda; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + + bt_dev_err(hdev, "BCM: Reading 
device address failed (%d)", err); + return err; + } + + if (skb->len != sizeof(*bda)) { + bt_dev_err(hdev, "BCM: Device address length mismatch"); + kfree_skb(skb); + return -EIO; + } + + bda = (struct hci_rp_read_bd_addr *)skb->data; + + /* Check if the address indicates a controller with either an + * invalid or default address. In both cases the device needs + * to be marked as not having a valid address. + * + * The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller + * with no configured address. + * + * The address 20:70:02:A0:00:00 indicates a BCM20702A1 controller + * with no configured address. + * + * The address 20:76:A0:00:56:79 indicates a BCM2076B1 controller + * with no configured address. + * + * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller + * with waiting for configuration state. + * + * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller + * with waiting for configuration state. + * + * The address 43:43:A0:12:1F:AC indicates a BCM43430A0 controller + * with no configured address. + * + * The address AA:AA:AA:AA:AA:AA indicates a BCM43430A1 controller + * with no configured address. + */ + if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) || + !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) || + !bacmp(&bda->bdaddr, BDADDR_BCM2076B1) || + !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) || + !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) || + !bacmp(&bda->bdaddr, BDADDR_BCM4334B0) || + !bacmp(&bda->bdaddr, BDADDR_BCM4345C5) || + !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) || + !bacmp(&bda->bdaddr, BDADDR_BCM43430A1) || + !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) { + /* Try falling back to BDADDR EFI variable */ + if (btbcm_set_bdaddr_from_efi(hdev) != 0) { + bt_dev_info(hdev, "BCM: Using default device address (%pMR)", + &bda->bdaddr); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btbcm_check_bdaddr); + +int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: Change address command failed (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btbcm_set_bdaddr); + +int btbcm_read_pcm_int_params(struct hci_dev *hdev, + struct bcm_set_pcm_int_params *params) +{ + struct sk_buff *skb; + int err = 0; + + skb = __hci_cmd_sync(hdev, 0xfc1d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: Read PCM int params failed (%d)", err); + return err; + } + + if (skb->len != 6 || skb->data[0]) { + bt_dev_err(hdev, "BCM: Read PCM int params length mismatch"); + kfree_skb(skb); + return -EIO; + } + + if (params) + memcpy(params, skb->data + 1, 5); + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btbcm_read_pcm_int_params); + +int btbcm_write_pcm_int_params(struct hci_dev *hdev, + const struct bcm_set_pcm_int_params *params) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc1c, 5, params, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: Write PCM int params failed (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btbcm_write_pcm_int_params); + +int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw) +{ + const struct hci_command_hdr *cmd; + const u8 *fw_ptr; + size_t fw_size; + struct sk_buff *skb; + u16 opcode; + int err = 0; + + /* Start Download */ 
+ skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: Download Minidrv command failed (%d)", + err); + goto done; + } + kfree_skb(skb); + + /* 50 msec delay after Download Minidrv completes */ + msleep(50); + + fw_ptr = fw->data; + fw_size = fw->size; + + while (fw_size >= sizeof(*cmd)) { + const u8 *cmd_param; + + cmd = (struct hci_command_hdr *)fw_ptr; + fw_ptr += sizeof(*cmd); + fw_size -= sizeof(*cmd); + + if (fw_size < cmd->plen) { + bt_dev_err(hdev, "BCM: Patch is corrupted"); + err = -EINVAL; + goto done; + } + + cmd_param = fw_ptr; + fw_ptr += cmd->plen; + fw_size -= cmd->plen; + + opcode = le16_to_cpu(cmd->opcode); + + skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: Patch command %04x failed (%d)", + opcode, err); + goto done; + } + kfree_skb(skb); + } + + /* 250 msec delay after Launch Ram completes */ + msleep(250); + +done: + return err; +} +EXPORT_SYMBOL(btbcm_patchram); + +static int btbcm_reset(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + + bt_dev_err(hdev, "BCM: Reset failed (%d)", err); + return err; + } + kfree_skb(skb); + + /* 100 msec delay for module to complete reset process */ + msleep(100); + + return 0; +} + +static struct sk_buff *btbcm_read_local_name(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "BCM: Reading local name failed (%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_name)) { + bt_dev_err(hdev, "BCM: Local name length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "BCM: Reading local version info failed (%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_version)) { + bt_dev_err(hdev, "BCM: Local version length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +static struct sk_buff *btbcm_read_verbose_config(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "BCM: Read verbose config info failed (%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != 7) { + bt_dev_err(hdev, "BCM: Verbose config length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +static struct sk_buff *btbcm_read_controller_features(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc6e, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "BCM: Read controller features failed (%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != 9) { + bt_dev_err(hdev, "BCM: Controller features length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "BCM: Read USB product info failed 
(%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != 5) { + bt_dev_err(hdev, "BCM: USB product length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +static const struct dmi_system_id disable_broken_read_transmit_power[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,1"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,4"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,1"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,2"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,1"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,2"), + }, + }, + { } +}; + +static int btbcm_read_info(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Verbose Config Version Info */ + skb = btbcm_read_verbose_config(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "BCM: chip id %u", skb->data[1]); + kfree_skb(skb); + + return 0; +} + +static int btbcm_print_controller_features(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Controller Features */ + skb = btbcm_read_controller_features(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]); + kfree_skb(skb); + + /* Read DMI and disable broken Read LE Min/Max Tx Power */ + if (dmi_first_match(disable_broken_read_transmit_power)) + set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks); + + return 0; +} + +static int btbcm_print_local_name(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Local Name */ + skb = btbcm_read_local_name(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); + kfree_skb(skb); + + return 0; +} + +struct bcm_subver_table { + u16 subver; + const char *name; +}; + +static const struct bcm_subver_table bcm_uart_subver_table[] = { + { 0x1111, "BCM4362A2" }, /* 000.017.017 */ + { 0x4103, "BCM4330B1" }, /* 002.001.003 */ + { 0x410d, "BCM4334B0" }, /* 002.001.013 */ + { 0x410e, "BCM43341B0" }, /* 002.001.014 */ + { 0x4204, "BCM2076B1" }, /* 002.002.004 */ + { 0x4406, "BCM4324B3" }, /* 002.004.006 */ + { 0x4606, "BCM4324B5" }, /* 002.006.006 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ + { 0x610c, "BCM4354" }, /* 003.001.012 */ + { 0x2122, "BCM4343A0" }, /* 001.001.034 */ + { 0x2209, "BCM43430A1" }, /* 001.002.009 */ + { 0x6119, "BCM4345C0" }, /* 003.001.025 */ + { 0x6606, "BCM4345C5" }, /* 003.006.006 */ + { 0x230f, "BCM4356A2" }, /* 001.003.015 */ + { 0x220e, "BCM20702A1" }, /* 001.002.014 */ + { 0x420d, "BCM4349B1" }, /* 002.002.013 */ + { 0x420e, "BCM4349B1" }, /* 002.002.014 */ + { 0x4217, "BCM4329B1" }, /* 002.002.023 */ + { 0x6106, "BCM4359C0" }, /* 003.001.006 */ + { 0x4106, "BCM4335A0" }, /* 002.001.006 */ + { 0x410c, "BCM43430B0" }, /* 002.001.012 */ + { 0x2119, "BCM4373A0" }, /* 001.001.025 */ + { } +}; + +static const struct bcm_subver_table bcm_usb_subver_table[] = { + { 0x2105, "BCM20703A1" }, /* 001.001.005 */ + { 0x210b, "BCM43142A0" }, /* 001.001.011 */ + { 0x2112, "BCM4314A0" }, /* 001.001.018 */ + { 0x2118, 
"BCM20702A0" }, /* 001.001.024 */ + { 0x2126, "BCM4335A0" }, /* 001.001.038 */ + { 0x220e, "BCM20702A1" }, /* 001.002.014 */ + { 0x230f, "BCM4356A2" }, /* 001.003.015 */ + { 0x4106, "BCM4335B0" }, /* 002.001.006 */ + { 0x410e, "BCM20702B0" }, /* 002.001.014 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ + { 0x610c, "BCM4354" }, /* 003.001.012 */ + { 0x6607, "BCM4350C5" }, /* 003.006.007 */ + { } +}; + +/* + * This currently only looks up the device tree board appendix, + * but can be expanded to other mechanisms. + */ +static const char *btbcm_get_board_name(struct device *dev) +{ +#ifdef CONFIG_OF + struct device_node *root; + char *board_type; + const char *tmp; + int len; + int i; + + root = of_find_node_by_path("/"); + if (!root) + return NULL; + + if (of_property_read_string_index(root, "compatible", 0, &tmp)) + return NULL; + + /* get rid of any '/' in the compatible string */ + len = strlen(tmp) + 1; + board_type = devm_kzalloc(dev, len, GFP_KERNEL); + strscpy(board_type, tmp, len); + for (i = 0; i < len; i++) { + if (board_type[i] == '/') + board_type[i] = '-'; + } + of_node_put(root); + + return board_type; +#else + return NULL; +#endif +} + +int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode) +{ + u16 subver, rev, pid, vid; + struct sk_buff *skb; + struct hci_rp_read_local_version *ver; + const struct bcm_subver_table *bcm_subver_table; + const char *hw_name = NULL; + const char *board_name; + char postfix[16] = ""; + int fw_name_count = 0; + bcm_fw_name *fw_name; + const struct firmware *fw; + int i, err; + + board_name = btbcm_get_board_name(&hdev->dev); + + /* Reset */ + err = btbcm_reset(hdev); + if (err) + return err; + + /* Read Local Version Info */ + skb = btbcm_read_local_version(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ver = (struct hci_rp_read_local_version *)skb->data; + rev = le16_to_cpu(ver->hci_rev); + subver = le16_to_cpu(ver->lmp_subver); + kfree_skb(skb); + + /* Read controller information */ + if (!(*fw_load_done)) { + err = btbcm_read_info(hdev); + if (err) + return err; + } + + if (!use_autobaud_mode) { + err = btbcm_print_controller_features(hdev); + if (err) + return err; + + err = btbcm_print_local_name(hdev); + if (err) + return err; + } + + bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table : + bcm_uart_subver_table; + + for (i = 0; bcm_subver_table[i].name; i++) { + if (subver == bcm_subver_table[i].subver) { + hw_name = bcm_subver_table[i].name; + break; + } + } + + bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u", + hw_name ? 
hw_name : "BCM", (subver & 0xe000) >> 13, + (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); + + if (*fw_load_done) + return 0; + + if (hdev->bus == HCI_USB) { + /* Read USB Product Info */ + skb = btbcm_read_usb_product(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + vid = get_unaligned_le16(skb->data + 1); + pid = get_unaligned_le16(skb->data + 3); + kfree_skb(skb); + + snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid); + } + + fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL); + if (!fw_name) + return -ENOMEM; + + if (hw_name) { + if (board_name) { + snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, + "brcm/%s%s.%s.hcd", hw_name, postfix, board_name); + fw_name_count++; + } + snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, + "brcm/%s%s.hcd", hw_name, postfix); + fw_name_count++; + } + + if (board_name) { + snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, + "brcm/BCM%s.%s.hcd", postfix, board_name); + fw_name_count++; + } + snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, + "brcm/BCM%s.hcd", postfix); + fw_name_count++; + + for (i = 0; i < fw_name_count; i++) { + err = firmware_request_nowarn(&fw, fw_name[i], &hdev->dev); + if (err == 0) { + bt_dev_info(hdev, "%s '%s' Patch", + hw_name ? hw_name : "BCM", fw_name[i]); + *fw_load_done = true; + break; + } + } + + if (*fw_load_done) { + err = btbcm_patchram(hdev, fw); + if (err) + bt_dev_info(hdev, "BCM: Patch failed (%d)", err); + + release_firmware(fw); + } else { + bt_dev_err(hdev, "BCM: firmware Patch file not found, tried:"); + for (i = 0; i < fw_name_count; i++) + bt_dev_err(hdev, "BCM: '%s'", fw_name[i]); + } + + kfree(fw_name); + return 0; +} +EXPORT_SYMBOL_GPL(btbcm_initialize); + +int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode) +{ + int err; + + /* Re-initialize if necessary */ + if (*fw_load_done) { + err = btbcm_initialize(hdev, fw_load_done, use_autobaud_mode); + if (err) + return err; + } + + btbcm_check_bdaddr(hdev); + + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + + return 0; +} +EXPORT_SYMBOL_GPL(btbcm_finalize); + +int btbcm_setup_patchram(struct hci_dev *hdev) +{ + bool fw_load_done = false; + bool use_autobaud_mode = false; + int err; + + /* Initialize */ + err = btbcm_initialize(hdev, &fw_load_done, use_autobaud_mode); + if (err) + return err; + + /* Re-initialize after loading Patch */ + return btbcm_finalize(hdev, &fw_load_done, use_autobaud_mode); +} +EXPORT_SYMBOL_GPL(btbcm_setup_patchram); + +int btbcm_setup_apple(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + /* Reset */ + err = btbcm_reset(hdev); + if (err) + return err; + + /* Read Verbose Config Version Info */ + skb = btbcm_read_verbose_config(hdev); + if (!IS_ERR(skb)) { + bt_dev_info(hdev, "BCM: chip id %u build %4.4u", + skb->data[1], get_unaligned_le16(skb->data + 5)); + kfree_skb(skb); + } + + /* Read USB Product Info */ + skb = btbcm_read_usb_product(hdev); + if (!IS_ERR(skb)) { + bt_dev_info(hdev, "BCM: product %4.4x:%4.4x", + get_unaligned_le16(skb->data + 1), + get_unaligned_le16(skb->data + 3)); + kfree_skb(skb); + } + + /* Read Controller Features */ + skb = btbcm_read_controller_features(hdev); + if (!IS_ERR(skb)) { + bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]); + kfree_skb(skb); + } + + /* Read Local Name */ + skb = btbcm_read_local_name(hdev); + if (!IS_ERR(skb)) { + bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); + kfree_skb(skb); + } + + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + + return 0; +} 
+EXPORT_SYMBOL_GPL(btbcm_setup_apple); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth support for Broadcom devices ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h new file mode 100644 index 000000000..b4cb24231 --- /dev/null +++ b/drivers/bluetooth/btbcm.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Bluetooth support for Broadcom devices + * + * Copyright (C) 2015 Intel Corporation + */ + +#define BCM_UART_CLOCK_48MHZ 0x01 +#define BCM_UART_CLOCK_24MHZ 0x02 + +struct bcm_update_uart_baud_rate { + __le16 zero; + __le32 baud_rate; +} __packed; + +struct bcm_write_uart_clock_setting { + __u8 type; +} __packed; + +struct bcm_set_sleep_mode { + __u8 sleep_mode; + __u8 idle_host; + __u8 idle_dev; + __u8 bt_wake_active; + __u8 host_wake_active; + __u8 allow_host_sleep; + __u8 combine_modes; + __u8 tristate_control; + __u8 usb_auto_sleep; + __u8 usb_resume_timeout; + __u8 break_to_host; + __u8 pulsed_host_wake; +} __packed; + +struct bcm_set_pcm_int_params { + __u8 routing; + __u8 rate; + __u8 frame_sync; + __u8 sync_mode; + __u8 clock_mode; +} __packed; + +struct bcm_set_pcm_format_params { + __u8 lsb_first; + __u8 fill_value; + __u8 fill_method; + __u8 fill_num; + __u8 right_justify; +} __packed; + +#if IS_ENABLED(CONFIG_BT_BCM) + +int btbcm_check_bdaddr(struct hci_dev *hdev); +int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw); +int btbcm_read_pcm_int_params(struct hci_dev *hdev, + struct bcm_set_pcm_int_params *params); +int btbcm_write_pcm_int_params(struct hci_dev *hdev, + const struct bcm_set_pcm_int_params *params); + +int btbcm_setup_patchram(struct hci_dev *hdev); +int btbcm_setup_apple(struct hci_dev *hdev); + +int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode); +int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode); + +#else + +static inline int btbcm_check_bdaddr(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static inline int btbcm_read_pcm_int_params(struct hci_dev *hdev, + struct bcm_set_pcm_int_params *params) +{ + return -EOPNOTSUPP; +} + +static inline int btbcm_write_pcm_int_params(struct hci_dev *hdev, + const struct bcm_set_pcm_int_params *params) +{ + return -EOPNOTSUPP; +} + +static inline int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw) +{ + return -EOPNOTSUPP; +} + +static inline int btbcm_setup_patchram(struct hci_dev *hdev) +{ + return 0; +} + +static inline int btbcm_setup_apple(struct hci_dev *hdev) +{ + return 0; +} + +static inline int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode) +{ + return 0; +} + +static inline int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode) +{ + return 0; +} + +#endif diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c new file mode 100644 index 000000000..2462796a5 --- /dev/null +++ b/drivers/bluetooth/btintel.c @@ -0,0 +1,3074 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth support for Intel devices + * + * Copyright (C) 2015 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "btintel.h" + +#define VERSION "0.1" + +#define BDADDR_INTEL 
(&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}}) +#define RSA_HEADER_LEN 644 +#define CSS_HEADER_OFFSET 8 +#define ECDSA_OFFSET 644 +#define ECDSA_HEADER_LEN 320 + +#define BTINTEL_PPAG_NAME "PPAG" + +enum { + DSM_SET_WDISABLE2_DELAY = 1, + DSM_SET_RESET_METHOD = 3, +}; + +/* structure to store the PPAG data read from ACPI table */ +struct btintel_ppag { + u32 domain; + u32 mode; + acpi_status status; + struct hci_dev *hdev; +}; + +#define CMD_WRITE_BOOT_PARAMS 0xfc0e +struct cmd_write_boot_params { + __le32 boot_addr; + u8 fw_build_num; + u8 fw_build_ww; + u8 fw_build_yy; +} __packed; + +static struct { + const char *driver_name; + u8 hw_variant; + u32 fw_build_num; +} coredump_info; + +static const guid_t btintel_guid_dsm = + GUID_INIT(0xaa10f4e0, 0x81ac, 0x4233, + 0xab, 0xf6, 0x3b, 0x2a, 0xc5, 0x0e, 0x28, 0xd9); + +int btintel_check_bdaddr(struct hci_dev *hdev) +{ + struct hci_rp_read_bd_addr *bda; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + bt_dev_err(hdev, "Reading Intel device address failed (%d)", + err); + return err; + } + + if (skb->len != sizeof(*bda)) { + bt_dev_err(hdev, "Intel device address length mismatch"); + kfree_skb(skb); + return -EIO; + } + + bda = (struct hci_rp_read_bd_addr *)skb->data; + + /* For some Intel based controllers, the default Bluetooth device + * address 00:03:19:9E:8B:00 can be found. These controllers are + * fully operational, but have the danger of duplicate addresses + * and that in turn can cause problems with Bluetooth operation. + */ + if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) { + bt_dev_err(hdev, "Found Intel default device address (%pMR)", + &bda->bdaddr); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_check_bdaddr); + +int btintel_enter_mfg(struct hci_dev *hdev) +{ + static const u8 param[] = { 0x01, 0x00 }; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_enter_mfg); + +int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched) +{ + u8 param[] = { 0x00, 0x00 }; + struct sk_buff *skb; + + /* The 2nd command parameter specifies the manufacturing exit method: + * 0x00: Just disable the manufacturing mode (0x00). + * 0x01: Disable manufacturing mode and reset with patches deactivated. + * 0x02: Disable manufacturing mode and reset with patches activated. + */ + if (reset) + param[1] |= patched ? 
0x02 : 0x01; + + skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_exit_mfg); + +int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Changing Intel device address failed (%d)", + err); + return err; + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_set_bdaddr); + +static int btintel_set_event_mask(struct hci_dev *hdev, bool debug) +{ + u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + struct sk_buff *skb; + int err; + + if (debug) + mask[1] |= 0x62; + + skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} + +int btintel_set_diag(struct hci_dev *hdev, bool enable) +{ + struct sk_buff *skb; + u8 param[3]; + int err; + + if (enable) { + param[0] = 0x03; + param[1] = 0x03; + param[2] = 0x03; + } else { + param[0] = 0x00; + param[1] = 0x00; + param[2] = 0x00; + } + + skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + if (err == -ENODATA) + goto done; + bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)", + err); + return err; + } + kfree_skb(skb); + +done: + btintel_set_event_mask(hdev, enable); + return 0; +} +EXPORT_SYMBOL_GPL(btintel_set_diag); + +static int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable) +{ + int err, ret; + + err = btintel_enter_mfg(hdev); + if (err) + return err; + + ret = btintel_set_diag(hdev, enable); + + err = btintel_exit_mfg(hdev, false, false); + if (err) + return err; + + return ret; +} + +static int btintel_set_diag_combined(struct hci_dev *hdev, bool enable) +{ + int ret; + + /* Legacy ROM device needs to be in the manufacturer mode to apply + * diagnostic setting + * + * This flag is set after reading the Intel version. + */ + if (btintel_test_flag(hdev, INTEL_ROM_LEGACY)) + ret = btintel_set_diag_mfg(hdev, enable); + else + ret = btintel_set_diag(hdev, enable); + + return ret; +} + +static void btintel_hw_error(struct hci_dev *hdev, u8 code) +{ + struct sk_buff *skb; + u8 type = 0x00; + + bt_dev_err(hdev, "Hardware error 0x%2.2x", code); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reset after hardware error failed (%ld)", + PTR_ERR(skb)); + return; + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)", + PTR_ERR(skb)); + return; + } + + if (skb->len != 13) { + bt_dev_err(hdev, "Exception info size mismatch"); + kfree_skb(skb); + return; + } + + bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1)); + + kfree_skb(skb); +} + +int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) +{ + const char *variant; + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. 
+ */ + if (ver->hw_platform != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", + ver->hw_platform); + return -EINVAL; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. + */ + switch (ver->hw_variant) { + case 0x07: /* WP - Legacy ROM */ + case 0x08: /* StP - Legacy ROM */ + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", + ver->hw_variant); + return -EINVAL; + } + + switch (ver->fw_variant) { + case 0x01: + variant = "Legacy ROM 2.5"; + break; + case 0x06: + variant = "Bootloader"; + break; + case 0x22: + variant = "Legacy ROM 2.x"; + break; + case 0x23: + variant = "Firmware"; + break; + default: + bt_dev_err(hdev, "Unsupported firmware variant(%02x)", ver->fw_variant); + return -EINVAL; + } + + coredump_info.hw_variant = ver->hw_variant; + coredump_info.fw_build_num = ver->fw_build_num; + + bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u", + variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, + ver->fw_build_num, ver->fw_build_ww, + 2000 + ver->fw_build_yy); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_version_info); + +static int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, + const void *param) +{ + while (plen > 0) { + struct sk_buff *skb; + u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen; + + cmd_param[0] = fragment_type; + memcpy(cmd_param + 1, param, fragment_len); + + skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1, + cmd_param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + plen -= fragment_len; + param += fragment_len; + } + + return 0; +} + +int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name) +{ + const struct firmware *fw; + struct sk_buff *skb; + const u8 *fw_ptr; + int err; + + err = request_firmware_direct(&fw, ddc_name, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)", + ddc_name, err); + return err; + } + + bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name); + + fw_ptr = fw->data; + + /* DDC file contains one or more DDC structure which has + * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2). 
+ */ + while (fw->size > fw_ptr - fw->data) { + u8 cmd_plen = fw_ptr[0] + sizeof(u8); + + skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)", + PTR_ERR(skb)); + release_firmware(fw); + return PTR_ERR(skb); + } + + fw_ptr += cmd_plen; + kfree_skb(skb); + } + + release_firmware(fw); + + bt_dev_info(hdev, "Applying Intel DDC parameters completed"); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_load_ddc_config); + +int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) +{ + int err, ret; + + err = btintel_enter_mfg(hdev); + if (err) + return err; + + ret = btintel_set_event_mask(hdev, debug); + + err = btintel_exit_mfg(hdev, false, false); + if (err) + return err; + + return ret; +} +EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg); + +int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading Intel version information failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*ver)) { + bt_dev_err(hdev, "Intel version event size mismatch"); + kfree_skb(skb); + return -EILSEQ; + } + + memcpy(ver, skb->data, sizeof(*ver)); + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_read_version); + +static int btintel_version_info_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version) +{ + const char *variant; + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (INTEL_HW_PLATFORM(version->cnvi_bt) != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", + INTEL_HW_PLATFORM(version->cnvi_bt)); + return -EINVAL; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. + */ + switch (INTEL_HW_VARIANT(version->cnvi_bt)) { + case 0x17: /* TyP */ + case 0x18: /* Slr */ + case 0x19: /* Slr-F */ + case 0x1b: /* Mgr */ + case 0x1c: /* Gale Peak (GaP) */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", + INTEL_HW_VARIANT(version->cnvi_bt)); + return -EINVAL; + } + + switch (version->img_type) { + case 0x01: + variant = "Bootloader"; + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (version->limited_cce != 0x00) { + bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)", + version->limited_cce); + return -EINVAL; + } + + /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */ + if (version->sbe_type > 0x01) { + bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)", + version->sbe_type); + return -EINVAL; + } + + bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id); + bt_dev_info(hdev, "Secure boot is %s", + version->secure_boot ? "enabled" : "disabled"); + bt_dev_info(hdev, "OTP lock is %s", + version->otp_lock ? "enabled" : "disabled"); + bt_dev_info(hdev, "API lock is %s", + version->api_lock ? "enabled" : "disabled"); + bt_dev_info(hdev, "Debug lock is %s", + version->debug_lock ? 
"enabled" : "disabled"); + bt_dev_info(hdev, "Minimum firmware build %u week %u %u", + version->min_fw_build_nn, version->min_fw_build_cw, + 2000 + version->min_fw_build_yy); + break; + case 0x03: + variant = "Firmware"; + break; + default: + bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type); + return -EINVAL; + } + + coredump_info.hw_variant = INTEL_HW_VARIANT(version->cnvi_bt); + coredump_info.fw_build_num = version->build_num; + + bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant, + 2000 + (version->timestamp >> 8), version->timestamp & 0xff, + version->build_type, version->build_num); + + return 0; +} + +static int btintel_parse_version_tlv(struct hci_dev *hdev, + struct intel_version_tlv *version, + struct sk_buff *skb) +{ + /* Consume Command Complete Status field */ + skb_pull(skb, 1); + + /* Event parameters contatin multiple TLVs. Read each of them + * and only keep the required data. Also, it use existing legacy + * version field like hw_platform, hw_variant, and fw_variant + * to keep the existing setup flow + */ + while (skb->len) { + struct intel_tlv *tlv; + + /* Make sure skb has a minimum length of the header */ + if (skb->len < sizeof(*tlv)) + return -EINVAL; + + tlv = (struct intel_tlv *)skb->data; + + /* Make sure skb has a enough data */ + if (skb->len < tlv->len + sizeof(*tlv)) + return -EINVAL; + + switch (tlv->type) { + case INTEL_TLV_CNVI_TOP: + version->cnvi_top = get_unaligned_le32(tlv->val); + break; + case INTEL_TLV_CNVR_TOP: + version->cnvr_top = get_unaligned_le32(tlv->val); + break; + case INTEL_TLV_CNVI_BT: + version->cnvi_bt = get_unaligned_le32(tlv->val); + break; + case INTEL_TLV_CNVR_BT: + version->cnvr_bt = get_unaligned_le32(tlv->val); + break; + case INTEL_TLV_DEV_REV_ID: + version->dev_rev_id = get_unaligned_le16(tlv->val); + break; + case INTEL_TLV_IMAGE_TYPE: + version->img_type = tlv->val[0]; + break; + case INTEL_TLV_TIME_STAMP: + /* If image type is Operational firmware (0x03), then + * running FW Calendar Week and Year information can + * be extracted from Timestamp information + */ + version->min_fw_build_cw = tlv->val[0]; + version->min_fw_build_yy = tlv->val[1]; + version->timestamp = get_unaligned_le16(tlv->val); + break; + case INTEL_TLV_BUILD_TYPE: + version->build_type = tlv->val[0]; + break; + case INTEL_TLV_BUILD_NUM: + /* If image type is Operational firmware (0x03), then + * running FW build number can be extracted from the + * Build information + */ + version->min_fw_build_nn = tlv->val[0]; + version->build_num = get_unaligned_le32(tlv->val); + break; + case INTEL_TLV_SECURE_BOOT: + version->secure_boot = tlv->val[0]; + break; + case INTEL_TLV_OTP_LOCK: + version->otp_lock = tlv->val[0]; + break; + case INTEL_TLV_API_LOCK: + version->api_lock = tlv->val[0]; + break; + case INTEL_TLV_DEBUG_LOCK: + version->debug_lock = tlv->val[0]; + break; + case INTEL_TLV_MIN_FW: + version->min_fw_build_nn = tlv->val[0]; + version->min_fw_build_cw = tlv->val[1]; + version->min_fw_build_yy = tlv->val[2]; + break; + case INTEL_TLV_LIMITED_CCE: + version->limited_cce = tlv->val[0]; + break; + case INTEL_TLV_SBE_TYPE: + version->sbe_type = tlv->val[0]; + break; + case INTEL_TLV_OTP_BDADDR: + memcpy(&version->otp_bd_addr, tlv->val, + sizeof(bdaddr_t)); + break; + default: + /* Ignore rest of information */ + break; + } + /* consume the current tlv and move to next*/ + skb_pull(skb, tlv->len + sizeof(*tlv)); + } + + return 0; +} + +static int btintel_read_version_tlv(struct hci_dev *hdev, + struct intel_version_tlv 
*version) +{ + struct sk_buff *skb; + const u8 param[1] = { 0xFF }; + + if (!version) + return -EINVAL; + + skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading Intel version information failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->data[0]) { + bt_dev_err(hdev, "Intel Read Version command failed (%02x)", + skb->data[0]); + kfree_skb(skb); + return -EIO; + } + + btintel_parse_version_tlv(hdev, version, skb); + + kfree_skb(skb); + return 0; +} + +/* ------- REGMAP IBT SUPPORT ------- */ + +#define IBT_REG_MODE_8BIT 0x00 +#define IBT_REG_MODE_16BIT 0x01 +#define IBT_REG_MODE_32BIT 0x02 + +struct regmap_ibt_context { + struct hci_dev *hdev; + __u16 op_write; + __u16 op_read; +}; + +struct ibt_cp_reg_access { + __le32 addr; + __u8 mode; + __u8 len; + __u8 data[]; +} __packed; + +struct ibt_rp_reg_access { + __u8 status; + __le32 addr; + __u8 data[]; +} __packed; + +static int regmap_ibt_read(void *context, const void *addr, size_t reg_size, + void *val, size_t val_size) +{ + struct regmap_ibt_context *ctx = context; + struct ibt_cp_reg_access cp; + struct ibt_rp_reg_access *rp; + struct sk_buff *skb; + int err = 0; + + if (reg_size != sizeof(__le32)) + return -EINVAL; + + switch (val_size) { + case 1: + cp.mode = IBT_REG_MODE_8BIT; + break; + case 2: + cp.mode = IBT_REG_MODE_16BIT; + break; + case 4: + cp.mode = IBT_REG_MODE_32BIT; + break; + default: + return -EINVAL; + } + + /* regmap provides a little-endian formatted addr */ + cp.addr = *(__le32 *)addr; + cp.len = val_size; + + bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr)); + + skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)", + le32_to_cpu(cp.addr), err); + return err; + } + + if (skb->len != sizeof(*rp) + val_size) { + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len", + le32_to_cpu(cp.addr)); + err = -EINVAL; + goto done; + } + + rp = (struct ibt_rp_reg_access *)skb->data; + + if (rp->addr != cp.addr) { + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr", + le32_to_cpu(rp->addr)); + err = -EINVAL; + goto done; + } + + memcpy(val, rp->data, val_size); + +done: + kfree_skb(skb); + return err; +} + +static int regmap_ibt_gather_write(void *context, + const void *addr, size_t reg_size, + const void *val, size_t val_size) +{ + struct regmap_ibt_context *ctx = context; + struct ibt_cp_reg_access *cp; + struct sk_buff *skb; + int plen = sizeof(*cp) + val_size; + u8 mode; + int err = 0; + + if (reg_size != sizeof(__le32)) + return -EINVAL; + + switch (val_size) { + case 1: + mode = IBT_REG_MODE_8BIT; + break; + case 2: + mode = IBT_REG_MODE_16BIT; + break; + case 4: + mode = IBT_REG_MODE_32BIT; + break; + default: + return -EINVAL; + } + + cp = kmalloc(plen, GFP_KERNEL); + if (!cp) + return -ENOMEM; + + /* regmap provides a little-endian formatted addr/value */ + cp->addr = *(__le32 *)addr; + cp->mode = mode; + cp->len = val_size; + memcpy(&cp->data, val, val_size); + + bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr)); + + skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)", + le32_to_cpu(cp->addr), err); + goto done; + } + kfree_skb(skb); + +done: + kfree(cp); + return err; +} + +static int regmap_ibt_write(void 
*context, const void *data, size_t count) +{ + /* data contains register+value, since we only support 32bit addr, + * minimum data size is 4 bytes. + */ + if (WARN_ONCE(count < 4, "Invalid register access")) + return -EINVAL; + + return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4); +} + +static void regmap_ibt_free_context(void *context) +{ + kfree(context); +} + +static const struct regmap_bus regmap_ibt = { + .read = regmap_ibt_read, + .write = regmap_ibt_write, + .gather_write = regmap_ibt_gather_write, + .free_context = regmap_ibt_free_context, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +/* Config is the same for all register regions */ +static const struct regmap_config regmap_ibt_cfg = { + .name = "btintel_regmap", + .reg_bits = 32, + .val_bits = 32, +}; + +struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, + u16 opcode_write) +{ + struct regmap_ibt_context *ctx; + + bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read, + opcode_write); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->op_read = opcode_read; + ctx->op_write = opcode_write; + ctx->hdev = hdev; + + return regmap_init(&hdev->dev, ®map_ibt, ctx, ®map_ibt_cfg); +} +EXPORT_SYMBOL_GPL(btintel_regmap_init); + +int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param) +{ + struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 }; + struct sk_buff *skb; + + params.boot_param = cpu_to_le32(boot_param); + + skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), ¶ms, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to send Intel Reset command"); + return PTR_ERR(skb); + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_send_intel_reset); + +int btintel_read_boot_params(struct hci_dev *hdev, + struct intel_boot_params *params) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*params)) { + bt_dev_err(hdev, "Intel boot parameters size mismatch"); + kfree_skb(skb); + return -EILSEQ; + } + + memcpy(params, skb->data, sizeof(*params)); + + kfree_skb(skb); + + if (params->status) { + bt_dev_err(hdev, "Intel boot parameters command failed (%02x)", + params->status); + return -bt_to_errno(params->status); + } + + bt_dev_info(hdev, "Device revision is %u", + le16_to_cpu(params->dev_revid)); + + bt_dev_info(hdev, "Secure boot is %s", + params->secure_boot ? "enabled" : "disabled"); + + bt_dev_info(hdev, "OTP lock is %s", + params->otp_lock ? "enabled" : "disabled"); + + bt_dev_info(hdev, "API lock is %s", + params->api_lock ? "enabled" : "disabled"); + + bt_dev_info(hdev, "Debug lock is %s", + params->debug_lock ? "enabled" : "disabled"); + + bt_dev_info(hdev, "Minimum firmware build %u week %u %u", + params->min_fw_build_nn, params->min_fw_build_cw, + 2000 + params->min_fw_build_yy); + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_read_boot_params); + +static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev, + const struct firmware *fw) +{ + int err; + + /* Start the firmware download transaction with the Init fragment + * represented by the 128 bytes of CSS header. 
+ */ + err = btintel_secure_send(hdev, 0x00, 128, fw->data); + if (err < 0) { + bt_dev_err(hdev, "Failed to send firmware header (%d)", err); + goto done; + } + + /* Send the 256 bytes of public key information from the firmware + * as the PKey fragment. + */ + err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128); + if (err < 0) { + bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err); + goto done; + } + + /* Send the 256 bytes of signature information from the firmware + * as the Sign fragment. + */ + err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388); + if (err < 0) { + bt_dev_err(hdev, "Failed to send firmware signature (%d)", err); + goto done; + } + +done: + return err; +} + +static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev, + const struct firmware *fw) +{ + int err; + + /* Start the firmware download transaction with the Init fragment + * represented by the 128 bytes of CSS header. + */ + err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644); + if (err < 0) { + bt_dev_err(hdev, "Failed to send firmware header (%d)", err); + return err; + } + + /* Send the 96 bytes of public key information from the firmware + * as the PKey fragment. + */ + err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128); + if (err < 0) { + bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err); + return err; + } + + /* Send the 96 bytes of signature information from the firmware + * as the Sign fragment + */ + err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224); + if (err < 0) { + bt_dev_err(hdev, "Failed to send firmware signature (%d)", + err); + return err; + } + return 0; +} + +static int btintel_download_firmware_payload(struct hci_dev *hdev, + const struct firmware *fw, + size_t offset) +{ + int err; + const u8 *fw_ptr; + u32 frag_len; + + fw_ptr = fw->data + offset; + frag_len = 0; + err = -EINVAL; + + while (fw_ptr - fw->data < fw->size) { + struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len); + + frag_len += sizeof(*cmd) + cmd->plen; + + /* The parameter length of the secure send command requires + * a 4 byte alignment. It happens so that the firmware file + * contains proper Intel_NOP commands to align the fragments + * as needed. + * + * Send set of commands with 4 byte alignment from the + * firmware data buffer as a single Data fragement. + */ + if (!(frag_len % 4)) { + err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr); + if (err < 0) { + bt_dev_err(hdev, + "Failed to send firmware data (%d)", + err); + goto done; + } + + fw_ptr += frag_len; + frag_len = 0; + } + } + +done: + return err; +} + +static bool btintel_firmware_version(struct hci_dev *hdev, + u8 num, u8 ww, u8 yy, + const struct firmware *fw, + u32 *boot_addr) +{ + const u8 *fw_ptr; + + fw_ptr = fw->data; + + while (fw_ptr - fw->data < fw->size) { + struct hci_command_hdr *cmd = (void *)(fw_ptr); + + /* Each SKU has a different reset parameter to use in the + * HCI_Intel_Reset command and it is embedded in the firmware + * data. So, instead of using static value per SKU, check + * the firmware data and save it for later use. 
+ */ + if (le16_to_cpu(cmd->opcode) == CMD_WRITE_BOOT_PARAMS) { + struct cmd_write_boot_params *params; + + params = (void *)(fw_ptr + sizeof(*cmd)); + + *boot_addr = le32_to_cpu(params->boot_addr); + + bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr); + + bt_dev_info(hdev, "Firmware Version: %u-%u.%u", + params->fw_build_num, params->fw_build_ww, + params->fw_build_yy); + + return (num == params->fw_build_num && + ww == params->fw_build_ww && + yy == params->fw_build_yy); + } + + fw_ptr += sizeof(*cmd) + cmd->plen; + } + + return false; +} + +int btintel_download_firmware(struct hci_dev *hdev, + struct intel_version *ver, + const struct firmware *fw, + u32 *boot_param) +{ + int err; + + /* SfP and WsP don't seem to update the firmware version on file + * so version checking is currently not possible. + */ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + /* Skip version checking */ + break; + default: + + /* Skip download if firmware has the same version */ + if (btintel_firmware_version(hdev, ver->fw_build_num, + ver->fw_build_ww, ver->fw_build_yy, + fw, boot_param)) { + bt_dev_info(hdev, "Firmware already loaded"); + /* Return -EALREADY to indicate that the firmware has + * already been loaded. + */ + return -EALREADY; + } + } + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * If the firmware version has changed that means it needs to be reset + * to bootloader when operational so the new firmware can be loaded. + */ + if (ver->fw_variant == 0x23) + return -EINVAL; + + err = btintel_sfi_rsa_header_secure_send(hdev, fw); + if (err) + return err; + + return btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN); +} +EXPORT_SYMBOL_GPL(btintel_download_firmware); + +static int btintel_download_fw_tlv(struct hci_dev *hdev, + struct intel_version_tlv *ver, + const struct firmware *fw, u32 *boot_param, + u8 hw_variant, u8 sbe_type) +{ + int err; + u32 css_header_ver; + + /* Skip download if firmware has the same version */ + if (btintel_firmware_version(hdev, ver->min_fw_build_nn, + ver->min_fw_build_cw, + ver->min_fw_build_yy, + fw, boot_param)) { + bt_dev_info(hdev, "Firmware already loaded"); + /* Return -EALREADY to indicate that firmware has + * already been loaded. + */ + return -EALREADY; + } + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x01 identifies + * the bootloader and the value 0x03 identifies the operational + * firmware. + * + * If the firmware version has changed that means it needs to be reset + * to bootloader when operational so the new firmware can be loaded. + */ + if (ver->img_type == 0x03) + return -EINVAL; + + /* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support + * only RSA secure boot engine. Hence, the corresponding sfi file will + * have RSA header of 644 bytes followed by Command Buffer. + * + * iBT hardware variants 0x17, 0x18 onwards support both RSA and ECDSA + * secure boot engine. As a result, the corresponding sfi file will + * have RSA header of 644, ECDSA header of 320 bytes followed by + * Command Buffer. 
+ * + * CSS Header byte positions 0x08 to 0x0B represent the CSS Header + * version: RSA(0x00010000) , ECDSA (0x00020000) + */ + css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET); + if (css_header_ver != 0x00010000) { + bt_dev_err(hdev, "Invalid CSS Header version"); + return -EINVAL; + } + + if (hw_variant <= 0x14) { + if (sbe_type != 0x00) { + bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)", + hw_variant); + return -EINVAL; + } + + err = btintel_sfi_rsa_header_secure_send(hdev, fw); + if (err) + return err; + + err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN); + if (err) + return err; + } else if (hw_variant >= 0x17) { + /* Check if CSS header for ECDSA follows the RSA header */ + if (fw->data[ECDSA_OFFSET] != 0x06) + return -EINVAL; + + /* Check if the CSS Header version is ECDSA(0x00020000) */ + css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET + CSS_HEADER_OFFSET); + if (css_header_ver != 0x00020000) { + bt_dev_err(hdev, "Invalid CSS Header version"); + return -EINVAL; + } + + if (sbe_type == 0x00) { + err = btintel_sfi_rsa_header_secure_send(hdev, fw); + if (err) + return err; + + err = btintel_download_firmware_payload(hdev, fw, + RSA_HEADER_LEN + ECDSA_HEADER_LEN); + if (err) + return err; + } else if (sbe_type == 0x01) { + err = btintel_sfi_ecdsa_header_secure_send(hdev, fw); + if (err) + return err; + + err = btintel_download_firmware_payload(hdev, fw, + RSA_HEADER_LEN + ECDSA_HEADER_LEN); + if (err) + return err; + } + } + return 0; +} + +static void btintel_reset_to_bootloader(struct hci_dev *hdev) +{ + struct intel_reset params; + struct sk_buff *skb; + + /* Send Intel Reset command. This will result in + * re-enumeration of BT controller. + * + * Intel Reset parameter description: + * reset_type : 0x00 (Soft reset), + * 0x01 (Hard reset) + * patch_enable : 0x00 (Do not enable), + * 0x01 (Enable) + * ddc_reload : 0x00 (Do not reload), + * 0x01 (Reload) + * boot_option: 0x00 (Current image), + * 0x01 (Specified boot address) + * boot_param: Boot address + * + */ + params.reset_type = 0x01; + params.patch_enable = 0x01; + params.ddc_reload = 0x01; + params.boot_option = 0x00; + params.boot_param = cpu_to_le32(0x00000000); + + skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), + ¶ms, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "FW download error recovery failed (%ld)", + PTR_ERR(skb)); + return; + } + bt_dev_info(hdev, "Intel reset sent to retry FW download"); + kfree_skb(skb); + + /* Current Intel BT controllers(ThP/JfP) hold the USB reset + * lines for 2ms when it receives Intel Reset in bootloader mode. + * Whereas, the upcoming Intel BT controllers will hold USB reset + * for 150ms. To keep the delay generic, 150ms is chosen here. + */ + msleep(150); +} + +static int btintel_read_debug_features(struct hci_dev *hdev, + struct intel_debug_features *features) +{ + struct sk_buff *skb; + u8 page_no = 1; + + /* Intel controller supports two pages, each page is of 128-bit + * feature bit mask. 
And each bit defines specific feature support + */ + skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading supported features failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != (sizeof(features->page1) + 3)) { + bt_dev_err(hdev, "Supported features event size mismatch"); + kfree_skb(skb); + return -EILSEQ; + } + + memcpy(features->page1, skb->data + 3, sizeof(features->page1)); + + /* Read the supported features page2 if required in future. + */ + kfree_skb(skb); + return 0; +} + +static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data, + void **ret) +{ + acpi_status status; + size_t len; + struct btintel_ppag *ppag = data; + union acpi_object *p, *elements; + struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + struct hci_dev *hdev = ppag->hdev; + + status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); + if (ACPI_FAILURE(status)) { + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); + return status; + } + + len = strlen(string.pointer); + if (len < strlen(BTINTEL_PPAG_NAME)) { + kfree(string.pointer); + return AE_OK; + } + + if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { + kfree(string.pointer); + return AE_OK; + } + kfree(string.pointer); + + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) { + ppag->status = status; + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); + return status; + } + + p = buffer.pointer; + ppag = (struct btintel_ppag *)data; + + if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { + kfree(buffer.pointer); + bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", + p->type, p->package.count); + ppag->status = AE_ERROR; + return AE_ERROR; + } + + elements = p->package.elements; + + /* PPAG table is located at element[1] */ + p = &elements[1]; + + ppag->domain = (u32)p->package.elements[0].integer.value; + ppag->mode = (u32)p->package.elements[1].integer.value; + ppag->status = AE_OK; + kfree(buffer.pointer); + return AE_CTRL_TERMINATE; +} + +static int btintel_set_debug_features(struct hci_dev *hdev, + const struct intel_debug_features *features) +{ + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00 }; + u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 }; + u8 trace_enable = 0x02; + struct sk_buff *skb; + + if (!features) { + bt_dev_warn(hdev, "Debug features not read"); + return -EINVAL; + } + + if (!(features->page1[0] & 0x3f)) { + bt_dev_info(hdev, "Telemetry exception format not supported"); + return 0; + } + + skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x", + trace_enable, 
mask[3]); + + return 0; +} + +static int btintel_reset_debug_features(struct hci_dev *hdev, + const struct intel_debug_features *features) +{ + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00 }; + u8 trace_enable = 0x00; + struct sk_buff *skb; + + if (!features) { + bt_dev_warn(hdev, "Debug features not read"); + return -EINVAL; + } + + if (!(features->page1[0] & 0x3f)) { + bt_dev_info(hdev, "Telemetry exception format not supported"); + return 0; + } + + /* Should stop the trace before writing ddc event mask. */ + skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x", + trace_enable, mask[3]); + + return 0; +} + +int btintel_set_quality_report(struct hci_dev *hdev, bool enable) +{ + struct intel_debug_features features; + int err; + + bt_dev_dbg(hdev, "enable %d", enable); + + /* Read the Intel supported features and if new exception formats + * supported, need to load the additional DDC config to enable. + */ + err = btintel_read_debug_features(hdev, &features); + if (err) + return err; + + /* Set or reset the debug features. */ + if (enable) + err = btintel_set_debug_features(hdev, &features); + else + err = btintel_reset_debug_features(hdev, &features); + + return err; +} +EXPORT_SYMBOL_GPL(btintel_set_quality_report); + +static void btintel_coredump(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc4e, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Coredump failed (%ld)", PTR_ERR(skb)); + return; + } + + kfree_skb(skb); +} + +static void btintel_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + char buf[80]; + + snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", + coredump_info.hw_variant); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", + coredump_info.fw_build_num); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info.driver_name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: Intel\n"); + skb_put_data(skb, buf, strlen(buf)); +} + +static int btintel_register_devcoredump_support(struct hci_dev *hdev) +{ + struct intel_debug_features features; + int err; + + err = btintel_read_debug_features(hdev, &features); + if (err) { + bt_dev_info(hdev, "Error reading debug features"); + return err; + } + + if (!(features.page1[0] & 0x3f)) { + bt_dev_dbg(hdev, "Telemetry exception format not supported"); + return -EOPNOTSUPP; + } + + hci_devcd_register(hdev, btintel_coredump, btintel_dmp_hdr, NULL); + + return err; +} + +static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev, + struct intel_version *ver) +{ + const struct firmware *fw; + char fwname[64]; + int ret; + + snprintf(fwname, sizeof(fwname), + "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.bseq", + ver->hw_platform, ver->hw_variant, ver->hw_revision, + ver->fw_variant, ver->fw_revision, ver->fw_build_num, + ver->fw_build_ww, ver->fw_build_yy); + + ret = request_firmware(&fw, fwname, &hdev->dev); + if (ret < 0) { + if (ret 
== -EINVAL) { + bt_dev_err(hdev, "Intel firmware file request failed (%d)", + ret); + return NULL; + } + + bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)", + fwname, ret); + + /* If the correct firmware patch file is not found, use the + * default firmware patch file instead + */ + snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq", + ver->hw_platform, ver->hw_variant); + if (request_firmware(&fw, fwname, &hdev->dev) < 0) { + bt_dev_err(hdev, "failed to open default fw file: %s", + fwname); + return NULL; + } + } + + bt_dev_info(hdev, "Intel Bluetooth firmware file: %s", fwname); + + return fw; +} + +static int btintel_legacy_rom_patching(struct hci_dev *hdev, + const struct firmware *fw, + const u8 **fw_ptr, int *disable_patch) +{ + struct sk_buff *skb; + struct hci_command_hdr *cmd; + const u8 *cmd_param; + struct hci_event_hdr *evt = NULL; + const u8 *evt_param = NULL; + int remain = fw->size - (*fw_ptr - fw->data); + + /* The first byte indicates the types of the patch command or event. + * 0x01 means HCI command and 0x02 is HCI event. If the first bytes + * in the current firmware buffer doesn't start with 0x01 or + * the size of remain buffer is smaller than HCI command header, + * the firmware file is corrupted and it should stop the patching + * process. + */ + if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) { + bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read"); + return -EINVAL; + } + (*fw_ptr)++; + remain--; + + cmd = (struct hci_command_hdr *)(*fw_ptr); + *fw_ptr += sizeof(*cmd); + remain -= sizeof(*cmd); + + /* Ensure that the remain firmware data is long enough than the length + * of command parameter. If not, the firmware file is corrupted. + */ + if (remain < cmd->plen) { + bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len"); + return -EFAULT; + } + + /* If there is a command that loads a patch in the firmware + * file, then enable the patch upon success, otherwise just + * disable the manufacturer mode, for example patch activation + * is not required when the default firmware patch file is used + * because there are no patch data to load. + */ + if (*disable_patch && le16_to_cpu(cmd->opcode) == 0xfc8e) + *disable_patch = 0; + + cmd_param = *fw_ptr; + *fw_ptr += cmd->plen; + remain -= cmd->plen; + + /* This reads the expected events when the above command is sent to the + * device. Some vendor commands expects more than one events, for + * example command status event followed by vendor specific event. + * For this case, it only keeps the last expected event. so the command + * can be sent with __hci_cmd_sync_ev() which returns the sk_buff of + * last expected event. + */ + while (remain > HCI_EVENT_HDR_SIZE && *fw_ptr[0] == 0x02) { + (*fw_ptr)++; + remain--; + + evt = (struct hci_event_hdr *)(*fw_ptr); + *fw_ptr += sizeof(*evt); + remain -= sizeof(*evt); + + if (remain < evt->plen) { + bt_dev_err(hdev, "Intel fw corrupted: invalid evt len"); + return -EFAULT; + } + + evt_param = *fw_ptr; + *fw_ptr += evt->plen; + remain -= evt->plen; + } + + /* Every HCI commands in the firmware file has its correspond event. + * If event is not found or remain is smaller than zero, the firmware + * file is corrupted. 
+ */ + if (!evt || !evt_param || remain < 0) { + bt_dev_err(hdev, "Intel fw corrupted: invalid evt read"); + return -EFAULT; + } + + skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen, + cmd_param, evt->evt, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)", + cmd->opcode, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + /* It ensures that the returned event matches the event data read from + * the firmware file. At fist, it checks the length and then + * the contents of the event. + */ + if (skb->len != evt->plen) { + bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)", + le16_to_cpu(cmd->opcode)); + kfree_skb(skb); + return -EFAULT; + } + + if (memcmp(skb->data, evt_param, evt->plen)) { + bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)", + le16_to_cpu(cmd->opcode)); + kfree_skb(skb); + return -EFAULT; + } + kfree_skb(skb); + + return 0; +} + +static int btintel_legacy_rom_setup(struct hci_dev *hdev, + struct intel_version *ver) +{ + const struct firmware *fw; + const u8 *fw_ptr; + int disable_patch, err; + struct intel_version new_ver; + + BT_DBG("%s", hdev->name); + + /* fw_patch_num indicates the version of patch the device currently + * have. If there is no patch data in the device, it is always 0x00. + * So, if it is other than 0x00, no need to patch the device again. + */ + if (ver->fw_patch_num) { + bt_dev_info(hdev, + "Intel device is already patched. patch num: %02x", + ver->fw_patch_num); + goto complete; + } + + /* Opens the firmware patch file based on the firmware version read + * from the controller. If it fails to open the matching firmware + * patch file, it tries to open the default firmware patch file. + * If no patch file is found, allow the device to operate without + * a patch. + */ + fw = btintel_legacy_rom_get_fw(hdev, ver); + if (!fw) + goto complete; + fw_ptr = fw->data; + + /* Enable the manufacturer mode of the controller. + * Only while this mode is enabled, the driver can download the + * firmware patch data and configuration parameters. + */ + err = btintel_enter_mfg(hdev); + if (err) { + release_firmware(fw); + return err; + } + + disable_patch = 1; + + /* The firmware data file consists of list of Intel specific HCI + * commands and its expected events. The first byte indicates the + * type of the message, either HCI command or HCI event. + * + * It reads the command and its expected event from the firmware file, + * and send to the controller. Once __hci_cmd_sync_ev() returns, + * the returned event is compared with the event read from the firmware + * file and it will continue until all the messages are downloaded to + * the controller. + * + * Once the firmware patching is completed successfully, + * the manufacturer mode is disabled with reset and activating the + * downloaded patch. + * + * If the firmware patching fails, the manufacturer mode is + * disabled with reset and deactivating the patch. + * + * If the default patch file is used, no reset is done when disabling + * the manufacturer. + */ + while (fw->size > fw_ptr - fw->data) { + int ret; + + ret = btintel_legacy_rom_patching(hdev, fw, &fw_ptr, + &disable_patch); + if (ret < 0) + goto exit_mfg_deactivate; + } + + release_firmware(fw); + + if (disable_patch) + goto exit_mfg_disable; + + /* Patching completed successfully and disable the manufacturer mode + * with reset and activate the downloaded firmware patches. 
+ */ + err = btintel_exit_mfg(hdev, true, true); + if (err) + return err; + + /* Need build number for downloaded fw patches in + * every power-on boot + */ + err = btintel_read_version(hdev, &new_ver); + if (err) + return err; + + bt_dev_info(hdev, "Intel BT fw patch 0x%02x completed & activated", + new_ver.fw_patch_num); + + goto complete; + +exit_mfg_disable: + /* Disable the manufacturer mode without reset */ + err = btintel_exit_mfg(hdev, false, false); + if (err) + return err; + + bt_dev_info(hdev, "Intel firmware patch completed"); + + goto complete; + +exit_mfg_deactivate: + release_firmware(fw); + + /* Patching failed. Disable the manufacturer mode with reset and + * deactivate the downloaded firmware patches. + */ + err = btintel_exit_mfg(hdev, true, false); + if (err) + return err; + + bt_dev_info(hdev, "Intel firmware patch completed and deactivated"); + +complete: + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. + */ + btintel_set_event_mask_mfg(hdev, false); + + btintel_check_bdaddr(hdev); + + return 0; +} + +static int btintel_download_wait(struct hci_dev *hdev, ktime_t calltime, int msec) +{ + ktime_t delta, rettime; + unsigned long long duration; + int err; + + btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); + + bt_dev_info(hdev, "Waiting for firmware download to complete"); + + err = btintel_wait_on_flag_timeout(hdev, INTEL_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(msec)); + if (err == -EINTR) { + bt_dev_err(hdev, "Firmware loading interrupted"); + return err; + } + + if (err) { + bt_dev_err(hdev, "Firmware loading timeout"); + return -ETIMEDOUT; + } + + if (btintel_test_flag(hdev, INTEL_FIRMWARE_FAILED)) { + bt_dev_err(hdev, "Firmware loading failed"); + return -ENOEXEC; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); + + return 0; +} + +static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec) +{ + ktime_t delta, rettime; + unsigned long long duration; + int err; + + bt_dev_info(hdev, "Waiting for device to boot"); + + err = btintel_wait_on_flag_timeout(hdev, INTEL_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(msec)); + if (err == -EINTR) { + bt_dev_err(hdev, "Device boot interrupted"); + return -EINTR; + } + + if (err) { + bt_dev_err(hdev, "Device boot timeout"); + return -ETIMEDOUT; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device booted in %llu usecs", duration); + + return 0; +} + +static int btintel_boot(struct hci_dev *hdev, u32 boot_addr) +{ + ktime_t calltime; + int err; + + calltime = ktime_get(); + + btintel_set_flag(hdev, INTEL_BOOTING); + + err = btintel_send_intel_reset(hdev, boot_addr); + if (err) { + bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err); + btintel_reset_to_bootloader(hdev); + return err; + } + + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending bootup notification. + * + * Booting into operational firmware should not take longer than + * 1 second. However if that happens, then just fail the setup + * since something went wrong. 
+ */ + err = btintel_boot_wait(hdev, calltime, 1000); + if (err == -ETIMEDOUT) + btintel_reset_to_bootloader(hdev); + + return err; +} + +static int btintel_get_fw_name(struct intel_version *ver, + struct intel_boot_params *params, + char *fw_name, size_t len, + const char *suffix) +{ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + snprintf(fw_name, len, "intel/ibt-%u-%u.%s", + ver->hw_variant, + le16_to_cpu(params->dev_revid), + suffix); + break; + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s", + ver->hw_variant, + ver->hw_revision, + ver->fw_revision, + suffix); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int btintel_download_fw(struct hci_dev *hdev, + struct intel_version *ver, + struct intel_boot_params *params, + u32 *boot_param) +{ + const struct firmware *fw; + char fwname[64]; + int err; + ktime_t calltime; + + if (!ver || !params) + return -EINVAL; + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. + * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver->fw_variant == 0x23) { + btintel_clear_flag(hdev, INTEL_BOOTLOADER); + btintel_check_bdaddr(hdev); + + /* SfP and WsP don't seem to update the firmware version on file + * so version checking is currently possible. + */ + switch (ver->hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + return 0; + } + + /* Proceed to download to check if the version matches */ + goto download; + } + + /* Read the secure boot parameters to identify the operating + * details of the bootloader. + */ + err = btintel_read_boot_params(hdev, params); + if (err) + return err; + + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (params->limited_cce != 0x00) { + bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", + params->limited_cce); + return -EINVAL; + } + + /* If the OTP has no valid Bluetooth device address, then there will + * also be no valid address for the operational firmware. + */ + if (!bacmp(¶ms->otp_bdaddr, BDADDR_ANY)) { + bt_dev_info(hdev, "No device address configured"); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + +download: + /* With this Intel bootloader only the hardware variant and device + * revision information are used to select the right firmware for SfP + * and WsP. + * + * The firmware filename is ibt--.sfi. + * + * Currently the supported hardware variants are: + * 11 (0x0b) for iBT3.0 (LnP/SfP) + * 12 (0x0c) for iBT3.5 (WsP) + * + * For ThP/JfP and for future SKU's, the FW name varies based on HW + * variant, HW revision and FW revision, as these are dependent on CNVi + * and RF Combination. + * + * 17 (0x11) for iBT3.5 (JfP) + * 18 (0x12) for iBT3.5 (ThP) + * + * The firmware file name for these will be + * ibt---.sfi. 
+ * + */ + err = btintel_get_fw_name(ver, params, fwname, sizeof(fwname), "sfi"); + if (err < 0) { + if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { + /* Firmware has already been loaded */ + btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); + return 0; + } + + bt_dev_err(hdev, "Unsupported Intel firmware naming"); + return -EINVAL; + } + + err = firmware_request_nowarn(&fw, fwname, &hdev->dev); + if (err < 0) { + if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { + /* Firmware has already been loaded */ + btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); + return 0; + } + + bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)", + fwname, err); + return err; + } + + bt_dev_info(hdev, "Found device firmware: %s", fwname); + + if (fw->size < 644) { + bt_dev_err(hdev, "Invalid size of firmware file (%zu)", + fw->size); + err = -EBADF; + goto done; + } + + calltime = ktime_get(); + + btintel_set_flag(hdev, INTEL_DOWNLOADING); + + /* Start firmware downloading and get boot parameter */ + err = btintel_download_firmware(hdev, ver, fw, boot_param); + if (err < 0) { + if (err == -EALREADY) { + /* Firmware has already been loaded */ + btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); + err = 0; + goto done; + } + + /* When FW download fails, send Intel Reset to retry + * FW download. + */ + btintel_reset_to_bootloader(hdev); + goto done; + } + + /* Before switching the device into operational mode and with that + * booting the loaded firmware, wait for the bootloader notification + * that all fragments have been successfully received. + * + * When the event processing receives the notification, then the + * INTEL_DOWNLOADING flag will be cleared. + * + * The firmware loading should not take longer than 5 seconds + * and thus just timeout if that happens and fail the setup + * of this device. + */ + err = btintel_download_wait(hdev, calltime, 5000); + if (err == -ETIMEDOUT) + btintel_reset_to_bootloader(hdev); + +done: + release_firmware(fw); + return err; +} + +static int btintel_bootloader_setup(struct hci_dev *hdev, + struct intel_version *ver) +{ + struct intel_version new_ver; + struct intel_boot_params params; + u32 boot_param; + char ddcname[64]; + int err; + + BT_DBG("%s", hdev->name); + + /* Set the default boot parameter to 0x0 and it is updated to + * SKU specific boot parameter after reading Intel_Write_Boot_Params + * command while downloading the firmware. + */ + boot_param = 0x00000000; + + btintel_set_flag(hdev, INTEL_BOOTLOADER); + + err = btintel_download_fw(hdev, ver, ¶ms, &boot_param); + if (err) + return err; + + /* controller is already having an operational firmware */ + if (ver->fw_variant == 0x23) + goto finish; + + err = btintel_boot(hdev, boot_param); + if (err) + return err; + + btintel_clear_flag(hdev, INTEL_BOOTLOADER); + + err = btintel_get_fw_name(ver, ¶ms, ddcname, + sizeof(ddcname), "ddc"); + + if (err < 0) { + bt_dev_err(hdev, "Unsupported Intel firmware naming"); + } else { + /* Once the device is running in operational mode, it needs to + * apply the device configuration (DDC) parameters. + * + * The device can work without DDC parameters, so even if it + * fails to load the file, no need to fail the setup. + */ + btintel_load_ddc_config(hdev, ddcname); + } + + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); + + /* Read the Intel version information after loading the FW */ + err = btintel_read_version(hdev, &new_ver); + if (err) + return err; + + btintel_version_info(hdev, &new_ver); + +finish: + /* Set the event mask for Intel specific vendor events. 
This enables + * a few extra events that are useful during general operation. It + * does not enable any debugging related events. + * + * The device will function correctly without these events enabled + * and thus no need to fail the setup. + */ + btintel_set_event_mask(hdev, false); + + return 0; +} + +static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver, + char *fw_name, size_t len, + const char *suffix) +{ + /* The firmware file name for new generation controllers will be + * ibt-- + */ + snprintf(fw_name, len, "intel/ibt-%04x-%04x.%s", + INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), + INTEL_CNVX_TOP_STEP(ver->cnvi_top)), + INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), + INTEL_CNVX_TOP_STEP(ver->cnvr_top)), + suffix); +} + +static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, + struct intel_version_tlv *ver, + u32 *boot_param) +{ + const struct firmware *fw; + char fwname[64]; + int err; + ktime_t calltime; + + if (!ver || !boot_param) + return -EINVAL; + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x03 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. + * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver->img_type == 0x03) { + btintel_clear_flag(hdev, INTEL_BOOTLOADER); + btintel_check_bdaddr(hdev); + } else { + /* + * Check for valid bd address in boot loader mode. Device + * will be marked as unconfigured if empty bd address is + * found. + */ + if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) { + bt_dev_info(hdev, "No device address configured"); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + } + + btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); + err = firmware_request_nowarn(&fw, fwname, &hdev->dev); + if (err < 0) { + if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { + /* Firmware has already been loaded */ + btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); + return 0; + } + + bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)", + fwname, err); + + return err; + } + + bt_dev_info(hdev, "Found device firmware: %s", fwname); + + if (fw->size < 644) { + bt_dev_err(hdev, "Invalid size of firmware file (%zu)", + fw->size); + err = -EBADF; + goto done; + } + + calltime = ktime_get(); + + btintel_set_flag(hdev, INTEL_DOWNLOADING); + + /* Start firmware downloading and get boot parameter */ + err = btintel_download_fw_tlv(hdev, ver, fw, boot_param, + INTEL_HW_VARIANT(ver->cnvi_bt), + ver->sbe_type); + if (err < 0) { + if (err == -EALREADY) { + /* Firmware has already been loaded */ + btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); + err = 0; + goto done; + } + + /* When FW download fails, send Intel Reset to retry + * FW download. + */ + btintel_reset_to_bootloader(hdev); + goto done; + } + + /* Before switching the device into operational mode and with that + * booting the loaded firmware, wait for the bootloader notification + * that all fragments have been successfully received. + * + * When the event processing receives the notification, then the + * BTUSB_DOWNLOADING flag will be cleared. 
+ * + * The firmware loading should not take longer than 5 seconds + * and thus just timeout if that happens and fail the setup + * of this device. + */ + err = btintel_download_wait(hdev, calltime, 5000); + if (err == -ETIMEDOUT) + btintel_reset_to_bootloader(hdev); + +done: + release_firmware(fw); + return err; +} + +static int btintel_get_codec_config_data(struct hci_dev *hdev, + __u8 link, struct bt_codec *codec, + __u8 *ven_len, __u8 **ven_data) +{ + int err = 0; + + if (!ven_data || !ven_len) + return -EINVAL; + + *ven_len = 0; + *ven_data = NULL; + + if (link != ESCO_LINK) { + bt_dev_err(hdev, "Invalid link type(%u)", link); + return -EINVAL; + } + + *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); + if (!*ven_data) { + err = -ENOMEM; + goto error; + } + + /* supports only CVSD and mSBC offload codecs */ + switch (codec->id) { + case 0x02: + **ven_data = 0x00; + break; + case 0x05: + **ven_data = 0x01; + break; + default: + err = -EINVAL; + bt_dev_err(hdev, "Invalid codec id(%u)", codec->id); + goto error; + } + /* codec and its capabilities are pre-defined to ids + * preset id = 0x00 represents CVSD codec with sampling rate 8K + * preset id = 0x01 represents mSBC codec with sampling rate 16K + */ + *ven_len = sizeof(__u8); + return err; + +error: + kfree(*ven_data); + *ven_data = NULL; + return err; +} + +static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) +{ + /* Intel uses 1 as data path id for all the usecases */ + *data_path_id = 1; + return 0; +} + +static int btintel_configure_offload(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err = 0; + struct intel_offload_use_cases *use_cases; + + skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading offload use cases failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len < sizeof(*use_cases)) { + err = -EIO; + goto error; + } + + use_cases = (void *)skb->data; + + if (use_cases->status) { + err = -bt_to_errno(skb->data[0]); + goto error; + } + + if (use_cases->preset[0] & 0x03) { + hdev->get_data_path_id = btintel_get_data_path_id; + hdev->get_codec_config_data = btintel_get_codec_config_data; + } +error: + kfree_skb(skb); + return err; +} + +static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) +{ + struct btintel_ppag ppag; + struct sk_buff *skb; + struct hci_ppag_enable_cmd ppag_cmd; + acpi_handle handle; + + /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ + switch (ver->cnvr_top & 0xFFF) { + case 0x504: /* Hrp2 */ + case 0x202: /* Jfp2 */ + case 0x201: /* Jfp1 */ + bt_dev_dbg(hdev, "PPAG not supported for Intel CNVr (0x%3x)", + ver->cnvr_top & 0xFFF); + return; + } + + handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); + if (!handle) { + bt_dev_info(hdev, "No support for BT device in ACPI firmware"); + return; + } + + memset(&ppag, 0, sizeof(ppag)); + + ppag.hdev = hdev; + ppag.status = AE_NOT_FOUND; + acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, + btintel_ppag_callback, &ppag, NULL); + + if (ACPI_FAILURE(ppag.status)) { + if (ppag.status == AE_NOT_FOUND) { + bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); + return; + } + return; + } + + if (ppag.domain != 0x12) { + bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); + return; + } + + /* PPAG mode + * BIT 0 : 0 Disabled in EU + * 1 Enabled in EU + * BIT 1 : 0 Disabled in China + * 1 Enabled in China + */ + if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { + bt_dev_dbg(hdev, "PPAG-BT: EU, 
China mode are disabled in CB/BIOS"); + return; + } + + ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); + + skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); + return; + } + bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); + kfree_skb(skb); +} + +static int btintel_acpi_reset_method(struct hci_dev *hdev) +{ + int ret = 0; + acpi_status status; + union acpi_object *p, *ref; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + status = acpi_evaluate_object(ACPI_HANDLE(GET_HCIDEV_DEV(hdev)), "_PRR", NULL, &buffer); + if (ACPI_FAILURE(status)) { + bt_dev_err(hdev, "Failed to run _PRR method"); + ret = -ENODEV; + return ret; + } + p = buffer.pointer; + + if (p->package.count != 1 || p->type != ACPI_TYPE_PACKAGE) { + bt_dev_err(hdev, "Invalid arguments"); + ret = -EINVAL; + goto exit_on_error; + } + + ref = &p->package.elements[0]; + if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) { + bt_dev_err(hdev, "Invalid object type: 0x%x", ref->type); + ret = -EINVAL; + goto exit_on_error; + } + + status = acpi_evaluate_object(ref->reference.handle, "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { + bt_dev_err(hdev, "Failed to run_RST method"); + ret = -ENODEV; + goto exit_on_error; + } + +exit_on_error: + kfree(buffer.pointer); + return ret; +} + +static void btintel_set_dsm_reset_method(struct hci_dev *hdev, + struct intel_version_tlv *ver_tlv) +{ + struct btintel_data *data = hci_get_priv(hdev); + acpi_handle handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); + u8 reset_payload[4] = {0x01, 0x00, 0x01, 0x00}; + union acpi_object *obj, argv4; + enum { + RESET_TYPE_WDISABLE2, + RESET_TYPE_VSEC + }; + + handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); + + if (!handle) { + bt_dev_dbg(hdev, "No support for bluetooth device in ACPI firmware"); + return; + } + + if (!acpi_has_method(handle, "_PRR")) { + bt_dev_err(hdev, "No support for _PRR ACPI method"); + return; + } + + switch (ver_tlv->cnvi_top & 0xfff) { + case 0x910: /* GalePeak2 */ + reset_payload[2] = RESET_TYPE_VSEC; + break; + default: + /* WDISABLE2 is the default reset method */ + reset_payload[2] = RESET_TYPE_WDISABLE2; + + if (!acpi_check_dsm(handle, &btintel_guid_dsm, 0, + BIT(DSM_SET_WDISABLE2_DELAY))) { + bt_dev_err(hdev, "No dsm support to set reset delay"); + return; + } + argv4.integer.type = ACPI_TYPE_INTEGER; + /* delay required to toggle BT power */ + argv4.integer.value = 160; + obj = acpi_evaluate_dsm(handle, &btintel_guid_dsm, 0, + DSM_SET_WDISABLE2_DELAY, &argv4); + if (!obj) { + bt_dev_err(hdev, "Failed to call dsm to set reset delay"); + return; + } + ACPI_FREE(obj); + } + + bt_dev_info(hdev, "DSM reset method type: 0x%02x", reset_payload[2]); + + if (!acpi_check_dsm(handle, &btintel_guid_dsm, 0, + DSM_SET_RESET_METHOD)) { + bt_dev_warn(hdev, "No support for dsm to set reset method"); + return; + } + argv4.buffer.type = ACPI_TYPE_BUFFER; + argv4.buffer.length = sizeof(reset_payload); + argv4.buffer.pointer = reset_payload; + + obj = acpi_evaluate_dsm(handle, &btintel_guid_dsm, 0, + DSM_SET_RESET_METHOD, &argv4); + if (!obj) { + bt_dev_err(hdev, "Failed to call dsm to set reset method"); + return; + } + ACPI_FREE(obj); + data->acpi_reset_method = btintel_acpi_reset_method; +} + +static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, + struct intel_version_tlv *ver) +{ + u32 boot_param; + char ddcname[64]; + int err; + struct intel_version_tlv new_ver; + + bt_dev_dbg(hdev, ""); + 
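For readers following the TLV-based setup path that begins below, the firmware file name built by btintel_get_fw_name_tlv() is derived from the CNVi/CNVr "top" registers via the INTEL_CNVX_TOP_* macros defined later in btintel.h (type in bits 0-11, stepping in bits 24-27, packed as __swab16((type << 4) | step)). A minimal standalone sketch of that derivation, with made-up register values and a hand-rolled byte swap; this is illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of __swab16(): swap the two bytes of a 16-bit value. */
static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Mirror of INTEL_CNVX_TOP_PACK_SWAB(TYPE(x), STEP(x)) from btintel.h. */
static uint16_t cnvx_top_pack_swab(uint32_t cnvx_top)
{
	uint16_t type = cnvx_top & 0x0fff;		/* INTEL_CNVX_TOP_TYPE */
	uint16_t step = (cnvx_top & 0x0f000000) >> 24;	/* INTEL_CNVX_TOP_STEP */

	return swab16((uint16_t)((type << 4) | step));
}

int main(void)
{
	/* Hypothetical CNVi/CNVr top values, chosen only for illustration. */
	uint32_t cnvi_top = 0x01000400;
	uint32_t cnvr_top = 0x01000410;
	char fwname[64];

	/* Same "intel/ibt-%04x-%04x.<suffix>" pattern as btintel_get_fw_name_tlv(). */
	snprintf(fwname, sizeof(fwname), "intel/ibt-%04x-%04x.sfi",
		 cnvx_top_pack_swab(cnvi_top), cnvx_top_pack_swab(cnvr_top));
	printf("%s\n", fwname);	/* prints intel/ibt-0140-0141.sfi for these values */
	return 0;
}
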
+ /* Set the default boot parameter to 0x0 and it is updated to + * SKU specific boot parameter after reading Intel_Write_Boot_Params + * command while downloading the firmware. + */ + boot_param = 0x00000000; + + btintel_set_flag(hdev, INTEL_BOOTLOADER); + + err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param); + if (err) + return err; + + /* check if controller is already having an operational firmware */ + if (ver->img_type == 0x03) + goto finish; + + err = btintel_boot(hdev, boot_param); + if (err) + return err; + + btintel_clear_flag(hdev, INTEL_BOOTLOADER); + + btintel_get_fw_name_tlv(ver, ddcname, sizeof(ddcname), "ddc"); + /* Once the device is running in operational mode, it needs to + * apply the device configuration (DDC) parameters. + * + * The device can work without DDC parameters, so even if it + * fails to load the file, no need to fail the setup. + */ + btintel_load_ddc_config(hdev, ddcname); + + /* Read supported use cases and set callbacks to fetch datapath id */ + btintel_configure_offload(hdev); + + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); + + /* Set PPAG feature */ + btintel_set_ppag(hdev, ver); + + /* Read the Intel version information after loading the FW */ + err = btintel_read_version_tlv(hdev, &new_ver); + if (err) + return err; + + btintel_version_info_tlv(hdev, &new_ver); + +finish: + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. It + * does not enable any debugging related events. + * + * The device will function correctly without these events enabled + * and thus no need to fail the setup. + */ + btintel_set_event_mask(hdev, false); + + return 0; +} + +static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) +{ + switch (hw_variant) { + /* Legacy bootloader devices that supports MSFT Extension */ + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + /* All Intel new genration controllers support the Microsoft vendor + * extension are using 0xFC1E for VsMsftOpCode. + */ + case 0x17: + case 0x18: + case 0x19: + case 0x1b: + case 0x1c: + hci_set_msft_opcode(hdev, 0xFC1E); + break; + default: + /* Not supported */ + break; + } +} + +static int btintel_setup_combined(struct hci_dev *hdev) +{ + const u8 param[1] = { 0xFF }; + struct intel_version ver; + struct intel_version_tlv ver_tlv; + struct sk_buff *skb; + int err; + + BT_DBG("%s", hdev->name); + + /* The some controllers have a bug with the first HCI command sent to it + * returning number of completed commands as zero. This would stall the + * command processing in the Bluetooth core. + * + * As a workaround, send HCI Reset command first which will reset the + * number of completed commands and allow normal command processing + * from now on. + * + * Regarding the INTEL_BROKEN_SHUTDOWN_LED flag, these devices maybe + * in the SW_RFKILL ON state as a workaround of fixing LED issue during + * the shutdown() procedure, and once the device is in SW_RFKILL ON + * state, the only way to exit out of it is sending the HCI_Reset + * command. 
+ */ + if (btintel_test_flag(hdev, INTEL_BROKEN_INITIAL_NCMD) || + btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) { + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, + "sending initial HCI reset failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + } + + /* Starting from TyP device, the command parameter and response are + * changed even though the OCF for HCI_Intel_Read_Version command + * remains same. The legacy devices can handle even if the + * command has a parameter and returns a correct version information. + * So, it uses new format to support both legacy and new format. + */ + skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading Intel version command failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + /* Check the status */ + if (skb->data[0]) { + bt_dev_err(hdev, "Intel Read Version command failed (%02x)", + skb->data[0]); + err = -EIO; + goto exit_error; + } + + /* Apply the common HCI quirks for Intel device */ + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); + + /* Set up the quality report callback for Intel devices */ + hdev->set_quality_report = btintel_set_quality_report; + + /* For Legacy device, check the HW platform value and size */ + if (skb->len == sizeof(ver) && skb->data[1] == 0x37) { + bt_dev_dbg(hdev, "Read the legacy Intel version information"); + + memcpy(&ver, skb->data, sizeof(ver)); + + /* Display version information */ + btintel_version_info(hdev, &ver); + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come + * along. + */ + switch (ver.hw_variant) { + case 0x07: /* WP */ + case 0x08: /* StP */ + /* Legacy ROM product */ + btintel_set_flag(hdev, INTEL_ROM_LEGACY); + + /* Apply the device specific HCI quirks + * + * WBS for SdP - For the Legacy ROM products, only SdP + * supports the WBS. But the version information is not + * enough to use here because the StP2 and SdP have same + * hw_variant and fw_variant. 
So, this flag is set by + * the transport driver (btusb) based on the HW info + * (idProduct) + */ + if (!btintel_test_flag(hdev, + INTEL_ROM_LEGACY_NO_WBS_SUPPORT)) + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + &hdev->quirks); + if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) + set_bit(HCI_QUIRK_VALID_LE_STATES, + &hdev->quirks); + + err = btintel_legacy_rom_setup(hdev, &ver); + break; + case 0x0b: /* SfP */ + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + fallthrough; + case 0x0c: /* WsP */ + /* Apply the device specific HCI quirks + * + * All Legacy bootloader devices support WBS + */ + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + &hdev->quirks); + + /* These variants don't seem to support LE Coded PHY */ + set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); + + /* Setup MSFT Extension support */ + btintel_set_msft_opcode(hdev, ver.hw_variant); + + err = btintel_bootloader_setup(hdev, &ver); + btintel_register_devcoredump_support(hdev); + break; + default: + bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", + ver.hw_variant); + err = -EINVAL; + } + + goto exit_error; + } + + /* memset ver_tlv to start with clean state as few fields are exclusive + * to bootloader mode and are not populated in operational mode + */ + memset(&ver_tlv, 0, sizeof(ver_tlv)); + /* For TLV type device, parse the tlv data */ + err = btintel_parse_version_tlv(hdev, &ver_tlv, skb); + if (err) { + bt_dev_err(hdev, "Failed to parse TLV version information"); + goto exit_error; + } + + if (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt) != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", + INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)); + err = -EINVAL; + goto exit_error; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come + * along. + */ + switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) { + case 0x11: /* JfP */ + case 0x12: /* ThP */ + case 0x13: /* HrP */ + case 0x14: /* CcP */ + /* Some legacy bootloader devices starting from JfP, + * the operational firmware supports both old and TLV based + * HCI_Intel_Read_Version command based on the command + * parameter. + * + * For upgrading firmware case, the TLV based version cannot + * be used because the firmware filename for legacy bootloader + * is based on the old format. + * + * Also, it is not easy to convert TLV based version from the + * legacy version format. + * + * So, as a workaround for those devices, use the legacy + * HCI_Intel_Read_Version to get the version information and + * run the legacy bootloader setup. 
+ */ + err = btintel_read_version(hdev, &ver); + if (err) + break; + + /* Apply the device specific HCI quirks + * + * All Legacy bootloader devices support WBS + */ + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + + /* These variants don't seem to support LE Coded PHY */ + set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); + + /* Set Valid LE States quirk */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + + /* Setup MSFT Extension support */ + btintel_set_msft_opcode(hdev, ver.hw_variant); + + err = btintel_bootloader_setup(hdev, &ver); + btintel_register_devcoredump_support(hdev); + break; + case 0x17: + case 0x18: + case 0x19: + case 0x1b: + case 0x1c: + /* Display version information of TLV type */ + btintel_version_info_tlv(hdev, &ver_tlv); + + /* Apply the device specific HCI quirks for TLV based devices + * + * All TLV based devices support WBS + */ + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + + /* Apply LE States quirk from solar onwards */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + + /* Setup MSFT Extension support */ + btintel_set_msft_opcode(hdev, + INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); + btintel_set_dsm_reset_method(hdev, &ver_tlv); + + err = btintel_bootloader_setup_tlv(hdev, &ver_tlv); + btintel_register_devcoredump_support(hdev); + break; + default: + bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", + INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); + err = -EINVAL; + break; + } + +exit_error: + kfree_skb(skb); + + return err; +} + +static int btintel_shutdown_combined(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int ret; + + /* Send HCI Reset to the controller to stop any BT activity which + * were triggered. This will help to save power and maintain the + * sync b/w Host and controller + */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "HCI reset during shutdown failed"); + return PTR_ERR(skb); + } + kfree_skb(skb); + + + /* Some platforms have an issue with BT LED when the interface is + * down or BT radio is turned off, which takes 5 seconds to BT LED + * goes off. As a workaround, sends HCI_Intel_SW_RFKILL to put the + * device in the RFKILL ON state which turns off the BT LED immediately. 
+ */ + if (btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) { + skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "turning off Intel device LED failed"); + return ret; + } + kfree_skb(skb); + } + + return 0; +} + +int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name) +{ + hdev->manufacturer = 2; + hdev->setup = btintel_setup_combined; + hdev->shutdown = btintel_shutdown_combined; + hdev->hw_error = btintel_hw_error; + hdev->set_diag = btintel_set_diag_combined; + hdev->set_bdaddr = btintel_set_bdaddr; + + coredump_info.driver_name = driver_name; + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_configure_setup); + +static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct intel_tlv *tlv = (void *)&skb->data[5]; + + /* The first event is always an event type TLV */ + if (tlv->type != INTEL_TLV_TYPE_ID) + goto recv_frame; + + switch (tlv->val[0]) { + case INTEL_TLV_SYSTEM_EXCEPTION: + case INTEL_TLV_FATAL_EXCEPTION: + case INTEL_TLV_DEBUG_EXCEPTION: + case INTEL_TLV_TEST_EXCEPTION: + /* Generate devcoredump from exception */ + if (!hci_devcd_init(hdev, skb->len)) { + hci_devcd_append(hdev, skb); + hci_devcd_complete(hdev); + } else { + bt_dev_err(hdev, "Failed to generate devcoredump"); + kfree_skb(skb); + } + return 0; + default: + bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]); + } + +recv_frame: + return hci_recv_frame(hdev, skb); +} + +int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_event_hdr *hdr = (void *)skb->data; + const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 }; + + if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && + hdr->plen > 0) { + const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1; + unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1; + + if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { + switch (skb->data[2]) { + case 0x02: + /* When switching to the operational firmware + * the device sends a vendor specific event + * indicating that the bootup completed. + */ + btintel_bootup(hdev, ptr, len); + break; + case 0x06: + /* When the firmware loading completes the + * device sends out a vendor specific event + * indicating the result of the firmware + * loading. + */ + btintel_secure_send_result(hdev, ptr, len); + break; + } + } + + /* Handle all diagnostics events separately. May still call + * hci_recv_frame. 
+ */ + if (len >= sizeof(diagnostics_hdr) && + memcmp(&skb->data[2], diagnostics_hdr, + sizeof(diagnostics_hdr)) == 0) { + return btintel_diagnostics(hdev, skb); + } + } + + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL_GPL(btintel_recv_event); + +void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len) +{ + const struct intel_bootup *evt = ptr; + + if (len != sizeof(*evt)) + return; + + if (btintel_test_and_clear_flag(hdev, INTEL_BOOTING)) + btintel_wake_up_flag(hdev, INTEL_BOOTING); +} +EXPORT_SYMBOL_GPL(btintel_bootup); + +void btintel_secure_send_result(struct hci_dev *hdev, + const void *ptr, unsigned int len) +{ + const struct intel_secure_send_result *evt = ptr; + + if (len != sizeof(*evt)) + return; + + if (evt->result) + btintel_set_flag(hdev, INTEL_FIRMWARE_FAILED); + + if (btintel_test_and_clear_flag(hdev, INTEL_DOWNLOADING) && + btintel_test_flag(hdev, INTEL_FIRMWARE_LOADED)) + btintel_wake_up_flag(hdev, INTEL_DOWNLOADING); +} +EXPORT_SYMBOL_GPL(btintel_secure_send_result); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("intel/ibt-11-5.sfi"); +MODULE_FIRMWARE("intel/ibt-11-5.ddc"); +MODULE_FIRMWARE("intel/ibt-12-16.sfi"); +MODULE_FIRMWARE("intel/ibt-12-16.ddc"); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h new file mode 100644 index 000000000..3a2d5b421 --- /dev/null +++ b/drivers/bluetooth/btintel.h @@ -0,0 +1,325 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Bluetooth support for Intel devices + * + * Copyright (C) 2015 Intel Corporation + */ + +/* List of tlv type */ +enum { + INTEL_TLV_CNVI_TOP = 0x10, + INTEL_TLV_CNVR_TOP, + INTEL_TLV_CNVI_BT, + INTEL_TLV_CNVR_BT, + INTEL_TLV_CNVI_OTP, + INTEL_TLV_CNVR_OTP, + INTEL_TLV_DEV_REV_ID, + INTEL_TLV_USB_VENDOR_ID, + INTEL_TLV_USB_PRODUCT_ID, + INTEL_TLV_PCIE_VENDOR_ID, + INTEL_TLV_PCIE_DEVICE_ID, + INTEL_TLV_PCIE_SUBSYSTEM_ID, + INTEL_TLV_IMAGE_TYPE, + INTEL_TLV_TIME_STAMP, + INTEL_TLV_BUILD_TYPE, + INTEL_TLV_BUILD_NUM, + INTEL_TLV_FW_BUILD_PRODUCT, + INTEL_TLV_FW_BUILD_HW, + INTEL_TLV_FW_STEP, + INTEL_TLV_BT_SPEC, + INTEL_TLV_MFG_NAME, + INTEL_TLV_HCI_REV, + INTEL_TLV_LMP_SUBVER, + INTEL_TLV_OTP_PATCH_VER, + INTEL_TLV_SECURE_BOOT, + INTEL_TLV_KEY_FROM_HDR, + INTEL_TLV_OTP_LOCK, + INTEL_TLV_API_LOCK, + INTEL_TLV_DEBUG_LOCK, + INTEL_TLV_MIN_FW, + INTEL_TLV_LIMITED_CCE, + INTEL_TLV_SBE_TYPE, + INTEL_TLV_OTP_BDADDR, + INTEL_TLV_UNLOCKED_STATE +}; + +struct intel_tlv { + u8 type; + u8 len; + u8 val[]; +} __packed; + +struct intel_version_tlv { + u32 cnvi_top; + u32 cnvr_top; + u32 cnvi_bt; + u32 cnvr_bt; + u16 dev_rev_id; + u8 img_type; + u16 timestamp; + u8 build_type; + u32 build_num; + u8 secure_boot; + u8 otp_lock; + u8 api_lock; + u8 debug_lock; + u8 min_fw_build_nn; + u8 min_fw_build_cw; + u8 min_fw_build_yy; + u8 limited_cce; + u8 sbe_type; + bdaddr_t otp_bd_addr; +}; + +struct intel_version { + u8 status; + u8 hw_platform; + u8 hw_variant; + u8 hw_revision; + u8 fw_variant; + u8 fw_revision; + u8 fw_build_num; + u8 fw_build_ww; + u8 fw_build_yy; + u8 fw_patch_num; +} __packed; + +struct intel_boot_params { + __u8 status; + __u8 otp_format; + __u8 otp_content; + __u8 otp_patch; + __le16 dev_revid; + __u8 secure_boot; + __u8 key_from_hdr; + __u8 key_type; + __u8 otp_lock; + __u8 api_lock; + __u8 debug_lock; + bdaddr_t otp_bdaddr; + __u8 min_fw_build_nn; + __u8 min_fw_build_cw; + __u8 min_fw_build_yy; + __u8 limited_cce; + __u8 
unlocked_state; +} __packed; + +struct intel_bootup { + __u8 zero; + __u8 num_cmds; + __u8 source; + __u8 reset_type; + __u8 reset_reason; + __u8 ddc_status; +} __packed; + +struct intel_secure_send_result { + __u8 result; + __le16 opcode; + __u8 status; +} __packed; + +struct intel_reset { + __u8 reset_type; + __u8 patch_enable; + __u8 ddc_reload; + __u8 boot_option; + __le32 boot_param; +} __packed; + +struct intel_debug_features { + __u8 page1[16]; +} __packed; + +struct intel_offload_use_cases { + __u8 status; + __u8 preset[8]; +} __packed; + +#define INTEL_OP_PPAG_CMD 0xFE0B +struct hci_ppag_enable_cmd { + __le32 ppag_enable_flags; +} __packed; + +#define INTEL_TLV_TYPE_ID 0x01 + +#define INTEL_TLV_SYSTEM_EXCEPTION 0x00 +#define INTEL_TLV_FATAL_EXCEPTION 0x01 +#define INTEL_TLV_DEBUG_EXCEPTION 0x02 +#define INTEL_TLV_TEST_EXCEPTION 0xDE + +#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8)) +#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16)) +#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff) +#define INTEL_CNVX_TOP_STEP(cnvx_top) (((cnvx_top) & 0x0f000000) >> 24) +#define INTEL_CNVX_TOP_PACK_SWAB(t, s) __swab16(((__u16)(((t) << 4) | (s)))) + +enum { + INTEL_BOOTLOADER, + INTEL_DOWNLOADING, + INTEL_FIRMWARE_LOADED, + INTEL_FIRMWARE_FAILED, + INTEL_BOOTING, + INTEL_BROKEN_INITIAL_NCMD, + INTEL_BROKEN_SHUTDOWN_LED, + INTEL_ROM_LEGACY, + INTEL_ROM_LEGACY_NO_WBS_SUPPORT, + INTEL_ACPI_RESET_ACTIVE, + + __INTEL_NUM_FLAGS, +}; + +struct btintel_data { + DECLARE_BITMAP(flags, __INTEL_NUM_FLAGS); + int (*acpi_reset_method)(struct hci_dev *hdev); +}; + +#define btintel_set_flag(hdev, nr) \ + do { \ + struct btintel_data *intel = hci_get_priv((hdev)); \ + set_bit((nr), intel->flags); \ + } while (0) + +#define btintel_clear_flag(hdev, nr) \ + do { \ + struct btintel_data *intel = hci_get_priv((hdev)); \ + clear_bit((nr), intel->flags); \ + } while (0) + +#define btintel_wake_up_flag(hdev, nr) \ + do { \ + struct btintel_data *intel = hci_get_priv((hdev)); \ + wake_up_bit(intel->flags, (nr)); \ + } while (0) + +#define btintel_get_flag(hdev) \ + (((struct btintel_data *)hci_get_priv(hdev))->flags) + +#define btintel_test_flag(hdev, nr) test_bit((nr), btintel_get_flag(hdev)) +#define btintel_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), btintel_get_flag(hdev)) +#define btintel_wait_on_flag_timeout(hdev, nr, m, to) \ + wait_on_bit_timeout(btintel_get_flag(hdev), (nr), m, to) + +#if IS_ENABLED(CONFIG_BT_INTEL) + +int btintel_check_bdaddr(struct hci_dev *hdev); +int btintel_enter_mfg(struct hci_dev *hdev); +int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched); +int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int btintel_set_diag(struct hci_dev *hdev, bool enable); + +int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver); +int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name); +int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug); +int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver); +struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, + u16 opcode_write); +int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param); +int btintel_read_boot_params(struct hci_dev *hdev, + struct intel_boot_params *params); +int btintel_download_firmware(struct hci_dev *dev, struct intel_version *ver, + const struct firmware *fw, u32 *boot_param); +int btintel_configure_setup(struct hci_dev *hdev, const char 
*driver_name); +int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb); +void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len); +void btintel_secure_send_result(struct hci_dev *hdev, + const void *ptr, unsigned int len); +int btintel_set_quality_report(struct hci_dev *hdev, bool enable); +#else + +static inline int btintel_check_bdaddr(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_enter_mfg(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_set_diag(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_version_info(struct hci_dev *hdev, + struct intel_version *ver) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_load_ddc_config(struct hci_dev *hdev, + const char *ddc_name) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_read_version(struct hci_dev *hdev, + struct intel_version *ver) +{ + return -EOPNOTSUPP; +} + +static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev, + u16 opcode_read, + u16 opcode_write) +{ + return ERR_PTR(-EINVAL); +} + +static inline int btintel_send_intel_reset(struct hci_dev *hdev, + u32 reset_param) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_read_boot_params(struct hci_dev *hdev, + struct intel_boot_params *params) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_download_firmware(struct hci_dev *dev, + const struct firmware *fw, + u32 *boot_param) +{ + return -EOPNOTSUPP; +} + +static inline int btintel_configure_setup(struct hci_dev *hdev, + const char *driver_name) +{ + return -ENODEV; +} + +static inline void btintel_bootup(struct hci_dev *hdev, + const void *ptr, unsigned int len) +{ +} + +static inline void btintel_secure_send_result(struct hci_dev *hdev, + const void *ptr, unsigned int len) +{ +} + +static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable) +{ + return -ENODEV; +} +#endif diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c new file mode 100644 index 000000000..32329a2e5 --- /dev/null +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell Bluetooth driver: debugfs related functions + * + * Copyright (C) 2009, Marvell International Ltd. 
+ **/ + +#include +#include + +#include +#include + +#include "btmrvl_drv.h" + +struct btmrvl_debugfs_data { + struct dentry *config_dir; + struct dentry *status_dir; +}; + +static ssize_t btmrvl_hscfgcmd_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + long result, ret; + + ret = kstrtol_from_user(ubuf, count, 10, &result); + if (ret) + return ret; + + priv->btmrvl_dev.hscfgcmd = result; + + if (priv->btmrvl_dev.hscfgcmd) { + btmrvl_prepare_command(priv); + wake_up_interruptible(&priv->main_thread.wait_q); + } + + return count; +} + +static ssize_t btmrvl_hscfgcmd_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", + priv->btmrvl_dev.hscfgcmd); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_hscfgcmd_fops = { + .read = btmrvl_hscfgcmd_read, + .write = btmrvl_hscfgcmd_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + long result, ret; + + ret = kstrtol_from_user(ubuf, count, 10, &result); + if (ret) + return ret; + + priv->btmrvl_dev.pscmd = result; + + if (priv->btmrvl_dev.pscmd) { + btmrvl_prepare_command(priv); + wake_up_interruptible(&priv->main_thread.wait_q); + } + + return count; + +} + +static ssize_t btmrvl_pscmd_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.pscmd); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_pscmd_fops = { + .read = btmrvl_pscmd_read, + .write = btmrvl_pscmd_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + long result, ret; + + ret = kstrtol_from_user(ubuf, count, 10, &result); + if (ret) + return ret; + + priv->btmrvl_dev.hscmd = result; + if (priv->btmrvl_dev.hscmd) { + btmrvl_prepare_command(priv); + wake_up_interruptible(&priv->main_thread.wait_q); + } + + return count; +} + +static ssize_t btmrvl_hscmd_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscmd); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_hscmd_fops = { + .read = btmrvl_hscmd_read, + .write = btmrvl_hscmd_write, + .open = simple_open, + .llseek = default_llseek, +}; + +void btmrvl_debugfs_init(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + struct btmrvl_debugfs_data *dbg; + + if (!hdev->debugfs) + return; + + dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); + priv->debugfs_data = dbg; + + if (!dbg) { + BT_ERR("Can not allocate memory for btmrvl_debugfs_data."); + return; + } + + dbg->config_dir = debugfs_create_dir("config", hdev->debugfs); + + debugfs_create_u8("psmode", 0644, dbg->config_dir, + 
&priv->btmrvl_dev.psmode); + debugfs_create_file("pscmd", 0644, dbg->config_dir, + priv, &btmrvl_pscmd_fops); + debugfs_create_x16("gpiogap", 0644, dbg->config_dir, + &priv->btmrvl_dev.gpio_gap); + debugfs_create_u8("hsmode", 0644, dbg->config_dir, + &priv->btmrvl_dev.hsmode); + debugfs_create_file("hscmd", 0644, dbg->config_dir, + priv, &btmrvl_hscmd_fops); + debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, + priv, &btmrvl_hscfgcmd_fops); + + dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); + debugfs_create_u8("curpsmode", 0444, dbg->status_dir, + &priv->adapter->psmode); + debugfs_create_u8("psstate", 0444, dbg->status_dir, + &priv->adapter->ps_state); + debugfs_create_u8("hsstate", 0444, dbg->status_dir, + &priv->adapter->hs_state); + debugfs_create_u8("txdnldready", 0444, dbg->status_dir, + &priv->btmrvl_dev.tx_dnld_rdy); +} + +void btmrvl_debugfs_remove(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + struct btmrvl_debugfs_data *dbg = priv->debugfs_data; + + if (!dbg) + return; + + debugfs_remove_recursive(dbg->config_dir); + debugfs_remove_recursive(dbg->status_dir); + + kfree(dbg); +} diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h new file mode 100644 index 000000000..d7df05c56 --- /dev/null +++ b/drivers/bluetooth/btmrvl_drv.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell Bluetooth driver: global definitions & declarations + * + * Copyright (C) 2009, Marvell International Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BTM_HEADER_LEN 4 +#define BTM_UPLD_SIZE 2312 + +/* Time to wait until Host Sleep state change in millisecond */ +#define WAIT_UNTIL_HS_STATE_CHANGED msecs_to_jiffies(5000) +/* Time to wait for command response in millisecond */ +#define WAIT_UNTIL_CMD_RESP msecs_to_jiffies(5000) + +enum rdwr_status { + RDWR_STATUS_SUCCESS = 0, + RDWR_STATUS_FAILURE = 1, + RDWR_STATUS_DONE = 2 +}; + +#define FW_DUMP_MAX_NAME_LEN 8 +#define FW_DUMP_HOST_READY 0xEE +#define FW_DUMP_DONE 0xFF +#define FW_DUMP_READ_DONE 0xFE + +struct memory_type_mapping { + u8 mem_name[FW_DUMP_MAX_NAME_LEN]; + u8 *mem_ptr; + u32 mem_size; + u8 done_flag; +}; + +struct btmrvl_thread { + struct task_struct *task; + wait_queue_head_t wait_q; + void *priv; +}; + +struct btmrvl_device { + void *card; + struct hci_dev *hcidev; + + u8 dev_type; + + u8 tx_dnld_rdy; + + u8 psmode; + u8 pscmd; + u8 hsmode; + u8 hscmd; + + /* Low byte is gap, high byte is GPIO */ + u16 gpio_gap; + + u8 hscfgcmd; + u8 sendcmdflag; +}; + +struct btmrvl_adapter { + void *hw_regs_buf; + u8 *hw_regs; + u32 int_count; + struct sk_buff_head tx_queue; + u8 psmode; + u8 ps_state; + u8 hs_state; + u8 wakeup_tries; + wait_queue_head_t cmd_wait_q; + wait_queue_head_t event_hs_wait_q; + u8 cmd_complete; + bool is_suspended; + bool is_suspending; +}; + +struct btmrvl_private { + struct btmrvl_device btmrvl_dev; + struct btmrvl_adapter *adapter; + struct btmrvl_thread main_thread; + int (*hw_host_to_card)(struct btmrvl_private *priv, + u8 *payload, u16 nb); + int (*hw_wakeup_firmware)(struct btmrvl_private *priv); + int (*hw_process_int_status)(struct btmrvl_private *priv); + spinlock_t driver_lock; /* spinlock used by driver */ +#ifdef CONFIG_DEBUG_FS + void *debugfs_data; +#endif + bool surprise_removed; +}; + +#define MRVL_VENDOR_PKT 0xFE + +/* Vendor specific Bluetooth commands */ +#define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03 +#define 
BT_CMD_ROUTE_SCO_TO_HOST 0xFC1D +#define BT_CMD_SET_BDADDR 0xFC22 +#define BT_CMD_AUTO_SLEEP_MODE 0xFC23 +#define BT_CMD_HOST_SLEEP_CONFIG 0xFC59 +#define BT_CMD_HOST_SLEEP_ENABLE 0xFC5A +#define BT_CMD_MODULE_CFG_REQ 0xFC5B +#define BT_CMD_LOAD_CONFIG_DATA 0xFC61 + +/* Sub-commands: Module Bringup/Shutdown Request/Response */ +#define MODULE_BRINGUP_REQ 0xF1 +#define MODULE_BROUGHT_UP 0x00 +#define MODULE_ALREADY_UP 0x0C + +#define MODULE_SHUTDOWN_REQ 0xF2 + +/* Vendor specific Bluetooth events */ +#define BT_EVENT_AUTO_SLEEP_MODE 0x23 +#define BT_EVENT_HOST_SLEEP_CONFIG 0x59 +#define BT_EVENT_HOST_SLEEP_ENABLE 0x5A +#define BT_EVENT_MODULE_CFG_REQ 0x5B +#define BT_EVENT_POWER_STATE 0x20 + +/* Bluetooth Power States */ +#define BT_PS_ENABLE 0x02 +#define BT_PS_DISABLE 0x03 +#define BT_PS_SLEEP 0x01 + +/* Host Sleep states */ +#define HS_ACTIVATED 0x01 +#define HS_DEACTIVATED 0x00 + +/* Power Save modes */ +#define PS_SLEEP 0x01 +#define PS_AWAKE 0x00 + +#define BT_CAL_HDR_LEN 4 +#define BT_CAL_DATA_SIZE 28 + +struct btmrvl_event { + u8 ec; /* event counter */ + u8 length; + u8 data[4]; +} __packed; + +/* Prototype of global function */ + +int btmrvl_register_hdev(struct btmrvl_private *priv); +struct btmrvl_private *btmrvl_add_card(void *card); +int btmrvl_remove_card(struct btmrvl_private *priv); + +void btmrvl_interrupt(struct btmrvl_private *priv); + +bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb); +int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb); + +int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd); +int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd); +int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv); +int btmrvl_enable_ps(struct btmrvl_private *priv); +int btmrvl_prepare_command(struct btmrvl_private *priv); +int btmrvl_enable_hs(struct btmrvl_private *priv); + +#ifdef CONFIG_DEBUG_FS +void btmrvl_debugfs_init(struct hci_dev *hdev); +void btmrvl_debugfs_remove(struct hci_dev *hdev); +#endif diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c new file mode 100644 index 000000000..9658b33c8 --- /dev/null +++ b/drivers/bluetooth/btmrvl_main.c @@ -0,0 +1,793 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell Bluetooth driver + * + * Copyright (C) 2009, Marvell International Ltd. + **/ + +#include +#include +#include +#include +#include + +#include "btmrvl_drv.h" +#include "btmrvl_sdio.h" + +#define VERSION "1.0" + +/* + * This function is called by interface specific interrupt handler. + * It updates Power Save & Host Sleep states, and wakes up the main + * thread. 
+ */ +void btmrvl_interrupt(struct btmrvl_private *priv) +{ + priv->adapter->ps_state = PS_AWAKE; + + priv->adapter->wakeup_tries = 0; + + priv->adapter->int_count++; + + if (priv->adapter->hs_state == HS_ACTIVATED) { + BT_DBG("BT: HS DEACTIVATED in ISR!"); + priv->adapter->hs_state = HS_DEACTIVATED; + } + + wake_up_interruptible(&priv->main_thread.wait_q); +} +EXPORT_SYMBOL_GPL(btmrvl_interrupt); + +bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb) +{ + struct hci_event_hdr *hdr = (void *) skb->data; + + if (hdr->evt == HCI_EV_CMD_COMPLETE) { + struct hci_ev_cmd_complete *ec; + u16 opcode; + + ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE); + opcode = __le16_to_cpu(ec->opcode); + + if (priv->btmrvl_dev.sendcmdflag) { + priv->btmrvl_dev.sendcmdflag = false; + priv->adapter->cmd_complete = true; + wake_up_interruptible(&priv->adapter->cmd_wait_q); + + if (hci_opcode_ogf(opcode) == 0x3F) { + BT_DBG("vendor event skipped: opcode=%#4.4x", + opcode); + kfree_skb(skb); + return false; + } + } + } + + return true; +} +EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt); + +int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb) +{ + struct btmrvl_adapter *adapter = priv->adapter; + struct btmrvl_event *event; + int ret = 0; + + event = (struct btmrvl_event *) skb->data; + if (event->ec != 0xff) { + BT_DBG("Not Marvell Event=%x", event->ec); + ret = -EINVAL; + goto exit; + } + + switch (event->data[0]) { + case BT_EVENT_AUTO_SLEEP_MODE: + if (!event->data[2]) { + if (event->data[1] == BT_PS_ENABLE) + adapter->psmode = 1; + else + adapter->psmode = 0; + BT_DBG("PS Mode:%s", + (adapter->psmode) ? "Enable" : "Disable"); + } else { + BT_DBG("PS Mode command failed"); + } + break; + + case BT_EVENT_HOST_SLEEP_CONFIG: + if (!event->data[3]) + BT_DBG("gpio=%x, gap=%x", event->data[1], + event->data[2]); + else + BT_DBG("HSCFG command failed"); + break; + + case BT_EVENT_HOST_SLEEP_ENABLE: + if (!event->data[1]) { + adapter->hs_state = HS_ACTIVATED; + if (adapter->psmode) + adapter->ps_state = PS_SLEEP; + wake_up_interruptible(&adapter->event_hs_wait_q); + BT_DBG("HS ACTIVATED!"); + } else { + BT_DBG("HS Enable failed"); + } + break; + + case BT_EVENT_MODULE_CFG_REQ: + if (priv->btmrvl_dev.sendcmdflag && + event->data[1] == MODULE_BRINGUP_REQ) { + BT_DBG("EVENT:%s", + ((event->data[2] == MODULE_BROUGHT_UP) || + (event->data[2] == MODULE_ALREADY_UP)) ? + "Bring-up succeed" : "Bring-up failed"); + + if (event->length > 3 && event->data[3]) + priv->btmrvl_dev.dev_type = HCI_AMP; + else + priv->btmrvl_dev.dev_type = HCI_PRIMARY; + + BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type); + } else if (priv->btmrvl_dev.sendcmdflag && + event->data[1] == MODULE_SHUTDOWN_REQ) { + BT_DBG("EVENT:%s", (event->data[2]) ? + "Shutdown failed" : "Shutdown succeed"); + } else { + BT_DBG("BT_CMD_MODULE_CFG_REQ resp for APP"); + ret = -EINVAL; + } + break; + + case BT_EVENT_POWER_STATE: + if (event->data[1] == BT_PS_SLEEP) + adapter->ps_state = PS_SLEEP; + BT_DBG("EVENT:%s", + (adapter->ps_state) ? 
"PS_SLEEP" : "PS_AWAKE"); + break; + + default: + BT_DBG("Unknown Event=%d", event->data[0]); + ret = -EINVAL; + break; + } + +exit: + if (!ret) + kfree_skb(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_process_event); + +static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode, + const void *param, u8 len) +{ + struct sk_buff *skb; + struct hci_command_hdr *hdr; + + if (priv->surprise_removed) { + BT_ERR("Card is removed"); + return -EFAULT; + } + + skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_KERNEL); + if (!skb) { + BT_ERR("No free skb"); + return -ENOMEM; + } + + hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); + hdr->opcode = cpu_to_le16(opcode); + hdr->plen = len; + + if (len) + skb_put_data(skb, param, len); + + hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT; + + skb_queue_head(&priv->adapter->tx_queue, skb); + + priv->btmrvl_dev.sendcmdflag = true; + + priv->adapter->cmd_complete = false; + + wake_up_interruptible(&priv->main_thread.wait_q); + + if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q, + priv->adapter->cmd_complete || + priv->surprise_removed, + WAIT_UNTIL_CMD_RESP)) + return -ETIMEDOUT; + + if (priv->surprise_removed) + return -EFAULT; + + return 0; +} + +int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd) +{ + int ret; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1); + if (ret) + BT_ERR("module_cfg_cmd(%x) failed", subcmd); + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); + +static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv) +{ + int ret; + u8 subcmd = 0; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1); + if (ret) + BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret); + + return ret; +} + +int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret; + + if (!card->support_pscan_win_report) + return 0; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_PSCAN_WIN_REPORT_ENABLE, + &subcmd, 1); + if (ret) + BT_ERR("PSCAN_WIN_REPORT_ENABLE command failed: %#x", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_pscan_window_reporting); + +int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv) +{ + int ret; + u8 param[2]; + + param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; + param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff); + + BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x", + param[0], param[1]); + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2); + if (ret) + BT_ERR("HSCFG command failed"); + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd); + +int btmrvl_enable_ps(struct btmrvl_private *priv) +{ + int ret; + u8 param; + + if (priv->btmrvl_dev.psmode) + param = BT_PS_ENABLE; + else + param = BT_PS_DISABLE; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, ¶m, 1); + if (ret) + BT_ERR("PSMODE command failed"); + + return 0; +} +EXPORT_SYMBOL_GPL(btmrvl_enable_ps); + +int btmrvl_enable_hs(struct btmrvl_private *priv) +{ + struct btmrvl_adapter *adapter = priv->adapter; + int ret; + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0); + if (ret) { + BT_ERR("Host sleep enable command failed"); + return ret; + } + + ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q, + adapter->hs_state || + priv->surprise_removed, + WAIT_UNTIL_HS_STATE_CHANGED); + if (ret < 0 || priv->surprise_removed) { + BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d", + ret, adapter->hs_state, 
adapter->ps_state, + adapter->wakeup_tries); + } else if (!ret) { + BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state, + adapter->ps_state, adapter->wakeup_tries); + ret = -ETIMEDOUT; + } else { + BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state, + adapter->ps_state, adapter->wakeup_tries); + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_enable_hs); + +int btmrvl_prepare_command(struct btmrvl_private *priv) +{ + int ret = 0; + + if (priv->btmrvl_dev.hscfgcmd) { + priv->btmrvl_dev.hscfgcmd = 0; + btmrvl_send_hscfg_cmd(priv); + } + + if (priv->btmrvl_dev.pscmd) { + priv->btmrvl_dev.pscmd = 0; + btmrvl_enable_ps(priv); + } + + if (priv->btmrvl_dev.hscmd) { + priv->btmrvl_dev.hscmd = 0; + + if (priv->btmrvl_dev.hsmode) { + ret = btmrvl_enable_hs(priv); + } else { + ret = priv->hw_wakeup_firmware(priv); + priv->adapter->hs_state = HS_DEACTIVATED; + BT_DBG("BT: HS DEACTIVATED due to host activity!"); + } + } + + return ret; +} + +static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) +{ + int ret = 0; + + if (!skb || !skb->data) + return -EINVAL; + + if (!skb->len || ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) { + BT_ERR("Tx Error: Bad skb length %d : %d", + skb->len, BTM_UPLD_SIZE); + return -EINVAL; + } + + skb_push(skb, BTM_HEADER_LEN); + + /* header type: byte[3] + * HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor + * header length: byte[2][1][0] + */ + + skb->data[0] = (skb->len & 0x0000ff); + skb->data[1] = (skb->len & 0x00ff00) >> 8; + skb->data[2] = (skb->len & 0xff0000) >> 16; + skb->data[3] = hci_skb_pkt_type(skb); + + if (priv->hw_host_to_card) + ret = priv->hw_host_to_card(priv, skb->data, skb->len); + + return ret; +} + +static void btmrvl_init_adapter(struct btmrvl_private *priv) +{ + int buf_size; + + skb_queue_head_init(&priv->adapter->tx_queue); + + priv->adapter->ps_state = PS_AWAKE; + + buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN); + priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL); + if (!priv->adapter->hw_regs_buf) { + priv->adapter->hw_regs = NULL; + BT_ERR("Unable to allocate buffer for hw_regs."); + } else { + priv->adapter->hw_regs = + (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf, + BTSDIO_DMA_ALIGN); + BT_DBG("hw_regs_buf=%p hw_regs=%p", + priv->adapter->hw_regs_buf, priv->adapter->hw_regs); + } + + init_waitqueue_head(&priv->adapter->cmd_wait_q); + init_waitqueue_head(&priv->adapter->event_hs_wait_q); +} + +static void btmrvl_free_adapter(struct btmrvl_private *priv) +{ + skb_queue_purge(&priv->adapter->tx_queue); + + kfree(priv->adapter->hw_regs_buf); + kfree(priv->adapter); + + priv->adapter = NULL; +} + +static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + + BT_DBG("type=%d, len=%d", hci_skb_pkt_type(skb), skb->len); + + if (priv->adapter->is_suspending || priv->adapter->is_suspended) { + BT_ERR("%s: Device is suspending or suspended", __func__); + return -EBUSY; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + skb_queue_tail(&priv->adapter->tx_queue, skb); + + if (!priv->adapter->is_suspended) + wake_up_interruptible(&priv->main_thread.wait_q); + + return 0; +} + +static int btmrvl_flush(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + + skb_queue_purge(&priv->adapter->tx_queue); + + return 0; +} + +static 
int btmrvl_close(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + + skb_queue_purge(&priv->adapter->tx_queue); + + return 0; +} + +static int btmrvl_open(struct hci_dev *hdev) +{ + return 0; +} + +static int btmrvl_download_cal_data(struct btmrvl_private *priv, + u8 *data, int len) +{ + int ret; + + data[0] = 0x00; + data[1] = 0x00; + data[2] = 0x00; + data[3] = len; + + print_hex_dump_bytes("Calibration data: ", + DUMP_PREFIX_OFFSET, data, BT_CAL_HDR_LEN + len); + + ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data, + BT_CAL_HDR_LEN + len); + if (ret) + BT_ERR("Failed to download calibration data"); + + return 0; +} + +static int btmrvl_check_device_tree(struct btmrvl_private *priv) +{ + struct device_node *dt_node; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE]; + int ret = 0; + u16 gpio, gap; + + if (card->plt_of_node) { + dt_node = card->plt_of_node; + ret = of_property_read_u16(dt_node, "marvell,wakeup-pin", + &gpio); + if (ret) + gpio = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; + + ret = of_property_read_u16(dt_node, "marvell,wakeup-gap-ms", + &gap); + if (ret) + gap = (u8)(priv->btmrvl_dev.gpio_gap & 0x00ff); + + priv->btmrvl_dev.gpio_gap = (gpio << 8) + gap; + + ret = of_property_read_u8_array(dt_node, "marvell,cal-data", + cal_data + BT_CAL_HDR_LEN, + BT_CAL_DATA_SIZE); + if (ret) + return ret; + + BT_DBG("Use cal data from device tree"); + ret = btmrvl_download_cal_data(priv, cal_data, + BT_CAL_DATA_SIZE); + if (ret) + BT_ERR("Fail to download calibrate data"); + } + + return ret; +} + +static int btmrvl_setup(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + int ret; + + ret = btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ); + if (ret) + return ret; + + priv->btmrvl_dev.gpio_gap = 0xfffe; + + btmrvl_check_device_tree(priv); + + btmrvl_enable_sco_routing_to_host(priv); + + btmrvl_pscan_window_reporting(priv, 0x01); + + priv->btmrvl_dev.psmode = 1; + btmrvl_enable_ps(priv); + + btmrvl_send_hscfg_cmd(priv); + + return 0; +} + +static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + long ret; + u8 buf[8]; + + buf[0] = MRVL_VENDOR_PKT; + buf[1] = sizeof(bdaddr_t); + memcpy(buf + 2, bdaddr, sizeof(bdaddr_t)); + + skb = __hci_cmd_sync(hdev, BT_CMD_SET_BDADDR, sizeof(buf), buf, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + BT_ERR("%s: changing btmrvl device address failed (%ld)", + hdev->name, ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + +static bool btmrvl_wakeup(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + + return device_may_wakeup(&card->func->dev); +} + +/* + * This function handles the event generated by firmware, rx data + * received from firmware, and tx data sent from kernel. 
+ */ +static int btmrvl_service_main_thread(void *data) +{ + struct btmrvl_thread *thread = data; + struct btmrvl_private *priv = thread->priv; + struct btmrvl_adapter *adapter = priv->adapter; + wait_queue_entry_t wait; + struct sk_buff *skb; + ulong flags; + + init_waitqueue_entry(&wait, current); + + for (;;) { + add_wait_queue(&thread->wait_q, &wait); + + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop() || priv->surprise_removed) { + BT_DBG("main_thread: break from main thread"); + break; + } + + if (adapter->wakeup_tries || + ((!adapter->int_count) && + (!priv->btmrvl_dev.tx_dnld_rdy || + skb_queue_empty(&adapter->tx_queue)))) { + BT_DBG("main_thread is sleeping..."); + schedule(); + } + + set_current_state(TASK_RUNNING); + + remove_wait_queue(&thread->wait_q, &wait); + + BT_DBG("main_thread woke up"); + + if (kthread_should_stop() || priv->surprise_removed) { + BT_DBG("main_thread: break from main thread"); + break; + } + + spin_lock_irqsave(&priv->driver_lock, flags); + if (adapter->int_count) { + adapter->int_count = 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); + priv->hw_process_int_status(priv); + } else if (adapter->ps_state == PS_SLEEP && + !skb_queue_empty(&adapter->tx_queue)) { + spin_unlock_irqrestore(&priv->driver_lock, flags); + adapter->wakeup_tries++; + priv->hw_wakeup_firmware(priv); + continue; + } else { + spin_unlock_irqrestore(&priv->driver_lock, flags); + } + + if (adapter->ps_state == PS_SLEEP) + continue; + + if (!priv->btmrvl_dev.tx_dnld_rdy || + priv->adapter->is_suspended) + continue; + + skb = skb_dequeue(&adapter->tx_queue); + if (skb) { + if (btmrvl_tx_pkt(priv, skb)) + priv->btmrvl_dev.hcidev->stat.err_tx++; + else + priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len; + + kfree_skb(skb); + } + } + + return 0; +} + +int btmrvl_register_hdev(struct btmrvl_private *priv) +{ + struct hci_dev *hdev = NULL; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret; + + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can not allocate HCI device"); + goto err_hdev; + } + + priv->btmrvl_dev.hcidev = hdev; + hci_set_drvdata(hdev, priv); + + hdev->bus = HCI_SDIO; + hdev->open = btmrvl_open; + hdev->close = btmrvl_close; + hdev->flush = btmrvl_flush; + hdev->send = btmrvl_send_frame; + hdev->setup = btmrvl_setup; + hdev->set_bdaddr = btmrvl_set_bdaddr; + hdev->wakeup = btmrvl_wakeup; + SET_HCIDEV_DEV(hdev, &card->func->dev); + + hdev->dev_type = priv->btmrvl_dev.dev_type; + + ret = hci_register_dev(hdev); + if (ret < 0) { + BT_ERR("Can not register HCI device"); + goto err_hci_register_dev; + } + +#ifdef CONFIG_DEBUG_FS + btmrvl_debugfs_init(hdev); +#endif + + return 0; + +err_hci_register_dev: + hci_free_dev(hdev); + +err_hdev: + /* Stop the thread servicing the interrupts */ + kthread_stop(priv->main_thread.task); + + btmrvl_free_adapter(priv); + kfree(priv); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(btmrvl_register_hdev); + +struct btmrvl_private *btmrvl_add_card(void *card) +{ + struct btmrvl_private *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + BT_ERR("Can not allocate priv"); + goto err_priv; + } + + priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL); + if (!priv->adapter) { + BT_ERR("Allocate buffer for btmrvl_adapter failed!"); + goto err_adapter; + } + + btmrvl_init_adapter(priv); + + BT_DBG("Starting kthread..."); + priv->main_thread.priv = priv; + spin_lock_init(&priv->driver_lock); + + init_waitqueue_head(&priv->main_thread.wait_q); + priv->main_thread.task = 
kthread_run(btmrvl_service_main_thread, + &priv->main_thread, "btmrvl_main_service"); + if (IS_ERR(priv->main_thread.task)) + goto err_thread; + + priv->btmrvl_dev.card = card; + priv->btmrvl_dev.tx_dnld_rdy = true; + + return priv; + +err_thread: + btmrvl_free_adapter(priv); + +err_adapter: + kfree(priv); + +err_priv: + return NULL; +} +EXPORT_SYMBOL_GPL(btmrvl_add_card); + +int btmrvl_remove_card(struct btmrvl_private *priv) +{ + struct hci_dev *hdev; + + hdev = priv->btmrvl_dev.hcidev; + + wake_up_interruptible(&priv->adapter->cmd_wait_q); + wake_up_interruptible(&priv->adapter->event_hs_wait_q); + + kthread_stop(priv->main_thread.task); + +#ifdef CONFIG_DEBUG_FS + btmrvl_debugfs_remove(hdev); +#endif + + hci_unregister_dev(hdev); + + hci_free_dev(hdev); + + priv->btmrvl_dev.hcidev = NULL; + + btmrvl_free_adapter(priv); + + kfree(priv); + + return 0; +} +EXPORT_SYMBOL_GPL(btmrvl_remove_card); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION("Marvell Bluetooth driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c new file mode 100644 index 000000000..d76c79955 --- /dev/null +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -0,0 +1,1781 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell BT-over-SDIO driver: SDIO interface related functions. + * + * Copyright (C) 2009, Marvell International Ltd. + **/ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "btmrvl_drv.h" +#include "btmrvl_sdio.h" + +#define VERSION "1.0" + +static struct memory_type_mapping mem_type_mapping_tbl[] = { + {"ITCM", NULL, 0, 0xF0}, + {"DTCM", NULL, 0, 0xF1}, + {"SQRAM", NULL, 0, 0xF2}, + {"APU", NULL, 0, 0xF3}, + {"CIU", NULL, 0, 0xF4}, + {"ICU", NULL, 0, 0xF5}, + {"MAC", NULL, 0, 0xF6}, + {"EXT7", NULL, 0, 0xF7}, + {"EXT8", NULL, 0, 0xF8}, + {"EXT9", NULL, 0, 0xF9}, + {"EXT10", NULL, 0, 0xFA}, + {"EXT11", NULL, 0, 0xFB}, + {"EXT12", NULL, 0, 0xFC}, + {"EXT13", NULL, 0, 0xFD}, + {"EXTLAST", NULL, 0, 0xFE}, +}; + +static const struct of_device_id btmrvl_sdio_of_match_table[] __maybe_unused = { + { .compatible = "marvell,sd8897-bt" }, + { .compatible = "marvell,sd8997-bt" }, + { } +}; + +static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv) +{ + struct btmrvl_sdio_card *card = priv; + struct device *dev = &card->func->dev; + struct btmrvl_plt_wake_cfg *cfg = card->plt_wake_cfg; + + dev_info(dev, "wake by bt\n"); + cfg->wake_by_bt = true; + disable_irq_nosync(irq); + + pm_wakeup_event(dev, 0); + pm_system_wakeup(); + + return IRQ_HANDLED; +} + +/* This function parses device tree node using mmc subnode devicetree API. + * The device node is saved in card->plt_of_node. + * If the device tree node exists and includes interrupts attributes, this + * function will request platform specific wakeup interrupt. 
+ */ +static int btmrvl_sdio_probe_of(struct device *dev, + struct btmrvl_sdio_card *card) +{ + struct btmrvl_plt_wake_cfg *cfg; + int ret; + + if (!dev->of_node || + !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) { + dev_info(dev, "sdio device tree data not available\n"); + return -1; + } + + card->plt_of_node = dev->of_node; + + card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg), + GFP_KERNEL); + cfg = card->plt_wake_cfg; + if (cfg && card->plt_of_node) { + cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0); + if (!cfg->irq_bt) { + dev_err(dev, "fail to parse irq_bt from device tree\n"); + cfg->irq_bt = -1; + } else { + ret = devm_request_irq(dev, cfg->irq_bt, + btmrvl_wake_irq_bt, + 0, "bt_wake", card); + if (ret) { + dev_err(dev, + "Failed to request irq_bt %d (%d)\n", + cfg->irq_bt, ret); + } + + /* Configure wakeup (enabled by default) */ + device_init_wakeup(dev, true); + disable_irq(cfg->irq_bt); + } + } + + return 0; +} + +/* The btmrvl_sdio_remove() callback function is called + * when user removes this module from kernel space or ejects + * the card from the slot. The driver handles these 2 cases + * differently. + * If the user is removing the module, a MODULE_SHUTDOWN_REQ + * command is sent to firmware and interrupt will be disabled. + * If the card is removed, there is no need to send command + * or disable interrupt. + * + * The variable 'user_rmmod' is used to distinguish these two + * scenarios. This flag is initialized as FALSE in case the card + * is removed, and will be set to TRUE for module removal when + * module_exit function is called. + */ +static u8 user_rmmod; +static u8 sdio_ireg; + +static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = { + .cfg = 0x03, + .host_int_mask = 0x04, + .host_intstatus = 0x05, + .card_status = 0x20, + .sq_read_base_addr_a0 = 0x10, + .sq_read_base_addr_a1 = 0x11, + .card_fw_status0 = 0x40, + .card_fw_status1 = 0x41, + .card_rx_len = 0x42, + .card_rx_unit = 0x43, + .io_port_0 = 0x00, + .io_port_1 = 0x01, + .io_port_2 = 0x02, + .int_read_to_clear = false, +}; +static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { + .cfg = 0x00, + .host_int_mask = 0x02, + .host_intstatus = 0x03, + .card_status = 0x30, + .sq_read_base_addr_a0 = 0x40, + .sq_read_base_addr_a1 = 0x41, + .card_revision = 0x5c, + .card_fw_status0 = 0x60, + .card_fw_status1 = 0x61, + .card_rx_len = 0x62, + .card_rx_unit = 0x63, + .io_port_0 = 0x78, + .io_port_1 = 0x79, + .io_port_2 = 0x7a, + .int_read_to_clear = false, +}; + +static const struct btmrvl_sdio_card_reg btmrvl_reg_8887 = { + .cfg = 0x00, + .host_int_mask = 0x08, + .host_intstatus = 0x0C, + .card_status = 0x5C, + .sq_read_base_addr_a0 = 0x6C, + .sq_read_base_addr_a1 = 0x6D, + .card_revision = 0xC8, + .card_fw_status0 = 0x88, + .card_fw_status1 = 0x89, + .card_rx_len = 0x8A, + .card_rx_unit = 0x8B, + .io_port_0 = 0xE4, + .io_port_1 = 0xE5, + .io_port_2 = 0xE6, + .int_read_to_clear = true, + .host_int_rsr = 0x04, + .card_misc_cfg = 0xD8, +}; + +static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = { + .cfg = 0x00, + .host_int_mask = 0x02, + .host_intstatus = 0x03, + .card_status = 0x50, + .sq_read_base_addr_a0 = 0x60, + .sq_read_base_addr_a1 = 0x61, + .card_revision = 0xbc, + .card_fw_status0 = 0xc0, + .card_fw_status1 = 0xc1, + .card_rx_len = 0xc2, + .card_rx_unit = 0xc3, + .io_port_0 = 0xd8, + .io_port_1 = 0xd9, + .io_port_2 = 0xda, + .int_read_to_clear = true, + .host_int_rsr = 0x01, + .card_misc_cfg = 0xcc, + .fw_dump_ctrl = 0xe2, + .fw_dump_start = 0xe3, + 
.fw_dump_end = 0xea, +}; + +static const struct btmrvl_sdio_card_reg btmrvl_reg_89xx = { + .cfg = 0x00, + .host_int_mask = 0x08, + .host_intstatus = 0x0c, + .card_status = 0x5c, + .sq_read_base_addr_a0 = 0xf8, + .sq_read_base_addr_a1 = 0xf9, + .card_revision = 0xc8, + .card_fw_status0 = 0xe8, + .card_fw_status1 = 0xe9, + .card_rx_len = 0xea, + .card_rx_unit = 0xeb, + .io_port_0 = 0xe4, + .io_port_1 = 0xe5, + .io_port_2 = 0xe6, + .int_read_to_clear = true, + .host_int_rsr = 0x04, + .card_misc_cfg = 0xd8, + .fw_dump_ctrl = 0xf0, + .fw_dump_start = 0xf1, + .fw_dump_end = 0xf8, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { + .helper = "mrvl/sd8688_helper.bin", + .firmware = "mrvl/sd8688.bin", + .reg = &btmrvl_reg_8688, + .support_pscan_win_report = false, + .sd_blksz_fw_dl = 64, + .supports_fw_dump = false, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { + .helper = NULL, + .firmware = "mrvl/sd8787_uapsta.bin", + .reg = &btmrvl_reg_87xx, + .support_pscan_win_report = false, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = false, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { + .helper = NULL, + .firmware = "mrvl/sd8797_uapsta.bin", + .reg = &btmrvl_reg_87xx, + .support_pscan_win_report = false, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = false, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = { + .helper = NULL, + .firmware = "mrvl/sd8887_uapsta.bin", + .reg = &btmrvl_reg_8887, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = false, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { + .helper = NULL, + .firmware = "mrvl/sd8897_uapsta.bin", + .reg = &btmrvl_reg_8897, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { + .helper = NULL, + .firmware = "mrvl/sdsd8977_combo_v2.bin", + .reg = &btmrvl_reg_89xx, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = { + .helper = NULL, + .firmware = "mrvl/sd8987_uapsta.bin", + .reg = &btmrvl_reg_89xx, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, +}; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { + .helper = NULL, + .firmware = "mrvl/sdsd8997_combo_v4.bin", + .reg = &btmrvl_reg_89xx, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, +}; + +static const struct sdio_device_id btmrvl_sdio_ids[] = { + /* Marvell SD8688 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8688_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8688 }, + /* Marvell SD8787 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8787 }, + /* Marvell SD8787 Bluetooth AMP device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT_AMP), + .driver_data = (unsigned long)&btmrvl_sdio_sd8787 }, + /* Marvell SD8797 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8797 }, + /* Marvell SD8887 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8887 }, + /* Marvell SD8897 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 
SDIO_DEVICE_ID_MARVELL_8897_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8897 }, + /* Marvell SD8977 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8977 }, + /* Marvell SD8987 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8987 }, + /* Marvell SD8997 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_BT), + .driver_data = (unsigned long)&btmrvl_sdio_sd8997 }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(sdio, btmrvl_sdio_ids); + +static int btmrvl_sdio_get_rx_unit(struct btmrvl_sdio_card *card) +{ + u8 reg; + int ret; + + reg = sdio_readb(card->func, card->reg->card_rx_unit, &ret); + if (!ret) + card->rx_unit = reg; + + return ret; +} + +static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat) +{ + u8 fws0, fws1; + int ret; + + *dat = 0; + + fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); + if (ret) + return -EIO; + + fws1 = sdio_readb(card->func, card->reg->card_fw_status1, &ret); + if (ret) + return -EIO; + + *dat = (((u16) fws1) << 8) | fws0; + + return 0; +} + +static int btmrvl_sdio_read_rx_len(struct btmrvl_sdio_card *card, u16 *dat) +{ + u8 reg; + int ret; + + reg = sdio_readb(card->func, card->reg->card_rx_len, &ret); + if (!ret) + *dat = (u16) reg << card->rx_unit; + + return ret; +} + +static int btmrvl_sdio_enable_host_int_mask(struct btmrvl_sdio_card *card, + u8 mask) +{ + int ret; + + sdio_writeb(card->func, mask, card->reg->host_int_mask, &ret); + if (ret) { + BT_ERR("Unable to enable the host interrupt!"); + ret = -EIO; + } + + return ret; +} + +static int btmrvl_sdio_disable_host_int_mask(struct btmrvl_sdio_card *card, + u8 mask) +{ + u8 host_int_mask; + int ret; + + host_int_mask = sdio_readb(card->func, card->reg->host_int_mask, &ret); + if (ret) + return -EIO; + + host_int_mask &= ~mask; + + sdio_writeb(card->func, host_int_mask, card->reg->host_int_mask, &ret); + if (ret < 0) { + BT_ERR("Unable to disable the host interrupt!"); + return -EIO; + } + + return 0; +} + +static int btmrvl_sdio_poll_card_status(struct btmrvl_sdio_card *card, u8 bits) +{ + unsigned int tries; + u8 status; + int ret; + + for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) { + status = sdio_readb(card->func, card->reg->card_status, &ret); + if (ret) + goto failed; + if ((status & bits) == bits) + return ret; + + udelay(1); + } + + ret = -ETIMEDOUT; + +failed: + BT_ERR("FAILED! 
ret=%d", ret); + + return ret; +} + +static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card, + int pollnum) +{ + u16 firmwarestat; + int tries, ret; + + /* Wait for firmware to become ready */ + for (tries = 0; tries < pollnum; tries++) { + sdio_claim_host(card->func); + ret = btmrvl_sdio_read_fw_status(card, &firmwarestat); + sdio_release_host(card->func); + if (ret < 0) + continue; + + if (firmwarestat == FIRMWARE_READY) + return 0; + + msleep(100); + } + + return -ETIMEDOUT; +} + +static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) +{ + const struct firmware *fw_helper = NULL; + const u8 *helper = NULL; + int ret; + void *tmphlprbuf = NULL; + int tmphlprbufsz, hlprblknow, helperlen; + u8 *helperbuf; + u32 tx_len; + + ret = request_firmware(&fw_helper, card->helper, + &card->func->dev); + if ((ret < 0) || !fw_helper) { + BT_ERR("request_firmware(helper) failed, error code = %d", + ret); + ret = -ENOENT; + goto done; + } + + helper = fw_helper->data; + helperlen = fw_helper->size; + + BT_DBG("Downloading helper image (%d bytes), block size %d bytes", + helperlen, SDIO_BLOCK_SIZE); + + tmphlprbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); + + tmphlprbuf = kzalloc(tmphlprbufsz, GFP_KERNEL); + if (!tmphlprbuf) { + BT_ERR("Unable to allocate buffer for helper." + " Terminating download"); + ret = -ENOMEM; + goto done; + } + + helperbuf = (u8 *) ALIGN_ADDR(tmphlprbuf, BTSDIO_DMA_ALIGN); + + /* Perform helper data transfer */ + tx_len = (FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE) + - SDIO_HEADER_LEN; + hlprblknow = 0; + + do { + ret = btmrvl_sdio_poll_card_status(card, + CARD_IO_READY | DN_LD_CARD_RDY); + if (ret < 0) { + BT_ERR("Helper download poll status timeout @ %d", + hlprblknow); + goto done; + } + + /* Check if there is more data? 
*/ + if (hlprblknow >= helperlen) + break; + + if (helperlen - hlprblknow < tx_len) + tx_len = helperlen - hlprblknow; + + /* Little-endian */ + helperbuf[0] = ((tx_len & 0x000000ff) >> 0); + helperbuf[1] = ((tx_len & 0x0000ff00) >> 8); + helperbuf[2] = ((tx_len & 0x00ff0000) >> 16); + helperbuf[3] = ((tx_len & 0xff000000) >> 24); + + memcpy(&helperbuf[SDIO_HEADER_LEN], &helper[hlprblknow], + tx_len); + + /* Now send the data */ + ret = sdio_writesb(card->func, card->ioport, helperbuf, + FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE); + if (ret < 0) { + BT_ERR("IO error during helper download @ %d", + hlprblknow); + goto done; + } + + hlprblknow += tx_len; + } while (true); + + BT_DBG("Transferring helper image EOF block"); + + memset(helperbuf, 0x0, SDIO_BLOCK_SIZE); + + ret = sdio_writesb(card->func, card->ioport, helperbuf, + SDIO_BLOCK_SIZE); + if (ret < 0) { + BT_ERR("IO error in writing helper image EOF block"); + goto done; + } + + ret = 0; + +done: + kfree(tmphlprbuf); + release_firmware(fw_helper); + return ret; +} + +static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) +{ + const struct firmware *fw_firmware = NULL; + const u8 *firmware = NULL; + int firmwarelen, tmpfwbufsz, ret; + unsigned int tries, offset; + u8 base0, base1; + void *tmpfwbuf = NULL; + u8 *fwbuf; + u16 len, blksz_dl = card->sd_blksz_fw_dl; + int txlen = 0, tx_blocks = 0, count = 0; + + ret = request_firmware(&fw_firmware, card->firmware, + &card->func->dev); + if ((ret < 0) || !fw_firmware) { + BT_ERR("request_firmware(firmware) failed, error code = %d", + ret); + ret = -ENOENT; + goto done; + } + + firmware = fw_firmware->data; + firmwarelen = fw_firmware->size; + + BT_DBG("Downloading FW image (%d bytes)", firmwarelen); + + tmpfwbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); + tmpfwbuf = kzalloc(tmpfwbufsz, GFP_KERNEL); + if (!tmpfwbuf) { + BT_ERR("Unable to allocate buffer for firmware." + " Terminating download"); + ret = -ENOMEM; + goto done; + } + + /* Ensure aligned firmware buffer */ + fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, BTSDIO_DMA_ALIGN); + + /* Perform firmware data transfer */ + offset = 0; + do { + ret = btmrvl_sdio_poll_card_status(card, + CARD_IO_READY | DN_LD_CARD_RDY); + if (ret < 0) { + BT_ERR("FW download with helper poll status" + " timeout @ %d", offset); + goto done; + } + + /* Check if there is more data ? */ + if (offset >= firmwarelen) + break; + + for (tries = 0; tries < MAX_POLL_TRIES; tries++) { + base0 = sdio_readb(card->func, + card->reg->sq_read_base_addr_a0, &ret); + if (ret) { + BT_ERR("BASE0 register read failed:" + " base0 = 0x%04X(%d)." + " Terminating download", + base0, base0); + ret = -EIO; + goto done; + } + base1 = sdio_readb(card->func, + card->reg->sq_read_base_addr_a1, &ret); + if (ret) { + BT_ERR("BASE1 register read failed:" + " base1 = 0x%04X(%d)." 
+ " Terminating download", + base1, base1); + ret = -EIO; + goto done; + } + + len = (((u16) base1) << 8) | base0; + if (len) + break; + + udelay(10); + } + + if (!len) + break; + else if (len > BTM_UPLD_SIZE) { + BT_ERR("FW download failure @%d, invalid length %d", + offset, len); + ret = -EINVAL; + goto done; + } + + txlen = len; + + if (len & BIT(0)) { + count++; + if (count > MAX_WRITE_IOMEM_RETRY) { + BT_ERR("FW download failure @%d, " + "over max retry count", offset); + ret = -EIO; + goto done; + } + BT_ERR("FW CRC error indicated by the helper: " + "len = 0x%04X, txlen = %d", len, txlen); + len &= ~BIT(0); + /* Set txlen to 0 so as to resend from same offset */ + txlen = 0; + } else { + count = 0; + + /* Last block ? */ + if (firmwarelen - offset < txlen) + txlen = firmwarelen - offset; + + tx_blocks = DIV_ROUND_UP(txlen, blksz_dl); + + memcpy(fwbuf, &firmware[offset], txlen); + } + + ret = sdio_writesb(card->func, card->ioport, fwbuf, + tx_blocks * blksz_dl); + + if (ret < 0) { + BT_ERR("FW download, writesb(%d) failed @%d", + count, offset); + sdio_writeb(card->func, HOST_CMD53_FIN, + card->reg->cfg, &ret); + if (ret) + BT_ERR("writeb failed (CFG)"); + } + + offset += txlen; + } while (true); + + BT_INFO("FW download over, size %d bytes", offset); + + ret = 0; + +done: + kfree(tmpfwbuf); + release_firmware(fw_firmware); + return ret; +} + +static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) +{ + u16 buf_len = 0; + int ret, num_blocks, blksz; + struct sk_buff *skb = NULL; + u32 type; + u8 *payload; + struct hci_dev *hdev = priv->btmrvl_dev.hcidev; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + ret = -EINVAL; + goto exit; + } + + /* Read the length of data to be transferred */ + ret = btmrvl_sdio_read_rx_len(card, &buf_len); + if (ret < 0) { + BT_ERR("read rx_len failed"); + ret = -EIO; + goto exit; + } + + blksz = SDIO_BLOCK_SIZE; + num_blocks = DIV_ROUND_UP(buf_len, blksz); + + if (buf_len <= SDIO_HEADER_LEN + || (num_blocks * blksz) > ALLOC_BUF_SIZE) { + BT_ERR("invalid packet length: %d", buf_len); + ret = -EINVAL; + goto exit; + } + + /* Allocate buffer */ + skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL); + if (!skb) { + BT_ERR("No free skb"); + ret = -ENOMEM; + goto exit; + } + + if ((unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)) { + skb_put(skb, (unsigned long) skb->data & + (BTSDIO_DMA_ALIGN - 1)); + skb_pull(skb, (unsigned long) skb->data & + (BTSDIO_DMA_ALIGN - 1)); + } + + payload = skb->data; + + ret = sdio_readsb(card->func, payload, card->ioport, + num_blocks * blksz); + if (ret < 0) { + BT_ERR("readsb failed: %d", ret); + ret = -EIO; + goto exit; + } + + /* This is SDIO specific header length: byte[2][1][0], type: byte[3] + * (HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor) + */ + + buf_len = payload[0]; + buf_len |= payload[1] << 8; + buf_len |= payload[2] << 16; + + if (buf_len > blksz * num_blocks) { + BT_ERR("Skip incorrect packet: hdrlen %d buffer %d", + buf_len, blksz * num_blocks); + ret = -EIO; + goto exit; + } + + type = payload[3]; + + switch (type) { + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_EVENT_PKT: + hci_skb_pkt_type(skb) = type; + skb_put(skb, buf_len); + skb_pull(skb, SDIO_HEADER_LEN); + + if (type == HCI_EVENT_PKT) { + if (btmrvl_check_evtpkt(priv, skb)) + hci_recv_frame(hdev, skb); + } else { + hci_recv_frame(hdev, skb); + } + + hdev->stat.byte_rx += buf_len; + break; + + case MRVL_VENDOR_PKT: + 
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; + skb_put(skb, buf_len); + skb_pull(skb, SDIO_HEADER_LEN); + + if (btmrvl_process_event(priv, skb)) + hci_recv_frame(hdev, skb); + + hdev->stat.byte_rx += buf_len; + break; + + default: + BT_ERR("Unknown packet type:%d", type); + BT_ERR("hex: %*ph", blksz * num_blocks, payload); + + kfree_skb(skb); + skb = NULL; + break; + } + +exit: + if (ret) { + hdev->stat.err_rx++; + kfree_skb(skb); + } + + return ret; +} + +static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv) +{ + ulong flags; + u8 ireg; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + + spin_lock_irqsave(&priv->driver_lock, flags); + ireg = sdio_ireg; + sdio_ireg = 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + sdio_claim_host(card->func); + if (ireg & DN_LD_HOST_INT_STATUS) { + if (priv->btmrvl_dev.tx_dnld_rdy) + BT_DBG("tx_done already received: " + " int_status=0x%x", ireg); + else + priv->btmrvl_dev.tx_dnld_rdy = true; + } + + if (ireg & UP_LD_HOST_INT_STATUS) + btmrvl_sdio_card_to_host(priv); + + sdio_release_host(card->func); + + return 0; +} + +static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg) +{ + struct btmrvl_adapter *adapter = card->priv->adapter; + int ret; + + ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE); + if (ret) { + BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret); + return ret; + } + + *ireg = adapter->hw_regs[card->reg->host_intstatus]; + BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg); + + return 0; +} + +static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg) +{ + int ret; + + *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret); + if (ret) { + BT_ERR("sdio_readb: read int status failed: %d", ret); + return ret; + } + + if (*ireg) { + /* + * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS + * Clear the interrupt status register and re-enable the + * interrupt. 
+ */ + BT_DBG("int_status = 0x%x", *ireg); + + sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS | + UP_LD_HOST_INT_STATUS), + card->reg->host_intstatus, &ret); + if (ret) { + BT_ERR("sdio_writeb: clear int status failed: %d", ret); + return ret; + } + } + + return 0; +} + +static void btmrvl_sdio_interrupt(struct sdio_func *func) +{ + struct btmrvl_private *priv; + struct btmrvl_sdio_card *card; + ulong flags; + u8 ireg = 0; + int ret; + + card = sdio_get_drvdata(func); + if (!card || !card->priv) { + BT_ERR("sbi_interrupt(%p) card or priv is NULL, card=%p", + func, card); + return; + } + + priv = card->priv; + + if (priv->surprise_removed) + return; + + if (card->reg->int_read_to_clear) + ret = btmrvl_sdio_read_to_clear(card, &ireg); + else + ret = btmrvl_sdio_write_to_clear(card, &ireg); + + if (ret) + return; + + spin_lock_irqsave(&priv->driver_lock, flags); + sdio_ireg |= ireg; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + btmrvl_interrupt(priv); +} + +static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card) +{ + struct sdio_func *func; + u8 reg; + int ret; + + if (!card || !card->func) { + BT_ERR("Error: card or function is NULL!"); + ret = -EINVAL; + goto failed; + } + + func = card->func; + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret) { + BT_ERR("sdio_enable_func() failed: ret=%d", ret); + ret = -EIO; + goto release_host; + } + + ret = sdio_claim_irq(func, btmrvl_sdio_interrupt); + if (ret) { + BT_ERR("sdio_claim_irq failed: ret=%d", ret); + ret = -EIO; + goto disable_func; + } + + ret = sdio_set_block_size(card->func, SDIO_BLOCK_SIZE); + if (ret) { + BT_ERR("cannot set SDIO block size"); + ret = -EIO; + goto release_irq; + } + + reg = sdio_readb(func, card->reg->io_port_0, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + card->ioport = reg; + + reg = sdio_readb(func, card->reg->io_port_1, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + card->ioport |= (reg << 8); + + reg = sdio_readb(func, card->reg->io_port_2, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + card->ioport |= (reg << 16); + + BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport); + + if (card->reg->int_read_to_clear) { + reg = sdio_readb(func, card->reg->host_int_rsr, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + reg = sdio_readb(func, card->reg->card_misc_cfg, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + } + + sdio_set_drvdata(func, card); + + sdio_release_host(func); + + return 0; + +release_irq: + sdio_release_irq(func); + +disable_func: + sdio_disable_func(func); + +release_host: + sdio_release_host(func); + +failed: + return ret; +} + +static int btmrvl_sdio_unregister_dev(struct btmrvl_sdio_card *card) +{ + if (card && card->func) { + sdio_claim_host(card->func); + sdio_release_irq(card->func); + sdio_disable_func(card->func); + sdio_release_host(card->func); + sdio_set_drvdata(card->func, NULL); + } + + return 0; +} + +static int btmrvl_sdio_enable_host_int(struct btmrvl_sdio_card *card) +{ + int ret; + + if (!card || !card->func) + return -EINVAL; + + sdio_claim_host(card->func); + + ret = btmrvl_sdio_enable_host_int_mask(card, HIM_ENABLE); + + btmrvl_sdio_get_rx_unit(card); + + sdio_release_host(card->func); + + return 
ret; +} + +static int btmrvl_sdio_disable_host_int(struct btmrvl_sdio_card *card) +{ + int ret; + + if (!card || !card->func) + return -EINVAL; + + sdio_claim_host(card->func); + + ret = btmrvl_sdio_disable_host_int_mask(card, HIM_DISABLE); + + sdio_release_host(card->func); + + return ret; +} + +static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, + u8 *payload, u16 nb) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + int blksz; + int i = 0; + u8 *buf = NULL; + void *tmpbuf = NULL; + int tmpbufsz; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + return -EINVAL; + } + + blksz = DIV_ROUND_UP(nb, SDIO_BLOCK_SIZE) * SDIO_BLOCK_SIZE; + + buf = payload; + if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1) || + nb < blksz) { + tmpbufsz = ALIGN_SZ(blksz, BTSDIO_DMA_ALIGN) + + BTSDIO_DMA_ALIGN; + tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + buf = (u8 *) ALIGN_ADDR(tmpbuf, BTSDIO_DMA_ALIGN); + memcpy(buf, payload, nb); + } + + sdio_claim_host(card->func); + + do { + /* Transfer data to card */ + ret = sdio_writesb(card->func, card->ioport, buf, + blksz); + if (ret < 0) { + i++; + BT_ERR("i=%d writesb failed: %d", i, ret); + BT_ERR("hex: %*ph", nb, payload); + ret = -EIO; + if (i > MAX_WRITE_IOMEM_RETRY) + goto exit; + } + } while (ret); + + priv->btmrvl_dev.tx_dnld_rdy = false; + +exit: + sdio_release_host(card->func); + kfree(tmpbuf); + + return ret; +} + +static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) +{ + int ret; + u8 fws0; + int pollnum = MAX_POLL_TRIES; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + return -EINVAL; + } + + if (!btmrvl_sdio_verify_fw_download(card, 1)) { + BT_DBG("Firmware already downloaded!"); + return 0; + } + + sdio_claim_host(card->func); + + /* Check if other function driver is downloading the firmware */ + fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); + if (ret) { + BT_ERR("Failed to read FW downloading status!"); + ret = -EIO; + goto done; + } + if (fws0) { + BT_DBG("BT not the winner (%#x). 
Skip FW downloading", fws0); + + /* Give other function more time to download the firmware */ + pollnum *= 10; + } else { + if (card->helper) { + ret = btmrvl_sdio_download_helper(card); + if (ret) { + BT_ERR("Failed to download helper!"); + ret = -EIO; + goto done; + } + } + + if (btmrvl_sdio_download_fw_w_helper(card)) { + BT_ERR("Failed to download firmware!"); + ret = -EIO; + goto done; + } + } + + /* + * winner or not, with this test the FW synchronizes when the + * module can continue its initialization + */ + if (btmrvl_sdio_verify_fw_download(card, pollnum)) { + BT_ERR("FW failed to be active in time!"); + ret = -ETIMEDOUT; + goto done; + } + + sdio_release_host(card->func); + + return 0; + +done: + sdio_release_host(card->func); + return ret; +} + +static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + return -EINVAL; + } + + sdio_claim_host(card->func); + + sdio_writeb(card->func, HOST_POWER_UP, card->reg->cfg, &ret); + + sdio_release_host(card->func); + + BT_DBG("wake up firmware"); + + return ret; +} + +static void btmrvl_sdio_dump_regs(struct btmrvl_private *priv) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + unsigned int reg, reg_start, reg_end; + char buf[256], *ptr; + u8 loop, func, data; + int MAX_LOOP = 2; + + btmrvl_sdio_wakeup_fw(priv); + sdio_claim_host(card->func); + + for (loop = 0; loop < MAX_LOOP; loop++) { + memset(buf, 0, sizeof(buf)); + ptr = buf; + + if (loop == 0) { + /* Read the registers of SDIO function0 */ + func = loop; + reg_start = 0; + reg_end = 9; + } else { + func = 2; + reg_start = 0; + reg_end = 0x09; + } + + ptr += sprintf(ptr, "SDIO Func%d (%#x-%#x): ", + func, reg_start, reg_end); + for (reg = reg_start; reg <= reg_end; reg++) { + if (func == 0) + data = sdio_f0_readb(card->func, reg, &ret); + else + data = sdio_readb(card->func, reg, &ret); + + if (!ret) { + ptr += sprintf(ptr, "%02x ", data); + } else { + ptr += sprintf(ptr, "ERR"); + break; + } + } + + BT_INFO("%s", buf); + } + + sdio_release_host(card->func); +} + +/* This function read/write firmware */ +static enum +rdwr_status btmrvl_sdio_rdwr_firmware(struct btmrvl_private *priv, + u8 doneflag) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret, tries; + u8 ctrl_data = 0; + + sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, + &ret); + + if (ret) { + BT_ERR("SDIO write err"); + return RDWR_STATUS_FAILURE; + } + + for (tries = 0; tries < MAX_POLL_TRIES; tries++) { + ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl, + &ret); + + if (ret) { + BT_ERR("SDIO read err"); + return RDWR_STATUS_FAILURE; + } + + if (ctrl_data == FW_DUMP_DONE) + break; + if (doneflag && ctrl_data == doneflag) + return RDWR_STATUS_DONE; + if (ctrl_data != FW_DUMP_HOST_READY) { + BT_INFO("The ctrl reg was changed, re-try again!"); + sdio_writeb(card->func, FW_DUMP_HOST_READY, + card->reg->fw_dump_ctrl, &ret); + if (ret) { + BT_ERR("SDIO write err"); + return RDWR_STATUS_FAILURE; + } + } + usleep_range(100, 200); + } + + if (ctrl_data == FW_DUMP_HOST_READY) { + BT_ERR("Fail to pull ctrl_data"); + return RDWR_STATUS_FAILURE; + } + + return RDWR_STATUS_SUCCESS; +} + +/* This function dump sdio register and memory data */ +static void btmrvl_sdio_coredump(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmrvl_sdio_card *card; + struct btmrvl_private *priv; + 
int ret = 0; + unsigned int reg, reg_start, reg_end; + enum rdwr_status stat; + u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr; + u8 dump_num = 0, idx, i, read_reg, doneflag = 0; + u32 memory_size, fw_dump_len = 0; + int size = 0; + + card = sdio_get_drvdata(func); + priv = card->priv; + + /* dump sdio register first */ + btmrvl_sdio_dump_regs(priv); + + if (!card->supports_fw_dump) { + BT_ERR("Firmware dump not supported for this card!"); + return; + } + + for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { + struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; + } + entry->mem_size = 0; + } + + btmrvl_sdio_wakeup_fw(priv); + sdio_claim_host(card->func); + + BT_INFO("== btmrvl firmware dump start =="); + + stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + reg = card->reg->fw_dump_start; + /* Read the number of the memories which will dump */ + dump_num = sdio_readb(card->func, reg, &ret); + + if (ret) { + BT_ERR("SDIO read memory length err"); + goto done; + } + + /* Read the length of every memory which will dump */ + for (idx = 0; idx < dump_num; idx++) { + struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + + stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + memory_size = 0; + reg = card->reg->fw_dump_start; + for (i = 0; i < 4; i++) { + read_reg = sdio_readb(card->func, reg, &ret); + if (ret) { + BT_ERR("SDIO read err"); + goto done; + } + memory_size |= (read_reg << i*8); + reg++; + } + + if (memory_size == 0) { + BT_INFO("Firmware dump finished!"); + sdio_writeb(card->func, FW_DUMP_READ_DONE, + card->reg->fw_dump_ctrl, &ret); + if (ret) { + BT_ERR("SDIO Write MEMDUMP_FINISH ERR"); + goto done; + } + break; + } + + BT_INFO("%s_SIZE=0x%x", entry->mem_name, memory_size); + entry->mem_ptr = vzalloc(memory_size + 1); + entry->mem_size = memory_size; + if (!entry->mem_ptr) { + BT_ERR("Vzalloc %s failed", entry->mem_name); + goto done; + } + + fw_dump_len += (strlen("========Start dump ") + + strlen(entry->mem_name) + + strlen("========\n") + + (memory_size + 1) + + strlen("\n========End dump========\n")); + + dbg_ptr = entry->mem_ptr; + end_ptr = dbg_ptr + memory_size; + + doneflag = entry->done_flag; + BT_INFO("Start %s output, please wait...", + entry->mem_name); + + do { + stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + reg_start = card->reg->fw_dump_start; + reg_end = card->reg->fw_dump_end; + for (reg = reg_start; reg <= reg_end; reg++) { + *dbg_ptr = sdio_readb(card->func, reg, &ret); + if (ret) { + BT_ERR("SDIO read err"); + goto done; + } + if (dbg_ptr < end_ptr) + dbg_ptr++; + else + BT_ERR("Allocated buffer not enough"); + } + + if (stat == RDWR_STATUS_DONE) { + BT_INFO("%s done: size=0x%tx", + entry->mem_name, + dbg_ptr - entry->mem_ptr); + break; + } + } while (1); + } + + BT_INFO("== btmrvl firmware dump end =="); + +done: + sdio_release_host(card->func); + + if (fw_dump_len == 0) + return; + + fw_dump_data = vzalloc(fw_dump_len + 1); + if (!fw_dump_data) { + BT_ERR("Vzalloc fw_dump_data fail!"); + return; + } + fw_dump_ptr = fw_dump_data; + + /* Dump all the memory data into single file, a userspace script will + * be used to split all the memory data to multiple files + */ + BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start"); + for (idx = 0; idx < dump_num; idx++) { + struct 
memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + size += scnprintf(fw_dump_ptr + size, + fw_dump_len + 1 - size, + "========Start dump %s========\n", + entry->mem_name); + + memcpy(fw_dump_ptr + size, entry->mem_ptr, + entry->mem_size); + size += entry->mem_size; + + size += scnprintf(fw_dump_ptr + size, + fw_dump_len + 1 - size, + "\n========End dump========\n"); + + vfree(mem_type_mapping_tbl[idx].mem_ptr); + mem_type_mapping_tbl[idx].mem_ptr = NULL; + } + } + + /* fw_dump_data will be free in device coredump release function + * after 5 min + */ + dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL); + BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end"); +} + +static int btmrvl_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + struct btmrvl_private *priv = NULL; + struct btmrvl_sdio_card *card = NULL; + + BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d", + id->vendor, id->device, id->class, func->num); + + card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->func = func; + + if (id->driver_data) { + struct btmrvl_sdio_device *data = (void *) id->driver_data; + card->helper = data->helper; + card->firmware = data->firmware; + card->reg = data->reg; + card->sd_blksz_fw_dl = data->sd_blksz_fw_dl; + card->support_pscan_win_report = data->support_pscan_win_report; + card->supports_fw_dump = data->supports_fw_dump; + } + + if (btmrvl_sdio_register_dev(card) < 0) { + BT_ERR("Failed to register BT device!"); + return -ENODEV; + } + + /* Disable the interrupts on the card */ + btmrvl_sdio_disable_host_int(card); + + if (btmrvl_sdio_download_fw(card)) { + BT_ERR("Downloading firmware failed!"); + ret = -ENODEV; + goto unreg_dev; + } + + btmrvl_sdio_enable_host_int(card); + + /* Device tree node parsing and platform specific configuration*/ + btmrvl_sdio_probe_of(&func->dev, card); + + priv = btmrvl_add_card(card); + if (!priv) { + BT_ERR("Initializing card failed!"); + ret = -ENODEV; + goto disable_host_int; + } + + card->priv = priv; + + /* Initialize the interface specific function pointers */ + priv->hw_host_to_card = btmrvl_sdio_host_to_card; + priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; + priv->hw_process_int_status = btmrvl_sdio_process_int_status; + + if (btmrvl_register_hdev(priv)) { + BT_ERR("Register hdev failed!"); + ret = -ENODEV; + goto disable_host_int; + } + + return 0; + +disable_host_int: + btmrvl_sdio_disable_host_int(card); +unreg_dev: + btmrvl_sdio_unregister_dev(card); + return ret; +} + +static void btmrvl_sdio_remove(struct sdio_func *func) +{ + struct btmrvl_sdio_card *card; + + if (func) { + card = sdio_get_drvdata(func); + if (card) { + /* Send SHUTDOWN command & disable interrupt + * if user removes the module. 
+ */ + if (user_rmmod) { + btmrvl_send_module_cfg_cmd(card->priv, + MODULE_SHUTDOWN_REQ); + btmrvl_sdio_disable_host_int(card); + } + + BT_DBG("unregister dev"); + card->priv->surprise_removed = true; + btmrvl_sdio_unregister_dev(card); + btmrvl_remove_card(card->priv); + } + } +} + +static int btmrvl_sdio_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmrvl_sdio_card *card; + struct btmrvl_private *priv; + mmc_pm_flag_t pm_flags; + struct hci_dev *hcidev; + + if (func) { + pm_flags = sdio_get_host_pm_caps(func); + BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func), + pm_flags); + if (!(pm_flags & MMC_PM_KEEP_POWER)) { + BT_ERR("%s: cannot remain alive while suspended", + sdio_func_id(func)); + return -ENOSYS; + } + card = sdio_get_drvdata(func); + if (!card || !card->priv) { + BT_ERR("card or priv structure is not valid"); + return 0; + } + } else { + BT_ERR("sdio_func is not specified"); + return 0; + } + + /* Enable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && + device_may_wakeup(dev)) { + card->plt_wake_cfg->wake_by_bt = false; + enable_irq(card->plt_wake_cfg->irq_bt); + enable_irq_wake(card->plt_wake_cfg->irq_bt); + } + + priv = card->priv; + priv->adapter->is_suspending = true; + hcidev = priv->btmrvl_dev.hcidev; + BT_DBG("%s: SDIO suspend", hcidev->name); + hci_suspend_dev(hcidev); + + if (priv->adapter->hs_state != HS_ACTIVATED) { + if (btmrvl_enable_hs(priv)) { + BT_ERR("HS not activated, suspend failed!"); + /* Disable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && + card->plt_wake_cfg->irq_bt >= 0 && + device_may_wakeup(dev)) { + disable_irq_wake(card->plt_wake_cfg->irq_bt); + disable_irq(card->plt_wake_cfg->irq_bt); + } + + priv->adapter->is_suspending = false; + return -EBUSY; + } + } + + priv->adapter->is_suspending = false; + priv->adapter->is_suspended = true; + + /* We will keep the power when hs enabled successfully */ + if (priv->adapter->hs_state == HS_ACTIVATED) { + BT_DBG("suspend with MMC_PM_KEEP_POWER"); + return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + } + + BT_DBG("suspend without MMC_PM_KEEP_POWER"); + return 0; +} + +static int btmrvl_sdio_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmrvl_sdio_card *card; + struct btmrvl_private *priv; + mmc_pm_flag_t pm_flags; + struct hci_dev *hcidev; + + if (func) { + pm_flags = sdio_get_host_pm_caps(func); + BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func), + pm_flags); + card = sdio_get_drvdata(func); + if (!card || !card->priv) { + BT_ERR("card or priv structure is not valid"); + return 0; + } + } else { + BT_ERR("sdio_func is not specified"); + return 0; + } + priv = card->priv; + + if (!priv->adapter->is_suspended) { + BT_DBG("device already resumed"); + return 0; + } + + priv->hw_wakeup_firmware(priv); + priv->adapter->hs_state = HS_DEACTIVATED; + hcidev = priv->btmrvl_dev.hcidev; + BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name); + priv->adapter->is_suspended = false; + BT_DBG("%s: SDIO resume", hcidev->name); + hci_resume_dev(hcidev); + + /* Disable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && + device_may_wakeup(dev)) { + disable_irq_wake(card->plt_wake_cfg->irq_bt); + disable_irq(card->plt_wake_cfg->irq_bt); + if (card->plt_wake_cfg->wake_by_bt) + /* Undo our disable, since interrupt handler already + * did this. 
+ */ + enable_irq(card->plt_wake_cfg->irq_bt); + } + + return 0; +} + +static const struct dev_pm_ops btmrvl_sdio_pm_ops = { + .suspend = btmrvl_sdio_suspend, + .resume = btmrvl_sdio_resume, +}; + +static struct sdio_driver bt_mrvl_sdio = { + .name = "btmrvl_sdio", + .id_table = btmrvl_sdio_ids, + .probe = btmrvl_sdio_probe, + .remove = btmrvl_sdio_remove, + .drv = { + .owner = THIS_MODULE, + .coredump = btmrvl_sdio_coredump, + .pm = &btmrvl_sdio_pm_ops, + } +}; + +static int __init btmrvl_sdio_init_module(void) +{ + if (sdio_register_driver(&bt_mrvl_sdio) != 0) { + BT_ERR("SDIO Driver Registration Failed"); + return -ENODEV; + } + + /* Clear the flag in case user removes the card. */ + user_rmmod = 0; + + return 0; +} + +static void __exit btmrvl_sdio_exit_module(void) +{ + /* Set the flag as user is removing this module. */ + user_rmmod = 1; + + sdio_unregister_driver(&bt_mrvl_sdio); +} + +module_init(btmrvl_sdio_init_module); +module_exit(btmrvl_sdio_exit_module); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE("mrvl/sd8688_helper.bin"); +MODULE_FIRMWARE("mrvl/sd8688.bin"); +MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin"); +MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin"); diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h new file mode 100644 index 000000000..72dd3b7d8 --- /dev/null +++ b/drivers/bluetooth/btmrvl_sdio.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/** + * Marvell BT-over-SDIO driver: SDIO interface related definitions + * + * Copyright (C) 2009, Marvell International Ltd. 
+ **/ + +#define SDIO_HEADER_LEN 4 + +/* SD block size can not bigger than 64 due to buf size limit in firmware */ +/* define SD block size for data Tx/Rx */ +#define SDIO_BLOCK_SIZE 64 + +/* Number of blocks for firmware transfer */ +#define FIRMWARE_TRANSFER_NBLOCK 2 + +/* This is for firmware specific length */ +#define FW_EXTRA_LEN 36 + +#define MRVDRV_SIZE_OF_CMD_BUFFER (2 * 1024) + +#define MRVDRV_BT_RX_PACKET_BUFFER_SIZE \ + (HCI_MAX_FRAME_SIZE + FW_EXTRA_LEN) + +#define ALLOC_BUF_SIZE (((max_t (int, MRVDRV_BT_RX_PACKET_BUFFER_SIZE, \ + MRVDRV_SIZE_OF_CMD_BUFFER) + SDIO_HEADER_LEN \ + + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE) \ + * SDIO_BLOCK_SIZE) + +/* The number of times to try when polling for status */ +#define MAX_POLL_TRIES 100 + +/* Max retry number of CMD53 write */ +#define MAX_WRITE_IOMEM_RETRY 2 + +/* register bitmasks */ +#define HOST_POWER_UP BIT(1) +#define HOST_CMD53_FIN BIT(2) + +#define HIM_DISABLE 0xff +#define HIM_ENABLE (BIT(0) | BIT(1)) + +#define UP_LD_HOST_INT_STATUS BIT(0) +#define DN_LD_HOST_INT_STATUS BIT(1) + +#define DN_LD_CARD_RDY BIT(0) +#define CARD_IO_READY BIT(3) + +#define FIRMWARE_READY 0xfedc + +struct btmrvl_plt_wake_cfg { + int irq_bt; + bool wake_by_bt; +}; + +struct btmrvl_sdio_card_reg { + u8 cfg; + u8 host_int_mask; + u8 host_intstatus; + u8 card_status; + u8 sq_read_base_addr_a0; + u8 sq_read_base_addr_a1; + u8 card_revision; + u8 card_fw_status0; + u8 card_fw_status1; + u8 card_rx_len; + u8 card_rx_unit; + u8 io_port_0; + u8 io_port_1; + u8 io_port_2; + bool int_read_to_clear; + u8 host_int_rsr; + u8 card_misc_cfg; + u8 fw_dump_ctrl; + u8 fw_dump_start; + u8 fw_dump_end; +}; + +struct btmrvl_sdio_card { + struct sdio_func *func; + u32 ioport; + const char *helper; + const char *firmware; + const struct btmrvl_sdio_card_reg *reg; + bool support_pscan_win_report; + bool supports_fw_dump; + u16 sd_blksz_fw_dl; + u8 rx_unit; + struct btmrvl_private *priv; + struct device_node *plt_of_node; + struct btmrvl_plt_wake_cfg *plt_wake_cfg; +}; + +struct btmrvl_sdio_device { + const char *helper; + const char *firmware; + const struct btmrvl_sdio_card_reg *reg; + const bool support_pscan_win_report; + u16 sd_blksz_fw_dl; + bool supports_fw_dump; +}; + + +/* Platform specific DMA alignment */ +#define BTSDIO_DMA_ALIGN 8 + +/* Macros for Data Alignment : size */ +#define ALIGN_SZ(p, a) \ + (((p) + ((a) - 1)) & ~((a) - 1)) + +/* Macros for Data Alignment : address */ +#define ALIGN_ADDR(p, a) \ + ((((unsigned long)(p)) + (((unsigned long)(a)) - 1)) & \ + ~(((unsigned long)(a)) - 1)) diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c new file mode 100644 index 000000000..aaabb7320 --- /dev/null +++ b/drivers/bluetooth/btmtk.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2021 MediaTek Inc. 
+ * + */ +#include +#include + +#include +#include + +#include "btmtk.h" + +#define VERSION "0.1" + +/* It is for mt79xx download rom patch*/ +#define MTK_FW_ROM_PATCH_HEADER_SIZE 32 +#define MTK_FW_ROM_PATCH_GD_SIZE 64 +#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64 +#define MTK_SEC_MAP_COMMON_SIZE 12 +#define MTK_SEC_MAP_NEED_SEND_SIZE 52 + +struct btmtk_patch_header { + u8 datetime[16]; + u8 platform[4]; + __le16 hwver; + __le16 swver; + __le32 magicnum; +} __packed; + +struct btmtk_global_desc { + __le32 patch_ver; + __le32 sub_sys; + __le32 feature_opt; + __le32 section_num; +} __packed; + +struct btmtk_section_map { + __le32 sectype; + __le32 secoffset; + __le32 secsize; + union { + __le32 u4SecSpec[13]; + struct { + __le32 dlAddr; + __le32 dlsize; + __le32 seckeyidx; + __le32 alignlen; + __le32 sectype; + __le32 dlmodecrctype; + __le32 crc; + __le32 reserved[6]; + } bin_info_spec; + }; +} __packed; + +static void btmtk_coredump(struct hci_dev *hdev) +{ + int err; + + err = __hci_cmd_send(hdev, 0xfd5b, 0, NULL); + if (err < 0) + bt_dev_err(hdev, "Coredump failed (%d)", err); +} + +static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmediatek_data *data = hci_get_priv(hdev); + char buf[80]; + + snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", + data->dev_id); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", + data->cd_info.fw_version); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\n", + data->cd_info.driver_name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: MediaTek\n"); + skb_put_data(skb, buf, strlen(buf)); +} + +static void btmtk_coredump_notify(struct hci_dev *hdev, int state) +{ + struct btmediatek_data *data = hci_get_priv(hdev); + + switch (state) { + case HCI_DEVCOREDUMP_IDLE: + data->cd_info.state = HCI_DEVCOREDUMP_IDLE; + break; + case HCI_DEVCOREDUMP_ACTIVE: + data->cd_info.state = HCI_DEVCOREDUMP_ACTIVE; + break; + case HCI_DEVCOREDUMP_TIMEOUT: + case HCI_DEVCOREDUMP_ABORT: + case HCI_DEVCOREDUMP_DONE: + data->cd_info.state = HCI_DEVCOREDUMP_IDLE; + btmtk_reset_sync(hdev); + break; + } +} + +int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync) +{ + struct btmtk_hci_wmt_params wmt_params; + struct btmtk_patch_header *hdr; + struct btmtk_global_desc *globaldesc = NULL; + struct btmtk_section_map *sectionmap; + const struct firmware *fw; + const u8 *fw_ptr; + const u8 *fw_bin_ptr; + int err, dlen, i, status; + u8 flag, first_block, retry; + u32 section_num, dl_size, section_offset; + u8 cmd[64]; + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load firmware file (%d)", err); + return err; + } + + fw_ptr = fw->data; + fw_bin_ptr = fw_ptr; + hdr = (struct btmtk_patch_header *)fw_ptr; + globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE); + section_num = le32_to_cpu(globaldesc->section_num); + + bt_dev_info(hdev, "HW/SW Version: 0x%04x%04x, Build Time: %s", + le16_to_cpu(hdr->hwver), le16_to_cpu(hdr->swver), hdr->datetime); + + for (i = 0; i < section_num; i++) { + first_block = 1; + fw_ptr = fw_bin_ptr; + sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + + MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i); + + section_offset = le32_to_cpu(sectionmap->secoffset); + dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize); + + if (dl_size > 0) { + retry 
= 20; + while (retry > 0) { + cmd[0] = 0; /* 0 means legacy dl mode. */ + memcpy(cmd + 1, + fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + + MTK_FW_ROM_PATCH_GD_SIZE + + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i + + MTK_SEC_MAP_COMMON_SIZE, + MTK_SEC_MAP_NEED_SEND_SIZE + 1); + + wmt_params.op = BTMTK_WMT_PATCH_DWNLD; + wmt_params.status = &status; + wmt_params.flag = 0; + wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1; + wmt_params.data = &cmd; + + err = wmt_cmd_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", + err); + goto err_release_fw; + } + + if (status == BTMTK_WMT_PATCH_UNDONE) { + break; + } else if (status == BTMTK_WMT_PATCH_PROGRESS) { + msleep(100); + retry--; + } else if (status == BTMTK_WMT_PATCH_DONE) { + goto next_section; + } else { + bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)", + status); + err = -EIO; + goto err_release_fw; + } + } + + fw_ptr += section_offset; + wmt_params.op = BTMTK_WMT_PATCH_DWNLD; + wmt_params.status = NULL; + + while (dl_size > 0) { + dlen = min_t(int, 250, dl_size); + if (first_block == 1) { + flag = 1; + first_block = 0; + } else if (dl_size - dlen <= 0) { + flag = 3; + } else { + flag = 2; + } + + wmt_params.flag = flag; + wmt_params.dlen = dlen; + wmt_params.data = fw_ptr; + + err = wmt_cmd_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", + err); + goto err_release_fw; + } + + dl_size -= dlen; + fw_ptr += dlen; + } + } +next_section: + continue; + } + /* Wait a few moments for firmware activation done */ + usleep_range(100000, 120000); + +err_release_fw: + release_firmware(fw); + + return err; +} +EXPORT_SYMBOL_GPL(btmtk_setup_firmware_79xx); + +int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync) +{ + struct btmtk_hci_wmt_params wmt_params; + const struct firmware *fw; + const u8 *fw_ptr; + size_t fw_size; + int err, dlen; + u8 flag, param; + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load firmware file (%d)", err); + return err; + } + + /* Power on data RAM the firmware relies on. 
 */
+	param = 1;
+	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+	wmt_params.flag = 3;
+	wmt_params.dlen = sizeof(param);
+	wmt_params.data = &param;
+	wmt_params.status = NULL;
+
+	err = wmt_cmd_sync(hdev, &wmt_params);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+		goto err_release_fw;
+	}
+
+	fw_ptr = fw->data;
+	fw_size = fw->size;
+
+	/* The patch header is 30 bytes and should be skipped */
+	if (fw_size < 30) {
+		err = -EINVAL;
+		goto err_release_fw;
+	}
+
+	fw_size -= 30;
+	fw_ptr += 30;
+	flag = 1;
+
+	wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+	wmt_params.status = NULL;
+
+	while (fw_size > 0) {
+		dlen = min_t(int, 250, fw_size);
+
+		/* Tell the device the position in the sequence */
+		if (fw_size - dlen <= 0)
+			flag = 3;
+		else if (fw_size < fw->size - 30)
+			flag = 2;
+
+		wmt_params.flag = flag;
+		wmt_params.dlen = dlen;
+		wmt_params.data = fw_ptr;
+
+		err = wmt_cmd_sync(hdev, &wmt_params);
+		if (err < 0) {
+			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+				   err);
+			goto err_release_fw;
+		}
+
+		fw_size -= dlen;
+		fw_ptr += dlen;
+	}
+
+	wmt_params.op = BTMTK_WMT_RST;
+	wmt_params.flag = 4;
+	wmt_params.dlen = 0;
+	wmt_params.data = NULL;
+	wmt_params.status = NULL;
+
+	/* Activate the function the firmware provides */
+	err = wmt_cmd_sync(hdev, &wmt_params);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+		goto err_release_fw;
+	}
+
+	/* Wait a few moments for the firmware activation to complete */
+	usleep_range(10000, 12000);
+
+err_release_fw:
+	release_firmware(fw);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_setup_firmware);
+
+int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	struct sk_buff *skb;
+	long ret;
+
+	skb = __hci_cmd_sync(hdev, 0xfc1a, 6, bdaddr, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		ret = PTR_ERR(skb);
+		bt_dev_err(hdev, "changing MediaTek device address failed (%ld)",
+			   ret);
+		return ret;
+	}
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
+
+void btmtk_reset_sync(struct hci_dev *hdev)
+{
+	struct btmediatek_data *reset_work = hci_get_priv(hdev);
+	int err;
+
+	hci_dev_lock(hdev);
+
+	err = hci_cmd_sync_queue(hdev, reset_work->reset_sync, NULL, NULL);
+	if (err)
+		bt_dev_err(hdev, "failed to reset (%d)", err);
+
+	hci_dev_unlock(hdev);
+}
+EXPORT_SYMBOL_GPL(btmtk_reset_sync);
+
+int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
+			    u32 fw_version)
+{
+	struct btmediatek_data *data = hci_get_priv(hdev);
+
+	if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+		return -EOPNOTSUPP;
+
+	data->cd_info.fw_version = fw_version;
+	data->cd_info.state = HCI_DEVCOREDUMP_IDLE;
+	data->cd_info.driver_name = name;
+
+	return hci_devcd_register(hdev, btmtk_coredump, btmtk_coredump_hdr,
+				  btmtk_coredump_notify);
+}
+EXPORT_SYMBOL_GPL(btmtk_register_coredump);
+
+int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct btmediatek_data *data = hci_get_priv(hdev);
+	int err;
+
+	if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+		return 0;
+
+	switch (data->cd_info.state) {
+	case HCI_DEVCOREDUMP_IDLE:
+		err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
+		if (err < 0)
+			break;
+		data->cd_info.cnt = 0;
+
+		/* The coredump is expected to complete within 5 seconds */
+		schedule_delayed_work(&hdev->dump.dump_timeout,
+				      msecs_to_jiffies(5000));
+		fallthrough;
+	case HCI_DEVCOREDUMP_ACTIVE:
+	default:
+		err = hci_devcd_append(hdev, skb);
+		if (err < 0)
+			break;
+		data->cd_info.cnt++;
+
+		/* A MediaTek coredump spans more than MTK_COREDUMP_NUM packets */
+		if (data->cd_info.cnt >
MTK_COREDUMP_NUM && + skb->len > MTK_COREDUMP_END_LEN) + if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN], + MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) { + bt_dev_info(hdev, "Mediatek coredump end"); + hci_devcd_complete(hdev); + } + + break; + } + + if (err < 0) + kfree_skb(skb); + + return err; +} +EXPORT_SYMBOL_GPL(btmtk_process_coredump); + +MODULE_AUTHOR("Sean Wang "); +MODULE_AUTHOR("Mark Chen "); +MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_MT7622); +MODULE_FIRMWARE(FIRMWARE_MT7663); +MODULE_FIRMWARE(FIRMWARE_MT7668); +MODULE_FIRMWARE(FIRMWARE_MT7961); +MODULE_FIRMWARE(FIRMWARE_MT7925); diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h new file mode 100644 index 000000000..56f5502ba --- /dev/null +++ b/drivers/bluetooth/btmtk.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2021 MediaTek Inc. */ + +#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin" +#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin" +#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin" +#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin" +#define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin" + +#define HCI_EV_WMT 0xe4 +#define HCI_WMT_MAX_EVENT_SIZE 64 + +#define BTMTK_WMT_REG_WRITE 0x1 +#define BTMTK_WMT_REG_READ 0x2 + +#define MT7921_BTSYS_RST 0x70002610 +#define MT7921_BTSYS_RST_WITH_GPIO BIT(7) + +#define MT7921_PINMUX_0 0x70005050 +#define MT7921_PINMUX_1 0x70005054 + +#define MT7921_DLSTATUS 0x7c053c10 +#define BT_DL_STATE BIT(1) + +#define MTK_COREDUMP_SIZE (1024 * 1000) +#define MTK_COREDUMP_END "coredump end" +#define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END)) +#define MTK_COREDUMP_NUM 255 + +enum { + BTMTK_WMT_PATCH_DWNLD = 0x1, + BTMTK_WMT_TEST = 0x2, + BTMTK_WMT_WAKEUP = 0x3, + BTMTK_WMT_HIF = 0x4, + BTMTK_WMT_FUNC_CTRL = 0x6, + BTMTK_WMT_RST = 0x7, + BTMTK_WMT_REGISTER = 0x8, + BTMTK_WMT_SEMAPHORE = 0x17, +}; + +enum { + BTMTK_WMT_INVALID, + BTMTK_WMT_PATCH_UNDONE, + BTMTK_WMT_PATCH_PROGRESS, + BTMTK_WMT_PATCH_DONE, + BTMTK_WMT_ON_UNDONE, + BTMTK_WMT_ON_DONE, + BTMTK_WMT_ON_PROGRESS, +}; + +struct btmtk_wmt_hdr { + u8 dir; + u8 op; + __le16 dlen; + u8 flag; +} __packed; + +struct btmtk_hci_wmt_cmd { + struct btmtk_wmt_hdr hdr; + u8 data[]; +} __packed; + +struct btmtk_hci_wmt_evt { + struct hci_event_hdr hhdr; + struct btmtk_wmt_hdr whdr; +} __packed; + +struct btmtk_hci_wmt_evt_funcc { + struct btmtk_hci_wmt_evt hwhdr; + __be16 status; +} __packed; + +struct btmtk_hci_wmt_evt_reg { + struct btmtk_hci_wmt_evt hwhdr; + u8 rsv[2]; + u8 num; + __le32 addr; + __le32 val; +} __packed; + +struct btmtk_tci_sleep { + u8 mode; + __le16 duration; + __le16 host_duration; + u8 host_wakeup_pin; + u8 time_compensation; +} __packed; + +struct btmtk_wakeon { + u8 mode; + u8 gpo; + u8 active_high; + __le16 enable_delay; + __le16 wakeup_delay; +} __packed; + +struct btmtk_sco { + u8 clock_config; + u8 transmit_format_config; + u8 channel_format_config; + u8 channel_select_config; +} __packed; + +struct reg_read_cmd { + u8 type; + u8 rsv; + u8 num; + __le32 addr; +} __packed; + +struct reg_write_cmd { + u8 type; + u8 rsv; + u8 num; + __le32 addr; + __le32 data; + __le32 mask; +} __packed; + +struct btmtk_hci_wmt_params { + u8 op; + u8 flag; + u16 dlen; + const void *data; + u32 *status; +}; + +typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *); + +struct btmtk_coredump_info { + const char *driver_name; + u32 
fw_version; + u16 cnt; + int state; +}; + +struct btmediatek_data { + u32 dev_id; + btmtk_reset_sync_func_t reset_sync; + struct btmtk_coredump_info cd_info; +}; + +typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *, + struct btmtk_hci_wmt_params *); + +#if IS_ENABLED(CONFIG_BT_MTK) + +int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); + +int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync); + +int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync); + +void btmtk_reset_sync(struct hci_dev *hdev); + +int btmtk_register_coredump(struct hci_dev *hdev, const char *name, + u32 fw_version); + +int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb); +#else + +static inline int btmtk_set_bdaddr(struct hci_dev *hdev, + const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync) +{ + return -EOPNOTSUPP; +} + +static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync) +{ + return -EOPNOTSUPP; +} + +static void btmtk_reset_sync(struct hci_dev *hdev) +{ +} + +static int btmtk_register_coredump(struct hci_dev *hdev, const char *name, + u32 fw_version) +{ + return -EOPNOTSUPP; +} + +static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} +#endif diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c new file mode 100644 index 000000000..f9a344475 --- /dev/null +++ b/drivers/bluetooth/btmtksdio.c @@ -0,0 +1,1503 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 MediaTek Inc. + +/* + * Bluetooth support for MediaTek SDIO devices + * + * This file is written based on btsdio.c and btmtkuart.c. 
+ * + * Author: Sean Wang + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "h4_recv.h" +#include "btmtk.h" + +#define VERSION "0.1" + +#define MTKBTSDIO_AUTOSUSPEND_DELAY 1000 + +static bool enable_autosuspend = true; + +struct btmtksdio_data { + const char *fwname; + u16 chipid; + bool lp_mbox_supported; +}; + +static const struct btmtksdio_data mt7663_data = { + .fwname = FIRMWARE_MT7663, + .chipid = 0x7663, + .lp_mbox_supported = false, +}; + +static const struct btmtksdio_data mt7668_data = { + .fwname = FIRMWARE_MT7668, + .chipid = 0x7668, + .lp_mbox_supported = false, +}; + +static const struct btmtksdio_data mt7921_data = { + .fwname = FIRMWARE_MT7961, + .chipid = 0x7921, + .lp_mbox_supported = true, +}; + +static const struct sdio_device_id btmtksdio_table[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663), + .driver_data = (kernel_ulong_t)&mt7663_data }, + {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668), + .driver_data = (kernel_ulong_t)&mt7668_data }, + {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7961), + .driver_data = (kernel_ulong_t)&mt7921_data }, + { } /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(sdio, btmtksdio_table); + +#define MTK_REG_CHLPCR 0x4 /* W1S */ +#define C_INT_EN_SET BIT(0) +#define C_INT_EN_CLR BIT(1) +#define C_FW_OWN_REQ_SET BIT(8) /* For write */ +#define C_COM_DRV_OWN BIT(8) /* For read */ +#define C_FW_OWN_REQ_CLR BIT(9) + +#define MTK_REG_CSDIOCSR 0x8 +#define SDIO_RE_INIT_EN BIT(0) +#define SDIO_INT_CTL BIT(2) + +#define MTK_REG_CHCR 0xc +#define C_INT_CLR_CTRL BIT(1) +#define BT_RST_DONE BIT(8) + +/* CHISR have the same bits field definition with CHIER */ +#define MTK_REG_CHISR 0x10 +#define MTK_REG_CHIER 0x14 +#define FW_OWN_BACK_INT BIT(0) +#define RX_DONE_INT BIT(1) +#define TX_EMPTY BIT(2) +#define TX_FIFO_OVERFLOW BIT(8) +#define FW_MAILBOX_INT BIT(15) +#define INT_MASK GENMASK(15, 0) +#define RX_PKT_LEN GENMASK(31, 16) + +#define MTK_REG_CSICR 0xc0 +#define CSICR_CLR_MBOX_ACK BIT(0) +#define MTK_REG_PH2DSM0R 0xc4 +#define PH2DSM0R_DRIVER_OWN BIT(0) +#define MTK_REG_PD2HRM0R 0xdc +#define PD2HRM0R_DRV_OWN BIT(0) + +#define MTK_REG_CTDR 0x18 + +#define MTK_REG_CRDR 0x1c + +#define MTK_REG_CRPLR 0x24 + +#define MTK_SDIO_BLOCK_SIZE 256 + +#define BTMTKSDIO_TX_WAIT_VND_EVT 1 +#define BTMTKSDIO_HW_TX_READY 2 +#define BTMTKSDIO_FUNC_ENABLED 3 +#define BTMTKSDIO_PATCH_ENABLED 4 +#define BTMTKSDIO_HW_RESET_ACTIVE 5 + +struct mtkbtsdio_hdr { + __le16 len; + __le16 reserved; + u8 bt_type; +} __packed; + +struct btmtksdio_dev { + struct hci_dev *hdev; + struct sdio_func *func; + struct device *dev; + + struct work_struct txrx_work; + unsigned long tx_state; + struct sk_buff_head txq; + + struct sk_buff *evt_skb; + + const struct btmtksdio_data *data; + + struct gpio_desc *reset; +}; + +static int mtk_hci_wmt_sync(struct hci_dev *hdev, + struct btmtk_hci_wmt_params *wmt_params) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; + struct btmtk_hci_wmt_evt_reg *wmt_evt_reg; + u32 hlen, status = BTMTK_WMT_INVALID; + struct btmtk_hci_wmt_evt *wmt_evt; + struct btmtk_hci_wmt_cmd *wc; + struct btmtk_wmt_hdr *hdr; + int err; + + /* Send the WMT command and wait until the WMT event returns */ + hlen = sizeof(*hdr) + wmt_params->dlen; + if (hlen > 255) + return -EINVAL; + + wc = kzalloc(hlen, GFP_KERNEL); + if (!wc) 
+ return -ENOMEM; + + hdr = &wc->hdr; + hdr->dir = 1; + hdr->op = wmt_params->op; + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); + hdr->flag = wmt_params->flag; + memcpy(wc->data, wmt_params->data, wmt_params->dlen); + + set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); + if (err < 0) { + clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + goto err_free_wc; + } + + /* The vendor specific WMT commands are all answered by a vendor + * specific event and will not have the Command Status or Command + * Complete as with usual HCI command flow control. + * + * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT + * state to be cleared. The driver specific event receive routine + * will clear that state and with that indicate completion of the + * WMT command. + */ + err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT, + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); + if (err == -EINTR) { + bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + goto err_free_wc; + } + + if (err) { + bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + err = -ETIMEDOUT; + goto err_free_wc; + } + + /* Parse and handle the return WMT event */ + wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data; + if (wmt_evt->whdr.op != hdr->op) { + bt_dev_err(hdev, "Wrong op received %d expected %d", + wmt_evt->whdr.op, hdr->op); + err = -EIO; + goto err_free_skb; + } + + switch (wmt_evt->whdr.op) { + case BTMTK_WMT_SEMAPHORE: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_UNDONE; + else + status = BTMTK_WMT_PATCH_DONE; + break; + case BTMTK_WMT_FUNC_CTRL: + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) + status = BTMTK_WMT_ON_DONE; + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) + status = BTMTK_WMT_ON_PROGRESS; + else + status = BTMTK_WMT_ON_UNDONE; + break; + case BTMTK_WMT_PATCH_DWNLD: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_DONE; + else if (wmt_evt->whdr.flag == 1) + status = BTMTK_WMT_PATCH_PROGRESS; + else + status = BTMTK_WMT_PATCH_UNDONE; + break; + case BTMTK_WMT_REGISTER: + wmt_evt_reg = (struct btmtk_hci_wmt_evt_reg *)wmt_evt; + if (le16_to_cpu(wmt_evt->whdr.dlen) == 12) + status = le32_to_cpu(wmt_evt_reg->val); + break; + } + + if (wmt_params->status) + *wmt_params->status = status; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; +err_free_wc: + kfree(wc); + + return err; +} + +static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev, + struct sk_buff *skb) +{ + struct mtkbtsdio_hdr *sdio_hdr; + int err; + + /* Make sure that there are enough rooms for SDIO header */ + if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) { + err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0, + GFP_ATOMIC); + if (err < 0) + return err; + } + + /* Prepend MediaTek SDIO Specific Header */ + skb_push(skb, sizeof(*sdio_hdr)); + + sdio_hdr = (void *)skb->data; + sdio_hdr->len = cpu_to_le16(skb->len); + sdio_hdr->reserved = cpu_to_le16(0); + sdio_hdr->bt_type = hci_skb_pkt_type(skb); + + clear_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); + err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data, + round_up(skb->len, MTK_SDIO_BLOCK_SIZE)); + if (err < 0) + goto err_skb_pull; + + bdev->hdev->stat.byte_tx += skb->len; + + kfree_skb(skb); + + return 0; + +err_skb_pull: + skb_pull(skb, sizeof(*sdio_hdr)); + + return err; 
+} + +static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev) +{ + return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL); +} + +static u32 btmtksdio_drv_own_query_79xx(struct btmtksdio_dev *bdev) +{ + return sdio_readl(bdev->func, MTK_REG_PD2HRM0R, NULL); +} + +static u32 btmtksdio_chcr_query(struct btmtksdio_dev *bdev) +{ + return sdio_readl(bdev->func, MTK_REG_CHCR, NULL); +} + +static int btmtksdio_fw_pmctrl(struct btmtksdio_dev *bdev) +{ + u32 status; + int err; + + sdio_claim_host(bdev->func); + + if (bdev->data->lp_mbox_supported && + test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state)) { + sdio_writel(bdev->func, CSICR_CLR_MBOX_ACK, MTK_REG_CSICR, + &err); + err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev, + status, !(status & PD2HRM0R_DRV_OWN), + 2000, 1000000); + if (err < 0) { + bt_dev_err(bdev->hdev, "mailbox ACK not cleared"); + goto out; + } + } + + /* Return ownership to the device */ + sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err); + if (err < 0) + goto out; + + err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, + !(status & C_COM_DRV_OWN), 2000, 1000000); + +out: + sdio_release_host(bdev->func); + + if (err < 0) + bt_dev_err(bdev->hdev, "Cannot return ownership to device"); + + return err; +} + +static int btmtksdio_drv_pmctrl(struct btmtksdio_dev *bdev) +{ + u32 status; + int err; + + sdio_claim_host(bdev->func); + + /* Get ownership from the device */ + sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err); + if (err < 0) + goto out; + + err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, + status & C_COM_DRV_OWN, 2000, 1000000); + + if (!err && bdev->data->lp_mbox_supported && + test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state)) + err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev, + status, status & PD2HRM0R_DRV_OWN, + 2000, 1000000); + +out: + sdio_release_host(bdev->func); + + if (err < 0) + bt_dev_err(bdev->hdev, "Cannot get ownership from device"); + + return err; +} + +static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct hci_event_hdr *hdr = (void *)skb->data; + u8 evt = hdr->evt; + int err; + + /* When someone waits for the WMT event, the skb is being cloned + * and being processed the events from there then. + */ + if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) { + bdev->evt_skb = skb_clone(skb, GFP_KERNEL); + if (!bdev->evt_skb) { + err = -ENOMEM; + goto err_out; + } + } + + err = hci_recv_frame(hdev, skb); + if (err < 0) + goto err_free_skb; + + if (evt == HCI_EV_WMT) { + if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, + &bdev->tx_state)) { + /* Barrier to sync with other CPUs */ + smp_mb__after_atomic(); + wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT); + } + } + + return 0; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + +err_out: + return err; +} + +static int btmtksdio_recv_acl(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); + + switch (handle) { + case 0xfc6f: + /* Firmware dump from device: when the firmware hangs, the + * device can no longer suspend and thus disable auto-suspend. 
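+		 * The dump fragments themselves are not consumed here; they
+		 * fall through and are handed to hci_recv_diag() below as
+		 * vendor diagnostic data.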
+ */ + pm_runtime_forbid(bdev->dev); + fallthrough; + case 0x05ff: + case 0x05fe: + /* Firmware debug logging */ + return hci_recv_diag(hdev, skb); + } + + return hci_recv_frame(hdev, skb); +} + +static const struct h4_recv_pkt mtk_recv_pkts[] = { + { H4_RECV_ACL, .recv = btmtksdio_recv_acl }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = btmtksdio_recv_event }, +}; + +static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size) +{ + const struct h4_recv_pkt *pkts = mtk_recv_pkts; + int pkts_count = ARRAY_SIZE(mtk_recv_pkts); + struct mtkbtsdio_hdr *sdio_hdr; + int err, i, pad_size; + struct sk_buff *skb; + u16 dlen; + + if (rx_size < sizeof(*sdio_hdr)) + return -EILSEQ; + + /* A SDIO packet is exactly containing a Bluetooth packet */ + skb = bt_skb_alloc(rx_size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_put(skb, rx_size); + + err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size); + if (err < 0) + goto err_kfree_skb; + + sdio_hdr = (void *)skb->data; + + /* We assume the default error as -EILSEQ simply to make the error path + * be cleaner. + */ + err = -EILSEQ; + + if (rx_size != le16_to_cpu(sdio_hdr->len)) { + bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched "); + goto err_kfree_skb; + } + + hci_skb_pkt_type(skb) = sdio_hdr->bt_type; + + /* Remove MediaTek SDIO header */ + skb_pull(skb, sizeof(*sdio_hdr)); + + /* We have to dig into the packet to get payload size and then know how + * many padding bytes at the tail, these padding bytes should be removed + * before the packet is indicated to the core layer. + */ + for (i = 0; i < pkts_count; i++) { + if (sdio_hdr->bt_type == (&pkts[i])->type) + break; + } + + if (i >= pkts_count) { + bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x", + sdio_hdr->bt_type); + goto err_kfree_skb; + } + + /* Remaining bytes cannot hold a header*/ + if (skb->len < (&pkts[i])->hlen) { + bt_dev_err(bdev->hdev, "The size of bt header is mismatched"); + goto err_kfree_skb; + } + + switch ((&pkts[i])->lsize) { + case 1: + dlen = skb->data[(&pkts[i])->loff]; + break; + case 2: + dlen = get_unaligned_le16(skb->data + + (&pkts[i])->loff); + break; + default: + goto err_kfree_skb; + } + + pad_size = skb->len - (&pkts[i])->hlen - dlen; + + /* Remaining bytes cannot hold a payload */ + if (pad_size < 0) { + bt_dev_err(bdev->hdev, "The size of bt payload is mismatched"); + goto err_kfree_skb; + } + + /* Remove padding bytes */ + skb_trim(skb, skb->len - pad_size); + + /* Complete frame */ + (&pkts[i])->recv(bdev->hdev, skb); + + bdev->hdev->stat.byte_rx += rx_size; + + return 0; + +err_kfree_skb: + kfree_skb(skb); + + return err; +} + +static void btmtksdio_txrx_work(struct work_struct *work) +{ + struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev, + txrx_work); + unsigned long txrx_timeout; + u32 int_status, rx_size; + struct sk_buff *skb; + int err; + + pm_runtime_get_sync(bdev->dev); + + sdio_claim_host(bdev->func); + + /* Disable interrupt */ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0); + + txrx_timeout = jiffies + 5 * HZ; + + do { + int_status = sdio_readl(bdev->func, MTK_REG_CHISR, NULL); + + /* Ack an interrupt as soon as possible before any operation on + * hardware. + * + * Note that we don't ack any status during operations to avoid race + * condition between the host and the device such as it's possible to + * mistakenly ack RX_DONE for the next packet and then cause interrupts + * not be raised again but there is still pending data in the hardware + * FIFO. 
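+		 *
+		 * Acking only the bits just read, and then re-reading CHISR on
+		 * the next loop iteration, means a packet arriving in between
+		 * still has its status bit observed and handled.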
+ */ + sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL); + int_status &= INT_MASK; + + if ((int_status & FW_MAILBOX_INT) && + bdev->data->chipid == 0x7921) { + sdio_writel(bdev->func, PH2DSM0R_DRIVER_OWN, + MTK_REG_PH2DSM0R, 0); + } + + if (int_status & FW_OWN_BACK_INT) + bt_dev_dbg(bdev->hdev, "Get fw own back"); + + if (int_status & TX_EMPTY) + set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); + + else if (unlikely(int_status & TX_FIFO_OVERFLOW)) + bt_dev_warn(bdev->hdev, "Tx fifo overflow"); + + if (test_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state)) { + skb = skb_dequeue(&bdev->txq); + if (skb) { + err = btmtksdio_tx_packet(bdev, skb); + if (err < 0) { + bdev->hdev->stat.err_tx++; + skb_queue_head(&bdev->txq, skb); + } + } + } + + if (int_status & RX_DONE_INT) { + rx_size = sdio_readl(bdev->func, MTK_REG_CRPLR, NULL); + rx_size = (rx_size & RX_PKT_LEN) >> 16; + if (btmtksdio_rx_packet(bdev, rx_size) < 0) + bdev->hdev->stat.err_rx++; + } + } while (int_status || time_is_before_jiffies(txrx_timeout)); + + /* Enable interrupt */ + sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0); + + sdio_release_host(bdev->func); + + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); +} + +static void btmtksdio_interrupt(struct sdio_func *func) +{ + struct btmtksdio_dev *bdev = sdio_get_drvdata(func); + + /* Disable interrupt */ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0); + + schedule_work(&bdev->txrx_work); +} + +static int btmtksdio_open(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + u32 val; + int err; + + sdio_claim_host(bdev->func); + + err = sdio_enable_func(bdev->func); + if (err < 0) + goto err_release_host; + + set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state); + + err = btmtksdio_drv_pmctrl(bdev); + if (err < 0) + goto err_disable_func; + + /* Disable interrupt & mask out all interrupt sources */ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err); + if (err < 0) + goto err_disable_func; + + sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err); + if (err < 0) + goto err_disable_func; + + err = sdio_claim_irq(bdev->func, btmtksdio_interrupt); + if (err < 0) + goto err_disable_func; + + err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE); + if (err < 0) + goto err_release_irq; + + /* SDIO CMD 5 allows the SDIO device back to idle state an + * synchronous interrupt is supported in SDIO 4-bit mode + */ + val = sdio_readl(bdev->func, MTK_REG_CSDIOCSR, &err); + if (err < 0) + goto err_release_irq; + + val |= SDIO_INT_CTL; + sdio_writel(bdev->func, val, MTK_REG_CSDIOCSR, &err); + if (err < 0) + goto err_release_irq; + + /* Explitly set write-1-clear method */ + val = sdio_readl(bdev->func, MTK_REG_CHCR, &err); + if (err < 0) + goto err_release_irq; + + val |= C_INT_CLR_CTRL; + sdio_writel(bdev->func, val, MTK_REG_CHCR, &err); + if (err < 0) + goto err_release_irq; + + /* Setup interrupt sources */ + sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW, + MTK_REG_CHIER, &err); + if (err < 0) + goto err_release_irq; + + /* Enable interrupt */ + sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err); + if (err < 0) + goto err_release_irq; + + sdio_release_host(bdev->func); + + return 0; + +err_release_irq: + sdio_release_irq(bdev->func); + +err_disable_func: + sdio_disable_func(bdev->func); + +err_release_host: + sdio_release_host(bdev->func); + + return err; +} + +static int btmtksdio_close(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + + 
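+	/* Tear down in the reverse order of btmtksdio_open(): quiesce the
+	 * interrupt, release the IRQ handler, flush the pending txrx work,
+	 * hand ownership back to the firmware and disable the SDIO function.
+	 */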
sdio_claim_host(bdev->func); + + /* Disable interrupt */ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); + + sdio_release_irq(bdev->func); + + cancel_work_sync(&bdev->txrx_work); + + btmtksdio_fw_pmctrl(bdev); + + clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state); + sdio_disable_func(bdev->func); + + sdio_release_host(bdev->func); + + return 0; +} + +static int btmtksdio_flush(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + + skb_queue_purge(&bdev->txq); + + cancel_work_sync(&bdev->txrx_work); + + return 0; +} + +static int btmtksdio_func_query(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + int status, err; + u8 param = 0; + + /* Query whether the function is enabled */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 4; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query function status (%d)", err); + return err; + } + + return status; +} + +static int mt76xx_setup(struct hci_dev *hdev, const char *fwname) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + struct btmtk_tci_sleep tci_sleep; + struct sk_buff *skb; + int err, status; + u8 param = 0x1; + + /* Query whether the firmware is already download */ + wmt_params.op = BTMTK_WMT_SEMAPHORE; + wmt_params.flag = 1; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); + return err; + } + + if (status == BTMTK_WMT_PATCH_DONE) { + bt_dev_info(hdev, "Firmware already downloaded"); + goto ignore_setup_fw; + } + + /* Setup a firmware which the device definitely requires */ + err = btmtk_setup_firmware(hdev, fwname, mtk_hci_wmt_sync); + if (err < 0) + return err; + +ignore_setup_fw: + /* Query whether the device is already enabled */ + err = readx_poll_timeout(btmtksdio_func_query, hdev, status, + status < 0 || status != BTMTK_WMT_ON_PROGRESS, + 2000, 5000000); + /* -ETIMEDOUT happens */ + if (err < 0) + return err; + + /* The other errors happen in btusb_mtk_func_query */ + if (status < 0) + return status; + + if (status == BTMTK_WMT_ON_DONE) { + bt_dev_info(hdev, "function already on"); + goto ignore_func_on; + } + + /* Enable Bluetooth protocol */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); + +ignore_func_on: + /* Apply the low power environment setup */ + tci_sleep.mode = 0x5; + tci_sleep.duration = cpu_to_le16(0x640); + tci_sleep.host_duration = cpu_to_le16(0x640); + tci_sleep.host_wakeup_pin = 0; + tci_sleep.time_compensation = 0; + + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} + +static int mt79xx_setup(struct hci_dev *hdev, const char *fwname) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + u8 param = 0x1; + int err; + + err = btmtk_setup_firmware_79xx(hdev, fwname, 
mtk_hci_wmt_sync); + if (err < 0) { + bt_dev_err(hdev, "Failed to setup 79xx firmware (%d)", err); + return err; + } + + err = btmtksdio_fw_pmctrl(bdev); + if (err < 0) + return err; + + err = btmtksdio_drv_pmctrl(bdev); + if (err < 0) + return err; + + /* Enable Bluetooth protocol */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + hci_set_msft_opcode(hdev, 0xFD30); + hci_set_aosp_capable(hdev); + set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); + + return err; +} + +static int btmtksdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val) +{ + struct btmtk_hci_wmt_params wmt_params; + struct reg_read_cmd reg_read = { + .type = 1, + .num = 1, + }; + u32 status; + int err; + + reg_read.addr = cpu_to_le32(reg); + wmt_params.op = BTMTK_WMT_REGISTER; + wmt_params.flag = BTMTK_WMT_REG_READ; + wmt_params.dlen = sizeof(reg_read); + wmt_params.data = ®_read; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to read reg (%d)", err); + return err; + } + + *val = status; + + return err; +} + +static int btmtksdio_mtk_reg_write(struct hci_dev *hdev, u32 reg, u32 val, u32 mask) +{ + struct btmtk_hci_wmt_params wmt_params; + const struct reg_write_cmd reg_write = { + .type = 1, + .num = 1, + .addr = cpu_to_le32(reg), + .data = cpu_to_le32(val), + .mask = cpu_to_le32(mask), + }; + int err, status; + + wmt_params.op = BTMTK_WMT_REGISTER; + wmt_params.flag = BTMTK_WMT_REG_WRITE; + wmt_params.dlen = sizeof(reg_write); + wmt_params.data = ®_write; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) + bt_dev_err(hdev, "Failed to write reg (%d)", err); + + return err; +} + +static int btmtksdio_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) +{ + /* uses 1 as data path id for all the usecases */ + *data_path_id = 1; + return 0; +} + +static int btmtksdio_get_codec_config_data(struct hci_dev *hdev, + __u8 link, struct bt_codec *codec, + __u8 *ven_len, __u8 **ven_data) +{ + int err = 0; + + if (!ven_data || !ven_len) + return -EINVAL; + + *ven_len = 0; + *ven_data = NULL; + + if (link != ESCO_LINK) { + bt_dev_err(hdev, "Invalid link type(%u)", link); + return -EINVAL; + } + + *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); + if (!*ven_data) { + err = -ENOMEM; + goto error; + } + + /* supports only CVSD and mSBC offload codecs */ + switch (codec->id) { + case 0x02: + **ven_data = 0x00; + break; + case 0x05: + **ven_data = 0x01; + break; + default: + err = -EINVAL; + bt_dev_err(hdev, "Invalid codec id(%u)", codec->id); + goto error; + } + /* codec and its capabilities are pre-defined to ids + * preset id = 0x00 represents CVSD codec with sampling rate 8K + * preset id = 0x01 represents mSBC codec with sampling rate 16K + */ + *ven_len = sizeof(__u8); + return err; + +error: + kfree(*ven_data); + *ven_data = NULL; + return err; +} + +static int btmtksdio_sco_setting(struct hci_dev *hdev) +{ + const struct btmtk_sco sco_setting = { + .clock_config = 0x49, + .channel_format_config = 0x80, + }; + struct sk_buff *skb; + u32 val; + int err; + + /* Enable SCO over I2S/PCM for MediaTek chipset */ + skb = __hci_cmd_sync(hdev, 0xfc72, sizeof(sco_setting), + &sco_setting, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + 
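+	/* With SCO over I2S/PCM enabled by the vendor command above, update
+	 * both MT7921 pin-mux registers for the PCM audio path.
+	 */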
err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_0, &val); + if (err < 0) + return err; + + val |= 0x11000000; + err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_0, val, ~0); + if (err < 0) + return err; + + err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val); + if (err < 0) + return err; + + val |= 0x00000101; + err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0); + if (err < 0) + return err; + + hdev->get_data_path_id = btmtksdio_get_data_path_id; + hdev->get_codec_config_data = btmtksdio_get_codec_config_data; + + return err; +} + +static int btmtksdio_reset_setting(struct hci_dev *hdev) +{ + int err; + u32 val; + + err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val); + if (err < 0) + return err; + + val |= 0x20; /* set the pin (bit field 11:8) work as GPIO mode */ + err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0); + if (err < 0) + return err; + + err = btmtksdio_mtk_reg_read(hdev, MT7921_BTSYS_RST, &val); + if (err < 0) + return err; + + val |= MT7921_BTSYS_RST_WITH_GPIO; + return btmtksdio_mtk_reg_write(hdev, MT7921_BTSYS_RST, val, ~0); +} + +static int btmtksdio_setup(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + ktime_t calltime, delta, rettime; + unsigned long long duration; + char fwname[64]; + int err, dev_id; + u32 fw_version = 0, val; + + calltime = ktime_get(); + set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); + + switch (bdev->data->chipid) { + case 0x7921: + if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) { + err = btmtksdio_mtk_reg_read(hdev, MT7921_DLSTATUS, + &val); + if (err < 0) + return err; + + val &= ~BT_DL_STATE; + err = btmtksdio_mtk_reg_write(hdev, MT7921_DLSTATUS, + val, ~0); + if (err < 0) + return err; + + btmtksdio_fw_pmctrl(bdev); + msleep(20); + btmtksdio_drv_pmctrl(bdev); + + clear_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state); + } + + err = btmtksdio_mtk_reg_read(hdev, 0x70010200, &dev_id); + if (err < 0) { + bt_dev_err(hdev, "Failed to get device id (%d)", err); + return err; + } + + err = btmtksdio_mtk_reg_read(hdev, 0x80021004, &fw_version); + if (err < 0) { + bt_dev_err(hdev, "Failed to get fw version (%d)", err); + return err; + } + + snprintf(fwname, sizeof(fwname), + "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", + dev_id & 0xffff, (fw_version & 0xff) + 1); + err = mt79xx_setup(hdev, fwname); + if (err < 0) + return err; + + /* Enable SCO over I2S/PCM */ + err = btmtksdio_sco_setting(hdev); + if (err < 0) { + bt_dev_err(hdev, "Failed to enable SCO setting (%d)", err); + return err; + } + + /* Enable WBS with mSBC codec */ + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + + /* Enable GPIO reset mechanism */ + if (bdev->reset) { + err = btmtksdio_reset_setting(hdev); + if (err < 0) { + bt_dev_err(hdev, "Failed to enable Reset setting (%d)", err); + devm_gpiod_put(bdev->dev, bdev->reset); + bdev->reset = NULL; + } + } + + /* Valid LE States quirk for MediaTek 7921 */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + + break; + case 0x7663: + case 0x7668: + err = mt76xx_setup(hdev, bdev->data->fwname); + if (err < 0) + return err; + break; + default: + return -ENODEV; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + pm_runtime_set_autosuspend_delay(bdev->dev, + MTKBTSDIO_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(bdev->dev); + + err = pm_runtime_set_active(bdev->dev); + if (err < 0) + return err; + + /* Default forbid runtime auto suspend, that can be allowed by + 
* enable_autosuspend flag or the PM runtime entry under sysfs. + */ + pm_runtime_forbid(bdev->dev); + pm_runtime_enable(bdev->dev); + + if (enable_autosuspend) + pm_runtime_allow(bdev->dev); + + bt_dev_info(hdev, "Device setup in %llu usecs", duration); + + return 0; +} + +static int btmtksdio_shutdown(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + u8 param = 0x0; + int err; + + /* Get back the state to be consistent with the state + * in btmtksdio_setup. + */ + pm_runtime_get_sync(bdev->dev); + + /* wmt command only works until the reset is complete */ + if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) + goto ignore_wmt_cmd; + + /* Disable the device */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + +ignore_wmt_cmd: + pm_runtime_put_noidle(bdev->dev); + pm_runtime_disable(bdev->dev); + + return 0; +} + +static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + + default: + return -EILSEQ; + } + + skb_queue_tail(&bdev->txq, skb); + + schedule_work(&bdev->txrx_work); + + return 0; +} + +static void btmtksdio_cmd_timeout(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + u32 status; + int err; + + if (!bdev->reset || bdev->data->chipid != 0x7921) + return; + + pm_runtime_get_sync(bdev->dev); + + if (test_and_set_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) + return; + + sdio_claim_host(bdev->func); + + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); + skb_queue_purge(&bdev->txq); + cancel_work_sync(&bdev->txrx_work); + + gpiod_set_value_cansleep(bdev->reset, 1); + msleep(100); + gpiod_set_value_cansleep(bdev->reset, 0); + + err = readx_poll_timeout(btmtksdio_chcr_query, bdev, status, + status & BT_RST_DONE, 100000, 2000000); + if (err < 0) { + bt_dev_err(hdev, "Failed to reset (%d)", err); + goto err; + } + + clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); +err: + sdio_release_host(bdev->func); + + pm_runtime_put_noidle(bdev->dev); + pm_runtime_disable(bdev->dev); + + hci_reset_dev(hdev); +} + +static bool btmtksdio_sdio_inband_wakeup(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + + return device_may_wakeup(bdev->dev); +} + +static bool btmtksdio_sdio_wakeup(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + bool may_wakeup = device_may_wakeup(bdev->dev); + const struct btmtk_wakeon bt_awake = { + .mode = 0x1, + .gpo = 0, + .active_high = 0x1, + .enable_delay = cpu_to_le16(0xc80), + .wakeup_delay = cpu_to_le16(0x20), + }; + + if (may_wakeup && bdev->data->chipid == 0x7921) { + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc27, sizeof(bt_awake), + &bt_awake, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + may_wakeup = false; + else + kfree_skb(skb); + } + + return may_wakeup; +} + +static int btmtksdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct btmtksdio_dev *bdev; + struct hci_dev *hdev; + int err; + + bdev = devm_kzalloc(&func->dev, 
sizeof(*bdev), GFP_KERNEL); + if (!bdev) + return -ENOMEM; + + bdev->data = (void *)id->driver_data; + if (!bdev->data) + return -ENODEV; + + bdev->dev = &func->dev; + bdev->func = func; + + INIT_WORK(&bdev->txrx_work, btmtksdio_txrx_work); + skb_queue_head_init(&bdev->txq); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(&func->dev, "Can't allocate HCI device\n"); + return -ENOMEM; + } + + bdev->hdev = hdev; + + hdev->bus = HCI_SDIO; + hci_set_drvdata(hdev, bdev); + + hdev->open = btmtksdio_open; + hdev->close = btmtksdio_close; + hdev->cmd_timeout = btmtksdio_cmd_timeout; + hdev->flush = btmtksdio_flush; + hdev->setup = btmtksdio_setup; + hdev->shutdown = btmtksdio_shutdown; + hdev->send = btmtksdio_send_frame; + hdev->wakeup = btmtksdio_sdio_wakeup; + /* + * If SDIO controller supports wake on Bluetooth, sending a wakeon + * command is not necessary. + */ + if (device_can_wakeup(func->card->host->parent)) + hdev->wakeup = btmtksdio_sdio_inband_wakeup; + else + hdev->wakeup = btmtksdio_sdio_wakeup; + hdev->set_bdaddr = btmtk_set_bdaddr; + + SET_HCIDEV_DEV(hdev, &func->dev); + + hdev->manufacturer = 70; + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + + sdio_set_drvdata(func, bdev); + + err = hci_register_dev(hdev); + if (err < 0) { + dev_err(&func->dev, "Can't register HCI device\n"); + hci_free_dev(hdev); + return err; + } + + /* pm_runtime_enable would be done after the firmware is being + * downloaded because the core layer probably already enables + * runtime PM for this func such as the case host->caps & + * MMC_CAP_POWER_OFF_CARD. + */ + if (pm_runtime_enabled(bdev->dev)) + pm_runtime_disable(bdev->dev); + + /* As explaination in drivers/mmc/core/sdio_bus.c tells us: + * Unbound SDIO functions are always suspended. + * During probe, the function is set active and the usage count + * is incremented. If the driver supports runtime PM, + * it should call pm_runtime_put_noidle() in its probe routine and + * pm_runtime_get_noresume() in its remove routine. + * + * So, put a pm_runtime_put_noidle here ! 
+ */ + pm_runtime_put_noidle(bdev->dev); + + err = device_init_wakeup(bdev->dev, true); + if (err) + bt_dev_err(hdev, "failed to initialize device wakeup"); + + bdev->dev->of_node = of_find_compatible_node(NULL, NULL, + "mediatek,mt7921s-bluetooth"); + bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(bdev->reset)) + err = PTR_ERR(bdev->reset); + + return err; +} + +static void btmtksdio_remove(struct sdio_func *func) +{ + struct btmtksdio_dev *bdev = sdio_get_drvdata(func); + struct hci_dev *hdev; + + if (!bdev) + return; + + /* Be consistent the state in btmtksdio_probe */ + pm_runtime_get_noresume(bdev->dev); + + hdev = bdev->hdev; + + sdio_set_drvdata(func, NULL); + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +#ifdef CONFIG_PM +static int btmtksdio_runtime_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmtksdio_dev *bdev; + int err; + + bdev = sdio_get_drvdata(func); + if (!bdev) + return 0; + + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + return 0; + + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + + err = btmtksdio_fw_pmctrl(bdev); + + bt_dev_dbg(bdev->hdev, "status (%d) return ownership to device", err); + + return err; +} + +static int btmtksdio_runtime_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmtksdio_dev *bdev; + int err; + + bdev = sdio_get_drvdata(func); + if (!bdev) + return 0; + + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + return 0; + + err = btmtksdio_drv_pmctrl(bdev); + + bt_dev_dbg(bdev->hdev, "status (%d) get ownership from device", err); + + return err; +} + +static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend, + btmtksdio_runtime_resume, NULL); +#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops) +#else /* CONFIG_PM */ +#define BTMTKSDIO_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct sdio_driver btmtksdio_driver = { + .name = "btmtksdio", + .probe = btmtksdio_probe, + .remove = btmtksdio_remove, + .id_table = btmtksdio_table, + .drv = { + .owner = THIS_MODULE, + .pm = BTMTKSDIO_PM_OPS, + } +}; + +module_sdio_driver(btmtksdio_driver); + +module_param(enable_autosuspend, bool, 0644); +MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default"); + +MODULE_AUTHOR("Sean Wang "); +MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c new file mode 100644 index 000000000..203a000a8 --- /dev/null +++ b/drivers/bluetooth/btmtkuart.c @@ -0,0 +1,994 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 MediaTek Inc. 
+ +/* + * Bluetooth support for MediaTek serial devices + * + * Author: Sean Wang + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "h4_recv.h" +#include "btmtk.h" + +#define VERSION "0.2" + +#define MTK_STP_TLR_SIZE 2 + +#define BTMTKUART_TX_STATE_ACTIVE 1 +#define BTMTKUART_TX_STATE_WAKEUP 2 +#define BTMTKUART_TX_WAIT_VND_EVT 3 +#define BTMTKUART_REQUIRED_WAKEUP 4 + +#define BTMTKUART_FLAG_STANDALONE_HW BIT(0) + +struct mtk_stp_hdr { + u8 prefix; + __be16 dlen; + u8 cs; +} __packed; + +struct btmtkuart_data { + unsigned int flags; + const char *fwname; +}; + +struct btmtkuart_dev { + struct hci_dev *hdev; + struct serdev_device *serdev; + + struct clk *clk; + struct clk *osc; + struct regulator *vcc; + struct gpio_desc *reset; + struct gpio_desc *boot; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_runtime; + struct pinctrl_state *pins_boot; + speed_t desired_speed; + speed_t curr_speed; + + struct work_struct tx_work; + unsigned long tx_state; + struct sk_buff_head txq; + + struct sk_buff *rx_skb; + struct sk_buff *evt_skb; + + u8 stp_pad[6]; + u8 stp_cursor; + u16 stp_dlen; + + const struct btmtkuart_data *data; +}; + +#define btmtkuart_is_standalone(bdev) \ + ((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW) +#define btmtkuart_is_builtin_soc(bdev) \ + !((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW) + +static int mtk_hci_wmt_sync(struct hci_dev *hdev, + struct btmtk_hci_wmt_params *wmt_params) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; + u32 hlen, status = BTMTK_WMT_INVALID; + struct btmtk_hci_wmt_evt *wmt_evt; + struct btmtk_hci_wmt_cmd *wc; + struct btmtk_wmt_hdr *hdr; + int err; + + /* Send the WMT command and wait until the WMT event returns */ + hlen = sizeof(*hdr) + wmt_params->dlen; + if (hlen > 255) { + err = -EINVAL; + goto err_free_skb; + } + + wc = kzalloc(hlen, GFP_KERNEL); + if (!wc) { + err = -ENOMEM; + goto err_free_skb; + } + + hdr = &wc->hdr; + hdr->dir = 1; + hdr->op = wmt_params->op; + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); + hdr->flag = wmt_params->flag; + memcpy(wc->data, wmt_params->data, wmt_params->dlen); + + set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); + if (err < 0) { + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); + goto err_free_wc; + } + + /* The vendor specific WMT commands are all answered by a vendor + * specific event and will not have the Command Status or Command + * Complete as with usual HCI command flow control. + * + * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT + * state to be cleared. The driver specific event receive routine + * will clear that state and with that indicate completion of the + * WMT command. 
+ */ + err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT, + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); + if (err == -EINTR) { + bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); + goto err_free_wc; + } + + if (err) { + bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); + err = -ETIMEDOUT; + goto err_free_wc; + } + + /* Parse and handle the return WMT event */ + wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data; + if (wmt_evt->whdr.op != hdr->op) { + bt_dev_err(hdev, "Wrong op received %d expected %d", + wmt_evt->whdr.op, hdr->op); + err = -EIO; + goto err_free_wc; + } + + switch (wmt_evt->whdr.op) { + case BTMTK_WMT_SEMAPHORE: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_UNDONE; + else + status = BTMTK_WMT_PATCH_DONE; + break; + case BTMTK_WMT_FUNC_CTRL: + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) + status = BTMTK_WMT_ON_DONE; + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) + status = BTMTK_WMT_ON_PROGRESS; + else + status = BTMTK_WMT_ON_UNDONE; + break; + } + + if (wmt_params->status) + *wmt_params->status = status; + +err_free_wc: + kfree(wc); +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + + return err; +} + +static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct hci_event_hdr *hdr = (void *)skb->data; + int err; + + /* When someone waits for the WMT event, the skb is being cloned + * and being processed the events from there then. + */ + if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) { + bdev->evt_skb = skb_clone(skb, GFP_KERNEL); + if (!bdev->evt_skb) { + err = -ENOMEM; + goto err_out; + } + } + + err = hci_recv_frame(hdev, skb); + if (err < 0) + goto err_free_skb; + + if (hdr->evt == HCI_EV_WMT) { + if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT, + &bdev->tx_state)) { + /* Barrier to sync with other CPUs */ + smp_mb__after_atomic(); + wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT); + } + } + + return 0; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + +err_out: + return err; +} + +static const struct h4_recv_pkt mtk_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = btmtkuart_recv_event }, +}; + +static void btmtkuart_tx_work(struct work_struct *work) +{ + struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev, + tx_work); + struct serdev_device *serdev = bdev->serdev; + struct hci_dev *hdev = bdev->hdev; + + while (1) { + clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state); + + while (1) { + struct sk_buff *skb = skb_dequeue(&bdev->txq); + int len; + + if (!skb) + break; + + len = serdev_device_write_buf(serdev, skb->data, + skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len > 0) { + skb_queue_head(&bdev->txq, skb); + break; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + kfree_skb(skb); + } + + if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state)) + break; + } + + clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state); +} + +static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev) +{ + if 
(test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state)) + set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state); + + schedule_work(&bdev->tx_work); +} + +static const unsigned char * +mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count, + int *sz_h4) +{ + struct mtk_stp_hdr *shdr; + + /* The cursor is reset when all the data of STP is consumed out */ + if (!bdev->stp_dlen && bdev->stp_cursor >= 6) + bdev->stp_cursor = 0; + + /* Filling pad until all STP info is obtained */ + while (bdev->stp_cursor < 6 && count > 0) { + bdev->stp_pad[bdev->stp_cursor] = *data; + bdev->stp_cursor++; + data++; + count--; + } + + /* Retrieve STP info and have a sanity check */ + if (!bdev->stp_dlen && bdev->stp_cursor >= 6) { + shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2]; + bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff; + + /* Resync STP when unexpected data is being read */ + if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) { + bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)", + shdr->prefix, bdev->stp_dlen); + bdev->stp_cursor = 2; + bdev->stp_dlen = 0; + } + } + + /* Directly quit when there's no data found for H4 can process */ + if (count <= 0) + return NULL; + + /* Tranlate to how much the size of data H4 can handle so far */ + *sz_h4 = min_t(int, count, bdev->stp_dlen); + + /* Update the remaining size of STP packet */ + bdev->stp_dlen -= *sz_h4; + + /* Data points to STP payload which can be handled by H4 */ + return data; +} + +static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + const unsigned char *p_left = data, *p_h4; + int sz_left = count, sz_h4, adv; + int err; + + while (sz_left > 0) { + /* The serial data received from MT7622 BT controller is + * at all time padded around with the STP header and tailer. + * + * A full STP packet is looking like + * ----------------------------------- + * | STP header | H:4 | STP tailer | + * ----------------------------------- + * but it doesn't guarantee to contain a full H:4 packet which + * means that it's possible for multiple STP packets forms a + * full H:4 packet that means extra STP header + length doesn't + * indicate a full H:4 frame, things can fragment. Whose length + * recorded in STP header just shows up the most length the + * H:4 engine can handle currently. 
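+		 *
+		 * As an illustration, a single H:4 ACL frame may arrive spread
+		 * across several STP packets, and an STP packet may equally end
+		 * in the middle of an H:4 header. mtk_stp_split() below only
+		 * strips the STP framing; reassembling complete H:4 frames is
+		 * left to h4_recv_buf().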
+ */ + + p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4); + if (!p_h4) + break; + + adv = p_h4 - p_left; + sz_left -= adv; + p_left += adv; + + bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, + sz_h4, mtk_recv_pkts, + ARRAY_SIZE(mtk_recv_pkts)); + if (IS_ERR(bdev->rx_skb)) { + err = PTR_ERR(bdev->rx_skb); + bt_dev_err(bdev->hdev, + "Frame reassembly failed (%d)", err); + bdev->rx_skb = NULL; + return; + } + + sz_left -= sz_h4; + p_left += sz_h4; + } +} + +static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data, + size_t count) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + + btmtkuart_recv(bdev->hdev, data, count); + + bdev->hdev->stat.byte_rx += count; + + return count; +} + +static void btmtkuart_write_wakeup(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + + btmtkuart_tx_wakeup(bdev); +} + +static const struct serdev_device_ops btmtkuart_client_ops = { + .receive_buf = btmtkuart_receive_buf, + .write_wakeup = btmtkuart_write_wakeup, +}; + +static int btmtkuart_open(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct device *dev; + int err; + + err = serdev_device_open(bdev->serdev); + if (err) { + bt_dev_err(hdev, "Unable to open UART device %s", + dev_name(&bdev->serdev->dev)); + goto err_open; + } + + if (btmtkuart_is_standalone(bdev)) { + if (bdev->curr_speed != bdev->desired_speed) + err = serdev_device_set_baudrate(bdev->serdev, + 115200); + else + err = serdev_device_set_baudrate(bdev->serdev, + bdev->desired_speed); + + if (err < 0) { + bt_dev_err(hdev, "Unable to set baudrate UART device %s", + dev_name(&bdev->serdev->dev)); + goto err_serdev_close; + } + + serdev_device_set_flow_control(bdev->serdev, false); + } + + bdev->stp_cursor = 2; + bdev->stp_dlen = 0; + + dev = &bdev->serdev->dev; + + /* Enable the power domain and clock the device requires */ + pm_runtime_enable(dev); + err = pm_runtime_resume_and_get(dev); + if (err < 0) + goto err_disable_rpm; + + err = clk_prepare_enable(bdev->clk); + if (err < 0) + goto err_put_rpm; + + return 0; + +err_put_rpm: + pm_runtime_put_sync(dev); +err_disable_rpm: + pm_runtime_disable(dev); +err_serdev_close: + serdev_device_close(bdev->serdev); +err_open: + return err; +} + +static int btmtkuart_close(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct device *dev = &bdev->serdev->dev; + + /* Shutdown the clock and power domain the device requires */ + clk_disable_unprepare(bdev->clk); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + serdev_device_close(bdev->serdev); + + return 0; +} + +static int btmtkuart_flush(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + + /* Flush any pending characters */ + serdev_device_write_flush(bdev->serdev); + skb_queue_purge(&bdev->txq); + + cancel_work_sync(&bdev->tx_work); + + kfree_skb(bdev->rx_skb); + bdev->rx_skb = NULL; + + bdev->stp_cursor = 2; + bdev->stp_dlen = 0; + + return 0; +} + +static int btmtkuart_func_query(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + int status, err; + u8 param = 0; + + /* Query whether the function is enabled */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 4; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query function status (%d)", err); + return err; + } + + return 
status; +} + +static int btmtkuart_change_baudrate(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + __le32 baudrate; + u8 param; + int err; + + /* Indicate to the device that the host is ready to change to a + * new baudrate. + */ + baudrate = cpu_to_le32(bdev->desired_speed); + wmt_params.op = BTMTK_WMT_HIF; + wmt_params.flag = 1; + wmt_params.dlen = 4; + wmt_params.data = &baudrate; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to set device baudrate (%d)", err); + return err; + } + + err = serdev_device_set_baudrate(bdev->serdev, + bdev->desired_speed); + if (err < 0) { + bt_dev_err(hdev, "Failed to set up host baudrate (%d)", + err); + return err; + } + + serdev_device_set_flow_control(bdev->serdev, false); + + /* Send a dummy byte 0xff to activate the new baudrate */ + param = 0xff; + err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param)); + if (err < 0 || err < sizeof(param)) + return err; + + serdev_device_wait_until_sent(bdev->serdev, 0); + + /* Give the device some time to finish changing the baudrate */ + usleep_range(20000, 22000); + + /* Test the new baudrate */ + wmt_params.op = BTMTK_WMT_TEST; + wmt_params.flag = 7; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to test new baudrate (%d)", + err); + return err; + } + + bdev->curr_speed = bdev->desired_speed; + + return 0; +} + +static int btmtkuart_setup(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + ktime_t calltime, delta, rettime; + struct btmtk_tci_sleep tci_sleep; + unsigned long long duration; + struct sk_buff *skb; + int err, status; + u8 param = 0x1; + + calltime = ktime_get(); + + /* Waking up the MCUSYS is required for certain devices before we + * start any setup.
+ */ + if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) { + wmt_params.op = BTMTK_WMT_WAKEUP; + wmt_params.flag = 3; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err); + return err; + } + + clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state); + } + + if (btmtkuart_is_standalone(bdev)) + btmtkuart_change_baudrate(hdev); + + /* Query whether the firmware is already downloaded */ + wmt_params.op = BTMTK_WMT_SEMAPHORE; + wmt_params.flag = 1; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); + return err; + } + + if (status == BTMTK_WMT_PATCH_DONE) { + bt_dev_info(hdev, "Firmware already downloaded"); + goto ignore_setup_fw; + } + + /* Set up the firmware the device requires */ + err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync); + if (err < 0) + return err; + +ignore_setup_fw: + /* Query whether the device is already enabled */ + err = readx_poll_timeout(btmtkuart_func_query, hdev, status, + status < 0 || status != BTMTK_WMT_ON_PROGRESS, + 2000, 5000000); + /* -ETIMEDOUT happens when the poll times out */ + if (err < 0) + return err; + + /* The other errors happen in btmtkuart_func_query */ + if (status < 0) + return status; + + if (status == BTMTK_WMT_ON_DONE) { + bt_dev_info(hdev, "function already on"); + goto ignore_func_on; + } + + /* Enable Bluetooth protocol */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + +ignore_func_on: + /* Apply the low power environment setup */ + tci_sleep.mode = 0x5; + tci_sleep.duration = cpu_to_le16(0x640); + tci_sleep.host_duration = cpu_to_le16(0x640); + tci_sleep.host_wakeup_pin = 0; + tci_sleep.time_compensation = 0; + + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); + return err; + } + kfree_skb(skb); + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device setup in %llu usecs", duration); + + return 0; +} + +static int btmtkuart_shutdown(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + u8 param = 0x0; + int err; + + /* Disable the device */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + return 0; +} + +static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct mtk_stp_hdr *shdr; + int err, dlen, type = 0; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + /* Make sure that there is enough room for the STP header and trailer */ + if (unlikely(skb_headroom(skb) < sizeof(*shdr)) || + (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) { + err = pskb_expand_head(skb,
sizeof(*shdr), MTK_STP_TLR_SIZE, + GFP_ATOMIC); + if (err < 0) + return err; + } + + /* Add the STP header */ + dlen = skb->len; + shdr = skb_push(skb, sizeof(*shdr)); + shdr->prefix = 0x80; + shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12)); + shdr->cs = 0; /* MT7622 doesn't care about checksum value */ + + /* Add the STP trailer */ + skb_put_zero(skb, MTK_STP_TLR_SIZE); + + skb_queue_tail(&bdev->txq, skb); + + btmtkuart_tx_wakeup(bdev); + return 0; +} + +static int btmtkuart_parse_dt(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + struct device_node *node = serdev->dev.of_node; + u32 speed = 921600; + int err; + + if (btmtkuart_is_standalone(bdev)) { + of_property_read_u32(node, "current-speed", &speed); + + bdev->desired_speed = speed; + + bdev->vcc = devm_regulator_get(&serdev->dev, "vcc"); + if (IS_ERR(bdev->vcc)) { + err = PTR_ERR(bdev->vcc); + return err; + } + + bdev->osc = devm_clk_get_optional(&serdev->dev, "osc"); + if (IS_ERR(bdev->osc)) { + err = PTR_ERR(bdev->osc); + return err; + } + + bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot", + GPIOD_OUT_LOW); + if (IS_ERR(bdev->boot)) { + err = PTR_ERR(bdev->boot); + return err; + } + + bdev->pinctrl = devm_pinctrl_get(&serdev->dev); + if (IS_ERR(bdev->pinctrl)) { + err = PTR_ERR(bdev->pinctrl); + return err; + } + + bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl, + "default"); + if (IS_ERR(bdev->pins_boot) && !bdev->boot) { + err = PTR_ERR(bdev->pins_boot); + dev_err(&serdev->dev, + "Should assign RXD to LOW at boot stage\n"); + return err; + } + + bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl, + "runtime"); + if (IS_ERR(bdev->pins_runtime)) { + err = PTR_ERR(bdev->pins_runtime); + return err; + } + + bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(bdev->reset)) { + err = PTR_ERR(bdev->reset); + return err; + } + } else if (btmtkuart_is_builtin_soc(bdev)) { + bdev->clk = devm_clk_get(&serdev->dev, "ref"); + if (IS_ERR(bdev->clk)) + return PTR_ERR(bdev->clk); + } + + return 0; +} + +static int btmtkuart_probe(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev; + struct hci_dev *hdev; + int err; + + bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL); + if (!bdev) + return -ENOMEM; + + bdev->data = of_device_get_match_data(&serdev->dev); + if (!bdev->data) + return -ENODEV; + + bdev->serdev = serdev; + serdev_device_set_drvdata(serdev, bdev); + + serdev_device_set_client_ops(serdev, &btmtkuart_client_ops); + + err = btmtkuart_parse_dt(serdev); + if (err < 0) + return err; + + INIT_WORK(&bdev->tx_work, btmtkuart_tx_work); + skb_queue_head_init(&bdev->txq); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(&serdev->dev, "Can't allocate HCI device\n"); + return -ENOMEM; + } + + bdev->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, bdev); + + hdev->open = btmtkuart_open; + hdev->close = btmtkuart_close; + hdev->flush = btmtkuart_flush; + hdev->setup = btmtkuart_setup; + hdev->shutdown = btmtkuart_shutdown; + hdev->send = btmtkuart_send_frame; + hdev->set_bdaddr = btmtk_set_bdaddr; + SET_HCIDEV_DEV(hdev, &serdev->dev); + + hdev->manufacturer = 70; + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + + if (btmtkuart_is_standalone(bdev)) { + err = clk_prepare_enable(bdev->osc); + if (err < 0) + goto err_hci_free_dev; + + if (bdev->boot) { + gpiod_set_value_cansleep(bdev->boot, 1); + } else { + /* Switch to the specific pin 
state for the booting + * requires. + */ + pinctrl_select_state(bdev->pinctrl, bdev->pins_boot); + } + + /* Power on */ + err = regulator_enable(bdev->vcc); + if (err < 0) + goto err_clk_disable_unprepare; + + /* Reset if the reset-gpios is available otherwise the board + * -level design should be guaranteed. + */ + if (bdev->reset) { + gpiod_set_value_cansleep(bdev->reset, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(bdev->reset, 0); + } + + /* Wait some time until device got ready and switch to the pin + * mode the device requires for UART transfers. + */ + msleep(50); + + if (bdev->boot) + devm_gpiod_put(&serdev->dev, bdev->boot); + + pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime); + + /* A standalone device doesn't depends on power domain on SoC, + * so mark it as no callbacks. + */ + pm_runtime_no_callbacks(&serdev->dev); + + set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state); + } + + err = hci_register_dev(hdev); + if (err < 0) { + dev_err(&serdev->dev, "Can't register HCI device\n"); + goto err_regulator_disable; + } + + return 0; + +err_regulator_disable: + if (btmtkuart_is_standalone(bdev)) + regulator_disable(bdev->vcc); +err_clk_disable_unprepare: + if (btmtkuart_is_standalone(bdev)) + clk_disable_unprepare(bdev->osc); +err_hci_free_dev: + hci_free_dev(hdev); + + return err; +} + +static void btmtkuart_remove(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + struct hci_dev *hdev = bdev->hdev; + + if (btmtkuart_is_standalone(bdev)) { + regulator_disable(bdev->vcc); + clk_disable_unprepare(bdev->osc); + } + + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +static const struct btmtkuart_data mt7622_data __maybe_unused = { + .fwname = FIRMWARE_MT7622, +}; + +static const struct btmtkuart_data mt7663_data __maybe_unused = { + .flags = BTMTKUART_FLAG_STANDALONE_HW, + .fwname = FIRMWARE_MT7663, +}; + +static const struct btmtkuart_data mt7668_data __maybe_unused = { + .flags = BTMTKUART_FLAG_STANDALONE_HW, + .fwname = FIRMWARE_MT7668, +}; + +#ifdef CONFIG_OF +static const struct of_device_id mtk_of_match_table[] = { + { .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data}, + { .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data}, + { .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data}, + { } +}; +MODULE_DEVICE_TABLE(of, mtk_of_match_table); +#endif + +static struct serdev_device_driver btmtkuart_driver = { + .probe = btmtkuart_probe, + .remove = btmtkuart_remove, + .driver = { + .name = "btmtkuart", + .of_match_table = of_match_ptr(mtk_of_match_table), + }, +}; + +module_serdev_device_driver(btmtkuart_driver); + +MODULE_AUTHOR("Sean Wang "); +MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c new file mode 100644 index 000000000..951fe3014 --- /dev/null +++ b/drivers/bluetooth/btnxpuart.c @@ -0,0 +1,1412 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NXP Bluetooth driver + * Copyright 2023 NXP + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "h4_recv.h" + +#define MANUFACTURER_NXP 37 + +#define BTNXPUART_TX_STATE_ACTIVE 1 +#define BTNXPUART_FW_DOWNLOADING 2 +#define BTNXPUART_CHECK_BOOT_SIGNATURE 3 +#define BTNXPUART_SERDEV_OPEN 4 +#define BTNXPUART_IR_IN_PROGRESS 5 + +/* NXP HW err codes */ +#define 
BTNXPUART_IR_HW_ERR 0xb0 + +#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin" +#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin" +#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin" +#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin" +#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se" +#define FIRMWARE_IW624 "nxp/uartiw624_bt.bin" +#define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se" +#define FIRMWARE_AW693 "nxp/uartaw693_bt.bin" +#define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se" +#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin" + +#define CHIP_ID_W9098 0x5c03 +#define CHIP_ID_IW416 0x7201 +#define CHIP_ID_IW612 0x7601 +#define CHIP_ID_IW624a 0x8000 +#define CHIP_ID_IW624c 0x8001 +#define CHIP_ID_AW693 0x8200 + +#define FW_SECURE_MASK 0xc0 +#define FW_OPEN 0x00 +#define FW_AUTH_ILLEGAL 0x40 +#define FW_AUTH_PLAIN 0x80 +#define FW_AUTH_ENC 0xc0 + +#define HCI_NXP_PRI_BAUDRATE 115200 +#define HCI_NXP_SEC_BAUDRATE 3000000 + +#define MAX_FW_FILE_NAME_LEN 50 + +/* Default ps timeout period in milliseconds */ +#define PS_DEFAULT_TIMEOUT_PERIOD_MS 2000 + +/* wakeup methods */ +#define WAKEUP_METHOD_DTR 0 +#define WAKEUP_METHOD_BREAK 1 +#define WAKEUP_METHOD_EXT_BREAK 2 +#define WAKEUP_METHOD_RTS 3 +#define WAKEUP_METHOD_INVALID 0xff + +/* power save mode status */ +#define PS_MODE_DISABLE 0 +#define PS_MODE_ENABLE 1 + +/* Power Save Commands to ps_work_func */ +#define PS_CMD_EXIT_PS 1 +#define PS_CMD_ENTER_PS 2 + +/* power save state */ +#define PS_STATE_AWAKE 0 +#define PS_STATE_SLEEP 1 + +/* Bluetooth vendor command : Sleep mode */ +#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23 +/* Bluetooth vendor command : Wakeup method */ +#define HCI_NXP_WAKEUP_METHOD 0xfc53 +/* Bluetooth vendor command : Set operational baudrate */ +#define HCI_NXP_SET_OPER_SPEED 0xfc09 +/* Bluetooth vendor command: Independent Reset */ +#define HCI_NXP_IND_RESET 0xfcfc + +/* Bluetooth Power State : Vendor cmd params */ +#define BT_PS_ENABLE 0x02 +#define BT_PS_DISABLE 0x03 + +/* Bluetooth Host Wakeup Methods */ +#define BT_HOST_WAKEUP_METHOD_NONE 0x00 +#define BT_HOST_WAKEUP_METHOD_DTR 0x01 +#define BT_HOST_WAKEUP_METHOD_BREAK 0x02 +#define BT_HOST_WAKEUP_METHOD_GPIO 0x03 + +/* Bluetooth Chip Wakeup Methods */ +#define BT_CTRL_WAKEUP_METHOD_DSR 0x00 +#define BT_CTRL_WAKEUP_METHOD_BREAK 0x01 +#define BT_CTRL_WAKEUP_METHOD_GPIO 0x02 +#define BT_CTRL_WAKEUP_METHOD_EXT_BREAK 0x04 +#define BT_CTRL_WAKEUP_METHOD_RTS 0x05 + +struct ps_data { + u8 target_ps_mode; /* ps mode to be set */ + u8 cur_psmode; /* current ps_mode */ + u8 ps_state; /* controller's power save state */ + u8 ps_cmd; + u8 h2c_wakeupmode; + u8 cur_h2c_wakeupmode; + u8 c2h_wakeupmode; + u8 c2h_wakeup_gpio; + u8 h2c_wakeup_gpio; + bool driver_sent_cmd; + u16 h2c_ps_interval; + u16 c2h_ps_interval; + struct hci_dev *hdev; + struct work_struct work; + struct timer_list ps_timer; +}; + +struct wakeup_cmd_payload { + u8 c2h_wakeupmode; + u8 c2h_wakeup_gpio; + u8 h2c_wakeupmode; + u8 h2c_wakeup_gpio; +} __packed; + +struct psmode_cmd_payload { + u8 ps_cmd; + __le16 c2h_ps_interval; +} __packed; + +struct btnxpuart_data { + const char *helper_fw_name; + const char *fw_name; +}; + +struct btnxpuart_dev { + struct hci_dev *hdev; + struct serdev_device *serdev; + + struct work_struct tx_work; + unsigned long tx_state; + struct sk_buff_head txq; + struct sk_buff *rx_skb; + + const struct firmware *fw; + u8 fw_name[MAX_FW_FILE_NAME_LEN]; + u32 fw_dnld_v1_offset; + u32 fw_v1_sent_bytes; + u32 fw_v3_offset_correction; + u32 fw_v1_expected_len; + u32 boot_reg_offset; 
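+ /* Woken from the RX path once the firmware download completes or a boot signature check finishes */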
+ wait_queue_head_t fw_dnld_done_wait_q; + wait_queue_head_t check_boot_sign_wait_q; + + u32 new_baudrate; + u32 current_baudrate; + u32 fw_init_baudrate; + bool timeout_changed; + bool baudrate_changed; + bool helper_downloaded; + + struct ps_data psdata; + struct btnxpuart_data *nxp_data; +}; + +#define NXP_V1_FW_REQ_PKT 0xa5 +#define NXP_V1_CHIP_VER_PKT 0xaa +#define NXP_V3_FW_REQ_PKT 0xa7 +#define NXP_V3_CHIP_VER_PKT 0xab + +#define NXP_ACK_V1 0x5a +#define NXP_NAK_V1 0xbf +#define NXP_ACK_V3 0x7a +#define NXP_NAK_V3 0x7b +#define NXP_CRC_ERROR_V3 0x7c + +#define HDR_LEN 16 + +#define NXP_RECV_CHIP_VER_V1 \ + .type = NXP_V1_CHIP_VER_PKT, \ + .hlen = 4, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 4 + +#define NXP_RECV_FW_REQ_V1 \ + .type = NXP_V1_FW_REQ_PKT, \ + .hlen = 4, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 4 + +#define NXP_RECV_CHIP_VER_V3 \ + .type = NXP_V3_CHIP_VER_PKT, \ + .hlen = 4, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 4 + +#define NXP_RECV_FW_REQ_V3 \ + .type = NXP_V3_FW_REQ_PKT, \ + .hlen = 9, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 9 + +struct v1_data_req { + __le16 len; + __le16 len_comp; +} __packed; + +struct v1_start_ind { + __le16 chip_id; + __le16 chip_id_comp; +} __packed; + +struct v3_data_req { + __le16 len; + __le32 offset; + __le16 error; + u8 crc; +} __packed; + +struct v3_start_ind { + __le16 chip_id; + u8 loader_ver; + u8 crc; +} __packed; + +/* UART register addresses of BT chip */ +#define CLKDIVADDR 0x7f00008f +#define UARTDIVADDR 0x7f000090 +#define UARTMCRADDR 0x7f000091 +#define UARTREINITADDR 0x7f000092 +#define UARTICRADDR 0x7f000093 +#define UARTFCRADDR 0x7f000094 + +#define MCR 0x00000022 +#define INIT 0x00000001 +#define ICR 0x000000c7 +#define FCR 0x000000c7 + +#define POLYNOMIAL8 0x07 + +struct uart_reg { + __le32 address; + __le32 value; +} __packed; + +struct uart_config { + struct uart_reg clkdiv; + struct uart_reg uartdiv; + struct uart_reg mcr; + struct uart_reg re_init; + struct uart_reg icr; + struct uart_reg fcr; + __be32 crc; +} __packed; + +struct nxp_bootloader_cmd { + __le32 header; + __le32 arg; + __le32 payload_len; + __be32 crc; +} __packed; + +static u8 crc8_table[CRC8_TABLE_SIZE]; + +/* Default configurations */ +#define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK +#define DEFAULT_PS_MODE PS_MODE_DISABLE +#define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE + +static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode, + u32 plen, + void *param) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct sk_buff *skb; + + /* set flag to prevent nxp_enqueue from parsing values from this command and + * calling hci_cmd_sync_queue() again. 
+ */ + psdata->driver_sent_cmd = true; + skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT); + psdata->driver_sent_cmd = false; + + return skb; +} + +static void btnxpuart_tx_wakeup(struct btnxpuart_dev *nxpdev) +{ + if (schedule_work(&nxpdev->tx_work)) + set_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state); +} + +/* NXP Power Save Feature */ +static void ps_start_timer(struct btnxpuart_dev *nxpdev) +{ + struct ps_data *psdata = &nxpdev->psdata; + + if (!psdata) + return; + + if (psdata->cur_psmode == PS_MODE_ENABLE) + mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval)); +} + +static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) +{ + struct ps_data *psdata = &nxpdev->psdata; + + flush_work(&psdata->work); + del_timer_sync(&psdata->ps_timer); +} + +static void ps_control(struct hci_dev *hdev, u8 ps_state) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + int status; + + if (psdata->ps_state == ps_state || + !test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state)) + return; + + switch (psdata->cur_h2c_wakeupmode) { + case WAKEUP_METHOD_DTR: + if (ps_state == PS_STATE_AWAKE) + status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); + else + status = serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); + break; + case WAKEUP_METHOD_BREAK: + default: + if (ps_state == PS_STATE_AWAKE) + status = serdev_device_break_ctl(nxpdev->serdev, 0); + else + status = serdev_device_break_ctl(nxpdev->serdev, -1); + bt_dev_dbg(hdev, "Set UART break: %s, status=%d", + str_on_off(ps_state == PS_STATE_SLEEP), status); + break; + } + if (!status) + psdata->ps_state = ps_state; + if (ps_state == PS_STATE_AWAKE) + btnxpuart_tx_wakeup(nxpdev); +} + +static void ps_work_func(struct work_struct *work) +{ + struct ps_data *data = container_of(work, struct ps_data, work); + + if (data->ps_cmd == PS_CMD_ENTER_PS && data->cur_psmode == PS_MODE_ENABLE) + ps_control(data->hdev, PS_STATE_SLEEP); + else if (data->ps_cmd == PS_CMD_EXIT_PS) + ps_control(data->hdev, PS_STATE_AWAKE); +} + +static void ps_timeout_func(struct timer_list *t) +{ + struct ps_data *data = from_timer(data, t, ps_timer); + struct hci_dev *hdev = data->hdev; + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + if (test_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state)) { + ps_start_timer(nxpdev); + } else { + data->ps_cmd = PS_CMD_ENTER_PS; + schedule_work(&data->work); + } +} + +static void ps_setup(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + psdata->hdev = hdev; + INIT_WORK(&psdata->work, ps_work_func); + timer_setup(&psdata->ps_timer, ps_timeout_func, 0); +} + +static void ps_wakeup(struct btnxpuart_dev *nxpdev) +{ + struct ps_data *psdata = &nxpdev->psdata; + + if (psdata->ps_state != PS_STATE_AWAKE) { + psdata->ps_cmd = PS_CMD_EXIT_PS; + schedule_work(&psdata->work); + } +} + +static int send_ps_cmd(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct psmode_cmd_payload pcmd; + struct sk_buff *skb; + u8 *status; + + if (psdata->target_ps_mode == PS_MODE_ENABLE) + pcmd.ps_cmd = BT_PS_ENABLE; + else + pcmd.ps_cmd = BT_PS_DISABLE; + pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval); + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", 
PTR_ERR(skb)); + return PTR_ERR(skb); + } + + status = skb_pull_data(skb, 1); + if (status) { + if (!*status) + psdata->cur_psmode = psdata->target_ps_mode; + else + psdata->target_ps_mode = psdata->cur_psmode; + if (psdata->cur_psmode == PS_MODE_ENABLE) + ps_start_timer(nxpdev); + else + ps_wakeup(nxpdev); + bt_dev_dbg(hdev, "Power Save mode response: status=%d, ps_mode=%d", + *status, psdata->cur_psmode); + } + kfree_skb(skb); + + return 0; +} + +static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct wakeup_cmd_payload pcmd; + struct sk_buff *skb; + u8 *status; + + pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode; + pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio; + switch (psdata->h2c_wakeupmode) { + case WAKEUP_METHOD_DTR: + pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR; + break; + case WAKEUP_METHOD_BREAK: + default: + pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK; + break; + } + pcmd.h2c_wakeup_gpio = 0xff; + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + + status = skb_pull_data(skb, 1); + if (status) { + if (*status == 0) + psdata->cur_h2c_wakeupmode = psdata->h2c_wakeupmode; + else + psdata->h2c_wakeupmode = psdata->cur_h2c_wakeupmode; + bt_dev_dbg(hdev, "Set Wakeup Method response: status=%d, h2c_wakeupmode=%d", + *status, psdata->cur_h2c_wakeupmode); + } + kfree_skb(skb); + + return 0; +} + +static void ps_init(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS); + usleep_range(5000, 10000); + serdev_device_set_tiocm(nxpdev->serdev, TIOCM_RTS, 0); + usleep_range(5000, 10000); + + psdata->ps_state = PS_STATE_AWAKE; + psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; + psdata->c2h_wakeup_gpio = 0xff; + + psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; + psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS; + switch (DEFAULT_H2C_WAKEUP_MODE) { + case WAKEUP_METHOD_DTR: + psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; + serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); + serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); + break; + case WAKEUP_METHOD_BREAK: + default: + psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; + serdev_device_break_ctl(nxpdev->serdev, -1); + usleep_range(5000, 10000); + serdev_device_break_ctl(nxpdev->serdev, 0); + usleep_range(5000, 10000); + break; + } + + psdata->cur_psmode = PS_MODE_DISABLE; + psdata->target_ps_mode = DEFAULT_PS_MODE; + + if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode) + hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); + if (psdata->cur_psmode != psdata->target_ps_mode) + hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); +} + +/* NXP Firmware Download Feature */ +static int nxp_download_firmware(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + nxpdev->fw_dnld_v1_offset = 0; + nxpdev->fw_v1_sent_bytes = 0; + nxpdev->fw_v1_expected_len = HDR_LEN; + nxpdev->boot_reg_offset = 0; + nxpdev->fw_v3_offset_correction = 0; + nxpdev->baudrate_changed = false; + nxpdev->timeout_changed = false; + nxpdev->helper_downloaded = false; + + serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, false); + 
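+ /* The bootloader handshake always starts at the 115200 primary baudrate with + * flow control off; flow control is turned back on once the download below + * has completed. + */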
nxpdev->current_baudrate = HCI_NXP_PRI_BAUDRATE; + + /* Wait till FW is downloaded */ + err = wait_event_interruptible_timeout(nxpdev->fw_dnld_done_wait_q, + !test_bit(BTNXPUART_FW_DOWNLOADING, + &nxpdev->tx_state), + msecs_to_jiffies(60000)); + if (err == 0) { + bt_dev_err(hdev, "FW Download Timeout."); + return -ETIMEDOUT; + } + + serdev_device_set_flow_control(nxpdev->serdev, true); + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + + /* Allow the downloaded FW to initialize */ + msleep(1200); + + return 0; +} + +static void nxp_send_ack(u8 ack, struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + u8 ack_nak[2]; + int len = 1; + + ack_nak[0] = ack; + if (ack == NXP_ACK_V3) { + ack_nak[1] = crc8(crc8_table, ack_nak, 1, 0xff); + len = 2; + } + serdev_device_write_buf(nxpdev->serdev, ack_nak, len); +} + +static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct nxp_bootloader_cmd nxp_cmd5; + struct uart_config uart_config; + u32 clkdivaddr = CLKDIVADDR - nxpdev->boot_reg_offset; + u32 uartdivaddr = UARTDIVADDR - nxpdev->boot_reg_offset; + u32 uartmcraddr = UARTMCRADDR - nxpdev->boot_reg_offset; + u32 uartreinitaddr = UARTREINITADDR - nxpdev->boot_reg_offset; + u32 uarticraddr = UARTICRADDR - nxpdev->boot_reg_offset; + u32 uartfcraddr = UARTFCRADDR - nxpdev->boot_reg_offset; + + if (req_len == sizeof(nxp_cmd5)) { + nxp_cmd5.header = __cpu_to_le32(5); + nxp_cmd5.arg = 0; + nxp_cmd5.payload_len = __cpu_to_le32(sizeof(uart_config)); + /* FW expects swapped CRC bytes */ + nxp_cmd5.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd5, + sizeof(nxp_cmd5) - 4)); + + serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd5, sizeof(nxp_cmd5)); + nxpdev->fw_v3_offset_correction += req_len; + } else if (req_len == sizeof(uart_config)) { + uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr); + uart_config.clkdiv.value = __cpu_to_le32(0x00c00000); + uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr); + uart_config.uartdiv.value = __cpu_to_le32(1); + uart_config.mcr.address = __cpu_to_le32(uartmcraddr); + uart_config.mcr.value = __cpu_to_le32(MCR); + uart_config.re_init.address = __cpu_to_le32(uartreinitaddr); + uart_config.re_init.value = __cpu_to_le32(INIT); + uart_config.icr.address = __cpu_to_le32(uarticraddr); + uart_config.icr.value = __cpu_to_le32(ICR); + uart_config.fcr.address = __cpu_to_le32(uartfcraddr); + uart_config.fcr.value = __cpu_to_le32(FCR); + /* FW expects swapped CRC bytes */ + uart_config.crc = __cpu_to_be32(crc32_be(0UL, (char *)&uart_config, + sizeof(uart_config) - 4)); + + serdev_device_write_buf(nxpdev->serdev, (u8 *)&uart_config, sizeof(uart_config)); + serdev_device_wait_until_sent(nxpdev->serdev, 0); + nxpdev->fw_v3_offset_correction += req_len; + return true; + } + return false; +} + +static bool nxp_fw_change_timeout(struct hci_dev *hdev, u16 req_len) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct nxp_bootloader_cmd nxp_cmd7; + + if (req_len != sizeof(nxp_cmd7)) + return false; + + nxp_cmd7.header = __cpu_to_le32(7); + nxp_cmd7.arg = __cpu_to_le32(0x70); + nxp_cmd7.payload_len = 0; + /* FW expects swapped CRC bytes */ + nxp_cmd7.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd7, + sizeof(nxp_cmd7) - 4)); + serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd7, sizeof(nxp_cmd7)); + serdev_device_wait_until_sent(nxpdev->serdev, 0); + nxpdev->fw_v3_offset_correction += req_len; + return true; +} + 
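+/* Note on CRC handling in the bootloader handshake above: the V3 ACK/NAK byte + * is protected by a CRC-8 (polynomial 0x07, init 0xff) over the single status + * byte, while each nxp_bootloader_cmd and uart_config block carries a CRC-32 + * computed with crc32_be() over all preceding bytes and stored big-endian + * ("swapped"), even though the other fields are little-endian. A minimal + * sketch of how a command CRC is formed (illustrative only; the driver + * already does this inline above): + * + *	cmd.crc = __cpu_to_be32(crc32_be(0UL, (char *)&cmd, sizeof(cmd) - 4)); + */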
+static u32 nxp_get_data_len(const u8 *buf) +{ + struct nxp_bootloader_cmd *hdr = (struct nxp_bootloader_cmd *)buf; + + return __le32_to_cpu(hdr->payload_len); +} + +static bool is_fw_downloading(struct btnxpuart_dev *nxpdev) +{ + return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); +} + +static bool process_boot_signature(struct btnxpuart_dev *nxpdev) +{ + if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) { + clear_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); + return false; + } + return is_fw_downloading(nxpdev); +} + +static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + if (!fw_name) + return -ENOENT; + + if (!strlen(nxpdev->fw_name)) { + snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name); + + bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name); + err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + } + } + return err; +} + +/* for legacy chipsets with V1 bootloader */ +static int nxp_recv_chip_ver_v1(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct v1_start_ind *req; + __u16 chip_id; + + req = skb_pull_data(skb, sizeof(*req)); + if (!req) + goto free_skb; + + chip_id = le16_to_cpu(req->chip_id ^ req->chip_id_comp); + if (chip_id == 0xffff && nxpdev->fw_dnld_v1_offset) { + nxpdev->fw_dnld_v1_offset = 0; + nxpdev->fw_v1_sent_bytes = 0; + nxpdev->fw_v1_expected_len = HDR_LEN; + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + nxp_send_ack(NXP_ACK_V1, hdev); + } + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct btnxpuart_data *nxp_data = nxpdev->nxp_data; + struct v1_data_req *req; + __u16 len; + + if (!process_boot_signature(nxpdev)) + goto free_skb; + + req = skb_pull_data(skb, sizeof(*req)); + if (!req) + goto free_skb; + + len = __le16_to_cpu(req->len ^ req->len_comp); + if (len != 0xffff) { + bt_dev_dbg(hdev, "ERR: Send NAK"); + nxp_send_ack(NXP_NAK_V1, hdev); + goto free_skb; + } + nxp_send_ack(NXP_ACK_V1, hdev); + + len = __le16_to_cpu(req->len); + + if (!nxp_data->helper_fw_name) { + if (!nxpdev->timeout_changed) { + nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, + len); + goto free_skb; + } + if (!nxpdev->baudrate_changed) { + nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, + len); + if (nxpdev->baudrate_changed) { + serdev_device_set_baudrate(nxpdev->serdev, + HCI_NXP_SEC_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE; + } + goto free_skb; + } + } + + if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) { + if (nxp_request_firmware(hdev, nxp_data->fw_name)) + goto free_skb; + } else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { + if (nxp_request_firmware(hdev, nxp_data->helper_fw_name)) + goto free_skb; + } + + if (!len) { + bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", + nxpdev->fw->size); + if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { + nxpdev->helper_downloaded = true; + serdev_device_wait_until_sent(nxpdev->serdev, 0); + 
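+ /* The helper image has been flushed out at the primary baudrate; switch the + * host UART to the 3 Mbps operational rate before requesting the main + * firmware. + */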
serdev_device_set_baudrate(nxpdev->serdev, + HCI_NXP_SEC_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + } else { + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); + } + goto free_skb; + } + if (len & 0x01) { + /* The CRC did not match at the other end. + * Simply send the same bytes again. + */ + len = nxpdev->fw_v1_sent_bytes; + bt_dev_dbg(hdev, "CRC error. Resend %d bytes of FW.", len); + } else { + nxpdev->fw_dnld_v1_offset += nxpdev->fw_v1_sent_bytes; + + /* The FW bin file is made up of many blocks of + * 16 byte header and payload data chunks. If the + * FW has requested a header, read the payload length + * info from the header, before sending the header. + * In the next iteration, the FW should request the + * payload data chunk, which should be equal to the + * payload length read from header. If there is a + * mismatch, clearly the driver and FW are out of sync, + * and we need to re-send the previous header again. + */ + if (len == nxpdev->fw_v1_expected_len) { + if (len == HDR_LEN) + nxpdev->fw_v1_expected_len = nxp_get_data_len(nxpdev->fw->data + + nxpdev->fw_dnld_v1_offset); + else + nxpdev->fw_v1_expected_len = HDR_LEN; + } else if (len == HDR_LEN) { + /* FW download out of sync. Send previous chunk again */ + nxpdev->fw_dnld_v1_offset -= nxpdev->fw_v1_sent_bytes; + nxpdev->fw_v1_expected_len = HDR_LEN; + } + } + + if (nxpdev->fw_dnld_v1_offset + len <= nxpdev->fw->size) + serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + + nxpdev->fw_dnld_v1_offset, len); + nxpdev->fw_v1_sent_bytes = len; + +free_skb: + kfree_skb(skb); + return 0; +} + +static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, + u8 loader_ver) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + char *fw_name = NULL; + + switch (chipid) { + case CHIP_ID_W9098: + fw_name = FIRMWARE_W9098; + break; + case CHIP_ID_IW416: + fw_name = FIRMWARE_IW416; + break; + case CHIP_ID_IW612: + fw_name = FIRMWARE_IW612; + break; + case CHIP_ID_IW624a: + case CHIP_ID_IW624c: + nxpdev->boot_reg_offset = 1; + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) + fw_name = FIRMWARE_IW624; + else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) + fw_name = FIRMWARE_SECURE_IW624; + else + bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); + break; + case CHIP_ID_AW693: + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) + fw_name = FIRMWARE_AW693; + else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) + fw_name = FIRMWARE_SECURE_AW693; + else + bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); + break; + default: + bt_dev_err(hdev, "Unknown chip signature %04x", chipid); + break; + } + return fw_name; +} + +static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req)); + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + u16 chip_id; + u8 loader_ver; + + if (!process_boot_signature(nxpdev)) + goto free_skb; + + chip_id = le16_to_cpu(req->chip_id); + loader_ver = req->loader_ver; + if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev, + chip_id, loader_ver))) + nxp_send_ack(NXP_ACK_V3, hdev); + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct v3_data_req *req; + __u16 len; + __u32 offset; + + if (!process_boot_signature(nxpdev)) + goto free_skb; + + req = 
skb_pull_data(skb, sizeof(*req)); + if (!req || !nxpdev->fw) + goto free_skb; + + nxp_send_ack(NXP_ACK_V3, hdev); + + len = __le16_to_cpu(req->len); + + if (!nxpdev->timeout_changed) { + nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len); + goto free_skb; + } + + if (!nxpdev->baudrate_changed) { + nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len); + if (nxpdev->baudrate_changed) { + serdev_device_set_baudrate(nxpdev->serdev, + HCI_NXP_SEC_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE; + } + goto free_skb; + } + + if (req->len == 0) { + bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", + nxpdev->fw->size); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); + goto free_skb; + } + if (req->error) + bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", + req->error); + + offset = __le32_to_cpu(req->offset); + if (offset < nxpdev->fw_v3_offset_correction) { + /* This scenario should ideally never occur. But if it ever does, + * FW is out of sync and needs a power cycle. + */ + bt_dev_err(hdev, "Something went wrong during FW download"); + bt_dev_err(hdev, "Please power cycle and try again"); + goto free_skb; + } + + serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset - + nxpdev->fw_v3_offset_correction, len); + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + __le32 new_baudrate = __cpu_to_le32(nxpdev->new_baudrate); + struct ps_data *psdata = &nxpdev->psdata; + struct sk_buff *skb; + u8 *status; + + if (!psdata) + return 0; + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + + status = (u8 *)skb_pull_data(skb, 1); + if (status) { + if (*status == 0) { + serdev_device_set_baudrate(nxpdev->serdev, nxpdev->new_baudrate); + nxpdev->current_baudrate = nxpdev->new_baudrate; + } + bt_dev_dbg(hdev, "Set baudrate response: status=%d, baudrate=%d", + *status, nxpdev->new_baudrate); + } + kfree_skb(skb); + + return 0; +} + +static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev) +{ + serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); + if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) + serdev_device_set_flow_control(nxpdev->serdev, false); + else + serdev_device_set_flow_control(nxpdev->serdev, true); + set_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); + + return wait_event_interruptible_timeout(nxpdev->check_boot_sign_wait_q, + !test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, + &nxpdev->tx_state), + msecs_to_jiffies(1000)); +} + +static int nxp_set_ind_reset(struct hci_dev *hdev, void *data) +{ + static const u8 ir_hw_err[] = { HCI_EV_HARDWARE_ERROR, + 0x01, BTNXPUART_IR_HW_ERR }; + struct sk_buff *skb; + + skb = bt_skb_alloc(3, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, ir_hw_err, 3); + + /* Inject Hardware Error to upper stack */ + return hci_recv_frame(hdev, skb); +} + +/* NXP protocol */ +static int nxp_setup(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + if (nxp_check_boot_sign(nxpdev)) { + bt_dev_dbg(hdev, "Need FW Download."); + err = nxp_download_firmware(hdev); + if (err < 0) + return err; + } else { + 
bt_dev_dbg(hdev, "FW already running."); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + } + + serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); + nxpdev->current_baudrate = nxpdev->fw_init_baudrate; + + if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { + nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; + hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); + } + + ps_init(hdev); + + if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) + hci_dev_clear_flag(hdev, HCI_SETUP); + + return 0; +} + +static void nxp_hw_err(struct hci_dev *hdev, u8 code) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + switch (code) { + case BTNXPUART_IR_HW_ERR: + set_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state); + hci_dev_set_flag(hdev, HCI_SETUP); + break; + default: + break; + } +} + +static int nxp_shutdown(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct sk_buff *skb; + u8 *status; + u8 pcmd = 0; + + if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) { + skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb_pull_data(skb, 1); + if (status) { + serdev_device_set_flow_control(nxpdev->serdev, false); + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + } + kfree_skb(skb); + } + + return 0; +} + +static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + skb_queue_tail(&nxpdev->txq, skb); + btnxpuart_tx_wakeup(nxpdev); + return 0; +} + +static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct hci_command_hdr *hdr; + struct psmode_cmd_payload ps_parm; + struct wakeup_cmd_payload wakeup_parm; + __le32 baudrate_parm; + + /* if vendor commands are received from user space (e.g. hcitool), update + * driver flags accordingly and ask driver to re-send the command to FW. + * In case the payload for any command does not match expected payload + * length, let the firmware and user space program handle it, or throw + * an error. 
+ */ + if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT && !psdata->driver_sent_cmd) { + hdr = (struct hci_command_hdr *)skb->data; + if (hdr->plen != (skb->len - HCI_COMMAND_HDR_SIZE)) + return btnxpuart_queue_skb(hdev, skb); + + switch (__le16_to_cpu(hdr->opcode)) { + case HCI_NXP_AUTO_SLEEP_MODE: + if (hdr->plen == sizeof(ps_parm)) { + memcpy(&ps_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); + if (ps_parm.ps_cmd == BT_PS_ENABLE) + psdata->target_ps_mode = PS_MODE_ENABLE; + else if (ps_parm.ps_cmd == BT_PS_DISABLE) + psdata->target_ps_mode = PS_MODE_DISABLE; + psdata->c2h_ps_interval = __le16_to_cpu(ps_parm.c2h_ps_interval); + hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); + goto free_skb; + } + break; + case HCI_NXP_WAKEUP_METHOD: + if (hdr->plen == sizeof(wakeup_parm)) { + memcpy(&wakeup_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); + psdata->c2h_wakeupmode = wakeup_parm.c2h_wakeupmode; + psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio; + psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio; + switch (wakeup_parm.h2c_wakeupmode) { + case BT_CTRL_WAKEUP_METHOD_DSR: + psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; + break; + case BT_CTRL_WAKEUP_METHOD_BREAK: + default: + psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; + break; + } + hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); + goto free_skb; + } + break; + case HCI_NXP_SET_OPER_SPEED: + if (hdr->plen == sizeof(baudrate_parm)) { + memcpy(&baudrate_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); + nxpdev->new_baudrate = __le32_to_cpu(baudrate_parm); + hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); + goto free_skb; + } + break; + case HCI_NXP_IND_RESET: + if (hdr->plen == 1) { + hci_cmd_sync_queue(hdev, nxp_set_ind_reset, NULL, NULL); + goto free_skb; + } + break; + default: + break; + } + } + + return btnxpuart_queue_skb(hdev, skb); + +free_skb: + kfree_skb(skb); + return 0; +} + +static struct sk_buff *nxp_dequeue(void *data) +{ + struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data; + + ps_wakeup(nxpdev); + ps_start_timer(nxpdev); + return skb_dequeue(&nxpdev->txq); +} + +/* btnxpuart based on serdev */ +static void btnxpuart_tx_work(struct work_struct *work) +{ + struct btnxpuart_dev *nxpdev = container_of(work, struct btnxpuart_dev, + tx_work); + struct serdev_device *serdev = nxpdev->serdev; + struct hci_dev *hdev = nxpdev->hdev; + struct sk_buff *skb; + int len; + + while ((skb = nxp_dequeue(nxpdev))) { + len = serdev_device_write_buf(serdev, skb->data, skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len > 0) { + skb_queue_head(&nxpdev->txq, skb); + break; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + kfree_skb(skb); + } + clear_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state); +} + +static int btnxpuart_open(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + err = serdev_device_open(nxpdev->serdev); + if (err) { + bt_dev_err(hdev, "Unable to open UART device %s", + dev_name(&nxpdev->serdev->dev)); + } else { + set_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); + } + return err; +} + +static int btnxpuart_close(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + ps_wakeup(nxpdev); + serdev_device_close(nxpdev->serdev); + clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); + return 0; +} + +static int 
btnxpuart_flush(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + /* Flush any pending characters */ + serdev_device_write_flush(nxpdev->serdev); + skb_queue_purge(&nxpdev->txq); + + cancel_work_sync(&nxpdev->tx_work); + + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; + + return 0; +} + +static const struct h4_recv_pkt nxp_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 }, + { NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 }, + { NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 }, + { NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 }, +}; + +static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data, + size_t count) +{ + struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); + + ps_start_timer(nxpdev); + + nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count, + nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts)); + if (IS_ERR(nxpdev->rx_skb)) { + int err = PTR_ERR(nxpdev->rx_skb); + /* Safe to ignore out-of-sync bootloader signatures */ + if (!is_fw_downloading(nxpdev)) + bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err); + nxpdev->rx_skb = NULL; + return count; + } + if (!is_fw_downloading(nxpdev)) + nxpdev->hdev->stat.byte_rx += count; + return count; +} + +static void btnxpuart_write_wakeup(struct serdev_device *serdev) +{ + serdev_device_write_wakeup(serdev); +} + +static const struct serdev_device_ops btnxpuart_client_ops = { + .receive_buf = btnxpuart_receive_buf, + .write_wakeup = btnxpuart_write_wakeup, +}; + +static int nxp_serdev_probe(struct serdev_device *serdev) +{ + struct hci_dev *hdev; + struct btnxpuart_dev *nxpdev; + + nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL); + if (!nxpdev) + return -ENOMEM; + + nxpdev->nxp_data = (struct btnxpuart_data *)device_get_match_data(&serdev->dev); + + nxpdev->serdev = serdev; + serdev_device_set_drvdata(serdev, nxpdev); + + serdev_device_set_client_ops(serdev, &btnxpuart_client_ops); + + INIT_WORK(&nxpdev->tx_work, btnxpuart_tx_work); + skb_queue_head_init(&nxpdev->txq); + + init_waitqueue_head(&nxpdev->fw_dnld_done_wait_q); + init_waitqueue_head(&nxpdev->check_boot_sign_wait_q); + + device_property_read_u32(&nxpdev->serdev->dev, "fw-init-baudrate", + &nxpdev->fw_init_baudrate); + if (!nxpdev->fw_init_baudrate) + nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE; + + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + + crc8_populate_msb(crc8_table, POLYNOMIAL8); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(&serdev->dev, "Can't allocate HCI device\n"); + return -ENOMEM; + } + + nxpdev->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, nxpdev); + + hdev->manufacturer = MANUFACTURER_NXP; + hdev->open = btnxpuart_open; + hdev->close = btnxpuart_close; + hdev->flush = btnxpuart_flush; + hdev->setup = nxp_setup; + hdev->send = nxp_enqueue; + hdev->hw_error = nxp_hw_err; + hdev->shutdown = nxp_shutdown; + SET_HCIDEV_DEV(hdev, &serdev->dev); + + if (hci_register_dev(hdev) < 0) { + dev_err(&serdev->dev, "Can't register HCI device\n"); + hci_free_dev(hdev); + return -ENODEV; + } + + ps_setup(hdev); + + return 0; +} + +static void nxp_serdev_remove(struct serdev_device *serdev) +{ + struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); + struct hci_dev *hdev = nxpdev->hdev; + + /* Restore FW baudrate to 
fw_init_baudrate if changed. + * This will ensure FW baudrate is in sync with + * driver baudrate in case this driver is re-inserted. + */ + if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { + nxpdev->new_baudrate = nxpdev->fw_init_baudrate; + nxp_set_baudrate_cmd(hdev, NULL); + } + + ps_cancel_timer(nxpdev); + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +static struct btnxpuart_data w8987_data __maybe_unused = { + .helper_fw_name = NULL, + .fw_name = FIRMWARE_W8987, +}; + +static struct btnxpuart_data w8997_data __maybe_unused = { + .helper_fw_name = FIRMWARE_HELPER, + .fw_name = FIRMWARE_W8997, +}; + +static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { + { .compatible = "nxp,88w8987-bt", .data = &w8987_data }, + { .compatible = "nxp,88w8997-bt", .data = &w8997_data }, + { } +}; +MODULE_DEVICE_TABLE(of, nxpuart_of_match_table); + +static struct serdev_device_driver nxp_serdev_driver = { + .probe = nxp_serdev_probe, + .remove = nxp_serdev_remove, + .driver = { + .name = "btnxpuart", + .of_match_table = of_match_ptr(nxpuart_of_match_table), + }, +}; + +module_serdev_device_driver(nxp_serdev_driver); + +MODULE_AUTHOR("Neeraj Sanjay Kale "); +MODULE_DESCRIPTION("NXP Bluetooth Serial driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c new file mode 100644 index 000000000..5a35ac413 --- /dev/null +++ b/drivers/bluetooth/btqca.c @@ -0,0 +1,782 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Bluetooth supports for Qualcomm Atheros chips + * + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + */ +#include +#include +#include + +#include +#include + +#include "btqca.h" + +#define VERSION "0.1" + +int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, + enum qca_btsoc_type soc_type) +{ + struct sk_buff *skb; + struct edl_event_hdr *edl; + char cmd; + int err = 0; + u8 event_type = HCI_EV_VENDOR; + u8 rlen = sizeof(*edl) + sizeof(*ver); + u8 rtype = EDL_APP_VER_RES_EVT; + + bt_dev_dbg(hdev, "QCA Version Request"); + + /* Unlike other SoC's sending version command response as payload to + * VSE event. WCN3991 sends version command response as a payload to + * command complete event. 
+ */ + if (soc_type >= QCA_WCN3991) { + event_type = 0; + rlen += 1; + rtype = EDL_PATCH_VER_REQ_CMD; + } + + cmd = EDL_PATCH_VER_REQ_CMD; + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, + &cmd, event_type, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Reading QCA version information failed (%d)", + err); + return err; + } + + if (skb->len != rlen) { + bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len); + err = -EILSEQ; + goto out; + } + + edl = (struct edl_event_hdr *)(skb->data); + if (!edl) { + bt_dev_err(hdev, "QCA TLV with no header"); + err = -EILSEQ; + goto out; + } + + if (edl->cresp != EDL_CMD_REQ_RES_EVT || + edl->rtype != rtype) { + bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, + edl->rtype); + err = -EIO; + goto out; + } + + if (soc_type >= QCA_WCN3991) + memcpy(ver, edl->data + 1, sizeof(*ver)); + else + memcpy(ver, &edl->data, sizeof(*ver)); + + bt_dev_info(hdev, "QCA Product ID :0x%08x", + le32_to_cpu(ver->product_id)); + bt_dev_info(hdev, "QCA SOC Version :0x%08x", + le32_to_cpu(ver->soc_id)); + bt_dev_info(hdev, "QCA ROM Version :0x%08x", + le16_to_cpu(ver->rom_ver)); + bt_dev_info(hdev, "QCA Patch Version:0x%08x", + le16_to_cpu(ver->patch_ver)); + + if (ver->soc_id == 0 || ver->rom_ver == 0) + err = -EILSEQ; + +out: + kfree_skb(skb); + if (err) + bt_dev_err(hdev, "QCA Failed to get version (%d)", err); + + return err; +} +EXPORT_SYMBOL_GPL(qca_read_soc_version); + +static int qca_read_fw_build_info(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct edl_event_hdr *edl; + char cmd, build_label[QCA_FW_BUILD_VER_LEN]; + int build_lbl_len, err = 0; + + bt_dev_dbg(hdev, "QCA read fw build info"); + + cmd = EDL_GET_BUILD_INFO_CMD; + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, + &cmd, 0, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Reading QCA fw build info failed (%d)", + err); + return err; + } + + edl = (struct edl_event_hdr *)(skb->data); + if (!edl) { + bt_dev_err(hdev, "QCA read fw build info with no header"); + err = -EILSEQ; + goto out; + } + + if (edl->cresp != EDL_CMD_REQ_RES_EVT || + edl->rtype != EDL_GET_BUILD_INFO_CMD) { + bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, + edl->rtype); + err = -EIO; + goto out; + } + + build_lbl_len = edl->data[0]; + if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) { + memcpy(build_label, edl->data + 1, build_lbl_len); + *(build_label + build_lbl_len) = '\0'; + } + + hci_set_fw_info(hdev, "%s", build_label); + +out: + kfree_skb(skb); + return err; +} + +static int qca_send_patch_config_cmd(struct hci_dev *hdev) +{ + const u8 cmd[] = { EDL_PATCH_CONFIG_CMD, 0x01, 0, 0, 0 }; + struct sk_buff *skb; + struct edl_event_hdr *edl; + int err; + + bt_dev_dbg(hdev, "QCA Patch config"); + + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd), + cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err); + return err; + } + + if (skb->len != 2) { + bt_dev_err(hdev, "QCA Patch config cmd size mismatch len %d", skb->len); + err = -EILSEQ; + goto out; + } + + edl = (struct edl_event_hdr *)(skb->data); + if (!edl) { + bt_dev_err(hdev, "QCA Patch config with no header"); + err = -EILSEQ; + goto out; + } + + if (edl->cresp != EDL_PATCH_CONFIG_RES_EVT || edl->rtype != EDL_PATCH_CONFIG_CMD) { + bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, + edl->rtype); + err = -EIO; 
+ goto out; + } + + err = 0; + +out: + kfree_skb(skb); + return err; +} + +static int qca_send_reset(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + bt_dev_dbg(hdev, "QCA HCI_RESET"); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Reset failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} + +int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + bt_dev_dbg(hdev, "QCA pre shutdown cmd"); + + skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0, + NULL, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); + +static void qca_tlv_check_data(struct hci_dev *hdev, + struct qca_fw_config *config, + u8 *fw_data, enum qca_btsoc_type soc_type) +{ + const u8 *data; + u32 type_len; + u16 tag_id, tag_len; + int idx, length; + struct tlv_type_hdr *tlv; + struct tlv_type_patch *tlv_patch; + struct tlv_type_nvm *tlv_nvm; + uint8_t nvm_baud_rate = config->user_baud_rate; + + config->dnld_mode = QCA_SKIP_EVT_NONE; + config->dnld_type = QCA_SKIP_EVT_NONE; + + switch (config->type) { + case ELF_TYPE_PATCH: + config->dnld_mode = QCA_SKIP_EVT_VSE_CC; + config->dnld_type = QCA_SKIP_EVT_VSE_CC; + + bt_dev_dbg(hdev, "File Class : 0x%x", fw_data[4]); + bt_dev_dbg(hdev, "Data Encoding : 0x%x", fw_data[5]); + bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]); + break; + case TLV_TYPE_PATCH: + tlv = (struct tlv_type_hdr *)fw_data; + type_len = le32_to_cpu(tlv->type_len); + tlv_patch = (struct tlv_type_patch *)tlv->data; + + /* For Rome version 1.1 to 3.1, all segment commands + * are acked by a vendor specific event (VSE). + * For Rome >= 3.2, the download mode field indicates + * if VSE is skipped by the controller. + * In case VSE is skipped, only the last segment is acked. 
+ */ + config->dnld_mode = tlv_patch->download_mode; + config->dnld_type = config->dnld_mode; + + BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); + BT_DBG("Total Length : %d bytes", + le32_to_cpu(tlv_patch->total_size)); + BT_DBG("Patch Data Length : %d bytes", + le32_to_cpu(tlv_patch->data_length)); + BT_DBG("Signing Format Version : 0x%x", + tlv_patch->format_version); + BT_DBG("Signature Algorithm : 0x%x", + tlv_patch->signature); + BT_DBG("Download mode : 0x%x", + tlv_patch->download_mode); + BT_DBG("Reserved : 0x%x", + tlv_patch->reserved1); + BT_DBG("Product ID : 0x%04x", + le16_to_cpu(tlv_patch->product_id)); + BT_DBG("Rom Build Version : 0x%04x", + le16_to_cpu(tlv_patch->rom_build)); + BT_DBG("Patch Version : 0x%04x", + le16_to_cpu(tlv_patch->patch_version)); + BT_DBG("Reserved : 0x%x", + le16_to_cpu(tlv_patch->reserved2)); + BT_DBG("Patch Entry Address : 0x%x", + le32_to_cpu(tlv_patch->entry)); + break; + + case TLV_TYPE_NVM: + tlv = (struct tlv_type_hdr *)fw_data; + + type_len = le32_to_cpu(tlv->type_len); + length = (type_len >> 8) & 0x00ffffff; + + BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); + BT_DBG("Length\t\t : %d bytes", length); + + idx = 0; + data = tlv->data; + while (idx < length) { + tlv_nvm = (struct tlv_type_nvm *)(data + idx); + + tag_id = le16_to_cpu(tlv_nvm->tag_id); + tag_len = le16_to_cpu(tlv_nvm->tag_len); + + /* Update NVM tags as needed */ + switch (tag_id) { + case EDL_TAG_ID_HCI: + /* HCI transport layer parameters + * enabling software inband sleep + * onto controller side. + */ + tlv_nvm->data[0] |= 0x80; + + /* UART Baud Rate */ + if (soc_type >= QCA_WCN3991) + tlv_nvm->data[1] = nvm_baud_rate; + else + tlv_nvm->data[2] = nvm_baud_rate; + + break; + + case EDL_TAG_ID_DEEP_SLEEP: + /* Sleep enable mask + * enabling deep sleep feature on controller. + */ + tlv_nvm->data[0] |= 0x01; + + break; + } + + idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len); + } + break; + + default: + BT_ERR("Unknown TLV type %d", config->type); + break; + } +} + +static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, + const u8 *data, enum qca_tlv_dnld_mode mode, + enum qca_btsoc_type soc_type) +{ + struct sk_buff *skb; + struct edl_event_hdr *edl; + struct tlv_seg_resp *tlv_resp; + u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2]; + int err = 0; + u8 event_type = HCI_EV_VENDOR; + u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp)); + u8 rtype = EDL_TVL_DNLD_RES_EVT; + + cmd[0] = EDL_PATCH_TLV_REQ_CMD; + cmd[1] = seg_size; + memcpy(cmd + 2, data, seg_size); + + if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE) + return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, + cmd); + + /* Unlike other SoC's sending version command response as payload to + * VSE event. WCN3991 sends version command response as a payload to + * command complete event. 
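To make the tag walk above concrete: type_len packs the TLV type in its low byte and the body length in the upper 24 bits, and each NVM entry is a 12-byte fixed header (tag_id, tag_len, two reserved words) followed by tag_len data bytes, which matches the step sizeof(u16) + sizeof(u16) + 8 + tag_len used when advancing idx. A small standalone sketch of that arithmetic; the header word and tag length below are hypothetical values chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Simplified local copy of the NVM entry header from btqca.h. */
struct tlv_type_nvm {
	uint16_t tag_id;
	uint16_t tag_len;
	uint32_t reserve1;
	uint32_t reserve2;
	uint8_t  data[];
} __attribute__((packed));

int main(void)
{
	/* Hypothetical TLV header word: type 0x02 (NVM), body length 0x30. */
	uint32_t type_len = (0x30u << 8) | 0x02;
	uint32_t length   = (type_len >> 8) & 0x00ffffff;

	printf("TLV type  : 0x%x\n", (unsigned)(type_len & 0xff));	/* 0x2 */
	printf("TLV length: %u bytes\n", (unsigned)length);		/* 48  */

	/* One entry with a hypothetical 4-byte tag body. sizeof() of the
	 * header equals the driver's sizeof(u16) + sizeof(u16) + 8.
	 */
	uint16_t tag_len = 4;
	size_t step = sizeof(struct tlv_type_nvm) + tag_len;

	printf("idx advances by %zu bytes\n", step);			/* 16  */
	return 0;
}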
+ */ + if (soc_type >= QCA_WCN3991) { + event_type = 0; + rlen = sizeof(*edl); + rtype = EDL_PATCH_TLV_REQ_CMD; + } + + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd, + event_type, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err); + return err; + } + + if (skb->len != rlen) { + bt_dev_err(hdev, "QCA TLV response size mismatch"); + err = -EILSEQ; + goto out; + } + + edl = (struct edl_event_hdr *)(skb->data); + if (!edl) { + bt_dev_err(hdev, "TLV with no header"); + err = -EILSEQ; + goto out; + } + + if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) { + bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x", + edl->cresp, edl->rtype); + err = -EIO; + } + + if (soc_type >= QCA_WCN3991) + goto out; + + tlv_resp = (struct tlv_seg_resp *)(edl->data); + if (tlv_resp->result) { + bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)", + edl->cresp, edl->rtype, tlv_resp->result); + } + +out: + kfree_skb(skb); + + return err; +} + +static int qca_inject_cmd_complete_event(struct hci_dev *hdev) +{ + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + struct sk_buff *skb; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = skb_put(skb, sizeof(*evt)); + evt->ncmd = 1; + evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE); + + skb_put_u8(skb, QCA_HCI_CC_SUCCESS); + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int qca_download_firmware(struct hci_dev *hdev, + struct qca_fw_config *config, + enum qca_btsoc_type soc_type, + u8 rom_ver) +{ + const struct firmware *fw; + u8 *data; + const u8 *segment; + int ret, size, remain, i = 0; + + bt_dev_info(hdev, "QCA Downloading %s", config->fwname); + + ret = request_firmware(&fw, config->fwname, &hdev->dev); + if (ret) { + /* For WCN6750, if mbn file is not present then check for + * tlv file. 
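Because the skip-event download modes leave the final vendor command without a real completion, qca_inject_cmd_complete_event() above feeds a synthetic Command Complete back through hci_recv_frame(). A sketch of the raw bytes it builds, laid out as a plain array here instead of an skb, purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t evt[] = {
		0x0e,		/* HCI_EV_CMD_COMPLETE                   */
		0x04,		/* plen: ncmd(1) + opcode(2) + status(1) */
		0x01,		/* ncmd                                  */
		0x00, 0xfc,	/* QCA_HCI_CC_OPCODE (0xFC00, LE)        */
		0x00,		/* QCA_HCI_CC_SUCCESS                    */
	};

	for (size_t i = 0; i < sizeof(evt); i++)
		printf("%02x ", evt[i]);
	printf("\n");
	return 0;
}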
+ */ + if (soc_type == QCA_WCN6750 && config->type == ELF_TYPE_PATCH) { + bt_dev_dbg(hdev, "QCA Failed to request file: %s (%d)", + config->fwname, ret); + config->type = TLV_TYPE_PATCH; + snprintf(config->fwname, sizeof(config->fwname), + "qca/msbtfw%02x.tlv", rom_ver); + bt_dev_info(hdev, "QCA Downloading %s", config->fwname); + ret = request_firmware(&fw, config->fwname, &hdev->dev); + if (ret) { + bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", + config->fwname, ret); + return ret; + } + } else { + bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", + config->fwname, ret); + return ret; + } + } + + size = fw->size; + data = vmalloc(fw->size); + if (!data) { + bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s", + config->fwname); + release_firmware(fw); + return -ENOMEM; + } + + memcpy(data, fw->data, size); + release_firmware(fw); + + qca_tlv_check_data(hdev, config, data, soc_type); + + segment = data; + remain = size; + while (remain > 0) { + int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain); + + bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize); + + remain -= segsize; + /* The last segment is always acked regardless download mode */ + if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT) + config->dnld_mode = QCA_SKIP_EVT_NONE; + + ret = qca_tlv_send_segment(hdev, segsize, segment, + config->dnld_mode, soc_type); + if (ret) + goto out; + + segment += segsize; + } + + /* Latest qualcomm chipsets are not sending a command complete event + * for every fw packet sent. They only respond with a vendor specific + * event for the last packet. This optimization in the chip will + * decrease the BT in initialization time. Here we will inject a command + * complete event to avoid a command timeout error message. + */ + if (config->dnld_type == QCA_SKIP_EVT_VSE_CC || + config->dnld_type == QCA_SKIP_EVT_VSE) + ret = qca_inject_cmd_complete_event(hdev); + +out: + vfree(data); + + return ret; +} + +static int qca_disable_soc_logging(struct hci_dev *hdev) +{ + struct sk_buff *skb; + u8 cmd[2]; + int err; + + cmd[0] = QCA_DISABLE_LOGGING_SUB_OP; + cmd[1] = 0x00; + skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} + +int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 cmd[9]; + int err; + + cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD; + cmd[1] = 0x02; /* TAG ID */ + cmd[2] = sizeof(bdaddr_t); /* size */ + memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t)); + skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd, + HCI_EV_VENDOR, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Change address command failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); + +int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, + enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, + const char *firmware_name) +{ + struct qca_fw_config config; + int err; + u8 rom_ver = 0; + u32 soc_ver; + + bt_dev_dbg(hdev, "QCA setup on UART"); + + soc_ver = get_soc_ver(ver.soc_id, ver.rom_ver); + + bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); + + config.user_baud_rate = baudrate; + + /* Firmware files to download are based on ROM version. + * ROM version is derived from last two bytes of soc_ver. 
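A worked example of the ROM-version derivation described above may help; the soc_ver value is hypothetical and chosen only to exercise the bit arithmetic, and the resulting file name follows the WCN399x case of the switch further below:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t soc_ver = 0x40010214;	/* hypothetical controller version */
	uint8_t rom_ver;

	/* Non-WCN3988 parts: nibble [11:8] becomes the high nibble,
	 * nibble [3:0] stays as the low nibble.
	 */
	rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
	printf("rom_ver = 0x%02x -> qca/crbtfw%02x.tlv\n", rom_ver, rom_ver); /* 0x24 */

	/* WCN3988 uses a shift of 0x05 instead of 0x04. */
	rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f);
	printf("rom_ver = 0x%02x (WCN3988 rule)\n", rom_ver);		     /* 0x14 */
	return 0;
}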
+ */ + if (soc_type == QCA_WCN3988) + rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f); + else + rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f); + + if (soc_type == QCA_WCN6750) + qca_send_patch_config_cmd(hdev); + + /* Download rampatch file */ + config.type = TLV_TYPE_PATCH; + switch (soc_type) { + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + snprintf(config.fwname, sizeof(config.fwname), + "qca/crbtfw%02x.tlv", rom_ver); + break; + case QCA_WCN3988: + snprintf(config.fwname, sizeof(config.fwname), + "qca/apbtfw%02x.tlv", rom_ver); + break; + case QCA_QCA6390: + snprintf(config.fwname, sizeof(config.fwname), + "qca/htbtfw%02x.tlv", rom_ver); + break; + case QCA_WCN6750: + /* Choose mbn file by default.If mbn file is not found + * then choose tlv file + */ + config.type = ELF_TYPE_PATCH; + snprintf(config.fwname, sizeof(config.fwname), + "qca/msbtfw%02x.mbn", rom_ver); + break; + case QCA_WCN6855: + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpbtfw%02x.tlv", rom_ver); + break; + case QCA_WCN7850: + snprintf(config.fwname, sizeof(config.fwname), + "qca/hmtbtfw%02x.tlv", rom_ver); + break; + default: + snprintf(config.fwname, sizeof(config.fwname), + "qca/rampatch_%08x.bin", soc_ver); + } + + err = qca_download_firmware(hdev, &config, soc_type, rom_ver); + if (err < 0) { + bt_dev_err(hdev, "QCA Failed to download patch (%d)", err); + return err; + } + + /* Give the controller some time to get ready to receive the NVM */ + msleep(10); + + /* Download NVM configuration */ + config.type = TLV_TYPE_NVM; + if (firmware_name) { + snprintf(config.fwname, sizeof(config.fwname), + "qca/%s", firmware_name); + } else { + switch (soc_type) { + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02xu.bin", rom_ver); + } else { + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x.bin", rom_ver); + } + break; + case QCA_WCN3988: + snprintf(config.fwname, sizeof(config.fwname), + "qca/apnv%02x.bin", rom_ver); + break; + case QCA_QCA6390: + snprintf(config.fwname, sizeof(config.fwname), + "qca/htnv%02x.bin", rom_ver); + break; + case QCA_WCN6750: + snprintf(config.fwname, sizeof(config.fwname), + "qca/msnv%02x.bin", rom_ver); + break; + case QCA_WCN6855: + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpnv%02x.bin", rom_ver); + break; + case QCA_WCN7850: + snprintf(config.fwname, sizeof(config.fwname), + "qca/hmtnv%02x.bin", rom_ver); + break; + + default: + snprintf(config.fwname, sizeof(config.fwname), + "qca/nvm_%08x.bin", soc_ver); + } + } + + err = qca_download_firmware(hdev, &config, soc_type, rom_ver); + if (err < 0) { + bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err); + return err; + } + + switch (soc_type) { + case QCA_WCN3991: + case QCA_QCA6390: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + err = qca_disable_soc_logging(hdev); + if (err < 0) + return err; + break; + default: + break; + } + + /* WCN399x and WCN6750 supports the Microsoft vendor extension with 0xFD70 as the + * VsMsftOpCode. 
+ */ + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + hci_set_msft_opcode(hdev, 0xFD70); + break; + default: + break; + } + + /* Perform HCI reset */ + err = qca_send_reset(hdev); + if (err < 0) { + bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err); + return err; + } + + switch (soc_type) { + case QCA_WCN3991: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + /* get fw build info */ + err = qca_read_fw_build_info(hdev); + if (err < 0) + return err; + break; + default: + break; + } + + bt_dev_info(hdev, "QCA setup on UART is completed"); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_uart_setup); + +int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr, + HCI_EV_VENDOR, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_set_bdaddr); + + +MODULE_AUTHOR("Ben Young Tae Kim "); +MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h new file mode 100644 index 000000000..03bff5c00 --- /dev/null +++ b/drivers/bluetooth/btqca.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Bluetooth supports for Qualcomm Atheros ROME chips + * + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + */ + +#define EDL_PATCH_CMD_OPCODE (0xFC00) +#define EDL_NVM_ACCESS_OPCODE (0xFC0B) +#define EDL_WRITE_BD_ADDR_OPCODE (0xFC14) +#define EDL_PATCH_CMD_LEN (1) +#define EDL_PATCH_VER_REQ_CMD (0x19) +#define EDL_PATCH_TLV_REQ_CMD (0x1E) +#define EDL_GET_BUILD_INFO_CMD (0x20) +#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) +#define EDL_PATCH_CONFIG_CMD (0x28) +#define MAX_SIZE_PER_TLV_SEGMENT (243) +#define QCA_PRE_SHUTDOWN_CMD (0xFC08) +#define QCA_DISABLE_LOGGING (0xFC17) + +#define EDL_CMD_REQ_RES_EVT (0x00) +#define EDL_PATCH_VER_RES_EVT (0x19) +#define EDL_APP_VER_RES_EVT (0x02) +#define EDL_TVL_DNLD_RES_EVT (0x04) +#define EDL_CMD_EXE_STATUS_EVT (0x00) +#define EDL_SET_BAUDRATE_RSP_EVT (0x92) +#define EDL_NVM_ACCESS_CODE_EVT (0x0B) +#define EDL_PATCH_CONFIG_RES_EVT (0x00) +#define QCA_DISABLE_LOGGING_SUB_OP (0x14) + +#define EDL_TAG_ID_HCI (17) +#define EDL_TAG_ID_DEEP_SLEEP (27) + +#define QCA_WCN3990_POWERON_PULSE 0xFC +#define QCA_WCN3990_POWEROFF_PULSE 0xC0 + +#define QCA_HCI_CC_OPCODE 0xFC00 +#define QCA_HCI_CC_SUCCESS 0x00 + +#define QCA_WCN3991_SOC_ID (0x40014320) + +/* QCA chipset version can be decided by patch and SoC + * version, combination with upper 2 bytes from SoC + * and lower 2 bytes from patch will be used. 
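A quick numeric illustration of the get_soc_ver() macro defined just below: with QCA_WCN3991_SOC_ID and a hypothetical 16-bit ROM version of 0x0214 the result is 0x43200214, because only the low 16 bits of the SoC ID survive the 16-bit left shift in a 32-bit value. Endianness conversions are dropped here since plain CPU-order values are used:

#include <stdio.h>
#include <stdint.h>

#define QCA_WCN3991_SOC_ID	0x40014320u

int main(void)
{
	uint32_t soc_id  = QCA_WCN3991_SOC_ID;
	uint16_t rom_ver = 0x0214;		/* hypothetical */
	uint32_t soc_ver = (soc_id << 16) | rom_ver;

	/* Upper half: low 16 bits of the SoC ID; lower half: ROM version. */
	printf("soc_ver = 0x%08x\n", soc_ver);	/* 0x43200214 */
	return 0;
}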
+ */ +#define get_soc_ver(soc_id, rom_ver) \ + ((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver))) + +#define QCA_FW_BUILD_VER_LEN 255 + + +enum qca_baudrate { + QCA_BAUDRATE_115200 = 0, + QCA_BAUDRATE_57600, + QCA_BAUDRATE_38400, + QCA_BAUDRATE_19200, + QCA_BAUDRATE_9600, + QCA_BAUDRATE_230400, + QCA_BAUDRATE_250000, + QCA_BAUDRATE_460800, + QCA_BAUDRATE_500000, + QCA_BAUDRATE_720000, + QCA_BAUDRATE_921600, + QCA_BAUDRATE_1000000, + QCA_BAUDRATE_1250000, + QCA_BAUDRATE_2000000, + QCA_BAUDRATE_3000000, + QCA_BAUDRATE_4000000, + QCA_BAUDRATE_1600000, + QCA_BAUDRATE_3200000, + QCA_BAUDRATE_3500000, + QCA_BAUDRATE_AUTO = 0xFE, + QCA_BAUDRATE_RESERVED +}; + +enum qca_tlv_dnld_mode { + QCA_SKIP_EVT_NONE, + QCA_SKIP_EVT_VSE, + QCA_SKIP_EVT_CC, + QCA_SKIP_EVT_VSE_CC +}; + +enum qca_tlv_type { + TLV_TYPE_PATCH = 1, + TLV_TYPE_NVM, + ELF_TYPE_PATCH, +}; + +struct qca_fw_config { + u8 type; + char fwname[64]; + uint8_t user_baud_rate; + enum qca_tlv_dnld_mode dnld_mode; + enum qca_tlv_dnld_mode dnld_type; +}; + +struct edl_event_hdr { + __u8 cresp; + __u8 rtype; + __u8 data[]; +} __packed; + +struct qca_btsoc_version { + __le32 product_id; + __le16 patch_ver; + __le16 rom_ver; + __le32 soc_id; +} __packed; + +struct tlv_seg_resp { + __u8 result; +} __packed; + +struct tlv_type_patch { + __le32 total_size; + __le32 data_length; + __u8 format_version; + __u8 signature; + __u8 download_mode; + __u8 reserved1; + __le16 product_id; + __le16 rom_build; + __le16 patch_version; + __le16 reserved2; + __le32 entry; +} __packed; + +struct tlv_type_nvm { + __le16 tag_id; + __le16 tag_len; + __le32 reserve1; + __le32 reserve2; + __u8 data[]; +} __packed; + +struct tlv_type_hdr { + __le32 type_len; + __u8 data[]; +} __packed; + +enum qca_btsoc_type { + QCA_INVALID = -1, + QCA_AR3002, + QCA_ROME, + QCA_WCN3988, + QCA_WCN3990, + QCA_WCN3998, + QCA_WCN3991, + QCA_QCA6390, + QCA_WCN6750, + QCA_WCN6855, + QCA_WCN7850, +}; + +#if IS_ENABLED(CONFIG_BT_QCA) + +int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, + enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, + const char *firmware_name); +int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, + enum qca_btsoc_type); +int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int qca_send_pre_shutdown_cmd(struct hci_dev *hdev); +#else + +static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, + enum qca_btsoc_type soc_type, + struct qca_btsoc_version ver, + const char *firmware_name) +{ + return -EOPNOTSUPP; +} + +static inline int qca_read_soc_version(struct hci_dev *hdev, + struct qca_btsoc_version *ver, + enum qca_btsoc_type) +{ + return -EOPNOTSUPP; +} + +static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} +#endif diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c new file mode 100644 index 000000000..11c7e04bf --- /dev/null +++ b/drivers/bluetooth/btqcomsmd.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016, Linaro Ltd. + * Copyright (c) 2015, Sony Mobile Communications Inc. 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "btqca.h" + +struct btqcomsmd { + struct hci_dev *hdev; + + struct rpmsg_endpoint *acl_channel; + struct rpmsg_endpoint *cmd_channel; +}; + +static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type, + const void *data, size_t count) +{ + struct sk_buff *skb; + + /* Use GFP_ATOMIC as we're in IRQ context */ + skb = bt_skb_alloc(count, GFP_ATOMIC); + if (!skb) { + hdev->stat.err_rx++; + return -ENOMEM; + } + + hci_skb_pkt_type(skb) = type; + skb_put_data(skb, data, count); + + return hci_recv_frame(hdev, skb); +} + +static int btqcomsmd_acl_callback(struct rpmsg_device *rpdev, void *data, + int count, void *priv, u32 addr) +{ + struct btqcomsmd *btq = priv; + + btq->hdev->stat.byte_rx += count; + return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count); +} + +static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data, + int count, void *priv, u32 addr) +{ + struct btqcomsmd *btq = priv; + + btq->hdev->stat.byte_rx += count; + return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count); +} + +static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btqcomsmd *btq = hci_get_drvdata(hdev); + int ret; + + switch (hci_skb_pkt_type(skb)) { + case HCI_ACLDATA_PKT: + ret = rpmsg_send(btq->acl_channel, skb->data, skb->len); + if (ret) { + hdev->stat.err_tx++; + break; + } + hdev->stat.acl_tx++; + hdev->stat.byte_tx += skb->len; + break; + case HCI_COMMAND_PKT: + ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len); + if (ret) { + hdev->stat.err_tx++; + break; + } + hdev->stat.cmd_tx++; + hdev->stat.byte_tx += skb->len; + break; + default: + ret = -EILSEQ; + break; + } + + if (!ret) + kfree_skb(skb); + + return ret; +} + +static int btqcomsmd_open(struct hci_dev *hdev) +{ + return 0; +} + +static int btqcomsmd_close(struct hci_dev *hdev) +{ + return 0; +} + +static int btqcomsmd_setup(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + /* Devices do not have persistent storage for BD address. Retrieve + * it from the firmware node property. + */ + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + + return 0; +} + +static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + int ret; + + ret = qca_set_bdaddr_rome(hdev, bdaddr); + if (ret) + return ret; + + /* The firmware stops responding for a while after setting the bdaddr, + * causing timeouts for subsequent commands. Sleep a bit to avoid this. 
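btqcomsmd_set_bdaddr() above defers to qca_set_bdaddr_rome(), whose 9-byte NVM-access payload is easy to picture. A sketch with placeholder address bytes (the real call copies a bdaddr_t in the kernel's own byte order) rather than a claim about any particular device:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Placeholder address bytes, in bdaddr_t storage order. */
	uint8_t bdaddr[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint8_t cmd[9];

	cmd[0] = 0x01;			/* EDL_NVM_ACCESS_SET_REQ_CMD */
	cmd[1] = 0x02;			/* TAG ID                     */
	cmd[2] = sizeof(bdaddr);	/* tag size: 6                */
	memcpy(cmd + 3, bdaddr, sizeof(bdaddr));

	/* Sent with EDL_NVM_ACCESS_OPCODE (0xFC0B) in the driver. */
	for (size_t i = 0; i < sizeof(cmd); i++)
		printf("%02x ", cmd[i]);
	printf("\n");
	return 0;
}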
+ */ + usleep_range(1000, 10000); + return 0; +} + +static int btqcomsmd_probe(struct platform_device *pdev) +{ + struct btqcomsmd *btq; + struct hci_dev *hdev; + void *wcnss; + int ret; + + btq = devm_kzalloc(&pdev->dev, sizeof(*btq), GFP_KERNEL); + if (!btq) + return -ENOMEM; + + wcnss = dev_get_drvdata(pdev->dev.parent); + + btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL", + btqcomsmd_acl_callback, btq); + if (IS_ERR(btq->acl_channel)) + return PTR_ERR(btq->acl_channel); + + btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", + btqcomsmd_cmd_callback, btq); + if (IS_ERR(btq->cmd_channel)) { + ret = PTR_ERR(btq->cmd_channel); + goto destroy_acl_channel; + } + + hdev = hci_alloc_dev(); + if (!hdev) { + ret = -ENOMEM; + goto destroy_cmd_channel; + } + + hci_set_drvdata(hdev, btq); + btq->hdev = hdev; + SET_HCIDEV_DEV(hdev, &pdev->dev); + + hdev->bus = HCI_SMD; + hdev->open = btqcomsmd_open; + hdev->close = btqcomsmd_close; + hdev->send = btqcomsmd_send; + hdev->setup = btqcomsmd_setup; + hdev->set_bdaddr = btqcomsmd_set_bdaddr; + + ret = hci_register_dev(hdev); + if (ret < 0) + goto hci_free_dev; + + platform_set_drvdata(pdev, btq); + + return 0; + +hci_free_dev: + hci_free_dev(hdev); +destroy_cmd_channel: + rpmsg_destroy_ept(btq->cmd_channel); +destroy_acl_channel: + rpmsg_destroy_ept(btq->acl_channel); + + return ret; +} + +static int btqcomsmd_remove(struct platform_device *pdev) +{ + struct btqcomsmd *btq = platform_get_drvdata(pdev); + + hci_unregister_dev(btq->hdev); + hci_free_dev(btq->hdev); + + rpmsg_destroy_ept(btq->cmd_channel); + rpmsg_destroy_ept(btq->acl_channel); + + return 0; +} + +static const struct of_device_id btqcomsmd_of_match[] = { + { .compatible = "qcom,wcnss-bt", }, + { }, +}; +MODULE_DEVICE_TABLE(of, btqcomsmd_of_match); + +static struct platform_driver btqcomsmd_driver = { + .probe = btqcomsmd_probe, + .remove = btqcomsmd_remove, + .driver = { + .name = "btqcomsmd", + .of_match_table = btqcomsmd_of_match, + }, +}; + +module_platform_driver(btqcomsmd_driver); + +MODULE_AUTHOR("Bjorn Andersson "); +MODULE_DESCRIPTION("Qualcomm SMD HCI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c new file mode 100644 index 000000000..634cf8f5e --- /dev/null +++ b/drivers/bluetooth/btrsi.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2017 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include + +#define RSI_DMA_ALIGN 8 +#define RSI_FRAME_DESC_SIZE 16 +#define RSI_HEADROOM_FOR_BT_HAL (RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN) + +struct rsi_hci_adapter { + void *priv; + struct rsi_proto_ops *proto_ops; + struct hci_dev *hdev; +}; + +static int rsi_hci_open(struct hci_dev *hdev) +{ + return 0; +} + +static int rsi_hci_close(struct hci_dev *hdev) +{ + return 0; +} + +static int rsi_hci_flush(struct hci_dev *hdev) +{ + return 0; +} + +static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct rsi_hci_adapter *h_adapter = hci_get_drvdata(hdev); + struct sk_buff *new_skb = NULL; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + if (skb_headroom(skb) < RSI_HEADROOM_FOR_BT_HAL) { + /* Insufficient skb headroom - allocate a new skb */ + new_skb = skb_realloc_headroom(skb, RSI_HEADROOM_FOR_BT_HAL); + if (unlikely(!new_skb)) + return -ENOMEM; + bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb); + kfree_skb(skb); + skb = new_skb; + if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) { + u8 *skb_data = skb->data; + int skb_len = skb->len; + + skb_push(skb, RSI_DMA_ALIGN); + skb_pull(skb, PTR_ALIGN(skb->data, + RSI_DMA_ALIGN) - skb->data); + memmove(skb->data, skb_data, skb_len); + skb_trim(skb, skb_len); + } + } + + return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb, + RSI_BT_Q); +} + +static int rsi_hci_recv_pkt(void *priv, const u8 *pkt) +{ + struct rsi_hci_adapter *h_adapter = priv; + struct hci_dev *hdev = h_adapter->hdev; + struct sk_buff *skb; + int pkt_len = get_unaligned_le16(pkt) & 0x0fff; + + skb = dev_alloc_skb(pkt_len); + if (!skb) + return -ENOMEM; + + memcpy(skb->data, pkt + RSI_FRAME_DESC_SIZE, pkt_len); + skb_put(skb, pkt_len); + h_adapter->hdev->stat.byte_rx += skb->len; + + hci_skb_pkt_type(skb) = pkt[14]; + + return hci_recv_frame(hdev, skb); +} + +static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops) +{ + struct rsi_hci_adapter *h_adapter = NULL; + struct hci_dev *hdev; + int err = 0; + + h_adapter = kzalloc(sizeof(*h_adapter), GFP_KERNEL); + if (!h_adapter) + return -ENOMEM; + + h_adapter->priv = priv; + ops->set_bt_context(priv, h_adapter); + h_adapter->proto_ops = ops; + + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Failed to alloc HCI device"); + goto err; + } + + h_adapter->hdev = hdev; + + if (ops->get_host_intf(priv) == RSI_HOST_INTF_SDIO) + hdev->bus = HCI_SDIO; + else + hdev->bus = HCI_USB; + + hci_set_drvdata(hdev, h_adapter); + hdev->dev_type = HCI_PRIMARY; + hdev->open = rsi_hci_open; + hdev->close = rsi_hci_close; + hdev->flush = rsi_hci_flush; + hdev->send = rsi_hci_send_pkt; + + err = hci_register_dev(hdev); + if (err < 0) { + BT_ERR("HCI registration failed with errcode %d", err); + hci_free_dev(hdev); + goto err; + } + + return 0; +err: + h_adapter->hdev = NULL; + kfree(h_adapter); + return -EINVAL; +} + +static void rsi_hci_detach(void *priv) +{ + struct rsi_hci_adapter *h_adapter = priv; + struct hci_dev *hdev; + + if (!h_adapter) + return; + + hdev = h_adapter->hdev; + if (hdev) { + hci_unregister_dev(hdev); + hci_free_dev(hdev); + h_adapter->hdev = NULL; + } + + kfree(h_adapter); +} + +const struct rsi_mod_ops rsi_bt_ops = { + .attach = rsi_hci_attach, + .detach = rsi_hci_detach, + .recv_pkt = rsi_hci_recv_pkt, +}; +EXPORT_SYMBOL(rsi_bt_ops); + +static int 
rsi_91x_bt_module_init(void) +{ + return 0; +} + +static void rsi_91x_bt_module_exit(void) +{ + return; +} + +module_init(rsi_91x_bt_module_init); +module_exit(rsi_91x_bt_module_exit); +MODULE_AUTHOR("Redpine Signals Inc"); +MODULE_DESCRIPTION("RSI BT driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c new file mode 100644 index 000000000..277d039ec --- /dev/null +++ b/drivers/bluetooth/btrtl.c @@ -0,0 +1,1510 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Bluetooth support for Realtek devices + * + * Copyright (C) 2015 Endless Mobile, Inc. + */ + +#include +#include +#include +#include + +#include +#include + +#include "btrtl.h" + +#define VERSION "0.1" + +#define RTL_CHIP_8723CS_CG 3 +#define RTL_CHIP_8723CS_VF 4 +#define RTL_CHIP_8723CS_XX 5 +#define RTL_EPATCH_SIGNATURE "Realtech" +#define RTL_EPATCH_SIGNATURE_V2 "RTBTCore" +#define RTL_ROM_LMP_8703B 0x8703 +#define RTL_ROM_LMP_8723A 0x1200 +#define RTL_ROM_LMP_8723B 0x8723 +#define RTL_ROM_LMP_8821A 0x8821 +#define RTL_ROM_LMP_8761A 0x8761 +#define RTL_ROM_LMP_8822B 0x8822 +#define RTL_ROM_LMP_8852A 0x8852 +#define RTL_ROM_LMP_8851B 0x8851 +#define RTL_CONFIG_MAGIC 0x8723ab55 + +#define RTL_VSC_OP_COREDUMP 0xfcff + +#define IC_MATCH_FL_LMPSUBV (1 << 0) +#define IC_MATCH_FL_HCIREV (1 << 1) +#define IC_MATCH_FL_HCIVER (1 << 2) +#define IC_MATCH_FL_HCIBUS (1 << 3) +#define IC_MATCH_FL_CHIP_TYPE (1 << 4) +#define IC_INFO(lmps, hcir, hciv, bus) \ + .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | \ + IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, \ + .lmp_subver = (lmps), \ + .hci_rev = (hcir), \ + .hci_ver = (hciv), \ + .hci_bus = (bus) + +#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}}) +#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}}) +#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}}) + +#define RTL_PATCH_SNIPPETS 0x01 +#define RTL_PATCH_DUMMY_HEADER 0x02 +#define RTL_PATCH_SECURITY_HEADER 0x03 + +enum btrtl_chip_id { + CHIP_ID_8723A, + CHIP_ID_8723B, + CHIP_ID_8821A, + CHIP_ID_8761A, + CHIP_ID_8822B = 8, + CHIP_ID_8723D, + CHIP_ID_8821C, + CHIP_ID_8822C = 13, + CHIP_ID_8761B, + CHIP_ID_8852A = 18, + CHIP_ID_8852B = 20, + CHIP_ID_8852C = 25, + CHIP_ID_8851B = 36, +}; + +struct id_table { + __u16 match_flags; + __u16 lmp_subver; + __u16 hci_rev; + __u8 hci_ver; + __u8 hci_bus; + __u8 chip_type; + bool config_needed; + bool has_rom_version; + bool has_msft_ext; + char *fw_name; + char *cfg_name; + char *hw_info; +}; + +struct btrtl_device_info { + const struct id_table *ic_info; + u8 rom_version; + u8 *fw_data; + int fw_len; + u8 *cfg_data; + int cfg_len; + bool drop_fw; + int project_id; + u8 key_id; + struct list_head patch_subsecs; +}; + +static const struct id_table ic_id_table[] = { + /* 8723A */ + { IC_INFO(RTL_ROM_LMP_8723A, 0xb, 0x6, HCI_USB), + .config_needed = false, + .has_rom_version = false, + .fw_name = "rtl_bt/rtl8723a_fw", + .cfg_name = NULL, + .hw_info = "rtl8723au" }, + + /* 8723BS */ + { IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723bs_fw", + .cfg_name = "rtl_bt/rtl8723bs_config", + .hw_info = "rtl8723bs" }, + + /* 8723B */ + { IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723b_fw", + .cfg_name = "rtl_bt/rtl8723b_config", + .hw_info = "rtl8723bu" }, + + /* 8723CS-CG */ + { .match_flags = 
IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | + IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8703B, + .chip_type = RTL_CHIP_8723CS_CG, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723cs_cg_fw", + .cfg_name = "rtl_bt/rtl8723cs_cg_config", + .hw_info = "rtl8723cs-cg" }, + + /* 8723CS-VF */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | + IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8703B, + .chip_type = RTL_CHIP_8723CS_VF, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723cs_vf_fw", + .cfg_name = "rtl_bt/rtl8723cs_vf_config", + .hw_info = "rtl8723cs-vf" }, + + /* 8723CS-XX */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | + IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8703B, + .chip_type = RTL_CHIP_8723CS_XX, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723cs_xx_fw", + .cfg_name = "rtl_bt/rtl8723cs_xx_config", + .hw_info = "rtl8723cs" }, + + /* 8723D */ + { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB), + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723d_fw", + .cfg_name = "rtl_bt/rtl8723d_config", + .hw_info = "rtl8723du" }, + + /* 8723DS */ + { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723ds_fw", + .cfg_name = "rtl_bt/rtl8723ds_config", + .hw_info = "rtl8723ds" }, + + /* 8821A */ + { IC_INFO(RTL_ROM_LMP_8821A, 0xa, 0x6, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8821a_fw", + .cfg_name = "rtl_bt/rtl8821a_config", + .hw_info = "rtl8821au" }, + + /* 8821C */ + { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8821c_fw", + .cfg_name = "rtl_bt/rtl8821c_config", + .hw_info = "rtl8821cu" }, + + /* 8821CS */ + { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8821cs_fw", + .cfg_name = "rtl_bt/rtl8821cs_config", + .hw_info = "rtl8821cs" }, + + /* 8761A */ + { IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8761a_fw", + .cfg_name = "rtl_bt/rtl8761a_config", + .hw_info = "rtl8761au" }, + + /* 8761B */ + { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8761b_fw", + .cfg_name = "rtl_bt/rtl8761b_config", + .hw_info = "rtl8761btv" }, + + /* 8761BU */ + { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8761bu_fw", + .cfg_name = "rtl_bt/rtl8761bu_config", + .hw_info = "rtl8761bu" }, + + /* 8822C with UART interface */ + { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8822cs_fw", + .cfg_name = "rtl_bt/rtl8822cs_config", + .hw_info = "rtl8822cs" }, + + /* 8822C with UART interface */ + { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8822cs_fw", + .cfg_name = "rtl_bt/rtl8822cs_config", + .hw_info = "rtl8822cs" }, + + /* 8822C with USB interface */ + { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB), + 
.config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8822cu_fw", + .cfg_name = "rtl_bt/rtl8822cu_config", + .hw_info = "rtl8822cu" }, + + /* 8822B */ + { IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8822b_fw", + .cfg_name = "rtl_bt/rtl8822b_config", + .hw_info = "rtl8822bu" }, + + /* 8852A */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8852au_fw", + .cfg_name = "rtl_bt/rtl8852au_config", + .hw_info = "rtl8852au" }, + + /* 8852B with UART interface */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8852bs_fw", + .cfg_name = "rtl_bt/rtl8852bs_config", + .hw_info = "rtl8852bs" }, + + /* 8852B */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8852bu_fw", + .cfg_name = "rtl_bt/rtl8852bu_config", + .hw_info = "rtl8852bu" }, + + /* 8852C */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xc, 0xc, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8852cu_fw", + .cfg_name = "rtl_bt/rtl8852cu_config", + .hw_info = "rtl8852cu" }, + + /* 8851B */ + { IC_INFO(RTL_ROM_LMP_8851B, 0xb, 0xc, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = false, + .fw_name = "rtl_bt/rtl8851bu_fw", + .cfg_name = "rtl_bt/rtl8851bu_config", + .hw_info = "rtl8851bu" }, + }; + +static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, + u8 hci_ver, u8 hci_bus, + u8 chip_type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) { + if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) && + (ic_id_table[i].lmp_subver != lmp_subver)) + continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && + (ic_id_table[i].hci_rev != hci_rev)) + continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) && + (ic_id_table[i].hci_ver != hci_ver)) + continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) && + (ic_id_table[i].hci_bus != hci_bus)) + continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_CHIP_TYPE) && + (ic_id_table[i].chip_type != chip_type)) + continue; + + break; + } + if (i >= ARRAY_SIZE(ic_id_table)) + return NULL; + + return &ic_id_table[i]; +} + +static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)", + PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_version)) { + rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch"); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) +{ + struct rtl_rom_version_evt *rom_version; + struct sk_buff *skb; + + /* Read RTL ROM version command */ + skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(hdev, "Read ROM version failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rom_version)) { + rtl_dev_err(hdev, "version event length mismatch"); + kfree_skb(skb); + return -EIO; + } + + 
rom_version = (struct rtl_rom_version_evt *)skb->data; + rtl_dev_info(hdev, "rom_version status=%x version=%x", + rom_version->status, rom_version->version); + + *version = rom_version->version; + + kfree_skb(skb); + return 0; +} + +static int btrtl_vendor_read_reg16(struct hci_dev *hdev, + struct rtl_vendor_cmd *cmd, u8 *rp) +{ + struct sk_buff *skb; + int err = 0; + + skb = __hci_cmd_sync(hdev, 0xfc61, sizeof(*cmd), cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + rtl_dev_err(hdev, "RTL: Read reg16 failed (%d)", err); + return err; + } + + if (skb->len != 3 || skb->data[0]) { + bt_dev_err(hdev, "RTL: Read reg16 length mismatch"); + kfree_skb(skb); + return -EIO; + } + + if (rp) + memcpy(rp, skb->data + 1, 2); + + kfree_skb(skb); + + return 0; +} + +static void *rtl_iov_pull_data(struct rtl_iovec *iov, u32 len) +{ + void *data = iov->data; + + if (iov->len < len) + return NULL; + + iov->data += len; + iov->len -= len; + + return data; +} + +static void btrtl_insert_ordered_subsec(struct rtl_subsection *node, + struct btrtl_device_info *btrtl_dev) +{ + struct list_head *pos; + struct list_head *next; + struct rtl_subsection *subsec; + + list_for_each_safe(pos, next, &btrtl_dev->patch_subsecs) { + subsec = list_entry(pos, struct rtl_subsection, list); + if (subsec->prio >= node->prio) + break; + } + __list_add(&node->list, pos->prev, pos); +} + +static int btrtl_parse_section(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, u32 opcode, + u8 *data, u32 len) +{ + struct rtl_section_hdr *hdr; + struct rtl_subsection *subsec; + struct rtl_common_subsec *common_subsec; + struct rtl_sec_hdr *sec_hdr; + int i; + u8 *ptr; + u16 num_subsecs; + u32 subsec_len; + int rc = 0; + struct rtl_iovec iov = { + .data = data, + .len = len, + }; + + hdr = rtl_iov_pull_data(&iov, sizeof(*hdr)); + if (!hdr) + return -EINVAL; + num_subsecs = le16_to_cpu(hdr->num); + + for (i = 0; i < num_subsecs; i++) { + common_subsec = rtl_iov_pull_data(&iov, sizeof(*common_subsec)); + if (!common_subsec) + break; + subsec_len = le32_to_cpu(common_subsec->len); + + rtl_dev_dbg(hdev, "subsec, eco 0x%02x, len %08x", + common_subsec->eco, subsec_len); + + ptr = rtl_iov_pull_data(&iov, subsec_len); + if (!ptr) + break; + + if (common_subsec->eco != btrtl_dev->rom_version + 1) + continue; + + switch (opcode) { + case RTL_PATCH_SECURITY_HEADER: + sec_hdr = (void *)common_subsec; + if (sec_hdr->key_id != btrtl_dev->key_id) + continue; + break; + } + + subsec = kzalloc(sizeof(*subsec), GFP_KERNEL); + if (!subsec) + return -ENOMEM; + subsec->opcode = opcode; + subsec->prio = common_subsec->prio; + subsec->len = subsec_len; + subsec->data = ptr; + btrtl_insert_ordered_subsec(subsec, btrtl_dev); + rc += subsec_len; + } + + return rc; +} + +static int rtlbt_parse_firmware_v2(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned char **_buf) +{ + struct rtl_epatch_header_v2 *hdr; + int rc; + u8 reg_val[2]; + u8 key_id; + u32 num_sections; + struct rtl_section *section; + struct rtl_subsection *entry, *tmp; + u32 section_len; + u32 opcode; + int len = 0; + int i; + u8 *ptr; + struct rtl_iovec iov = { + .data = btrtl_dev->fw_data, + .len = btrtl_dev->fw_len - 7, /* Cut the tail */ + }; + + rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val); + if (rc < 0) + return -EIO; + key_id = reg_val[0]; + + rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id); + + btrtl_dev->key_id = key_id; + + hdr = rtl_iov_pull_data(&iov, sizeof(*hdr)); + if (!hdr) + return -EINVAL; + num_sections = 
le32_to_cpu(hdr->num_sections); + + rtl_dev_dbg(hdev, "FW version %08x-%08x", *((u32 *)hdr->fw_version), + *((u32 *)(hdr->fw_version + 4))); + + for (i = 0; i < num_sections; i++) { + section = rtl_iov_pull_data(&iov, sizeof(*section)); + if (!section) + break; + section_len = le32_to_cpu(section->len); + opcode = le32_to_cpu(section->opcode); + + rtl_dev_dbg(hdev, "opcode 0x%04x", section->opcode); + + ptr = rtl_iov_pull_data(&iov, section_len); + if (!ptr) + break; + + switch (opcode) { + case RTL_PATCH_SNIPPETS: + rc = btrtl_parse_section(hdev, btrtl_dev, opcode, + ptr, section_len); + break; + case RTL_PATCH_SECURITY_HEADER: + /* If key_id from chip is zero, ignore all security + * headers. + */ + if (!key_id) + break; + rc = btrtl_parse_section(hdev, btrtl_dev, opcode, + ptr, section_len); + break; + case RTL_PATCH_DUMMY_HEADER: + rc = btrtl_parse_section(hdev, btrtl_dev, opcode, + ptr, section_len); + break; + default: + rc = 0; + break; + } + if (rc < 0) { + rtl_dev_err(hdev, "RTL: Parse section (%u) err %d", + opcode, rc); + return rc; + } + len += rc; + } + + if (!len) + return -ENODATA; + + /* Allocate mem and copy all found subsecs. */ + ptr = kvmalloc(len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + len = 0; + list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) { + rtl_dev_dbg(hdev, "RTL: opcode %08x, addr %p, len 0x%x", + entry->opcode, entry->data, entry->len); + memcpy(ptr + len, entry->data, entry->len); + len += entry->len; + } + + if (!len) + return -EPERM; + + *_buf = ptr; + return len; +} + +static int rtlbt_parse_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned char **_buf) +{ + static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; + struct btrealtek_data *coredump_info = hci_get_priv(hdev); + struct rtl_epatch_header *epatch_info; + unsigned char *buf; + int i, len; + size_t min_size; + u8 opcode, length, data; + int project_id = -1; + const unsigned char *fwptr, *chip_id_base; + const unsigned char *patch_length_base, *patch_offset_base; + u32 patch_offset = 0; + u16 patch_length, num_patches; + static const struct { + __u16 lmp_subver; + __u8 id; + } project_id_to_lmp_subver[] = { + { RTL_ROM_LMP_8723A, 0 }, + { RTL_ROM_LMP_8723B, 1 }, + { RTL_ROM_LMP_8821A, 2 }, + { RTL_ROM_LMP_8761A, 3 }, + { RTL_ROM_LMP_8703B, 7 }, + { RTL_ROM_LMP_8822B, 8 }, + { RTL_ROM_LMP_8723B, 9 }, /* 8723D */ + { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ + { RTL_ROM_LMP_8822B, 13 }, /* 8822C */ + { RTL_ROM_LMP_8761A, 14 }, /* 8761B */ + { RTL_ROM_LMP_8852A, 18 }, /* 8852A */ + { RTL_ROM_LMP_8852A, 20 }, /* 8852B */ + { RTL_ROM_LMP_8852A, 25 }, /* 8852C */ + { RTL_ROM_LMP_8851B, 36 }, /* 8851B */ + }; + + if (btrtl_dev->fw_len <= 8) + return -EINVAL; + + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) + min_size = sizeof(struct rtl_epatch_header) + + sizeof(extension_sig) + 3; + else if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8)) + min_size = sizeof(struct rtl_epatch_header_v2) + + sizeof(extension_sig) + 3; + else + return -EINVAL; + + if (btrtl_dev->fw_len < min_size) + return -EINVAL; + + fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig); + if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { + rtl_dev_err(hdev, "extension section signature mismatch"); + return -EINVAL; + } + + /* Loop from the end of the firmware parsing instructions, until + * we find an instruction that identifies the "project ID" for the + * hardware supported by this firwmare file. 
+ * Once we have that, we double-check that project_id is suitable + * for the hardware we are working with. + */ + while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) { + opcode = *--fwptr; + length = *--fwptr; + data = *--fwptr; + + BT_DBG("check op=%x len=%x data=%x", opcode, length, data); + + if (opcode == 0xff) /* EOF */ + break; + + if (length == 0) { + rtl_dev_err(hdev, "found instruction with length 0"); + return -EINVAL; + } + + if (opcode == 0 && length == 1) { + project_id = data; + break; + } + + fwptr -= length; + } + + if (project_id < 0) { + rtl_dev_err(hdev, "failed to find version instruction"); + return -EINVAL; + } + + /* Find project_id in table */ + for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) { + if (project_id == project_id_to_lmp_subver[i].id) { + btrtl_dev->project_id = project_id; + break; + } + } + + if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { + rtl_dev_err(hdev, "unknown project id %d", project_id); + return -EINVAL; + } + + if (btrtl_dev->ic_info->lmp_subver != + project_id_to_lmp_subver[i].lmp_subver) { + rtl_dev_err(hdev, "firmware is for %x but this is a %x", + project_id_to_lmp_subver[i].lmp_subver, + btrtl_dev->ic_info->lmp_subver); + return -EINVAL; + } + + if (memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8) != 0) { + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8)) + return rtlbt_parse_firmware_v2(hdev, btrtl_dev, _buf); + rtl_dev_err(hdev, "bad EPATCH signature"); + return -EINVAL; + } + + epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; + num_patches = le16_to_cpu(epatch_info->num_patches); + + BT_DBG("fw_version=%x, num_patches=%d", + le32_to_cpu(epatch_info->fw_version), num_patches); + coredump_info->rtl_dump.fw_version = le32_to_cpu(epatch_info->fw_version); + + /* After the rtl_epatch_header there is a funky patch metadata section. + * Assuming 2 patches, the layout is: + * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 + * + * Find the right patch for this chip. + */ + min_size += 8 * num_patches; + if (btrtl_dev->fw_len < min_size) + return -EINVAL; + + chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header); + patch_length_base = chip_id_base + (sizeof(u16) * num_patches); + patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); + for (i = 0; i < num_patches; i++) { + u16 chip_id = get_unaligned_le16(chip_id_base + + (i * sizeof(u16))); + if (chip_id == btrtl_dev->rom_version + 1) { + patch_length = get_unaligned_le16(patch_length_base + + (i * sizeof(u16))); + patch_offset = get_unaligned_le32(patch_offset_base + + (i * sizeof(u32))); + break; + } + } + + if (!patch_offset) { + rtl_dev_err(hdev, "didn't find patch for chip id %d", + btrtl_dev->rom_version); + return -EINVAL; + } + + BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); + min_size = patch_offset + patch_length; + if (btrtl_dev->fw_len < min_size) + return -EINVAL; + + /* Copy the firmware into a new buffer and write the version at + * the end. 
+ */ + len = patch_length; + buf = kvmalloc(patch_length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4); + memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); + + *_buf = buf; + return len; +} + +static int rtl_download_firmware(struct hci_dev *hdev, + const unsigned char *data, int fw_len) +{ + struct rtl_download_cmd *dl_cmd; + int frag_num = fw_len / RTL_FRAG_LEN + 1; + int frag_len = RTL_FRAG_LEN; + int ret = 0; + int i; + int j = 0; + struct sk_buff *skb; + struct hci_rp_read_local_version *rp; + + dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); + if (!dl_cmd) + return -ENOMEM; + + for (i = 0; i < frag_num; i++) { + struct sk_buff *skb; + + dl_cmd->index = j++; + if (dl_cmd->index == 0x7f) + j = 1; + + if (i == (frag_num - 1)) { + dl_cmd->index |= 0x80; /* data end */ + frag_len = fw_len % RTL_FRAG_LEN; + } + rtl_dev_dbg(hdev, "download fw (%d/%d). index = %d", i, + frag_num, dl_cmd->index); + memcpy(dl_cmd->data, data, frag_len); + + /* Send download command */ + skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(hdev, "download fw command failed (%ld)", + PTR_ERR(skb)); + ret = PTR_ERR(skb); + goto out; + } + + if (skb->len != sizeof(struct rtl_download_response)) { + rtl_dev_err(hdev, "download fw event length mismatch"); + kfree_skb(skb); + ret = -EIO; + goto out; + } + + kfree_skb(skb); + data += RTL_FRAG_LEN; + } + + skb = btrtl_read_local_version(hdev); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + rtl_dev_err(hdev, "read local version failed"); + goto out; + } + + rp = (struct hci_rp_read_local_version *)skb->data; + rtl_dev_info(hdev, "fw version 0x%04x%04x", + __le16_to_cpu(rp->hci_rev), __le16_to_cpu(rp->lmp_subver)); + kfree_skb(skb); + +out: + kfree(dl_cmd); + return ret; +} + +static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) +{ + const struct firmware *fw; + int ret; + + rtl_dev_info(hdev, "loading %s", name); + ret = request_firmware(&fw, name, &hdev->dev); + if (ret < 0) + return ret; + ret = fw->size; + *buff = kvmalloc(fw->size, GFP_KERNEL); + if (*buff) + memcpy(*buff, fw->data, ret); + else + ret = -ENOMEM; + + release_firmware(fw); + + return ret; +} + +static int btrtl_setup_rtl8723a(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ + if (btrtl_dev->fw_len < 8) + return -EINVAL; + + /* Check that the firmware doesn't have the epatch signature + * (which is only for RTL8723B and newer). 
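The fragment indexing used by rtl_download_firmware() above can be summarized in a few lines: a 7-bit running index that restarts at 1 once it reaches 0x7f, bit 0x80 set on the final fragment, and a final fragment length of fw_len % RTL_FRAG_LEN. RTL_FRAG_LEN itself is defined in btrtl.h, which is not part of this hunk; the value 252 below is an assumption made only for the illustration:

#include <stdio.h>

#define RTL_FRAG_LEN 252	/* assumed value for illustration; see btrtl.h */

int main(void)
{
	int fw_len = 1000;	/* hypothetical firmware size */
	int frag_num = fw_len / RTL_FRAG_LEN + 1;
	int i, j = 0;

	for (i = 0; i < frag_num; i++) {
		int index = j++;
		int frag_len = RTL_FRAG_LEN;

		if (index == 0x7f)	/* 7-bit index wraps back to 1 */
			j = 1;
		if (i == frag_num - 1) {
			index |= 0x80;	/* mark the final fragment */
			frag_len = fw_len % RTL_FRAG_LEN;
		}
		printf("frag %d: index=0x%02x len=%d\n", i, index, frag_len);
	}
	return 0;
}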
+ */ + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) { + rtl_dev_err(hdev, "unexpected EPATCH signature!"); + return -EINVAL; + } + + return rtl_download_firmware(hdev, btrtl_dev->fw_data, + btrtl_dev->fw_len); +} + +static int btrtl_setup_rtl8723b(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ + unsigned char *fw_data = NULL; + int ret; + u8 *tbuff; + + ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data); + if (ret < 0) + goto out; + + if (btrtl_dev->cfg_len > 0) { + tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); + if (!tbuff) { + ret = -ENOMEM; + goto out; + } + + memcpy(tbuff, fw_data, ret); + kvfree(fw_data); + + memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); + ret += btrtl_dev->cfg_len; + + fw_data = tbuff; + } + + rtl_dev_info(hdev, "cfg_sz %d, total sz %d", btrtl_dev->cfg_len, ret); + + ret = rtl_download_firmware(hdev, fw_data, ret); + +out: + kvfree(fw_data); + return ret; +} + +static void btrtl_coredump(struct hci_dev *hdev) +{ + static const u8 param[] = { 0x00, 0x00 }; + + __hci_cmd_send(hdev, RTL_VSC_OP_COREDUMP, sizeof(param), param); +} + +static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btrealtek_data *coredump_info = hci_get_priv(hdev); + char buf[80]; + + if (coredump_info->rtl_dump.controller) + snprintf(buf, sizeof(buf), "Controller Name: %s\n", + coredump_info->rtl_dump.controller); + else + snprintf(buf, sizeof(buf), "Controller Name: Unknown\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", + coredump_info->rtl_dump.fw_version); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info->rtl_dump.driver_name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: Realtek\n"); + skb_put_data(skb, buf, strlen(buf)); +} + +static void btrtl_register_devcoredump_support(struct hci_dev *hdev) +{ + hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL); + +} + +void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name) +{ + struct btrealtek_data *coredump_info = hci_get_priv(hdev); + + coredump_info->rtl_dump.driver_name = driver_name; +} +EXPORT_SYMBOL_GPL(btrtl_set_driver_name); + +static bool rtl_has_chip_type(u16 lmp_subver) +{ + switch (lmp_subver) { + case RTL_ROM_LMP_8703B: + return true; + default: + break; + } + + return false; +} + +static int rtl_read_chip_type(struct hci_dev *hdev, u8 *type) +{ + struct rtl_chip_type_evt *chip_type; + struct sk_buff *skb; + const unsigned char cmd_buf[] = {0x00, 0x94, 0xa0, 0x00, 0xb0}; + + /* Read RTL chip type command */ + skb = __hci_cmd_sync(hdev, 0xfc61, 5, cmd_buf, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(hdev, "Read chip type failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + chip_type = skb_pull_data(skb, sizeof(*chip_type)); + if (!chip_type) { + rtl_dev_err(hdev, "RTL chip type event length mismatch"); + kfree_skb(skb); + return -EIO; + } + + rtl_dev_info(hdev, "chip_type status=%x type=%x", + chip_type->status, chip_type->type); + + *type = chip_type->type & 0x0f; + + kfree_skb(skb); + return 0; +} + +void btrtl_free(struct btrtl_device_info *btrtl_dev) +{ + struct rtl_subsection *entry, *tmp; + + kvfree(btrtl_dev->fw_data); + kvfree(btrtl_dev->cfg_data); + + list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) { + list_del(&entry->list); + kfree(entry); + } + + kfree(btrtl_dev); +} +EXPORT_SYMBOL_GPL(btrtl_free); + +struct btrtl_device_info 
*btrtl_initialize(struct hci_dev *hdev, + const char *postfix) +{ + struct btrealtek_data *coredump_info = hci_get_priv(hdev); + struct btrtl_device_info *btrtl_dev; + struct sk_buff *skb; + struct hci_rp_read_local_version *resp; + struct hci_command_hdr *cmd; + char fw_name[40]; + char cfg_name[40]; + u16 hci_rev, lmp_subver; + u8 hci_ver, lmp_ver, chip_type = 0; + int ret; + u8 reg_val[2]; + + btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); + if (!btrtl_dev) { + ret = -ENOMEM; + goto err_alloc; + } + + INIT_LIST_HEAD(&btrtl_dev->patch_subsecs); + +check_version: + ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_SUBVER, reg_val); + if (ret < 0) + goto err_free; + lmp_subver = get_unaligned_le16(reg_val); + + if (lmp_subver == RTL_ROM_LMP_8822B) { + ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_REV, reg_val); + if (ret < 0) + goto err_free; + hci_rev = get_unaligned_le16(reg_val); + + /* 8822E */ + if (hci_rev == 0x000e) { + hci_ver = 0x0c; + lmp_ver = 0x0c; + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, + hci_ver, hdev->bus, + chip_type); + goto next; + } + } + + skb = btrtl_read_local_version(hdev); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + goto err_free; + } + + resp = (struct hci_rp_read_local_version *)skb->data; + + hci_ver = resp->hci_ver; + hci_rev = le16_to_cpu(resp->hci_rev); + lmp_ver = resp->lmp_ver; + lmp_subver = le16_to_cpu(resp->lmp_subver); + + kfree_skb(skb); + + if (rtl_has_chip_type(lmp_subver)) { + ret = rtl_read_chip_type(hdev, &chip_type); + if (ret) + goto err_free; + } + + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, + hdev->bus, chip_type); + +next: + rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", + hci_ver, hci_rev, + lmp_ver, lmp_subver); + + if (!btrtl_dev->ic_info && !btrtl_dev->drop_fw) + btrtl_dev->drop_fw = true; + else + btrtl_dev->drop_fw = false; + + if (btrtl_dev->drop_fw) { + skb = bt_skb_alloc(sizeof(*cmd), GFP_KERNEL); + if (!skb) + goto err_free; + + cmd = skb_put(skb, HCI_COMMAND_HDR_SIZE); + cmd->opcode = cpu_to_le16(0xfc66); + cmd->plen = 0; + + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + ret = hdev->send(hdev, skb); + if (ret < 0) { + bt_dev_err(hdev, "sending frame failed (%d)", ret); + kfree_skb(skb); + goto err_free; + } + + /* Ensure the above vendor command is sent to controller and + * process has done. 
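For the drop_fw path above, the frame pushed directly through hdev->send() is nothing more than a bare three-byte command header; any transport framing is added by the underlying driver. Sketched as raw bytes for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t cmd_hdr[] = {
		0x66, 0xfc,	/* opcode 0xfc66, little endian */
		0x00,		/* parameter length             */
	};

	for (size_t i = 0; i < sizeof(cmd_hdr); i++)
		printf("%02x ", cmd_hdr[i]);
	printf("\n");
	return 0;
}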
+ */ + msleep(200); + + goto check_version; + } + + if (!btrtl_dev->ic_info) { + rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", + lmp_subver, hci_rev, hci_ver); + return btrtl_dev; + } + + if (btrtl_dev->ic_info->has_rom_version) { + ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version); + if (ret) + goto err_free; + } + + if (!btrtl_dev->ic_info->fw_name) { + ret = -ENOMEM; + goto err_free; + } + + btrtl_dev->fw_len = -EIO; + if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) { + snprintf(fw_name, sizeof(fw_name), "%s_v2.bin", + btrtl_dev->ic_info->fw_name); + btrtl_dev->fw_len = rtl_load_file(hdev, fw_name, + &btrtl_dev->fw_data); + } + + if (btrtl_dev->fw_len < 0) { + snprintf(fw_name, sizeof(fw_name), "%s.bin", + btrtl_dev->ic_info->fw_name); + btrtl_dev->fw_len = rtl_load_file(hdev, fw_name, + &btrtl_dev->fw_data); + } + + if (btrtl_dev->fw_len < 0) { + rtl_dev_err(hdev, "firmware file %s not found", + btrtl_dev->ic_info->fw_name); + ret = btrtl_dev->fw_len; + goto err_free; + } + + if (btrtl_dev->ic_info->cfg_name) { + if (postfix) { + snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin", + btrtl_dev->ic_info->cfg_name, postfix); + } else { + snprintf(cfg_name, sizeof(cfg_name), "%s.bin", + btrtl_dev->ic_info->cfg_name); + } + btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name, + &btrtl_dev->cfg_data); + if (btrtl_dev->ic_info->config_needed && + btrtl_dev->cfg_len <= 0) { + rtl_dev_err(hdev, "mandatory config file %s not found", + btrtl_dev->ic_info->cfg_name); + ret = btrtl_dev->cfg_len; + goto err_free; + } + } + + /* The following chips supports the Microsoft vendor extension, + * therefore set the corresponding VsMsftOpCode. + */ + if (btrtl_dev->ic_info->has_msft_ext) + hci_set_msft_opcode(hdev, 0xFCF0); + + if (btrtl_dev->ic_info) + coredump_info->rtl_dump.controller = btrtl_dev->ic_info->hw_info; + + return btrtl_dev; + +err_free: + btrtl_free(btrtl_dev); +err_alloc: + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(btrtl_initialize); + +int btrtl_download_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ + int err = 0; + + /* Match a set of subver values that correspond to stock firmware, + * which is not compatible with standard btusb. + * If matched, upload an alternative firmware that does conform to + * standard btusb. Once that firmware is uploaded, the subver changes + * to a different value. + */ + if (!btrtl_dev->ic_info) { + rtl_dev_info(hdev, "assuming no firmware upload needed"); + err = 0; + goto done; + } + + switch (btrtl_dev->ic_info->lmp_subver) { + case RTL_ROM_LMP_8723A: + err = btrtl_setup_rtl8723a(hdev, btrtl_dev); + break; + case RTL_ROM_LMP_8723B: + case RTL_ROM_LMP_8821A: + case RTL_ROM_LMP_8761A: + case RTL_ROM_LMP_8822B: + case RTL_ROM_LMP_8852A: + case RTL_ROM_LMP_8703B: + case RTL_ROM_LMP_8851B: + err = btrtl_setup_rtl8723b(hdev, btrtl_dev); + break; + default: + rtl_dev_info(hdev, "assuming no firmware upload needed"); + break; + } + +done: + btrtl_register_devcoredump_support(hdev); + + return err; +} +EXPORT_SYMBOL_GPL(btrtl_download_firmware); + +void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) +{ + /* Enable controller to do both LE scan and BR/EDR inquiry + * simultaneously. + */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + + /* Enable central-peripheral role (able to create new connections with + * an existing connection in slave role). + */ + /* Enable WBS supported for the specific Realtek devices. 
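The firmware and config lookup above boils down to a little string handling. A sketch of the candidate names for an 8852C-class part (lmp_subver 0x8852, hci_rev 0x000c), with a hypothetical config postfix, since the postfix actually passed in depends on the calling transport driver:

#include <stdio.h>

int main(void)
{
	const char *fw_name  = "rtl_bt/rtl8852cu_fw";
	const char *cfg_name = "rtl_bt/rtl8852cu_config";
	const char *postfix  = "example";	/* hypothetical */
	char buf[64];

	snprintf(buf, sizeof(buf), "%s_v2.bin", fw_name);	/* tried first for this part */
	printf("fw candidate 1    : %s\n", buf);
	snprintf(buf, sizeof(buf), "%s.bin", fw_name);		/* fallback                  */
	printf("fw candidate 2    : %s\n", buf);
	snprintf(buf, sizeof(buf), "%s-%s.bin", cfg_name, postfix);
	printf("cfg (with postfix): %s\n", buf);
	return 0;
}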
*/ + switch (btrtl_dev->project_id) { + case CHIP_ID_8822C: + case CHIP_ID_8852A: + case CHIP_ID_8852B: + case CHIP_ID_8852C: + case CHIP_ID_8851B: + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + + /* RTL8852C needs to transmit mSBC data continuously without + * the zero length of USB packets for the ALT 6 supported chips + */ + if (btrtl_dev->project_id == CHIP_ID_8852C) + btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP); + + if (btrtl_dev->project_id == CHIP_ID_8852A || + btrtl_dev->project_id == CHIP_ID_8852C) + set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks); + + hci_set_aosp_capable(hdev); + break; + default: + rtl_dev_dbg(hdev, "Central-peripheral role not enabled."); + rtl_dev_dbg(hdev, "WBS supported not enabled."); + break; + } + + if (!btrtl_dev->ic_info) + return; + + switch (btrtl_dev->ic_info->lmp_subver) { + case RTL_ROM_LMP_8703B: + /* 8723CS reports two pages for local ext features, + * but it doesn't support any features from page 2 - + * it either responds with garbage or with error status + */ + set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, + &hdev->quirks); + break; + default: + break; + } +} +EXPORT_SYMBOL_GPL(btrtl_set_quirks); + +int btrtl_setup_realtek(struct hci_dev *hdev) +{ + struct btrtl_device_info *btrtl_dev; + int ret; + + btrtl_dev = btrtl_initialize(hdev, NULL); + if (IS_ERR(btrtl_dev)) + return PTR_ERR(btrtl_dev); + + ret = btrtl_download_firmware(hdev, btrtl_dev); + + btrtl_set_quirks(hdev, btrtl_dev); + + btrtl_free(btrtl_dev); + return ret; +} +EXPORT_SYMBOL_GPL(btrtl_setup_realtek); + +int btrtl_shutdown_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int ret; + + /* According to the vendor driver, BT must be reset on close to avoid + * firmware crash. 
+ */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "HCI reset during shutdown failed"); + return ret; + } + kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek); + +static unsigned int btrtl_convert_baudrate(u32 device_baudrate) +{ + switch (device_baudrate) { + case 0x0252a00a: + return 230400; + + case 0x05f75004: + return 921600; + + case 0x00005004: + return 1000000; + + case 0x04928002: + case 0x01128002: + return 1500000; + + case 0x00005002: + return 2000000; + + case 0x0000b001: + return 2500000; + + case 0x04928001: + return 3000000; + + case 0x052a6001: + return 3500000; + + case 0x00005001: + return 4000000; + + case 0x0252c014: + default: + return 115200; + } +} + +int btrtl_get_uart_settings(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned int *controller_baudrate, + u32 *device_baudrate, bool *flow_control) +{ + struct rtl_vendor_config *config; + struct rtl_vendor_config_entry *entry; + int i, total_data_len; + bool found = false; + + total_data_len = btrtl_dev->cfg_len - sizeof(*config); + if (total_data_len <= 0) { + rtl_dev_warn(hdev, "no config loaded"); + return -EINVAL; + } + + config = (struct rtl_vendor_config *)btrtl_dev->cfg_data; + if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) { + rtl_dev_err(hdev, "invalid config magic"); + return -EINVAL; + } + + if (total_data_len < le16_to_cpu(config->total_len)) { + rtl_dev_err(hdev, "config is too short"); + return -EINVAL; + } + + for (i = 0; i < total_data_len; ) { + entry = ((void *)config->entry) + i; + + switch (le16_to_cpu(entry->offset)) { + case 0xc: + if (entry->len < sizeof(*device_baudrate)) { + rtl_dev_err(hdev, "invalid UART config entry"); + return -EINVAL; + } + + *device_baudrate = get_unaligned_le32(entry->data); + *controller_baudrate = btrtl_convert_baudrate( + *device_baudrate); + + if (entry->len >= 13) + *flow_control = !!(entry->data[12] & BIT(2)); + else + *flow_control = false; + + found = true; + break; + + default: + rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)", + le16_to_cpu(entry->offset), entry->len); + break; + } + + i += sizeof(*entry) + entry->len; + } + + if (!found) { + rtl_dev_err(hdev, "no UART config entry found"); + return -ENOENT; + } + + rtl_dev_dbg(hdev, "device baudrate = 0x%08x", *device_baudrate); + rtl_dev_dbg(hdev, "controller baudrate = %u", *controller_baudrate); + rtl_dev_dbg(hdev, "flow control %d", *flow_control); + + return 0; +} +EXPORT_SYMBOL_GPL(btrtl_get_uart_settings); + +MODULE_AUTHOR("Daniel Drake "); +MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723d_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723d_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin"); 
+MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821c_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821cs_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8821cs_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822cs_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822cs_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822cu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8822cu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin"); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h new file mode 100644 index 000000000..a2d9d34f9 --- /dev/null +++ b/drivers/bluetooth/btrtl.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Bluetooth support for Realtek devices + * + * Copyright (C) 2015 Endless Mobile, Inc. + */ + +#define RTL_FRAG_LEN 252 + +#define rtl_dev_err(dev, fmt, ...) bt_dev_err(dev, "RTL: " fmt, ##__VA_ARGS__) +#define rtl_dev_warn(dev, fmt, ...) bt_dev_warn(dev, "RTL: " fmt, ##__VA_ARGS__) +#define rtl_dev_info(dev, fmt, ...) bt_dev_info(dev, "RTL: " fmt, ##__VA_ARGS__) +#define rtl_dev_dbg(dev, fmt, ...) 
bt_dev_dbg(dev, "RTL: " fmt, ##__VA_ARGS__) + +struct btrtl_device_info; + +struct rtl_chip_type_evt { + __u8 status; + __u8 type; +} __packed; + +struct rtl_download_cmd { + __u8 index; + __u8 data[RTL_FRAG_LEN]; +} __packed; + +struct rtl_download_response { + __u8 status; + __u8 index; +} __packed; + +struct rtl_rom_version_evt { + __u8 status; + __u8 version; +} __packed; + +struct rtl_epatch_header { + __u8 signature[8]; + __le32 fw_version; + __le16 num_patches; +} __packed; + +struct rtl_vendor_config_entry { + __le16 offset; + __u8 len; + __u8 data[]; +} __packed; + +struct rtl_vendor_config { + __le32 signature; + __le16 total_len; + __u8 entry[]; +} __packed; + +struct rtl_epatch_header_v2 { + __u8 signature[8]; + __u8 fw_version[8]; + __le32 num_sections; +} __packed; + +struct rtl_section { + __le32 opcode; + __le32 len; + u8 data[]; +} __packed; + +struct rtl_section_hdr { + __le16 num; + __le16 reserved; +} __packed; + +struct rtl_common_subsec { + __u8 eco; + __u8 prio; + __u8 cb[2]; + __le32 len; + __u8 data[]; +}; + +struct rtl_sec_hdr { + __u8 eco; + __u8 prio; + __u8 key_id; + __u8 reserved; + __le32 len; + __u8 data[]; +} __packed; + +struct rtl_subsection { + struct list_head list; + u32 opcode; + u32 len; + u8 prio; + u8 *data; +}; + +struct rtl_iovec { + u8 *data; + u32 len; +}; + +struct rtl_vendor_cmd { + __u8 param[5]; +} __packed; + +enum { + REALTEK_ALT6_CONTINUOUS_TX_CHIP, + + __REALTEK_NUM_FLAGS, +}; + +struct rtl_dump_info { + const char *driver_name; + char *controller; + u32 fw_version; +}; + +struct btrealtek_data { + DECLARE_BITMAP(flags, __REALTEK_NUM_FLAGS); + + struct rtl_dump_info rtl_dump; +}; + +#define btrealtek_set_flag(hdev, nr) \ + do { \ + struct btrealtek_data *realtek = hci_get_priv((hdev)); \ + set_bit((nr), realtek->flags); \ + } while (0) + +#define btrealtek_get_flag(hdev) \ + (((struct btrealtek_data *)hci_get_priv(hdev))->flags) + +#define btrealtek_test_flag(hdev, nr) test_bit((nr), btrealtek_get_flag(hdev)) + +#if IS_ENABLED(CONFIG_BT_RTL) + +struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, + const char *postfix); +void btrtl_free(struct btrtl_device_info *btrtl_dev); +int btrtl_download_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev); +void btrtl_set_quirks(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev); +int btrtl_setup_realtek(struct hci_dev *hdev); +int btrtl_shutdown_realtek(struct hci_dev *hdev); +int btrtl_get_uart_settings(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned int *controller_baudrate, + u32 *device_baudrate, bool *flow_control); +void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name); + +#else + +static inline struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, + const char *postfix) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void btrtl_free(struct btrtl_device_info *btrtl_dev) +{ +} + +static inline int btrtl_download_firmware(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ + return -EOPNOTSUPP; +} + +static inline void btrtl_set_quirks(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev) +{ +} + +static inline int btrtl_setup_realtek(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btrtl_shutdown_realtek(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btrtl_get_uart_settings(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned int *controller_baudrate, + u32 *device_baudrate, + bool *flow_control) +{ + return 
-ENOENT; +} + +static inline void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name) +{ +} + +#endif diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c new file mode 100644 index 000000000..f19d31ee3 --- /dev/null +++ b/drivers/bluetooth/btsdio.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Generic Bluetooth SDIO driver + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * Copyright (C) 2007 Marcel Holtmann + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#define VERSION "0.1" + +static const struct sdio_device_id btsdio_table[] = { + /* Generic Bluetooth Type-A SDIO device */ + { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) }, + + /* Generic Bluetooth Type-B SDIO device */ + { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, + + /* Generic Bluetooth AMP controller */ + { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(sdio, btsdio_table); + +struct btsdio_data { + struct hci_dev *hdev; + struct sdio_func *func; + + struct work_struct work; + + struct sk_buff_head txq; +}; + +#define REG_RDAT 0x00 /* Receiver Data */ +#define REG_TDAT 0x00 /* Transmitter Data */ +#define REG_PC_RRT 0x10 /* Read Packet Control */ +#define REG_PC_WRT 0x11 /* Write Packet Control */ +#define REG_RTC_STAT 0x12 /* Retry Control Status */ +#define REG_RTC_SET 0x12 /* Retry Control Set */ +#define REG_INTRD 0x13 /* Interrupt Indication */ +#define REG_CL_INTRD 0x13 /* Interrupt Clear */ +#define REG_EN_INTRD 0x14 /* Interrupt Enable */ +#define REG_MD_STAT 0x20 /* Bluetooth Mode Status */ +#define REG_MD_SET 0x20 /* Bluetooth Mode Set */ + +static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb) +{ + int err; + + BT_DBG("%s", data->hdev->name); + + /* Prepend Type-A header */ + skb_push(skb, 4); + skb->data[0] = (skb->len & 0x0000ff); + skb->data[1] = (skb->len & 0x00ff00) >> 8; + skb->data[2] = (skb->len & 0xff0000) >> 16; + skb->data[3] = hci_skb_pkt_type(skb); + + err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len); + if (err < 0) { + skb_pull(skb, 4); + sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL); + return err; + } + + data->hdev->stat.byte_tx += skb->len; + + kfree_skb(skb); + + return 0; +} + +static void btsdio_work(struct work_struct *work) +{ + struct btsdio_data *data = container_of(work, struct btsdio_data, work); + struct sk_buff *skb; + int err; + + BT_DBG("%s", data->hdev->name); + + sdio_claim_host(data->func); + + while ((skb = skb_dequeue(&data->txq))) { + err = btsdio_tx_packet(data, skb); + if (err < 0) { + data->hdev->stat.err_tx++; + skb_queue_head(&data->txq, skb); + break; + } + } + + sdio_release_host(data->func); +} + +static int btsdio_rx_packet(struct btsdio_data *data) +{ + u8 hdr[4] __attribute__ ((aligned(4))); + struct sk_buff *skb; + int err, len; + + BT_DBG("%s", data->hdev->name); + + err = sdio_readsb(data->func, hdr, REG_RDAT, 4); + if (err < 0) + return err; + + len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16); + if (len < 4 || len > 65543) + return -EILSEQ; + + skb = bt_skb_alloc(len - 4, GFP_KERNEL); + if (!skb) { + /* Out of memory. Prepare a read retry and just + * return with the expectation that the next time + * we're called we'll have more memory. 
+ */ + return -ENOMEM; + } + + skb_put(skb, len - 4); + + err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4); + if (err < 0) { + kfree_skb(skb); + return err; + } + + data->hdev->stat.byte_rx += len; + + switch (hdr[3]) { + case HCI_EVENT_PKT: + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + hci_skb_pkt_type(skb) = hdr[3]; + err = hci_recv_frame(data->hdev, skb); + if (err < 0) + return err; + break; + default: + kfree_skb(skb); + return -EINVAL; + } + + sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL); + + return 0; +} + +static void btsdio_interrupt(struct sdio_func *func) +{ + struct btsdio_data *data = sdio_get_drvdata(func); + int intrd; + + BT_DBG("%s", data->hdev->name); + + intrd = sdio_readb(func, REG_INTRD, NULL); + if (intrd & 0x01) { + sdio_writeb(func, 0x01, REG_CL_INTRD, NULL); + + if (btsdio_rx_packet(data) < 0) { + data->hdev->stat.err_rx++; + sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL); + } + } +} + +static int btsdio_open(struct hci_dev *hdev) +{ + struct btsdio_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s", hdev->name); + + sdio_claim_host(data->func); + + err = sdio_enable_func(data->func); + if (err < 0) + goto release; + + err = sdio_claim_irq(data->func, btsdio_interrupt); + if (err < 0) { + sdio_disable_func(data->func); + goto release; + } + + if (data->func->class == SDIO_CLASS_BT_B) + sdio_writeb(data->func, 0x00, REG_MD_SET, NULL); + + sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL); + +release: + sdio_release_host(data->func); + + return err; +} + +static int btsdio_close(struct hci_dev *hdev) +{ + struct btsdio_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s", hdev->name); + + sdio_claim_host(data->func); + + sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL); + + sdio_release_irq(data->func); + sdio_disable_func(data->func); + + sdio_release_host(data->func); + + return 0; +} + +static int btsdio_flush(struct hci_dev *hdev) +{ + struct btsdio_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s", hdev->name); + + skb_queue_purge(&data->txq); + + return 0; +} + +static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btsdio_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s", hdev->name); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + + default: + return -EILSEQ; + } + + skb_queue_tail(&data->txq, skb); + + schedule_work(&data->work); + + return 0; +} + +static int btsdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct btsdio_data *data; + struct hci_dev *hdev; + struct sdio_func_tuple *tuple = func->tuples; + int err; + + BT_DBG("func %p id %p class 0x%04x", func, id, func->class); + + while (tuple) { + BT_DBG("code 0x%x size %d", tuple->code, tuple->size); + tuple = tuple->next; + } + + /* Broadcom devices soldered onto the PCB (non-removable) use an + * UART connection for Bluetooth, ignore the BT SDIO interface. 
+ */ + if (func->vendor == SDIO_VENDOR_ID_BROADCOM && + !mmc_card_is_removable(func->card->host)) { + switch (func->device) { + case SDIO_DEVICE_ID_BROADCOM_43341: + case SDIO_DEVICE_ID_BROADCOM_43430: + case SDIO_DEVICE_ID_BROADCOM_4345: + case SDIO_DEVICE_ID_BROADCOM_43455: + case SDIO_DEVICE_ID_BROADCOM_4356: + return -ENODEV; + } + } + + data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->func = func; + + INIT_WORK(&data->work, btsdio_work); + + skb_queue_head_init(&data->txq); + + hdev = hci_alloc_dev(); + if (!hdev) + return -ENOMEM; + + hdev->bus = HCI_SDIO; + hci_set_drvdata(hdev, data); + + if (id->class == SDIO_CLASS_BT_AMP) + hdev->dev_type = HCI_AMP; + else + hdev->dev_type = HCI_PRIMARY; + + data->hdev = hdev; + + SET_HCIDEV_DEV(hdev, &func->dev); + + hdev->open = btsdio_open; + hdev->close = btsdio_close; + hdev->flush = btsdio_flush; + hdev->send = btsdio_send_frame; + + if (func->vendor == 0x0104 && func->device == 0x00c5) + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + err = hci_register_dev(hdev); + if (err < 0) { + hci_free_dev(hdev); + return err; + } + + sdio_set_drvdata(func, data); + + return 0; +} + +static void btsdio_remove(struct sdio_func *func) +{ + struct btsdio_data *data = sdio_get_drvdata(func); + struct hci_dev *hdev; + + BT_DBG("func %p", func); + + if (!data) + return; + + cancel_work_sync(&data->work); + hdev = data->hdev; + + sdio_set_drvdata(func, NULL); + + hci_unregister_dev(hdev); + + hci_free_dev(hdev); +} + +static struct sdio_driver btsdio_driver = { + .name = "btsdio", + .probe = btsdio_probe, + .remove = btsdio_remove, + .id_table = btsdio_table, +}; + +module_sdio_driver(btsdio_driver); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c new file mode 100644 index 000000000..66080fae0 --- /dev/null +++ b/drivers/bluetooth/btusb.c @@ -0,0 +1,4814 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Generic Bluetooth USB driver + * + * Copyright (C) 2005-2008 Marcel Holtmann + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "btintel.h" +#include "btbcm.h" +#include "btrtl.h" +#include "btmtk.h" + +#define VERSION "0.8" + +static bool disable_scofix; +static bool force_scofix; +static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND); +static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC); +static bool reset = true; + +static struct usb_driver btusb_driver; + +#define BTUSB_IGNORE BIT(0) +#define BTUSB_DIGIANSWER BIT(1) +#define BTUSB_CSR BIT(2) +#define BTUSB_SNIFFER BIT(3) +#define BTUSB_BCM92035 BIT(4) +#define BTUSB_BROKEN_ISOC BIT(5) +#define BTUSB_WRONG_SCO_MTU BIT(6) +#define BTUSB_ATH3012 BIT(7) +#define BTUSB_INTEL_COMBINED BIT(8) +#define BTUSB_INTEL_BOOT BIT(9) +#define BTUSB_BCM_PATCHRAM BIT(10) +#define BTUSB_MARVELL BIT(11) +#define BTUSB_SWAVE BIT(12) +#define BTUSB_AMP BIT(13) +#define BTUSB_QCA_ROME BIT(14) +#define BTUSB_BCM_APPLE BIT(15) +#define BTUSB_REALTEK BIT(16) +#define BTUSB_BCM2045 BIT(17) +#define BTUSB_IFNUM_2 BIT(18) +#define BTUSB_CW6622 BIT(19) +#define BTUSB_MEDIATEK BIT(20) +#define BTUSB_WIDEBAND_SPEECH BIT(21) +#define BTUSB_VALID_LE_STATES BIT(22) +#define BTUSB_QCA_WCN6855 BIT(23) +#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24) 
+#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) +#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26) +#define BTUSB_ACTIONS_SEMI BIT(27) + +static const struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ + { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + + /* Generic Bluetooth AMP device */ + { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP }, + + /* Generic Bluetooth USB interface */ + { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) }, + + /* Apple-specific (Broadcom) devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_APPLE | BTUSB_IFNUM_2 }, + + /* MediaTek MT76x0E */ + { USB_DEVICE(0x0e8d, 0x763f) }, + + /* Broadcom SoftSailing reporting vendor specific */ + { USB_DEVICE(0x0a5c, 0x21e1) }, + + /* Apple MacBookPro 7,1 */ + { USB_DEVICE(0x05ac, 0x8213) }, + + /* Apple iMac11,1 */ + { USB_DEVICE(0x05ac, 0x8215) }, + + /* Apple MacBookPro6,2 */ + { USB_DEVICE(0x05ac, 0x8218) }, + + /* Apple MacBookAir3,1, MacBookAir3,2 */ + { USB_DEVICE(0x05ac, 0x821b) }, + + /* Apple MacBookAir4,1 */ + { USB_DEVICE(0x05ac, 0x821f) }, + + /* Apple MacBookPro8,2 */ + { USB_DEVICE(0x05ac, 0x821a) }, + + /* Apple MacMini5,1 */ + { USB_DEVICE(0x05ac, 0x8281) }, + + /* AVM BlueFRITZ! USB v2.0 */ + { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_SWAVE }, + + /* Bluetooth Ultraport Module from IBM */ + { USB_DEVICE(0x04bf, 0x030a) }, + + /* ALPS Modules with non-standard id */ + { USB_DEVICE(0x044e, 0x3001) }, + { USB_DEVICE(0x044e, 0x3002) }, + + /* Ericsson with non-standard id */ + { USB_DEVICE(0x0bdb, 0x1002) }, + + /* Canyon CN-BTU1 with HID interfaces */ + { USB_DEVICE(0x0c10, 0x0000) }, + + /* Broadcom BCM20702B0 (Dynex/Insignia) */ + { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ + { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Broadcom BCM920703 (HTC Vive) */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Foxconn - Hon Hai */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Lite-On Technology - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* ASUSTek Computer - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Belkin F8065bf - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* IMC Networks - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Dell Computer - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x413c, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Toshiba Corp - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Intel Bluetooth USB Bootloader (RAM module) */ + { USB_DEVICE(0x8087, 0x0a5a), + .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, btusb_table); + +static const struct usb_device_id quirks_table[] = { + /* CSR BlueCore devices */ + { USB_DEVICE(0x0a12, 0x0001), .driver_info = 
BTUSB_CSR }, + + /* Broadcom BCM2033 without firmware */ + { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, + + /* Broadcom BCM2045 devices */ + { USB_DEVICE(0x0a5c, 0x2045), .driver_info = BTUSB_BCM2045 }, + + /* Atheros 3011 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, + + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, + + /* Atheros 3012 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, + 
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + + /* QCA ROME chipset */ + { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cf3, 0xe500), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME | + BTUSB_WIDEBAND_SPEECH }, + + /* QCA WCN6855 chipset */ + { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { 
USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* QCA WCN785x chipset */ + { USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* Broadcom BCM2035 */ + { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, + { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, + { 
USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Broadcom BCM2045 */ + { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* IBM/Lenovo ThinkPad with Broadcom chip */ + { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* HP laptop with Broadcom chip */ + { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell laptop with Broadcom chip */ + { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell Wireless 370 and 410 devices */ + { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Belkin F8T012 and F8T013 devices */ + { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Asus WL-BTD202 device */ + { USB_DEVICE(0x0b05, 0x1715), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Kensington Bluetooth USB adapter */ + { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* RTX Telecom based adapters with buggy SCO support */ + { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, + { USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC }, + + /* CONWISE Technology based adapters with buggy SCO support */ + { USB_DEVICE(0x0e5e, 0x6622), + .driver_info = BTUSB_BROKEN_ISOC | BTUSB_CW6622}, + + /* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */ + { USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE }, + + /* Digianswer devices */ + { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, + { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, + + /* CSR BlueCore Bluetooth Sniffer */ + { USB_DEVICE(0x0a12, 0x0002), + .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC }, + + /* Frontline ComProbe Bluetooth Sniffer */ + { USB_DEVICE(0x16d3, 0x0002), + .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC }, + + /* Marvell Bluetooth devices */ + { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL }, + + /* Intel Bluetooth devices */ + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, + { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | + BTUSB_INTEL_NO_WBS_SUPPORT | + BTUSB_INTEL_BROKEN_INITIAL_NCMD | + BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, + { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED | + BTUSB_INTEL_NO_WBS_SUPPORT | + BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, + { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED | + BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, + { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_COMBINED }, + + /* Other Intel Bluetooth devices */ + { 
USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), + .driver_info = BTUSB_IGNORE }, + + /* Realtek 8821CE Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8822CU Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8852AE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8852CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04c5, 0x1675), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cb8, 0xc558), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3587), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8852BE Bluetooth devices */ + { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Realtek Bluetooth devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), + .driver_info = BTUSB_REALTEK }, + + /* MediaTek Bluetooth devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01), + .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* Additional MediaTek MT7615E Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK}, + + /* Additional MediaTek MT7663 Bluetooth devices */ + { USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* Additional MediaTek MT7668 Bluetooth devices */ + { USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* Additional MediaTek MT7921 Bluetooth devices */ + { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f2), 
.driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* MediaTek MT7922A Bluetooth devices */ + { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + + /* Additional Realtek 8723AE Bluetooth devices */ + { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8723BE Bluetooth devices */ + { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x04f2, 0xb49f), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8723BU Bluetooth devices */ + { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8723DE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8761BUV Bluetooth devices */ + { USB_DEVICE(0x2357, 0x0604), .driver_info 
= BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x6655, 0x8771), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Additional Realtek 8821AE Bluetooth devices */ + { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8822BE Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3553), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3555), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2ff8, 0x3051), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x1358, 0xc123), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0xc123), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + + /* Actions Semiconductor ATS2851 based devices */ + { USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI }, + + /* Silicon Wave based devices */ + { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, + + { } /* Terminating entry */ +}; + +/* The Bluetooth USB module build into some devices needs to be reset on resume, + * this is a problem with the platform (likely shutting off all power) not with + * the module itself. So we use a DMI list to match known broken platforms. 
+ */ +static const struct dmi_system_id btusb_needs_reset_resume_table[] = { + { + /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), + }, + }, + { + /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), + }, + }, + { + /* Dell Inspiron 5565 (QCA ROME device 0cf3:e009) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5565"), + }, + }, + {} +}; + +struct qca_dump_info { + /* fields for dump collection */ + u16 id_vendor; + u16 id_product; + u32 fw_version; + u32 controller_id; + u32 ram_dump_size; + u16 ram_dump_seqno; +}; + +#define BTUSB_MAX_ISOC_FRAMES 10 + +#define BTUSB_INTR_RUNNING 0 +#define BTUSB_BULK_RUNNING 1 +#define BTUSB_ISOC_RUNNING 2 +#define BTUSB_SUSPENDING 3 +#define BTUSB_DID_ISO_RESUME 4 +#define BTUSB_BOOTLOADER 5 +#define BTUSB_DOWNLOADING 6 +#define BTUSB_FIRMWARE_LOADED 7 +#define BTUSB_FIRMWARE_FAILED 8 +#define BTUSB_BOOTING 9 +#define BTUSB_DIAG_RUNNING 10 +#define BTUSB_OOB_WAKE_ENABLED 11 +#define BTUSB_HW_RESET_ACTIVE 12 +#define BTUSB_TX_WAIT_VND_EVT 13 +#define BTUSB_WAKEUP_AUTOSUSPEND 14 +#define BTUSB_USE_ALT3_FOR_WBS 15 +#define BTUSB_ALT6_CONTINUOUS_TX 16 +#define BTUSB_HW_SSR_ACTIVE 17 + +struct btusb_data { + struct hci_dev *hdev; + struct usb_device *udev; + struct usb_interface *intf; + struct usb_interface *isoc; + struct usb_interface *diag; + unsigned isoc_ifnum; + + unsigned long flags; + + bool poll_sync; + int intr_interval; + struct work_struct work; + struct work_struct waker; + struct delayed_work rx_work; + + struct sk_buff_head acl_q; + + struct usb_anchor deferred; + struct usb_anchor tx_anchor; + int tx_in_flight; + spinlock_t txlock; + + struct usb_anchor intr_anchor; + struct usb_anchor bulk_anchor; + struct usb_anchor isoc_anchor; + struct usb_anchor diag_anchor; + struct usb_anchor ctrl_anchor; + spinlock_t rxlock; + + struct sk_buff *evt_skb; + struct sk_buff *acl_skb; + struct sk_buff *sco_skb; + + struct usb_endpoint_descriptor *intr_ep; + struct usb_endpoint_descriptor *bulk_tx_ep; + struct usb_endpoint_descriptor *bulk_rx_ep; + struct usb_endpoint_descriptor *isoc_tx_ep; + struct usb_endpoint_descriptor *isoc_rx_ep; + struct usb_endpoint_descriptor *diag_tx_ep; + struct usb_endpoint_descriptor *diag_rx_ep; + + struct gpio_desc *reset_gpio; + + __u8 cmdreq_type; + __u8 cmdreq; + + unsigned int sco_num; + unsigned int air_mode; + bool usb_alt6_packet_flow; + int isoc_altsetting; + int suspend_count; + + int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb); + int (*recv_acl)(struct hci_dev *hdev, struct sk_buff *skb); + int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); + + int (*setup_on_usb)(struct hci_dev *hdev); + + int oob_wake_irq; /* irq for out-of-band wake-on-bt */ + unsigned cmd_timeout_cnt; + + struct qca_dump_info qca_dump; +}; + +static void btusb_reset(struct hci_dev *hdev) +{ + struct btusb_data *data; + int err; + + if (hdev->reset) { + hdev->reset(hdev); + return; + } + + data = hci_get_drvdata(hdev); + /* This is not an unbalanced PM reference since the device will reset */ + err = usb_autopm_get_interface(data->intf); + if (err) { + bt_dev_err(hdev, "Failed usb_autopm_get_interface: %d", err); + return; + } + + bt_dev_err(hdev, "Resetting usb device."); + usb_queue_reset_device(data->intf); +} + +static void btusb_intel_cmd_timeout(struct 
hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; + struct btintel_data *intel_data = hci_get_priv(hdev); + + if (++data->cmd_timeout_cnt < 5) + return; + + if (intel_data->acpi_reset_method) { + if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) { + bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again"); + return; + } + + bt_dev_err(hdev, "Initiating acpi reset method"); + /* If ACPI reset method fails, lets try with legacy GPIO + * toggling + */ + if (!intel_data->acpi_reset_method(hdev)) { + return; + } + } + + if (!reset_gpio) { + btusb_reset(hdev); + return; + } + + /* + * Toggle the hard reset line if the platform provides one. The reset + * is going to yank the device off the USB and then replug. So doing + * once is enough. The cleanup is handled correctly on the way out + * (standard USB disconnect), and the new device is detected cleanly + * and bound to the driver again like it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? Not resetting again"); + return; + } + + bt_dev_err(hdev, "Initiating HW reset via gpio"); + gpiod_set_value_cansleep(reset_gpio, 1); + msleep(100); + gpiod_set_value_cansleep(reset_gpio, 0); +} + +#define RTK_DEVCOREDUMP_CODE_MEMDUMP 0x01 +#define RTK_DEVCOREDUMP_CODE_HW_ERR 0x02 +#define RTK_DEVCOREDUMP_CODE_CMD_TIMEOUT 0x03 + +#define RTK_SUB_EVENT_CODE_COREDUMP 0x34 + +struct rtk_dev_coredump_hdr { + u8 type; + u8 code; + u8 reserved[2]; +} __packed; + +static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev, + struct rtk_dev_coredump_hdr *hdr, u8 *buf, u32 len) +{ + struct sk_buff *skb; + + skb = alloc_skb(len + sizeof(*hdr), GFP_ATOMIC); + if (!skb) + return; + + skb_put_data(skb, hdr, sizeof(*hdr)); + if (len) + skb_put_data(skb, buf, len); + + if (!hci_devcd_init(hdev, skb->len)) { + hci_devcd_append(hdev, skb); + hci_devcd_complete(hdev); + } else { + bt_dev_err(hdev, "RTL: Failed to generate devcoredump"); + kfree_skb(skb); + } +} + +static void btusb_rtl_cmd_timeout(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; + struct rtk_dev_coredump_hdr hdr = { + .type = RTK_DEVCOREDUMP_CODE_CMD_TIMEOUT, + }; + + btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0); + + if (++data->cmd_timeout_cnt < 5) + return; + + if (!reset_gpio) { + btusb_reset(hdev); + return; + } + + /* Toggle the hard reset line. The Realtek device is going to + * yank itself off the USB and then replug. The cleanup is handled + * correctly on the way out (standard USB disconnect), and the new + * device is detected cleanly and bound to the driver again like + * it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? 
Not resetting again"); + return; + } + + bt_dev_err(hdev, "Reset Realtek device via gpio"); + gpiod_set_value_cansleep(reset_gpio, 1); + msleep(200); + gpiod_set_value_cansleep(reset_gpio, 0); +} + +static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code) +{ + struct rtk_dev_coredump_hdr hdr = { + .type = RTK_DEVCOREDUMP_CODE_HW_ERR, + .code = code, + }; + + bt_dev_err(hdev, "RTL: hw err, trigger devcoredump (%d)", code); + + btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0); +} + +static void btusb_qca_cmd_timeout(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; + + if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) { + bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout"); + return; + } + + if (++data->cmd_timeout_cnt < 5) + return; + + if (reset_gpio) { + bt_dev_err(hdev, "Reset qca device via bt_en gpio"); + + /* Toggle the hard reset line. The qca bt device is going to + * yank itself off the USB and then replug. The cleanup is handled + * correctly on the way out (standard USB disconnect), and the new + * device is detected cleanly and bound to the driver again like + * it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? Not resetting again"); + return; + } + + gpiod_set_value_cansleep(reset_gpio, 0); + msleep(200); + gpiod_set_value_cansleep(reset_gpio, 1); + + return; + } + + btusb_reset(hdev); +} + +static inline void btusb_free_frags(struct btusb_data *data) +{ + unsigned long flags; + + spin_lock_irqsave(&data->rxlock, flags); + + dev_kfree_skb_irq(data->evt_skb); + data->evt_skb = NULL; + + dev_kfree_skb_irq(data->acl_skb); + data->acl_skb = NULL; + + dev_kfree_skb_irq(data->sco_skb); + data->sco_skb = NULL; + + spin_unlock_irqrestore(&data->rxlock, flags); +} + +static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb) +{ + if (data->intr_interval) { + /* Trigger dequeue immediatelly if an event is received */ + schedule_delayed_work(&data->rx_work, 0); + } + + return data->recv_event(data->hdev, skb); +} + +static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) +{ + struct sk_buff *skb; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&data->rxlock, flags); + skb = data->evt_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_EVENT_SIZE, GFP_ATOMIC); + if (!skb) { + err = -ENOMEM; + break; + } + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + hci_skb_expect(skb) = HCI_EVENT_HDR_SIZE; + } + + len = min_t(uint, hci_skb_expect(skb), count); + skb_put_data(skb, buffer, len); + + count -= len; + buffer += len; + hci_skb_expect(skb) -= len; + + if (skb->len == HCI_EVENT_HDR_SIZE) { + /* Complete event header */ + hci_skb_expect(skb) = hci_event_hdr(skb)->plen; + + if (skb_tailroom(skb) < hci_skb_expect(skb)) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (!hci_skb_expect(skb)) { + /* Complete frame */ + btusb_recv_event(data, skb); + skb = NULL; + } + } + + data->evt_skb = skb; + spin_unlock_irqrestore(&data->rxlock, flags); + + return err; +} + +static int btusb_recv_acl(struct btusb_data *data, struct sk_buff *skb) +{ + /* Only queue ACL packet if intr_interval is set as it means + * force_poll_sync has been enabled. 
+ */ + if (!data->intr_interval) + return data->recv_acl(data->hdev, skb); + + skb_queue_tail(&data->acl_q, skb); + schedule_delayed_work(&data->rx_work, data->intr_interval); + + return 0; +} + +static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) +{ + struct sk_buff *skb; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&data->rxlock, flags); + skb = data->acl_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!skb) { + err = -ENOMEM; + break; + } + + hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; + hci_skb_expect(skb) = HCI_ACL_HDR_SIZE; + } + + len = min_t(uint, hci_skb_expect(skb), count); + skb_put_data(skb, buffer, len); + + count -= len; + buffer += len; + hci_skb_expect(skb) -= len; + + if (skb->len == HCI_ACL_HDR_SIZE) { + __le16 dlen = hci_acl_hdr(skb)->dlen; + + /* Complete ACL header */ + hci_skb_expect(skb) = __le16_to_cpu(dlen); + + if (skb_tailroom(skb) < hci_skb_expect(skb)) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (!hci_skb_expect(skb)) { + /* Complete frame */ + btusb_recv_acl(data, skb); + skb = NULL; + } + } + + data->acl_skb = skb; + spin_unlock_irqrestore(&data->rxlock, flags); + + return err; +} + +static bool btusb_validate_sco_handle(struct hci_dev *hdev, + struct hci_sco_hdr *hdr) +{ + __u16 handle; + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + // Can't validate, userspace controls everything. + return true; + + /* + * USB isochronous transfers are not designed to be reliable and may + * lose fragments. When this happens, the next first fragment + * encountered might actually be a continuation fragment. + * Validate the handle to detect it and drop it, or else the upper + * layer will get garbage for a while. 
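+ * A continuation fragment misread as a header is unlikely to carry a
+ * handle that maps to a (e)SCO connection, so the lookup below fails
+ * and the caller drops the partial frame with -EILSEQ.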
+ */ + + handle = hci_handle(__le16_to_cpu(hdr->handle)); + + switch (hci_conn_lookup_type(hdev, handle)) { + case SCO_LINK: + case ESCO_LINK: + return true; + default: + return false; + } +} + +static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) +{ + struct sk_buff *skb; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&data->rxlock, flags); + skb = data->sco_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_SCO_SIZE, GFP_ATOMIC); + if (!skb) { + err = -ENOMEM; + break; + } + + hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; + hci_skb_expect(skb) = HCI_SCO_HDR_SIZE; + } + + len = min_t(uint, hci_skb_expect(skb), count); + skb_put_data(skb, buffer, len); + + count -= len; + buffer += len; + hci_skb_expect(skb) -= len; + + if (skb->len == HCI_SCO_HDR_SIZE) { + /* Complete SCO header */ + struct hci_sco_hdr *hdr = hci_sco_hdr(skb); + + hci_skb_expect(skb) = hdr->dlen; + + if (skb_tailroom(skb) < hci_skb_expect(skb) || + !btusb_validate_sco_handle(data->hdev, hdr)) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (!hci_skb_expect(skb)) { + /* Complete frame */ + hci_recv_frame(data->hdev, skb); + skb = NULL; + } + } + + data->sco_skb = skb; + spin_unlock_irqrestore(&data->rxlock, flags); + + return err; +} + +static void btusb_intr_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + + if (btusb_recv_intr(data, urb->transfer_buffer, + urb->actual_length) < 0) { + bt_dev_err(hdev, "corrupted event packet"); + hdev->stat.err_rx++; + } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; + } + + if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) + return; + + usb_mark_last_busy(data->udev); + usb_anchor_urb(urb, &data->intr_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + if (err != -EPERM) + hci_cmd_sync_cancel(hdev, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size; + + BT_DBG("%s", hdev->name); + + if (!data->intr_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, mem_flags); + if (!urb) + return -ENOMEM; + + size = le16_to_cpu(data->intr_ep->wMaxPacketSize); + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); + + usb_fill_int_urb(urb, data->udev, pipe, buf, size, + btusb_intr_complete, hdev, data->intr_ep->bInterval); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->intr_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + if (err != -EPERM) + hci_cmd_sync_cancel(hdev, -err); + usb_unanchor_urb(urb); + } + + /* Only initialize intr_interval if URB poll sync is enabled */ + if (!data->poll_sync) + goto done; + + /* The 
units are frames (milliseconds) for full and low speed devices, + * and microframes (1/8 millisecond) for highspeed and SuperSpeed + * devices. + * + * This is done once on open/resume so it shouldn't change even if + * force_poll_sync changes. + */ + switch (urb->dev->speed) { + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: /* units are 125us */ + data->intr_interval = usecs_to_jiffies(urb->interval * 125); + break; + default: + data->intr_interval = msecs_to_jiffies(urb->interval); + break; + } + +done: + usb_free_urb(urb); + + return err; +} + +static void btusb_bulk_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + + if (data->recv_bulk(data, urb->transfer_buffer, + urb->actual_length) < 0) { + bt_dev_err(hdev, "corrupted ACL packet"); + hdev->stat.err_rx++; + } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; + } + + if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->bulk_anchor); + usb_mark_last_busy(data->udev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = HCI_MAX_FRAME_SIZE; + + BT_DBG("%s", hdev->name); + + if (!data->bulk_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, mem_flags); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, + btusb_bulk_complete, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_mark_last_busy(data->udev); + usb_anchor_urb(urb, &data->bulk_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static void btusb_isoc_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hci_get_drvdata(hdev); + int i, err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + for (i = 0; i < urb->number_of_packets; i++) { + unsigned int offset = urb->iso_frame_desc[i].offset; + unsigned int length = urb->iso_frame_desc[i].actual_length; + + if (urb->iso_frame_desc[i].status) + continue; + + hdev->stat.byte_rx += length; + + if (btusb_recv_isoc(data, urb->transfer_buffer + offset, + length) < 0) { + bt_dev_err(hdev, "corrupted SCO packet"); + hdev->stat.err_rx++; + } + } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; + } + + if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->isoc_anchor); + + err = 
usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + usb_unanchor_urb(urb); + } +} + +static inline void __fill_isoc_descriptor_msbc(struct urb *urb, int len, + int mtu, struct btusb_data *data) +{ + int i = 0, offset = 0; + unsigned int interval; + + BT_DBG("len %d mtu %d", len, mtu); + + /* For mSBC ALT 6 settings some chips need to transmit the data + * continuously without the zero length of USB packets. + */ + if (test_bit(BTUSB_ALT6_CONTINUOUS_TX, &data->flags)) + goto ignore_usb_alt6_packet_flow; + + /* For mSBC ALT 6 setting the host will send the packet at continuous + * flow. As per core spec 5, vol 4, part B, table 2.1. For ALT setting + * 6 the HCI PACKET INTERVAL should be 7.5ms for every usb packets. + * To maintain the rate we send 63bytes of usb packets alternatively for + * 7ms and 8ms to maintain the rate as 7.5ms. + */ + if (data->usb_alt6_packet_flow) { + interval = 7; + data->usb_alt6_packet_flow = false; + } else { + interval = 6; + data->usb_alt6_packet_flow = true; + } + + for (i = 0; i < interval; i++) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = offset; + } + +ignore_usb_alt6_packet_flow: + if (len && i < BTUSB_MAX_ISOC_FRAMES) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = len; + i++; + } + + urb->number_of_packets = i; +} + +static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) +{ + int i, offset = 0; + + BT_DBG("len %d mtu %d", len, mtu); + + for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; + i++, offset += mtu, len -= mtu) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = mtu; + } + + if (len && i < BTUSB_MAX_ISOC_FRAMES) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = len; + i++; + } + + urb->number_of_packets = i; +} + +static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size; + + BT_DBG("%s", hdev->name); + + if (!data->isoc_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags); + if (!urb) + return -ENOMEM; + + size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * + BTUSB_MAX_ISOC_FRAMES; + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); + + usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, + hdev, data->isoc_rx_ep->bInterval); + + urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; + + __fill_isoc_descriptor(urb, size, + le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); + + usb_anchor_urb(urb, &data->isoc_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static void btusb_diag_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (urb->status == 0) { + struct sk_buff *skb; + + skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC); + if (skb) { + 
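+ /* Copy the raw diagnostic payload and hand it to the HCI diag channel */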
skb_put_data(skb, urb->transfer_buffer, + urb->actual_length); + hci_recv_diag(hdev, skb); + } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; + } + + if (!test_bit(BTUSB_DIAG_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->diag_anchor); + usb_mark_last_busy(data->udev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = HCI_MAX_FRAME_SIZE; + + BT_DBG("%s", hdev->name); + + if (!data->diag_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, mem_flags); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvbulkpipe(data->udev, data->diag_rx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, + btusb_diag_complete, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_mark_last_busy(data->udev); + usb_anchor_urb(urb, &data->diag_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static void btusb_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct hci_dev *hdev = (struct hci_dev *)skb->dev; + struct btusb_data *data = hci_get_drvdata(hdev); + unsigned long flags; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (!urb->status) { + hdev->stat.byte_tx += urb->transfer_buffer_length; + } else { + if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) + hci_cmd_sync_cancel(hdev, -urb->status); + hdev->stat.err_tx++; + } + +done: + spin_lock_irqsave(&data->txlock, flags); + data->tx_in_flight--; + spin_unlock_irqrestore(&data->txlock, flags); + + kfree(urb->setup_packet); + + kfree_skb(skb); +} + +static void btusb_isoc_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct hci_dev *hdev = (struct hci_dev *)skb->dev; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (!urb->status) + hdev->stat.byte_tx += urb->transfer_buffer_length; + else + hdev->stat.err_tx++; + +done: + kfree(urb->setup_packet); + + kfree_skb(skb); +} + +static int btusb_open(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s", hdev->name); + + err = usb_autopm_get_interface(data->intf); + if (err < 0) + return err; + + /* Patching USB firmware files prior to starting any URBs of HCI path + * It is more safe to use USB bulk channel for downloading USB patch + */ + if (data->setup_on_usb) { + err = data->setup_on_usb(hdev); + if (err < 0) + goto setup_fail; + } + + data->intf->needs_remote_wakeup = 1; + + if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) + goto done; + + err = btusb_submit_intr_urb(hdev, GFP_KERNEL); + if (err < 0) + goto failed; + + err = btusb_submit_bulk_urb(hdev, GFP_KERNEL); + if (err < 0) { + 
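+ /* Bulk submission failed: kill the interrupt URB anchored above
+ * before unwinding.
+ */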
usb_kill_anchored_urbs(&data->intr_anchor); + goto failed; + } + + set_bit(BTUSB_BULK_RUNNING, &data->flags); + btusb_submit_bulk_urb(hdev, GFP_KERNEL); + + if (data->diag) { + if (!btusb_submit_diag_urb(hdev, GFP_KERNEL)) + set_bit(BTUSB_DIAG_RUNNING, &data->flags); + } + +done: + usb_autopm_put_interface(data->intf); + return 0; + +failed: + clear_bit(BTUSB_INTR_RUNNING, &data->flags); +setup_fail: + usb_autopm_put_interface(data->intf); + return err; +} + +static void btusb_stop_traffic(struct btusb_data *data) +{ + usb_kill_anchored_urbs(&data->intr_anchor); + usb_kill_anchored_urbs(&data->bulk_anchor); + usb_kill_anchored_urbs(&data->isoc_anchor); + usb_kill_anchored_urbs(&data->diag_anchor); + usb_kill_anchored_urbs(&data->ctrl_anchor); +} + +static int btusb_close(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s", hdev->name); + + cancel_delayed_work(&data->rx_work); + cancel_work_sync(&data->work); + cancel_work_sync(&data->waker); + + skb_queue_purge(&data->acl_q); + + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + clear_bit(BTUSB_BULK_RUNNING, &data->flags); + clear_bit(BTUSB_INTR_RUNNING, &data->flags); + clear_bit(BTUSB_DIAG_RUNNING, &data->flags); + + btusb_stop_traffic(data); + btusb_free_frags(data); + + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto failed; + + data->intf->needs_remote_wakeup = 0; + + /* Enable remote wake up for auto-suspend */ + if (test_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags)) + data->intf->needs_remote_wakeup = 1; + + usb_autopm_put_interface(data->intf); + +failed: + usb_scuttle_anchored_urbs(&data->deferred); + return 0; +} + +static int btusb_flush(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s", hdev->name); + + cancel_delayed_work(&data->rx_work); + + skb_queue_purge(&data->acl_q); + + usb_kill_anchored_urbs(&data->tx_anchor); + btusb_free_frags(data); + + return 0; +} + +static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct usb_ctrlrequest *dr; + struct urb *urb; + unsigned int pipe; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); + + dr = kmalloc(sizeof(*dr), GFP_KERNEL); + if (!dr) { + usb_free_urb(urb); + return ERR_PTR(-ENOMEM); + } + + dr->bRequestType = data->cmdreq_type; + dr->bRequest = data->cmdreq; + dr->wIndex = 0; + dr->wValue = 0; + dr->wLength = __cpu_to_le16(skb->len); + + pipe = usb_sndctrlpipe(data->udev, 0x00); + + usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, + skb->data, skb->len, btusb_tx_complete, skb); + + skb->dev = (void *)hdev; + + return urb; +} + +static struct urb *alloc_bulk_urb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned int pipe; + + if (!data->bulk_tx_ep) + return ERR_PTR(-ENODEV); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); + + pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_tx_complete, skb); + + skb->dev = (void *)hdev; + + return urb; +} + +static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned int pipe; + + if (!data->isoc_tx_ep) + return ERR_PTR(-ENODEV); + + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL); + if (!urb) + return 
ERR_PTR(-ENOMEM); + + pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); + + usb_fill_int_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_isoc_tx_complete, + skb, data->isoc_tx_ep->bInterval); + + urb->transfer_flags = URB_ISO_ASAP; + + if (data->isoc_altsetting == 6) + __fill_isoc_descriptor_msbc(urb, skb->len, + le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize), + data); + else + __fill_isoc_descriptor(urb, skb->len, + le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); + skb->dev = (void *)hdev; + + return urb; +} + +static int submit_tx_urb(struct hci_dev *hdev, struct urb *urb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + usb_anchor_urb(urb, &data->tx_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + } else { + usb_mark_last_busy(data->udev); + } + + usb_free_urb(urb); + return err; +} + +static int submit_or_queue_tx_urb(struct hci_dev *hdev, struct urb *urb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + unsigned long flags; + bool suspending; + + spin_lock_irqsave(&data->txlock, flags); + suspending = test_bit(BTUSB_SUSPENDING, &data->flags); + if (!suspending) + data->tx_in_flight++; + spin_unlock_irqrestore(&data->txlock, flags); + + if (!suspending) + return submit_tx_urb(hdev, urb); + + usb_anchor_urb(urb, &data->deferred); + schedule_work(&data->waker); + + usb_free_urb(urb); + return 0; +} + +static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct urb *urb; + + BT_DBG("%s", hdev->name); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + urb = alloc_ctrl_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.cmd_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_ACLDATA_PKT: + urb = alloc_bulk_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.acl_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_SCODATA_PKT: + if (hci_conn_num(hdev, SCO_LINK) < 1) + return -ENODEV; + + urb = alloc_isoc_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.sco_tx++; + return submit_tx_urb(hdev, urb); + + case HCI_ISODATA_PKT: + urb = alloc_bulk_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + return submit_or_queue_tx_urb(hdev, urb); + } + + return -EILSEQ; +} + +static void btusb_notify(struct hci_dev *hdev, unsigned int evt) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + + BT_DBG("%s evt %d", hdev->name, evt); + + if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) { + data->sco_num = hci_conn_num(hdev, SCO_LINK); + data->air_mode = evt; + schedule_work(&data->work); + } +} + +static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct usb_interface *intf = data->isoc; + struct usb_endpoint_descriptor *ep_desc; + int i, err; + + if (!data->isoc) + return -ENODEV; + + err = usb_set_interface(data->udev, data->isoc_ifnum, altsetting); + if (err < 0) { + bt_dev_err(hdev, "setting interface failed (%d)", -err); + return err; + } + + data->isoc_altsetting = altsetting; + + data->isoc_tx_ep = NULL; + data->isoc_rx_ep = NULL; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { + data->isoc_tx_ep = ep_desc; + 
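+ /* First isoc OUT endpoint found; keep scanning for the IN endpoint */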
continue; + } + + if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { + data->isoc_rx_ep = ep_desc; + continue; + } + } + + if (!data->isoc_tx_ep || !data->isoc_rx_ep) { + bt_dev_err(hdev, "invalid SCO descriptors"); + return -ENODEV; + } + + return 0; +} + +static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + if (data->isoc_altsetting != new_alts) { + unsigned long flags; + + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + + /* When isochronous alternate setting needs to be + * changed, because SCO connection has been added + * or removed, a packet fragment may be left in the + * reassembling state. This could lead to wrongly + * assembled fragments. + * + * Clear outstanding fragment when selecting a new + * alternate setting. + */ + spin_lock_irqsave(&data->rxlock, flags); + dev_kfree_skb_irq(data->sco_skb); + data->sco_skb = NULL; + spin_unlock_irqrestore(&data->rxlock, flags); + + err = __set_isoc_interface(hdev, new_alts); + if (err < 0) + return err; + } + + if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { + if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + else + btusb_submit_isoc_urb(hdev, GFP_KERNEL); + } + + return 0; +} + +static struct usb_host_interface *btusb_find_altsetting(struct btusb_data *data, + int alt) +{ + struct usb_interface *intf = data->isoc; + int i; + + BT_DBG("Looking for Alt no :%d", alt); + + if (!intf) + return NULL; + + for (i = 0; i < intf->num_altsetting; i++) { + if (intf->altsetting[i].desc.bAlternateSetting == alt) + return &intf->altsetting[i]; + } + + return NULL; +} + +static void btusb_work(struct work_struct *work) +{ + struct btusb_data *data = container_of(work, struct btusb_data, work); + struct hci_dev *hdev = data->hdev; + int new_alts = 0; + int err; + + if (data->sco_num > 0) { + if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { + err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); + if (err < 0) { + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + return; + } + + set_bit(BTUSB_DID_ISO_RESUME, &data->flags); + } + + if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) { + if (hdev->voice_setting & 0x0020) { + static const int alts[3] = { 2, 4, 5 }; + + new_alts = alts[data->sco_num - 1]; + } else { + new_alts = data->sco_num; + } + } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) { + /* Bluetooth USB spec recommends alt 6 (63 bytes), but + * many adapters do not support it. Alt 1 appears to + * work for all adapters that do not have alt 6, and + * which work with WBS at all. Some devices prefer + * alt 3 (HCI payload >= 60 Bytes let air packet + * data satisfy 60 bytes), requiring + * MTU >= 3 (packets) * 25 (size) - 3 (headers) = 72 + * see also Core spec 5, vol 4, B 2.1.1 & Table 2.1. + */ + if (btusb_find_altsetting(data, 6)) + new_alts = 6; + else if (btusb_find_altsetting(data, 3) && + hdev->sco_mtu >= 72 && + test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags)) + new_alts = 3; + else + new_alts = 1; + } + + if (btusb_switch_alt_setting(hdev, new_alts) < 0) + bt_dev_err(hdev, "set USB alt:(%d) failed!", new_alts); + } else { + usb_kill_anchored_urbs(&data->isoc_anchor); + + if (test_and_clear_bit(BTUSB_ISOC_RUNNING, &data->flags)) + __set_isoc_interface(hdev, 0); + + if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) + usb_autopm_put_interface(data->isoc ? 
data->isoc : data->intf); + } +} + +static void btusb_waker(struct work_struct *work) +{ + struct btusb_data *data = container_of(work, struct btusb_data, waker); + int err; + + err = usb_autopm_get_interface(data->intf); + if (err < 0) + return; + + usb_autopm_put_interface(data->intf); +} + +static void btusb_rx_work(struct work_struct *work) +{ + struct btusb_data *data = container_of(work, struct btusb_data, + rx_work.work); + struct sk_buff *skb; + + /* Dequeue ACL data received during the interval */ + while ((skb = skb_dequeue(&data->acl_q))) + data->recv_acl(data->hdev, skb); +} + +static int btusb_setup_bcm92035(struct hci_dev *hdev) +{ + struct sk_buff *skb; + u8 val = 0x00; + + BT_DBG("%s", hdev->name); + + skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + bt_dev_err(hdev, "BCM92035 command failed (%ld)", PTR_ERR(skb)); + else + kfree_skb(skb); + + return 0; +} + +static int btusb_setup_csr(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + u16 bcdDevice = le16_to_cpu(data->udev->descriptor.bcdDevice); + struct hci_rp_read_local_version *rp; + struct sk_buff *skb; + bool is_fake = false; + int ret; + + BT_DBG("%s", hdev->name); + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + bt_dev_err(hdev, "CSR: Local version failed (%d)", err); + return err; + } + + rp = skb_pull_data(skb, sizeof(*rp)); + if (!rp) { + bt_dev_err(hdev, "CSR: Local version length mismatch"); + kfree_skb(skb); + return -EIO; + } + + bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x", + rp->hci_ver, le16_to_cpu(rp->hci_rev)); + + bt_dev_info(hdev, "LMP ver=%u subver=%04x; manufacturer=%u", + rp->lmp_ver, le16_to_cpu(rp->lmp_subver), + le16_to_cpu(rp->manufacturer)); + + /* Detect a wide host of Chinese controllers that aren't CSR. + * + * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 + * + * The main thing they have in common is that these are really popular low-cost + * options that support newer Bluetooth versions but rely on heavy VID/PID + * squatting of this poor old Bluetooth 1.1 device. Even sold as such. + * + * We detect actual CSR devices by checking that the HCI manufacturer code + * is Cambridge Silicon Radio (10) and ensuring that LMP sub-version and + * HCI rev values always match. As they both store the firmware number. + */ + if (le16_to_cpu(rp->manufacturer) != 10 || + le16_to_cpu(rp->hci_rev) != le16_to_cpu(rp->lmp_subver)) + is_fake = true; + + /* Known legit CSR firmware build numbers and their supported BT versions: + * - 1.1 (0x1) -> 0x0073, 0x020d, 0x033c, 0x034e + * - 1.2 (0x2) -> 0x04d9, 0x0529 + * - 2.0 (0x3) -> 0x07a6, 0x07ad, 0x0c5c + * - 2.1 (0x4) -> 0x149c, 0x1735, 0x1899 (0x1899 is a BlueCore4-External) + * - 4.0 (0x6) -> 0x1d86, 0x2031, 0x22bb + * + * e.g. Real CSR dongles with LMP subversion 0x73 are old enough that + * support BT 1.1 only; so it's a dead giveaway when some + * third-party BT 4.0 dongle reuses it. 
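+ * For example, a clone that passes the manufacturer/revision check
+ * above but advertises HCI version 0x6 (BT 4.0) with LMP subversion
+ * 0x034e trips the first range check below, since genuine 0x034e
+ * builds are BT 1.1 parts.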
+ */ + else if (le16_to_cpu(rp->lmp_subver) <= 0x034e && + rp->hci_ver > BLUETOOTH_VER_1_1) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x0529 && + rp->hci_ver > BLUETOOTH_VER_1_2) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x0c5c && + rp->hci_ver > BLUETOOTH_VER_2_0) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x1899 && + rp->hci_ver > BLUETOOTH_VER_2_1) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x22bb && + rp->hci_ver > BLUETOOTH_VER_4_0) + is_fake = true; + + /* Other clones which beat all the above checks */ + else if (bcdDevice == 0x0134 && + le16_to_cpu(rp->lmp_subver) == 0x0c5c && + rp->hci_ver == BLUETOOTH_VER_2_0) + is_fake = true; + + if (is_fake) { + bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds and force-suspending once..."); + + /* Generally these clones have big discrepancies between + * advertised features and what's actually supported. + * Probably will need to be expanded in the future; + * without these the controller will lock up. + */ + set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks); + set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks); + + /* Clear the reset quirk since this is not an actual + * early Bluetooth 1.1 device from CSR. + */ + clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + + /* + * Special workaround for these BT 4.0 chip clones, and potentially more: + * + * - 0x0134: a Barrot 8041a02 (HCI rev: 0x0810 sub: 0x1012) + * - 0x7558: IC markings FR3191AHAL 749H15143 (HCI rev/sub-version: 0x0709) + * + * These controllers are really messed-up. + * + * 1. Their bulk RX endpoint will never report any data unless + * the device was suspended at least once (yes, really). + * 2. They will not wakeup when autosuspended and receiving data + * on their bulk RX endpoint from e.g. a keyboard or mouse + * (IOW remote-wakeup support is broken for the bulk endpoint). + * + * To fix 1. enable runtime-suspend, force-suspend the + * HCI and then wake-it up by disabling runtime-suspend. + * + * To fix 2. clear the HCI's can_wake flag, this way the HCI + * will still be autosuspended when it is not open. + * + * -- + * + * Because these are widespread problems we prefer generic solutions; so + * apply this initialization quirk to every controller that gets here, + * it should be harmless. The alternative is to not work at all. 
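+ * Concretely: allow runtime PM, force one runtime suspend, give the
+ * device ~200ms, then forbid runtime PM again (which resumes it) and
+ * clear its wakeup capability.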
+ */ + pm_runtime_allow(&data->udev->dev); + + ret = pm_runtime_suspend(&data->udev->dev); + if (ret >= 0) + msleep(200); + else + bt_dev_warn(hdev, "CSR: Couldn't suspend the device for our Barrot 8041a02 receive-issue workaround"); + + pm_runtime_forbid(&data->udev->dev); + + device_set_wakeup_capable(&data->udev->dev, false); + + /* Re-enable autosuspend if this was requested */ + if (enable_autosuspend) + usb_enable_autosuspend(data->udev); + } + + kfree_skb(skb); + + return 0; +} + +static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) +{ + struct sk_buff *skb; + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = skb_put(skb, sizeof(*evt)); + evt->ncmd = 0x01; + evt->opcode = cpu_to_le16(opcode); + + skb_put_u8(skb, 0x00); + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer, + int count) +{ + struct hci_dev *hdev = data->hdev; + + /* When the device is in bootloader mode, then it can send + * events via the bulk endpoint. These events are treated the + * same way as the ones received from the interrupt endpoint. + */ + if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) + return btusb_recv_intr(data, buffer, count); + + return btusb_recv_bulk(data, buffer, count); +} + +static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct urb *urb; + + BT_DBG("%s", hdev->name); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { + struct hci_command_hdr *cmd = (void *)skb->data; + __u16 opcode = le16_to_cpu(cmd->opcode); + + /* When in bootloader mode and the command 0xfc09 + * is received, it needs to be send down the + * bulk endpoint. So allocate a bulk URB instead. + */ + if (opcode == 0xfc09) + urb = alloc_bulk_urb(hdev, skb); + else + urb = alloc_ctrl_urb(hdev, skb); + + /* When the 0xfc01 command is issued to boot into + * the operational firmware, it will actually not + * send a command complete event. To keep the flow + * control working inject that event here. 
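+ * The injected event (see inject_cmd_complete()) is a Command Complete
+ * with ncmd set to 1 and a zero status byte, which keeps the HCI
+ * core's command credit accounting moving.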
+ */ + if (opcode == 0xfc01) + inject_cmd_complete(hdev, opcode); + } else { + urb = alloc_ctrl_urb(hdev, skb); + } + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.cmd_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_ACLDATA_PKT: + urb = alloc_bulk_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.acl_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_SCODATA_PKT: + if (hci_conn_num(hdev, SCO_LINK) < 1) + return -ENODEV; + + urb = alloc_isoc_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.sco_tx++; + return submit_tx_urb(hdev, urb); + + case HCI_ISODATA_PKT: + urb = alloc_bulk_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + return submit_or_queue_tx_urb(hdev, urb); + } + + return -EILSEQ; +} + +static int btusb_setup_realtek(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int ret; + + ret = btrtl_setup_realtek(hdev); + + if (btrealtek_test_flag(data->hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP)) + set_bit(BTUSB_ALT6_CONTINUOUS_TX, &data->flags); + + return ret; +} + +static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (skb->data[0] == HCI_VENDOR_PKT && skb->data[2] == RTK_SUB_EVENT_CODE_COREDUMP) { + struct rtk_dev_coredump_hdr hdr = { + .code = RTK_DEVCOREDUMP_CODE_MEMDUMP, + }; + + bt_dev_dbg(hdev, "RTL: received coredump vendor evt, len %u", + skb->len); + + btusb_rtl_alloc_devcoredump(hdev, &hdr, skb->data, skb->len); + kfree_skb(skb); + + return 0; + } + + return hci_recv_frame(hdev, skb); +} + +/* UHW CR mapping */ +#define MTK_BT_MISC 0x70002510 +#define MTK_BT_SUBSYS_RST 0x70002610 +#define MTK_UDMA_INT_STA_BT 0x74000024 +#define MTK_UDMA_INT_STA_BT1 0x74000308 +#define MTK_BT_WDT_STATUS 0x740003A0 +#define MTK_EP_RST_OPT 0x74011890 +#define MTK_EP_RST_IN_OUT_OPT 0x00010001 +#define MTK_BT_RST_DONE 0x00000100 +#define MTK_BT_RESET_REG_CONNV3 0x70028610 +#define MTK_BT_READ_DEV_ID 0x70010200 + + +static void btusb_mtk_wmt_recv(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + if (urb->status == 0 && urb->actual_length > 0) { + hdev->stat.byte_rx += urb->actual_length; + + /* WMT event shouldn't be fragmented and the size should be + * less than HCI_WMT_MAX_EVENT_SIZE. + */ + skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); + if (!skb) { + hdev->stat.err_rx++; + kfree(urb->setup_packet); + return; + } + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, urb->transfer_buffer, urb->actual_length); + + /* When someone waits for the WMT event, the skb is being cloned + * and being processed the events from there then. 
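+ * In other words, a clone is stashed in data->evt_skb for
+ * btusb_mtk_hci_wmt_sync() to parse, while the original skb is still
+ * delivered through hci_recv_frame() below.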
+ */ + if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { + data->evt_skb = skb_clone(skb, GFP_ATOMIC); + if (!data->evt_skb) { + kfree_skb(skb); + kfree(urb->setup_packet); + return; + } + } + + err = hci_recv_frame(hdev, skb); + if (err < 0) { + kfree_skb(data->evt_skb); + data->evt_skb = NULL; + kfree(urb->setup_packet); + return; + } + + if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT, + &data->flags)) { + /* Barrier to sync with other CPUs */ + smp_mb__after_atomic(); + wake_up_bit(&data->flags, + BTUSB_TX_WAIT_VND_EVT); + } + kfree(urb->setup_packet); + return; + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; + } + + usb_mark_last_busy(data->udev); + + /* The URB complete handler is still called with urb->actual_length = 0 + * when the event is not available, so we should keep re-submitting + * URB until WMT event returns, Also, It's necessary to wait some time + * between the two consecutive control URBs to relax the target device + * to generate the event. Otherwise, the WMT event cannot return from + * the device successfully. + */ + udelay(500); + + usb_anchor_urb(urb, &data->ctrl_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + kfree(urb->setup_packet); + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct usb_ctrlrequest *dr; + unsigned char *buf; + int err, size = 64; + unsigned int pipe; + struct urb *urb; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + dr = kmalloc(sizeof(*dr), GFP_KERNEL); + if (!dr) { + usb_free_urb(urb); + return -ENOMEM; + } + + dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN; + dr->bRequest = 1; + dr->wIndex = cpu_to_le16(0); + dr->wValue = cpu_to_le16(48); + dr->wLength = cpu_to_le16(size); + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + kfree(dr); + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvctrlpipe(data->udev, 0); + + usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, + buf, size, btusb_mtk_wmt_recv, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->ctrl_anchor); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, + struct btmtk_hci_wmt_params *wmt_params) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; + u32 hlen, status = BTMTK_WMT_INVALID; + struct btmtk_hci_wmt_evt *wmt_evt; + struct btmtk_hci_wmt_cmd *wc; + struct btmtk_wmt_hdr *hdr; + int err; + + /* Send the WMT command and wait until the WMT event returns */ + hlen = sizeof(*hdr) + wmt_params->dlen; + if (hlen > 255) + return -EINVAL; + + wc = kzalloc(hlen, GFP_KERNEL); + if (!wc) + return -ENOMEM; + + hdr = &wc->hdr; + hdr->dir = 1; + hdr->op = wmt_params->op; + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); + hdr->flag = wmt_params->flag; + memcpy(wc->data, wmt_params->data, wmt_params->dlen); + + set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); + + /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling, + * it needs constantly polling control pipe until 
the host received the + * WMT event, thus, we should require to specifically acquire PM counter + * on the USB to prevent the interface from entering auto suspended + * while WMT cmd/event in progress. + */ + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto err_free_wc; + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); + + if (err < 0) { + clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); + usb_autopm_put_interface(data->intf); + goto err_free_wc; + } + + /* Submit control IN URB on demand to process the WMT event */ + err = btusb_mtk_submit_wmt_recv_urb(hdev); + + usb_autopm_put_interface(data->intf); + + if (err < 0) + goto err_free_wc; + + /* The vendor specific WMT commands are all answered by a vendor + * specific event and will have the Command Status or Command + * Complete as with usual HCI command flow control. + * + * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT + * state to be cleared. The driver specific event receive routine + * will clear that state and with that indicate completion of the + * WMT command. + */ + err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT, + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); + if (err == -EINTR) { + bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); + goto err_free_wc; + } + + if (err) { + bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); + err = -ETIMEDOUT; + goto err_free_wc; + } + + if (data->evt_skb == NULL) + goto err_free_wc; + + /* Parse and handle the return WMT event */ + wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; + if (wmt_evt->whdr.op != hdr->op) { + bt_dev_err(hdev, "Wrong op received %d expected %d", + wmt_evt->whdr.op, hdr->op); + err = -EIO; + goto err_free_skb; + } + + switch (wmt_evt->whdr.op) { + case BTMTK_WMT_SEMAPHORE: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_UNDONE; + else + status = BTMTK_WMT_PATCH_DONE; + break; + case BTMTK_WMT_FUNC_CTRL: + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) + status = BTMTK_WMT_ON_DONE; + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) + status = BTMTK_WMT_ON_PROGRESS; + else + status = BTMTK_WMT_ON_UNDONE; + break; + case BTMTK_WMT_PATCH_DWNLD: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_DONE; + else if (wmt_evt->whdr.flag == 1) + status = BTMTK_WMT_PATCH_PROGRESS; + else + status = BTMTK_WMT_PATCH_UNDONE; + break; + } + + if (wmt_params->status) + *wmt_params->status = status; + +err_free_skb: + kfree_skb(data->evt_skb); + data->evt_skb = NULL; +err_free_wc: + kfree(wc); + return err; +} + +static int btusb_mtk_func_query(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + int status, err; + u8 param = 0; + + /* Query whether the function is enabled */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 4; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = &status; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query function status (%d)", err); + return err; + } + + return status; +} + +static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val) +{ + struct hci_dev *hdev = data->hdev; + int pipe, err; + void *buf; + + buf = kzalloc(4, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + put_unaligned_le32(val, buf); + + pipe = usb_sndctrlpipe(data->udev, 0); + err = usb_control_msg(data->udev, pipe, 0x02, + 
0x5E, + reg >> 16, reg & 0xffff, + buf, 4, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + bt_dev_err(hdev, "Failed to write uhw reg(%d)", err); + goto err_free_buf; + } + +err_free_buf: + kfree(buf); + + return err; +} + +static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val) +{ + struct hci_dev *hdev = data->hdev; + int pipe, err; + void *buf; + + buf = kzalloc(4, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pipe = usb_rcvctrlpipe(data->udev, 0); + err = usb_control_msg(data->udev, pipe, 0x01, + 0xDE, + reg >> 16, reg & 0xffff, + buf, 4, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); + goto err_free_buf; + } + + *val = get_unaligned_le32(buf); + bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val); + +err_free_buf: + kfree(buf); + + return err; +} + +static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val) +{ + int pipe, err, size = sizeof(u32); + void *buf; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pipe = usb_rcvctrlpipe(data->udev, 0); + err = usb_control_msg(data->udev, pipe, 0x63, + USB_TYPE_VENDOR | USB_DIR_IN, + reg >> 16, reg & 0xffff, + buf, size, USB_CTRL_SET_TIMEOUT); + if (err < 0) + goto err_free_buf; + + *val = get_unaligned_le32(buf); + +err_free_buf: + kfree(buf); + + return err; +} + +static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) +{ + return btusb_mtk_reg_read(data, reg, id); +} + +static u32 btusb_mtk_reset_done(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + u32 val = 0; + + btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val); + + return val & MTK_BT_RST_DONE; +} + +static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct btmediatek_data *mediatek; + u32 val; + int err; + + /* It's MediaTek specific bluetooth reset mechanism via USB */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? Not resetting again"); + return -EBUSY; + } + + err = usb_autopm_get_interface(data->intf); + if (err < 0) + return err; + + btusb_stop_traffic(data); + usb_kill_anchored_urbs(&data->tx_anchor); + mediatek = hci_get_priv(hdev); + + if (mediatek->dev_id == 0x7925) { + btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); + val |= (1 << 5); + btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); + btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); + val &= 0xFFFF00FF; + val |= (1 << 13); + btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); + btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001); + btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); + val |= (1 << 0); + btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); + btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); + btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); + btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); + btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); + msleep(100); + } else { + /* It's Device EndPoint Reset Option Register */ + bt_dev_dbg(hdev, "Initiating reset mechanism via uhw"); + btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); + btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val); + + /* Reset the bluetooth chip via USB interface. 
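+ * The sequence below asserts MTK_BT_SUBSYS_RST, writes the UDMA
+ * interrupt status registers, waits ~20ms, then deasserts the reset
+ * bit; completion is polled afterwards via MTK_BT_MISC & MTK_BT_RST_DONE.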
*/ + btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1); + btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); + btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); + btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); + btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); + /* MT7921 need to delay 20ms between toggle reset bit */ + msleep(20); + btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0); + btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val); + } + + err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val, + val & MTK_BT_RST_DONE, 20000, 1000000); + if (err < 0) + bt_dev_err(hdev, "Reset timeout"); + + btusb_mtk_id_get(data, 0x70010200, &val); + if (!val) + bt_dev_err(hdev, "Can't get device id, subsys reset fail."); + + usb_queue_reset_device(data->intf); + + clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags); + + return err; +} + +static int btusb_mtk_setup(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + ktime_t calltime, delta, rettime; + struct btmtk_tci_sleep tci_sleep; + unsigned long long duration; + struct sk_buff *skb; + const char *fwname; + int err, status; + u32 dev_id = 0; + char fw_bin_name[64]; + u32 fw_version = 0; + u8 param; + struct btmediatek_data *mediatek; + + calltime = ktime_get(); + + err = btusb_mtk_id_get(data, 0x80000008, &dev_id); + if (err < 0) { + bt_dev_err(hdev, "Failed to get device id (%d)", err); + return err; + } + + if (!dev_id || dev_id != 0x7663) { + err = btusb_mtk_id_get(data, 0x70010200, &dev_id); + if (err < 0) { + bt_dev_err(hdev, "Failed to get device id (%d)", err); + return err; + } + err = btusb_mtk_id_get(data, 0x80021004, &fw_version); + if (err < 0) { + bt_dev_err(hdev, "Failed to get fw version (%d)", err); + return err; + } + } + + mediatek = hci_get_priv(hdev); + mediatek->dev_id = dev_id; + mediatek->reset_sync = btusb_mtk_reset; + + err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version); + if (err < 0) + bt_dev_err(hdev, "Failed to register coredump (%d)", err); + + switch (dev_id) { + case 0x7663: + fwname = FIRMWARE_MT7663; + break; + case 0x7668: + fwname = FIRMWARE_MT7668; + break; + case 0x7922: + case 0x7961: + case 0x7925: + if (dev_id == 0x7925) + snprintf(fw_bin_name, sizeof(fw_bin_name), + "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", + dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1); + else + snprintf(fw_bin_name, sizeof(fw_bin_name), + "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", + dev_id & 0xffff, (fw_version & 0xff) + 1); + + err = btmtk_setup_firmware_79xx(hdev, fw_bin_name, + btusb_mtk_hci_wmt_sync); + if (err < 0) { + bt_dev_err(hdev, "Failed to set up firmware (%d)", err); + return err; + } + + /* It's Device EndPoint Reset Option Register */ + btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); + + /* Enable Bluetooth protocol */ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + hci_set_msft_opcode(hdev, 0xFD30); + hci_set_aosp_capable(hdev); + goto done; + default: + bt_dev_err(hdev, "Unsupported hardware variant (%08x)", + dev_id); + return -ENODEV; + } + + /* Query whether the firmware is already download */ + wmt_params.op = BTMTK_WMT_SEMAPHORE; + wmt_params.flag = 1; + wmt_params.dlen = 0; 
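+ /* No payload; the result lands in 'status', and BTMTK_WMT_PATCH_DONE
+ * lets the firmware download below be skipped.
+ */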
+ wmt_params.data = NULL; + wmt_params.status = &status; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); + return err; + } + + if (status == BTMTK_WMT_PATCH_DONE) { + bt_dev_info(hdev, "firmware already downloaded"); + goto ignore_setup_fw; + } + + /* Setup a firmware which the device definitely requires */ + err = btmtk_setup_firmware(hdev, fwname, + btusb_mtk_hci_wmt_sync); + if (err < 0) + return err; + +ignore_setup_fw: + err = readx_poll_timeout(btusb_mtk_func_query, hdev, status, + status < 0 || status != BTMTK_WMT_ON_PROGRESS, + 2000, 5000000); + /* -ETIMEDOUT happens */ + if (err < 0) + return err; + + /* The other errors happen in btusb_mtk_func_query */ + if (status < 0) + return status; + + if (status == BTMTK_WMT_ON_DONE) { + bt_dev_info(hdev, "function already on"); + goto ignore_func_on; + } + + /* Enable Bluetooth protocol */ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + +ignore_func_on: + /* Apply the low power environment setup */ + tci_sleep.mode = 0x5; + tci_sleep.duration = cpu_to_le16(0x640); + tci_sleep.host_duration = cpu_to_le16(0x640); + tci_sleep.host_wakeup_pin = 0; + tci_sleep.time_compensation = 0; + + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); + return err; + } + kfree_skb(skb); + +done: + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device setup in %llu usecs", duration); + + return 0; +} + +static int btusb_mtk_shutdown(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + u8 param = 0; + int err; + + /* Disable the device */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + return 0; +} + +static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); + struct sk_buff *skb_cd; + + switch (handle) { + case 0xfc6f: /* Firmware dump from device */ + /* When the firmware hangs, the device can no longer + * suspend and thus disable auto-suspend. + */ + usb_disable_autosuspend(data->udev); + + /* We need to forward the diagnostic packet to userspace daemon + * for backward compatibility, so we have to clone the packet + * extraly for the in-kernel coredump support. 
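+ * The original skb still follows the normal path below and reaches
+ * hci_recv_diag(); only the clone is fed to btmtk_process_coredump().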
+ */ + skb_cd = skb_clone(skb, GFP_ATOMIC); + if (skb_cd) + btmtk_process_coredump(hdev, skb_cd); + + fallthrough; + case 0x05ff: /* Firmware debug logging 1 */ + case 0x05fe: /* Firmware debug logging 2 */ + return hci_recv_diag(hdev, skb); + } + + return hci_recv_frame(hdev, skb); +} + +#ifdef CONFIG_PM +/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */ +static int marvell_config_oob_wake(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct btusb_data *data = hci_get_drvdata(hdev); + struct device *dev = &data->udev->dev; + u16 pin, gap, opcode; + int ret; + u8 cmd[5]; + + /* Move on if no wakeup pin specified */ + if (of_property_read_u16(dev->of_node, "marvell,wakeup-pin", &pin) || + of_property_read_u16(dev->of_node, "marvell,wakeup-gap-ms", &gap)) + return 0; + + /* Vendor specific command to configure a GPIO as wake-up pin */ + opcode = hci_opcode_pack(0x3F, 0x59); + cmd[0] = opcode & 0xFF; + cmd[1] = opcode >> 8; + cmd[2] = 2; /* length of parameters that follow */ + cmd[3] = pin; + cmd[4] = gap; /* time in ms, for which wakeup pin should be asserted */ + + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); + if (!skb) { + bt_dev_err(hdev, "%s: No memory", __func__); + return -ENOMEM; + } + + skb_put_data(skb, cmd, sizeof(cmd)); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + ret = btusb_send_frame(hdev, skb); + if (ret) { + bt_dev_err(hdev, "%s: configuration failed", __func__); + kfree_skb(skb); + return ret; + } + + return 0; +} +#endif + +static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, + const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 buf[8]; + long ret; + + buf[0] = 0xfe; + buf[1] = sizeof(bdaddr_t); + memcpy(buf + 2, bdaddr, sizeof(bdaddr_t)); + + skb = __hci_cmd_sync(hdev, 0xfc22, sizeof(buf), buf, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "changing Marvell device address failed (%ld)", + ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + +static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev, + const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 buf[10]; + long ret; + + buf[0] = 0x01; + buf[1] = 0x01; + buf[2] = 0x00; + buf[3] = sizeof(bdaddr_t); + memcpy(buf + 4, bdaddr, sizeof(bdaddr_t)); + + skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "Change address command failed (%ld)", ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + +static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev, + const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 buf[6]; + long ret; + + memcpy(buf, bdaddr, sizeof(bdaddr_t)); + + skb = __hci_cmd_sync_ev(hdev, 0xfc14, sizeof(buf), buf, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + bt_dev_err(hdev, "Change address command failed (%ld)", ret); + return ret; + } + kfree_skb(skb); + + return 0; +} + +#define QCA_MEMDUMP_ACL_HANDLE 0x2EDD +#define QCA_MEMDUMP_SIZE_MAX 0x100000 +#define QCA_MEMDUMP_VSE_CLASS 0x01 +#define QCA_MEMDUMP_MSG_TYPE 0x08 +#define QCA_MEMDUMP_PKT_SIZE 248 +#define QCA_LAST_SEQUENCE_NUM 0xffff + +struct qca_dump_hdr { + u8 vse_class; + u8 msg_type; + __le16 seqno; + u8 reserved; + union { + u8 data[0]; + struct { + __le32 ram_dump_size; + u8 data0[0]; + } __packed; + }; +} __packed; + + +static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + char buf[128]; + struct btusb_data *btdata = hci_get_drvdata(hdev); + + snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n", + 
btdata->qca_dump.controller_id); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n", + btdata->qca_dump.fw_version); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\nVendor: qca\n", + btusb_driver.name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "VID: 0x%x\nPID:0x%x\n", + btdata->qca_dump.id_vendor, btdata->qca_dump.id_product); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Lmp Subversion: 0x%x\n", + hdev->lmp_subver); + skb_put_data(skb, buf, strlen(buf)); +} + +static void btusb_coredump_qca(struct hci_dev *hdev) +{ + static const u8 param[] = { 0x26 }; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb)); + kfree_skb(skb); +} + +/* + * ==0: not a dump pkt. + * < 0: fails to handle a dump pkt + * > 0: otherwise. + */ +static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + int ret = 1; + u8 pkt_type; + u8 *sk_ptr; + unsigned int sk_len; + u16 seqno; + u32 dump_size; + + struct hci_event_hdr *event_hdr; + struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; + + pkt_type = hci_skb_pkt_type(skb); + sk_ptr = skb->data; + sk_len = skb->len; + + if (pkt_type == HCI_ACLDATA_PKT) { + acl_hdr = hci_acl_hdr(skb); + if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) + return 0; + sk_ptr += HCI_ACL_HDR_SIZE; + sk_len -= HCI_ACL_HDR_SIZE; + event_hdr = (struct hci_event_hdr *)sk_ptr; + } else { + event_hdr = hci_event_hdr(skb); + } + + if ((event_hdr->evt != HCI_VENDOR_PKT) + || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + return 0; + + sk_ptr += HCI_EVENT_HDR_SIZE; + sk_len -= HCI_EVENT_HDR_SIZE; + + dump_hdr = (struct qca_dump_hdr *)sk_ptr; + if ((sk_len < offsetof(struct qca_dump_hdr, data)) + || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) + || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + return 0; + + /* it is a dump pkt now */ + seqno = le16_to_cpu(dump_hdr->seqno); + if (seqno == 0) { + set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); + dump_size = le32_to_cpu(dump_hdr->ram_dump_size); + if (!dump_size || (dump_size > QCA_MEMDUMP_SIZE_MAX)) { + ret = -EILSEQ; + bt_dev_err(hdev, "Invalid memdump size(%u)", + dump_size); + goto out; + } + + ret = hci_devcd_init(hdev, dump_size); + if (ret < 0) { + bt_dev_err(hdev, "memdump init error(%d)", ret); + goto out; + } + + btdata->qca_dump.ram_dump_size = dump_size; + btdata->qca_dump.ram_dump_seqno = 0; + sk_ptr += offsetof(struct qca_dump_hdr, data0); + sk_len -= offsetof(struct qca_dump_hdr, data0); + + usb_disable_autosuspend(udev); + bt_dev_info(hdev, "%s memdump size(%u)\n", + (pkt_type == HCI_ACLDATA_PKT) ? 
"ACL" : "event", + dump_size); + } else { + sk_ptr += offsetof(struct qca_dump_hdr, data); + sk_len -= offsetof(struct qca_dump_hdr, data); + } + + if (!btdata->qca_dump.ram_dump_size) { + ret = -EINVAL; + bt_dev_err(hdev, "memdump is not active"); + goto out; + } + + if ((seqno > btdata->qca_dump.ram_dump_seqno + 1) && (seqno != QCA_LAST_SEQUENCE_NUM)) { + dump_size = QCA_MEMDUMP_PKT_SIZE * (seqno - btdata->qca_dump.ram_dump_seqno - 1); + hci_devcd_append_pattern(hdev, 0x0, dump_size); + bt_dev_err(hdev, + "expected memdump seqno(%u) is not received(%u)\n", + btdata->qca_dump.ram_dump_seqno, seqno); + btdata->qca_dump.ram_dump_seqno = seqno; + kfree_skb(skb); + return ret; + } + + skb_pull(skb, skb->len - sk_len); + hci_devcd_append(hdev, skb); + btdata->qca_dump.ram_dump_seqno++; + if (seqno == QCA_LAST_SEQUENCE_NUM) { + bt_dev_info(hdev, + "memdump done: pkts(%u), total(%u)\n", + btdata->qca_dump.ram_dump_seqno, btdata->qca_dump.ram_dump_size); + + hci_devcd_complete(hdev); + goto out; + } + return ret; + +out: + if (btdata->qca_dump.ram_dump_size) + usb_enable_autosuspend(udev); + btdata->qca_dump.ram_dump_size = 0; + btdata->qca_dump.ram_dump_seqno = 0; + clear_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); + + if (ret < 0) + kfree_skb(skb); + return ret; +} + +static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (handle_dump_pkt_qca(hdev, skb)) + return 0; + return hci_recv_frame(hdev, skb); +} + +static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (handle_dump_pkt_qca(hdev, skb)) + return 0; + return hci_recv_frame(hdev, skb); +} + + +#define QCA_DFU_PACKET_LEN 4096 + +#define QCA_GET_TARGET_VERSION 0x09 +#define QCA_CHECK_STATUS 0x05 +#define QCA_DFU_DOWNLOAD 0x01 + +#define QCA_SYSCFG_UPDATED 0x40 +#define QCA_PATCH_UPDATED 0x80 +#define QCA_DFU_TIMEOUT 3000 +#define QCA_FLAG_MULTI_NVM 0x80 +#define QCA_BT_RESET_WAIT_MS 100 + +#define WCN6855_2_0_RAM_VERSION_GF 0x400c1200 +#define WCN6855_2_1_RAM_VERSION_GF 0x400c1211 + +struct qca_version { + __le32 rom_version; + __le32 patch_version; + __le32 ram_version; + __u8 chip_id; + __u8 platform_id; + __le16 flag; + __u8 reserved[4]; +} __packed; + +struct qca_rampatch_version { + __le16 rom_version_high; + __le16 rom_version_low; + __le16 patch_version; +} __packed; + +struct qca_device_info { + u32 rom_version; + u8 rampatch_hdr; /* length of header in rampatch */ + u8 nvm_hdr; /* length of header in NVM */ + u8 ver_offset; /* offset of version structure in rampatch */ +}; + +static const struct qca_device_info qca_devices_table[] = { + { 0x00000100, 20, 4, 8 }, /* Rome 1.0 */ + { 0x00000101, 20, 4, 8 }, /* Rome 1.1 */ + { 0x00000200, 28, 4, 16 }, /* Rome 2.0 */ + { 0x00000201, 28, 4, 16 }, /* Rome 2.1 */ + { 0x00000300, 28, 4, 16 }, /* Rome 3.0 */ + { 0x00000302, 28, 4, 16 }, /* Rome 3.2 */ + { 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */ + { 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */ + { 0x00130201, 40, 4, 16 }, /* WCN6855 2.1 */ + { 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */ +}; + +static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, + void *data, u16 size) +{ + int pipe, err; + u8 *buf; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Found some of USB hosts have IOT issues with ours so that we should + * not wait until HCI layer is ready. 
+ */ + pipe = usb_rcvctrlpipe(udev, 0); + err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN, + 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + dev_err(&udev->dev, "Failed to access otp area (%d)", err); + goto done; + } + + memcpy(data, buf, size); + +done: + kfree(buf); + + return err; +} + +static int btusb_setup_qca_download_fw(struct hci_dev *hdev, + const struct firmware *firmware, + size_t hdr_size) +{ + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; + size_t count, size, sent = 0; + int pipe, len, err; + u8 *buf; + + buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + count = firmware->size; + + size = min_t(size_t, count, hdr_size); + memcpy(buf, firmware->data, size); + + /* USB patches should go down to controller through USB path + * because binary format fits to go down through USB channel. + * USB control path is for patching headers and USB bulk is for + * patch body. + */ + pipe = usb_sndctrlpipe(udev, 0); + err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR, + 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + bt_dev_err(hdev, "Failed to send headers (%d)", err); + goto done; + } + + sent += size; + count -= size; + + /* ep2 need time to switch from function acl to function dfu, + * so we add 20ms delay here. + */ + msleep(20); + + while (count) { + size = min_t(size_t, count, QCA_DFU_PACKET_LEN); + + memcpy(buf, firmware->data + sent, size); + + pipe = usb_sndbulkpipe(udev, 0x02); + err = usb_bulk_msg(udev, pipe, buf, size, &len, + QCA_DFU_TIMEOUT); + if (err < 0) { + bt_dev_err(hdev, "Failed to send body at %zd of %zd (%d)", + sent, firmware->size, err); + break; + } + + if (size != len) { + bt_dev_err(hdev, "Failed to get bulk buffer"); + err = -EILSEQ; + break; + } + + sent += size; + count -= size; + } + +done: + kfree(buf); + return err; +} + +static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev, + struct qca_version *ver, + const struct qca_device_info *info) +{ + struct qca_rampatch_version *rver; + const struct firmware *fw; + u32 ver_rom, ver_patch, rver_rom; + u16 rver_rom_low, rver_rom_high, rver_patch; + char fwname[64]; + int err; + + ver_rom = le32_to_cpu(ver->rom_version); + ver_patch = le32_to_cpu(ver->patch_version); + + snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err) { + bt_dev_err(hdev, "failed to request rampatch file: %s (%d)", + fwname, err); + return err; + } + + bt_dev_info(hdev, "using rampatch file: %s", fwname); + + rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset); + rver_rom_low = le16_to_cpu(rver->rom_version_low); + rver_patch = le16_to_cpu(rver->patch_version); + + if (ver_rom & ~0xffffU) { + rver_rom_high = le16_to_cpu(rver->rom_version_high); + rver_rom = rver_rom_high << 16 | rver_rom_low; + } else { + rver_rom = rver_rom_low; + } + + bt_dev_info(hdev, "QCA: patch rome 0x%x build 0x%x, " + "firmware rome 0x%x build 0x%x", + rver_rom, rver_patch, ver_rom, ver_patch); + + if (rver_rom != ver_rom || rver_patch <= ver_patch) { + bt_dev_err(hdev, "rampatch file version did not match with firmware"); + err = -EINVAL; + goto done; + } + + err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr); + +done: + release_firmware(fw); + + return err; +} + +static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size, + const struct qca_version *ver) +{ + u32 rom_version = 
le32_to_cpu(ver->rom_version); + u16 flag = le16_to_cpu(ver->flag); + + if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) { + /* The board_id should be split into two bytes + * The 1st byte is chip ID, and the 2nd byte is platform ID + * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID + * we have several platforms, and platform IDs are continuously added + * Platform ID: + * 0x00 is for Mobile + * 0x01 is for X86 + * 0x02 is for Automotive + * 0x03 is for Consumer electronics + */ + u16 board_id = (ver->chip_id << 8) + ver->platform_id; + const char *variant; + + switch (le32_to_cpu(ver->ram_version)) { + case WCN6855_2_0_RAM_VERSION_GF: + case WCN6855_2_1_RAM_VERSION_GF: + variant = "_gf"; + break; + default: + variant = ""; + break; + } + + if (board_id == 0) { + snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin", + rom_version, variant); + } else { + snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin", + rom_version, variant, board_id); + } + } else { + snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin", + rom_version); + } + +} + +static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, + struct qca_version *ver, + const struct qca_device_info *info) +{ + const struct firmware *fw; + char fwname[64]; + int err; + + btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err) { + bt_dev_err(hdev, "failed to request NVM file: %s (%d)", + fwname, err); + return err; + } + + bt_dev_info(hdev, "using NVM file: %s", fwname); + + err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr); + + release_firmware(fw); + + return err; +} + +/* identify the ROM version and check whether patches are needed */ +static bool btusb_qca_need_patch(struct usb_device *udev) +{ + struct qca_version ver; + + if (btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, + sizeof(ver)) < 0) + return false; + /* only low ROM versions need patches */ + return !(le32_to_cpu(ver.rom_version) & ~0xffffU); +} + +static int btusb_setup_qca(struct hci_dev *hdev) +{ + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; + const struct qca_device_info *info = NULL; + struct qca_version ver; + u32 ver_rom; + u8 status; + int i, err; + + err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, + sizeof(ver)); + if (err < 0) + return err; + + ver_rom = le32_to_cpu(ver.rom_version); + + for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) { + if (ver_rom == qca_devices_table[i].rom_version) + info = &qca_devices_table[i]; + } + if (!info) { + /* If the rom_version is not matched in the qca_devices_table + * and the high ROM version is not zero, we assume this chip does not + * need to load the rampatch and nvm. 
+ */ + if (ver_rom & ~0xffffU) + return 0; + + bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom); + return -ENODEV; + } + + err = btusb_qca_send_vendor_req(udev, QCA_CHECK_STATUS, &status, + sizeof(status)); + if (err < 0) + return err; + + if (!(status & QCA_PATCH_UPDATED)) { + err = btusb_setup_qca_load_rampatch(hdev, &ver, info); + if (err < 0) + return err; + } + + err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, + sizeof(ver)); + if (err < 0) + return err; + + btdata->qca_dump.fw_version = le32_to_cpu(ver.patch_version); + btdata->qca_dump.controller_id = le32_to_cpu(ver.rom_version); + + if (!(status & QCA_SYSCFG_UPDATED)) { + err = btusb_setup_qca_load_nvm(hdev, &ver, info); + if (err < 0) + return err; + + /* WCN6855 2.1 and later will reset to apply firmware downloaded here, so + * wait ~100ms for reset Done then go ahead, otherwise, it maybe + * cause potential enable failure. + */ + if (info->rom_version >= 0x00130201) + msleep(QCA_BT_RESET_WAIT_MS); + } + + /* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to + * work with the likes of HSP/HFP mSBC. + */ + set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); + + return 0; +} + +static inline int __set_diag_interface(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct usb_interface *intf = data->diag; + int i; + + if (!data->diag) + return -ENODEV; + + data->diag_tx_ep = NULL; + data->diag_rx_ep = NULL; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + struct usb_endpoint_descriptor *ep_desc; + + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->diag_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { + data->diag_tx_ep = ep_desc; + continue; + } + + if (!data->diag_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { + data->diag_rx_ep = ep_desc; + continue; + } + } + + if (!data->diag_tx_ep || !data->diag_rx_ep) { + bt_dev_err(hdev, "invalid diagnostic descriptors"); + return -ENODEV; + } + + return 0; +} + +static struct urb *alloc_diag_urb(struct hci_dev *hdev, bool enable) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct sk_buff *skb; + struct urb *urb; + unsigned int pipe; + + if (!data->diag_tx_ep) + return ERR_PTR(-ENODEV); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); + + skb = bt_skb_alloc(2, GFP_KERNEL); + if (!skb) { + usb_free_urb(urb); + return ERR_PTR(-ENOMEM); + } + + skb_put_u8(skb, 0xf0); + skb_put_u8(skb, enable); + + pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_tx_complete, skb); + + skb->dev = (void *)hdev; + + return urb; +} + +static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + + if (!data->diag) + return -ENODEV; + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + urb = alloc_diag_urb(hdev, enable); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + return submit_or_queue_tx_urb(hdev, urb); +} + +#ifdef CONFIG_PM +static irqreturn_t btusb_oob_wake_handler(int irq, void *priv) +{ + struct btusb_data *data = priv; + + pm_wakeup_event(&data->udev->dev, 0); + pm_system_wakeup(); + + /* Disable only if not already disabled (keep it balanced) */ + if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { + disable_irq_nosync(irq); + disable_irq_wake(irq); + } + return IRQ_HANDLED; +} + +static const struct of_device_id btusb_match_table[] = { 
+ { .compatible = "usb1286,204e" }, + { .compatible = "usbcf3,e300" }, /* QCA6174A */ + { .compatible = "usb4ca,301a" }, /* QCA6174A (Lite-On) */ + { } +}; +MODULE_DEVICE_TABLE(of, btusb_match_table); + +/* Use an oob wakeup pin? */ +static int btusb_config_oob_wake(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct device *dev = &data->udev->dev; + int irq, ret; + + clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags); + + if (!of_match_device(btusb_match_table, dev)) + return 0; + + /* Move on if no IRQ specified */ + irq = of_irq_get_byname(dev->of_node, "wakeup"); + if (irq <= 0) { + bt_dev_dbg(hdev, "%s: no OOB Wakeup IRQ in DT", __func__); + return 0; + } + + irq_set_status_flags(irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, + 0, "OOB Wake-on-BT", data); + if (ret) { + bt_dev_err(hdev, "%s: IRQ request failed", __func__); + return ret; + } + + ret = device_init_wakeup(dev, true); + if (ret) { + bt_dev_err(hdev, "%s: failed to init_wakeup", __func__); + return ret; + } + + data->oob_wake_irq = irq; + bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); + return 0; +} +#endif + +static void btusb_check_needs_reset_resume(struct usb_interface *intf) +{ + if (dmi_check_system(btusb_needs_reset_resume_table)) + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; +} + +static bool btusb_wakeup(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + + return device_may_wakeup(&data->udev->dev); +} + +static int btusb_shutdown_qca(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "HCI reset during shutdown failed"); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} + +static ssize_t force_poll_sync_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct btusb_data *data = file->private_data; + char buf[3]; + + buf[0] = data->poll_sync ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_poll_sync_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct btusb_data *data = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + /* Only allow changes while the adapter is down */ + if (test_bit(HCI_UP, &data->hdev->flags)) + return -EPERM; + + if (data->poll_sync == enable) + return -EALREADY; + + data->poll_sync = enable; + + return count; +} + +static const struct file_operations force_poll_sync_fops = { + .open = simple_open, + .read = force_poll_sync_read, + .write = force_poll_sync_write, + .llseek = default_llseek, +}; + +static int btusb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_endpoint_descriptor *ep_desc; + struct gpio_desc *reset_gpio; + struct btusb_data *data; + struct hci_dev *hdev; + unsigned ifnum_base; + int i, err, priv_size; + + BT_DBG("intf %p id %p", intf, id); + + if ((id->driver_info & BTUSB_IFNUM_2) && + (intf->cur_altsetting->desc.bInterfaceNumber != 0) && + (intf->cur_altsetting->desc.bInterfaceNumber != 2)) + return -ENODEV; + + ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber; + + if (!id->driver_info) { + const struct usb_device_id *match; + + match = usb_match_id(intf, quirks_table); + if (match) + id = match; + } + + if (id->driver_info == BTUSB_IGNORE) + return -ENODEV; + + if (id->driver_info & BTUSB_ATH3012) { + struct usb_device *udev = interface_to_usbdev(intf); + + /* Old firmware would otherwise let ath3k driver load + * patch and sysconfig files + */ + if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001 && + !btusb_qca_need_patch(udev)) + return -ENODEV; + } + + data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { + data->intr_ep = ep_desc; + continue; + } + + if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { + data->bulk_tx_ep = ep_desc; + continue; + } + + if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { + data->bulk_rx_ep = ep_desc; + continue; + } + } + + if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) + return -ENODEV; + + if (id->driver_info & BTUSB_AMP) { + data->cmdreq_type = USB_TYPE_CLASS | 0x01; + data->cmdreq = 0x2b; + } else { + data->cmdreq_type = USB_TYPE_CLASS; + data->cmdreq = 0x00; + } + + data->udev = interface_to_usbdev(intf); + data->intf = intf; + + INIT_WORK(&data->work, btusb_work); + INIT_WORK(&data->waker, btusb_waker); + INIT_DELAYED_WORK(&data->rx_work, btusb_rx_work); + + skb_queue_head_init(&data->acl_q); + + init_usb_anchor(&data->deferred); + init_usb_anchor(&data->tx_anchor); + spin_lock_init(&data->txlock); + + init_usb_anchor(&data->intr_anchor); + init_usb_anchor(&data->bulk_anchor); + init_usb_anchor(&data->isoc_anchor); + init_usb_anchor(&data->diag_anchor); + init_usb_anchor(&data->ctrl_anchor); + spin_lock_init(&data->rxlock); + + priv_size = 0; + + data->recv_event = hci_recv_frame; + data->recv_bulk = btusb_recv_bulk; + + if (id->driver_info & BTUSB_INTEL_COMBINED) { + /* Allocate extra space for Intel device */ + priv_size += sizeof(struct btintel_data); + + /* Override the rx handlers */ + data->recv_event = btintel_recv_event; + 
data->recv_bulk = btusb_recv_bulk_intel; + } else if (id->driver_info & BTUSB_REALTEK) { + /* Allocate extra space for Realtek device */ + priv_size += sizeof(struct btrealtek_data); + + data->recv_event = btusb_recv_event_realtek; + } else if (id->driver_info & BTUSB_MEDIATEK) { + /* Allocate extra space for Mediatek device */ + priv_size += sizeof(struct btmediatek_data); + } + + data->recv_acl = hci_recv_frame; + + hdev = hci_alloc_dev_priv(priv_size); + if (!hdev) + return -ENOMEM; + + hdev->bus = HCI_USB; + hci_set_drvdata(hdev, data); + + if (id->driver_info & BTUSB_AMP) + hdev->dev_type = HCI_AMP; + else + hdev->dev_type = HCI_PRIMARY; + + data->hdev = hdev; + + SET_HCIDEV_DEV(hdev, &intf->dev); + + reset_gpio = gpiod_get_optional(&data->udev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) { + err = PTR_ERR(reset_gpio); + goto out_free_dev; + } else if (reset_gpio) { + data->reset_gpio = reset_gpio; + } + + hdev->open = btusb_open; + hdev->close = btusb_close; + hdev->flush = btusb_flush; + hdev->send = btusb_send_frame; + hdev->notify = btusb_notify; + hdev->wakeup = btusb_wakeup; + +#ifdef CONFIG_PM + err = btusb_config_oob_wake(hdev); + if (err) + goto out_free_dev; + + /* Marvell devices may need a specific chip configuration */ + if (id->driver_info & BTUSB_MARVELL && data->oob_wake_irq) { + err = marvell_config_oob_wake(hdev); + if (err) + goto out_free_dev; + } +#endif + if (id->driver_info & BTUSB_CW6622) + set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + + if (id->driver_info & BTUSB_BCM2045) + set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + + if (id->driver_info & BTUSB_BCM92035) + hdev->setup = btusb_setup_bcm92035; + + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && + (id->driver_info & BTUSB_BCM_PATCHRAM)) { + hdev->manufacturer = 15; + hdev->setup = btbcm_setup_patchram; + hdev->set_diag = btusb_bcm_set_diag; + hdev->set_bdaddr = btbcm_set_bdaddr; + + /* Broadcom LM_DIAG Interface numbers are hardcoded */ + data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2); + } + + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && + (id->driver_info & BTUSB_BCM_APPLE)) { + hdev->manufacturer = 15; + hdev->setup = btbcm_setup_apple; + hdev->set_diag = btusb_bcm_set_diag; + + /* Broadcom LM_DIAG Interface numbers are hardcoded */ + data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2); + } + + /* Combined Intel Device setup to support multiple setup routine */ + if (id->driver_info & BTUSB_INTEL_COMBINED) { + err = btintel_configure_setup(hdev, btusb_driver.name); + if (err) + goto out_free_dev; + + /* Transport specific configuration */ + hdev->send = btusb_send_frame_intel; + hdev->cmd_timeout = btusb_intel_cmd_timeout; + + if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT) + btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT); + + if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD) + btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD); + + if (id->driver_info & BTUSB_INTEL_BROKEN_SHUTDOWN_LED) + btintel_set_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED); + } + + if (id->driver_info & BTUSB_MARVELL) + hdev->set_bdaddr = btusb_set_bdaddr_marvell; + + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) && + (id->driver_info & BTUSB_MEDIATEK)) { + hdev->setup = btusb_mtk_setup; + hdev->shutdown = btusb_mtk_shutdown; + hdev->manufacturer = 70; + hdev->cmd_timeout = btmtk_reset_sync; + hdev->set_bdaddr = btmtk_set_bdaddr; + set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + data->recv_acl = 
btusb_recv_acl_mtk; + } + + if (id->driver_info & BTUSB_SWAVE) { + set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks); + } + + if (id->driver_info & BTUSB_INTEL_BOOT) { + hdev->manufacturer = 2; + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + } + + if (id->driver_info & BTUSB_ATH3012) { + data->setup_on_usb = btusb_setup_qca; + hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + } + + if (id->driver_info & BTUSB_QCA_ROME) { + data->setup_on_usb = btusb_setup_qca; + hdev->shutdown = btusb_shutdown_qca; + hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + hdev->cmd_timeout = btusb_qca_cmd_timeout; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + btusb_check_needs_reset_resume(intf); + } + + if (id->driver_info & BTUSB_QCA_WCN6855) { + data->qca_dump.id_vendor = id->idVendor; + data->qca_dump.id_product = id->idProduct; + data->recv_event = btusb_recv_evt_qca; + data->recv_acl = btusb_recv_acl_qca; + hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL); + data->setup_on_usb = btusb_setup_qca; + hdev->shutdown = btusb_shutdown_qca; + hdev->set_bdaddr = btusb_set_bdaddr_wcn6855; + hdev->cmd_timeout = btusb_qca_cmd_timeout; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + hci_set_msft_opcode(hdev, 0xFD70); + } + + if (id->driver_info & BTUSB_AMP) { + /* AMP controllers do not support SCO packets */ + data->isoc = NULL; + } else { + /* Interface orders are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1); + data->isoc_ifnum = ifnum_base + 1; + } + + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) && + (id->driver_info & BTUSB_REALTEK)) { + btrtl_set_driver_name(hdev, btusb_driver.name); + hdev->setup = btusb_setup_realtek; + hdev->shutdown = btrtl_shutdown_realtek; + hdev->cmd_timeout = btusb_rtl_cmd_timeout; + hdev->hw_error = btusb_rtl_hw_error; + + /* Realtek devices need to set remote wakeup on auto-suspend */ + set_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags); + set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags); + } + + if (id->driver_info & BTUSB_ACTIONS_SEMI) { + /* Support is advertised, but not implemented */ + set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); + } + + if (!reset) + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { + if (!disable_scofix) + set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); + } + + if (id->driver_info & BTUSB_BROKEN_ISOC) + data->isoc = NULL; + + if (id->driver_info & BTUSB_WIDEBAND_SPEECH) + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + + if (id->driver_info & BTUSB_VALID_LE_STATES) + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + + if (id->driver_info & BTUSB_DIGIANSWER) { + data->cmdreq_type = USB_TYPE_VENDOR; + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + } + + if (id->driver_info & BTUSB_CSR) { + struct usb_device *udev = data->udev; + u16 bcdDevice = le16_to_cpu(udev->descriptor.bcdDevice); + + /* Old firmware would otherwise execute USB reset */ + if (bcdDevice < 0x117) + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + /* This must be set first in case we disable it for fakes */ + 
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + + /* Fake CSR devices with broken commands */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 && + le16_to_cpu(udev->descriptor.idProduct) == 0x0001) + hdev->setup = btusb_setup_csr; + } + + if (id->driver_info & BTUSB_SNIFFER) { + struct usb_device *udev = data->udev; + + /* New sniffer firmware has crippled HCI interface */ + if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + } + + if (id->driver_info & BTUSB_INTEL_BOOT) { + /* A bug in the bootloader causes that interrupt interface is + * only enabled after receiving SetInterface(0, AltSetting=0). + */ + err = usb_set_interface(data->udev, 0, 0); + if (err < 0) { + BT_ERR("failed to set interface 0, alt 0 %d", err); + goto out_free_dev; + } + } + + if (data->isoc) { + err = usb_driver_claim_interface(&btusb_driver, + data->isoc, data); + if (err < 0) + goto out_free_dev; + } + + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) { + if (!usb_driver_claim_interface(&btusb_driver, + data->diag, data)) + __set_diag_interface(hdev); + else + data->diag = NULL; + } + + if (enable_autosuspend) + usb_enable_autosuspend(data->udev); + + data->poll_sync = enable_poll_sync; + + err = hci_register_dev(hdev); + if (err < 0) + goto out_free_dev; + + usb_set_intfdata(intf, data); + + debugfs_create_file("force_poll_sync", 0644, hdev->debugfs, data, + &force_poll_sync_fops); + + return 0; + +out_free_dev: + if (data->reset_gpio) + gpiod_put(data->reset_gpio); + hci_free_dev(hdev); + return err; +} + +static void btusb_disconnect(struct usb_interface *intf) +{ + struct btusb_data *data = usb_get_intfdata(intf); + struct hci_dev *hdev; + + BT_DBG("intf %p", intf); + + if (!data) + return; + + hdev = data->hdev; + usb_set_intfdata(data->intf, NULL); + + if (data->isoc) + usb_set_intfdata(data->isoc, NULL); + + if (data->diag) + usb_set_intfdata(data->diag, NULL); + + hci_unregister_dev(hdev); + + if (intf == data->intf) { + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + if (data->diag) + usb_driver_release_interface(&btusb_driver, data->diag); + } else if (intf == data->isoc) { + if (data->diag) + usb_driver_release_interface(&btusb_driver, data->diag); + usb_driver_release_interface(&btusb_driver, data->intf); + } else if (intf == data->diag) { + usb_driver_release_interface(&btusb_driver, data->intf); + if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + } + + if (data->oob_wake_irq) + device_init_wakeup(&data->udev->dev, false); + + if (data->reset_gpio) + gpiod_put(data->reset_gpio); + + hci_free_dev(hdev); +} + +#ifdef CONFIG_PM +static int btusb_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct btusb_data *data = usb_get_intfdata(intf); + + BT_DBG("intf %p", intf); + + if (data->suspend_count++) + return 0; + + spin_lock_irq(&data->txlock); + if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) { + set_bit(BTUSB_SUSPENDING, &data->flags); + spin_unlock_irq(&data->txlock); + } else { + spin_unlock_irq(&data->txlock); + data->suspend_count--; + return -EBUSY; + } + + cancel_work_sync(&data->work); + + btusb_stop_traffic(data); + usb_kill_anchored_urbs(&data->tx_anchor); + + if (data->oob_wake_irq && device_may_wakeup(&data->udev->dev)) { + set_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags); + enable_irq_wake(data->oob_wake_irq); + enable_irq(data->oob_wake_irq); + } + + /* For global suspend, Realtek devices lose the loaded fw + * in them. 
But for autosuspend, firmware should remain. + * Actually, it depends on whether the usb host sends + * set feature (enable wakeup) or not. + */ + if (test_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags)) { + if (PMSG_IS_AUTO(message) && + device_can_wakeup(&data->udev->dev)) + data->udev->do_remote_wakeup = 1; + else if (!PMSG_IS_AUTO(message) && + !device_may_wakeup(&data->udev->dev)) { + data->udev->do_remote_wakeup = 0; + data->udev->reset_resume = 1; + } + } + + return 0; +} + +static void play_deferred(struct btusb_data *data) +{ + struct urb *urb; + int err; + + while ((urb = usb_get_from_anchor(&data->deferred))) { + usb_anchor_urb(urb, &data->tx_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", + data->hdev->name, urb, -err); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); + break; + } + + data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Cleanup the rest deferred urbs. */ + while ((urb = usb_get_from_anchor(&data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); + } +} + +static int btusb_resume(struct usb_interface *intf) +{ + struct btusb_data *data = usb_get_intfdata(intf); + struct hci_dev *hdev = data->hdev; + int err = 0; + + BT_DBG("intf %p", intf); + + if (--data->suspend_count) + return 0; + + /* Disable only if not already disabled (keep it balanced) */ + if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { + disable_irq(data->oob_wake_irq); + disable_irq_wake(data->oob_wake_irq); + } + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { + err = btusb_submit_intr_urb(hdev, GFP_NOIO); + if (err < 0) { + clear_bit(BTUSB_INTR_RUNNING, &data->flags); + goto failed; + } + } + + if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { + err = btusb_submit_bulk_urb(hdev, GFP_NOIO); + if (err < 0) { + clear_bit(BTUSB_BULK_RUNNING, &data->flags); + goto failed; + } + + btusb_submit_bulk_urb(hdev, GFP_NOIO); + } + + if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { + if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0) + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + else + btusb_submit_isoc_urb(hdev, GFP_NOIO); + } + + spin_lock_irq(&data->txlock); + play_deferred(data); + clear_bit(BTUSB_SUSPENDING, &data->flags); + spin_unlock_irq(&data->txlock); + schedule_work(&data->work); + + return 0; + +failed: + usb_scuttle_anchored_urbs(&data->deferred); +done: + spin_lock_irq(&data->txlock); + clear_bit(BTUSB_SUSPENDING, &data->flags); + spin_unlock_irq(&data->txlock); + + return err; +} +#endif + +#ifdef CONFIG_DEV_COREDUMP +static void btusb_coredump(struct device *dev) +{ + struct btusb_data *data = dev_get_drvdata(dev); + struct hci_dev *hdev = data->hdev; + + if (hdev->dump.coredump) + hdev->dump.coredump(hdev); +} +#endif + +static struct usb_driver btusb_driver = { + .name = "btusb", + .probe = btusb_probe, + .disconnect = btusb_disconnect, +#ifdef CONFIG_PM + .suspend = btusb_suspend, + .resume = btusb_resume, +#endif + .id_table = btusb_table, + .supports_autosuspend = 1, + .disable_hub_initiated_lpm = 1, + +#ifdef CONFIG_DEV_COREDUMP + .drvwrap = { + .driver = { + .coredump = btusb_coredump, + }, + }, +#endif +}; + +module_usb_driver(btusb_driver); + +module_param(disable_scofix, bool, 0644); +MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size"); + +module_param(force_scofix, bool, 0644); +MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers 
size"); + +module_param(enable_autosuspend, bool, 0644); +MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default"); + +module_param(reset, bool, 0644); +MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c new file mode 100644 index 000000000..2adfe4fad --- /dev/null +++ b/drivers/bluetooth/dtl1_cs.c @@ -0,0 +1,614 @@ +/* + * + * A driver for Nokia Connectivity Card DTL-1 devices + * + * Copyright (C) 2001-2002 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for Nokia Connectivity Card DTL-1"); +MODULE_LICENSE("GPL"); + + + +/* ======================== Local structures ======================== */ + + +struct dtl1_info { + struct pcmcia_device *p_dev; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + + unsigned long flowmask; /* HCI flow mask */ + int ri_latch; + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; +}; + + +static int dtl1_config(struct pcmcia_device *link); + + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_WAITING 8 + +/* Receiver States */ +#define RECV_WAIT_NSH 0 +#define RECV_WAIT_DATA 1 + + +struct nsh { + u8 type; + u8 zero; + u16 len; +} __packed; /* Nokia Specific Header */ + +#define NSHL 4 /* Nokia Specific Header Length */ + + + +/* ======================== Interrupt handling ======================== */ + + +static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) +{ + int actual = 0; + + /* Tx FIFO should be empty */ + if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) + return 0; + + /* Fill FIFO with current frame */ + while ((fifo_size-- > 0) && (actual < len)) { + /* Transmit next byte */ + outb(buf[actual], iobase + UART_TX); + actual++; + } + + return actual; +} + + +static void dtl1_write_wakeup(struct dtl1_info *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if (test_bit(XMIT_WAITING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + do { + unsigned int iobase = info->p_dev->resource[0]->start; + register struct sk_buff *skb; + int len; + + clear_bit(XMIT_WAKEUP, &(info->tx_state)); + + if 
(!pcmcia_dev_present(info->p_dev)) + return; + + skb = skb_dequeue(&(info->txq)); + if (!skb) + break; + + /* Send frame */ + len = dtl1_write(iobase, 32, skb->data, skb->len); + + if (len == skb->len) { + set_bit(XMIT_WAITING, &(info->tx_state)); + kfree_skb(skb); + } else { + skb_pull(skb, len); + skb_queue_head(&(info->txq), skb); + } + + info->hdev->stat.byte_tx += len; + + } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); + + clear_bit(XMIT_SENDING, &(info->tx_state)); +} + + +static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb) +{ + u8 flowmask = *(u8 *)skb->data; + int i; + + printk(KERN_INFO "Bluetooth: Nokia control data ="); + for (i = 0; i < skb->len; i++) + printk(" %02x", skb->data[i]); + + printk("\n"); + + /* transition to active state */ + if (((info->flowmask & 0x07) == 0) && ((flowmask & 0x07) != 0)) { + clear_bit(XMIT_WAITING, &(info->tx_state)); + dtl1_write_wakeup(info); + } + + info->flowmask = flowmask; + + kfree_skb(skb); +} + + +static void dtl1_receive(struct dtl1_info *info) +{ + unsigned int iobase; + struct nsh *nsh; + int boguscount = 0; + + if (!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->resource[0]->start; + + do { + info->hdev->stat.byte_rx++; + + /* Allocate packet */ + if (info->rx_skb == NULL) { + info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!info->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + info->rx_state = RECV_WAIT_NSH; + info->rx_count = NSHL; + return; + } + } + + skb_put_u8(info->rx_skb, inb(iobase + UART_RX)); + nsh = (struct nsh *)info->rx_skb->data; + + info->rx_count--; + + if (info->rx_count == 0) { + + switch (info->rx_state) { + case RECV_WAIT_NSH: + info->rx_state = RECV_WAIT_DATA; + info->rx_count = nsh->len + (nsh->len & 0x0001); + break; + case RECV_WAIT_DATA: + hci_skb_pkt_type(info->rx_skb) = nsh->type; + + /* remove PAD byte if it exists */ + if (nsh->len & 0x0001) { + info->rx_skb->tail--; + info->rx_skb->len--; + } + + /* remove NSH */ + skb_pull(info->rx_skb, NSHL); + + switch (hci_skb_pkt_type(info->rx_skb)) { + case 0x80: + /* control data for the Nokia Card */ + dtl1_control(info, info->rx_skb); + break; + case 0x82: + case 0x83: + case 0x84: + /* send frame to the HCI layer */ + hci_skb_pkt_type(info->rx_skb) &= 0x0f; + hci_recv_frame(info->hdev, info->rx_skb); + break; + default: + /* unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", + hci_skb_pkt_type(info->rx_skb)); + kfree_skb(info->rx_skb); + break; + } + + info->rx_state = RECV_WAIT_NSH; + info->rx_count = NSHL; + info->rx_skb = NULL; + break; + } + + } + + /* Make sure we don't stay here too long */ + if (boguscount++ > 32) + break; + + } while (inb(iobase + UART_LSR) & UART_LSR_DR); +} + + +static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) +{ + struct dtl1_info *info = dev_inst; + unsigned int iobase; + unsigned char msr; + int boguscount = 0; + int iir, lsr; + irqreturn_t r = IRQ_NONE; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + iobase = info->p_dev->resource[0]->start; + + spin_lock(&(info->lock)); + + iir = inb(iobase + UART_IIR) & UART_IIR_ID; + while (iir) { + + r = IRQ_HANDLED; + /* Clear interrupt */ + lsr = inb(iobase + UART_LSR); + + switch (iir) { + case UART_IIR_RLSI: + BT_ERR("RLSI"); + break; + case UART_IIR_RDI: + /* Receive interrupt */ + dtl1_receive(info); + break; + case UART_IIR_THRI: + if (lsr & UART_LSR_THRE) { + /* Transmitter ready for data */ + dtl1_write_wakeup(info); + } + break; + 
default: + BT_ERR("Unhandled IIR=%#x", iir); + break; + } + + /* Make sure we don't stay here too long */ + if (boguscount++ > 100) + break; + + iir = inb(iobase + UART_IIR) & UART_IIR_ID; + + } + + msr = inb(iobase + UART_MSR); + + if (info->ri_latch ^ (msr & UART_MSR_RI)) { + info->ri_latch = msr & UART_MSR_RI; + clear_bit(XMIT_WAITING, &(info->tx_state)); + dtl1_write_wakeup(info); + r = IRQ_HANDLED; + } + + spin_unlock(&(info->lock)); + + return r; +} + + + +/* ======================== HCI interface ======================== */ + + +static int dtl1_hci_open(struct hci_dev *hdev) +{ + return 0; +} + + +static int dtl1_hci_flush(struct hci_dev *hdev) +{ + struct dtl1_info *info = hci_get_drvdata(hdev); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int dtl1_hci_close(struct hci_dev *hdev) +{ + dtl1_hci_flush(hdev); + + return 0; +} + + +static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct dtl1_info *info = hci_get_drvdata(hdev); + struct sk_buff *s; + struct nsh nsh; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + nsh.type = 0x81; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + nsh.type = 0x82; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + nsh.type = 0x83; + break; + default: + return -EILSEQ; + } + + nsh.zero = 0; + nsh.len = skb->len; + + s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC); + if (!s) + return -ENOMEM; + + skb_reserve(s, NSHL); + skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len); + if (skb->len & 0x0001) + skb_put_u8(s, 0); /* PAD */ + + /* Prepend skb with Nokia frame header and queue */ + memcpy(skb_push(s, NSHL), &nsh, NSHL); + skb_queue_tail(&(info->txq), s); + + dtl1_write_wakeup(info); + + kfree_skb(skb); + + return 0; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int dtl1_open(struct dtl1_info *info) +{ + unsigned long flags; + unsigned int iobase = info->p_dev->resource[0]->start; + struct hci_dev *hdev; + + spin_lock_init(&(info->lock)); + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_NSH; + info->rx_count = NSHL; + info->rx_skb = NULL; + + set_bit(XMIT_WAITING, &(info->tx_state)); + + /* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hci_set_drvdata(hdev, info); + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = dtl1_hci_open; + hdev->close = dtl1_hci_close; + hdev->flush = dtl1_hci_flush; + hdev->send = dtl1_hci_send_frame; + + spin_lock_irqsave(&(info->lock), flags); + + /* Reset UART */ + outb(0, iobase + UART_MCR); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + /* Initialize UART */ + outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */ + outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR); + + info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR) + & UART_MSR_RI; + + /* Turn on interrupts */ + outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); + + /* Timeout before it is safe to send the first HCI packet */ + msleep(2000); + + /* Register HCI device */ + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + info->hdev = NULL; + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + + +static int dtl1_close(struct dtl1_info *info) +{ + unsigned long 
flags; + unsigned int iobase = info->p_dev->resource[0]->start; + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + dtl1_hci_close(hdev); + + spin_lock_irqsave(&(info->lock), flags); + + /* Reset UART */ + outb(0, iobase + UART_MCR); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + return 0; +} + +static int dtl1_probe(struct pcmcia_device *link) +{ + struct dtl1_info *info; + + /* Create new info device */ + info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + return dtl1_config(link); +} + + +static void dtl1_detach(struct pcmcia_device *link) +{ + struct dtl1_info *info = link->priv; + + dtl1_close(info); + pcmcia_disable_device(link); +} + +static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + if ((p_dev->resource[1]->end) || (p_dev->resource[1]->end < 8)) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); +} + +static int dtl1_config(struct pcmcia_device *link) +{ + struct dtl1_info *info = link->priv; + int ret; + + /* Look for a generic full-sized window */ + link->resource[0]->end = 8; + ret = pcmcia_loop_config(link, dtl1_confcheck, NULL); + if (ret) + goto failed; + + ret = pcmcia_request_irq(link, dtl1_interrupt); + if (ret) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + ret = dtl1_open(info); + if (ret) + goto failed; + + return 0; + +failed: + dtl1_detach(link); + return ret; +} + +static const struct pcmcia_device_id dtl1_ids[] = { + PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d), + PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82), + PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863), + PCMCIA_DEVICE_PROD_ID12("Socket", "CF+ Personal Network Card", 0xb38bcc2e, 0xe732bae3), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, dtl1_ids); + +static struct pcmcia_driver dtl1_driver = { + .owner = THIS_MODULE, + .name = "dtl1_cs", + .probe = dtl1_probe, + .remove = dtl1_detach, + .id_table = dtl1_ids, +}; +module_pcmcia_driver(dtl1_driver); diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h new file mode 100644 index 000000000..4f2c89742 --- /dev/null +++ b/drivers/bluetooth/h4_recv.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Generic Bluetooth HCI UART driver + * + * Copyright (C) 2015-2018 Intel Corporation + */ + +#include + +struct h4_recv_pkt { + u8 type; /* Packet type */ + u8 hlen; /* Header length */ + u8 loff; /* Data length offset in header */ + u8 lsize; /* Data length field size */ + u16 maxlen; /* Max overall packet length */ + int (*recv)(struct hci_dev *hdev, struct sk_buff *skb); +}; + +#define H4_RECV_ACL \ + .type = HCI_ACLDATA_PKT, \ + .hlen = HCI_ACL_HDR_SIZE, \ + .loff = 2, \ + .lsize = 2, \ + .maxlen = HCI_MAX_FRAME_SIZE \ + +#define H4_RECV_SCO \ + .type = HCI_SCODATA_PKT, \ + .hlen = HCI_SCO_HDR_SIZE, \ + .loff = 2, \ + .lsize = 1, \ + .maxlen = HCI_MAX_SCO_SIZE + +#define H4_RECV_EVENT \ + .type = HCI_EVENT_PKT, \ + .hlen = HCI_EVENT_HDR_SIZE, \ + .loff = 1, \ + .lsize = 1, \ + .maxlen = HCI_MAX_EVENT_SIZE + +static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev, + 
struct sk_buff *skb, + const unsigned char *buffer, + int count, + const struct h4_recv_pkt *pkts, + int pkts_count) +{ + /* Check for error from previous call */ + if (IS_ERR(skb)) + skb = NULL; + + while (count) { + int i, len; + + if (!skb) { + for (i = 0; i < pkts_count; i++) { + if (buffer[0] != (&pkts[i])->type) + continue; + + skb = bt_skb_alloc((&pkts[i])->maxlen, + GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + + hci_skb_pkt_type(skb) = (&pkts[i])->type; + hci_skb_expect(skb) = (&pkts[i])->hlen; + break; + } + + /* Check for invalid packet type */ + if (!skb) + return ERR_PTR(-EILSEQ); + + count -= 1; + buffer += 1; + } + + len = min_t(uint, hci_skb_expect(skb) - skb->len, count); + skb_put_data(skb, buffer, len); + + count -= len; + buffer += len; + + /* Check for partial packet */ + if (skb->len < hci_skb_expect(skb)) + continue; + + for (i = 0; i < pkts_count; i++) { + if (hci_skb_pkt_type(skb) == (&pkts[i])->type) + break; + } + + if (i >= pkts_count) { + kfree_skb(skb); + return ERR_PTR(-EILSEQ); + } + + if (skb->len == (&pkts[i])->hlen) { + u16 dlen; + + switch ((&pkts[i])->lsize) { + case 0: + /* No variable data length */ + dlen = 0; + break; + case 1: + /* Single octet variable length */ + dlen = skb->data[(&pkts[i])->loff]; + hci_skb_expect(skb) += dlen; + + if (skb_tailroom(skb) < dlen) { + kfree_skb(skb); + return ERR_PTR(-EMSGSIZE); + } + break; + case 2: + /* Double octet variable length */ + dlen = get_unaligned_le16(skb->data + + (&pkts[i])->loff); + hci_skb_expect(skb) += dlen; + + if (skb_tailroom(skb) < dlen) { + kfree_skb(skb); + return ERR_PTR(-EMSGSIZE); + } + break; + default: + /* Unsupported variable length */ + kfree_skb(skb); + return ERR_PTR(-EILSEQ); + } + + if (!dlen) { + /* No more data, complete frame */ + (&pkts[i])->recv(hdev, skb); + skb = NULL; + } + } else { + /* Complete frame */ + (&pkts[i])->recv(hdev, skb); + skb = NULL; + } + } + + return skb; +} diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c new file mode 100644 index 000000000..2d4030240 --- /dev/null +++ b/drivers/bluetooth/hci_ag6xx.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver for Intel/AG6xx devices + * + * Copyright (C) 2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" +#include "btintel.h" + +struct ag6xx_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; +}; + +struct pbn_entry { + __le32 addr; + __le32 plen; + __u8 data[]; +} __packed; + +static int ag6xx_open(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx; + + BT_DBG("hu %p", hu); + + ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL); + if (!ag6xx) + return -ENOMEM; + + skb_queue_head_init(&ag6xx->txq); + + hu->priv = ag6xx; + return 0; +} + +static int ag6xx_close(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ag6xx->txq); + kfree_skb(ag6xx->rx_skb); + kfree(ag6xx); + + hu->priv = NULL; + return 0; +} + +static int ag6xx_flush(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ag6xx->txq); + return 0; +} + +static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&ag6xx->txq); + if (!skb) + return skb; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + return skb; +} + +static int 
ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct ag6xx_data *ag6xx = hu->priv; + + skb_queue_tail(&ag6xx->txq, skb); + return 0; +} + +static const struct h4_recv_pkt ag6xx_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, +}; + +static int ag6xx_recv(struct hci_uart *hu, const void *data, int count) +{ + struct ag6xx_data *ag6xx = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count, + ag6xx_recv_pkts, + ARRAY_SIZE(ag6xx_recv_pkts)); + if (IS_ERR(ag6xx->rx_skb)) { + int err = PTR_ERR(ag6xx->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + ag6xx->rx_skb = NULL; + return err; + } + + return count; +} + +static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen, + const void *data) +{ + /* Can write a maximum of 247 bytes per HCI command. + * HCI cmd Header (3), Intel mem write header (6), data (247). + */ + while (plen > 0) { + struct sk_buff *skb; + u8 cmd_param[253], fragment_len = (plen > 247) ? 247 : plen; + __le32 leaddr = cpu_to_le32(addr); + + memcpy(cmd_param, &leaddr, 4); + cmd_param[4] = 0; + cmd_param[5] = fragment_len; + memcpy(cmd_param + 6, data, fragment_len); + + skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + plen -= fragment_len; + data += fragment_len; + addr += fragment_len; + } + + return 0; +} + +static int ag6xx_setup(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + struct intel_version ver; + const struct firmware *fw; + const u8 *fw_ptr; + char fwname[64]; + bool patched = false; + int err; + + hu->hdev->set_diag = btintel_set_diag; + hu->hdev->set_bdaddr = btintel_set_bdaddr; + + err = btintel_enter_mfg(hdev); + if (err) + return err; + + err = btintel_read_version(hdev, &ver); + if (err) + return err; + + btintel_version_info(hdev, &ver); + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (ver.hw_platform != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X", + ver.hw_platform); + return -EINVAL; + } + + /* Only the hardware variant iBT 2.1 (AG6XX) is supported by this + * firmware setup method. + */ + if (ver.hw_variant != 0x0a) { + bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x", + ver.hw_variant); + return -EINVAL; + } + + snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata", + ver.hw_platform, ver.hw_variant); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)", + fwname, err); + goto patch; + } + + bt_dev_info(hdev, "Applying bddata (%s)", fwname); + + skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data, + HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb)); + release_firmware(fw); + return PTR_ERR(skb); + } + kfree_skb(skb); + + release_firmware(fw); + +patch: + /* If there is no applied patch, fw_patch_num is always 0x00. In other + * cases, current firmware is already patched. No need to patch it. + */ + if (ver.fw_patch_num) { + bt_dev_info(hdev, "Device is already patched. 
patch num: %02x", + ver.fw_patch_num); + patched = true; + goto complete; + } + + snprintf(fwname, sizeof(fwname), + "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn", + ver.hw_platform, ver.hw_variant, ver.hw_revision, + ver.fw_variant, ver.fw_revision, ver.fw_build_num, + ver.fw_build_ww, ver.fw_build_yy); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to open Intel patch file: %s(%d)", + fwname, err); + goto complete; + } + fw_ptr = fw->data; + + bt_dev_info(hdev, "Patching firmware file (%s)", fwname); + + /* PBN patch file contains a list of binary patches to be applied on top + * of the embedded firmware. Each patch entry header contains the target + * address and patch size. + * + * Patch entry: + * | addr(le) | patch_len(le) | patch_data | + * | 4 Bytes | 4 Bytes | n Bytes | + * + * PBN file is terminated by a patch entry whose address is 0xffffffff. + */ + while (fw->size > fw_ptr - fw->data) { + struct pbn_entry *pbn = (void *)fw_ptr; + u32 addr, plen; + + if (pbn->addr == 0xffffffff) { + bt_dev_info(hdev, "Patching complete"); + patched = true; + break; + } + + addr = le32_to_cpu(pbn->addr); + plen = le32_to_cpu(pbn->plen); + + if (fw->data + fw->size <= pbn->data + plen) { + bt_dev_info(hdev, "Invalid patch len (%d)", plen); + break; + } + + bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data), + fw->size); + + err = intel_mem_write(hdev, addr, plen, pbn->data); + if (err) { + bt_dev_err(hdev, "Patching failed"); + break; + } + + fw_ptr = pbn->data + plen; + } + + release_firmware(fw); + +complete: + /* Exit manufacturing mode and reset */ + err = btintel_exit_mfg(hdev, true, patched); + if (err) + return err; + + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. + */ + btintel_set_event_mask_mfg(hdev, false); + + btintel_check_bdaddr(hdev); + return 0; +} + +static const struct hci_uart_proto ag6xx_proto = { + .id = HCI_UART_AG6XX, + .name = "AG6XX", + .manufacturer = 2, + .open = ag6xx_open, + .close = ag6xx_close, + .flush = ag6xx_flush, + .setup = ag6xx_setup, + .recv = ag6xx_recv, + .enqueue = ag6xx_enqueue, + .dequeue = ag6xx_dequeue, +}; + +int __init ag6xx_init(void) +{ + return hci_uart_register_proto(&ag6xx_proto); +} + +int __exit ag6xx_deinit(void) +{ + return hci_uart_unregister_proto(&ag6xx_proto); +} diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c new file mode 100644 index 000000000..dbfe34664 --- /dev/null +++ b/drivers/bluetooth/hci_ath.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Atheros Communication Bluetooth HCIATH3K UART protocol + * + * HCIATH3K (HCI Atheros AR300x Protocol) is a Atheros Communication's + * power management protocol extension to H4 to support AR300x Bluetooth Chip. + * + * Copyright (c) 2009-2010 Atheros Communications Inc. + * + * Acknowledgements: + * This file is based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +struct ath_struct { + struct hci_uart *hu; + unsigned int cur_sleep; + + struct sk_buff *rx_skb; + struct sk_buff_head txq; + struct work_struct ctxtsw; +}; + +#define OP_WRITE_TAG 0x01 + +#define INDEX_BDADDR 0x01 + +struct ath_vendor_cmd { + __u8 opcode; + __le16 index; + __u8 len; + __u8 data[251]; +} __packed; + +static int ath_wakeup_ar3k(struct tty_struct *tty) +{ + int status = tty->driver->ops->tiocmget(tty); + + if (status & TIOCM_CTS) + return status; + + /* Clear RTS first */ + tty->driver->ops->tiocmget(tty); + tty->driver->ops->tiocmset(tty, 0x00, TIOCM_RTS); + msleep(20); + + /* Set RTS, wake up board */ + tty->driver->ops->tiocmget(tty); + tty->driver->ops->tiocmset(tty, TIOCM_RTS, 0x00); + msleep(20); + + status = tty->driver->ops->tiocmget(tty); + return status; +} + +static void ath_hci_uart_work(struct work_struct *work) +{ + int status; + struct ath_struct *ath; + struct hci_uart *hu; + struct tty_struct *tty; + + ath = container_of(work, struct ath_struct, ctxtsw); + + hu = ath->hu; + tty = hu->tty; + + /* verify and wake up controller */ + if (ath->cur_sleep) { + status = ath_wakeup_ar3k(tty); + if (!(status & TIOCM_CTS)) + return; + } + + /* Ready to send Data */ + clear_bit(HCI_UART_SENDING, &hu->tx_state); + hci_uart_tx_wakeup(hu); +} + +static int ath_open(struct hci_uart *hu) +{ + struct ath_struct *ath; + + BT_DBG("hu %p", hu); + + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + + ath = kzalloc(sizeof(*ath), GFP_KERNEL); + if (!ath) + return -ENOMEM; + + skb_queue_head_init(&ath->txq); + + hu->priv = ath; + ath->hu = hu; + + INIT_WORK(&ath->ctxtsw, ath_hci_uart_work); + + return 0; +} + +static int ath_close(struct hci_uart *hu) +{ + struct ath_struct *ath = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ath->txq); + + kfree_skb(ath->rx_skb); + + cancel_work_sync(&ath->ctxtsw); + + hu->priv = NULL; + kfree(ath); + + return 0; +} + +static int ath_flush(struct hci_uart *hu) +{ + struct ath_struct *ath = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ath->txq); + + return 0; +} + +static int ath_vendor_cmd(struct hci_dev *hdev, uint8_t opcode, uint16_t index, + const void *data, size_t dlen) +{ + struct sk_buff *skb; + struct ath_vendor_cmd cmd; + + if (dlen > sizeof(cmd.data)) + return -EINVAL; + + cmd.opcode = opcode; + cmd.index = cpu_to_le16(index); + cmd.len = dlen; + memcpy(cmd.data, data, dlen); + + skb = __hci_cmd_sync(hdev, 0xfc0b, dlen + 4, &cmd, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + return 0; +} + +static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return ath_vendor_cmd(hdev, OP_WRITE_TAG, INDEX_BDADDR, bdaddr, + sizeof(*bdaddr)); +} + +static int ath_setup(struct hci_uart *hu) +{ + BT_DBG("hu %p", hu); + + hu->hdev->set_bdaddr = ath_set_bdaddr; + + return 0; +} + +static const struct h4_recv_pkt ath_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, +}; + +static int ath_recv(struct hci_uart *hu, const void *data, int count) +{ + struct ath_struct *ath = hu->priv; + + ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, + ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts)); + if (IS_ERR(ath->rx_skb)) { + int err = PTR_ERR(ath->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + ath->rx_skb = NULL; + 
return err; + } + + return count; +} + +#define HCI_OP_ATH_SLEEP 0xFC04 + +static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct ath_struct *ath = hu->priv; + + if (hci_skb_pkt_type(skb) == HCI_SCODATA_PKT) { + kfree_skb(skb); + return 0; + } + + /* Update power management enable flag with parameters of + * HCI sleep enable vendor specific HCI command. + */ + if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) { + struct hci_command_hdr *hdr = (void *)skb->data; + + if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP) + ath->cur_sleep = skb->data[HCI_COMMAND_HDR_SIZE]; + } + + BT_DBG("hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + skb_queue_tail(&ath->txq, skb); + set_bit(HCI_UART_SENDING, &hu->tx_state); + + schedule_work(&ath->ctxtsw); + + return 0; +} + +static struct sk_buff *ath_dequeue(struct hci_uart *hu) +{ + struct ath_struct *ath = hu->priv; + + return skb_dequeue(&ath->txq); +} + +static const struct hci_uart_proto athp = { + .id = HCI_UART_ATH3K, + .name = "ATH3K", + .manufacturer = 69, + .open = ath_open, + .close = ath_close, + .flush = ath_flush, + .setup = ath_setup, + .recv = ath_recv, + .enqueue = ath_enqueue, + .dequeue = ath_dequeue, +}; + +int __init ath_init(void) +{ + return hci_uart_register_proto(&athp); +} + +int __exit ath_deinit(void) +{ + return hci_uart_unregister_proto(&athp); +} diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c new file mode 100644 index 000000000..874d23089 --- /dev/null +++ b/drivers/bluetooth/hci_bcm.c @@ -0,0 +1,1645 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver for Broadcom devices + * + * Copyright (C) 2015 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "btbcm.h" +#include "hci_uart.h" + +#define BCM_NULL_PKT 0x00 +#define BCM_NULL_SIZE 0 + +#define BCM_LM_DIAG_PKT 0x07 +#define BCM_LM_DIAG_SIZE 63 + +#define BCM_TYPE49_PKT 0x31 +#define BCM_TYPE49_SIZE 0 + +#define BCM_TYPE52_PKT 0x34 +#define BCM_TYPE52_SIZE 0 + +#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */ + +#define BCM_NUM_SUPPLIES 2 + +/** + * struct bcm_device_data - device specific data + * @no_early_set_baudrate: Disallow set baudrate before driver setup() + * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it + * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable + * @max_autobaud_speed: max baudrate supported by device in autobaud mode + * @max_speed: max baudrate supported + */ +struct bcm_device_data { + bool no_early_set_baudrate; + bool drive_rts_on_open; + bool no_uart_clock_set; + u32 max_autobaud_speed; + u32 max_speed; +}; + +/** + * struct bcm_device - device driver resources + * @serdev_hu: HCI UART controller struct + * @list: bcm_device_list node + * @dev: physical UART slave + * @name: device name logged by bt_dev_*() functions + * @device_wakeup: BT_WAKE pin, + * assert = Bluetooth device must wake up or remain awake, + * deassert = Bluetooth device may sleep when sleep criteria are met + * @shutdown: BT_REG_ON pin, + * power up or power down Bluetooth device internal regulators + * @reset: BT_RST_N pin, + * active low resets the Bluetooth logic core + * @set_device_wakeup: callback to toggle BT_WAKE pin + * either by accessing 
@device_wakeup or by calling @btlp + * @set_shutdown: callback to toggle BT_REG_ON pin + * either by accessing @shutdown or by calling @btpu/@btpd + * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power") + * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up") + * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down") + * @gpio_count: internal counter for GPIO resources associated with ACPI device + * @gpio_int_idx: index in _CRS for GpioInt() resource + * @txco_clk: external reference frequency clock used by Bluetooth device + * @lpo_clk: external LPO clock used by Bluetooth device + * @supplies: VBAT and VDDIO supplies used by Bluetooth device + * @res_enabled: whether clocks and supplies are prepared and enabled + * @init_speed: default baudrate of Bluetooth device; + * the host UART is initially set to this baudrate so that + * it can configure the Bluetooth device for @oper_speed + * @oper_speed: preferred baudrate of Bluetooth device; + * set to 0 if @init_speed is already the preferred baudrate + * @irq: interrupt triggered by HOST_WAKE_BT pin + * @irq_active_low: whether @irq is active low + * @irq_acquired: flag to show if IRQ handler has been assigned + * @hu: pointer to HCI UART controller struct, + * used to disable flow control during runtime suspend and system sleep + * @is_suspended: whether flow control is currently disabled + * @no_early_set_baudrate: don't set_baudrate before setup() + * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it + * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable + * @pcm_int_params: keep the initial PCM configuration + * @use_autobaud_mode: start Bluetooth device in autobaud mode + * @max_autobaud_speed: max baudrate supported by device in autobaud mode + */ +struct bcm_device { + /* Must be the first member, hci_serdev.c expects this. 
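+ * hci_serdev.c stores this structure as the serdev drvdata and dereferences
+ * that pointer directly as a struct hci_uart, so serdev_hu must stay at
+ * offset zero.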
*/ + struct hci_uart serdev_hu; + struct list_head list; + + struct device *dev; + + const char *name; + struct gpio_desc *device_wakeup; + struct gpio_desc *shutdown; + struct gpio_desc *reset; + int (*set_device_wakeup)(struct bcm_device *, bool); + int (*set_shutdown)(struct bcm_device *, bool); +#ifdef CONFIG_ACPI + acpi_handle btlp, btpu, btpd; + int gpio_count; + int gpio_int_idx; +#endif + + struct clk *txco_clk; + struct clk *lpo_clk; + struct regulator_bulk_data supplies[BCM_NUM_SUPPLIES]; + bool res_enabled; + + u32 init_speed; + u32 oper_speed; + int irq; + bool irq_active_low; + bool irq_acquired; + +#ifdef CONFIG_PM + struct hci_uart *hu; + bool is_suspended; +#endif + bool no_early_set_baudrate; + bool drive_rts_on_open; + bool no_uart_clock_set; + bool use_autobaud_mode; + u8 pcm_int_params[5]; + u32 max_autobaud_speed; +}; + +/* generic bcm uart resources */ +struct bcm_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; + + struct bcm_device *dev; +}; + +/* List of BCM BT UART devices */ +static DEFINE_MUTEX(bcm_device_lock); +static LIST_HEAD(bcm_device_list); + +static int irq_polarity = -1; +module_param(irq_polarity, int, 0444); +MODULE_PARM_DESC(irq_polarity, "IRQ polarity 0: active-high 1: active-low"); + +static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + if (hu->serdev) + serdev_device_set_baudrate(hu->serdev, speed); + else + hci_uart_set_baudrate(hu, speed); +} + +static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + struct hci_dev *hdev = hu->hdev; + struct bcm_data *bcm = hu->priv; + struct sk_buff *skb; + struct bcm_update_uart_baud_rate param; + + if (speed > 3000000 && !bcm->dev->no_uart_clock_set) { + struct bcm_write_uart_clock_setting clock; + + clock.type = BCM_UART_CLOCK_48MHZ; + + bt_dev_dbg(hdev, "Set Controller clock (%d)", clock.type); + + /* This Broadcom specific command changes the UART's controller + * clock for baud rate > 3000000. + */ + skb = __hci_cmd_sync(hdev, 0xfc45, 1, &clock, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: failed to write clock (%d)", + err); + return err; + } + + kfree_skb(skb); + } + + bt_dev_dbg(hdev, "Set Controller UART speed to %d bit/s", speed); + + param.zero = cpu_to_le16(0); + param.baud_rate = cpu_to_le32(speed); + + /* This Broadcom specific command changes the UART's controller baud + * rate. + */ + skb = __hci_cmd_sync(hdev, 0xfc18, sizeof(param), ¶m, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + bt_dev_err(hdev, "BCM: failed to write update baudrate (%d)", + err); + return err; + } + + kfree_skb(skb); + + return 0; +} + +/* bcm_device_exists should be protected by bcm_device_lock */ +static bool bcm_device_exists(struct bcm_device *device) +{ + struct list_head *p; + +#ifdef CONFIG_PM + /* Devices using serdev always exist */ + if (device && device->hu && device->hu->serdev) + return true; +#endif + + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); + + if (device == dev) + return true; + } + + return false; +} + +static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) +{ + int err; + + if (powered && !dev->res_enabled) { + /* Intel Macs use bcm_apple_get_resources() and don't + * have regulator supplies configured. 
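+ * On the DT/ACPI path bcm_get_resources() assigns the "vbat"/"vddio"
+ * supply names and calls devm_regulator_bulk_get(), so a NULL supply
+ * name here simply means the Apple resource path was taken.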
+ */ + if (dev->supplies[0].supply) { + err = regulator_bulk_enable(BCM_NUM_SUPPLIES, + dev->supplies); + if (err) + return err; + } + + /* LPO clock needs to be 32.768 kHz */ + err = clk_set_rate(dev->lpo_clk, 32768); + if (err) { + dev_err(dev->dev, "Could not set LPO clock rate\n"); + goto err_regulator_disable; + } + + err = clk_prepare_enable(dev->lpo_clk); + if (err) + goto err_regulator_disable; + + err = clk_prepare_enable(dev->txco_clk); + if (err) + goto err_lpo_clk_disable; + } + + err = dev->set_shutdown(dev, powered); + if (err) + goto err_txco_clk_disable; + + err = dev->set_device_wakeup(dev, powered); + if (err) + goto err_revert_shutdown; + + if (!powered && dev->res_enabled) { + clk_disable_unprepare(dev->txco_clk); + clk_disable_unprepare(dev->lpo_clk); + + /* Intel Macs use bcm_apple_get_resources() and don't + * have regulator supplies configured. + */ + if (dev->supplies[0].supply) + regulator_bulk_disable(BCM_NUM_SUPPLIES, + dev->supplies); + } + + /* wait for device to power on and come out of reset */ + usleep_range(100000, 120000); + + dev->res_enabled = powered; + + return 0; + +err_revert_shutdown: + dev->set_shutdown(dev, !powered); +err_txco_clk_disable: + if (powered && !dev->res_enabled) + clk_disable_unprepare(dev->txco_clk); +err_lpo_clk_disable: + if (powered && !dev->res_enabled) + clk_disable_unprepare(dev->lpo_clk); +err_regulator_disable: + if (powered && !dev->res_enabled) + regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies); + return err; +} + +#ifdef CONFIG_PM +static irqreturn_t bcm_host_wake(int irq, void *data) +{ + struct bcm_device *bdev = data; + + bt_dev_dbg(bdev, "Host wake IRQ"); + + pm_runtime_get(bdev->dev); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); + + return IRQ_HANDLED; +} + +static int bcm_request_irq(struct bcm_data *bcm) +{ + struct bcm_device *bdev = bcm->dev; + int err; + + mutex_lock(&bcm_device_lock); + if (!bcm_device_exists(bdev)) { + err = -ENODEV; + goto unlock; + } + + if (bdev->irq <= 0) { + err = -EOPNOTSUPP; + goto unlock; + } + + err = devm_request_irq(bdev->dev, bdev->irq, bcm_host_wake, + bdev->irq_active_low ? 
IRQF_TRIGGER_FALLING : + IRQF_TRIGGER_RISING, + "host_wake", bdev); + if (err) { + bdev->irq = err; + goto unlock; + } + + bdev->irq_acquired = true; + + device_init_wakeup(bdev->dev, true); + + pm_runtime_set_autosuspend_delay(bdev->dev, + BCM_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(bdev->dev); + pm_runtime_set_active(bdev->dev); + pm_runtime_enable(bdev->dev); + +unlock: + mutex_unlock(&bcm_device_lock); + + return err; +} + +static const struct bcm_set_sleep_mode default_sleep_params = { + .sleep_mode = 1, /* 0=Disabled, 1=UART, 2=Reserved, 3=USB */ + .idle_host = 2, /* idle threshold HOST, in 300ms */ + .idle_dev = 2, /* idle threshold device, in 300ms */ + .bt_wake_active = 1, /* BT_WAKE active mode: 1 = high, 0 = low */ + .host_wake_active = 0, /* HOST_WAKE active mode: 1 = high, 0 = low */ + .allow_host_sleep = 1, /* Allow host sleep in SCO flag */ + .combine_modes = 1, /* Combine sleep and LPM flag */ + .tristate_control = 0, /* Allow tri-state control of UART tx flag */ + /* Irrelevant USB flags */ + .usb_auto_sleep = 0, + .usb_resume_timeout = 0, + .break_to_host = 0, + .pulsed_host_wake = 1, +}; + +static int bcm_setup_sleep(struct hci_uart *hu) +{ + struct bcm_data *bcm = hu->priv; + struct sk_buff *skb; + struct bcm_set_sleep_mode sleep_params = default_sleep_params; + + sleep_params.host_wake_active = !bcm->dev->irq_active_low; + + skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params), + &sleep_params, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + bt_dev_err(hu->hdev, "Sleep VSC failed (%d)", err); + return err; + } + kfree_skb(skb); + + bt_dev_dbg(hu->hdev, "Set Sleep Parameters VSC succeeded"); + + return 0; +} +#else +static inline int bcm_request_irq(struct bcm_data *bcm) { return 0; } +static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; } +#endif + +static int bcm_set_diag(struct hci_dev *hdev, bool enable) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct bcm_data *bcm = hu->priv; + struct sk_buff *skb; + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + skb = bt_skb_alloc(3, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_put_u8(skb, BCM_LM_DIAG_PKT); + skb_put_u8(skb, 0xf0); + skb_put_u8(skb, enable); + + skb_queue_tail(&bcm->txq, skb); + hci_uart_tx_wakeup(hu); + + return 0; +} + +static int bcm_open(struct hci_uart *hu) +{ + struct bcm_data *bcm; + struct list_head *p; + int err; + + bt_dev_dbg(hu->hdev, "hu %p", hu); + + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + + bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); + if (!bcm) + return -ENOMEM; + + skb_queue_head_init(&bcm->txq); + + hu->priv = bcm; + + mutex_lock(&bcm_device_lock); + + if (hu->serdev) { + bcm->dev = serdev_device_get_drvdata(hu->serdev); + goto out; + } + + if (!hu->tty->dev) + goto out; + + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); + + /* Retrieve saved bcm_device based on parent of the + * platform device (saved during device probe) and + * parent of tty device used by hci_uart + */ + if (hu->tty->dev->parent == dev->dev->parent) { + bcm->dev = dev; +#ifdef CONFIG_PM + dev->hu = hu; +#endif + break; + } + } + +out: + if (bcm->dev) { + if (bcm->dev->use_autobaud_mode) + hci_uart_set_flow_control(hu, false); /* Assert BT_UART_CTS_N */ + else if (bcm->dev->drive_rts_on_open) + hci_uart_set_flow_control(hu, true); + + if (bcm->dev->use_autobaud_mode && bcm->dev->max_autobaud_speed) + hu->init_speed = min(bcm->dev->oper_speed, 
bcm->dev->max_autobaud_speed); + else + hu->init_speed = bcm->dev->init_speed; + + /* If oper_speed is set, ldisc/serdev will set the baudrate + * before calling setup() + */ + if (!bcm->dev->no_early_set_baudrate && !bcm->dev->use_autobaud_mode) + hu->oper_speed = bcm->dev->oper_speed; + + err = bcm_gpio_set_power(bcm->dev, true); + + if (bcm->dev->drive_rts_on_open) + hci_uart_set_flow_control(hu, false); + + if (err) + goto err_unset_hu; + } + + mutex_unlock(&bcm_device_lock); + return 0; + +err_unset_hu: +#ifdef CONFIG_PM + if (!hu->serdev) + bcm->dev->hu = NULL; +#endif + mutex_unlock(&bcm_device_lock); + hu->priv = NULL; + kfree(bcm); + return err; +} + +static int bcm_close(struct hci_uart *hu) +{ + struct bcm_data *bcm = hu->priv; + struct bcm_device *bdev = NULL; + int err; + + bt_dev_dbg(hu->hdev, "hu %p", hu); + + /* Protect bcm->dev against removal of the device or driver */ + mutex_lock(&bcm_device_lock); + + if (hu->serdev) { + bdev = serdev_device_get_drvdata(hu->serdev); + } else if (bcm_device_exists(bcm->dev)) { + bdev = bcm->dev; +#ifdef CONFIG_PM + bdev->hu = NULL; +#endif + } + + if (bdev) { + if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) { + devm_free_irq(bdev->dev, bdev->irq, bdev); + device_init_wakeup(bdev->dev, false); + pm_runtime_disable(bdev->dev); + } + + err = bcm_gpio_set_power(bdev, false); + if (err) + bt_dev_err(hu->hdev, "Failed to power down"); + else + pm_runtime_set_suspended(bdev->dev); + } + mutex_unlock(&bcm_device_lock); + + skb_queue_purge(&bcm->txq); + kfree_skb(bcm->rx_skb); + kfree(bcm); + + hu->priv = NULL; + return 0; +} + +static int bcm_flush(struct hci_uart *hu) +{ + struct bcm_data *bcm = hu->priv; + + bt_dev_dbg(hu->hdev, "hu %p", hu); + + skb_queue_purge(&bcm->txq); + + return 0; +} + +static int bcm_setup(struct hci_uart *hu) +{ + struct bcm_data *bcm = hu->priv; + bool fw_load_done = false; + bool use_autobaud_mode = (bcm->dev ? bcm->dev->use_autobaud_mode : 0); + unsigned int speed; + int err; + + bt_dev_dbg(hu->hdev, "hu %p", hu); + + hu->hdev->set_diag = bcm_set_diag; + hu->hdev->set_bdaddr = btbcm_set_bdaddr; + + err = btbcm_initialize(hu->hdev, &fw_load_done, use_autobaud_mode); + if (err) + return err; + + if (!fw_load_done) + return 0; + + /* Init speed if any */ + if (bcm->dev && bcm->dev->init_speed) + speed = bcm->dev->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + else + speed = 0; + + if (speed) + host_set_baudrate(hu, speed); + + /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (bcm->dev && bcm->dev->oper_speed) + speed = bcm->dev->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + + if (speed) { + err = bcm_set_baudrate(hu, speed); + if (!err) + host_set_baudrate(hu, speed); + } + + /* PCM parameters if provided */ + if (bcm->dev && bcm->dev->pcm_int_params[0] != 0xff) { + struct bcm_set_pcm_int_params params; + + btbcm_read_pcm_int_params(hu->hdev, ¶ms); + + memcpy(¶ms, bcm->dev->pcm_int_params, 5); + btbcm_write_pcm_int_params(hu->hdev, ¶ms); + } + + err = btbcm_finalize(hu->hdev, &fw_load_done, use_autobaud_mode); + if (err) + return err; + + /* Some devices ship with the controller default address. + * Allow the bootloader to set a valid address through the + * device tree. 
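+ * btbcm_finalize() above flags controllers still using a default address
+ * with HCI_QUIRK_INVALID_BDADDR; adding HCI_QUIRK_USE_BDADDR_PROPERTY lets
+ * the core pick up a local-bd-address property instead of leaving the
+ * controller unconfigured until user space programs an address.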
+ */ + if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks)) + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks); + + if (!bcm_request_irq(bcm)) + err = bcm_setup_sleep(hu); + + return err; +} + +#define BCM_RECV_LM_DIAG \ + .type = BCM_LM_DIAG_PKT, \ + .hlen = BCM_LM_DIAG_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_LM_DIAG_SIZE + +#define BCM_RECV_NULL \ + .type = BCM_NULL_PKT, \ + .hlen = BCM_NULL_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_NULL_SIZE + +#define BCM_RECV_TYPE49 \ + .type = BCM_TYPE49_PKT, \ + .hlen = BCM_TYPE49_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_TYPE49_SIZE + +#define BCM_RECV_TYPE52 \ + .type = BCM_TYPE52_PKT, \ + .hlen = BCM_TYPE52_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_TYPE52_SIZE + +static const struct h4_recv_pkt bcm_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { H4_RECV_ISO, .recv = hci_recv_frame }, + { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, + { BCM_RECV_NULL, .recv = hci_recv_diag }, + { BCM_RECV_TYPE49, .recv = hci_recv_diag }, + { BCM_RECV_TYPE52, .recv = hci_recv_diag }, +}; + +static int bcm_recv(struct hci_uart *hu, const void *data, int count) +{ + struct bcm_data *bcm = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count, + bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts)); + if (IS_ERR(bcm->rx_skb)) { + int err = PTR_ERR(bcm->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + bcm->rx_skb = NULL; + return err; + } else if (!bcm->rx_skb) { + /* Delay auto-suspend when receiving completed packet */ + mutex_lock(&bcm_device_lock); + if (bcm->dev && bcm_device_exists(bcm->dev)) { + pm_runtime_get(bcm->dev->dev); + pm_runtime_mark_last_busy(bcm->dev->dev); + pm_runtime_put_autosuspend(bcm->dev->dev); + } + mutex_unlock(&bcm_device_lock); + } + + return count; +} + +static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct bcm_data *bcm = hu->priv; + + bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + skb_queue_tail(&bcm->txq, skb); + + return 0; +} + +static struct sk_buff *bcm_dequeue(struct hci_uart *hu) +{ + struct bcm_data *bcm = hu->priv; + struct sk_buff *skb = NULL; + struct bcm_device *bdev = NULL; + + mutex_lock(&bcm_device_lock); + + if (bcm_device_exists(bcm->dev)) { + bdev = bcm->dev; + pm_runtime_get_sync(bdev->dev); + /* Shall be resumed here */ + } + + skb = skb_dequeue(&bcm->txq); + + if (bdev) { + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); + } + + mutex_unlock(&bcm_device_lock); + + return skb; +} + +#ifdef CONFIG_PM +static int bcm_suspend_device(struct device *dev) +{ + struct bcm_device *bdev = dev_get_drvdata(dev); + int err; + + bt_dev_dbg(bdev, ""); + + if (!bdev->is_suspended && bdev->hu) { + hci_uart_set_flow_control(bdev->hu, true); + + /* Once this returns, driver suspends BT via GPIO */ + bdev->is_suspended = true; + } + + /* Suspend the device */ + err = bdev->set_device_wakeup(bdev, false); + if (err) { + if (bdev->is_suspended && bdev->hu) { + bdev->is_suspended = false; + hci_uart_set_flow_control(bdev->hu, false); + } + return -EBUSY; + } + + bt_dev_dbg(bdev, "suspend, delaying 15 ms"); + msleep(15); + + return 0; +} + +static int bcm_resume_device(struct device *dev) +{ + struct bcm_device *bdev = 
dev_get_drvdata(dev); + int err; + + bt_dev_dbg(bdev, ""); + + err = bdev->set_device_wakeup(bdev, true); + if (err) { + dev_err(dev, "Failed to power up\n"); + return err; + } + + bt_dev_dbg(bdev, "resume, delaying 15 ms"); + msleep(15); + + /* When this executes, the device has woken up already */ + if (bdev->is_suspended && bdev->hu) { + bdev->is_suspended = false; + + hci_uart_set_flow_control(bdev->hu, false); + } + + return 0; +} +#endif + +#ifdef CONFIG_PM_SLEEP +/* suspend callback */ +static int bcm_suspend(struct device *dev) +{ + struct bcm_device *bdev = dev_get_drvdata(dev); + int error; + + bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended); + + /* + * When used with a device instantiated as platform_device, bcm_suspend + * can be called at any time as long as the platform device is bound, + * so it should use bcm_device_lock to protect access to hci_uart + * and device_wake-up GPIO. + */ + mutex_lock(&bcm_device_lock); + + if (!bdev->hu) + goto unlock; + + if (pm_runtime_active(dev)) + bcm_suspend_device(dev); + + if (device_may_wakeup(dev) && bdev->irq > 0) { + error = enable_irq_wake(bdev->irq); + if (!error) + bt_dev_dbg(bdev, "BCM irq: enabled"); + } + +unlock: + mutex_unlock(&bcm_device_lock); + + return 0; +} + +/* resume callback */ +static int bcm_resume(struct device *dev) +{ + struct bcm_device *bdev = dev_get_drvdata(dev); + int err = 0; + + bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended); + + /* + * When used with a device instantiated as platform_device, bcm_resume + * can be called at any time as long as platform device is bound, + * so it should use bcm_device_lock to protect access to hci_uart + * and device_wake-up GPIO. + */ + mutex_lock(&bcm_device_lock); + + if (!bdev->hu) + goto unlock; + + if (device_may_wakeup(dev) && bdev->irq > 0) { + disable_irq_wake(bdev->irq); + bt_dev_dbg(bdev, "BCM irq: disabled"); + } + + err = bcm_resume_device(dev); + +unlock: + mutex_unlock(&bcm_device_lock); + + if (!err) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return 0; +} +#endif + +/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) 
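+ * Entries in bcm_broken_irq_dmi_table below either carry a gpiod lookup
+ * table as driver_data to remap host wakeup to a known-good pin, or no
+ * driver_data at all, in which case bcm_get_resources() disables the IRQ
+ * and runtime PM for that board.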
*/ +static struct gpiod_lookup_table irq_on_int33fc02_pin17_gpios = { + .dev_id = "serial0-0", + .table = { + GPIO_LOOKUP("INT33FC:02", 17, "host-wakeup-alt", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static const struct dmi_system_id bcm_broken_irq_dmi_table[] = { + { + .ident = "Acer Iconia One 7 B1-750", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"), + }, + .driver_data = &irq_on_int33fc02_pin17_gpios, + }, + { + .ident = "Asus TF103C", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), + }, + .driver_data = &irq_on_int33fc02_pin17_gpios, + }, + { + .ident = "Lenovo Yoga Tablet 2 830F/L / 1050F/L", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + .driver_data = &irq_on_int33fc02_pin17_gpios, + }, + { + .ident = "Meegopad T08", + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, + "To be filled by OEM."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "T3 MRD"), + DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V1.1"), + }, + }, + { } +}; + +#ifdef CONFIG_ACPI +static const struct acpi_gpio_params first_gpio = { 0, 0, false }; +static const struct acpi_gpio_params second_gpio = { 1, 0, false }; +static const struct acpi_gpio_params third_gpio = { 2, 0, false }; + +static const struct acpi_gpio_mapping acpi_bcm_int_last_gpios[] = { + { "device-wakeup-gpios", &first_gpio, 1 }, + { "shutdown-gpios", &second_gpio, 1 }, + { "host-wakeup-gpios", &third_gpio, 1 }, + { }, +}; + +static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { + { "host-wakeup-gpios", &first_gpio, 1 }, + { "device-wakeup-gpios", &second_gpio, 1 }, + { "shutdown-gpios", &third_gpio, 1 }, + { }, +}; + +static int bcm_resource(struct acpi_resource *ares, void *data) +{ + struct bcm_device *dev = data; + struct acpi_resource_extended_irq *irq; + struct acpi_resource_gpio *gpio; + struct acpi_resource_uart_serialbus *sb; + + switch (ares->type) { + case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: + irq = &ares->data.extended_irq; + if (irq->polarity != ACPI_ACTIVE_LOW) + dev_info(dev->dev, "ACPI Interrupt resource is active-high, this is usually wrong, treating the IRQ as active-low\n"); + dev->irq_active_low = true; + break; + + case ACPI_RESOURCE_TYPE_GPIO: + gpio = &ares->data.gpio; + if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) { + dev->gpio_int_idx = dev->gpio_count; + dev->irq_active_low = gpio->polarity == ACPI_ACTIVE_LOW; + } + dev->gpio_count++; + break; + + case ACPI_RESOURCE_TYPE_SERIAL_BUS: + sb = &ares->data.uart_serial_bus; + if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) { + dev->init_speed = sb->default_baud_rate; + dev->oper_speed = 4000000; + } + break; + + default: + break; + } + + return 0; +} + +static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake) +{ + if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake))) + return -EIO; + + return 0; +} + +static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered) +{ + if (ACPI_FAILURE(acpi_evaluate_object(powered ? 
dev->btpu : dev->btpd, + NULL, NULL, NULL))) + return -EIO; + + return 0; +} + +static int bcm_apple_get_resources(struct bcm_device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev->dev); + const union acpi_object *obj; + + if (!adev || + ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) || + ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) || + ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd))) + return -ENODEV; + + if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) && + obj->buffer.length == 8) + dev->init_speed = *(u64 *)obj->buffer.pointer; + + dev->set_device_wakeup = bcm_apple_set_device_wakeup; + dev->set_shutdown = bcm_apple_set_shutdown; + + return 0; +} +#else +static inline int bcm_apple_get_resources(struct bcm_device *dev) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_ACPI */ + +static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake) +{ + gpiod_set_value_cansleep(dev->device_wakeup, awake); + return 0; +} + +static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered) +{ + gpiod_set_value_cansleep(dev->shutdown, powered); + if (dev->reset) + /* + * The reset line is asserted on powerdown and deasserted + * on poweron so the inverse of powered is used. Notice + * that the GPIO line BT_RST_N needs to be specified as + * active low in the device tree or similar system + * description. + */ + gpiod_set_value_cansleep(dev->reset, !powered); + return 0; +} + +/* Try a bunch of names for TXCO */ +static struct clk *bcm_get_txco(struct device *dev) +{ + struct clk *clk; + + /* New explicit name */ + clk = devm_clk_get(dev, "txco"); + if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) + return clk; + + /* Deprecated name */ + clk = devm_clk_get(dev, "extclk"); + if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) + return clk; + + /* Original code used no name at all */ + return devm_clk_get(dev, NULL); +} + +static int bcm_get_resources(struct bcm_device *dev) +{ + const struct dmi_system_id *broken_irq_dmi_id; + const char *irq_con_id = "host-wakeup"; + int err; + + dev->name = dev_name(dev->dev); + + if (x86_apple_machine && !bcm_apple_get_resources(dev)) + return 0; + + dev->txco_clk = bcm_get_txco(dev->dev); + + /* Handle deferred probing */ + if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER)) + return PTR_ERR(dev->txco_clk); + + /* Ignore all other errors as before */ + if (IS_ERR(dev->txco_clk)) + dev->txco_clk = NULL; + + dev->lpo_clk = devm_clk_get(dev->dev, "lpo"); + if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER)) + return PTR_ERR(dev->lpo_clk); + + if (IS_ERR(dev->lpo_clk)) + dev->lpo_clk = NULL; + + /* Check if we accidentally fetched the lpo clock twice */ + if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) { + devm_clk_put(dev->dev, dev->txco_clk); + dev->txco_clk = NULL; + } + + dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup", + GPIOD_OUT_LOW); + if (IS_ERR(dev->device_wakeup)) + return PTR_ERR(dev->device_wakeup); + + dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown", + GPIOD_OUT_LOW); + if (IS_ERR(dev->shutdown)) + return PTR_ERR(dev->shutdown); + + dev->reset = devm_gpiod_get_optional(dev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(dev->reset)) + return PTR_ERR(dev->reset); + + dev->set_device_wakeup = bcm_gpio_set_device_wakeup; + dev->set_shutdown = bcm_gpio_set_shutdown; + + dev->supplies[0].supply = "vbat"; + dev->supplies[1].supply = "vddio"; + err = devm_regulator_bulk_get(dev->dev, BCM_NUM_SUPPLIES, + dev->supplies); + if (err) + 
return err; + + broken_irq_dmi_id = dmi_first_match(bcm_broken_irq_dmi_table); + if (broken_irq_dmi_id && broken_irq_dmi_id->driver_data) { + gpiod_add_lookup_table(broken_irq_dmi_id->driver_data); + irq_con_id = "host-wakeup-alt"; + dev->irq_active_low = false; + dev->irq = 0; + } + + /* IRQ can be declared in ACPI table as Interrupt or GpioInt */ + if (dev->irq <= 0) { + struct gpio_desc *gpio; + + gpio = devm_gpiod_get_optional(dev->dev, irq_con_id, GPIOD_IN); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + dev->irq = gpiod_to_irq(gpio); + } + + if (broken_irq_dmi_id) { + if (broken_irq_dmi_id->driver_data) { + gpiod_remove_lookup_table(broken_irq_dmi_id->driver_data); + } else { + dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n", + broken_irq_dmi_id->ident); + dev->irq = 0; + } + } + + dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq); + return 0; +} + +#ifdef CONFIG_ACPI +static int bcm_acpi_probe(struct bcm_device *dev) +{ + LIST_HEAD(resources); + const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios; + struct resource_entry *entry; + int ret; + + /* Retrieve UART ACPI info */ + dev->gpio_int_idx = -1; + ret = acpi_dev_get_resources(ACPI_COMPANION(dev->dev), + &resources, bcm_resource, dev); + if (ret < 0) + return ret; + + resource_list_for_each_entry(entry, &resources) { + if (resource_type(entry->res) == IORESOURCE_IRQ) { + dev->irq = entry->res->start; + break; + } + } + acpi_dev_free_resource_list(&resources); + + /* If the DSDT uses an Interrupt resource for the IRQ, then there are + * only 2 GPIO resources, we use the irq-last mapping for this, since + * we already have an irq the 3th / last mapping will not be used. + */ + if (dev->irq) + gpio_mapping = acpi_bcm_int_last_gpios; + else if (dev->gpio_int_idx == 0) + gpio_mapping = acpi_bcm_int_first_gpios; + else if (dev->gpio_int_idx == 2) + gpio_mapping = acpi_bcm_int_last_gpios; + else + dev_warn(dev->dev, "Unexpected ACPI gpio_int_idx: %d\n", + dev->gpio_int_idx); + + /* Warn if our expectations are not met. */ + if (dev->gpio_count != (dev->irq ? 2 : 3)) + dev_warn(dev->dev, "Unexpected number of ACPI GPIOs: %d\n", + dev->gpio_count); + + ret = devm_acpi_dev_add_driver_gpios(dev->dev, gpio_mapping); + if (ret) + return ret; + + if (irq_polarity != -1) { + dev->irq_active_low = irq_polarity; + dev_warn(dev->dev, "Overwriting IRQ polarity to active %s by module-param\n", + dev->irq_active_low ? 
"low" : "high"); + } + + return 0; +} +#else +static int bcm_acpi_probe(struct bcm_device *dev) +{ + return -EINVAL; +} +#endif /* CONFIG_ACPI */ + +static int bcm_of_probe(struct bcm_device *bdev) +{ + bdev->use_autobaud_mode = device_property_read_bool(bdev->dev, + "brcm,requires-autobaud-mode"); + device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed); + device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params", + bdev->pcm_int_params, 5); + bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup"); + bdev->irq_active_low = irq_get_trigger_type(bdev->irq) + & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW); + return 0; +} + +static int bcm_probe(struct platform_device *pdev) +{ + struct bcm_device *dev; + int ret; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->dev = &pdev->dev; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + dev->irq = ret; + + /* Initialize routing field to an unused value */ + dev->pcm_int_params[0] = 0xff; + + if (has_acpi_companion(&pdev->dev)) { + ret = bcm_acpi_probe(dev); + if (ret) + return ret; + } + + ret = bcm_get_resources(dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, dev); + + dev_info(&pdev->dev, "%s device registered.\n", dev->name); + + /* Place this instance on the device list */ + mutex_lock(&bcm_device_lock); + list_add_tail(&dev->list, &bcm_device_list); + mutex_unlock(&bcm_device_lock); + + ret = bcm_gpio_set_power(dev, false); + if (ret) + dev_err(&pdev->dev, "Failed to power down\n"); + + return 0; +} + +static int bcm_remove(struct platform_device *pdev) +{ + struct bcm_device *dev = platform_get_drvdata(pdev); + + mutex_lock(&bcm_device_lock); + list_del(&dev->list); + mutex_unlock(&bcm_device_lock); + + dev_info(&pdev->dev, "%s device unregistered.\n", dev->name); + + return 0; +} + +static const struct hci_uart_proto bcm_proto = { + .id = HCI_UART_BCM, + .name = "Broadcom", + .manufacturer = 15, + .init_speed = 115200, + .open = bcm_open, + .close = bcm_close, + .flush = bcm_flush, + .setup = bcm_setup, + .set_baudrate = bcm_set_baudrate, + .recv = bcm_recv, + .enqueue = bcm_enqueue, + .dequeue = bcm_dequeue, +}; + +#ifdef CONFIG_ACPI + +/* bcm43430a0/a1 BT does not support 48MHz UART clock, limit to 2000000 baud */ +static struct bcm_device_data bcm43430_device_data = { + .max_speed = 2000000, +}; + +static const struct acpi_device_id bcm_acpi_match[] = { + { "BCM2E00" }, + { "BCM2E01" }, + { "BCM2E02" }, + { "BCM2E03" }, + { "BCM2E04" }, + { "BCM2E05" }, + { "BCM2E06" }, + { "BCM2E07" }, + { "BCM2E08" }, + { "BCM2E09" }, + { "BCM2E0A" }, + { "BCM2E0B" }, + { "BCM2E0C" }, + { "BCM2E0D" }, + { "BCM2E0E" }, + { "BCM2E0F" }, + { "BCM2E10" }, + { "BCM2E11" }, + { "BCM2E12" }, + { "BCM2E13" }, + { "BCM2E14" }, + { "BCM2E15" }, + { "BCM2E16" }, + { "BCM2E17" }, + { "BCM2E18" }, + { "BCM2E19" }, + { "BCM2E1A" }, + { "BCM2E1B" }, + { "BCM2E1C" }, + { "BCM2E1D" }, + { "BCM2E1F" }, + { "BCM2E20" }, + { "BCM2E21" }, + { "BCM2E22" }, + { "BCM2E23" }, + { "BCM2E24" }, + { "BCM2E25" }, + { "BCM2E26" }, + { "BCM2E27" }, + { "BCM2E28" }, + { "BCM2E29" }, + { "BCM2E2A" }, + { "BCM2E2B" }, + { "BCM2E2C" }, + { "BCM2E2D" }, + { "BCM2E2E" }, + { "BCM2E2F" }, + { "BCM2E30" }, + { "BCM2E31" }, + { "BCM2E32" }, + { "BCM2E33" }, + { "BCM2E34" }, + { "BCM2E35" }, + { "BCM2E36" }, + { "BCM2E37" }, + { "BCM2E38" }, + { "BCM2E39" }, + { "BCM2E3A" }, + { "BCM2E3B" }, + { "BCM2E3C" }, + { "BCM2E3D" }, + { "BCM2E3E" }, + { "BCM2E3F" }, + { "BCM2E40" }, + { 
"BCM2E41" }, + { "BCM2E42" }, + { "BCM2E43" }, + { "BCM2E44" }, + { "BCM2E45" }, + { "BCM2E46" }, + { "BCM2E47" }, + { "BCM2E48" }, + { "BCM2E49" }, + { "BCM2E4A" }, + { "BCM2E4B" }, + { "BCM2E4C" }, + { "BCM2E4D" }, + { "BCM2E4E" }, + { "BCM2E4F" }, + { "BCM2E50" }, + { "BCM2E51" }, + { "BCM2E52" }, + { "BCM2E53" }, + { "BCM2E54" }, + { "BCM2E55" }, + { "BCM2E56" }, + { "BCM2E57" }, + { "BCM2E58" }, + { "BCM2E59" }, + { "BCM2E5A" }, + { "BCM2E5B" }, + { "BCM2E5C" }, + { "BCM2E5D" }, + { "BCM2E5E" }, + { "BCM2E5F" }, + { "BCM2E60" }, + { "BCM2E61" }, + { "BCM2E62" }, + { "BCM2E63" }, + { "BCM2E64" }, + { "BCM2E65" }, + { "BCM2E66" }, + { "BCM2E67" }, + { "BCM2E68" }, + { "BCM2E69" }, + { "BCM2E6B" }, + { "BCM2E6D" }, + { "BCM2E6E" }, + { "BCM2E6F" }, + { "BCM2E70" }, + { "BCM2E71" }, + { "BCM2E72" }, + { "BCM2E73" }, + { "BCM2E74", (long)&bcm43430_device_data }, + { "BCM2E75", (long)&bcm43430_device_data }, + { "BCM2E76" }, + { "BCM2E77" }, + { "BCM2E78" }, + { "BCM2E79" }, + { "BCM2E7A" }, + { "BCM2E7B", (long)&bcm43430_device_data }, + { "BCM2E7C" }, + { "BCM2E7D" }, + { "BCM2E7E" }, + { "BCM2E7F" }, + { "BCM2E80", (long)&bcm43430_device_data }, + { "BCM2E81" }, + { "BCM2E82" }, + { "BCM2E83" }, + { "BCM2E84" }, + { "BCM2E85" }, + { "BCM2E86" }, + { "BCM2E87" }, + { "BCM2E88" }, + { "BCM2E89", (long)&bcm43430_device_data }, + { "BCM2E8A" }, + { "BCM2E8B" }, + { "BCM2E8C" }, + { "BCM2E8D" }, + { "BCM2E8E" }, + { "BCM2E90" }, + { "BCM2E92" }, + { "BCM2E93" }, + { "BCM2E94", (long)&bcm43430_device_data }, + { "BCM2E95" }, + { "BCM2E96" }, + { "BCM2E97" }, + { "BCM2E98" }, + { "BCM2E99", (long)&bcm43430_device_data }, + { "BCM2E9A" }, + { "BCM2E9B", (long)&bcm43430_device_data }, + { "BCM2E9C" }, + { "BCM2E9D" }, + { "BCM2E9F", (long)&bcm43430_device_data }, + { "BCM2EA0" }, + { "BCM2EA1" }, + { "BCM2EA2", (long)&bcm43430_device_data }, + { "BCM2EA3", (long)&bcm43430_device_data }, + { "BCM2EA4" }, + { "BCM2EA5" }, + { "BCM2EA6" }, + { "BCM2EA7" }, + { "BCM2EA8" }, + { "BCM2EA9" }, + { "BCM2EAA", (long)&bcm43430_device_data }, + { "BCM2EAB", (long)&bcm43430_device_data }, + { "BCM2EAC", (long)&bcm43430_device_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); +#endif + +/* suspend and resume callbacks */ +static const struct dev_pm_ops bcm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume) + SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL) +}; + +static struct platform_driver bcm_driver = { + .probe = bcm_probe, + .remove = bcm_remove, + .driver = { + .name = "hci_bcm", + .acpi_match_table = ACPI_PTR(bcm_acpi_match), + .pm = &bcm_pm_ops, + }, +}; + +static int bcm_serdev_probe(struct serdev_device *serdev) +{ + struct bcm_device *bcmdev; + const struct bcm_device_data *data; + int err; + + bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL); + if (!bcmdev) + return -ENOMEM; + + bcmdev->dev = &serdev->dev; +#ifdef CONFIG_PM + bcmdev->hu = &bcmdev->serdev_hu; +#endif + bcmdev->serdev_hu.serdev = serdev; + serdev_device_set_drvdata(serdev, bcmdev); + + /* Initialize routing field to an unused value */ + bcmdev->pcm_int_params[0] = 0xff; + + if (has_acpi_companion(&serdev->dev)) + err = bcm_acpi_probe(bcmdev); + else + err = bcm_of_probe(bcmdev); + if (err) + return err; + + err = bcm_get_resources(bcmdev); + if (err) + return err; + + if (!bcmdev->shutdown) { + dev_warn(&serdev->dev, + "No reset resource, using default baud rate\n"); + bcmdev->oper_speed = bcmdev->init_speed; + } + + err = bcm_gpio_set_power(bcmdev, false); + if (err) + 
dev_err(&serdev->dev, "Failed to power down\n"); + + data = device_get_match_data(bcmdev->dev); + if (data) { + bcmdev->max_autobaud_speed = data->max_autobaud_speed; + bcmdev->no_early_set_baudrate = data->no_early_set_baudrate; + bcmdev->drive_rts_on_open = data->drive_rts_on_open; + bcmdev->no_uart_clock_set = data->no_uart_clock_set; + if (data->max_speed && bcmdev->oper_speed > data->max_speed) + bcmdev->oper_speed = data->max_speed; + } + + return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto); +} + +static void bcm_serdev_remove(struct serdev_device *serdev) +{ + struct bcm_device *bcmdev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&bcmdev->serdev_hu); +} + +#ifdef CONFIG_OF +static struct bcm_device_data bcm4354_device_data = { + .no_early_set_baudrate = true, +}; + +static struct bcm_device_data bcm43438_device_data = { + .drive_rts_on_open = true, +}; + +static struct bcm_device_data cyw4373a0_device_data = { + .no_uart_clock_set = true, +}; + +static struct bcm_device_data cyw55572_device_data = { + .max_autobaud_speed = 921600, +}; + +static const struct of_device_id bcm_bluetooth_of_match[] = { + { .compatible = "brcm,bcm20702a1" }, + { .compatible = "brcm,bcm4329-bt" }, + { .compatible = "brcm,bcm4330-bt" }, + { .compatible = "brcm,bcm4334-bt" }, + { .compatible = "brcm,bcm4345c5" }, + { .compatible = "brcm,bcm43430a0-bt" }, + { .compatible = "brcm,bcm43430a1-bt" }, + { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, + { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data }, + { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, + { .compatible = "brcm,bcm4335a0" }, + { .compatible = "cypress,cyw4373a0-bt", .data = &cyw4373a0_device_data }, + { .compatible = "infineon,cyw55572-bt", .data = &cyw55572_device_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match); +#endif + +static struct serdev_device_driver bcm_serdev_driver = { + .probe = bcm_serdev_probe, + .remove = bcm_serdev_remove, + .driver = { + .name = "hci_uart_bcm", + .of_match_table = of_match_ptr(bcm_bluetooth_of_match), + .acpi_match_table = ACPI_PTR(bcm_acpi_match), + .pm = &bcm_pm_ops, + }, +}; + +int __init bcm_init(void) +{ + /* For now, we need to keep both platform device + * driver (ACPI generated) and serdev driver (DT). + */ + platform_driver_register(&bcm_driver); + serdev_device_driver_register(&bcm_serdev_driver); + + return hci_uart_register_proto(&bcm_proto); +} + +int __exit bcm_deinit(void) +{ + platform_driver_unregister(&bcm_driver); + serdev_device_driver_unregister(&bcm_serdev_driver); + + return hci_uart_unregister_proto(&bcm_proto); +} diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c new file mode 100644 index 000000000..a61757835 --- /dev/null +++ b/drivers/bluetooth/hci_bcm4377.c @@ -0,0 +1,2519 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe + * + * Copyright (C) The Asahi Linux Contributors + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +enum bcm4377_chip { + BCM4377 = 0, + BCM4378, + BCM4387, +}; + +#define BCM4377_DEVICE_ID 0x5fa0 +#define BCM4378_DEVICE_ID 0x5f69 +#define BCM4387_DEVICE_ID 0x5f71 + +#define BCM4377_TIMEOUT 1000 + +/* + * These devices only support DMA transactions inside a 32bit window + * (possibly to avoid 64 bit arithmetic). 
The window size cannot exceed + * 0xffffffff but is always aligned down to the previous 0x200 byte boundary + * which effectively limits the window to [start, start+0xfffffe00]. + * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't + * run into this limitation. + */ +#define BCM4377_DMA_MASK 0xfffffe00 + +#define BCM4377_PCIECFG_BAR0_WINDOW1 0x80 +#define BCM4377_PCIECFG_BAR0_WINDOW2 0x70 +#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1 0x74 +#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW2 0x78 +#define BCM4377_PCIECFG_BAR2_WINDOW 0x84 + +#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT 0x18011000 +#define BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT 0x19000000 + +#define BCM4377_PCIECFG_SUBSYSTEM_CTRL 0x88 + +#define BCM4377_BAR0_FW_DOORBELL 0x140 +#define BCM4377_BAR0_RTI_CONTROL 0x144 + +#define BCM4377_BAR0_SLEEP_CONTROL 0x150 +#define BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE 0 +#define BCM4377_BAR0_SLEEP_CONTROL_AWAKE 2 +#define BCM4377_BAR0_SLEEP_CONTROL_QUIESCE 3 + +#define BCM4377_BAR0_DOORBELL 0x174 +#define BCM4377_BAR0_DOORBELL_VALUE GENMASK(31, 16) +#define BCM4377_BAR0_DOORBELL_IDX GENMASK(15, 8) +#define BCM4377_BAR0_DOORBELL_RING BIT(5) + +#define BCM4377_BAR0_HOST_WINDOW_LO 0x590 +#define BCM4377_BAR0_HOST_WINDOW_HI 0x594 +#define BCM4377_BAR0_HOST_WINDOW_SIZE 0x598 + +#define BCM4377_BAR2_BOOTSTAGE 0x200454 + +#define BCM4377_BAR2_FW_LO 0x200478 +#define BCM4377_BAR2_FW_HI 0x20047c +#define BCM4377_BAR2_FW_SIZE 0x200480 + +#define BCM4377_BAR2_CONTEXT_ADDR_LO 0x20048c +#define BCM4377_BAR2_CONTEXT_ADDR_HI 0x200450 + +#define BCM4377_BAR2_RTI_STATUS 0x20045c +#define BCM4377_BAR2_RTI_WINDOW_LO 0x200494 +#define BCM4377_BAR2_RTI_WINDOW_HI 0x200498 +#define BCM4377_BAR2_RTI_WINDOW_SIZE 0x20049c + +#define BCM4377_OTP_SIZE 0xe0 +#define BCM4377_OTP_SYS_VENDOR 0x15 +#define BCM4377_OTP_CIS 0x80 +#define BCM4377_OTP_VENDOR_HDR 0x00000008 +#define BCM4377_OTP_MAX_PARAM_LEN 16 + +#define BCM4377_N_TRANSFER_RINGS 9 +#define BCM4377_N_COMPLETION_RINGS 6 + +#define BCM4377_MAX_RING_SIZE 256 + +#define BCM4377_MSGID_GENERATION GENMASK(15, 8) +#define BCM4377_MSGID_ID GENMASK(7, 0) + +#define BCM4377_RING_N_ENTRIES 128 + +#define BCM4377_CONTROL_MSG_SIZE 0x34 +#define BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff) + +#define MAX_ACL_PAYLOAD_SIZE (HCI_MAX_FRAME_SIZE + HCI_ACL_HDR_SIZE) +#define MAX_SCO_PAYLOAD_SIZE (HCI_MAX_SCO_SIZE + HCI_SCO_HDR_SIZE) +#define MAX_EVENT_PAYLOAD_SIZE (HCI_MAX_EVENT_SIZE + HCI_EVENT_HDR_SIZE) + +enum bcm4377_otp_params_type { + BCM4377_OTP_BOARD_PARAMS, + BCM4377_OTP_CHIP_PARAMS +}; + +enum bcm4377_transfer_ring_id { + BCM4377_XFER_RING_CONTROL = 0, + BCM4377_XFER_RING_HCI_H2D = 1, + BCM4377_XFER_RING_HCI_D2H = 2, + BCM4377_XFER_RING_SCO_H2D = 3, + BCM4377_XFER_RING_SCO_D2H = 4, + BCM4377_XFER_RING_ACL_H2D = 5, + BCM4377_XFER_RING_ACL_D2H = 6, +}; + +enum bcm4377_completion_ring_id { + BCM4377_ACK_RING_CONTROL = 0, + BCM4377_ACK_RING_HCI_ACL = 1, + BCM4377_EVENT_RING_HCI_ACL = 2, + BCM4377_ACK_RING_SCO = 3, + BCM4377_EVENT_RING_SCO = 4, +}; + +enum bcm4377_doorbell { + BCM4377_DOORBELL_CONTROL = 0, + BCM4377_DOORBELL_HCI_H2D = 1, + BCM4377_DOORBELL_HCI_D2H = 2, + BCM4377_DOORBELL_ACL_H2D = 3, + BCM4377_DOORBELL_ACL_D2H = 4, + BCM4377_DOORBELL_SCO = 6, +}; + +/* + * Transfer ring entry + * + * flags: Flags to indicate if the payload is appended or mapped + * len: Payload length + * payload: Optional payload DMA address + * id: Message id to recognize the answer in the completion ring entry + */ +struct bcm4377_xfer_ring_entry { +#define 
BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED BIT(0) +#define BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1) + u8 flags; + __le16 len; + u8 _unk0; + __le64 payload; + __le16 id; + u8 _unk1[2]; +} __packed; +static_assert(sizeof(struct bcm4377_xfer_ring_entry) == 0x10); + +/* + * Completion ring entry + * + * flags: Flags to indicate if the payload is appended or mapped. If the payload + * is mapped it can be found in the buffer of the corresponding transfer + * ring message. + * ring_id: Transfer ring ID which required this message + * msg_id: Message ID specified in transfer ring entry + * len: Payload length + */ +struct bcm4377_completion_ring_entry { + u8 flags; + u8 _unk0; + __le16 ring_id; + __le16 msg_id; + __le32 len; + u8 _unk1[6]; +} __packed; +static_assert(sizeof(struct bcm4377_completion_ring_entry) == 0x10); + +enum bcm4377_control_message_type { + BCM4377_CONTROL_MSG_CREATE_XFER_RING = 1, + BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING = 2, + BCM4377_CONTROL_MSG_DESTROY_XFER_RING = 3, + BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING = 4, +}; + +/* + * Control message used to create a completion ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING + * header_size: Unknown, but probably reserved space in front of the entry + * footer_size: Number of 32 bit words reserved for payloads after the entry + * id/id_again: Completion ring index + * ring_iova: DMA address of the ring buffer + * n_elements: Number of elements inside the ring buffer + * msi: MSI index, doesn't work for all rings though and should be zero + * intmod_delay: Unknown delay + * intmod_bytes: Unknown + */ +struct bcm4377_create_completion_ring_msg { + u8 msg_type; + u8 header_size; + u8 footer_size; + u8 _unk0; + __le16 id; + __le16 id_again; + __le64 ring_iova; + __le16 n_elements; + __le32 unk; + u8 _unk1[6]; + __le16 msi; + __le16 intmod_delay; + __le32 intmod_bytes; + __le16 _unk2; + __le32 _unk3; + u8 _unk4[10]; +} __packed; +static_assert(sizeof(struct bcm4377_create_completion_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * Control ring message used to destroy a completion ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING + * ring_id: Completion ring to be destroyed + */ +struct bcm4377_destroy_completion_ring_msg { + u8 msg_type; + u8 _pad0; + __le16 ring_id; + u8 _pad1[48]; +} __packed; +static_assert(sizeof(struct bcm4377_destroy_completion_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * Control message used to create a transfer ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_XFER_RING + * header_size: Number of 32 bit words reserved for unknown content before the + * entry + * footer_size: Number of 32 bit words reserved for payloads after the entry + * ring_id/ring_id_again: Transfer ring index + * ring_iova: DMA address of the ring buffer + * n_elements: Number of elements inside the ring buffer + * completion_ring_id: Completion ring index for acknowledgements and events + * doorbell: Doorbell index used to notify device of new entries + * flags: Transfer ring flags + * - virtual: set if there is no associated shared memory and only the + * corresponding completion ring is used + * - sync: only set for the SCO rings + */ +struct bcm4377_create_transfer_ring_msg { + u8 msg_type; + u8 header_size; + u8 footer_size; + u8 _unk0; + __le16 ring_id; + __le16 ring_id_again; + __le64 ring_iova; + u8 _unk1[8]; + __le16 n_elements; + __le16 completion_ring_id; + __le16 doorbell; +#define BCM4377_XFER_RING_FLAG_VIRTUAL BIT(7) +#define 
BCM4377_XFER_RING_FLAG_SYNC BIT(8) + __le16 flags; + u8 _unk2[20]; +} __packed; +static_assert(sizeof(struct bcm4377_create_transfer_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * Control ring message used to destroy a transfer ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING + * ring_id: Transfer ring to be destroyed + */ +struct bcm4377_destroy_transfer_ring_msg { + u8 msg_type; + u8 _pad0; + __le16 ring_id; + u8 _pad1[48]; +} __packed; +static_assert(sizeof(struct bcm4377_destroy_transfer_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * "Converged IPC" context struct used to make the device aware of all other + * shared memory structures. A pointer to this structure is configured inside an + * MMIO register. + * + * version: Protocol version, must be 2. + * size: Size of this structure, must be 0x68. + * enabled_caps: Enabled capabilities. Unknown bitfield but should be 2. + * peripheral_info_addr: DMA address for a 0x20 buffer to which the device will + * write unknown contents + * {completion,xfer}_ring_{tails,heads}_addr: DMA pointers to ring heads/tails + * n_completion_rings: Number of completion rings, the firmware only works if + * this is set to BCM4377_N_COMPLETION_RINGS. + * n_xfer_rings: Number of transfer rings, the firmware only works if + * this is set to BCM4377_N_TRANSFER_RINGS. + * control_completion_ring_addr: Control completion ring buffer DMA address + * control_xfer_ring_addr: Control transfer ring buffer DMA address + * control_xfer_ring_n_entries: Number of control transfer ring entries + * control_completion_ring_n_entries: Number of control completion ring entries + * control_xfer_ring_doorbell: Control transfer ring doorbell + * control_completion_ring_doorbell: Control completion ring doorbell, + * must be set to 0xffff + * control_xfer_ring_msi: Control transfer ring MSI index, must be 0 + * control_completion_ring_msi: Control completion ring MSI index, must be 0.
+ * control_xfer_ring_header_size: Number of 32 bit words reserved in front of + * every control transfer ring entry + * control_xfer_ring_footer_size: Number of 32 bit words reserved after every + * control transfer ring entry + * control_completion_ring_header_size: Number of 32 bit words reserved in front + * of every control completion ring entry + * control_completion_ring_footer_size: Number of 32 bit words reserved after + * every control completion ring entry + * scratch_pad: Optional scratch pad DMA address + * scratch_pad_size: Scratch pad size + */ +struct bcm4377_context { + __le16 version; + __le16 size; + __le32 enabled_caps; + + __le64 peripheral_info_addr; + + /* ring heads and tails */ + __le64 completion_ring_heads_addr; + __le64 xfer_ring_tails_addr; + __le64 completion_ring_tails_addr; + __le64 xfer_ring_heads_addr; + __le16 n_completion_rings; + __le16 n_xfer_rings; + + /* control ring configuration */ + __le64 control_completion_ring_addr; + __le64 control_xfer_ring_addr; + __le16 control_xfer_ring_n_entries; + __le16 control_completion_ring_n_entries; + __le16 control_xfer_ring_doorbell; + __le16 control_completion_ring_doorbell; + __le16 control_xfer_ring_msi; + __le16 control_completion_ring_msi; + u8 control_xfer_ring_header_size; + u8 control_xfer_ring_footer_size; + u8 control_completion_ring_header_size; + u8 control_completion_ring_footer_size; + + __le16 _unk0; + __le16 _unk1; + + __le64 scratch_pad; + __le32 scratch_pad_size; + + __le32 _unk3; +} __packed; +static_assert(sizeof(struct bcm4377_context) == 0x68); + +#define BCM4378_CALIBRATION_CHUNK_SIZE 0xe6 +struct bcm4378_hci_send_calibration_cmd { + u8 unk; + __le16 blocks_left; + u8 data[BCM4378_CALIBRATION_CHUNK_SIZE]; +} __packed; + +#define BCM4378_PTB_CHUNK_SIZE 0xcf +struct bcm4378_hci_send_ptb_cmd { + __le16 blocks_left; + u8 data[BCM4378_PTB_CHUNK_SIZE]; +} __packed; + +/* + * Shared memory structure used to store the ring head and tail pointers. + */ +struct bcm4377_ring_state { + __le16 completion_ring_head[BCM4377_N_COMPLETION_RINGS]; + __le16 completion_ring_tail[BCM4377_N_COMPLETION_RINGS]; + __le16 xfer_ring_head[BCM4377_N_TRANSFER_RINGS]; + __le16 xfer_ring_tail[BCM4377_N_TRANSFER_RINGS]; +}; + +/* + * A transfer ring can be used in two configurations: + * 1) Send control or HCI messages to the device which are then acknowledged + * in the corresponding completion ring + * 2) Receiving HCI frames from the devices. In this case the transfer ring + * itself contains empty messages that are acknowledged once data is + * available from the device. If the payloads fit inside the footers + * of the completion ring the transfer ring can be configured to be + * virtual such that it has no ring buffer. 
+ * + * ring_id: ring index hardcoded in the firmware + * doorbell: doorbell index to notify device of new entries + * payload_size: optional in-place payload size + * mapped_payload_size: optional out-of-place payload size + * completion_ring: index of corresponding completion ring + * n_entries: number of entries inside this ring + * generation: ring generation; incremented on hci_open to detect stale messages + * sync: set to true for SCO rings + * virtual: set to true if this ring has no entries and is just required to + * set up a corresponding completion ring for device->host messages + * d2h_buffers_only: set to true if this ring is only used to provide large + * buffers used by device->host messages in the completion + * ring + * allow_wait: allow waiting for messages to be acknowledged + * enabled: true once the ring has been created and can be used + * ring: ring buffer for entries (struct bcm4377_xfer_ring_entry) + * ring_dma: DMA address for ring entry buffer + * payloads: payload buffer for mapped_payload_size payloads + * payloads_dma: DMA address for payload buffer + * events: pointer to array of completions if waiting is allowed + * msgids: bitmap to keep track of used message ids + * lock: Spinlock to protect access to ring structures used in the irq handler + */ +struct bcm4377_transfer_ring { + enum bcm4377_transfer_ring_id ring_id; + enum bcm4377_doorbell doorbell; + size_t payload_size; + size_t mapped_payload_size; + u8 completion_ring; + u16 n_entries; + u8 generation; + + bool sync; + bool virtual; + bool d2h_buffers_only; + bool allow_wait; + bool enabled; + + void *ring; + dma_addr_t ring_dma; + + void *payloads; + dma_addr_t payloads_dma; + + struct completion **events; + DECLARE_BITMAP(msgids, BCM4377_MAX_RING_SIZE); + spinlock_t lock; +}; + +/* + * A completion ring can be used either to acknowledge messages sent in + * the corresponding transfer ring or to receive messages associated with the + * transfer ring. When used to receive messages the transfer ring either + * has no ring buffer and is only advanced ("virtual transfer ring") or it + * only contains empty DMA buffers to be used for the payloads. + * + * ring_id: completion ring id, hardcoded in firmware + * payload_size: optional payload size after each entry + * delay: unknown delay + * n_entries: number of entries in this ring + * enabled: true once the ring has been created and can be used + * ring: ring buffer for entries (struct bcm4377_completion_ring_entry) + * ring_dma: DMA address of ring buffer + * transfer_rings: bitmap of corresponding transfer ring ids + */ +struct bcm4377_completion_ring { + enum bcm4377_completion_ring_id ring_id; + u16 payload_size; + u16 delay; + u16 n_entries; + bool enabled; + + void *ring; + dma_addr_t ring_dma; + + unsigned long transfer_rings; +}; + +struct bcm4377_data; + +/* + * Chip-specific configuration struct + * + * id: Chip id (e.g.
0x4377 for BCM4377) + * otp_offset: Offset to the start of the OTP inside BAR0 + * bar0_window1: Backplane address mapped to the first window in BAR0 + * bar0_window2: Backplane address mapped to the second window in BAR0 + * bar0_core2_window2: Optional backplane address mapped to the second core's + * second window in BAR0 + * has_bar0_core2_window2: Set to true if this chip requires the second core's + * second window to be configured + * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the + * vendor-specific subsystem control + * register has to be cleared + * disable_aspm: Set to true if ASPM must be disabled due to hardware errata + * broken_ext_scan: Set to true if the chip erroneously claims to support + * extended scanning + * broken_mws_transport_config: Set to true if the chip erroneously claims to + * support MWS Transport Configuration + * broken_le_coded: Set to true if the chip erroneously claims to support + * LE Coded PHYs + * send_calibration: Optional callback to send calibration data + * send_ptb: Callback to send "PTB" regulatory/calibration data + */ +struct bcm4377_hw { + unsigned int id; + + u32 otp_offset; + + u32 bar0_window1; + u32 bar0_window2; + u32 bar0_core2_window2; + + unsigned long has_bar0_core2_window2 : 1; + unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1; + unsigned long disable_aspm : 1; + unsigned long broken_ext_scan : 1; + unsigned long broken_mws_transport_config : 1; + unsigned long broken_le_coded : 1; + + int (*send_calibration)(struct bcm4377_data *bcm4377); + int (*send_ptb)(struct bcm4377_data *bcm4377, + const struct firmware *fw); +}; + +static const struct bcm4377_hw bcm4377_hw_variants[]; +static const struct dmi_system_id bcm4377_dmi_board_table[]; + +/* + * Private struct associated with each device containing global state + * + * pdev: Pointer to associated struct pci_dev + * hdev: Pointer to associated struct hci_dev + * bar0: iomem pointing to BAR0 + * bar2: iomem pointing to BAR2 + * bootstage: Current value of the bootstage + * rti_status: Current "RTI" status value + * hw: Pointer to chip-specific struct bcm4377_hw + * taurus_cal_blob: "Taurus" calibration blob used for some chips + * taurus_cal_size: "Taurus" calibration blob size + * taurus_beamforming_cal_blob: "Taurus" beamforming calibration blob used for + * some chips + * taurus_beamforming_cal_size: "Taurus" beamforming calibration blob size + * stepping: Chip stepping read from OTP; used for firmware selection + * vendor: Antenna vendor read from OTP; used for firmware selection + * board_type: Board type from FDT or DMI match; used for firmware selection + * event: Event for changed bootstage or rti_status; used for booting firmware + * ctx: "Converged IPC" context + * ctx_dma: "Converged IPC" context DMA address + * ring_state: Shared memory buffer containing ring head and tail indexes + * ring_state_dma: DMA address for ring_state + * {control,hci_acl,sco}_ack_ring: Completion rings used to acknowledge messages + * {hci_acl,sco}_event_ring: Completion rings used for device->host messages + * control_h2d_ring: Transfer ring used for control messages + * {hci,sco,acl}_h2d_ring: Transfer ring used to transfer HCI frames + * {hci,sco,acl}_d2h_ring: Transfer ring used to receive HCI frames in the + * corresponding completion ring + */ +struct bcm4377_data { + struct pci_dev *pdev; + struct hci_dev *hdev; + + void __iomem *bar0; + void __iomem *bar2; + + u32 bootstage; + u32 rti_status; + + const struct bcm4377_hw *hw; + + const void *taurus_cal_blob; + int taurus_cal_size; + const void *taurus_beamforming_cal_blob; + int
taurus_beamforming_cal_size; + + char stepping[BCM4377_OTP_MAX_PARAM_LEN]; + char vendor[BCM4377_OTP_MAX_PARAM_LEN]; + const char *board_type; + + struct completion event; + + struct bcm4377_context *ctx; + dma_addr_t ctx_dma; + + struct bcm4377_ring_state *ring_state; + dma_addr_t ring_state_dma; + + /* + * The HCI and ACL rings have to be merged because this structure is + * hardcoded in the firmware. + */ + struct bcm4377_completion_ring control_ack_ring; + struct bcm4377_completion_ring hci_acl_ack_ring; + struct bcm4377_completion_ring hci_acl_event_ring; + struct bcm4377_completion_ring sco_ack_ring; + struct bcm4377_completion_ring sco_event_ring; + + struct bcm4377_transfer_ring control_h2d_ring; + struct bcm4377_transfer_ring hci_h2d_ring; + struct bcm4377_transfer_ring hci_d2h_ring; + struct bcm4377_transfer_ring sco_h2d_ring; + struct bcm4377_transfer_ring sco_d2h_ring; + struct bcm4377_transfer_ring acl_h2d_ring; + struct bcm4377_transfer_ring acl_d2h_ring; +}; + +static void bcm4377_ring_doorbell(struct bcm4377_data *bcm4377, u8 doorbell, + u16 val) +{ + u32 db = 0; + + db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val); + db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, doorbell); + db |= BCM4377_BAR0_DOORBELL_RING; + + dev_dbg(&bcm4377->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val, + doorbell, db); + iowrite32(db, bcm4377->bar0 + BCM4377_BAR0_DOORBELL); +} + +static int bcm4377_extract_msgid(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, + u16 raw_msgid, u8 *msgid) +{ + u8 generation = FIELD_GET(BCM4377_MSGID_GENERATION, raw_msgid); + *msgid = FIELD_GET(BCM4377_MSGID_ID, raw_msgid); + + if (generation != ring->generation) { + dev_warn( + &bcm4377->pdev->dev, + "invalid message generation %d should be %d in entry for ring %d\n", + generation, ring->generation, ring->ring_id); + return -EINVAL; + } + + if (*msgid >= ring->n_entries) { + dev_warn(&bcm4377->pdev->dev, + "invalid message id in entry for ring %d: %d > %d\n", + ring->ring_id, *msgid, ring->n_entries); + return -EINVAL; + } + + return 0; +} + +static void bcm4377_handle_event(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, + u16 raw_msgid, u8 entry_flags, u8 type, + void *payload, size_t len) +{ + struct sk_buff *skb; + u16 head; + u8 msgid; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + if (!ring->enabled) { + dev_warn(&bcm4377->pdev->dev, + "event for disabled transfer ring %d\n", + ring->ring_id); + goto out; + } + + if (ring->d2h_buffers_only && + entry_flags & BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED) { + if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid)) + goto out; + + if (len > ring->mapped_payload_size) { + dev_warn( + &bcm4377->pdev->dev, + "invalid payload len in event for ring %d: %zu > %zu\n", + ring->ring_id, len, ring->mapped_payload_size); + goto out; + } + + payload = ring->payloads + msgid * ring->mapped_payload_size; + } + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + goto out; + + memcpy(skb_put(skb, len), payload, len); + hci_skb_pkt_type(skb) = type; + hci_recv_frame(bcm4377->hdev, skb); + +out: + head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]); + head = (head + 1) % ring->n_entries; + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head); + + bcm4377_ring_doorbell(bcm4377, ring->doorbell, head); + + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcm4377_handle_ack(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, + u16 raw_msgid) +{ + unsigned 
long flags; + u8 msgid; + + spin_lock_irqsave(&ring->lock, flags); + + if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid)) + goto unlock; + + if (!test_bit(msgid, ring->msgids)) { + dev_warn( + &bcm4377->pdev->dev, + "invalid message id in ack for ring %d: %d is not used\n", + ring->ring_id, msgid); + goto unlock; + } + + if (ring->allow_wait && ring->events[msgid]) { + complete(ring->events[msgid]); + ring->events[msgid] = NULL; + } + + bitmap_release_region(ring->msgids, msgid, ring->n_entries); + +unlock: + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcm4377_handle_completion(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring, + u16 pos) +{ + struct bcm4377_completion_ring_entry *entry; + u16 msg_id, transfer_ring; + size_t entry_size, data_len; + void *data; + + if (pos >= ring->n_entries) { + dev_warn(&bcm4377->pdev->dev, + "invalid offset %d for completion ring %d\n", pos, + ring->ring_id); + return; + } + + entry_size = sizeof(*entry) + ring->payload_size; + entry = ring->ring + pos * entry_size; + data = ring->ring + pos * entry_size + sizeof(*entry); + data_len = le32_to_cpu(entry->len); + msg_id = le16_to_cpu(entry->msg_id); + transfer_ring = le16_to_cpu(entry->ring_id); + + if ((ring->transfer_rings & BIT(transfer_ring)) == 0) { + dev_warn( + &bcm4377->pdev->dev, + "invalid entry at offset %d for transfer ring %d in completion ring %d\n", + pos, transfer_ring, ring->ring_id); + return; + } + + dev_dbg(&bcm4377->pdev->dev, + "entry in completion ring %d for transfer ring %d with msg_id %d\n", + ring->ring_id, transfer_ring, msg_id); + + switch (transfer_ring) { + case BCM4377_XFER_RING_CONTROL: + bcm4377_handle_ack(bcm4377, &bcm4377->control_h2d_ring, msg_id); + break; + case BCM4377_XFER_RING_HCI_H2D: + bcm4377_handle_ack(bcm4377, &bcm4377->hci_h2d_ring, msg_id); + break; + case BCM4377_XFER_RING_SCO_H2D: + bcm4377_handle_ack(bcm4377, &bcm4377->sco_h2d_ring, msg_id); + break; + case BCM4377_XFER_RING_ACL_H2D: + bcm4377_handle_ack(bcm4377, &bcm4377->acl_h2d_ring, msg_id); + break; + + case BCM4377_XFER_RING_HCI_D2H: + bcm4377_handle_event(bcm4377, &bcm4377->hci_d2h_ring, msg_id, + entry->flags, HCI_EVENT_PKT, data, + data_len); + break; + case BCM4377_XFER_RING_SCO_D2H: + bcm4377_handle_event(bcm4377, &bcm4377->sco_d2h_ring, msg_id, + entry->flags, HCI_SCODATA_PKT, data, + data_len); + break; + case BCM4377_XFER_RING_ACL_D2H: + bcm4377_handle_event(bcm4377, &bcm4377->acl_d2h_ring, msg_id, + entry->flags, HCI_ACLDATA_PKT, data, + data_len); + break; + + default: + dev_warn( + &bcm4377->pdev->dev, + "entry in completion ring %d for unknown transfer ring %d with msg_id %d\n", + ring->ring_id, transfer_ring, msg_id); + } +} + +static void bcm4377_poll_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + u16 tail; + __le16 *heads = bcm4377->ring_state->completion_ring_head; + __le16 *tails = bcm4377->ring_state->completion_ring_tail; + + if (!ring->enabled) + return; + + tail = le16_to_cpu(tails[ring->ring_id]); + dev_dbg(&bcm4377->pdev->dev, + "completion ring #%d: head: %d, tail: %d\n", ring->ring_id, + le16_to_cpu(heads[ring->ring_id]), tail); + + while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) { + /* + * ensure the CPU doesn't speculate through the comparison. + * otherwise it might already read the (empty) queue entry + * before the updated head has been loaded and checked. 
+ */ + dma_rmb(); + + bcm4377_handle_completion(bcm4377, ring, tail); + + tail = (tail + 1) % ring->n_entries; + tails[ring->ring_id] = cpu_to_le16(tail); + } +} + +static irqreturn_t bcm4377_irq(int irq, void *data) +{ + struct bcm4377_data *bcm4377 = data; + u32 bootstage, rti_status; + + bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); + + if (bootstage != bcm4377->bootstage || + rti_status != bcm4377->rti_status) { + dev_dbg(&bcm4377->pdev->dev, + "bootstage = %d -> %d, rti state = %d -> %d\n", + bcm4377->bootstage, bootstage, bcm4377->rti_status, + rti_status); + complete(&bcm4377->event); + bcm4377->bootstage = bootstage; + bcm4377->rti_status = rti_status; + } + + if (rti_status > 2) + dev_err(&bcm4377->pdev->dev, "RTI status is %d\n", rti_status); + + bcm4377_poll_completion_ring(bcm4377, &bcm4377->control_ack_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_event_ring); + + return IRQ_HANDLED; +} + +static int bcm4377_enqueue(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, void *data, + size_t len, bool wait) +{ + unsigned long flags; + struct bcm4377_xfer_ring_entry *entry; + void *payload; + size_t offset; + u16 head, tail, new_head; + u16 raw_msgid; + int ret, msgid; + DECLARE_COMPLETION_ONSTACK(event); + + if (len > ring->payload_size && len > ring->mapped_payload_size) { + dev_warn( + &bcm4377->pdev->dev, + "payload len %zu is too large for ring %d (max is %zu or %zu)\n", + len, ring->ring_id, ring->payload_size, + ring->mapped_payload_size); + return -EINVAL; + } + if (wait && !ring->allow_wait) + return -EINVAL; + if (ring->virtual) + return -EINVAL; + + spin_lock_irqsave(&ring->lock, flags); + + head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]); + tail = le16_to_cpu(bcm4377->ring_state->xfer_ring_tail[ring->ring_id]); + + new_head = (head + 1) % ring->n_entries; + + if (new_head == tail) { + dev_warn(&bcm4377->pdev->dev, + "can't send message because ring %d is full\n", + ring->ring_id); + ret = -EINVAL; + goto out; + } + + msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0); + if (msgid < 0) { + dev_warn(&bcm4377->pdev->dev, + "can't find message id for ring %d\n", ring->ring_id); + ret = -EINVAL; + goto out; + } + + raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation); + raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, msgid); + + offset = head * (sizeof(*entry) + ring->payload_size); + entry = ring->ring + offset; + + memset(entry, 0, sizeof(*entry)); + entry->id = cpu_to_le16(raw_msgid); + entry->len = cpu_to_le16(len); + + if (len <= ring->payload_size) { + entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER; + payload = ring->ring + offset + sizeof(*entry); + } else { + entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED; + entry->payload = cpu_to_le64(ring->payloads_dma + + msgid * ring->mapped_payload_size); + payload = ring->payloads + msgid * ring->mapped_payload_size; + } + + memcpy(payload, data, len); + + if (wait) + ring->events[msgid] = &event; + + /* + * The 4377 chips stop responding to any commands as soon as they + * have been idle for a while. Poking the sleep control register here + * makes them come alive again. 
+ */ + iowrite32(BCM4377_BAR0_SLEEP_CONTROL_AWAKE, + bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); + + dev_dbg(&bcm4377->pdev->dev, + "updating head for transfer queue #%d to %d\n", ring->ring_id, + new_head); + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = + cpu_to_le16(new_head); + + if (!ring->sync) + bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head); + ret = 0; + +out: + spin_unlock_irqrestore(&ring->lock, flags); + + if (ret == 0 && wait) { + ret = wait_for_completion_interruptible_timeout( + &event, BCM4377_TIMEOUT); + if (ret == 0) + ret = -ETIMEDOUT; + else if (ret > 0) + ret = 0; + + spin_lock_irqsave(&ring->lock, flags); + ring->events[msgid] = NULL; + spin_unlock_irqrestore(&ring->lock, flags); + } + + return ret; +} + +static int bcm4377_create_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + struct bcm4377_create_completion_ring_msg msg; + int ret; + + if (ring->enabled) { + dev_warn(&bcm4377->pdev->dev, + "completion ring %d already enabled\n", ring->ring_id); + return 0; + } + + memset(ring->ring, 0, + ring->n_entries * (sizeof(struct bcm4377_completion_ring_entry) + + ring->payload_size)); + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING; + msg.id = cpu_to_le16(ring->ring_id); + msg.id_again = cpu_to_le16(ring->ring_id); + msg.ring_iova = cpu_to_le64(ring->ring_dma); + msg.n_elements = cpu_to_le16(ring->n_entries); + msg.intmod_bytes = cpu_to_le32(0xffffffff); + msg.unk = cpu_to_le32(0xffffffff); + msg.intmod_delay = cpu_to_le16(ring->delay); + msg.footer_size = ring->payload_size / 4; + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + if (!ret) + ring->enabled = true; + + return ret; +} + +static int bcm4377_destroy_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + struct bcm4377_destroy_completion_ring_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + if (ret) + dev_warn(&bcm4377->pdev->dev, + "failed to destroy completion ring %d\n", + ring->ring_id); + + ring->enabled = false; + return ret; +} + +static int bcm4377_create_transfer_ring(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring) +{ + struct bcm4377_create_transfer_ring_msg msg; + u16 flags = 0; + int ret, i; + unsigned long spinlock_flags; + + if (ring->virtual) + flags |= BCM4377_XFER_RING_FLAG_VIRTUAL; + if (ring->sync) + flags |= BCM4377_XFER_RING_FLAG_SYNC; + + spin_lock_irqsave(&ring->lock, spinlock_flags); + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_CREATE_XFER_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + msg.ring_id_again = cpu_to_le16(ring->ring_id); + msg.ring_iova = cpu_to_le64(ring->ring_dma); + msg.n_elements = cpu_to_le16(ring->n_entries); + msg.completion_ring_id = cpu_to_le16(ring->completion_ring); + msg.doorbell = cpu_to_le16(ring->doorbell); + msg.flags = cpu_to_le16(flags); + msg.footer_size = ring->payload_size / 4; + + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = 0; + bcm4377->ring_state->xfer_ring_tail[ring->ring_id] = 0; + ring->generation++; + spin_unlock_irqrestore(&ring->lock, spinlock_flags); + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + + spin_lock_irqsave(&ring->lock, spinlock_flags); + + if 
(ring->d2h_buffers_only) { + for (i = 0; i < ring->n_entries; ++i) { + struct bcm4377_xfer_ring_entry *entry = + ring->ring + i * sizeof(*entry); + u16 raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, + ring->generation); + raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, i); + + memset(entry, 0, sizeof(*entry)); + entry->id = cpu_to_le16(raw_msgid); + entry->len = cpu_to_le16(ring->mapped_payload_size); + entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED; + entry->payload = + cpu_to_le64(ring->payloads_dma + + i * ring->mapped_payload_size); + } + } + + /* + * send some messages if this is a device->host ring to allow the device + * to reply by acknowledging them in the completion ring + */ + if (ring->virtual || ring->d2h_buffers_only) { + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = + cpu_to_le16(0xf); + bcm4377_ring_doorbell(bcm4377, ring->doorbell, 0xf); + } + + ring->enabled = true; + spin_unlock_irqrestore(&ring->lock, spinlock_flags); + + return ret; +} + +static int bcm4377_destroy_transfer_ring(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring) +{ + struct bcm4377_destroy_transfer_ring_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_XFER_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + if (ret) + dev_warn(&bcm4377->pdev->dev, + "failed to destroy transfer ring %d\n", ring->ring_id); + + ring->enabled = false; + return ret; +} + +static int __bcm4378_send_calibration_chunk(struct bcm4377_data *bcm4377, + const void *data, size_t data_len, + u16 blocks_left) +{ + struct bcm4378_hci_send_calibration_cmd cmd; + struct sk_buff *skb; + + if (data_len > sizeof(cmd.data)) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.unk = 0x03; + cmd.blocks_left = cpu_to_le16(blocks_left); + memcpy(cmd.data, data, data_len); + + skb = __hci_cmd_sync(bcm4377->hdev, 0xfd97, sizeof(cmd), &cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int __bcm4378_send_calibration(struct bcm4377_data *bcm4377, + const void *data, size_t data_size) +{ + int ret; + size_t i, left, transfer_len; + size_t blocks = + DIV_ROUND_UP(data_size, (size_t)BCM4378_CALIBRATION_CHUNK_SIZE); + + if (!data) { + dev_err(&bcm4377->pdev->dev, + "no calibration data available.\n"); + return -ENOENT; + } + + for (i = 0, left = data_size; i < blocks; ++i, left -= transfer_len) { + transfer_len = + min_t(size_t, left, BCM4378_CALIBRATION_CHUNK_SIZE); + + ret = __bcm4378_send_calibration_chunk( + bcm4377, data + i * BCM4378_CALIBRATION_CHUNK_SIZE, + transfer_len, blocks - i - 1); + if (ret) { + dev_err(&bcm4377->pdev->dev, + "send calibration chunk failed with %d\n", ret); + return ret; + } + } + + return 0; +} + +static int bcm4378_send_calibration(struct bcm4377_data *bcm4377) +{ + if ((strcmp(bcm4377->stepping, "b1") == 0) || + strcmp(bcm4377->stepping, "b3") == 0) + return __bcm4378_send_calibration( + bcm4377, bcm4377->taurus_beamforming_cal_blob, + bcm4377->taurus_beamforming_cal_size); + else + return __bcm4378_send_calibration(bcm4377, + bcm4377->taurus_cal_blob, + bcm4377->taurus_cal_size); +} + +static int bcm4387_send_calibration(struct bcm4377_data *bcm4377) +{ + if (strcmp(bcm4377->stepping, "c2") == 0) + return __bcm4378_send_calibration( + bcm4377, bcm4377->taurus_beamforming_cal_blob, + bcm4377->taurus_beamforming_cal_size); + else + return __bcm4378_send_calibration(bcm4377, + 
bcm4377->taurus_cal_blob, + bcm4377->taurus_cal_size); +} + +static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377, + const char *suffix) +{ + const struct firmware *fw; + char name0[64], name1[64]; + int ret; + + snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s", + bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type, + bcm4377->vendor, suffix); + snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s", + bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type, + suffix); + dev_dbg(&bcm4377->pdev->dev, "Trying to load firmware: '%s' or '%s'\n", + name0, name1); + + ret = firmware_request_nowarn(&fw, name0, &bcm4377->pdev->dev); + if (!ret) + return fw; + ret = firmware_request_nowarn(&fw, name1, &bcm4377->pdev->dev); + if (!ret) + return fw; + + dev_err(&bcm4377->pdev->dev, + "Unable to load firmware; tried '%s' and '%s'\n", name0, name1); + return NULL; +} + +static int bcm4377_send_ptb(struct bcm4377_data *bcm4377, + const struct firmware *fw) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(bcm4377->hdev, 0xfd98, fw->size, fw->data, + HCI_INIT_TIMEOUT); + /* + * This command seems to always fail on more recent firmware versions + * (even in traces taken from the macOS driver). It's unclear why this + * happens but because the PTB file contains calibration and/or + * regulatory data and may be required on older firmware we still try to + * send it here just in case and just ignore if it fails. + */ + if (!IS_ERR(skb)) + kfree_skb(skb); + return 0; +} + +static int bcm4378_send_ptb_chunk(struct bcm4377_data *bcm4377, + const void *data, size_t data_len, + u16 blocks_left) +{ + struct bcm4378_hci_send_ptb_cmd cmd; + struct sk_buff *skb; + + if (data_len > BCM4378_PTB_CHUNK_SIZE) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.blocks_left = cpu_to_le16(blocks_left); + memcpy(cmd.data, data, data_len); + + skb = __hci_cmd_sync(bcm4377->hdev, 0xfe0d, sizeof(cmd), &cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int bcm4378_send_ptb(struct bcm4377_data *bcm4377, + const struct firmware *fw) +{ + size_t chunks = DIV_ROUND_UP(fw->size, (size_t)BCM4378_PTB_CHUNK_SIZE); + size_t i, left, transfer_len; + int ret; + + for (i = 0, left = fw->size; i < chunks; ++i, left -= transfer_len) { + transfer_len = min_t(size_t, left, BCM4378_PTB_CHUNK_SIZE); + + dev_dbg(&bcm4377->pdev->dev, "sending ptb chunk %zu/%zu\n", + i + 1, chunks); + ret = bcm4378_send_ptb_chunk( + bcm4377, fw->data + i * BCM4378_PTB_CHUNK_SIZE, + transfer_len, chunks - i - 1); + if (ret) { + dev_err(&bcm4377->pdev->dev, + "sending ptb chunk %zu failed (%d)", i, ret); + return ret; + } + } + + return 0; +} + +static int bcm4377_hci_open(struct hci_dev *hdev) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + int ret; + + dev_dbg(&bcm4377->pdev->dev, "creating rings\n"); + + ret = bcm4377_create_completion_ring(bcm4377, + &bcm4377->hci_acl_ack_ring); + if (ret) + return ret; + ret = bcm4377_create_completion_ring(bcm4377, + &bcm4377->hci_acl_event_ring); + if (ret) + goto destroy_hci_acl_ack; + ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + if (ret) + goto destroy_hci_acl_event; + ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_event_ring); + if (ret) + goto destroy_sco_ack; + dev_dbg(&bcm4377->pdev->dev, + "all completion rings successfully created!\n"); + + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); + if (ret) + goto destroy_sco_event; + ret = 
bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); + if (ret) + goto destroy_hci_h2d; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); + if (ret) + goto destroy_hci_d2h; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); + if (ret) + goto destroy_sco_h2d; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); + if (ret) + goto destroy_sco_d2h; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); + if (ret) + goto destroy_acl_h2d; + dev_dbg(&bcm4377->pdev->dev, + "all transfer rings successfully created!\n"); + + return 0; + +destroy_acl_h2d: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); +destroy_sco_d2h: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); +destroy_sco_h2d: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); +destroy_hci_d2h: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); +destroy_hci_h2d: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); +destroy_sco_event: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring); +destroy_sco_ack: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring); +destroy_hci_acl_event: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); +destroy_hci_acl_ack: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); + + dev_err(&bcm4377->pdev->dev, "Creating rings failed with %d\n", ret); + return ret; +} + +static int bcm4377_hci_close(struct hci_dev *hdev) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + + dev_dbg(&bcm4377->pdev->dev, "destroying rings in hci_close\n"); + + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); + + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring); + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); + + return 0; +} + +static bool bcm4377_is_valid_bdaddr(struct bcm4377_data *bcm4377, + bdaddr_t *addr) +{ + if (addr->b[0] != 0x93) + return true; + if (addr->b[1] != 0x76) + return true; + if (addr->b[2] != 0x00) + return true; + if (addr->b[4] != (bcm4377->hw->id & 0xff)) + return true; + if (addr->b[5] != (bcm4377->hw->id >> 8)) + return true; + return false; +} + +static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377) +{ + struct hci_rp_read_bd_addr *bda; + struct sk_buff *skb; + + skb = __hci_cmd_sync(bcm4377->hdev, HCI_OP_READ_BD_ADDR, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + + dev_err(&bcm4377->pdev->dev, "HCI_OP_READ_BD_ADDR failed (%d)", + err); + return err; + } + + if (skb->len != sizeof(*bda)) { + dev_err(&bcm4377->pdev->dev, + "HCI_OP_READ_BD_ADDR reply length invalid"); + kfree_skb(skb); + return -EIO; + } + + bda = (struct hci_rp_read_bd_addr *)skb->data; + if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr)) + set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks); + + kfree_skb(skb); + return 0; +} + +static int bcm4377_hci_setup(struct hci_dev *hdev) +{ + struct bcm4377_data *bcm4377 = 
hci_get_drvdata(hdev); + const struct firmware *fw; + int ret; + + if (bcm4377->hw->send_calibration) { + ret = bcm4377->hw->send_calibration(bcm4377); + if (ret) + return ret; + } + + fw = bcm4377_request_blob(bcm4377, "ptb"); + if (!fw) { + dev_err(&bcm4377->pdev->dev, "failed to load PTB data"); + return -ENOENT; + } + + ret = bcm4377->hw->send_ptb(bcm4377, fw); + release_firmware(fw); + if (ret) + return ret; + + return bcm4377_check_bdaddr(bcm4377); +} + +static int bcm4377_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + struct bcm4377_transfer_ring *ring; + int ret; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + ring = &bcm4377->hci_h2d_ring; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + ring = &bcm4377->acl_h2d_ring; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + ring = &bcm4377->sco_h2d_ring; + break; + + default: + return -EILSEQ; + } + + ret = bcm4377_enqueue(bcm4377, ring, skb->data, skb->len, false); + if (ret < 0) { + hdev->stat.err_tx++; + return ret; + } + + hdev->stat.byte_tx += skb->len; + kfree_skb(skb); + return ret; +} + +static int bcm4377_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + dev_err(&bcm4377->pdev->dev, + "Change address command failed (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} + +static int bcm4377_alloc_transfer_ring(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring) +{ + size_t entry_size; + + spin_lock_init(&ring->lock); + ring->payload_size = ALIGN(ring->payload_size, 4); + ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4); + + if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) + return -EINVAL; + if (ring->n_entries > BCM4377_MAX_RING_SIZE) + return -EINVAL; + if (ring->virtual && ring->allow_wait) + return -EINVAL; + + if (ring->d2h_buffers_only) { + if (ring->virtual) + return -EINVAL; + if (ring->payload_size) + return -EINVAL; + if (!ring->mapped_payload_size) + return -EINVAL; + } + if (ring->virtual) + return 0; + + entry_size = + ring->payload_size + sizeof(struct bcm4377_xfer_ring_entry); + ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev, + ring->n_entries * entry_size, + &ring->ring_dma, GFP_KERNEL); + if (!ring->ring) + return -ENOMEM; + + if (ring->allow_wait) { + ring->events = devm_kcalloc(&bcm4377->pdev->dev, + ring->n_entries, + sizeof(*ring->events), GFP_KERNEL); + if (!ring->events) + return -ENOMEM; + } + + if (ring->mapped_payload_size) { + ring->payloads = dmam_alloc_coherent( + &bcm4377->pdev->dev, + ring->n_entries * ring->mapped_payload_size, + &ring->payloads_dma, GFP_KERNEL); + if (!ring->payloads) + return -ENOMEM; + } + + return 0; +} + +static int bcm4377_alloc_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + size_t entry_size; + + ring->payload_size = ALIGN(ring->payload_size, 4); + if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) + return -EINVAL; + if (ring->n_entries > BCM4377_MAX_RING_SIZE) + return -EINVAL; + + entry_size = ring->payload_size + + sizeof(struct bcm4377_completion_ring_entry); + + ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev, + ring->n_entries * entry_size, + &ring->ring_dma, GFP_KERNEL); + if (!ring->ring) + 
return -ENOMEM; + return 0; +} + +static int bcm4377_init_context(struct bcm4377_data *bcm4377) +{ + struct device *dev = &bcm4377->pdev->dev; + dma_addr_t peripheral_info_dma; + + bcm4377->ctx = dmam_alloc_coherent(dev, sizeof(*bcm4377->ctx), + &bcm4377->ctx_dma, GFP_KERNEL); + if (!bcm4377->ctx) + return -ENOMEM; + memset(bcm4377->ctx, 0, sizeof(*bcm4377->ctx)); + + bcm4377->ring_state = + dmam_alloc_coherent(dev, sizeof(*bcm4377->ring_state), + &bcm4377->ring_state_dma, GFP_KERNEL); + if (!bcm4377->ring_state) + return -ENOMEM; + memset(bcm4377->ring_state, 0, sizeof(*bcm4377->ring_state)); + + bcm4377->ctx->version = cpu_to_le16(1); + bcm4377->ctx->size = cpu_to_le16(sizeof(*bcm4377->ctx)); + bcm4377->ctx->enabled_caps = cpu_to_le32(2); + + /* + * The BT device will write 0x20 bytes of data to this buffer but + * the exact contents are unknown. It only needs to exist for BT + * to work such that we can just allocate and then ignore it. + */ + if (!dmam_alloc_coherent(&bcm4377->pdev->dev, 0x20, + &peripheral_info_dma, GFP_KERNEL)) + return -ENOMEM; + bcm4377->ctx->peripheral_info_addr = cpu_to_le64(peripheral_info_dma); + + bcm4377->ctx->xfer_ring_heads_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, xfer_ring_head)); + bcm4377->ctx->xfer_ring_tails_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, xfer_ring_tail)); + bcm4377->ctx->completion_ring_heads_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, completion_ring_head)); + bcm4377->ctx->completion_ring_tails_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, completion_ring_tail)); + + bcm4377->ctx->n_completion_rings = + cpu_to_le16(BCM4377_N_COMPLETION_RINGS); + bcm4377->ctx->n_xfer_rings = cpu_to_le16(BCM4377_N_TRANSFER_RINGS); + + bcm4377->ctx->control_completion_ring_addr = + cpu_to_le64(bcm4377->control_ack_ring.ring_dma); + bcm4377->ctx->control_completion_ring_n_entries = + cpu_to_le16(bcm4377->control_ack_ring.n_entries); + bcm4377->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff); + bcm4377->ctx->control_completion_ring_msi = 0; + bcm4377->ctx->control_completion_ring_header_size = 0; + bcm4377->ctx->control_completion_ring_footer_size = 0; + + bcm4377->ctx->control_xfer_ring_addr = + cpu_to_le64(bcm4377->control_h2d_ring.ring_dma); + bcm4377->ctx->control_xfer_ring_n_entries = + cpu_to_le16(bcm4377->control_h2d_ring.n_entries); + bcm4377->ctx->control_xfer_ring_doorbell = + cpu_to_le16(bcm4377->control_h2d_ring.doorbell); + bcm4377->ctx->control_xfer_ring_msi = 0; + bcm4377->ctx->control_xfer_ring_header_size = 0; + bcm4377->ctx->control_xfer_ring_footer_size = + bcm4377->control_h2d_ring.payload_size / 4; + + dev_dbg(&bcm4377->pdev->dev, "context initialized at IOVA %pad", + &bcm4377->ctx_dma); + + return 0; +} + +static int bcm4377_prepare_rings(struct bcm4377_data *bcm4377) +{ + int ret; + + /* + * Even though many of these settings appear to be configurable + * when sending the "create ring" messages most of these are + * actually hardcoded in some (and quite possibly all) firmware versions + * and changing them on the host has no effect. + * Specifically, this applies to at least the doorbells, the transfer + * and completion ring ids and their mapping (e.g. both HCI and ACL + * entries will always be queued in completion rings 1 and 2 no matter + * what we configure here). 
+ */ + bcm4377->control_ack_ring.ring_id = BCM4377_ACK_RING_CONTROL; + bcm4377->control_ack_ring.n_entries = 32; + bcm4377->control_ack_ring.transfer_rings = + BIT(BCM4377_XFER_RING_CONTROL); + + bcm4377->hci_acl_ack_ring.ring_id = BCM4377_ACK_RING_HCI_ACL; + bcm4377->hci_acl_ack_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES; + bcm4377->hci_acl_ack_ring.transfer_rings = + BIT(BCM4377_XFER_RING_HCI_H2D) | BIT(BCM4377_XFER_RING_ACL_H2D); + bcm4377->hci_acl_ack_ring.delay = 1000; + + /* + * A payload size of MAX_EVENT_PAYLOAD_SIZE is enough here since large + * ACL packets will be transmitted inside buffers mapped via + * acl_d2h_ring anyway. + */ + bcm4377->hci_acl_event_ring.ring_id = BCM4377_EVENT_RING_HCI_ACL; + bcm4377->hci_acl_event_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE; + bcm4377->hci_acl_event_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES; + bcm4377->hci_acl_event_ring.transfer_rings = + BIT(BCM4377_XFER_RING_HCI_D2H) | BIT(BCM4377_XFER_RING_ACL_D2H); + bcm4377->hci_acl_event_ring.delay = 1000; + + bcm4377->sco_ack_ring.ring_id = BCM4377_ACK_RING_SCO; + bcm4377->sco_ack_ring.n_entries = BCM4377_RING_N_ENTRIES; + bcm4377->sco_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_H2D); + + bcm4377->sco_event_ring.ring_id = BCM4377_EVENT_RING_SCO; + bcm4377->sco_event_ring.payload_size = MAX_SCO_PAYLOAD_SIZE; + bcm4377->sco_event_ring.n_entries = BCM4377_RING_N_ENTRIES; + bcm4377->sco_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_D2H); + + bcm4377->control_h2d_ring.ring_id = BCM4377_XFER_RING_CONTROL; + bcm4377->control_h2d_ring.doorbell = BCM4377_DOORBELL_CONTROL; + bcm4377->control_h2d_ring.payload_size = BCM4377_CONTROL_MSG_SIZE; + bcm4377->control_h2d_ring.completion_ring = BCM4377_ACK_RING_CONTROL; + bcm4377->control_h2d_ring.allow_wait = true; + bcm4377->control_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->hci_h2d_ring.ring_id = BCM4377_XFER_RING_HCI_H2D; + bcm4377->hci_h2d_ring.doorbell = BCM4377_DOORBELL_HCI_H2D; + bcm4377->hci_h2d_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE; + bcm4377->hci_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL; + bcm4377->hci_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->hci_d2h_ring.ring_id = BCM4377_XFER_RING_HCI_D2H; + bcm4377->hci_d2h_ring.doorbell = BCM4377_DOORBELL_HCI_D2H; + bcm4377->hci_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL; + bcm4377->hci_d2h_ring.virtual = true; + bcm4377->hci_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->sco_h2d_ring.ring_id = BCM4377_XFER_RING_SCO_H2D; + bcm4377->sco_h2d_ring.doorbell = BCM4377_DOORBELL_SCO; + bcm4377->sco_h2d_ring.payload_size = MAX_SCO_PAYLOAD_SIZE; + bcm4377->sco_h2d_ring.completion_ring = BCM4377_ACK_RING_SCO; + bcm4377->sco_h2d_ring.sync = true; + bcm4377->sco_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->sco_d2h_ring.ring_id = BCM4377_XFER_RING_SCO_D2H; + bcm4377->sco_d2h_ring.doorbell = BCM4377_DOORBELL_SCO; + bcm4377->sco_d2h_ring.completion_ring = BCM4377_EVENT_RING_SCO; + bcm4377->sco_d2h_ring.virtual = true; + bcm4377->sco_d2h_ring.sync = true; + bcm4377->sco_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; + + /* + * This ring has to use mapped_payload_size because the largest ACL + * packet doesn't fit inside the largest possible footer + */ + bcm4377->acl_h2d_ring.ring_id = BCM4377_XFER_RING_ACL_H2D; + bcm4377->acl_h2d_ring.doorbell = BCM4377_DOORBELL_ACL_H2D; + bcm4377->acl_h2d_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE; + bcm4377->acl_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL; + 
bcm4377->acl_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + /* + * This ring only contains empty buffers to be used by incoming + * ACL packets that do not fit inside the footer of hci_acl_event_ring + */ + bcm4377->acl_d2h_ring.ring_id = BCM4377_XFER_RING_ACL_D2H; + bcm4377->acl_d2h_ring.doorbell = BCM4377_DOORBELL_ACL_D2H; + bcm4377->acl_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL; + bcm4377->acl_d2h_ring.d2h_buffers_only = true; + bcm4377->acl_d2h_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE; + bcm4377->acl_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; + + /* + * no need for any cleanup since this is only called from _probe + * and only devres-managed allocations are used + */ + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->control_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); + if (ret) + return ret; + + ret = bcm4377_alloc_completion_ring(bcm4377, + &bcm4377->control_ack_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, + &bcm4377->hci_acl_ack_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, + &bcm4377->hci_acl_event_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_event_ring); + if (ret) + return ret; + + dev_dbg(&bcm4377->pdev->dev, "all rings allocated and prepared\n"); + + return 0; +} + +static int bcm4377_boot(struct bcm4377_data *bcm4377) +{ + const struct firmware *fw; + void *bfr; + dma_addr_t fw_dma; + int ret = 0; + u32 bootstage, rti_status; + + bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); + + if (bootstage != 0) { + dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n", + bootstage); + return -EINVAL; + } + + if (rti_status != 0) { + dev_err(&bcm4377->pdev->dev, "RTI status is %d and not 0\n", + rti_status); + return -EINVAL; + } + + fw = bcm4377_request_blob(bcm4377, "bin"); + if (!fw) { + dev_err(&bcm4377->pdev->dev, "Failed to load firmware\n"); + return -ENOENT; + } + + bfr = dma_alloc_coherent(&bcm4377->pdev->dev, fw->size, &fw_dma, + GFP_KERNEL); + if (!bfr) { + ret = -ENOMEM; + goto out_release_fw; + } + + memcpy(bfr, fw->data, fw->size); + + iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_LO); + iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_HI); + iowrite32(BCM4377_DMA_MASK, + bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE); + + iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO); + iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI); + iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE); + iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL); + + dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n"); + + ret = wait_for_completion_interruptible_timeout(&bcm4377->event, + BCM4377_TIMEOUT); + if (ret == 0) { + ret = -ETIMEDOUT; + goto out_dma_free; + } else if (ret < 0) { + 
goto out_dma_free; + } + + if (bcm4377->bootstage != 2) { + dev_err(&bcm4377->pdev->dev, "boostage %d != 2\n", + bcm4377->bootstage); + ret = -ENXIO; + goto out_dma_free; + } + + dev_dbg(&bcm4377->pdev->dev, "firmware has booted (stage = %x)\n", + bcm4377->bootstage); + ret = 0; + +out_dma_free: + dma_free_coherent(&bcm4377->pdev->dev, fw->size, bfr, fw_dma); +out_release_fw: + release_firmware(fw); + return ret; +} + +static int bcm4377_setup_rti(struct bcm4377_data *bcm4377) +{ + int ret; + + dev_dbg(&bcm4377->pdev->dev, "starting RTI\n"); + iowrite32(1, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); + + ret = wait_for_completion_interruptible_timeout(&bcm4377->event, + BCM4377_TIMEOUT); + if (ret == 0) { + dev_err(&bcm4377->pdev->dev, + "timed out while waiting for RTI to transition to state 1"); + return -ETIMEDOUT; + } else if (ret < 0) { + return ret; + } + + if (bcm4377->rti_status != 1) { + dev_err(&bcm4377->pdev->dev, "RTI did not ack state 1 (%d)\n", + bcm4377->rti_status); + return -ENODEV; + } + dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n"); + + /* allow access to the entire IOVA space again */ + iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO); + iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI); + iowrite32(BCM4377_DMA_MASK, + bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE); + + /* setup "Converged IPC" context */ + iowrite32(lower_32_bits(bcm4377->ctx_dma), + bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO); + iowrite32(upper_32_bits(bcm4377->ctx_dma), + bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI); + iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); + + ret = wait_for_completion_interruptible_timeout(&bcm4377->event, + BCM4377_TIMEOUT); + if (ret == 0) { + dev_err(&bcm4377->pdev->dev, + "timed out while waiting for RTI to transition to state 2"); + return -ETIMEDOUT; + } else if (ret < 0) { + return ret; + } + + if (bcm4377->rti_status != 2) { + dev_err(&bcm4377->pdev->dev, "RTI did not ack state 2 (%d)\n", + bcm4377->rti_status); + return -ENODEV; + } + + dev_dbg(&bcm4377->pdev->dev, + "RTI is in state 2; control ring is ready\n"); + bcm4377->control_ack_ring.enabled = true; + + return 0; +} + +static int bcm4377_parse_otp_board_params(struct bcm4377_data *bcm4377, + char tag, const char *val, size_t len) +{ + if (tag != 'V') + return 0; + if (len >= sizeof(bcm4377->vendor)) + return -EINVAL; + + strscpy(bcm4377->vendor, val, len + 1); + return 0; +} + +static int bcm4377_parse_otp_chip_params(struct bcm4377_data *bcm4377, char tag, + const char *val, size_t len) +{ + size_t idx = 0; + + if (tag != 's') + return 0; + if (len >= sizeof(bcm4377->stepping)) + return -EINVAL; + + while (len != 0) { + bcm4377->stepping[idx] = tolower(val[idx]); + if (val[idx] == '\0') + return 0; + + idx++; + len--; + } + + bcm4377->stepping[idx] = '\0'; + return 0; +} + +static int bcm4377_parse_otp_str(struct bcm4377_data *bcm4377, const u8 *str, + enum bcm4377_otp_params_type type) +{ + const char *p; + int ret; + + p = skip_spaces(str); + while (*p) { + char tag = *p++; + const char *end; + size_t len; + + if (*p++ != '=') /* implicit NUL check */ + return -EINVAL; + + /* *p might be NUL here, if so end == p and len == 0 */ + end = strchrnul(p, ' '); + len = end - p; + + /* leave 1 byte for NUL in destination string */ + if (len > (BCM4377_OTP_MAX_PARAM_LEN - 1)) + return -EINVAL; + + switch (type) { + case BCM4377_OTP_BOARD_PARAMS: + ret = bcm4377_parse_otp_board_params(bcm4377, tag, p, + len); + break; + case BCM4377_OTP_CHIP_PARAMS: + ret = 
bcm4377_parse_otp_chip_params(bcm4377, tag, p, + len); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + return ret; + + /* Skip to next arg, if any */ + p = skip_spaces(end); + } + + return 0; +} + +static int bcm4377_parse_otp_sys_vendor(struct bcm4377_data *bcm4377, u8 *otp, + size_t size) +{ + int idx = 4; + const char *chip_params; + const char *board_params; + int ret; + + /* 4-byte header and two empty strings */ + if (size < 6) + return -EINVAL; + + if (get_unaligned_le32(otp) != BCM4377_OTP_VENDOR_HDR) + return -EINVAL; + + chip_params = &otp[idx]; + + /* Skip first string, including terminator */ + idx += strnlen(chip_params, size - idx) + 1; + if (idx >= size) + return -EINVAL; + + board_params = &otp[idx]; + + /* Skip to terminator of second string */ + idx += strnlen(board_params, size - idx); + if (idx >= size) + return -EINVAL; + + /* At this point both strings are guaranteed NUL-terminated */ + dev_dbg(&bcm4377->pdev->dev, + "OTP: chip_params='%s' board_params='%s'\n", chip_params, + board_params); + + ret = bcm4377_parse_otp_str(bcm4377, chip_params, + BCM4377_OTP_CHIP_PARAMS); + if (ret) + return ret; + + ret = bcm4377_parse_otp_str(bcm4377, board_params, + BCM4377_OTP_BOARD_PARAMS); + if (ret) + return ret; + + if (!bcm4377->stepping[0] || !bcm4377->vendor[0]) + return -EINVAL; + + dev_dbg(&bcm4377->pdev->dev, "OTP: stepping=%s, vendor=%s\n", + bcm4377->stepping, bcm4377->vendor); + return 0; +} + +static int bcm4377_parse_otp(struct bcm4377_data *bcm4377) +{ + u8 *otp; + int i; + int ret = -ENOENT; + + otp = kzalloc(BCM4377_OTP_SIZE, GFP_KERNEL); + if (!otp) + return -ENOMEM; + + for (i = 0; i < BCM4377_OTP_SIZE; ++i) + otp[i] = ioread8(bcm4377->bar0 + bcm4377->hw->otp_offset + i); + + i = 0; + while (i < (BCM4377_OTP_SIZE - 1)) { + u8 type = otp[i]; + u8 length = otp[i + 1]; + + if (type == 0) + break; + + if ((i + 2 + length) > BCM4377_OTP_SIZE) + break; + + switch (type) { + case BCM4377_OTP_SYS_VENDOR: + dev_dbg(&bcm4377->pdev->dev, + "OTP @ 0x%x (%d): SYS_VENDOR", i, length); + ret = bcm4377_parse_otp_sys_vendor(bcm4377, &otp[i + 2], + length); + break; + case BCM4377_OTP_CIS: + dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): CIS", i, + length); + break; + default: + dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): unknown", + i, length); + break; + } + + i += 2 + length; + } + + kfree(otp); + return ret; +} + +static int bcm4377_init_cfg(struct bcm4377_data *bcm4377) +{ + int ret; + u32 ctrl; + + ret = pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_BAR0_WINDOW1, + bcm4377->hw->bar0_window1); + if (ret) + return ret; + + ret = pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_BAR0_WINDOW2, + bcm4377->hw->bar0_window2); + if (ret) + return ret; + + ret = pci_write_config_dword( + bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1, + BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT); + if (ret) + return ret; + + if (bcm4377->hw->has_bar0_core2_window2) { + ret = pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_BAR0_CORE2_WINDOW2, + bcm4377->hw->bar0_core2_window2); + if (ret) + return ret; + } + + ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR2_WINDOW, + BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT); + if (ret) + return ret; + + ret = pci_read_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_SUBSYSTEM_CTRL, &ctrl); + if (ret) + return ret; + + if (bcm4377->hw->clear_pciecfg_subsystem_ctrl_bit19) + ctrl &= ~BIT(19); + ctrl |= BIT(16); + + return pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_SUBSYSTEM_CTRL, ctrl); +} + +static 
int bcm4377_probe_dmi(struct bcm4377_data *bcm4377) +{ + const struct dmi_system_id *board_type_dmi_id; + + board_type_dmi_id = dmi_first_match(bcm4377_dmi_board_table); + if (board_type_dmi_id && board_type_dmi_id->driver_data) { + bcm4377->board_type = board_type_dmi_id->driver_data; + dev_dbg(&bcm4377->pdev->dev, + "found board type via DMI match: %s\n", + bcm4377->board_type); + } + + return 0; +} + +static int bcm4377_probe_of(struct bcm4377_data *bcm4377) +{ + struct device_node *np = bcm4377->pdev->dev.of_node; + int ret; + + if (!np) + return 0; + + ret = of_property_read_string(np, "brcm,board-type", + &bcm4377->board_type); + if (ret) { + dev_err(&bcm4377->pdev->dev, "no brcm,board-type property\n"); + return ret; + } + + bcm4377->taurus_beamforming_cal_blob = + of_get_property(np, "brcm,taurus-bf-cal-blob", + &bcm4377->taurus_beamforming_cal_size); + if (!bcm4377->taurus_beamforming_cal_blob) { + dev_err(&bcm4377->pdev->dev, + "no brcm,taurus-bf-cal-blob property\n"); + return -ENOENT; + } + bcm4377->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob", + &bcm4377->taurus_cal_size); + if (!bcm4377->taurus_cal_blob) { + dev_err(&bcm4377->pdev->dev, + "no brcm,taurus-cal-blob property\n"); + return -ENOENT; + } + + return 0; +} + +static void bcm4377_disable_aspm(struct bcm4377_data *bcm4377) +{ + pci_disable_link_state(bcm4377->pdev, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); + + /* + * pci_disable_link_state can fail if either CONFIG_PCIEASPM is disabled + * or if the BIOS hasn't handed over control to us. We must *always* + * disable ASPM for this device due to hardware errata though. + */ + pcie_capability_clear_word(bcm4377->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); +} + +static void bcm4377_pci_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void bcm4377_hci_free_dev(void *data) +{ + hci_free_dev(data); +} + +static void bcm4377_hci_unregister_dev(void *data) +{ + hci_unregister_dev(data); +} + +static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct bcm4377_data *bcm4377; + struct hci_dev *hdev; + int ret, irq; + + ret = dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK); + if (ret) + return ret; + + bcm4377 = devm_kzalloc(&pdev->dev, sizeof(*bcm4377), GFP_KERNEL); + if (!bcm4377) + return -ENOMEM; + + bcm4377->pdev = pdev; + bcm4377->hw = &bcm4377_hw_variants[id->driver_data]; + init_completion(&bcm4377->event); + + ret = bcm4377_prepare_rings(bcm4377); + if (ret) + return ret; + + ret = bcm4377_init_context(bcm4377); + if (ret) + return ret; + + ret = bcm4377_probe_dmi(bcm4377); + if (ret) + return ret; + ret = bcm4377_probe_of(bcm4377); + if (ret) + return ret; + if (!bcm4377->board_type) { + dev_err(&pdev->dev, "unable to determine board type\n"); + return -ENODEV; + } + + if (bcm4377->hw->disable_aspm) + bcm4377_disable_aspm(bcm4377); + + ret = pci_reset_function_locked(pdev); + if (ret) + dev_warn( + &pdev->dev, + "function level reset failed with %d; trying to continue anyway\n", + ret); + + /* + * If this number is too low and we try to access any BAR too + * early the device will crash. Experiments have shown that + * approximately 50 msec is the minimum amount we have to wait. + * Let's double that to be safe. 
+ */ + msleep(100); + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = bcm4377_init_cfg(bcm4377); + if (ret) + return ret; + + bcm4377->bar0 = pcim_iomap(pdev, 0, 0); + if (!bcm4377->bar0) + return -EBUSY; + bcm4377->bar2 = pcim_iomap(pdev, 2, 0); + if (!bcm4377->bar2) + return -EBUSY; + + ret = bcm4377_parse_otp(bcm4377); + if (ret) { + dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret); + return ret; + } + + /* + * Legacy interrupts result in an IRQ storm because we don't know where + * the interrupt mask and status registers for these chips are. + * MSIs are acked automatically instead. + */ + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) + return -ENODEV; + ret = devm_add_action_or_reset(&pdev->dev, bcm4377_pci_free_irq_vectors, + pdev); + if (ret) + return ret; + + irq = pci_irq_vector(pdev, 0); + if (irq <= 0) + return -ENODEV; + + ret = devm_request_irq(&pdev->dev, irq, bcm4377_irq, 0, "bcm4377", + bcm4377); + if (ret) + return ret; + + hdev = hci_alloc_dev(); + if (!hdev) + return -ENOMEM; + ret = devm_add_action_or_reset(&pdev->dev, bcm4377_hci_free_dev, hdev); + if (ret) + return ret; + + bcm4377->hdev = hdev; + + hdev->bus = HCI_PCI; + hdev->dev_type = HCI_PRIMARY; + hdev->open = bcm4377_hci_open; + hdev->close = bcm4377_hci_close; + hdev->send = bcm4377_hci_send_frame; + hdev->set_bdaddr = bcm4377_hci_set_bdaddr; + hdev->setup = bcm4377_hci_setup; + + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + if (bcm4377->hw->broken_mws_transport_config) + set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks); + if (bcm4377->hw->broken_ext_scan) + set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); + if (bcm4377->hw->broken_le_coded) + set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); + + pci_set_drvdata(pdev, bcm4377); + hci_set_drvdata(hdev, bcm4377); + SET_HCIDEV_DEV(hdev, &pdev->dev); + + ret = bcm4377_boot(bcm4377); + if (ret) + return ret; + + ret = bcm4377_setup_rti(bcm4377); + if (ret) + return ret; + + ret = hci_register_dev(hdev); + if (ret) + return ret; + return devm_add_action_or_reset(&pdev->dev, bcm4377_hci_unregister_dev, + hdev); +} + +static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev); + int ret; + + ret = hci_suspend_dev(bcm4377->hdev); + if (ret) + return ret; + + iowrite32(BCM4377_BAR0_SLEEP_CONTROL_QUIESCE, + bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); + + return 0; +} + +static int bcm4377_resume(struct pci_dev *pdev) +{ + struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev); + + iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE, + bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); + + return hci_resume_dev(bcm4377->hdev); +} + +static const struct dmi_system_id bcm4377_dmi_board_table[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), + }, + .driver_data = "apple,formosa", + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,4"), + }, + .driver_data = "apple,formosa", + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,3"), + }, + .driver_data = "apple,formosa", + }, + {} +}; + +static const struct bcm4377_hw bcm4377_hw_variants[] = { + [BCM4377] = { + .id = 0x4377, + .otp_offset = 0x4120, + .bar0_window1 = 0x1800b000, + .bar0_window2 = 0x1810c000, + .disable_aspm = true, + .broken_ext_scan = true, + .send_ptb = 
bcm4377_send_ptb, + }, + + [BCM4378] = { + .id = 0x4378, + .otp_offset = 0x4120, + .bar0_window1 = 0x18002000, + .bar0_window2 = 0x1810a000, + .bar0_core2_window2 = 0x18107000, + .has_bar0_core2_window2 = true, + .broken_mws_transport_config = true, + .broken_le_coded = true, + .send_calibration = bcm4378_send_calibration, + .send_ptb = bcm4378_send_ptb, + }, + + [BCM4387] = { + .id = 0x4387, + .otp_offset = 0x413c, + .bar0_window1 = 0x18002000, + .bar0_window2 = 0x18109000, + .bar0_core2_window2 = 0x18106000, + .has_bar0_core2_window2 = true, + .clear_pciecfg_subsystem_ctrl_bit19 = true, + .broken_mws_transport_config = true, + .broken_le_coded = true, + .send_calibration = bcm4387_send_calibration, + .send_ptb = bcm4378_send_ptb, + }, +}; + +#define BCM4377_DEVID_ENTRY(id) \ + { \ + PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID, \ + PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \ + BCM##id \ + } + +static const struct pci_device_id bcm4377_devid_table[] = { + BCM4377_DEVID_ENTRY(4377), + BCM4377_DEVID_ENTRY(4378), + BCM4377_DEVID_ENTRY(4387), + {}, +}; +MODULE_DEVICE_TABLE(pci, bcm4377_devid_table); + +static struct pci_driver bcm4377_pci_driver = { + .name = "hci_bcm4377", + .id_table = bcm4377_devid_table, + .probe = bcm4377_probe, + .suspend = bcm4377_suspend, + .resume = bcm4377_resume, +}; +module_pci_driver(bcm4377_pci_driver); + +MODULE_AUTHOR("Sven Peter "); +MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_FIRMWARE("brcm/brcmbt4377*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4377*.ptb"); +MODULE_FIRMWARE("brcm/brcmbt4378*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4378*.ptb"); +MODULE_FIRMWARE("brcm/brcmbt4387*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4387*.ptb"); diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c new file mode 100644 index 000000000..2a5a27d71 --- /dev/null +++ b/drivers/bluetooth/hci_bcsp.c @@ -0,0 +1,784 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2002-2003 Fabrizio Gennari + * Copyright (C) 2004-2005 Marcel Holtmann + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +static bool txcrc = true; +static bool hciextn = true; + +#define BCSP_TXWINSIZE 4 + +#define BCSP_ACK_PKT 0x05 +#define BCSP_LE_PKT 0x06 + +struct bcsp_struct { + struct sk_buff_head unack; /* Unack'ed packets queue */ + struct sk_buff_head rel; /* Reliable packets queue */ + struct sk_buff_head unrel; /* Unreliable packets queue */ + + unsigned long rx_count; + struct sk_buff *rx_skb; + u8 rxseq_txack; /* rxseq == txack. */ + u8 rxack; /* Last packet sent by us that the peer ack'ed */ + struct timer_list tbcsp; + struct hci_uart *hu; + + enum { + BCSP_W4_PKT_DELIMITER, + BCSP_W4_PKT_START, + BCSP_W4_BCSP_HDR, + BCSP_W4_DATA, + BCSP_W4_CRC + } rx_state; + + enum { + BCSP_ESCSTATE_NOESC, + BCSP_ESCSTATE_ESC + } rx_esc_state; + + u8 use_crc; + u16 message_crc; + u8 txack_req; /* Do we need to send ack's to the peer? */ + + /* Reliable packet sequence number - used to assign seq to each rel pkt. */ + u8 msgq_txseq; +}; + +/* ---- BCSP CRC calculation ---- */ + +/* Table for calculating CRC for polynomial 0x1021, LSB processed first, + * initial value 0xffff, bits shifted in reverse order. 
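+ * + * The 16-entry table handles one nibble per lookup; bcsp_crc_update() + * below folds in the low nibble of each byte first, then the high nibble, + * so each byte costs two table lookups.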
+ */ + +static const u16 crc_table[] = { + 0x0000, 0x1081, 0x2102, 0x3183, + 0x4204, 0x5285, 0x6306, 0x7387, + 0x8408, 0x9489, 0xa50a, 0xb58b, + 0xc60c, 0xd68d, 0xe70e, 0xf78f +}; + +/* Initialise the crc calculator */ +#define BCSP_CRC_INIT(x) x = 0xffff + +/* Update crc with next data byte + * + * Implementation note + * The data byte is treated as two nibbles. The crc is generated + * in reverse, i.e., bits are fed into the register from the top. + */ +static void bcsp_crc_update(u16 *crc, u8 d) +{ + u16 reg = *crc; + + reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f]; + reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f]; + + *crc = reg; +} + +/* ---- BCSP core ---- */ + +static void bcsp_slip_msgdelim(struct sk_buff *skb) +{ + const char pkt_delim = 0xc0; + + skb_put_data(skb, &pkt_delim, 1); +} + +static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c) +{ + const char esc_c0[2] = { 0xdb, 0xdc }; + const char esc_db[2] = { 0xdb, 0xdd }; + + switch (c) { + case 0xc0: + skb_put_data(skb, &esc_c0, 2); + break; + case 0xdb: + skb_put_data(skb, &esc_db, 2); + break; + default: + skb_put_data(skb, &c, 1); + } +} + +static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct bcsp_struct *bcsp = hu->priv; + + if (skb->len > 0xFFF) { + BT_ERR("Packet too long"); + kfree_skb(skb); + return 0; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_ACLDATA_PKT: + case HCI_COMMAND_PKT: + skb_queue_tail(&bcsp->rel, skb); + break; + + case HCI_SCODATA_PKT: + skb_queue_tail(&bcsp->unrel, skb); + break; + + default: + BT_ERR("Unknown packet type"); + kfree_skb(skb); + break; + } + + return 0; +} + +static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, + int len, int pkt_type) +{ + struct sk_buff *nskb; + u8 hdr[4], chan; + u16 BCSP_CRC_INIT(bcsp_txmsg_crc); + int rel, i; + + switch (pkt_type) { + case HCI_ACLDATA_PKT: + chan = 6; /* BCSP ACL channel */ + rel = 1; /* reliable channel */ + break; + case HCI_COMMAND_PKT: + chan = 5; /* BCSP cmd/evt channel */ + rel = 1; /* reliable channel */ + break; + case HCI_SCODATA_PKT: + chan = 7; /* BCSP SCO channel */ + rel = 0; /* unreliable channel */ + break; + case BCSP_LE_PKT: + chan = 1; /* BCSP LE channel */ + rel = 0; /* unreliable channel */ + break; + case BCSP_ACK_PKT: + chan = 0; /* BCSP internal channel */ + rel = 0; /* unreliable channel */ + break; + default: + BT_ERR("Unknown packet type"); + return NULL; + } + + if (hciextn && chan == 5) { + __le16 opcode = ((struct hci_command_hdr *)data)->opcode; + + /* Vendor specific commands */ + if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) { + u8 desc = *(data + HCI_COMMAND_HDR_SIZE); + + if ((desc & 0xf0) == 0xc0) { + data += HCI_COMMAND_HDR_SIZE + 1; + len -= HCI_COMMAND_HDR_SIZE + 1; + chan = desc & 0x0f; + } + } + } + + /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2 + * (because bytes 0xc0 and 0xdb are escaped, worst case is + * when the packet is all made of 0xc0 and 0xdb :) ) + * + 2 (0xc0 delimiters at start and end). 
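+ * + * For example, a 10 byte payload needs at most (10 + 6) * 2 + 2 = 34 + * bytes, which matches the alloc_skb() size below.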
+ */ + + nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); + if (!nskb) + return NULL; + + hci_skb_pkt_type(nskb) = pkt_type; + + bcsp_slip_msgdelim(nskb); + + hdr[0] = bcsp->rxseq_txack << 3; + bcsp->txack_req = 0; + BT_DBG("We request packet no %u to card", bcsp->rxseq_txack); + + if (rel) { + hdr[0] |= 0x80 + bcsp->msgq_txseq; + BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq); + bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07; + } + + if (bcsp->use_crc) + hdr[0] |= 0x40; + + hdr[1] = ((len << 4) & 0xff) | chan; + hdr[2] = len >> 4; + hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]); + + /* Put BCSP header */ + for (i = 0; i < 4; i++) { + bcsp_slip_one_byte(nskb, hdr[i]); + + if (bcsp->use_crc) + bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]); + } + + /* Put payload */ + for (i = 0; i < len; i++) { + bcsp_slip_one_byte(nskb, data[i]); + + if (bcsp->use_crc) + bcsp_crc_update(&bcsp_txmsg_crc, data[i]); + } + + /* Put CRC */ + if (bcsp->use_crc) { + bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc); + bcsp_slip_one_byte(nskb, (u8)((bcsp_txmsg_crc >> 8) & 0x00ff)); + bcsp_slip_one_byte(nskb, (u8)(bcsp_txmsg_crc & 0x00ff)); + } + + bcsp_slip_msgdelim(nskb); + return nskb; +} + +/* This is a rewrite of pkt_avail in ABCSP */ +static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + unsigned long flags; + struct sk_buff *skb; + + /* First of all, check for unreliable messages in the queue, + * since they have priority + */ + + skb = skb_dequeue(&bcsp->unrel); + if (skb != NULL) { + struct sk_buff *nskb; + + nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, + hci_skb_pkt_type(skb)); + if (nskb) { + kfree_skb(skb); + return nskb; + } else { + skb_queue_head(&bcsp->unrel, skb); + BT_ERR("Could not dequeue pkt because alloc_skb failed"); + } + } + + /* Now, try to send a reliable pkt. We can only send a + * reliable packet if the number of packets sent but not yet ack'ed + * is < than the winsize + */ + + spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); + + if (bcsp->unack.qlen < BCSP_TXWINSIZE) { + skb = skb_dequeue(&bcsp->rel); + if (skb != NULL) { + struct sk_buff *nskb; + + nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, + hci_skb_pkt_type(skb)); + if (nskb) { + __skb_queue_tail(&bcsp->unack, skb); + mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + return nskb; + } else { + skb_queue_head(&bcsp->rel, skb); + BT_ERR("Could not dequeue pkt because alloc_skb failed"); + } + } + } + + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + + /* We could not send a reliable packet, either because there are + * none or because there are too many unack'ed pkts. Did we receive + * any packets we have not acknowledged yet ? 
+ */ + + if (bcsp->txack_req) { + /* if so, craft an empty ACK pkt and send it on BCSP unreliable + * channel 0 + */ + struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT); + return nskb; + } + + /* We have nothing to send */ + return NULL; +} + +static int bcsp_flush(struct hci_uart *hu) +{ + BT_DBG("hu %p", hu); + return 0; +} + +/* Remove ack'ed packets */ +static void bcsp_pkt_cull(struct bcsp_struct *bcsp) +{ + struct sk_buff *skb, *tmp; + unsigned long flags; + int i, pkts_to_be_removed; + u8 seqno; + + spin_lock_irqsave(&bcsp->unack.lock, flags); + + pkts_to_be_removed = skb_queue_len(&bcsp->unack); + seqno = bcsp->msgq_txseq; + + while (pkts_to_be_removed) { + if (bcsp->rxack == seqno) + break; + pkts_to_be_removed--; + seqno = (seqno - 1) & 0x07; + } + + if (bcsp->rxack != seqno) + BT_ERR("Peer acked invalid packet"); + + BT_DBG("Removing %u pkts out of %u, up to seqno %u", + pkts_to_be_removed, skb_queue_len(&bcsp->unack), + (seqno - 1) & 0x07); + + i = 0; + skb_queue_walk_safe(&bcsp->unack, skb, tmp) { + if (i >= pkts_to_be_removed) + break; + i++; + + __skb_unlink(skb, &bcsp->unack); + dev_kfree_skb_irq(skb); + } + + if (skb_queue_empty(&bcsp->unack)) + del_timer(&bcsp->tbcsp); + + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + + if (i != pkts_to_be_removed) + BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed); +} + +/* Handle BCSP link-establishment packets. When we + * detect a "sync" packet, symptom that the BT module has reset, + * we do nothing :) (yet) + */ +static void bcsp_handle_le_pkt(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + u8 conf_pkt[4] = { 0xad, 0xef, 0xac, 0xed }; + u8 conf_rsp_pkt[4] = { 0xde, 0xad, 0xd0, 0xd0 }; + u8 sync_pkt[4] = { 0xda, 0xdc, 0xed, 0xed }; + + /* spot "conf" pkts and reply with a "conf rsp" pkt */ + if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && + !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) { + struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC); + + BT_DBG("Found a LE conf pkt"); + if (!nskb) + return; + skb_put_data(nskb, conf_rsp_pkt, 4); + hci_skb_pkt_type(nskb) = BCSP_LE_PKT; + + skb_queue_head(&bcsp->unrel, nskb); + hci_uart_tx_wakeup(hu); + } + /* Spot "sync" pkts. If we find one...disaster! 
*/ + else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && + !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) { + BT_ERR("Found a LE sync pkt, card has reset"); + } +} + +static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char byte) +{ + const u8 c0 = 0xc0, db = 0xdb; + + switch (bcsp->rx_esc_state) { + case BCSP_ESCSTATE_NOESC: + switch (byte) { + case 0xdb: + bcsp->rx_esc_state = BCSP_ESCSTATE_ESC; + break; + default: + skb_put_data(bcsp->rx_skb, &byte, 1); + if ((bcsp->rx_skb->data[0] & 0x40) != 0 && + bcsp->rx_state != BCSP_W4_CRC) + bcsp_crc_update(&bcsp->message_crc, byte); + bcsp->rx_count--; + } + break; + + case BCSP_ESCSTATE_ESC: + switch (byte) { + case 0xdc: + skb_put_data(bcsp->rx_skb, &c0, 1); + if ((bcsp->rx_skb->data[0] & 0x40) != 0 && + bcsp->rx_state != BCSP_W4_CRC) + bcsp_crc_update(&bcsp->message_crc, 0xc0); + bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; + bcsp->rx_count--; + break; + + case 0xdd: + skb_put_data(bcsp->rx_skb, &db, 1); + if ((bcsp->rx_skb->data[0] & 0x40) != 0 && + bcsp->rx_state != BCSP_W4_CRC) + bcsp_crc_update(&bcsp->message_crc, 0xdb); + bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; + bcsp->rx_count--; + break; + + default: + BT_ERR("Invalid byte %02x after esc byte", byte); + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + } + } +} + +static void bcsp_complete_rx_pkt(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + int pass_up = 0; + + if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */ + BT_DBG("Received seqno %u from card", bcsp->rxseq_txack); + + /* check the rx sequence number is as expected */ + if ((bcsp->rx_skb->data[0] & 0x07) == bcsp->rxseq_txack) { + bcsp->rxseq_txack++; + bcsp->rxseq_txack %= 0x8; + } else { + /* handle re-transmitted packet or + * when packet was missed + */ + BT_ERR("Out-of-order packet arrived, got %u expected %u", + bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack); + + /* do not process out-of-order packet payload */ + pass_up = 2; + } + + /* send current txack value to all received reliable packets */ + bcsp->txack_req = 1; + + /* If needed, transmit an ack pkt */ + hci_uart_tx_wakeup(hu); + } + + bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07; + BT_DBG("Request for pkt %u from card", bcsp->rxack); + + /* handle received ACK indications, + * including those from out-of-order packets + */ + bcsp_pkt_cull(bcsp); + + if (pass_up != 2) { + if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && + (bcsp->rx_skb->data[0] & 0x80)) { + hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT; + pass_up = 1; + } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && + (bcsp->rx_skb->data[0] & 0x80)) { + hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT; + pass_up = 1; + } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { + hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT; + pass_up = 1; + } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && + !(bcsp->rx_skb->data[0] & 0x80)) { + bcsp_handle_le_pkt(hu); + pass_up = 0; + } else { + pass_up = 0; + } + } + + if (pass_up == 0) { + struct hci_event_hdr hdr; + u8 desc = (bcsp->rx_skb->data[1] & 0x0f); + + if (desc != 0 && desc != 1) { + if (hciextn) { + desc |= 0xc0; + skb_pull(bcsp->rx_skb, 4); + memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1); + + hdr.evt = 0xff; + hdr.plen = bcsp->rx_skb->len; + memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); + hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT; + + hci_recv_frame(hu->hdev, bcsp->rx_skb); + } else { + BT_ERR("Packet for 
unknown channel (%u %s)", + bcsp->rx_skb->data[1] & 0x0f, + bcsp->rx_skb->data[0] & 0x80 ? + "reliable" : "unreliable"); + kfree_skb(bcsp->rx_skb); + } + } else + kfree_skb(bcsp->rx_skb); + } else if (pass_up == 1) { + /* Pull out BCSP hdr */ + skb_pull(bcsp->rx_skb, 4); + + hci_recv_frame(hu->hdev, bcsp->rx_skb); + } else { + /* ignore packet payload of already ACKed re-transmitted + * packets or when a packet was missed in the BCSP window + */ + kfree_skb(bcsp->rx_skb); + } + + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_skb = NULL; +} + +static u16 bscp_get_crc(struct bcsp_struct *bcsp) +{ + return get_unaligned_be16(&bcsp->rx_skb->data[bcsp->rx_skb->len - 2]); +} + +/* Recv data */ +static int bcsp_recv(struct hci_uart *hu, const void *data, int count) +{ + struct bcsp_struct *bcsp = hu->priv; + const unsigned char *ptr; + + BT_DBG("hu %p count %d rx_state %d rx_count %ld", + hu, count, bcsp->rx_state, bcsp->rx_count); + + ptr = data; + while (count) { + if (bcsp->rx_count) { + if (*ptr == 0xc0) { + BT_ERR("Short BCSP packet"); + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_START; + bcsp->rx_count = 0; + } else + bcsp_unslip_one_byte(bcsp, *ptr); + + ptr++; count--; + continue; + } + + switch (bcsp->rx_state) { + case BCSP_W4_BCSP_HDR: + if ((0xff & (u8)~(bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] + + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { + BT_ERR("Error in BCSP hdr checksum"); + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + continue; + } + bcsp->rx_state = BCSP_W4_DATA; + bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + + (bcsp->rx_skb->data[2] << 4); /* May be 0 */ + continue; + + case BCSP_W4_DATA: + if (bcsp->rx_skb->data[0] & 0x40) { /* pkt with crc */ + bcsp->rx_state = BCSP_W4_CRC; + bcsp->rx_count = 2; + } else + bcsp_complete_rx_pkt(hu); + continue; + + case BCSP_W4_CRC: + if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) { + BT_ERR("Checksum failed: computed %04x received %04x", + bitrev16(bcsp->message_crc), + bscp_get_crc(bcsp)); + + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + continue; + } + skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2); + bcsp_complete_rx_pkt(hu); + continue; + + case BCSP_W4_PKT_DELIMITER: + switch (*ptr) { + case 0xc0: + bcsp->rx_state = BCSP_W4_PKT_START; + break; + default: + /*BT_ERR("Ignoring byte %02x", *ptr);*/ + break; + } + ptr++; count--; + break; + + case BCSP_W4_PKT_START: + switch (*ptr) { + case 0xc0: + ptr++; count--; + break; + + default: + bcsp->rx_state = BCSP_W4_BCSP_HDR; + bcsp->rx_count = 4; + bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; + BCSP_CRC_INIT(bcsp->message_crc); + + /* Do not increment ptr or decrement count + * Allocate packet. Max len of a BCSP pkt= + * 0xFFF (payload) +4 (header) +2 (crc) + */ + + bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC); + if (!bcsp->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + return 0; + } + break; + } + break; + } + } + return count; +} + + /* Arrange to retransmit all messages in the relq. 
*/ +static void bcsp_timed_event(struct timer_list *t) +{ + struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp); + struct hci_uart *hu = bcsp->hu; + struct sk_buff *skb; + unsigned long flags; + + BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen); + + spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); + + while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) { + bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07; + skb_queue_head(&bcsp->rel, skb); + } + + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + + hci_uart_tx_wakeup(hu); +} + +static int bcsp_open(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp; + + BT_DBG("hu %p", hu); + + bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL); + if (!bcsp) + return -ENOMEM; + + hu->priv = bcsp; + bcsp->hu = hu; + skb_queue_head_init(&bcsp->unack); + skb_queue_head_init(&bcsp->rel); + skb_queue_head_init(&bcsp->unrel); + + timer_setup(&bcsp->tbcsp, bcsp_timed_event, 0); + + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + + if (txcrc) + bcsp->use_crc = 1; + + return 0; +} + +static int bcsp_close(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + + timer_shutdown_sync(&bcsp->tbcsp); + + hu->priv = NULL; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&bcsp->unack); + skb_queue_purge(&bcsp->rel); + skb_queue_purge(&bcsp->unrel); + + if (bcsp->rx_skb) { + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + } + + kfree(bcsp); + return 0; +} + +static const struct hci_uart_proto bcsp = { + .id = HCI_UART_BCSP, + .name = "BCSP", + .open = bcsp_open, + .close = bcsp_close, + .enqueue = bcsp_enqueue, + .dequeue = bcsp_dequeue, + .recv = bcsp_recv, + .flush = bcsp_flush +}; + +int __init bcsp_init(void) +{ + return hci_uart_register_proto(&bcsp); +} + +int __exit bcsp_deinit(void) +{ + return hci_uart_unregister_proto(&bcsp); +} + +module_param(txcrc, bool, 0644); +MODULE_PARM_DESC(txcrc, "Transmit CRC with every BCSP packet"); + +module_param(hciextn, bool, 0644); +MODULE_PARM_DESC(hciextn, "Convert HCI Extensions into BCSP packets"); diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c new file mode 100644 index 000000000..1d0cdf023 --- /dev/null +++ b/drivers/bluetooth/hci_h4.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +struct h4_struct { + struct sk_buff *rx_skb; + struct sk_buff_head txq; +}; + +/* Initialize protocol */ +static int h4_open(struct hci_uart *hu) +{ + struct h4_struct *h4; + + BT_DBG("hu %p", hu); + + h4 = kzalloc(sizeof(*h4), GFP_KERNEL); + if (!h4) + return -ENOMEM; + + skb_queue_head_init(&h4->txq); + + hu->priv = h4; + return 0; +} + +/* Flush protocol data */ +static int h4_flush(struct hci_uart *hu) +{ + struct h4_struct *h4 = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&h4->txq); + + return 0; +} + +/* Close protocol */ +static int h4_close(struct hci_uart *hu) +{ + struct h4_struct *h4 = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&h4->txq); + + kfree_skb(h4->rx_skb); + + hu->priv = NULL; + kfree(h4); + + return 0; +} + +/* Enqueue frame for transmission (padding, crc, etc) */ +static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ 
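+ /* H4 framing is minimal: a single packet-type octet is pushed in front + * of the HCI packet below, then the frame is queued for transmission. + */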
+ struct h4_struct *h4 = hu->priv; + + BT_DBG("hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + skb_queue_tail(&h4->txq, skb); + + return 0; +} + +static const struct h4_recv_pkt h4_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { H4_RECV_ISO, .recv = hci_recv_frame }, +}; + +/* Recv data */ +static int h4_recv(struct hci_uart *hu, const void *data, int count) +{ + struct h4_struct *h4 = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count, + h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts)); + if (IS_ERR(h4->rx_skb)) { + int err = PTR_ERR(h4->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + h4->rx_skb = NULL; + return err; + } + + return count; +} + +static struct sk_buff *h4_dequeue(struct hci_uart *hu) +{ + struct h4_struct *h4 = hu->priv; + return skb_dequeue(&h4->txq); +} + +static const struct hci_uart_proto h4p = { + .id = HCI_UART_H4, + .name = "H4", + .open = h4_open, + .close = h4_close, + .recv = h4_recv, + .enqueue = h4_enqueue, + .dequeue = h4_dequeue, + .flush = h4_flush, +}; + +int __init h4_init(void) +{ + return hci_uart_register_proto(&h4p); +} + +int __exit h4_deinit(void) +{ + return hci_uart_unregister_proto(&h4p); +} + +struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, + const unsigned char *buffer, int count, + const struct h4_recv_pkt *pkts, int pkts_count) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + u8 alignment = hu->alignment ? hu->alignment : 1; + + /* Check for error from previous call */ + if (IS_ERR(skb)) + skb = NULL; + + while (count) { + int i, len; + + /* remove padding bytes from buffer */ + for (; hu->padding && count > 0; hu->padding--) { + count--; + buffer++; + } + if (!count) + break; + + if (!skb) { + for (i = 0; i < pkts_count; i++) { + if (buffer[0] != (&pkts[i])->type) + continue; + + skb = bt_skb_alloc((&pkts[i])->maxlen, + GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + + hci_skb_pkt_type(skb) = (&pkts[i])->type; + hci_skb_expect(skb) = (&pkts[i])->hlen; + break; + } + + /* Check for invalid packet type */ + if (!skb) + return ERR_PTR(-EILSEQ); + + count -= 1; + buffer += 1; + } + + len = min_t(uint, hci_skb_expect(skb) - skb->len, count); + skb_put_data(skb, buffer, len); + + count -= len; + buffer += len; + + /* Check for partial packet */ + if (skb->len < hci_skb_expect(skb)) + continue; + + for (i = 0; i < pkts_count; i++) { + if (hci_skb_pkt_type(skb) == (&pkts[i])->type) + break; + } + + if (i >= pkts_count) { + kfree_skb(skb); + return ERR_PTR(-EILSEQ); + } + + if (skb->len == (&pkts[i])->hlen) { + u16 dlen; + + switch ((&pkts[i])->lsize) { + case 0: + /* No variable data length */ + dlen = 0; + break; + case 1: + /* Single octet variable length */ + dlen = skb->data[(&pkts[i])->loff]; + hci_skb_expect(skb) += dlen; + + if (skb_tailroom(skb) < dlen) { + kfree_skb(skb); + return ERR_PTR(-EMSGSIZE); + } + break; + case 2: + /* Double octet variable length */ + dlen = get_unaligned_le16(skb->data + + (&pkts[i])->loff); + hci_skb_expect(skb) += dlen; + + if (skb_tailroom(skb) < dlen) { + kfree_skb(skb); + return ERR_PTR(-EMSGSIZE); + } + break; + default: + /* Unsupported variable length */ + kfree_skb(skb); + return ERR_PTR(-EILSEQ); + } + + if (!dlen) { + hu->padding = (skb->len + 1) % alignment; + hu->padding = (alignment - 
hu->padding) % alignment; + + /* No more data, complete frame */ + (&pkts[i])->recv(hdev, skb); + skb = NULL; + } + } else { + hu->padding = (skb->len + 1) % alignment; + hu->padding = (alignment - hu->padding) % alignment; + + /* Complete frame */ + (&pkts[i])->recv(hdev, skb); + skb = NULL; + } + } + + return skb; +} +EXPORT_SYMBOL_GPL(h4_recv_buf); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c new file mode 100644 index 000000000..71e748a94 --- /dev/null +++ b/drivers/bluetooth/hci_h5.c @@ -0,0 +1,1137 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI Three-wire UART driver + * + * Copyright (C) 2012 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "btrtl.h" +#include "hci_uart.h" + +#define SUSPEND_TIMEOUT_MS 6000 + +#define HCI_3WIRE_ACK_PKT 0 +#define HCI_3WIRE_LINK_PKT 15 + +/* Sliding window size */ +#define H5_TX_WIN_MAX 4 + +#define H5_ACK_TIMEOUT msecs_to_jiffies(250) +#define H5_SYNC_TIMEOUT msecs_to_jiffies(100) + +/* + * Maximum Three-wire packet: + * 4 byte header + max value for 12-bit length + 2 bytes for CRC + */ +#define H5_MAX_LEN (4 + 0xfff + 2) + +/* Convenience macros for reading Three-wire header values */ +#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07) +#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07) +#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01) +#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01) +#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f) +#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4)) + +#define SLIP_DELIMITER 0xc0 +#define SLIP_ESC 0xdb +#define SLIP_ESC_DELIM 0xdc +#define SLIP_ESC_ESC 0xdd + +/* H5 state flags */ +enum { + H5_RX_ESC, /* SLIP escape mode */ + H5_TX_ACK_REQ, /* Pending ack to send */ + H5_WAKEUP_DISABLE, /* Device cannot wake host */ + H5_HW_FLOW_CONTROL, /* Use HW flow control */ +}; + +struct h5 { + /* Must be the first member, hci_serdev.c expects this. 
*/ + struct hci_uart serdev_hu; + + struct sk_buff_head unack; /* Unack'ed packets queue */ + struct sk_buff_head rel; /* Reliable packets queue */ + struct sk_buff_head unrel; /* Unreliable packets queue */ + + unsigned long flags; + + struct sk_buff *rx_skb; /* Receive buffer */ + size_t rx_pending; /* Expecting more bytes */ + u8 rx_ack; /* Last ack number received */ + + int (*rx_func)(struct hci_uart *hu, u8 c); + + struct timer_list timer; /* Retransmission timer */ + struct hci_uart *hu; /* Parent HCI UART */ + + u8 tx_seq; /* Next seq number to send */ + u8 tx_ack; /* Next ack number to send */ + u8 tx_win; /* Sliding window size */ + + enum { + H5_UNINITIALIZED, + H5_INITIALIZED, + H5_ACTIVE, + } state; + + enum { + H5_AWAKE, + H5_SLEEPING, + H5_WAKING_UP, + } sleep; + + const struct h5_vnd *vnd; + const char *id; + + struct gpio_desc *enable_gpio; + struct gpio_desc *device_wake_gpio; +}; + +enum h5_driver_info { + H5_INFO_WAKEUP_DISABLE = BIT(0), +}; + +struct h5_vnd { + int (*setup)(struct h5 *h5); + void (*open)(struct h5 *h5); + void (*close)(struct h5 *h5); + int (*suspend)(struct h5 *h5); + int (*resume)(struct h5 *h5); + const struct acpi_gpio_mapping *acpi_gpio_map; +}; + +struct h5_device_data { + uint32_t driver_info; + struct h5_vnd *vnd; +}; + +static void h5_reset_rx(struct h5 *h5); + +static void h5_link_control(struct hci_uart *hu, const void *data, size_t len) +{ + struct h5 *h5 = hu->priv; + struct sk_buff *nskb; + + nskb = alloc_skb(3, GFP_ATOMIC); + if (!nskb) + return; + + hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT; + + skb_put_data(nskb, data, len); + + skb_queue_tail(&h5->unrel, nskb); +} + +static u8 h5_cfg_field(struct h5 *h5) +{ + /* Sliding window size (first 3 bits) */ + return h5->tx_win & 0x07; +} + +static void h5_timed_event(struct timer_list *t) +{ + const unsigned char sync_req[] = { 0x01, 0x7e }; + unsigned char conf_req[3] = { 0x03, 0xfc }; + struct h5 *h5 = from_timer(h5, t, timer); + struct hci_uart *hu = h5->hu; + struct sk_buff *skb; + unsigned long flags; + + BT_DBG("%s", hu->hdev->name); + + if (h5->state == H5_UNINITIALIZED) + h5_link_control(hu, sync_req, sizeof(sync_req)); + + if (h5->state == H5_INITIALIZED) { + conf_req[2] = h5_cfg_field(h5); + h5_link_control(hu, conf_req, sizeof(conf_req)); + } + + if (h5->state != H5_ACTIVE) { + mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); + goto wakeup; + } + + if (h5->sleep != H5_AWAKE) { + h5->sleep = H5_SLEEPING; + goto wakeup; + } + + BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen); + + spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); + + while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) { + h5->tx_seq = (h5->tx_seq - 1) & 0x07; + skb_queue_head(&h5->rel, skb); + } + + spin_unlock_irqrestore(&h5->unack.lock, flags); + +wakeup: + hci_uart_tx_wakeup(hu); +} + +static void h5_peer_reset(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + + bt_dev_err(hu->hdev, "Peer device has reset"); + + h5->state = H5_UNINITIALIZED; + + del_timer(&h5->timer); + + skb_queue_purge(&h5->rel); + skb_queue_purge(&h5->unrel); + skb_queue_purge(&h5->unack); + + h5->tx_seq = 0; + h5->tx_ack = 0; + + /* Send reset request to upper stack */ + hci_reset_dev(hu->hdev); +} + +static int h5_open(struct hci_uart *hu) +{ + struct h5 *h5; + const unsigned char sync[] = { 0x01, 0x7e }; + + BT_DBG("hu %p", hu); + + if (hu->serdev) { + h5 = serdev_device_get_drvdata(hu->serdev); + } else { + h5 = kzalloc(sizeof(*h5), GFP_KERNEL); + if (!h5) + return -ENOMEM; + } + + hu->priv = h5; + 
h5->hu = hu; + + skb_queue_head_init(&h5->unack); + skb_queue_head_init(&h5->rel); + skb_queue_head_init(&h5->unrel); + + h5_reset_rx(h5); + + timer_setup(&h5->timer, h5_timed_event, 0); + + h5->tx_win = H5_TX_WIN_MAX; + + if (h5->vnd && h5->vnd->open) + h5->vnd->open(h5); + + set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags); + + /* Send initial sync request */ + h5_link_control(hu, sync, sizeof(sync)); + mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); + + return 0; +} + +static int h5_close(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + + del_timer_sync(&h5->timer); + + skb_queue_purge(&h5->unack); + skb_queue_purge(&h5->rel); + skb_queue_purge(&h5->unrel); + + kfree_skb(h5->rx_skb); + h5->rx_skb = NULL; + + if (h5->vnd && h5->vnd->close) + h5->vnd->close(h5); + + if (!hu->serdev) + kfree(h5); + + return 0; +} + +static int h5_setup(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + + if (h5->vnd && h5->vnd->setup) + return h5->vnd->setup(h5); + + return 0; +} + +static void h5_pkt_cull(struct h5 *h5) +{ + struct sk_buff *skb, *tmp; + unsigned long flags; + int i, to_remove; + u8 seq; + + spin_lock_irqsave(&h5->unack.lock, flags); + + to_remove = skb_queue_len(&h5->unack); + if (to_remove == 0) + goto unlock; + + seq = h5->tx_seq; + + while (to_remove > 0) { + if (h5->rx_ack == seq) + break; + + to_remove--; + seq = (seq - 1) & 0x07; + } + + if (seq != h5->rx_ack) + BT_ERR("Controller acked invalid packet"); + + i = 0; + skb_queue_walk_safe(&h5->unack, skb, tmp) { + if (i++ >= to_remove) + break; + + __skb_unlink(skb, &h5->unack); + dev_kfree_skb_irq(skb); + } + + if (skb_queue_empty(&h5->unack)) + del_timer(&h5->timer); + +unlock: + spin_unlock_irqrestore(&h5->unack.lock, flags); +} + +static void h5_handle_internal_rx(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + const unsigned char sync_req[] = { 0x01, 0x7e }; + const unsigned char sync_rsp[] = { 0x02, 0x7d }; + unsigned char conf_req[3] = { 0x03, 0xfc }; + const unsigned char conf_rsp[] = { 0x04, 0x7b }; + const unsigned char wakeup_req[] = { 0x05, 0xfa }; + const unsigned char woken_req[] = { 0x06, 0xf9 }; + const unsigned char sleep_req[] = { 0x07, 0x78 }; + const unsigned char *hdr = h5->rx_skb->data; + const unsigned char *data = &h5->rx_skb->data[4]; + + BT_DBG("%s", hu->hdev->name); + + if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) + return; + + if (H5_HDR_LEN(hdr) < 2) + return; + + conf_req[2] = h5_cfg_field(h5); + + if (memcmp(data, sync_req, 2) == 0) { + if (h5->state == H5_ACTIVE) + h5_peer_reset(hu); + h5_link_control(hu, sync_rsp, 2); + } else if (memcmp(data, sync_rsp, 2) == 0) { + if (h5->state == H5_ACTIVE) + h5_peer_reset(hu); + h5->state = H5_INITIALIZED; + h5_link_control(hu, conf_req, 3); + } else if (memcmp(data, conf_req, 2) == 0) { + h5_link_control(hu, conf_rsp, 2); + h5_link_control(hu, conf_req, 3); + } else if (memcmp(data, conf_rsp, 2) == 0) { + if (H5_HDR_LEN(hdr) > 2) + h5->tx_win = (data[2] & 0x07); + BT_DBG("Three-wire init complete. 
tx_win %u", h5->tx_win); + h5->state = H5_ACTIVE; + hci_uart_init_ready(hu); + return; + } else if (memcmp(data, sleep_req, 2) == 0) { + BT_DBG("Peer went to sleep"); + h5->sleep = H5_SLEEPING; + return; + } else if (memcmp(data, woken_req, 2) == 0) { + BT_DBG("Peer woke up"); + h5->sleep = H5_AWAKE; + } else if (memcmp(data, wakeup_req, 2) == 0) { + BT_DBG("Peer requested wakeup"); + h5_link_control(hu, woken_req, 2); + h5->sleep = H5_AWAKE; + } else { + BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]); + return; + } + + hci_uart_tx_wakeup(hu); +} + +static void h5_complete_rx_pkt(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + const unsigned char *hdr = h5->rx_skb->data; + + if (H5_HDR_RELIABLE(hdr)) { + h5->tx_ack = (h5->tx_ack + 1) % 8; + set_bit(H5_TX_ACK_REQ, &h5->flags); + hci_uart_tx_wakeup(hu); + } + + h5->rx_ack = H5_HDR_ACK(hdr); + + h5_pkt_cull(h5); + + switch (H5_HDR_PKT_TYPE(hdr)) { + case HCI_EVENT_PKT: + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr); + + /* Remove Three-wire header */ + skb_pull(h5->rx_skb, 4); + + hci_recv_frame(hu->hdev, h5->rx_skb); + h5->rx_skb = NULL; + + break; + + default: + h5_handle_internal_rx(hu); + break; + } + + h5_reset_rx(h5); +} + +static int h5_rx_crc(struct hci_uart *hu, unsigned char c) +{ + h5_complete_rx_pkt(hu); + + return 0; +} + +static int h5_rx_payload(struct hci_uart *hu, unsigned char c) +{ + struct h5 *h5 = hu->priv; + const unsigned char *hdr = h5->rx_skb->data; + + if (H5_HDR_CRC(hdr)) { + h5->rx_func = h5_rx_crc; + h5->rx_pending = 2; + } else { + h5_complete_rx_pkt(hu); + } + + return 0; +} + +static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) +{ + struct h5 *h5 = hu->priv; + const unsigned char *hdr = h5->rx_skb->data; + + BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u", + hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr), + H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr), + H5_HDR_LEN(hdr)); + + if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) { + bt_dev_err(hu->hdev, "Invalid header checksum"); + h5_reset_rx(h5); + return 0; + } + + if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) { + bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)", + H5_HDR_SEQ(hdr), h5->tx_ack); + set_bit(H5_TX_ACK_REQ, &h5->flags); + hci_uart_tx_wakeup(hu); + h5_reset_rx(h5); + return 0; + } + + if (h5->state != H5_ACTIVE && + H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { + bt_dev_err(hu->hdev, "Non-link packet received in non-active state"); + h5_reset_rx(h5); + return 0; + } + + h5->rx_func = h5_rx_payload; + h5->rx_pending = H5_HDR_LEN(hdr); + + return 0; +} + +static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c) +{ + struct h5 *h5 = hu->priv; + + if (c == SLIP_DELIMITER) + return 1; + + h5->rx_func = h5_rx_3wire_hdr; + h5->rx_pending = 4; + + h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC); + if (!h5->rx_skb) { + bt_dev_err(hu->hdev, "Can't allocate mem for new packet"); + h5_reset_rx(h5); + return -ENOMEM; + } + + h5->rx_skb->dev = (void *)hu->hdev; + + return 0; +} + +static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c) +{ + struct h5 *h5 = hu->priv; + + if (c == SLIP_DELIMITER) + h5->rx_func = h5_rx_pkt_start; + + return 1; +} + +static void h5_unslip_one_byte(struct h5 *h5, unsigned char c) +{ + const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC; + const u8 *byte = &c; + + if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) { + set_bit(H5_RX_ESC, 
&h5->flags); + return; + } + + if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) { + switch (c) { + case SLIP_ESC_DELIM: + byte = &delim; + break; + case SLIP_ESC_ESC: + byte = &esc; + break; + default: + BT_ERR("Invalid esc byte 0x%02hhx", c); + h5_reset_rx(h5); + return; + } + } + + skb_put_data(h5->rx_skb, byte, 1); + h5->rx_pending--; + + BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending); +} + +static void h5_reset_rx(struct h5 *h5) +{ + if (h5->rx_skb) { + kfree_skb(h5->rx_skb); + h5->rx_skb = NULL; + } + + h5->rx_func = h5_rx_delimiter; + h5->rx_pending = 0; + clear_bit(H5_RX_ESC, &h5->flags); +} + +static int h5_recv(struct hci_uart *hu, const void *data, int count) +{ + struct h5 *h5 = hu->priv; + const unsigned char *ptr = data; + + BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending, + count); + + while (count > 0) { + int processed; + + if (h5->rx_pending > 0) { + if (*ptr == SLIP_DELIMITER) { + bt_dev_err(hu->hdev, "Too short H5 packet"); + h5_reset_rx(h5); + continue; + } + + h5_unslip_one_byte(h5, *ptr); + + ptr++; count--; + continue; + } + + processed = h5->rx_func(hu, *ptr); + if (processed < 0) + return processed; + + ptr += processed; + count -= processed; + } + + if (hu->serdev) { + pm_runtime_get(&hu->serdev->dev); + pm_runtime_mark_last_busy(&hu->serdev->dev); + pm_runtime_put_autosuspend(&hu->serdev->dev); + } + + return 0; +} + +static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct h5 *h5 = hu->priv; + + if (skb->len > 0xfff) { + bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len); + kfree_skb(skb); + return 0; + } + + if (h5->state != H5_ACTIVE) { + bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state"); + kfree_skb(skb); + return 0; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_ACLDATA_PKT: + case HCI_COMMAND_PKT: + skb_queue_tail(&h5->rel, skb); + break; + + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + skb_queue_tail(&h5->unrel, skb); + break; + + default: + bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb)); + kfree_skb(skb); + break; + } + + if (hu->serdev) { + pm_runtime_get_sync(&hu->serdev->dev); + pm_runtime_mark_last_busy(&hu->serdev->dev); + pm_runtime_put_autosuspend(&hu->serdev->dev); + } + + return 0; +} + +static void h5_slip_delim(struct sk_buff *skb) +{ + const char delim = SLIP_DELIMITER; + + skb_put_data(skb, &delim, 1); +} + +static void h5_slip_one_byte(struct sk_buff *skb, u8 c) +{ + const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM }; + const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC }; + + switch (c) { + case SLIP_DELIMITER: + skb_put_data(skb, &esc_delim, 2); + break; + case SLIP_ESC: + skb_put_data(skb, &esc_esc, 2); + break; + default: + skb_put_data(skb, &c, 1); + } +} + +static bool valid_packet_type(u8 type) +{ + switch (type) { + case HCI_ACLDATA_PKT: + case HCI_COMMAND_PKT: + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + case HCI_3WIRE_LINK_PKT: + case HCI_3WIRE_ACK_PKT: + return true; + default: + return false; + } +} + +static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type, + const u8 *data, size_t len) +{ + struct h5 *h5 = hu->priv; + struct sk_buff *nskb; + u8 hdr[4]; + int i; + + if (!valid_packet_type(pkt_type)) { + bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type); + return NULL; + } + + /* + * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2 + * (because bytes 0xc0 and 0xdb are escaped, worst case is when + * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0 + * delimiters at start and end). 
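+ * + * The 4 byte header packed below mirrors the H5_HDR_* accessors at the + * top of this file: seq/ack/crc/reliable bits in byte 0, packet type and + * the low length nibble in byte 1, the remaining length bits in byte 2, + * and a one's-complement checksum of the first three bytes in byte 3.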
+ */ + nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); + if (!nskb) + return NULL; + + hci_skb_pkt_type(nskb) = pkt_type; + + h5_slip_delim(nskb); + + hdr[0] = h5->tx_ack << 3; + clear_bit(H5_TX_ACK_REQ, &h5->flags); + + /* Reliable packet? */ + if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) { + hdr[0] |= 1 << 7; + hdr[0] |= h5->tx_seq; + h5->tx_seq = (h5->tx_seq + 1) % 8; + } + + hdr[1] = pkt_type | ((len & 0x0f) << 4); + hdr[2] = len >> 4; + hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff); + + BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u", + hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr), + H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr), + H5_HDR_LEN(hdr)); + + for (i = 0; i < 4; i++) + h5_slip_one_byte(nskb, hdr[i]); + + for (i = 0; i < len; i++) + h5_slip_one_byte(nskb, data[i]); + + h5_slip_delim(nskb); + + return nskb; +} + +static struct sk_buff *h5_dequeue(struct hci_uart *hu) +{ + struct h5 *h5 = hu->priv; + unsigned long flags; + struct sk_buff *skb, *nskb; + + if (h5->sleep != H5_AWAKE) { + const unsigned char wakeup_req[] = { 0x05, 0xfa }; + + if (h5->sleep == H5_WAKING_UP) + return NULL; + + h5->sleep = H5_WAKING_UP; + BT_DBG("Sending wakeup request"); + + mod_timer(&h5->timer, jiffies + HZ / 100); + return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2); + } + + skb = skb_dequeue(&h5->unrel); + if (skb) { + nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb), + skb->data, skb->len); + if (nskb) { + kfree_skb(skb); + return nskb; + } + + skb_queue_head(&h5->unrel, skb); + bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed"); + } + + spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); + + if (h5->unack.qlen >= h5->tx_win) + goto unlock; + + skb = skb_dequeue(&h5->rel); + if (skb) { + nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb), + skb->data, skb->len); + if (nskb) { + __skb_queue_tail(&h5->unack, skb); + mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT); + spin_unlock_irqrestore(&h5->unack.lock, flags); + return nskb; + } + + skb_queue_head(&h5->rel, skb); + bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed"); + } + +unlock: + spin_unlock_irqrestore(&h5->unack.lock, flags); + + if (test_bit(H5_TX_ACK_REQ, &h5->flags)) + return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0); + + return NULL; +} + +static int h5_flush(struct hci_uart *hu) +{ + BT_DBG("hu %p", hu); + return 0; +} + +static const struct hci_uart_proto h5p = { + .id = HCI_UART_3WIRE, + .name = "Three-wire (H5)", + .open = h5_open, + .close = h5_close, + .setup = h5_setup, + .recv = h5_recv, + .enqueue = h5_enqueue, + .dequeue = h5_dequeue, + .flush = h5_flush, +}; + +static int h5_serdev_probe(struct serdev_device *serdev) +{ + struct device *dev = &serdev->dev; + struct h5 *h5; + const struct h5_device_data *data; + + h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); + if (!h5) + return -ENOMEM; + + h5->hu = &h5->serdev_hu; + h5->serdev_hu.serdev = serdev; + serdev_device_set_drvdata(serdev, h5); + + if (has_acpi_companion(dev)) { + const struct acpi_device_id *match; + + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) + return -ENODEV; + + data = (const struct h5_device_data *)match->driver_data; + h5->vnd = data->vnd; + h5->id = (char *)match->id; + + if (h5->vnd->acpi_gpio_map) + devm_acpi_dev_add_driver_gpios(dev, + h5->vnd->acpi_gpio_map); + } else { + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + h5->vnd = data->vnd; + } + + if (data->driver_info & 
H5_INFO_WAKEUP_DISABLE) + set_bit(H5_WAKEUP_DISABLE, &h5->flags); + + h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(h5->enable_gpio)) + return PTR_ERR(h5->enable_gpio); + + h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake", + GPIOD_OUT_LOW); + if (IS_ERR(h5->device_wake_gpio)) + return PTR_ERR(h5->device_wake_gpio); + + return hci_uart_register_device(&h5->serdev_hu, &h5p); +} + +static void h5_serdev_remove(struct serdev_device *serdev) +{ + struct h5 *h5 = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&h5->serdev_hu); +} + +static int __maybe_unused h5_serdev_suspend(struct device *dev) +{ + struct h5 *h5 = dev_get_drvdata(dev); + int ret = 0; + + if (h5->vnd && h5->vnd->suspend) + ret = h5->vnd->suspend(h5); + + return ret; +} + +static int __maybe_unused h5_serdev_resume(struct device *dev) +{ + struct h5 *h5 = dev_get_drvdata(dev); + int ret = 0; + + if (h5->vnd && h5->vnd->resume) + ret = h5->vnd->resume(h5); + + return ret; +} + +#ifdef CONFIG_BT_HCIUART_RTL +static int h5_btrtl_setup(struct h5 *h5) +{ + struct btrtl_device_info *btrtl_dev; + struct sk_buff *skb; + __le32 baudrate_data; + u32 device_baudrate; + unsigned int controller_baudrate; + bool flow_control; + int err; + + btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id); + if (IS_ERR(btrtl_dev)) + return PTR_ERR(btrtl_dev); + + err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev, + &controller_baudrate, &device_baudrate, + &flow_control); + if (err) + goto out_free; + + baudrate_data = cpu_to_le32(device_baudrate); + skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data), + &baudrate_data, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n"); + err = PTR_ERR(skb); + goto out_free; + } else { + kfree_skb(skb); + } + /* Give the device some time to set up the new baudrate. */ + usleep_range(10000, 20000); + + serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate); + serdev_device_set_flow_control(h5->hu->serdev, flow_control); + + if (flow_control) + set_bit(H5_HW_FLOW_CONTROL, &h5->flags); + + err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev); + /* Give the device some time before the hci-core sends it a reset */ + usleep_range(10000, 20000); + if (err) + goto out_free; + + btrtl_set_quirks(h5->hu->hdev, btrtl_dev); + +out_free: + btrtl_free(btrtl_dev); + + return err; +} + +static void h5_btrtl_open(struct h5 *h5) +{ + /* + * Since h5_btrtl_resume() does a device_reprobe() the suspend handling + * done by the hci_suspend_notifier is not necessary; it actually causes + * delays and a bunch of errors to get logged, so disable it. 
+ */ + if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) + set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags); + + /* Devices always start with these fixed parameters */ + serdev_device_set_flow_control(h5->hu->serdev, false); + serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); + serdev_device_set_baudrate(h5->hu->serdev, 115200); + + if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { + pm_runtime_set_active(&h5->hu->serdev->dev); + pm_runtime_use_autosuspend(&h5->hu->serdev->dev); + pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, + SUSPEND_TIMEOUT_MS); + pm_runtime_enable(&h5->hu->serdev->dev); + } + + /* The controller needs reset to startup */ + gpiod_set_value_cansleep(h5->enable_gpio, 0); + gpiod_set_value_cansleep(h5->device_wake_gpio, 0); + msleep(100); + + /* The controller needs up to 500ms to wakeup */ + gpiod_set_value_cansleep(h5->enable_gpio, 1); + gpiod_set_value_cansleep(h5->device_wake_gpio, 1); + msleep(500); +} + +static void h5_btrtl_close(struct h5 *h5) +{ + if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) + pm_runtime_disable(&h5->hu->serdev->dev); + + gpiod_set_value_cansleep(h5->device_wake_gpio, 0); + gpiod_set_value_cansleep(h5->enable_gpio, 0); +} + +/* Suspend/resume support. On many devices the RTL BT device loses power during + * suspend/resume, causing it to lose its firmware and all state. So we simply + * turn it off on suspend and reprobe on resume. This mirrors how RTL devices + * are handled in the USB driver, where the BTUSB_WAKEUP_DISABLE is used which + * also causes a reprobe on resume. + */ +static int h5_btrtl_suspend(struct h5 *h5) +{ + serdev_device_set_flow_control(h5->hu->serdev, false); + gpiod_set_value_cansleep(h5->device_wake_gpio, 0); + + if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) + gpiod_set_value_cansleep(h5->enable_gpio, 0); + + return 0; +} + +struct h5_btrtl_reprobe { + struct device *dev; + struct work_struct work; +}; + +static void h5_btrtl_reprobe_worker(struct work_struct *work) +{ + struct h5_btrtl_reprobe *reprobe = + container_of(work, struct h5_btrtl_reprobe, work); + int ret; + + ret = device_reprobe(reprobe->dev); + if (ret && ret != -EPROBE_DEFER) + dev_err(reprobe->dev, "Reprobe error %d\n", ret); + + put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); +} + +static int h5_btrtl_resume(struct h5 *h5) +{ + if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { + struct h5_btrtl_reprobe *reprobe; + + reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL); + if (!reprobe) + return -ENOMEM; + + __module_get(THIS_MODULE); + + INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker); + reprobe->dev = get_device(&h5->hu->serdev->dev); + queue_work(system_long_wq, &reprobe->work); + } else { + gpiod_set_value_cansleep(h5->device_wake_gpio, 1); + + if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags)) + serdev_device_set_flow_control(h5->hu->serdev, true); + } + + return 0; +} + +static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false }; +static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false }; +static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false }; +static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = { + { "device-wake-gpios", &btrtl_device_wake_gpios, 1 }, + { "enable-gpios", &btrtl_enable_gpios, 1 }, + { "host-wake-gpios", &btrtl_host_wake_gpios, 1 }, + {}, +}; + +static struct h5_vnd rtl_vnd = { + .setup = h5_btrtl_setup, + .open = h5_btrtl_open, + .close = h5_btrtl_close, + .suspend = h5_btrtl_suspend, + .resume = h5_btrtl_resume, + .acpi_gpio_map = 
acpi_btrtl_gpios, +}; + +static const struct h5_device_data h5_data_rtl8822cs = { + .vnd = &rtl_vnd, +}; + +static const struct h5_device_data h5_data_rtl8723bs = { + .driver_info = H5_INFO_WAKEUP_DISABLE, + .vnd = &rtl_vnd, +}; +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id h5_acpi_match[] = { +#ifdef CONFIG_BT_HCIUART_RTL + { "OBDA0623", (kernel_ulong_t)&h5_data_rtl8723bs }, + { "OBDA8723", (kernel_ulong_t)&h5_data_rtl8723bs }, +#endif + { }, +}; +MODULE_DEVICE_TABLE(acpi, h5_acpi_match); +#endif + +static const struct dev_pm_ops h5_serdev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume) + SET_RUNTIME_PM_OPS(h5_serdev_suspend, h5_serdev_resume, NULL) +}; + +static const struct of_device_id rtl_bluetooth_of_match[] = { +#ifdef CONFIG_BT_HCIUART_RTL + { .compatible = "realtek,rtl8822cs-bt", + .data = (const void *)&h5_data_rtl8822cs }, + { .compatible = "realtek,rtl8723bs-bt", + .data = (const void *)&h5_data_rtl8723bs }, + { .compatible = "realtek,rtl8723cs-bt", + .data = (const void *)&h5_data_rtl8723bs }, + { .compatible = "realtek,rtl8723ds-bt", + .data = (const void *)&h5_data_rtl8723bs }, +#endif + { }, +}; +MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match); + +static struct serdev_device_driver h5_serdev_driver = { + .probe = h5_serdev_probe, + .remove = h5_serdev_remove, + .driver = { + .name = "hci_uart_h5", + .acpi_match_table = ACPI_PTR(h5_acpi_match), + .pm = &h5_serdev_pm_ops, + .of_match_table = rtl_bluetooth_of_match, + }, +}; + +int __init h5_init(void) +{ + serdev_device_driver_register(&h5_serdev_driver); + return hci_uart_register_proto(&h5p); +} + +int __exit h5_deinit(void) +{ + serdev_device_driver_unregister(&h5_serdev_driver); + return hci_uart_unregister_proto(&h5p); +} diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c new file mode 100644 index 000000000..78afb9a34 --- /dev/null +++ b/drivers/bluetooth/hci_intel.c @@ -0,0 +1,1234 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver for Intel devices + * + * Copyright (C) 2015 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" +#include "btintel.h" + +#define STATE_BOOTLOADER 0 +#define STATE_DOWNLOADING 1 +#define STATE_FIRMWARE_LOADED 2 +#define STATE_FIRMWARE_FAILED 3 +#define STATE_BOOTING 4 +#define STATE_LPM_ENABLED 5 +#define STATE_TX_ACTIVE 6 +#define STATE_SUSPENDED 7 +#define STATE_LPM_TRANSACTION 8 + +#define HCI_LPM_WAKE_PKT 0xf0 +#define HCI_LPM_PKT 0xf1 +#define HCI_LPM_MAX_SIZE 10 +#define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE + +#define LPM_OP_TX_NOTIFY 0x00 +#define LPM_OP_SUSPEND_ACK 0x02 +#define LPM_OP_RESUME_ACK 0x03 + +#define LPM_SUSPEND_DELAY_MS 1000 + +struct hci_lpm_pkt { + __u8 opcode; + __u8 dlen; + __u8 data[]; +} __packed; + +struct intel_device { + struct list_head list; + struct platform_device *pdev; + struct gpio_desc *reset; + struct hci_uart *hu; + struct mutex hu_lock; + int irq; +}; + +static LIST_HEAD(intel_device_list); +static DEFINE_MUTEX(intel_device_list_lock); + +struct intel_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; + struct work_struct busy_work; + struct hci_uart *hu; + unsigned long flags; +}; + +static u8 intel_convert_speed(unsigned int speed) +{ + switch (speed) { + case 9600: + return 0x00; + case 19200: + return 0x01; + case 38400: + return 0x02; + case 57600: + return 0x03; + case 115200: + return 0x04; + 
case 230400: + return 0x05; + case 460800: + return 0x06; + case 921600: + return 0x07; + case 1843200: + return 0x08; + case 3250000: + return 0x09; + case 2000000: + return 0x0a; + case 3000000: + return 0x0b; + default: + return 0xff; + } +} + +static int intel_wait_booting(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + int err; + + err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); + + if (err == -EINTR) { + bt_dev_err(hu->hdev, "Device boot interrupted"); + return -EINTR; + } + + if (err) { + bt_dev_err(hu->hdev, "Device boot timeout"); + return -ETIMEDOUT; + } + + return err; +} + +#ifdef CONFIG_PM +static int intel_wait_lpm_transaction(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + int err; + + err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); + + if (err == -EINTR) { + bt_dev_err(hu->hdev, "LPM transaction interrupted"); + return -EINTR; + } + + if (err) { + bt_dev_err(hu->hdev, "LPM transaction timeout"); + return -ETIMEDOUT; + } + + return err; +} + +static int intel_lpm_suspend(struct hci_uart *hu) +{ + static const u8 suspend[] = { 0x01, 0x01, 0x01 }; + struct intel_data *intel = hu->priv; + struct sk_buff *skb; + + if (!test_bit(STATE_LPM_ENABLED, &intel->flags) || + test_bit(STATE_SUSPENDED, &intel->flags)) + return 0; + + if (test_bit(STATE_TX_ACTIVE, &intel->flags)) + return -EAGAIN; + + bt_dev_dbg(hu->hdev, "Suspending"); + + skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL); + if (!skb) { + bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet"); + return -ENOMEM; + } + + skb_put_data(skb, suspend, sizeof(suspend)); + hci_skb_pkt_type(skb) = HCI_LPM_PKT; + + set_bit(STATE_LPM_TRANSACTION, &intel->flags); + + /* LPM flow is a priority, enqueue packet at list head */ + skb_queue_head(&intel->txq, skb); + hci_uart_tx_wakeup(hu); + + intel_wait_lpm_transaction(hu); + /* Even in case of failure, continue and test the suspended flag */ + + clear_bit(STATE_LPM_TRANSACTION, &intel->flags); + + if (!test_bit(STATE_SUSPENDED, &intel->flags)) { + bt_dev_err(hu->hdev, "Device suspend error"); + return -EINVAL; + } + + bt_dev_dbg(hu->hdev, "Suspended"); + + hci_uart_set_flow_control(hu, true); + + return 0; +} + +static int intel_lpm_resume(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + struct sk_buff *skb; + + if (!test_bit(STATE_LPM_ENABLED, &intel->flags) || + !test_bit(STATE_SUSPENDED, &intel->flags)) + return 0; + + bt_dev_dbg(hu->hdev, "Resuming"); + + hci_uart_set_flow_control(hu, false); + + skb = bt_skb_alloc(0, GFP_KERNEL); + if (!skb) { + bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet"); + return -ENOMEM; + } + + hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT; + + set_bit(STATE_LPM_TRANSACTION, &intel->flags); + + /* LPM flow is a priority, enqueue packet at list head */ + skb_queue_head(&intel->txq, skb); + hci_uart_tx_wakeup(hu); + + intel_wait_lpm_transaction(hu); + /* Even in case of failure, continue and test the suspended flag */ + + clear_bit(STATE_LPM_TRANSACTION, &intel->flags); + + if (test_bit(STATE_SUSPENDED, &intel->flags)) { + bt_dev_err(hu->hdev, "Device resume error"); + return -EINVAL; + } + + bt_dev_dbg(hu->hdev, "Resumed"); + + return 0; +} +#endif /* CONFIG_PM */ + +static int intel_lpm_host_wake(struct hci_uart *hu) +{ + static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 }; + struct intel_data *intel = hu->priv; + struct sk_buff *skb; + + 
hci_uart_set_flow_control(hu, false); + + clear_bit(STATE_SUSPENDED, &intel->flags); + + skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL); + if (!skb) { + bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet"); + return -ENOMEM; + } + + skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack)); + hci_skb_pkt_type(skb) = HCI_LPM_PKT; + + /* LPM flow is a priority, enqueue packet at list head */ + skb_queue_head(&intel->txq, skb); + hci_uart_tx_wakeup(hu); + + bt_dev_dbg(hu->hdev, "Resumed by controller"); + + return 0; +} + +static irqreturn_t intel_irq(int irq, void *dev_id) +{ + struct intel_device *idev = dev_id; + + dev_info(&idev->pdev->dev, "hci_intel irq\n"); + + mutex_lock(&idev->hu_lock); + if (idev->hu) + intel_lpm_host_wake(idev->hu); + mutex_unlock(&idev->hu_lock); + + /* Host/Controller are now LPM resumed, trigger a new delayed suspend */ + pm_runtime_get(&idev->pdev->dev); + pm_runtime_mark_last_busy(&idev->pdev->dev); + pm_runtime_put_autosuspend(&idev->pdev->dev); + + return IRQ_HANDLED; +} + +static int intel_set_power(struct hci_uart *hu, bool powered) +{ + struct intel_device *idev; + int err = -ENODEV; + + if (!hu->tty->dev) + return err; + + mutex_lock(&intel_device_list_lock); + + list_for_each_entry(idev, &intel_device_list, list) { + /* tty device and pdev device should share the same parent + * which is the UART port. + */ + if (hu->tty->dev->parent != idev->pdev->dev.parent) + continue; + + if (!idev->reset) { + err = -ENOTSUPP; + break; + } + + BT_INFO("hu %p, Switching compatible pm device (%s) to %u", + hu, dev_name(&idev->pdev->dev), powered); + + gpiod_set_value(idev->reset, powered); + + /* Provide to idev a hu reference which is used to run LPM + * transactions (lpm suspend/resume) from PM callbacks. + * hu needs to be protected against concurrent removing during + * these PM ops. + */ + mutex_lock(&idev->hu_lock); + idev->hu = powered ? 
hu : NULL; + mutex_unlock(&idev->hu_lock); + + if (idev->irq < 0) + break; + + if (powered && device_can_wakeup(&idev->pdev->dev)) { + err = devm_request_threaded_irq(&idev->pdev->dev, + idev->irq, NULL, + intel_irq, + IRQF_ONESHOT, + "bt-host-wake", idev); + if (err) { + BT_ERR("hu %p, unable to allocate irq-%d", + hu, idev->irq); + break; + } + + device_wakeup_enable(&idev->pdev->dev); + + pm_runtime_set_active(&idev->pdev->dev); + pm_runtime_use_autosuspend(&idev->pdev->dev); + pm_runtime_set_autosuspend_delay(&idev->pdev->dev, + LPM_SUSPEND_DELAY_MS); + pm_runtime_enable(&idev->pdev->dev); + } else if (!powered && device_may_wakeup(&idev->pdev->dev)) { + devm_free_irq(&idev->pdev->dev, idev->irq, idev); + device_wakeup_disable(&idev->pdev->dev); + + pm_runtime_disable(&idev->pdev->dev); + } + } + + mutex_unlock(&intel_device_list_lock); + + return err; +} + +static void intel_busy_work(struct work_struct *work) +{ + struct intel_data *intel = container_of(work, struct intel_data, + busy_work); + struct intel_device *idev; + + if (!intel->hu->tty->dev) + return; + + /* Link is busy, delay the suspend */ + mutex_lock(&intel_device_list_lock); + list_for_each_entry(idev, &intel_device_list, list) { + if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) { + pm_runtime_get(&idev->pdev->dev); + pm_runtime_mark_last_busy(&idev->pdev->dev); + pm_runtime_put_autosuspend(&idev->pdev->dev); + break; + } + } + mutex_unlock(&intel_device_list_lock); +} + +static int intel_open(struct hci_uart *hu) +{ + struct intel_data *intel; + + BT_DBG("hu %p", hu); + + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + + intel = kzalloc(sizeof(*intel), GFP_KERNEL); + if (!intel) + return -ENOMEM; + + skb_queue_head_init(&intel->txq); + INIT_WORK(&intel->busy_work, intel_busy_work); + + intel->hu = hu; + + hu->priv = intel; + + if (!intel_set_power(hu, true)) + set_bit(STATE_BOOTING, &intel->flags); + + return 0; +} + +static int intel_close(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p", hu); + + cancel_work_sync(&intel->busy_work); + + intel_set_power(hu, false); + + skb_queue_purge(&intel->txq); + kfree_skb(intel->rx_skb); + kfree(intel); + + hu->priv = NULL; + return 0; +} + +static int intel_flush(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&intel->txq); + + return 0; +} + +static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) +{ + struct sk_buff *skb; + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = skb_put(skb, sizeof(*evt)); + evt->ncmd = 0x01; + evt->opcode = cpu_to_le16(opcode); + + skb_put_u8(skb, 0x00); + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + struct intel_data *intel = hu->priv; + struct hci_dev *hdev = hu->hdev; + u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 }; + struct sk_buff *skb; + int err; + + /* This can be the first command sent to the chip, check + * that the controller is ready. 
+ */ + err = intel_wait_booting(hu); + + clear_bit(STATE_BOOTING, &intel->flags); + + /* In case of timeout, try to continue anyway */ + if (err && err != -ETIMEDOUT) + return err; + + bt_dev_info(hdev, "Change controller speed to %d", speed); + + speed_cmd[3] = intel_convert_speed(speed); + if (speed_cmd[3] == 0xff) { + bt_dev_err(hdev, "Unsupported speed"); + return -EINVAL; + } + + /* Device will not accept speed change if Intel version has not been + * previously requested. + */ + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading Intel version information failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL); + if (!skb) { + bt_dev_err(hdev, "Failed to alloc memory for baudrate packet"); + return -ENOMEM; + } + + skb_put_data(skb, speed_cmd, sizeof(speed_cmd)); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + hci_uart_set_flow_control(hu, true); + + skb_queue_tail(&intel->txq, skb); + hci_uart_tx_wakeup(hu); + + /* wait 100ms to change baudrate on controller side */ + msleep(100); + + hci_uart_set_baudrate(hu, speed); + hci_uart_set_flow_control(hu, false); + + return 0; +} + +static int intel_setup(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + struct intel_version ver; + struct intel_boot_params params; + struct intel_device *idev; + const struct firmware *fw; + char fwname[64]; + u32 boot_param; + ktime_t calltime, delta, rettime; + unsigned long long duration; + unsigned int init_speed, oper_speed; + int speed_change = 0; + int err; + + bt_dev_dbg(hdev, "start intel_setup"); + + hu->hdev->set_diag = btintel_set_diag; + hu->hdev->set_bdaddr = btintel_set_bdaddr; + + /* Set the default boot parameter to 0x0 and it is updated to + * SKU specific boot parameter after reading Intel_Write_Boot_Params + * command while downloading the firmware. + */ + boot_param = 0x00000000; + + calltime = ktime_get(); + + if (hu->init_speed) + init_speed = hu->init_speed; + else + init_speed = hu->proto->init_speed; + + if (hu->oper_speed) + oper_speed = hu->oper_speed; + else + oper_speed = hu->proto->oper_speed; + + if (oper_speed && init_speed && oper_speed != init_speed) + speed_change = 1; + + /* Check that the controller is ready */ + err = intel_wait_booting(hu); + + clear_bit(STATE_BOOTING, &intel->flags); + + /* In case of timeout, try to continue anyway */ + if (err && err != -ETIMEDOUT) + return err; + + set_bit(STATE_BOOTLOADER, &intel->flags); + + /* Read the Intel version information to determine if the device + * is in bootloader mode or if it already has operational firmware + * loaded. + */ + err = btintel_read_version(hdev, &ver); + if (err) + return err; + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (ver.hw_platform != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", + ver.hw_platform); + return -EINVAL; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. 
+ */ + switch (ver.hw_variant) { + case 0x0b: /* LnP */ + case 0x0c: /* WsP */ + case 0x12: /* ThP */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", + ver.hw_variant); + return -EINVAL; + } + + btintel_version_info(hdev, &ver); + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. + * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver.fw_variant == 0x23) { + clear_bit(STATE_BOOTLOADER, &intel->flags); + btintel_check_bdaddr(hdev); + return 0; + } + + /* If the device is not in bootloader mode, then the only possible + * choice is to return an error and abort the device initialization. + */ + if (ver.fw_variant != 0x06) { + bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", + ver.fw_variant); + return -ENODEV; + } + + /* Read the secure boot parameters to identify the operating + * details of the bootloader. + */ + err = btintel_read_boot_params(hdev, ¶ms); + if (err) + return err; + + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (params.limited_cce != 0x00) { + bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", + params.limited_cce); + return -EINVAL; + } + + /* If the OTP has no valid Bluetooth device address, then there will + * also be no valid address for the operational firmware. + */ + if (!bacmp(¶ms.otp_bdaddr, BDADDR_ANY)) { + bt_dev_info(hdev, "No device address configured"); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + + /* With this Intel bootloader only the hardware variant and device + * revision information are used to select the right firmware for SfP + * and WsP. + * + * The firmware filename is ibt--.sfi. + * + * Currently the supported hardware variants are: + * 11 (0x0b) for iBT 3.0 (LnP/SfP) + * 12 (0x0c) for iBT 3.5 (WsP) + * + * For ThP/JfP and for future SKU's, the FW name varies based on HW + * variant, HW revision and FW revision, as these are dependent on CNVi + * and RF Combination. + * + * 18 (0x12) for iBT3.5 (ThP/JfP) + * + * The firmware file name for these will be + * ibt---.sfi. 
+ * + */ + switch (ver.hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", + ver.hw_variant, le16_to_cpu(params.dev_revid)); + break; + case 0x12: /* ThP */ + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi", + ver.hw_variant, ver.hw_revision, ver.fw_revision); + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", + ver.hw_variant); + return -EINVAL; + } + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", + err); + return err; + } + + bt_dev_info(hdev, "Found device firmware: %s", fwname); + + /* Save the DDC file name for later */ + switch (ver.hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", + ver.hw_variant, le16_to_cpu(params.dev_revid)); + break; + case 0x12: /* ThP */ + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc", + ver.hw_variant, ver.hw_revision, ver.fw_revision); + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", + ver.hw_variant); + return -EINVAL; + } + + if (fw->size < 644) { + bt_dev_err(hdev, "Invalid size of firmware file (%zu)", + fw->size); + err = -EBADF; + goto done; + } + + set_bit(STATE_DOWNLOADING, &intel->flags); + + /* Start firmware downloading and get boot parameter */ + err = btintel_download_firmware(hdev, &ver, fw, &boot_param); + if (err < 0) + goto done; + + set_bit(STATE_FIRMWARE_LOADED, &intel->flags); + + bt_dev_info(hdev, "Waiting for firmware download to complete"); + + /* Before switching the device into operational mode and with that + * booting the loaded firmware, wait for the bootloader notification + * that all fragments have been successfully received. + * + * When the event processing receives the notification, then the + * STATE_DOWNLOADING flag will be cleared. + * + * The firmware loading should not take longer than 5 seconds + * and thus just timeout if that happens and fail the setup + * of this device. + */ + err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(5000)); + if (err == -EINTR) { + bt_dev_err(hdev, "Firmware loading interrupted"); + err = -EINTR; + goto done; + } + + if (err) { + bt_dev_err(hdev, "Firmware loading timeout"); + err = -ETIMEDOUT; + goto done; + } + + if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) { + bt_dev_err(hdev, "Firmware loading failed"); + err = -ENOEXEC; + goto done; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); + +done: + release_firmware(fw); + + /* Check if there was an error and if is not -EALREADY which means the + * firmware has already been loaded. + */ + if (err < 0 && err != -EALREADY) + return err; + + /* We need to restore the default speed before Intel reset */ + if (speed_change) { + err = intel_set_baudrate(hu, init_speed); + if (err) + return err; + } + + calltime = ktime_get(); + + set_bit(STATE_BOOTING, &intel->flags); + + err = btintel_send_intel_reset(hdev, boot_param); + if (err) + return err; + + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending bootup notification. + * + * Booting into operational firmware should not take longer than + * 1 second. However if that happens, then just fail the setup + * since something went wrong. 
+ */ + bt_dev_info(hdev, "Waiting for device to boot"); + + err = intel_wait_booting(hu); + if (err) + return err; + + clear_bit(STATE_BOOTING, &intel->flags); + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device booted in %llu usecs", duration); + + /* Enable LPM if matching pdev with wakeup enabled, set TX active + * until further LPM TX notification. + */ + mutex_lock(&intel_device_list_lock); + list_for_each_entry(idev, &intel_device_list, list) { + if (!hu->tty->dev) + break; + if (hu->tty->dev->parent == idev->pdev->dev.parent) { + if (device_may_wakeup(&idev->pdev->dev)) { + set_bit(STATE_LPM_ENABLED, &intel->flags); + set_bit(STATE_TX_ACTIVE, &intel->flags); + } + break; + } + } + mutex_unlock(&intel_device_list_lock); + + /* Ignore errors, device can work without DDC parameters */ + btintel_load_ddc_config(hdev, fwname); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + if (speed_change) { + err = intel_set_baudrate(hu, oper_speed); + if (err) + return err; + } + + bt_dev_info(hdev, "Setup complete"); + + clear_bit(STATE_BOOTLOADER, &intel->flags); + + return 0; +} + +static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct intel_data *intel = hu->priv; + struct hci_event_hdr *hdr; + + if (!test_bit(STATE_BOOTLOADER, &intel->flags) && + !test_bit(STATE_BOOTING, &intel->flags)) + goto recv; + + hdr = (void *)skb->data; + + /* When the firmware loading completes the device sends + * out a vendor specific event indicating the result of + * the firmware loading. + */ + if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 && + skb->data[2] == 0x06) { + if (skb->data[3] != 0x00) + set_bit(STATE_FIRMWARE_FAILED, &intel->flags); + + if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) && + test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) + wake_up_bit(&intel->flags, STATE_DOWNLOADING); + + /* When switching to the operational firmware the device + * sends a vendor specific event indicating that the bootup + * completed. 
+ */ + } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 && + skb->data[2] == 0x02) { + if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) + wake_up_bit(&intel->flags, STATE_BOOTING); + } +recv: + return hci_recv_frame(hdev, skb); +} + +static void intel_recv_lpm_notify(struct hci_dev *hdev, int value) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct intel_data *intel = hu->priv; + + bt_dev_dbg(hdev, "TX idle notification (%d)", value); + + if (value) { + set_bit(STATE_TX_ACTIVE, &intel->flags); + schedule_work(&intel->busy_work); + } else { + clear_bit(STATE_TX_ACTIVE, &intel->flags); + } +} + +static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_lpm_pkt *lpm = (void *)skb->data; + struct hci_uart *hu = hci_get_drvdata(hdev); + struct intel_data *intel = hu->priv; + + switch (lpm->opcode) { + case LPM_OP_TX_NOTIFY: + if (lpm->dlen < 1) { + bt_dev_err(hu->hdev, "Invalid LPM notification packet"); + break; + } + intel_recv_lpm_notify(hdev, lpm->data[0]); + break; + case LPM_OP_SUSPEND_ACK: + set_bit(STATE_SUSPENDED, &intel->flags); + if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) + wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION); + break; + case LPM_OP_RESUME_ACK: + clear_bit(STATE_SUSPENDED, &intel->flags); + if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) + wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION); + break; + default: + bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode); + break; + } + + kfree_skb(skb); + + return 0; +} + +#define INTEL_RECV_LPM \ + .type = HCI_LPM_PKT, \ + .hlen = HCI_LPM_HDR_SIZE, \ + .loff = 1, \ + .lsize = 1, \ + .maxlen = HCI_LPM_MAX_SIZE + +static const struct h4_recv_pkt intel_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = intel_recv_event }, + { INTEL_RECV_LPM, .recv = intel_recv_lpm }, +}; + +static int intel_recv(struct hci_uart *hu, const void *data, int count) +{ + struct intel_data *intel = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, + intel_recv_pkts, + ARRAY_SIZE(intel_recv_pkts)); + if (IS_ERR(intel->rx_skb)) { + int err = PTR_ERR(intel->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + intel->rx_skb = NULL; + return err; + } + + return count; +} + +static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct intel_data *intel = hu->priv; + struct intel_device *idev; + + BT_DBG("hu %p skb %p", hu, skb); + + if (!hu->tty->dev) + goto out_enqueue; + + /* Be sure our controller is resumed and potential LPM transaction + * completed before enqueuing any packet. 
+ */ + mutex_lock(&intel_device_list_lock); + list_for_each_entry(idev, &intel_device_list, list) { + if (hu->tty->dev->parent == idev->pdev->dev.parent) { + pm_runtime_get_sync(&idev->pdev->dev); + pm_runtime_mark_last_busy(&idev->pdev->dev); + pm_runtime_put_autosuspend(&idev->pdev->dev); + break; + } + } + mutex_unlock(&intel_device_list_lock); +out_enqueue: + skb_queue_tail(&intel->txq, skb); + + return 0; +} + +static struct sk_buff *intel_dequeue(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&intel->txq); + if (!skb) + return skb; + + if (test_bit(STATE_BOOTLOADER, &intel->flags) && + (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) { + struct hci_command_hdr *cmd = (void *)skb->data; + __u16 opcode = le16_to_cpu(cmd->opcode); + + /* When the 0xfc01 command is issued to boot into + * the operational firmware, it will actually not + * send a command complete event. To keep the flow + * control working inject that event here. + */ + if (opcode == 0xfc01) + inject_cmd_complete(hu->hdev, opcode); + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + return skb; +} + +static const struct hci_uart_proto intel_proto = { + .id = HCI_UART_INTEL, + .name = "Intel", + .manufacturer = 2, + .init_speed = 115200, + .oper_speed = 3000000, + .open = intel_open, + .close = intel_close, + .flush = intel_flush, + .setup = intel_setup, + .set_baudrate = intel_set_baudrate, + .recv = intel_recv, + .enqueue = intel_enqueue, + .dequeue = intel_dequeue, +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id intel_acpi_match[] = { + { "INT33E1", 0 }, + { "INT33E3", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, intel_acpi_match); +#endif + +#ifdef CONFIG_PM +static int intel_suspend_device(struct device *dev) +{ + struct intel_device *idev = dev_get_drvdata(dev); + + mutex_lock(&idev->hu_lock); + if (idev->hu) + intel_lpm_suspend(idev->hu); + mutex_unlock(&idev->hu_lock); + + return 0; +} + +static int intel_resume_device(struct device *dev) +{ + struct intel_device *idev = dev_get_drvdata(dev); + + mutex_lock(&idev->hu_lock); + if (idev->hu) + intel_lpm_resume(idev->hu); + mutex_unlock(&idev->hu_lock); + + return 0; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int intel_suspend(struct device *dev) +{ + struct intel_device *idev = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(idev->irq); + + return intel_suspend_device(dev); +} + +static int intel_resume(struct device *dev) +{ + struct intel_device *idev = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(idev->irq); + + return intel_resume_device(dev); +} +#endif + +static const struct dev_pm_ops intel_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume) + SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL) +}; + +static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; +static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false }; + +static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = { + { "reset-gpios", &reset_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO }, + { "host-wake-gpios", &host_wake_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO }, + { } +}; + +static int intel_probe(struct platform_device *pdev) +{ + struct intel_device *idev; + int ret; + + idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); + if (!idev) + return -ENOMEM; + + mutex_init(&idev->hu_lock); + + idev->pdev = pdev; + + ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, 
acpi_hci_intel_gpios); + if (ret) + dev_dbg(&pdev->dev, "Unable to add GPIO mapping table\n"); + + idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(idev->reset)) { + dev_err(&pdev->dev, "Unable to retrieve gpio\n"); + return PTR_ERR(idev->reset); + } + + idev->irq = platform_get_irq(pdev, 0); + if (idev->irq < 0) { + struct gpio_desc *host_wake; + + dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n"); + + host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN); + if (IS_ERR(host_wake)) { + dev_err(&pdev->dev, "Unable to retrieve IRQ\n"); + goto no_irq; + } + + idev->irq = gpiod_to_irq(host_wake); + if (idev->irq < 0) { + dev_err(&pdev->dev, "No corresponding irq for gpio\n"); + goto no_irq; + } + } + + /* Only enable wake-up/irq when controller is powered */ + device_set_wakeup_capable(&pdev->dev, true); + device_wakeup_disable(&pdev->dev); + +no_irq: + platform_set_drvdata(pdev, idev); + + /* Place this instance on the device list */ + mutex_lock(&intel_device_list_lock); + list_add_tail(&idev->list, &intel_device_list); + mutex_unlock(&intel_device_list_lock); + + dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n", + desc_to_gpio(idev->reset), idev->irq); + + return 0; +} + +static int intel_remove(struct platform_device *pdev) +{ + struct intel_device *idev = platform_get_drvdata(pdev); + + device_wakeup_disable(&pdev->dev); + + mutex_lock(&intel_device_list_lock); + list_del(&idev->list); + mutex_unlock(&intel_device_list_lock); + + dev_info(&pdev->dev, "unregistered.\n"); + + return 0; +} + +static struct platform_driver intel_driver = { + .probe = intel_probe, + .remove = intel_remove, + .driver = { + .name = "hci_intel", + .acpi_match_table = ACPI_PTR(intel_acpi_match), + .pm = &intel_pm_ops, + }, +}; + +int __init intel_init(void) +{ + int err; + + err = platform_driver_register(&intel_driver); + if (err) + return err; + + return hci_uart_register_proto(&intel_proto); +} + +int __exit intel_deinit(void) +{ + platform_driver_unregister(&intel_driver); + + return hci_uart_unregister_proto(&intel_proto); +} diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c new file mode 100644 index 000000000..a26367e9f --- /dev/null +++ b/drivers/bluetooth/hci_ldisc.c @@ -0,0 +1,926 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "btintel.h" +#include "btbcm.h" +#include "hci_uart.h" + +#define VERSION "2.3" + +static const struct hci_uart_proto *hup[HCI_UART_MAX_PROTO]; + +int hci_uart_register_proto(const struct hci_uart_proto *p) +{ + if (p->id >= HCI_UART_MAX_PROTO) + return -EINVAL; + + if (hup[p->id]) + return -EEXIST; + + hup[p->id] = p; + + BT_INFO("HCI UART protocol %s registered", p->name); + + return 0; +} + +int hci_uart_unregister_proto(const struct hci_uart_proto *p) +{ + if (p->id >= HCI_UART_MAX_PROTO) + return -EINVAL; + + if (!hup[p->id]) + return -EINVAL; + + hup[p->id] = NULL; + + return 0; +} + +static const struct hci_uart_proto *hci_uart_get_proto(unsigned int id) +{ + if (id >= HCI_UART_MAX_PROTO) + return NULL; + + return hup[id]; +} + +static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type) +{ + 
struct hci_dev *hdev = hu->hdev; + + /* Update HCI stat counters */ + switch (pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } +} + +static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) +{ + struct sk_buff *skb = hu->tx_skb; + + if (!skb) { + percpu_down_read(&hu->proto_lock); + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + skb = hu->proto->dequeue(hu); + + percpu_up_read(&hu->proto_lock); + } else { + hu->tx_skb = NULL; + } + + return skb; +} + +int hci_uart_tx_wakeup(struct hci_uart *hu) +{ + /* This may be called in an IRQ context, so we can't sleep. Therefore + * we try to acquire the lock only, and if that fails we assume the + * tty is being closed because that is the only time the write lock is + * acquired. If, however, at some point in the future the write lock + * is also acquired in other situations, then this must be revisited. + */ + if (!percpu_down_read_trylock(&hu->proto_lock)) + return 0; + + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + goto no_schedule; + + set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) + goto no_schedule; + + BT_DBG(""); + + schedule_work(&hu->write_work); + +no_schedule: + percpu_up_read(&hu->proto_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(hci_uart_tx_wakeup); + +static void hci_uart_write_work(struct work_struct *work) +{ + struct hci_uart *hu = container_of(work, struct hci_uart, write_work); + struct tty_struct *tty = hu->tty; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + + /* REVISIT: should we cope with bad skbs or ->write() returning + * and error value ? + */ + +restart: + clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + + while ((skb = hci_uart_dequeue(hu))) { + int len; + + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + len = tty->ops->write(tty, skb->data, skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len) { + hu->tx_skb = skb; + break; + } + + hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); + kfree_skb(skb); + } + + clear_bit(HCI_UART_SENDING, &hu->tx_state); + if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)) + goto restart; + + wake_up_bit(&hu->tx_state, HCI_UART_SENDING); +} + +void hci_uart_init_work(struct work_struct *work) +{ + struct hci_uart *hu = container_of(work, struct hci_uart, init_ready); + int err; + struct hci_dev *hdev; + + if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) + return; + + err = hci_register_dev(hu->hdev); + if (err < 0) { + BT_ERR("Can't register HCI device"); + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + hu->proto->close(hu); + hdev = hu->hdev; + hu->hdev = NULL; + hci_free_dev(hdev); + return; + } + + set_bit(HCI_UART_REGISTERED, &hu->flags); +} + +int hci_uart_init_ready(struct hci_uart *hu) +{ + if (!test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) + return -EALREADY; + + schedule_work(&hu->init_ready); + + return 0; +} + +int hci_uart_wait_until_sent(struct hci_uart *hu) +{ + return wait_on_bit_timeout(&hu->tx_state, HCI_UART_SENDING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(2000)); +} + +/* ------- Interface to HCI layer ------ */ +/* Reset device */ +static int hci_uart_flush(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct tty_struct *tty = hu->tty; + + BT_DBG("hdev %p tty %p", hdev, tty); + + if (hu->tx_skb) { + kfree_skb(hu->tx_skb); hu->tx_skb = NULL; + } + + /* Flush any pending characters 
in the driver and discipline. */ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + + percpu_down_read(&hu->proto_lock); + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + hu->proto->flush(hu); + + percpu_up_read(&hu->proto_lock); + + return 0; +} + +/* Initialize device */ +static int hci_uart_open(struct hci_dev *hdev) +{ + BT_DBG("%s %p", hdev->name, hdev); + + /* Undo clearing this from hci_uart_close() */ + hdev->flush = hci_uart_flush; + + return 0; +} + +/* Close device */ +static int hci_uart_close(struct hci_dev *hdev) +{ + BT_DBG("hdev %p", hdev); + + hci_uart_flush(hdev); + hdev->flush = NULL; + return 0; +} + +/* Send frames from HCI layer */ +static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); + + percpu_down_read(&hu->proto_lock); + + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + percpu_up_read(&hu->proto_lock); + return -EUNATCH; + } + + hu->proto->enqueue(hu, skb); + percpu_up_read(&hu->proto_lock); + + hci_uart_tx_wakeup(hu); + + return 0; +} + +/* Check the underlying device or tty has flow control support */ +bool hci_uart_has_flow_control(struct hci_uart *hu) +{ + /* serdev nodes check if the needed operations are present */ + if (hu->serdev) + return true; + + if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset) + return true; + + return false; +} + +/* Flow control or un-flow control the device */ +void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) +{ + struct tty_struct *tty = hu->tty; + struct ktermios ktermios; + int status; + unsigned int set = 0; + unsigned int clear = 0; + + if (hu->serdev) { + serdev_device_set_flow_control(hu->serdev, !enable); + serdev_device_set_rts(hu->serdev, !enable); + return; + } + + if (enable) { + /* Disable hardware flow control */ + ktermios = tty->termios; + ktermios.c_cflag &= ~CRTSCTS; + tty_set_termios(tty, &ktermios); + BT_DBG("Disabling hardware flow control: %s", + (tty->termios.c_cflag & CRTSCTS) ? "failed" : "success"); + + /* Clear RTS to prevent the device from sending */ + /* Most UARTs need OUT2 to enable interrupts */ + status = tty->driver->ops->tiocmget(tty); + BT_DBG("Current tiocm 0x%x", status); + + set &= ~(TIOCM_OUT2 | TIOCM_RTS); + clear = ~set; + set &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | + TIOCM_OUT2 | TIOCM_LOOP; + clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | + TIOCM_OUT2 | TIOCM_LOOP; + status = tty->driver->ops->tiocmset(tty, set, clear); + BT_DBG("Clearing RTS: %s", status ? "failed" : "success"); + } else { + /* Set RTS to allow the device to send again */ + status = tty->driver->ops->tiocmget(tty); + BT_DBG("Current tiocm 0x%x", status); + + set |= (TIOCM_OUT2 | TIOCM_RTS); + clear = ~set; + set &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | + TIOCM_OUT2 | TIOCM_LOOP; + clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | + TIOCM_OUT2 | TIOCM_LOOP; + status = tty->driver->ops->tiocmset(tty, set, clear); + BT_DBG("Setting RTS: %s", status ? "failed" : "success"); + + /* Re-enable hardware flow control */ + ktermios = tty->termios; + ktermios.c_cflag |= CRTSCTS; + tty_set_termios(tty, &ktermios); + BT_DBG("Enabling hardware flow control: %s", + !(tty->termios.c_cflag & CRTSCTS) ? 
"failed" : "success"); + } +} + +void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, + unsigned int oper_speed) +{ + hu->init_speed = init_speed; + hu->oper_speed = oper_speed; +} + +void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + struct tty_struct *tty = hu->tty; + struct ktermios ktermios; + + ktermios = tty->termios; + ktermios.c_cflag &= ~CBAUD; + tty_termios_encode_baud_rate(&ktermios, speed, speed); + + /* tty_set_termios() return not checked as it is always 0 */ + tty_set_termios(tty, &ktermios); + + BT_DBG("%s: New tty speeds: %d/%d", hu->hdev->name, + tty->termios.c_ispeed, tty->termios.c_ospeed); +} + +static int hci_uart_setup(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct hci_rp_read_local_version *ver; + struct sk_buff *skb; + unsigned int speed; + int err; + + /* Init speed if any */ + if (hu->init_speed) + speed = hu->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + else + speed = 0; + + if (speed) + hci_uart_set_baudrate(hu, speed); + + /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + + if (hu->proto->set_baudrate && speed) { + err = hu->proto->set_baudrate(hu, speed); + if (!err) + hci_uart_set_baudrate(hu, speed); + } + + if (hu->proto->setup) + return hu->proto->setup(hu); + + if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags)) + return 0; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading local version information failed (%ld)", + hdev->name, PTR_ERR(skb)); + return 0; + } + + if (skb->len != sizeof(*ver)) { + BT_ERR("%s: Event length mismatch for version information", + hdev->name); + goto done; + } + + ver = (struct hci_rp_read_local_version *)skb->data; + + switch (le16_to_cpu(ver->manufacturer)) { +#ifdef CONFIG_BT_HCIUART_INTEL + case 2: + hdev->set_bdaddr = btintel_set_bdaddr; + btintel_check_bdaddr(hdev); + break; +#endif +#ifdef CONFIG_BT_HCIUART_BCM + case 15: + hdev->set_bdaddr = btbcm_set_bdaddr; + btbcm_check_bdaddr(hdev); + break; +#endif + default: + break; + } + +done: + kfree_skb(skb); + return 0; +} + +/* ------ LDISC part ------ */ +/* hci_uart_tty_open + * + * Called when line discipline changed to HCI_UART. 
+ * + * Arguments: + * tty pointer to tty info structure + * Return Value: + * 0 if success, otherwise error code + */ +static int hci_uart_tty_open(struct tty_struct *tty) +{ + struct hci_uart *hu; + + BT_DBG("tty %p", tty); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Error if the tty has no write op instead of leaving an exploitable + * hole + */ + if (tty->ops->write == NULL) + return -EOPNOTSUPP; + + hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL); + if (!hu) { + BT_ERR("Can't allocate control structure"); + return -ENFILE; + } + if (percpu_init_rwsem(&hu->proto_lock)) { + BT_ERR("Can't allocate semaphore structure"); + kfree(hu); + return -ENOMEM; + } + + tty->disc_data = hu; + hu->tty = tty; + tty->receive_room = 65536; + + /* disable alignment support by default */ + hu->alignment = 1; + hu->padding = 0; + + INIT_WORK(&hu->init_ready, hci_uart_init_work); + INIT_WORK(&hu->write_work, hci_uart_write_work); + + /* Flush any pending characters in the driver */ + tty_driver_flush_buffer(tty); + + return 0; +} + +/* hci_uart_tty_close() + * + * Called when the line discipline is changed to something + * else, the tty is closed, or the tty detects a hangup. + */ +static void hci_uart_tty_close(struct tty_struct *tty) +{ + struct hci_uart *hu = tty->disc_data; + struct hci_dev *hdev; + + BT_DBG("tty %p", tty); + + /* Detach from the tty */ + tty->disc_data = NULL; + + if (!hu) + return; + + hdev = hu->hdev; + if (hdev) + hci_uart_close(hdev); + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + percpu_down_write(&hu->proto_lock); + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + percpu_up_write(&hu->proto_lock); + + cancel_work_sync(&hu->init_ready); + cancel_work_sync(&hu->write_work); + + if (hdev) { + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); + hci_free_dev(hdev); + } + hu->proto->close(hu); + } + clear_bit(HCI_UART_PROTO_SET, &hu->flags); + + percpu_free_rwsem(&hu->proto_lock); + + kfree(hu); +} + +/* hci_uart_tty_wakeup() + * + * Callback for transmit wakeup. Called when low level + * device driver can accept more send data. + * + * Arguments: tty pointer to associated tty instance data + * Return Value: None + */ +static void hci_uart_tty_wakeup(struct tty_struct *tty) +{ + struct hci_uart *hu = tty->disc_data; + + BT_DBG(""); + + if (!hu) + return; + + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + + if (tty != hu->tty) + return; + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + hci_uart_tx_wakeup(hu); +} + +/* hci_uart_tty_receive() + * + * Called by tty low level driver when receive data is + * available. 
+ * + * Arguments: tty pointer to tty isntance data + * data pointer to received data + * flags pointer to flags for data + * count count of received data in bytes + * + * Return Value: None + */ +static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, + const u8 *flags, size_t count) +{ + struct hci_uart *hu = tty->disc_data; + + if (!hu || tty != hu->tty) + return; + + percpu_down_read(&hu->proto_lock); + + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + percpu_up_read(&hu->proto_lock); + return; + } + + /* It does not need a lock here as it is already protected by a mutex in + * tty caller + */ + hu->proto->recv(hu, data, count); + percpu_up_read(&hu->proto_lock); + + if (hu->hdev) + hu->hdev->stat.byte_rx += count; + + tty_unthrottle(tty); +} + +static int hci_uart_register_dev(struct hci_uart *hu) +{ + struct hci_dev *hdev; + int err; + + BT_DBG(""); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + hu->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, hu); + + /* Only when vendor specific setup callback is provided, consider + * the manufacturer information valid. This avoids filling in the + * value for Ericsson when nothing is specified. + */ + if (hu->proto->setup) + hdev->manufacturer = hu->proto->manufacturer; + + hdev->open = hci_uart_open; + hdev->close = hci_uart_close; + hdev->flush = hci_uart_flush; + hdev->send = hci_uart_send_frame; + hdev->setup = hci_uart_setup; + SET_HCIDEV_DEV(hdev, hu->tty->dev); + + if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags)) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + + if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) + set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); + + if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) + hdev->dev_type = HCI_AMP; + else + hdev->dev_type = HCI_PRIMARY; + + /* Only call open() for the protocol after hdev is fully initialized as + * open() (or a timer/workqueue it starts) may attempt to reference it. + */ + err = hu->proto->open(hu); + if (err) { + hu->hdev = NULL; + hci_free_dev(hdev); + return err; + } + + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) + return 0; + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + hu->proto->close(hu); + hu->hdev = NULL; + hci_free_dev(hdev); + return -ENODEV; + } + + set_bit(HCI_UART_REGISTERED, &hu->flags); + + return 0; +} + +static int hci_uart_set_proto(struct hci_uart *hu, int id) +{ + const struct hci_uart_proto *p; + int err; + + p = hci_uart_get_proto(id); + if (!p) + return -EPROTONOSUPPORT; + + hu->proto = p; + + err = hci_uart_register_dev(hu); + if (err) { + return err; + } + + set_bit(HCI_UART_PROTO_READY, &hu->flags); + return 0; +} + +static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags) +{ + unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) | + BIT(HCI_UART_RESET_ON_INIT) | + BIT(HCI_UART_CREATE_AMP) | + BIT(HCI_UART_INIT_PENDING) | + BIT(HCI_UART_EXT_CONFIG) | + BIT(HCI_UART_VND_DETECT); + + if (flags & ~valid_flags) + return -EINVAL; + + hu->hdev_flags = flags; + + return 0; +} + +/* hci_uart_tty_ioctl() + * + * Process IOCTL system call for the tty device. 
+ * + * Arguments: + * + * tty pointer to tty instance data + * cmd IOCTL command code + * arg argument for IOCTL call (cmd dependent) + * + * Return Value: Command dependent + */ +static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg) +{ + struct hci_uart *hu = tty->disc_data; + int err = 0; + + BT_DBG(""); + + /* Verify the status of the device */ + if (!hu) + return -EBADF; + + switch (cmd) { + case HCIUARTSETPROTO: + if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) { + err = hci_uart_set_proto(hu, arg); + if (err) + clear_bit(HCI_UART_PROTO_SET, &hu->flags); + } else + err = -EBUSY; + break; + + case HCIUARTGETPROTO: + if (test_bit(HCI_UART_PROTO_SET, &hu->flags) && + test_bit(HCI_UART_PROTO_READY, &hu->flags)) + err = hu->proto->id; + else + err = -EUNATCH; + break; + + case HCIUARTGETDEVICE: + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + err = hu->hdev->id; + else + err = -EUNATCH; + break; + + case HCIUARTSETFLAGS: + if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + err = -EBUSY; + else + err = hci_uart_set_flags(hu, arg); + break; + + case HCIUARTGETFLAGS: + err = hu->hdev_flags; + break; + + default: + err = n_tty_ioctl_helper(tty, cmd, arg); + break; + } + + return err; +} + +/* + * We don't provide read/write/poll interface for user space. + */ +static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file, + u8 *buf, size_t nr, void **cookie, + unsigned long offset) +{ + return 0; +} + +static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file, + const u8 *data, size_t count) +{ + return 0; +} + +static struct tty_ldisc_ops hci_uart_ldisc = { + .owner = THIS_MODULE, + .num = N_HCI, + .name = "n_hci", + .open = hci_uart_tty_open, + .close = hci_uart_tty_close, + .read = hci_uart_tty_read, + .write = hci_uart_tty_write, + .ioctl = hci_uart_tty_ioctl, + .compat_ioctl = hci_uart_tty_ioctl, + .receive_buf = hci_uart_tty_receive, + .write_wakeup = hci_uart_tty_wakeup, +}; + +static int __init hci_uart_init(void) +{ + int err; + + BT_INFO("HCI UART driver ver %s", VERSION); + + /* Register the tty discipline */ + err = tty_register_ldisc(&hci_uart_ldisc); + if (err) { + BT_ERR("HCI line discipline registration failed. 
(%d)", err); + return err; + } + +#ifdef CONFIG_BT_HCIUART_H4 + h4_init(); +#endif +#ifdef CONFIG_BT_HCIUART_BCSP + bcsp_init(); +#endif +#ifdef CONFIG_BT_HCIUART_LL + ll_init(); +#endif +#ifdef CONFIG_BT_HCIUART_ATH3K + ath_init(); +#endif +#ifdef CONFIG_BT_HCIUART_3WIRE + h5_init(); +#endif +#ifdef CONFIG_BT_HCIUART_INTEL + intel_init(); +#endif +#ifdef CONFIG_BT_HCIUART_BCM + bcm_init(); +#endif +#ifdef CONFIG_BT_HCIUART_QCA + qca_init(); +#endif +#ifdef CONFIG_BT_HCIUART_AG6XX + ag6xx_init(); +#endif +#ifdef CONFIG_BT_HCIUART_MRVL + mrvl_init(); +#endif + + return 0; +} + +static void __exit hci_uart_exit(void) +{ +#ifdef CONFIG_BT_HCIUART_H4 + h4_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_BCSP + bcsp_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_LL + ll_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_ATH3K + ath_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_3WIRE + h5_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_INTEL + intel_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_BCM + bcm_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_QCA + qca_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_AG6XX + ag6xx_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_MRVL + mrvl_deinit(); +#endif + + tty_unregister_ldisc(&hci_uart_ldisc); +} + +module_init(hci_uart_init); +module_exit(hci_uart_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_LDISC(N_HCI); diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c new file mode 100644 index 000000000..4a0b5c316 --- /dev/null +++ b/drivers/bluetooth/hci_ll.c @@ -0,0 +1,822 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Texas Instruments' Bluetooth HCILL UART protocol + * + * HCILL (HCI Low Level) is a Texas Instruments' power management + * protocol extension to H4. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * Written by Ohad Ben-Cohen + * + * Acknowledgements: + * This file is based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "hci_uart.h" + +/* Vendor-specific HCI commands */ +#define HCI_VS_WRITE_BD_ADDR 0xfc06 +#define HCI_VS_UPDATE_UART_HCI_BAUDRATE 0xff36 + +/* HCILL commands */ +#define HCILL_GO_TO_SLEEP_IND 0x30 +#define HCILL_GO_TO_SLEEP_ACK 0x31 +#define HCILL_WAKE_UP_IND 0x32 +#define HCILL_WAKE_UP_ACK 0x33 + +/* HCILL states */ +enum hcill_states_e { + HCILL_ASLEEP, + HCILL_ASLEEP_TO_AWAKE, + HCILL_AWAKE, + HCILL_AWAKE_TO_ASLEEP +}; + +struct ll_device { + struct hci_uart hu; + struct serdev_device *serdev; + struct gpio_desc *enable_gpio; + struct clk *ext_clk; + bdaddr_t bdaddr; +}; + +struct ll_struct { + struct sk_buff *rx_skb; + struct sk_buff_head txq; + spinlock_t hcill_lock; /* HCILL state lock */ + unsigned long hcill_state; /* HCILL power state */ + struct sk_buff_head tx_wait_q; /* HCILL wait queue */ +}; + +/* + * Builds and sends an HCILL command packet. 
+ * These are very simple packets with only 1 cmd byte + */ +static int send_hcill_cmd(u8 cmd, struct hci_uart *hu) +{ + int err = 0; + struct sk_buff *skb = NULL; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p cmd 0x%x", hu, cmd); + + /* allocate packet */ + skb = bt_skb_alloc(1, GFP_ATOMIC); + if (!skb) { + BT_ERR("cannot allocate memory for HCILL packet"); + err = -ENOMEM; + goto out; + } + + /* prepare packet */ + skb_put_u8(skb, cmd); + + /* send packet */ + skb_queue_tail(&ll->txq, skb); +out: + return err; +} + +/* Initialize protocol */ +static int ll_open(struct hci_uart *hu) +{ + struct ll_struct *ll; + + BT_DBG("hu %p", hu); + + ll = kzalloc(sizeof(*ll), GFP_KERNEL); + if (!ll) + return -ENOMEM; + + skb_queue_head_init(&ll->txq); + skb_queue_head_init(&ll->tx_wait_q); + spin_lock_init(&ll->hcill_lock); + + ll->hcill_state = HCILL_AWAKE; + + hu->priv = ll; + + if (hu->serdev) { + struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev); + + if (!IS_ERR(lldev->ext_clk)) + clk_prepare_enable(lldev->ext_clk); + } + + return 0; +} + +/* Flush protocol data */ +static int ll_flush(struct hci_uart *hu) +{ + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ll->tx_wait_q); + skb_queue_purge(&ll->txq); + + return 0; +} + +/* Close protocol */ +static int ll_close(struct hci_uart *hu) +{ + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ll->tx_wait_q); + skb_queue_purge(&ll->txq); + + kfree_skb(ll->rx_skb); + + if (hu->serdev) { + struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev); + + gpiod_set_value_cansleep(lldev->enable_gpio, 0); + + clk_disable_unprepare(lldev->ext_clk); + } + + hu->priv = NULL; + + kfree(ll); + + return 0; +} + +/* + * internal function, which does common work of the device wake up process: + * 1. places all pending packets (waiting in tx_wait_q list) in txq list. + * 2. changes internal state to HCILL_AWAKE. + * Note: assumes that hcill_lock spinlock is taken, + * shouldn't be called otherwise! + */ +static void __ll_do_awake(struct ll_struct *ll) +{ + struct sk_buff *skb = NULL; + + while ((skb = skb_dequeue(&ll->tx_wait_q))) + skb_queue_tail(&ll->txq, skb); + + ll->hcill_state = HCILL_AWAKE; +} + +/* + * Called upon a wake-up-indication from the device + */ +static void ll_device_want_to_wakeup(struct hci_uart *hu) +{ + unsigned long flags; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + switch (ll->hcill_state) { + case HCILL_ASLEEP_TO_AWAKE: + /* + * This state means that both the host and the BRF chip + * have simultaneously sent a wake-up-indication packet. + * Traditionally, in this case, receiving a wake-up-indication + * was enough and an additional wake-up-ack wasn't needed. + * This has changed with the BRF6350, which does require an + * explicit wake-up-ack. Other BRF versions, which do not + * require an explicit ack here, do accept it, thus it is + * perfectly safe to always send one. 
+ */ + BT_DBG("dual wake-up-indication"); + fallthrough; + case HCILL_ASLEEP: + /* acknowledge device wake up */ + if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) { + BT_ERR("cannot acknowledge device wake up"); + goto out; + } + break; + default: + /* any other state is illegal */ + BT_ERR("received HCILL_WAKE_UP_IND in state %ld", + ll->hcill_state); + break; + } + + /* send pending packets and change state to HCILL_AWAKE */ + __ll_do_awake(ll); + +out: + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + /* actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* + * Called upon a sleep-indication from the device + */ +static void ll_device_want_to_sleep(struct hci_uart *hu) +{ + unsigned long flags; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + /* sanity check */ + if (ll->hcill_state != HCILL_AWAKE) + BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld", + ll->hcill_state); + + /* acknowledge device sleep */ + if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) { + BT_ERR("cannot acknowledge device sleep"); + goto out; + } + + /* update state */ + ll->hcill_state = HCILL_ASLEEP; + +out: + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + /* actually send the sleep ack packet */ + hci_uart_tx_wakeup(hu); +} + +/* + * Called upon wake-up-acknowledgement from the device + */ +static void ll_device_woke_up(struct hci_uart *hu) +{ + unsigned long flags; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + /* sanity check */ + if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE) + BT_ERR("received HCILL_WAKE_UP_ACK in state %ld", + ll->hcill_state); + + /* send pending packets and change state to HCILL_AWAKE */ + __ll_do_awake(ll); + + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + /* actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Enqueue frame for transmittion (padding, crc, etc) */ +/* may be called from two simultaneous tasklets */ +static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + unsigned long flags = 0; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + /* act according to current state */ + switch (ll->hcill_state) { + case HCILL_AWAKE: + BT_DBG("device awake, sending normally"); + skb_queue_tail(&ll->txq, skb); + break; + case HCILL_ASLEEP: + BT_DBG("device asleep, waking up and queueing packet"); + /* save packet for later */ + skb_queue_tail(&ll->tx_wait_q, skb); + /* awake device */ + if (send_hcill_cmd(HCILL_WAKE_UP_IND, hu) < 0) { + BT_ERR("cannot wake up device"); + break; + } + ll->hcill_state = HCILL_ASLEEP_TO_AWAKE; + break; + case HCILL_ASLEEP_TO_AWAKE: + BT_DBG("device waking up, queueing packet"); + /* transient state; just keep packet for later */ + skb_queue_tail(&ll->tx_wait_q, skb); + break; + default: + BT_ERR("illegal hcill state: %ld (losing packet)", + ll->hcill_state); + dev_kfree_skb_irq(skb); + break; + } + + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + return 0; +} + +static int ll_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct ll_struct *ll = hu->priv; + + switch (hci_skb_pkt_type(skb)) { + case HCILL_GO_TO_SLEEP_IND: + BT_DBG("HCILL_GO_TO_SLEEP_IND packet"); + ll_device_want_to_sleep(hu); + 
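/* Descriptive summary of the HCILL handshake driven by the handlers above
 * (annotation, derived from this file's code): each direction is a one-byte
 * exchange. Controller-initiated sleep: GO_TO_SLEEP_IND (0x30) arrives here,
 * ll_device_want_to_sleep() answers GO_TO_SLEEP_ACK (0x31) and the host state
 * becomes HCILL_ASLEEP. Host-initiated wake-up: ll_enqueue() parks the frame
 * on tx_wait_q, sends WAKE_UP_IND (0x32) and moves to HCILL_ASLEEP_TO_AWAKE;
 * the controller's WAKE_UP_ACK (0x33) is handled by ll_device_woke_up(),
 * whose __ll_do_awake() call flushes tx_wait_q into txq and restores
 * HCILL_AWAKE. A WAKE_UP_IND received while already in HCILL_ASLEEP_TO_AWAKE
 * means both sides woke simultaneously and is acknowledged as well, as
 * handled in ll_device_want_to_wakeup() above.
 */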
break; + case HCILL_GO_TO_SLEEP_ACK: + /* shouldn't happen */ + bt_dev_err(hdev, "received HCILL_GO_TO_SLEEP_ACK in state %ld", + ll->hcill_state); + break; + case HCILL_WAKE_UP_IND: + BT_DBG("HCILL_WAKE_UP_IND packet"); + ll_device_want_to_wakeup(hu); + break; + case HCILL_WAKE_UP_ACK: + BT_DBG("HCILL_WAKE_UP_ACK packet"); + ll_device_woke_up(hu); + break; + } + + kfree_skb(skb); + return 0; +} + +#define LL_RECV_SLEEP_IND \ + .type = HCILL_GO_TO_SLEEP_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 0 + +#define LL_RECV_SLEEP_ACK \ + .type = HCILL_GO_TO_SLEEP_ACK, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 0 + +#define LL_RECV_WAKE_IND \ + .type = HCILL_WAKE_UP_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 0 + +#define LL_RECV_WAKE_ACK \ + .type = HCILL_WAKE_UP_ACK, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 0 + +static const struct h4_recv_pkt ll_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { LL_RECV_SLEEP_IND, .recv = ll_recv_frame }, + { LL_RECV_SLEEP_ACK, .recv = ll_recv_frame }, + { LL_RECV_WAKE_IND, .recv = ll_recv_frame }, + { LL_RECV_WAKE_ACK, .recv = ll_recv_frame }, +}; + +/* Recv data */ +static int ll_recv(struct hci_uart *hu, const void *data, int count) +{ + struct ll_struct *ll = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count, + ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts)); + if (IS_ERR(ll->rx_skb)) { + int err = PTR_ERR(ll->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + ll->rx_skb = NULL; + return err; + } + + return count; +} + +static struct sk_buff *ll_dequeue(struct hci_uart *hu) +{ + struct ll_struct *ll = hu->priv; + + return skb_dequeue(&ll->txq); +} + +#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS) +static int read_local_version(struct hci_dev *hdev) +{ + int err = 0; + unsigned short version = 0; + struct sk_buff *skb; + struct hci_rp_read_local_version *ver; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading TI version information failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + if (skb->len != sizeof(*ver)) { + err = -EILSEQ; + goto out; + } + + ver = (struct hci_rp_read_local_version *)skb->data; + if (le16_to_cpu(ver->manufacturer) != 13) { + err = -ENODEV; + goto out; + } + + version = le16_to_cpu(ver->lmp_subver); + +out: + if (err) + bt_dev_err(hdev, "Failed to read TI version info: %d", err); + kfree_skb(skb); + return err ? 
err : version; +} + +static int send_command_from_firmware(struct ll_device *lldev, + struct hci_command *cmd) +{ + struct sk_buff *skb; + + if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) { + /* ignore remote change + * baud rate HCI VS command + */ + bt_dev_warn(lldev->hu.hdev, + "change remote baud rate command in firmware"); + return 0; + } + if (cmd->prefix != 1) + bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix); + + skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, + &cmd->speed, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(lldev->hu.hdev, "send command failed"); + return PTR_ERR(skb); + } + kfree_skb(skb); + return 0; +} + +/* + * download_firmware - + * internal function which parses through the .bts firmware + * script file intreprets SEND, DELAY actions only as of now + */ +static int download_firmware(struct ll_device *lldev) +{ + unsigned short chip, min_ver, maj_ver; + int version, err, len; + unsigned char *ptr, *action_ptr; + unsigned char bts_scr_name[40]; /* 40 char long bts scr name? */ + const struct firmware *fw; + struct hci_command *cmd; + + version = read_local_version(lldev->hu.hdev); + if (version < 0) + return version; + + chip = (version & 0x7C00) >> 10; + min_ver = (version & 0x007F); + maj_ver = (version & 0x0380) >> 7; + if (version & 0x8000) + maj_ver |= 0x0008; + + snprintf(bts_scr_name, sizeof(bts_scr_name), + "ti-connectivity/TIInit_%d.%d.%d.bts", + chip, maj_ver, min_ver); + + err = request_firmware(&fw, bts_scr_name, &lldev->serdev->dev); + if (err || !fw->data || !fw->size) { + bt_dev_err(lldev->hu.hdev, "request_firmware failed(errno %d) for %s", + err, bts_scr_name); + return -EINVAL; + } + ptr = (void *)fw->data; + len = fw->size; + /* bts_header to remove out magic number and + * version + */ + ptr += sizeof(struct bts_header); + len -= sizeof(struct bts_header); + + while (len > 0 && ptr) { + bt_dev_dbg(lldev->hu.hdev, " action size %d, type %d ", + ((struct bts_action *)ptr)->size, + ((struct bts_action *)ptr)->type); + + action_ptr = &(((struct bts_action *)ptr)->data[0]); + + switch (((struct bts_action *)ptr)->type) { + case ACTION_SEND_COMMAND: /* action send */ + bt_dev_dbg(lldev->hu.hdev, "S"); + cmd = (struct hci_command *)action_ptr; + err = send_command_from_firmware(lldev, cmd); + if (err) + goto out_rel_fw; + break; + case ACTION_WAIT_EVENT: /* wait */ + /* no need to wait as command was synchronous */ + bt_dev_dbg(lldev->hu.hdev, "W"); + break; + case ACTION_DELAY: /* sleep */ + bt_dev_info(lldev->hu.hdev, "sleep command in scr"); + msleep(((struct bts_action_delay *)action_ptr)->msec); + break; + } + len -= (sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size); + ptr += sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size; + } + +out_rel_fw: + /* fw download complete */ + release_firmware(fw); + return err; +} + +static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + bdaddr_t bdaddr_swapped; + struct sk_buff *skb; + + /* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD + * address to be MSB first, but bdaddr_t has the convention of being + * LSB first. 
+ */ + baswap(&bdaddr_swapped, bdaddr); + skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t), + &bdaddr_swapped, HCI_INIT_TIMEOUT); + if (!IS_ERR(skb)) + kfree_skb(skb); + + return PTR_ERR_OR_ZERO(skb); +} + +static int ll_setup(struct hci_uart *hu) +{ + int err, retry = 3; + struct ll_device *lldev; + struct serdev_device *serdev = hu->serdev; + u32 speed; + + if (!serdev) + return 0; + + lldev = serdev_device_get_drvdata(serdev); + + hu->hdev->set_bdaddr = ll_set_bdaddr; + + serdev_device_set_flow_control(serdev, true); + + do { + /* Reset the Bluetooth device */ + gpiod_set_value_cansleep(lldev->enable_gpio, 0); + msleep(5); + gpiod_set_value_cansleep(lldev->enable_gpio, 1); + mdelay(100); + err = serdev_device_wait_for_cts(serdev, true, 200); + if (err) { + bt_dev_err(hu->hdev, "Failed to get CTS"); + return err; + } + + err = download_firmware(lldev); + if (!err) + break; + + /* Toggle BT_EN and retry */ + bt_dev_err(hu->hdev, "download firmware failed, retrying..."); + } while (retry--); + + if (err) + return err; + + /* Set BD address if one was specified at probe */ + if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) { + /* This means that there was an error getting the BD address + * during probe, so mark the device as having a bad address. + */ + set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); + } else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) { + err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr); + if (err) + set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); + } + + /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + + if (speed) { + __le32 speed_le = cpu_to_le32(speed); + struct sk_buff *skb; + + skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE, + sizeof(speed_le), &speed_le, + HCI_INIT_TIMEOUT); + if (!IS_ERR(skb)) { + kfree_skb(skb); + serdev_device_set_baudrate(serdev, speed); + } + } + + return 0; +} + +static const struct hci_uart_proto llp; + +static int hci_ti_probe(struct serdev_device *serdev) +{ + struct hci_uart *hu; + struct ll_device *lldev; + struct nvmem_cell *bdaddr_cell; + u32 max_speed = 3000000; + + lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL); + if (!lldev) + return -ENOMEM; + hu = &lldev->hu; + + serdev_device_set_drvdata(serdev, lldev); + lldev->serdev = hu->serdev = serdev; + + lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev, + "enable", + GPIOD_OUT_LOW); + if (IS_ERR(lldev->enable_gpio)) + return PTR_ERR(lldev->enable_gpio); + + lldev->ext_clk = devm_clk_get(&serdev->dev, "ext_clock"); + if (IS_ERR(lldev->ext_clk) && PTR_ERR(lldev->ext_clk) != -ENOENT) + return PTR_ERR(lldev->ext_clk); + + of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed); + hci_uart_set_speeds(hu, 115200, max_speed); + + /* optional BD address from nvram */ + bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address"); + if (IS_ERR(bdaddr_cell)) { + int err = PTR_ERR(bdaddr_cell); + + if (err == -EPROBE_DEFER) + return err; + + /* ENOENT means there is no matching nvmem cell and ENOSYS + * means that nvmem is not enabled in the kernel configuration. + */ + if (err != -ENOENT && err != -ENOSYS) { + /* If there was some other error, give userspace a + * chance to fix the problem instead of failing to load + * the driver. Using BDADDR_NONE as a flag that is + * tested later in the setup function. 
+ */ + dev_warn(&serdev->dev, + "Failed to get \"bd-address\" nvmem cell (%d)\n", + err); + bacpy(&lldev->bdaddr, BDADDR_NONE); + } + } else { + bdaddr_t *bdaddr; + size_t len; + + bdaddr = nvmem_cell_read(bdaddr_cell, &len); + nvmem_cell_put(bdaddr_cell); + if (IS_ERR(bdaddr)) { + dev_err(&serdev->dev, "Failed to read nvmem bd-address\n"); + return PTR_ERR(bdaddr); + } + if (len != sizeof(bdaddr_t)) { + dev_err(&serdev->dev, "Invalid nvmem bd-address length\n"); + kfree(bdaddr); + return -EINVAL; + } + + /* As per the device tree bindings, the value from nvmem is + * expected to be MSB first, but in the kernel it is expected + * that bdaddr_t is LSB first. + */ + baswap(&lldev->bdaddr, bdaddr); + kfree(bdaddr); + } + + return hci_uart_register_device(hu, &llp); +} + +static void hci_ti_remove(struct serdev_device *serdev) +{ + struct ll_device *lldev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&lldev->hu); +} + +static const struct of_device_id hci_ti_of_match[] = { + { .compatible = "ti,cc2560" }, + { .compatible = "ti,wl1271-st" }, + { .compatible = "ti,wl1273-st" }, + { .compatible = "ti,wl1281-st" }, + { .compatible = "ti,wl1283-st" }, + { .compatible = "ti,wl1285-st" }, + { .compatible = "ti,wl1801-st" }, + { .compatible = "ti,wl1805-st" }, + { .compatible = "ti,wl1807-st" }, + { .compatible = "ti,wl1831-st" }, + { .compatible = "ti,wl1835-st" }, + { .compatible = "ti,wl1837-st" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hci_ti_of_match); + +static struct serdev_device_driver hci_ti_drv = { + .driver = { + .name = "hci-ti", + .of_match_table = hci_ti_of_match, + }, + .probe = hci_ti_probe, + .remove = hci_ti_remove, +}; +#else +#define ll_setup NULL +#endif + +static const struct hci_uart_proto llp = { + .id = HCI_UART_LL, + .name = "LL", + .setup = ll_setup, + .open = ll_open, + .close = ll_close, + .recv = ll_recv, + .enqueue = ll_enqueue, + .dequeue = ll_dequeue, + .flush = ll_flush, +}; + +int __init ll_init(void) +{ + serdev_device_driver_register(&hci_ti_drv); + + return hci_uart_register_proto(&llp); +} + +int __exit ll_deinit(void) +{ + serdev_device_driver_unregister(&hci_ti_drv); + + return hci_uart_unregister_proto(&llp); +} diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c new file mode 100644 index 000000000..e08222395 --- /dev/null +++ b/drivers/bluetooth/hci_mrvl.c @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth HCI UART driver for marvell devices + * + * Copyright (C) 2016 Marvell International Ltd. 
+ * Copyright (C) 2016 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +#define HCI_FW_REQ_PKT 0xA5 +#define HCI_CHIP_VER_PKT 0xAA + +#define MRVL_ACK 0x5A +#define MRVL_NAK 0xBF +#define MRVL_RAW_DATA 0x1F +#define MRVL_SET_BAUDRATE 0xFC09 + +enum { + STATE_CHIP_VER_PENDING, + STATE_FW_REQ_PENDING, + STATE_FW_LOADED, +}; + +struct mrvl_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; + struct sk_buff_head rawq; + unsigned long flags; + unsigned int tx_len; + u8 id, rev; +}; + +struct mrvl_serdev { + struct hci_uart hu; +}; + +struct hci_mrvl_pkt { + __le16 lhs; + __le16 rhs; +} __packed; +#define HCI_MRVL_PKT_SIZE 4 + +static int mrvl_open(struct hci_uart *hu) +{ + struct mrvl_data *mrvl; + int ret; + + BT_DBG("hu %p", hu); + + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + + mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); + if (!mrvl) + return -ENOMEM; + + skb_queue_head_init(&mrvl->txq); + skb_queue_head_init(&mrvl->rawq); + + set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags); + + hu->priv = mrvl; + + if (hu->serdev) { + ret = serdev_device_open(hu->serdev); + if (ret) + goto err; + } + + return 0; +err: + kfree(mrvl); + + return ret; +} + +static int mrvl_close(struct hci_uart *hu) +{ + struct mrvl_data *mrvl = hu->priv; + + BT_DBG("hu %p", hu); + + if (hu->serdev) + serdev_device_close(hu->serdev); + + skb_queue_purge(&mrvl->txq); + skb_queue_purge(&mrvl->rawq); + kfree_skb(mrvl->rx_skb); + kfree(mrvl); + + hu->priv = NULL; + return 0; +} + +static int mrvl_flush(struct hci_uart *hu) +{ + struct mrvl_data *mrvl = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&mrvl->txq); + skb_queue_purge(&mrvl->rawq); + + return 0; +} + +static struct sk_buff *mrvl_dequeue(struct hci_uart *hu) +{ + struct mrvl_data *mrvl = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&mrvl->txq); + if (!skb) { + /* Any raw data ? 
*/ + skb = skb_dequeue(&mrvl->rawq); + } else { + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + } + + return skb; +} + +static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct mrvl_data *mrvl = hu->priv; + + skb_queue_tail(&mrvl->txq, skb); + return 0; +} + +static void mrvl_send_ack(struct hci_uart *hu, unsigned char type) +{ + struct mrvl_data *mrvl = hu->priv; + struct sk_buff *skb; + + /* No H4 payload, only 1 byte header */ + skb = bt_skb_alloc(0, GFP_ATOMIC); + if (!skb) { + bt_dev_err(hu->hdev, "Unable to alloc ack/nak packet"); + return; + } + hci_skb_pkt_type(skb) = type; + + skb_queue_tail(&mrvl->txq, skb); + hci_uart_tx_wakeup(hu); +} + +static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_mrvl_pkt *pkt = (void *)skb->data; + struct hci_uart *hu = hci_get_drvdata(hdev); + struct mrvl_data *mrvl = hu->priv; + int ret = 0; + + if ((pkt->lhs ^ pkt->rhs) != 0xffff) { + bt_dev_err(hdev, "Corrupted mrvl header"); + mrvl_send_ack(hu, MRVL_NAK); + ret = -EINVAL; + goto done; + } + mrvl_send_ack(hu, MRVL_ACK); + + if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags)) { + bt_dev_err(hdev, "Received unexpected firmware request"); + ret = -EINVAL; + goto done; + } + + mrvl->tx_len = le16_to_cpu(pkt->lhs); + + clear_bit(STATE_FW_REQ_PENDING, &mrvl->flags); + smp_mb__after_atomic(); + wake_up_bit(&mrvl->flags, STATE_FW_REQ_PENDING); + +done: + kfree_skb(skb); + return ret; +} + +static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_mrvl_pkt *pkt = (void *)skb->data; + struct hci_uart *hu = hci_get_drvdata(hdev); + struct mrvl_data *mrvl = hu->priv; + u16 version = le16_to_cpu(pkt->lhs); + int ret = 0; + + if ((pkt->lhs ^ pkt->rhs) != 0xffff) { + bt_dev_err(hdev, "Corrupted mrvl header"); + mrvl_send_ack(hu, MRVL_NAK); + ret = -EINVAL; + goto done; + } + mrvl_send_ack(hu, MRVL_ACK); + + if (!test_bit(STATE_CHIP_VER_PENDING, &mrvl->flags)) { + bt_dev_err(hdev, "Received unexpected chip version"); + goto done; + } + + mrvl->id = version; + mrvl->rev = version >> 8; + + bt_dev_info(hdev, "Controller id = %x, rev = %x", mrvl->id, mrvl->rev); + + clear_bit(STATE_CHIP_VER_PENDING, &mrvl->flags); + smp_mb__after_atomic(); + wake_up_bit(&mrvl->flags, STATE_CHIP_VER_PENDING); + +done: + kfree_skb(skb); + return ret; +} + +#define HCI_RECV_CHIP_VER \ + .type = HCI_CHIP_VER_PKT, \ + .hlen = HCI_MRVL_PKT_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MRVL_PKT_SIZE + +#define HCI_RECV_FW_REQ \ + .type = HCI_FW_REQ_PKT, \ + .hlen = HCI_MRVL_PKT_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MRVL_PKT_SIZE + +static const struct h4_recv_pkt mrvl_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { HCI_RECV_FW_REQ, .recv = mrvl_recv_fw_req }, + { HCI_RECV_CHIP_VER, .recv = mrvl_recv_chip_ver }, +}; + +static int mrvl_recv(struct hci_uart *hu, const void *data, int count) +{ + struct mrvl_data *mrvl = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + /* We might receive some noise when there is no firmware loaded. Therefore, + * we drop data if the firmware is not loaded yet and if there is no fw load + * request pending. 
+ */ + if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags) && + !test_bit(STATE_FW_LOADED, &mrvl->flags)) + return count; + + mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count, + mrvl_recv_pkts, + ARRAY_SIZE(mrvl_recv_pkts)); + if (IS_ERR(mrvl->rx_skb)) { + int err = PTR_ERR(mrvl->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + mrvl->rx_skb = NULL; + return err; + } + + return count; +} + +static int mrvl_load_firmware(struct hci_dev *hdev, const char *name) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct mrvl_data *mrvl = hu->priv; + const struct firmware *fw = NULL; + const u8 *fw_ptr, *fw_max; + int err; + + err = request_firmware(&fw, name, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load firmware file %s", name); + return err; + } + + fw_ptr = fw->data; + fw_max = fw->data + fw->size; + + bt_dev_info(hdev, "Loading %s", name); + + set_bit(STATE_FW_REQ_PENDING, &mrvl->flags); + + while (fw_ptr <= fw_max) { + struct sk_buff *skb; + + /* Controller drives the firmware load by sending firmware + * request packets containing the expected fragment size. + */ + err = wait_on_bit_timeout(&mrvl->flags, STATE_FW_REQ_PENDING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(2000)); + if (err == 1) { + bt_dev_err(hdev, "Firmware load interrupted"); + err = -EINTR; + break; + } else if (err) { + bt_dev_err(hdev, "Firmware request timeout"); + err = -ETIMEDOUT; + break; + } + + bt_dev_dbg(hdev, "Firmware request, expecting %d bytes", + mrvl->tx_len); + + if (fw_ptr == fw_max) { + /* Controller requests a null size once firmware is + * fully loaded. If controller expects more data, there + * is an issue. + */ + if (!mrvl->tx_len) { + bt_dev_info(hdev, "Firmware loading complete"); + } else { + bt_dev_err(hdev, "Firmware loading failure"); + err = -EINVAL; + } + break; + } + + if (fw_ptr + mrvl->tx_len > fw_max) { + mrvl->tx_len = fw_max - fw_ptr; + bt_dev_dbg(hdev, "Adjusting tx_len to %d", + mrvl->tx_len); + } + + skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL); + if (!skb) { + bt_dev_err(hdev, "Failed to alloc mem for FW packet"); + err = -ENOMEM; + break; + } + bt_cb(skb)->pkt_type = MRVL_RAW_DATA; + + skb_put_data(skb, fw_ptr, mrvl->tx_len); + fw_ptr += mrvl->tx_len; + + set_bit(STATE_FW_REQ_PENDING, &mrvl->flags); + + skb_queue_tail(&mrvl->rawq, skb); + hci_uart_tx_wakeup(hu); + } + + release_firmware(fw); + return err; +} + +static int mrvl_setup(struct hci_uart *hu) +{ + int err; + struct mrvl_data *mrvl = hu->priv; + + hci_uart_set_flow_control(hu, true); + + err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin"); + if (err) { + bt_dev_err(hu->hdev, "Unable to download firmware helper"); + return -EINVAL; + } + + /* Let the final ack go out before switching the baudrate */ + hci_uart_wait_until_sent(hu); + + if (hu->serdev) + serdev_device_set_baudrate(hu->serdev, hu->oper_speed); + else + hci_uart_set_baudrate(hu, hu->oper_speed); + + hci_uart_set_flow_control(hu, false); + + err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin"); + if (err) + return err; + + set_bit(STATE_FW_LOADED, &mrvl->flags); + + return 0; +} + +static int mrvl_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + int err; + struct mrvl_data *mrvl = hu->priv; + __le32 speed_le = cpu_to_le32(speed); + + /* The firmware might be loaded by the Wifi driver over SDIO. We wait + * up to 10s for the CTS to go up. Afterward, we know that the firmware + * is ready. 
+ */ + err = serdev_device_wait_for_cts(hu->serdev, true, 10000); + if (err) { + bt_dev_err(hu->hdev, "Wait for CTS failed with %d\n", err); + return err; + } + + set_bit(STATE_FW_LOADED, &mrvl->flags); + + err = __hci_cmd_sync_status(hu->hdev, MRVL_SET_BAUDRATE, + sizeof(speed_le), &speed_le, + HCI_INIT_TIMEOUT); + if (err) { + bt_dev_err(hu->hdev, "send command failed: %d", err); + return err; + } + + serdev_device_set_baudrate(hu->serdev, speed); + + /* We forcefully have to send a command to the bluetooth module so that + * the driver detects it after a baudrate change. This is foreseen by + * hci_serdev by setting HCI_UART_VND_DETECT which then causes a dummy + * local version read. + */ + set_bit(HCI_UART_VND_DETECT, &hu->hdev_flags); + + return 0; +} + +static const struct hci_uart_proto mrvl_proto_8897 = { + .id = HCI_UART_MRVL, + .name = "Marvell", + .init_speed = 115200, + .oper_speed = 3000000, + .open = mrvl_open, + .close = mrvl_close, + .flush = mrvl_flush, + .setup = mrvl_setup, + .recv = mrvl_recv, + .enqueue = mrvl_enqueue, + .dequeue = mrvl_dequeue, +}; + +static const struct hci_uart_proto mrvl_proto_8997 = { + .id = HCI_UART_MRVL, + .name = "Marvell 8997", + .init_speed = 115200, + .oper_speed = 3000000, + .open = mrvl_open, + .close = mrvl_close, + .flush = mrvl_flush, + .set_baudrate = mrvl_set_baudrate, + .recv = mrvl_recv, + .enqueue = mrvl_enqueue, + .dequeue = mrvl_dequeue, +}; + +static int mrvl_serdev_probe(struct serdev_device *serdev) +{ + struct mrvl_serdev *mrvldev; + const struct hci_uart_proto *mrvl_proto = device_get_match_data(&serdev->dev); + + mrvldev = devm_kzalloc(&serdev->dev, sizeof(*mrvldev), GFP_KERNEL); + if (!mrvldev) + return -ENOMEM; + + mrvldev->hu.oper_speed = mrvl_proto->oper_speed; + if (mrvl_proto->set_baudrate) + of_property_read_u32(serdev->dev.of_node, "max-speed", &mrvldev->hu.oper_speed); + + mrvldev->hu.serdev = serdev; + serdev_device_set_drvdata(serdev, mrvldev); + + return hci_uart_register_device(&mrvldev->hu, mrvl_proto); +} + +static void mrvl_serdev_remove(struct serdev_device *serdev) +{ + struct mrvl_serdev *mrvldev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&mrvldev->hu); +} + +static const struct of_device_id __maybe_unused mrvl_bluetooth_of_match[] = { + { .compatible = "mrvl,88w8897", .data = &mrvl_proto_8897}, + { .compatible = "mrvl,88w8997", .data = &mrvl_proto_8997}, + { }, +}; +MODULE_DEVICE_TABLE(of, mrvl_bluetooth_of_match); + +static struct serdev_device_driver mrvl_serdev_driver = { + .probe = mrvl_serdev_probe, + .remove = mrvl_serdev_remove, + .driver = { + .name = "hci_uart_mrvl", + .of_match_table = of_match_ptr(mrvl_bluetooth_of_match), + }, +}; + +int __init mrvl_init(void) +{ + serdev_device_driver_register(&mrvl_serdev_driver); + + return hci_uart_register_proto(&mrvl_proto_8897); +} + +int __exit mrvl_deinit(void) +{ + serdev_device_driver_unregister(&mrvl_serdev_driver); + + return hci_uart_unregister_proto(&mrvl_proto_8897); +} diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c new file mode 100644 index 000000000..97da0b2bf --- /dev/null +++ b/drivers/bluetooth/hci_nokia.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Bluetooth HCI UART H4 driver with Nokia Extensions AKA Nokia H4+ + * + * Copyright (C) 2015 Marcel Holtmann + * Copyright (C) 2015-2017 Sebastian Reichel + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
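/* Overview of the Nokia H4+ extension implemented below (descriptive
 * annotation): on top of the plain H4 packet types the driver adds three
 * vendor channels -- negotiation (0x06), alive (0x07) and radio (0x08).
 * Setup resets the chip at INIT_BAUD_RATE, performs the speed/protocol
 * negotiation and then switches to SETUP_BAUD_RATE, verifies the link with
 * an alive request/response, downloads the firmware selected by the
 * reported manufacturer id (BCM2048 or TI1271), and finally jumps to
 * MAX_BAUD_RATE. All frames are padded to 16-bit alignment
 * (hu->alignment = 2 in the probe path).
 */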
+#include +#include + +#include "hci_uart.h" +#include "btbcm.h" + +#define VERSION "0.1" + +#define NOKIA_ID_BCM2048 0x04 +#define NOKIA_ID_TI1271 0x31 + +#define FIRMWARE_BCM2048 "nokia/bcmfw.bin" +#define FIRMWARE_TI1271 "nokia/ti1273.bin" + +#define HCI_NOKIA_NEG_PKT 0x06 +#define HCI_NOKIA_ALIVE_PKT 0x07 +#define HCI_NOKIA_RADIO_PKT 0x08 + +#define HCI_NOKIA_NEG_HDR_SIZE 1 +#define HCI_NOKIA_MAX_NEG_SIZE 255 +#define HCI_NOKIA_ALIVE_HDR_SIZE 1 +#define HCI_NOKIA_MAX_ALIVE_SIZE 255 +#define HCI_NOKIA_RADIO_HDR_SIZE 2 +#define HCI_NOKIA_MAX_RADIO_SIZE 255 + +#define NOKIA_PROTO_PKT 0x44 +#define NOKIA_PROTO_BYTE 0x4c + +#define NOKIA_NEG_REQ 0x00 +#define NOKIA_NEG_ACK 0x20 +#define NOKIA_NEG_NAK 0x40 + +#define H4_TYPE_SIZE 1 + +#define NOKIA_RECV_ALIVE \ + .type = HCI_NOKIA_ALIVE_PKT, \ + .hlen = HCI_NOKIA_ALIVE_HDR_SIZE, \ + .loff = 0, \ + .lsize = 1, \ + .maxlen = HCI_NOKIA_MAX_ALIVE_SIZE \ + +#define NOKIA_RECV_NEG \ + .type = HCI_NOKIA_NEG_PKT, \ + .hlen = HCI_NOKIA_NEG_HDR_SIZE, \ + .loff = 0, \ + .lsize = 1, \ + .maxlen = HCI_NOKIA_MAX_NEG_SIZE \ + +#define NOKIA_RECV_RADIO \ + .type = HCI_NOKIA_RADIO_PKT, \ + .hlen = HCI_NOKIA_RADIO_HDR_SIZE, \ + .loff = 1, \ + .lsize = 1, \ + .maxlen = HCI_NOKIA_MAX_RADIO_SIZE \ + +struct hci_nokia_neg_hdr { + u8 dlen; +} __packed; + +struct hci_nokia_neg_cmd { + u8 ack; + u16 baud; + u16 unused1; + u8 proto; + u16 sys_clk; + u16 unused2; +} __packed; + +#define NOKIA_ALIVE_REQ 0x55 +#define NOKIA_ALIVE_RESP 0xcc + +struct hci_nokia_alive_hdr { + u8 dlen; +} __packed; + +struct hci_nokia_alive_pkt { + u8 mid; + u8 unused; +} __packed; + +struct hci_nokia_neg_evt { + u8 ack; + u16 baud; + u16 unused1; + u8 proto; + u16 sys_clk; + u16 unused2; + u8 man_id; + u8 ver_id; +} __packed; + +#define MAX_BAUD_RATE 3692300 +#define SETUP_BAUD_RATE 921600 +#define INIT_BAUD_RATE 120000 + +struct hci_nokia_radio_hdr { + u8 evt; + u8 dlen; +} __packed; + +struct nokia_bt_dev { + struct hci_uart hu; + struct serdev_device *serdev; + + struct gpio_desc *reset; + struct gpio_desc *wakeup_host; + struct gpio_desc *wakeup_bt; + unsigned long sysclk_speed; + + int wake_irq; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + bdaddr_t bdaddr; + + int init_error; + struct completion init_completion; + + u8 man_id; + u8 ver_id; + + bool initialized; + bool tx_enabled; + bool rx_enabled; +}; + +static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb); + +static void nokia_flow_control(struct serdev_device *serdev, bool enable) +{ + if (enable) { + serdev_device_set_rts(serdev, true); + serdev_device_set_flow_control(serdev, true); + } else { + serdev_device_set_flow_control(serdev, false); + serdev_device_set_rts(serdev, false); + } +} + +static irqreturn_t wakeup_handler(int irq, void *data) +{ + struct nokia_bt_dev *btdev = data; + struct device *dev = &btdev->serdev->dev; + int wake_state = gpiod_get_value(btdev->wakeup_host); + + if (btdev->rx_enabled == wake_state) + return IRQ_HANDLED; + + if (wake_state) + pm_runtime_get(dev); + else + pm_runtime_put(dev); + + btdev->rx_enabled = wake_state; + + return IRQ_HANDLED; +} + +static int nokia_reset(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + int err; + + /* reset routine */ + gpiod_set_value_cansleep(btdev->reset, 1); + gpiod_set_value_cansleep(btdev->wakeup_bt, 1); + + msleep(100); + + /* safety check */ + err = gpiod_get_value_cansleep(btdev->wakeup_host); + if (err == 1) { + dev_err(dev, "reset: host wakeup not low!"); + return 
-EPROTO; + } + + /* flush queue */ + serdev_device_write_flush(btdev->serdev); + + /* init uart */ + nokia_flow_control(btdev->serdev, false); + serdev_device_set_baudrate(btdev->serdev, INIT_BAUD_RATE); + + gpiod_set_value_cansleep(btdev->reset, 0); + + /* wait for cts */ + err = serdev_device_wait_for_cts(btdev->serdev, true, 200); + if (err < 0) { + dev_err(dev, "CTS not received: %d", err); + return err; + } + + nokia_flow_control(btdev->serdev, true); + + return 0; +} + +static int nokia_send_alive_packet(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_alive_hdr *hdr; + struct hci_nokia_alive_pkt *pkt; + struct sk_buff *skb; + int len; + + init_completion(&btdev->init_completion); + + len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt); + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_NOKIA_ALIVE_PKT; + memset(skb->data, 0x00, len); + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->dlen = sizeof(*pkt); + pkt = skb_put(skb, sizeof(*pkt)); + pkt->mid = NOKIA_ALIVE_REQ; + + nokia_enqueue(hu, skb); + hci_uart_tx_wakeup(hu); + + dev_dbg(dev, "Alive sent"); + + if (!wait_for_completion_interruptible_timeout(&btdev->init_completion, + msecs_to_jiffies(1000))) { + return -ETIMEDOUT; + } + + if (btdev->init_error < 0) + return btdev->init_error; + + return 0; +} + +static int nokia_send_negotiation(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_neg_cmd *neg_cmd; + struct hci_nokia_neg_hdr *neg_hdr; + struct sk_buff *skb; + int len, err; + u16 baud = DIV_ROUND_CLOSEST(btdev->sysclk_speed * 10, SETUP_BAUD_RATE); + int sysclk = btdev->sysclk_speed / 1000; + + len = H4_TYPE_SIZE + sizeof(*neg_hdr) + sizeof(*neg_cmd); + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_NOKIA_NEG_PKT; + + neg_hdr = skb_put(skb, sizeof(*neg_hdr)); + neg_hdr->dlen = sizeof(*neg_cmd); + + neg_cmd = skb_put(skb, sizeof(*neg_cmd)); + neg_cmd->ack = NOKIA_NEG_REQ; + neg_cmd->baud = cpu_to_le16(baud); + neg_cmd->unused1 = 0x0000; + neg_cmd->proto = NOKIA_PROTO_BYTE; + neg_cmd->sys_clk = cpu_to_le16(sysclk); + neg_cmd->unused2 = 0x0000; + + btdev->init_error = 0; + init_completion(&btdev->init_completion); + + nokia_enqueue(hu, skb); + hci_uart_tx_wakeup(hu); + + dev_dbg(dev, "Negotiation sent"); + + if (!wait_for_completion_interruptible_timeout(&btdev->init_completion, + msecs_to_jiffies(10000))) { + return -ETIMEDOUT; + } + + if (btdev->init_error < 0) + return btdev->init_error; + + /* Change to previously negotiated speed. Flow Control + * is disabled until bluetooth adapter is ready to avoid + * broken bytes being received. 
+ */ + nokia_flow_control(btdev->serdev, false); + serdev_device_set_baudrate(btdev->serdev, SETUP_BAUD_RATE); + err = serdev_device_wait_for_cts(btdev->serdev, true, 200); + if (err < 0) { + dev_err(dev, "CTS not received: %d", err); + return err; + } + nokia_flow_control(btdev->serdev, true); + + dev_dbg(dev, "Negotiation successful"); + + return 0; +} + +static int nokia_setup_fw(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + const char *fwname; + const struct firmware *fw; + const u8 *fw_ptr; + size_t fw_size; + int err; + + dev_dbg(dev, "setup firmware"); + + if (btdev->man_id == NOKIA_ID_BCM2048) { + fwname = FIRMWARE_BCM2048; + } else if (btdev->man_id == NOKIA_ID_TI1271) { + fwname = FIRMWARE_TI1271; + } else { + dev_err(dev, "Unsupported bluetooth device!"); + return -ENODEV; + } + + err = request_firmware(&fw, fwname, dev); + if (err < 0) { + dev_err(dev, "%s: Failed to load Nokia firmware file (%d)", + hu->hdev->name, err); + return err; + } + + fw_ptr = fw->data; + fw_size = fw->size; + + while (fw_size >= 4) { + u16 pkt_size = get_unaligned_le16(fw_ptr); + u8 pkt_type = fw_ptr[2]; + const struct hci_command_hdr *cmd; + u16 opcode; + struct sk_buff *skb; + + switch (pkt_type) { + case HCI_COMMAND_PKT: + cmd = (struct hci_command_hdr *)(fw_ptr + 3); + opcode = le16_to_cpu(cmd->opcode); + + skb = __hci_cmd_sync(hu->hdev, opcode, cmd->plen, + fw_ptr + 3 + HCI_COMMAND_HDR_SIZE, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + dev_err(dev, "%s: FW command %04x failed (%d)", + hu->hdev->name, opcode, err); + goto done; + } + kfree_skb(skb); + break; + case HCI_NOKIA_RADIO_PKT: + case HCI_NOKIA_NEG_PKT: + case HCI_NOKIA_ALIVE_PKT: + break; + } + + fw_ptr += pkt_size + 2; + fw_size -= pkt_size + 2; + } + +done: + release_firmware(fw); + return err; +} + +static int nokia_setup(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + int err; + + btdev->initialized = false; + + nokia_flow_control(btdev->serdev, false); + + pm_runtime_get_sync(dev); + + if (btdev->tx_enabled) { + gpiod_set_value_cansleep(btdev->wakeup_bt, 0); + pm_runtime_put(&btdev->serdev->dev); + btdev->tx_enabled = false; + } + + dev_dbg(dev, "protocol setup"); + + /* 0. reset connection */ + err = nokia_reset(hu); + if (err < 0) { + dev_err(dev, "Reset failed: %d", err); + goto out; + } + + /* 1. negotiate speed etc */ + err = nokia_send_negotiation(hu); + if (err < 0) { + dev_err(dev, "Negotiation failed: %d", err); + goto out; + } + + /* 2. verify correct setup using alive packet */ + err = nokia_send_alive_packet(hu); + if (err < 0) { + dev_err(dev, "Alive check failed: %d", err); + goto out; + } + + /* 3. 
send firmware */ + err = nokia_setup_fw(hu); + if (err < 0) { + dev_err(dev, "Could not setup FW: %d", err); + goto out; + } + + nokia_flow_control(btdev->serdev, false); + serdev_device_set_baudrate(btdev->serdev, MAX_BAUD_RATE); + nokia_flow_control(btdev->serdev, true); + + if (btdev->man_id == NOKIA_ID_BCM2048) { + hu->hdev->set_bdaddr = btbcm_set_bdaddr; + set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); + dev_dbg(dev, "bcm2048 has invalid bluetooth address!"); + } + + dev_dbg(dev, "protocol setup done!"); + + gpiod_set_value_cansleep(btdev->wakeup_bt, 0); + pm_runtime_put(dev); + btdev->tx_enabled = false; + btdev->initialized = true; + + return 0; +out: + pm_runtime_put(dev); + + return err; +} + +static int nokia_open(struct hci_uart *hu) +{ + struct device *dev = &hu->serdev->dev; + + dev_dbg(dev, "protocol open"); + + pm_runtime_enable(dev); + + return 0; +} + +static int nokia_flush(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + + dev_dbg(&btdev->serdev->dev, "flush device"); + + skb_queue_purge(&btdev->txq); + + return 0; +} + +static int nokia_close(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + + dev_dbg(dev, "close device"); + + btdev->initialized = false; + + skb_queue_purge(&btdev->txq); + + kfree_skb(btdev->rx_skb); + + /* disable module */ + gpiod_set_value(btdev->reset, 1); + gpiod_set_value(btdev->wakeup_bt, 0); + + pm_runtime_disable(&btdev->serdev->dev); + + return 0; +} + +/* Enqueue frame for transmittion (padding, crc, etc) */ +static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct nokia_bt_dev *btdev = hu->priv; + int err; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + /* Packets must be word aligned */ + if (skb->len % 2) { + err = skb_pad(skb, 1); + if (err) + return err; + skb_put(skb, 1); + } + + skb_queue_tail(&btdev->txq, skb); + + return 0; +} + +static int nokia_recv_negotiation_packet(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_neg_hdr *hdr; + struct hci_nokia_neg_evt *evt; + int ret = 0; + + hdr = (struct hci_nokia_neg_hdr *)skb->data; + if (hdr->dlen != sizeof(*evt)) { + btdev->init_error = -EIO; + ret = -EIO; + goto finish_neg; + } + + evt = skb_pull(skb, sizeof(*hdr)); + + if (evt->ack != NOKIA_NEG_ACK) { + dev_err(dev, "Negotiation received: wrong reply"); + btdev->init_error = -EINVAL; + ret = -EINVAL; + goto finish_neg; + } + + btdev->man_id = evt->man_id; + btdev->ver_id = evt->ver_id; + + dev_dbg(dev, "Negotiation received: baud=%u:clk=%u:manu=%u:vers=%u", + evt->baud, evt->sys_clk, evt->man_id, evt->ver_id); + +finish_neg: + complete(&btdev->init_completion); + kfree_skb(skb); + return ret; +} + +static int nokia_recv_alive_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_alive_hdr *hdr; + struct hci_nokia_alive_pkt *pkt; + int ret = 0; + + hdr = (struct hci_nokia_alive_hdr *)skb->data; + if (hdr->dlen != sizeof(*pkt)) { + dev_err(dev, "Corrupted alive message"); + btdev->init_error = -EIO; + ret = -EIO; + goto finish_alive; + } + + pkt = skb_pull(skb, sizeof(*hdr)); + + if (pkt->mid != NOKIA_ALIVE_RESP) { + dev_err(dev, "Alive received: invalid response: 0x%02x!", + pkt->mid); + 
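/* Descriptive note on the alive exchange checked here:
 * nokia_send_alive_packet() queues a four-byte frame -- H4+ type 0x07
 * (HCI_NOKIA_ALIVE_PKT), dlen 0x02, mid 0x55 (NOKIA_ALIVE_REQ) and one
 * unused zero byte -- and the controller is expected to answer with
 * mid 0xcc (NOKIA_ALIVE_RESP). Any other mid value lands in this branch
 * and aborts the setup sequence.
 */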
btdev->init_error = -EINVAL; + ret = -EINVAL; + goto finish_alive; + } + + dev_dbg(dev, "Alive received"); + +finish_alive: + complete(&btdev->init_completion); + kfree_skb(skb); + return ret; +} + +static int nokia_recv_radio(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* Packets received on the dedicated radio channel are + * HCI events and so feed them back into the core. + */ + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + return hci_recv_frame(hdev, skb); +} + +/* Recv data */ +static const struct h4_recv_pkt nokia_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { NOKIA_RECV_ALIVE, .recv = nokia_recv_alive_packet }, + { NOKIA_RECV_NEG, .recv = nokia_recv_negotiation_packet }, + { NOKIA_RECV_RADIO, .recv = nokia_recv_radio }, +}; + +static int nokia_recv(struct hci_uart *hu, const void *data, int count) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + int err; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count, + nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); + if (IS_ERR(btdev->rx_skb)) { + err = PTR_ERR(btdev->rx_skb); + dev_err(dev, "Frame reassembly failed (%d)", err); + btdev->rx_skb = NULL; + return err; + } + + return count; +} + +static struct sk_buff *nokia_dequeue(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct sk_buff *result = skb_dequeue(&btdev->txq); + + if (!btdev->initialized) + return result; + + if (btdev->tx_enabled == !!result) + return result; + + if (result) { + pm_runtime_get_sync(dev); + gpiod_set_value_cansleep(btdev->wakeup_bt, 1); + } else { + serdev_device_wait_until_sent(btdev->serdev, 0); + gpiod_set_value_cansleep(btdev->wakeup_bt, 0); + pm_runtime_put(dev); + } + + btdev->tx_enabled = !!result; + + return result; +} + +static const struct hci_uart_proto nokia_proto = { + .id = HCI_UART_NOKIA, + .name = "Nokia", + .open = nokia_open, + .close = nokia_close, + .recv = nokia_recv, + .enqueue = nokia_enqueue, + .dequeue = nokia_dequeue, + .flush = nokia_flush, + .setup = nokia_setup, + .manufacturer = 1, +}; + +static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) +{ + struct device *dev = &serdev->dev; + struct nokia_bt_dev *btdev; + struct clk *sysclk; + int err = 0; + + btdev = devm_kzalloc(dev, sizeof(*btdev), GFP_KERNEL); + if (!btdev) + return -ENOMEM; + + btdev->hu.serdev = btdev->serdev = serdev; + serdev_device_set_drvdata(serdev, btdev); + + btdev->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(btdev->reset)) { + err = PTR_ERR(btdev->reset); + dev_err(dev, "could not get reset gpio: %d", err); + return err; + } + + btdev->wakeup_host = devm_gpiod_get(dev, "host-wakeup", GPIOD_IN); + if (IS_ERR(btdev->wakeup_host)) { + err = PTR_ERR(btdev->wakeup_host); + dev_err(dev, "could not get host wakeup gpio: %d", err); + return err; + } + + btdev->wake_irq = gpiod_to_irq(btdev->wakeup_host); + + err = devm_request_threaded_irq(dev, btdev->wake_irq, NULL, + wakeup_handler, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "wakeup", btdev); + if (err) { + dev_err(dev, "could request wakeup irq: %d", err); + return err; + } + + btdev->wakeup_bt = devm_gpiod_get(dev, "bluetooth-wakeup", + GPIOD_OUT_LOW); + if (IS_ERR(btdev->wakeup_bt)) { + err = PTR_ERR(btdev->wakeup_bt); + dev_err(dev, "could not get BT wakeup gpio: 
%d", err); + return err; + } + + sysclk = devm_clk_get(dev, "sysclk"); + if (IS_ERR(sysclk)) { + err = PTR_ERR(sysclk); + dev_err(dev, "could not get sysclk: %d", err); + return err; + } + + err = clk_prepare_enable(sysclk); + if (err) { + dev_err(dev, "could not enable sysclk: %d", err); + return err; + } + btdev->sysclk_speed = clk_get_rate(sysclk); + clk_disable_unprepare(sysclk); + + skb_queue_head_init(&btdev->txq); + + btdev->hu.priv = btdev; + btdev->hu.alignment = 2; /* Nokia H4+ is word aligned */ + + err = hci_uart_register_device(&btdev->hu, &nokia_proto); + if (err) { + dev_err(dev, "could not register bluetooth uart: %d", err); + return err; + } + + return 0; +} + +static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) +{ + struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&btdev->hu); +} + +static int nokia_bluetooth_runtime_suspend(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + + nokia_flow_control(serdev, false); + return 0; +} + +static int nokia_bluetooth_runtime_resume(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + + nokia_flow_control(serdev, true); + return 0; +} + +static const struct dev_pm_ops nokia_bluetooth_pm_ops = { + SET_RUNTIME_PM_OPS(nokia_bluetooth_runtime_suspend, + nokia_bluetooth_runtime_resume, + NULL) +}; + +#ifdef CONFIG_OF +static const struct of_device_id nokia_bluetooth_of_match[] = { + { .compatible = "nokia,h4p-bluetooth", }, + {}, +}; +MODULE_DEVICE_TABLE(of, nokia_bluetooth_of_match); +#endif + +static struct serdev_device_driver nokia_bluetooth_serdev_driver = { + .probe = nokia_bluetooth_serdev_probe, + .remove = nokia_bluetooth_serdev_remove, + .driver = { + .name = "nokia-bluetooth", + .pm = &nokia_bluetooth_pm_ops, + .of_match_table = of_match_ptr(nokia_bluetooth_of_match), + }, +}; + +module_serdev_device_driver(nokia_bluetooth_serdev_driver); + +MODULE_AUTHOR("Sebastian Reichel "); +MODULE_DESCRIPTION("Bluetooth HCI UART Nokia H4+ driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c new file mode 100644 index 000000000..4b57e15f9 --- /dev/null +++ b/drivers/bluetooth/hci_qca.c @@ -0,0 +1,2628 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Bluetooth Software UART Qualcomm protocol + * + * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management + * protocol extension to H4. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved. + * + * Acknowledgements: + * This file is based on hci_ll.c, which was... + * Written by Ohad Ben-Cohen + * which was in turn based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" +#include "btqca.h" + +/* HCI_IBS protocol messages */ +#define HCI_IBS_SLEEP_IND 0xFE +#define HCI_IBS_WAKE_IND 0xFD +#define HCI_IBS_WAKE_ACK 0xFC +#define HCI_MAX_IBS_SIZE 10 + +#define IBS_WAKE_RETRANS_TIMEOUT_MS 100 +#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200 +#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 +#define CMD_TRANS_TIMEOUT_MS 100 +#define MEMDUMP_TIMEOUT_MS 8000 +#define IBS_DISABLE_SSR_TIMEOUT_MS \ + (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS) +#define FW_DOWNLOAD_TIMEOUT_MS 3000 + +/* susclk rate */ +#define SUSCLK_RATE_32KHZ 32768 + +/* Controller debug log header */ +#define QCA_DEBUG_HANDLE 0x2EDC + +/* max retry count when init fails */ +#define MAX_INIT_RETRIES 3 + +/* Controller dump header */ +#define QCA_SSR_DUMP_HANDLE 0x0108 +#define QCA_DUMP_PACKET_SIZE 255 +#define QCA_LAST_SEQUENCE_NUM 0xFFFF +#define QCA_CRASHBYTE_PACKET_LEN 1096 +#define QCA_MEMDUMP_BYTE 0xFB + +enum qca_flags { + QCA_IBS_DISABLED, + QCA_DROP_VENDOR_EVENT, + QCA_SUSPENDING, + QCA_MEMDUMP_COLLECTION, + QCA_HW_ERROR_EVENT, + QCA_SSR_TRIGGERED, + QCA_BT_OFF, + QCA_ROM_FW, + QCA_DEBUGFS_CREATED, +}; + +enum qca_capabilities { + QCA_CAP_WIDEBAND_SPEECH = BIT(0), + QCA_CAP_VALID_LE_STATES = BIT(1), +}; + +/* HCI_IBS transmit side sleep protocol states */ +enum tx_ibs_states { + HCI_IBS_TX_ASLEEP, + HCI_IBS_TX_WAKING, + HCI_IBS_TX_AWAKE, +}; + +/* HCI_IBS receive side sleep protocol states */ +enum rx_states { + HCI_IBS_RX_ASLEEP, + HCI_IBS_RX_AWAKE, +}; + +/* HCI_IBS transmit and receive side clock state vote */ +enum hci_ibs_clock_state_vote { + HCI_IBS_VOTE_STATS_UPDATE, + HCI_IBS_TX_VOTE_CLOCK_ON, + HCI_IBS_TX_VOTE_CLOCK_OFF, + HCI_IBS_RX_VOTE_CLOCK_ON, + HCI_IBS_RX_VOTE_CLOCK_OFF, +}; + +/* Controller memory dump states */ +enum qca_memdump_states { + QCA_MEMDUMP_IDLE, + QCA_MEMDUMP_COLLECTING, + QCA_MEMDUMP_COLLECTED, + QCA_MEMDUMP_TIMEOUT, +}; + +struct qca_memdump_info { + u32 current_seq_no; + u32 received_dump; + u32 ram_dump_size; +}; + +struct qca_memdump_event_hdr { + __u8 evt; + __u8 plen; + __u16 opcode; + __le16 seq_no; + __u8 reserved; +} __packed; + + +struct qca_dump_size { + __le32 dump_size; +} __packed; + +struct qca_data { + struct hci_uart *hu; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */ + struct sk_buff_head rx_memdump_q; /* Memdump wait queue */ + spinlock_t hci_ibs_lock; /* HCI_IBS state lock */ + u8 tx_ibs_state; /* HCI_IBS transmit side power state*/ + u8 rx_ibs_state; /* HCI_IBS receive side power state */ + bool tx_vote; /* Clock must be on for TX */ + bool rx_vote; /* Clock must be on for RX */ + struct timer_list tx_idle_timer; + u32 tx_idle_delay; + struct timer_list wake_retrans_timer; + u32 wake_retrans; + struct workqueue_struct *workqueue; + struct work_struct ws_awake_rx; + struct work_struct ws_awake_device; + struct work_struct ws_rx_vote_off; + struct work_struct ws_tx_vote_off; + struct work_struct ctrl_memdump_evt; + struct delayed_work ctrl_memdump_timeout; + struct qca_memdump_info *qca_memdump; + unsigned long flags; + struct completion drop_ev_comp; + wait_queue_head_t suspend_wait_q; + enum qca_memdump_states memdump_state; + struct mutex hci_memdump_lock; + + u16 fw_version; + u16 controller_id; + /* For debugging purpose */ + u64 ibs_sent_wacks; + u64 ibs_sent_slps; + 
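/* Descriptive annotation: the counters in this block track the one-byte
 * HCI_IBS messages exchanged with the controller -- HCI_IBS_SLEEP_IND
 * (0xFE), HCI_IBS_WAKE_IND (0xFD) and HCI_IBS_WAKE_ACK (0xFC) -- in each
 * direction, together with the serial clock vote bookkeeping that follows.
 * They are exported through debugfs by qca_debugfs_init().
 */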
u64 ibs_sent_wakes; + u64 ibs_recv_wacks; + u64 ibs_recv_slps; + u64 ibs_recv_wakes; + u64 vote_last_jif; + u32 vote_on_ms; + u32 vote_off_ms; + u64 tx_votes_on; + u64 rx_votes_on; + u64 tx_votes_off; + u64 rx_votes_off; + u64 votes_on; + u64 votes_off; +}; + +enum qca_speed_type { + QCA_INIT_SPEED = 1, + QCA_OPER_SPEED +}; + +/* + * Voltage regulator information required for configuring the + * QCA Bluetooth chipset + */ +struct qca_vreg { + const char *name; + unsigned int load_uA; +}; + +struct qca_device_data { + enum qca_btsoc_type soc_type; + struct qca_vreg *vregs; + size_t num_vregs; + uint32_t capabilities; +}; + +/* + * Platform data for the QCA Bluetooth power driver. + */ +struct qca_power { + struct device *dev; + struct regulator_bulk_data *vreg_bulk; + int num_vregs; + bool vregs_on; +}; + +struct qca_serdev { + struct hci_uart serdev_hu; + struct gpio_desc *bt_en; + struct gpio_desc *sw_ctrl; + struct clk *susclk; + enum qca_btsoc_type btsoc_type; + struct qca_power *bt_power; + u32 init_speed; + u32 oper_speed; + const char *firmware_name; +}; + +static int qca_regulator_enable(struct qca_serdev *qcadev); +static void qca_regulator_disable(struct qca_serdev *qcadev); +static void qca_power_shutdown(struct hci_uart *hu); +static int qca_power_off(struct hci_dev *hdev); +static void qca_controller_memdump(struct work_struct *work); +static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb); + +static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu) +{ + enum qca_btsoc_type soc_type; + + if (hu->serdev) { + struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); + + soc_type = qsd->btsoc_type; + } else { + soc_type = QCA_ROME; + } + + return soc_type; +} + +static const char *qca_get_firmware_name(struct hci_uart *hu) +{ + if (hu->serdev) { + struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); + + return qsd->firmware_name; + } else { + return NULL; + } +} + +static void __serial_clock_on(struct tty_struct *tty) +{ + /* TODO: Some chipset requires to enable UART clock on client + * side to save power consumption or manual work is required. + * Please put your code to control UART clock here if needed + */ +} + +static void __serial_clock_off(struct tty_struct *tty) +{ + /* TODO: Some chipset requires to disable UART clock on client + * side to save power consumption or manual work is required. 
+ * Please put your code to control UART clock off here if needed + */ +} + +/* serial_clock_vote needs to be called with the ibs lock held */ +static void serial_clock_vote(unsigned long vote, struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + unsigned int diff; + + bool old_vote = (qca->tx_vote | qca->rx_vote); + bool new_vote; + + switch (vote) { + case HCI_IBS_VOTE_STATS_UPDATE: + diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); + + if (old_vote) + qca->vote_off_ms += diff; + else + qca->vote_on_ms += diff; + return; + + case HCI_IBS_TX_VOTE_CLOCK_ON: + qca->tx_vote = true; + qca->tx_votes_on++; + break; + + case HCI_IBS_RX_VOTE_CLOCK_ON: + qca->rx_vote = true; + qca->rx_votes_on++; + break; + + case HCI_IBS_TX_VOTE_CLOCK_OFF: + qca->tx_vote = false; + qca->tx_votes_off++; + break; + + case HCI_IBS_RX_VOTE_CLOCK_OFF: + qca->rx_vote = false; + qca->rx_votes_off++; + break; + + default: + BT_ERR("Voting irregularity"); + return; + } + + new_vote = qca->rx_vote | qca->tx_vote; + + if (new_vote != old_vote) { + if (new_vote) + __serial_clock_on(hu->tty); + else + __serial_clock_off(hu->tty); + + BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false", + vote ? "true" : "false"); + + diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); + + if (new_vote) { + qca->votes_on++; + qca->vote_off_ms += diff; + } else { + qca->votes_off++; + qca->vote_on_ms += diff; + } + qca->vote_last_jif = jiffies; + } +} + +/* Builds and sends an HCI_IBS command packet. + * These are very simple packets with only 1 cmd byte. + */ +static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu) +{ + int err = 0; + struct sk_buff *skb = NULL; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd); + + skb = bt_skb_alloc(1, GFP_ATOMIC); + if (!skb) { + BT_ERR("Failed to allocate memory for HCI_IBS packet"); + return -ENOMEM; + } + + /* Assign HCI_IBS type */ + skb_put_u8(skb, cmd); + + skb_queue_tail(&qca->txq, skb); + + return err; +} + +static void qca_wq_awake_device(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_awake_device); + struct hci_uart *hu = qca->hu; + unsigned long retrans_delay; + unsigned long flags; + + BT_DBG("hu %p wq awake device", hu); + + /* Vote for serial clock */ + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + /* Send wake indication to device */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) + BT_ERR("Failed to send WAKE to device"); + + qca->ibs_sent_wakes++; + + /* Start retransmit timer */ + retrans_delay = msecs_to_jiffies(qca->wake_retrans); + mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +static void qca_wq_awake_rx(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_awake_rx); + struct hci_uart *hu = qca->hu; + unsigned long flags; + + BT_DBG("hu %p wq awake rx", hu); + + serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + qca->rx_ibs_state = HCI_IBS_RX_AWAKE; + + /* Always acknowledge device wake up, + * sending IBS message doesn't count as TX ON. 
+ */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) + BT_ERR("Failed to acknowledge device wake up"); + + qca->ibs_sent_wacks++; + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_rx_vote_off); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p rx clock vote off", hu); + + serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu); +} + +static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_tx_vote_off); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p tx clock vote off", hu); + + /* Run HCI tx handling unlocked */ + hci_uart_tx_wakeup(hu); + + /* Now that message queued to tty driver, vote for tty clocks off. + * It is up to the tty driver to pend the clocks off until tx done. + */ + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); +} + +static void hci_ibs_tx_idle_timeout(struct timer_list *t) +{ + struct qca_data *qca = from_timer(qca, t, tx_idle_timer); + struct hci_uart *hu = qca->hu; + unsigned long flags; + + BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + /* TX_IDLE, go to SLEEP */ + if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) { + BT_ERR("Failed to send SLEEP to device"); + break; + } + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->ibs_sent_slps++; + queue_work(qca->workqueue, &qca->ws_tx_vote_off); + break; + + case HCI_IBS_TX_ASLEEP: + case HCI_IBS_TX_WAKING: + default: + BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); +} + +static void hci_ibs_wake_retrans_timeout(struct timer_list *t) +{ + struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); + struct hci_uart *hu = qca->hu; + unsigned long flags, retrans_delay; + bool retransmit = false; + + BT_DBG("hu %p wake retransmit timeout in %d state", + hu, qca->tx_ibs_state); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */ + if (test_bit(QCA_SUSPENDING, &qca->flags)) { + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return; + } + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_WAKING: + /* No WAKE_ACK, retransmit WAKE */ + retransmit = true; + if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) { + BT_ERR("Failed to acknowledge device wake up"); + break; + } + qca->ibs_sent_wakes++; + retrans_delay = msecs_to_jiffies(qca->wake_retrans); + mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); + break; + + case HCI_IBS_TX_ASLEEP: + case HCI_IBS_TX_AWAKE: + default: + BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + if (retransmit) + hci_uart_tx_wakeup(hu); +} + + +static void qca_controller_memdump_timeout(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ctrl_memdump_timeout.work); + struct hci_uart *hu = qca->hu; + + mutex_lock(&qca->hci_memdump_lock); + if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { + qca->memdump_state = QCA_MEMDUMP_TIMEOUT; + if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { + /* Inject hw error event to reset the device + * and driver. 
+ */ + hci_reset_dev(hu->hdev); + } + } + + mutex_unlock(&qca->hci_memdump_lock); +} + + +/* Initialize protocol */ +static int qca_open(struct hci_uart *hu) +{ + struct qca_serdev *qcadev; + struct qca_data *qca; + + BT_DBG("hu %p qca_open", hu); + + if (!hci_uart_has_flow_control(hu)) + return -EOPNOTSUPP; + + qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); + if (!qca) + return -ENOMEM; + + skb_queue_head_init(&qca->txq); + skb_queue_head_init(&qca->tx_wait_q); + skb_queue_head_init(&qca->rx_memdump_q); + spin_lock_init(&qca->hci_ibs_lock); + mutex_init(&qca->hci_memdump_lock); + qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); + if (!qca->workqueue) { + BT_ERR("QCA Workqueue not initialized properly"); + kfree(qca); + return -ENOMEM; + } + + INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); + INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); + INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); + INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); + INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); + INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, + qca_controller_memdump_timeout); + init_waitqueue_head(&qca->suspend_wait_q); + + qca->hu = hu; + init_completion(&qca->drop_ev_comp); + + /* Assume we start with both sides asleep -- extra wakes OK */ + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; + + qca->vote_last_jif = jiffies; + + hu->priv = qca; + + if (hu->serdev) { + qcadev = serdev_device_get_drvdata(hu->serdev); + + switch (qcadev->btsoc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + hu->init_speed = qcadev->init_speed; + break; + + default: + break; + } + + if (qcadev->oper_speed) + hu->oper_speed = qcadev->oper_speed; + } + + timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); + qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; + + timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); + qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; + + BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", + qca->tx_idle_delay, qca->wake_retrans); + + return 0; +} + +static void qca_debugfs_init(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + struct dentry *ibs_dir; + umode_t mode; + + if (!hdev->debugfs) + return; + + if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) + return; + + ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); + + /* read only */ + mode = 0444; + debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); + debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); + debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir, + &qca->ibs_sent_slps); + debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir, + &qca->ibs_sent_wakes); + debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir, + &qca->ibs_sent_wacks); + debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir, + &qca->ibs_recv_slps); + debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir, + &qca->ibs_recv_wakes); + debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir, + &qca->ibs_recv_wacks); + debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); + debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); + debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); + debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); + debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); + 
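/* Example (debugfs mount point and hci index are hypothetical): the
+ * entries created here appear under /sys/kernel/debug/bluetooth/hci0/ibs/. */ +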
debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); + debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); + debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); + debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); + debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); + + /* read/write */ + mode = 0644; + debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); + debugfs_create_u32("tx_idle_delay", mode, ibs_dir, + &qca->tx_idle_delay); +} + +/* Flush protocol data */ +static int qca_flush(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca flush", hu); + + skb_queue_purge(&qca->tx_wait_q); + skb_queue_purge(&qca->txq); + + return 0; +} + +/* Close protocol */ +static int qca_close(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca close", hu); + + serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu); + + skb_queue_purge(&qca->tx_wait_q); + skb_queue_purge(&qca->txq); + skb_queue_purge(&qca->rx_memdump_q); + /* + * Shut the timers down so they can't be rearmed when + * destroy_workqueue() drains pending work which in turn might try + * to arm a timer. After shutdown rearm attempts are silently + * ignored by the timer core code. + */ + timer_shutdown_sync(&qca->tx_idle_timer); + timer_shutdown_sync(&qca->wake_retrans_timer); + destroy_workqueue(qca->workqueue); + qca->hu = NULL; + + kfree_skb(qca->rx_skb); + + hu->priv = NULL; + + kfree(qca); + + return 0; +} + +/* Called upon a wake-up-indication from the device. + */ +static void device_want_to_wakeup(struct hci_uart *hu) +{ + unsigned long flags; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p want to wake up", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_wakes++; + + /* Don't wake the rx up when suspending. */ + if (test_bit(QCA_SUSPENDING, &qca->flags)) { + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return; + } + + switch (qca->rx_ibs_state) { + case HCI_IBS_RX_ASLEEP: + /* Make sure clock is on - we may have turned clock off since + * receiving the wake up indicator awake rx clock. + */ + queue_work(qca->workqueue, &qca->ws_awake_rx); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return; + + case HCI_IBS_RX_AWAKE: + /* Always acknowledge device wake up, + * sending IBS message doesn't count as TX ON. + */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) { + BT_ERR("Failed to acknowledge device wake up"); + break; + } + qca->ibs_sent_wacks++; + break; + + default: + /* Any other state is illegal */ + BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d", + qca->rx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Called upon a sleep-indication from the device. 
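+ * The RX side is marked asleep and its clock vote is dropped from the
+ * workqueue; unexpected states are only logged.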
+ */ +static void device_want_to_sleep(struct hci_uart *hu) +{ + unsigned long flags; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_slps++; + + switch (qca->rx_ibs_state) { + case HCI_IBS_RX_AWAKE: + /* Update state */ + qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; + /* Vote off rx clock under workqueue */ + queue_work(qca->workqueue, &qca->ws_rx_vote_off); + break; + + case HCI_IBS_RX_ASLEEP: + break; + + default: + /* Any other state is illegal */ + BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d", + qca->rx_ibs_state); + break; + } + + wake_up_interruptible(&qca->suspend_wait_q); + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); +} + +/* Called upon wake-up-acknowledgement from the device + */ +static void device_woke_up(struct hci_uart *hu) +{ + unsigned long flags, idle_delay; + struct qca_data *qca = hu->priv; + struct sk_buff *skb = NULL; + + BT_DBG("hu %p woke up", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_wacks++; + + /* Don't react to the wake-up-acknowledgment when suspending. */ + if (test_bit(QCA_SUSPENDING, &qca->flags)) { + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return; + } + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + /* Expect one if we send 2 WAKEs */ + BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d", + qca->tx_ibs_state); + break; + + case HCI_IBS_TX_WAKING: + /* Send pending packets */ + while ((skb = skb_dequeue(&qca->tx_wait_q))) + skb_queue_tail(&qca->txq, skb); + + /* Switch timers and change state to HCI_IBS_TX_AWAKE */ + del_timer(&qca->wake_retrans_timer); + idle_delay = msecs_to_jiffies(qca->tx_idle_delay); + mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); + qca->tx_ibs_state = HCI_IBS_TX_AWAKE; + break; + + case HCI_IBS_TX_ASLEEP: + default: + BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d", + qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Enqueue frame for transmittion (padding, crc, etc) may be called from + * two simultaneous tasklets. + */ +static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + unsigned long flags = 0, idle_delay; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, + qca->tx_ibs_state); + + if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { + /* As SSR is in progress, ignore the packets */ + bt_dev_dbg(hu->hdev, "SSR is in progress"); + kfree_skb(skb); + return 0; + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + /* Don't go to sleep in middle of patch download or + * Out-Of-Band(GPIOs control) sleep is selected. + * Don't wake the device up when suspending. 
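+ * In either case the frame is queued straight to txq and the IBS
+ * state machine below is bypassed.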
+ */ + if (test_bit(QCA_IBS_DISABLED, &qca->flags) || + test_bit(QCA_SUSPENDING, &qca->flags)) { + skb_queue_tail(&qca->txq, skb); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return 0; + } + + /* Act according to current state */ + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + BT_DBG("Device awake, sending normally"); + skb_queue_tail(&qca->txq, skb); + idle_delay = msecs_to_jiffies(qca->tx_idle_delay); + mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); + break; + + case HCI_IBS_TX_ASLEEP: + BT_DBG("Device asleep, waking up and queueing packet"); + /* Save packet for later */ + skb_queue_tail(&qca->tx_wait_q, skb); + + qca->tx_ibs_state = HCI_IBS_TX_WAKING; + /* Schedule a work queue to wake up device */ + queue_work(qca->workqueue, &qca->ws_awake_device); + break; + + case HCI_IBS_TX_WAKING: + BT_DBG("Device waking up, queueing packet"); + /* Transient state; just keep packet for later */ + skb_queue_tail(&qca->tx_wait_q, skb); + break; + + default: + BT_ERR("Illegal tx state: %d (losing packet)", + qca->tx_ibs_state); + dev_kfree_skb_irq(skb); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + return 0; +} + +static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND); + + device_want_to_sleep(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND); + + device_want_to_wakeup(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK); + + device_woke_up(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* We receive debug logs from chip as an ACL packets. + * Instead of sending the data to ACL to decode the + * received data, we are pushing them to the above layers + * as a diagnostic packet. 
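+ * Only frames whose handle matches QCA_DEBUG_HANDLE are diverted to
+ * hci_recv_diag(); everything else takes the normal hci_recv_frame() path.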
+ */ + if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE) + return hci_recv_diag(hdev, skb); + + return hci_recv_frame(hdev, skb); +} + +static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + char buf[80]; + + snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n", + qca->controller_id); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n", + qca->fw_version); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor:Qualcomm\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\n", + hu->serdev->dev.driver->name); + skb_put_data(skb, buf, strlen(buf)); +} + +static void qca_controller_memdump(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ctrl_memdump_evt); + struct hci_uart *hu = qca->hu; + struct sk_buff *skb; + struct qca_memdump_event_hdr *cmd_hdr; + struct qca_memdump_info *qca_memdump = qca->qca_memdump; + struct qca_dump_size *dump; + u16 seq_no; + u32 rx_size; + int ret = 0; + enum qca_btsoc_type soc_type = qca_soc_type(hu); + + while ((skb = skb_dequeue(&qca->rx_memdump_q))) { + + mutex_lock(&qca->hci_memdump_lock); + /* Skip processing the received packets if timeout detected + * or memdump collection completed. + */ + if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || + qca->memdump_state == QCA_MEMDUMP_COLLECTED) { + mutex_unlock(&qca->hci_memdump_lock); + return; + } + + if (!qca_memdump) { + qca_memdump = kzalloc(sizeof(struct qca_memdump_info), + GFP_ATOMIC); + if (!qca_memdump) { + mutex_unlock(&qca->hci_memdump_lock); + return; + } + + qca->qca_memdump = qca_memdump; + } + + qca->memdump_state = QCA_MEMDUMP_COLLECTING; + cmd_hdr = (void *) skb->data; + seq_no = __le16_to_cpu(cmd_hdr->seq_no); + skb_pull(skb, sizeof(struct qca_memdump_event_hdr)); + + if (!seq_no) { + + /* This is the first frame of memdump packet from + * the controller, Disable IBS to recevie dump + * with out any interruption, ideally time required for + * the controller to send the dump is 8 seconds. let us + * start timer to handle this asynchronous activity. + */ + set_bit(QCA_IBS_DISABLED, &qca->flags); + set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); + dump = (void *) skb->data; + qca_memdump->ram_dump_size = __le32_to_cpu(dump->dump_size); + if (!(qca_memdump->ram_dump_size)) { + bt_dev_err(hu->hdev, "Rx invalid memdump size"); + kfree(qca_memdump); + kfree_skb(skb); + mutex_unlock(&qca->hci_memdump_lock); + return; + } + + queue_delayed_work(qca->workqueue, + &qca->ctrl_memdump_timeout, + msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)); + skb_pull(skb, sizeof(qca_memdump->ram_dump_size)); + qca_memdump->current_seq_no = 0; + qca_memdump->received_dump = 0; + ret = hci_devcd_init(hu->hdev, qca_memdump->ram_dump_size); + bt_dev_info(hu->hdev, "hci_devcd_init Return:%d", + ret); + if (ret < 0) { + kfree(qca->qca_memdump); + qca->qca_memdump = NULL; + qca->memdump_state = QCA_MEMDUMP_COLLECTED; + cancel_delayed_work(&qca->ctrl_memdump_timeout); + clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); + mutex_unlock(&qca->hci_memdump_lock); + return; + } + + bt_dev_info(hu->hdev, "QCA collecting dump of size:%u", + qca_memdump->ram_dump_size); + + } + + /* If sequence no 0 is missed then there is no point in + * accepting the other sequences. 
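+ * Drop the dump in that case: free the bookkeeping struct and the queued
+ * skb rather than appending fragments at unknown offsets.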
+ */ + if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { + bt_dev_err(hu->hdev, "QCA: Discarding other packets"); + kfree(qca_memdump); + kfree_skb(skb); + mutex_unlock(&qca->hci_memdump_lock); + return; + } + /* There could be chance of missing some packets from + * the controller. In such cases let us store the dummy + * packets in the buffer. + */ + /* For QCA6390, controller does not lost packets but + * sequence number field of packet sometimes has error + * bits, so skip this checking for missing packet. + */ + while ((seq_no > qca_memdump->current_seq_no + 1) && + (soc_type != QCA_QCA6390) && + seq_no != QCA_LAST_SEQUENCE_NUM) { + bt_dev_err(hu->hdev, "QCA controller missed packet:%d", + qca_memdump->current_seq_no); + rx_size = qca_memdump->received_dump; + rx_size += QCA_DUMP_PACKET_SIZE; + if (rx_size > qca_memdump->ram_dump_size) { + bt_dev_err(hu->hdev, + "QCA memdump received %d, no space for missed packet", + qca_memdump->received_dump); + break; + } + hci_devcd_append_pattern(hu->hdev, 0x00, + QCA_DUMP_PACKET_SIZE); + qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE; + qca_memdump->current_seq_no++; + } + + rx_size = qca_memdump->received_dump + skb->len; + if (rx_size <= qca_memdump->ram_dump_size) { + if ((seq_no != QCA_LAST_SEQUENCE_NUM) && + (seq_no != qca_memdump->current_seq_no)) { + bt_dev_err(hu->hdev, + "QCA memdump unexpected packet %d", + seq_no); + } + bt_dev_dbg(hu->hdev, + "QCA memdump packet %d with length %d", + seq_no, skb->len); + hci_devcd_append(hu->hdev, skb); + qca_memdump->current_seq_no += 1; + qca_memdump->received_dump = rx_size; + } else { + bt_dev_err(hu->hdev, + "QCA memdump received no space for packet %d", + qca_memdump->current_seq_no); + } + + if (seq_no == QCA_LAST_SEQUENCE_NUM) { + bt_dev_info(hu->hdev, + "QCA memdump Done, received %d, total %d", + qca_memdump->received_dump, + qca_memdump->ram_dump_size); + hci_devcd_complete(hu->hdev); + cancel_delayed_work(&qca->ctrl_memdump_timeout); + kfree(qca->qca_memdump); + qca->qca_memdump = NULL; + qca->memdump_state = QCA_MEMDUMP_COLLECTED; + clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); + } + + mutex_unlock(&qca->hci_memdump_lock); + } + +} + +static int qca_controller_memdump_event(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + + set_bit(QCA_SSR_TRIGGERED, &qca->flags); + skb_queue_tail(&qca->rx_memdump_q, skb); + queue_work(qca->workqueue, &qca->ctrl_memdump_evt); + + return 0; +} + +static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + + if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { + struct hci_event_hdr *hdr = (void *)skb->data; + + /* For the WCN3990 the vendor command for a baudrate change + * isn't sent as synchronous HCI command, because the + * controller sends the corresponding vendor event with the + * new baudrate. The event is received and properly decoded + * after changing the baudrate of the host port. It needs to + * be dropped, otherwise it can be misinterpreted as + * response to a later firmware download command (also a + * vendor command). + */ + + if (hdr->evt == HCI_EV_VENDOR) + complete(&qca->drop_ev_comp); + + kfree_skb(skb); + + return 0; + } + /* We receive chip memory dump as an event packet, With a dedicated + * handler followed by a hardware error event. When this event is + * received we store dump into a file before closing hci. 
This + * dump will help in triaging the issues. + */ + if ((skb->data[0] == HCI_VENDOR_PKT) && + (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE)) + return qca_controller_memdump_event(hdev, skb); + + return hci_recv_frame(hdev, skb); +} + +#define QCA_IBS_SLEEP_IND_EVENT \ + .type = HCI_IBS_SLEEP_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +#define QCA_IBS_WAKE_IND_EVENT \ + .type = HCI_IBS_WAKE_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +#define QCA_IBS_WAKE_ACK_EVENT \ + .type = HCI_IBS_WAKE_ACK, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +static const struct h4_recv_pkt qca_recv_pkts[] = { + { H4_RECV_ACL, .recv = qca_recv_acl_data }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = qca_recv_event }, + { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind }, + { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack }, + { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind }, +}; + +static int qca_recv(struct hci_uart *hu, const void *data, int count) +{ + struct qca_data *qca = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, + qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts)); + if (IS_ERR(qca->rx_skb)) { + int err = PTR_ERR(qca->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + qca->rx_skb = NULL; + return err; + } + + return count; +} + +static struct sk_buff *qca_dequeue(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + return skb_dequeue(&qca->txq); +} + +static uint8_t qca_get_baudrate_value(int speed) +{ + switch (speed) { + case 9600: + return QCA_BAUDRATE_9600; + case 19200: + return QCA_BAUDRATE_19200; + case 38400: + return QCA_BAUDRATE_38400; + case 57600: + return QCA_BAUDRATE_57600; + case 115200: + return QCA_BAUDRATE_115200; + case 230400: + return QCA_BAUDRATE_230400; + case 460800: + return QCA_BAUDRATE_460800; + case 500000: + return QCA_BAUDRATE_500000; + case 921600: + return QCA_BAUDRATE_921600; + case 1000000: + return QCA_BAUDRATE_1000000; + case 2000000: + return QCA_BAUDRATE_2000000; + case 3000000: + return QCA_BAUDRATE_3000000; + case 3200000: + return QCA_BAUDRATE_3200000; + case 3500000: + return QCA_BAUDRATE_3500000; + default: + return QCA_BAUDRATE_115200; + } +} + +static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + struct sk_buff *skb; + u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; + + if (baudrate > QCA_BAUDRATE_3200000) + return -EINVAL; + + cmd[4] = baudrate; + + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); + if (!skb) { + bt_dev_err(hdev, "Failed to allocate baudrate packet"); + return -ENOMEM; + } + + /* Assign commands to change baudrate and packet type. 
*/ + skb_put_data(skb, cmd, sizeof(cmd)); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + skb_queue_tail(&qca->txq, skb); + hci_uart_tx_wakeup(hu); + + /* Wait for the baudrate change request to be sent */ + + while (!skb_queue_empty(&qca->txq)) + usleep_range(100, 200); + + if (hu->serdev) + serdev_device_wait_until_sent(hu->serdev, + msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); + + /* Give the controller time to process the request */ + switch (qca_soc_type(hu)) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + usleep_range(1000, 10000); + break; + + default: + msleep(300); + } + + return 0; +} + +static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + if (hu->serdev) + serdev_device_set_baudrate(hu->serdev, speed); + else + hci_uart_set_baudrate(hu, speed); +} + +static int qca_send_power_pulse(struct hci_uart *hu, bool on) +{ + int ret; + int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); + u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE; + + /* These power pulses are single byte command which are sent + * at required baudrate to wcn3990. On wcn3990, we have an external + * circuit at Tx pin which decodes the pulse sent at specific baudrate. + * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT + * and also we use the same power inputs to turn on and off for + * Wi-Fi/BT. Powering up the power sources will not enable BT, until + * we send a power on pulse at 115200 bps. This algorithm will help to + * save power. Disabling hardware flow control is mandatory while + * sending power pulses to SoC. + */ + bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd); + + serdev_device_write_flush(hu->serdev); + hci_uart_set_flow_control(hu, true); + ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); + if (ret < 0) { + bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd); + return ret; + } + + serdev_device_wait_until_sent(hu->serdev, timeout); + hci_uart_set_flow_control(hu, false); + + /* Give to controller time to boot/shutdown */ + if (on) + msleep(100); + else + usleep_range(1000, 10000); + + return 0; +} + +static unsigned int qca_get_speed(struct hci_uart *hu, + enum qca_speed_type speed_type) +{ + unsigned int speed = 0; + + if (speed_type == QCA_INIT_SPEED) { + if (hu->init_speed) + speed = hu->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + } else { + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + } + + return speed; +} + +static int qca_check_speeds(struct hci_uart *hu) +{ + switch (qca_soc_type(hu)) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + if (!qca_get_speed(hu, QCA_INIT_SPEED) && + !qca_get_speed(hu, QCA_OPER_SPEED)) + return -EINVAL; + break; + + default: + if (!qca_get_speed(hu, QCA_INIT_SPEED) || + !qca_get_speed(hu, QCA_OPER_SPEED)) + return -EINVAL; + } + + return 0; +} + +static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) +{ + unsigned int speed, qca_baudrate; + struct qca_data *qca = hu->priv; + int ret = 0; + + if (speed_type == QCA_INIT_SPEED) { + speed = qca_get_speed(hu, QCA_INIT_SPEED); + if (speed) + host_set_baudrate(hu, speed); + } else { + enum qca_btsoc_type soc_type = qca_soc_type(hu); + + speed = qca_get_speed(hu, QCA_OPER_SPEED); + if (!speed) + return 0; 
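+ + /* The switch below changes the controller baudrate first (vendor HCI
+ * command) and only then retunes the host UART to match. */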
+ + /* Disable flow control for wcn3990 to deassert RTS while + * changing the baudrate of chip and host. + */ + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + hci_uart_set_flow_control(hu, true); + break; + + default: + break; + } + + switch (soc_type) { + case QCA_WCN3990: + reinit_completion(&qca->drop_ev_comp); + set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); + break; + + default: + break; + } + + qca_baudrate = qca_get_baudrate_value(speed); + bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed); + ret = qca_set_baudrate(hu->hdev, qca_baudrate); + if (ret) + goto error; + + host_set_baudrate(hu, speed); + +error: + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + hci_uart_set_flow_control(hu, false); + break; + + default: + break; + } + + switch (soc_type) { + case QCA_WCN3990: + /* Wait for the controller to send the vendor event + * for the baudrate change command. + */ + if (!wait_for_completion_timeout(&qca->drop_ev_comp, + msecs_to_jiffies(100))) { + bt_dev_err(hu->hdev, + "Failed to change controller baudrate\n"); + ret = -ETIMEDOUT; + } + + clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); + break; + + default: + break; + } + } + + return ret; +} + +static int qca_send_crashbuffer(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + struct sk_buff *skb; + + skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL); + if (!skb) { + bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet"); + return -ENOMEM; + } + + /* We forcefully crash the controller, by sending 0xfb byte for + * 1024 times. We also might have chance of losing data, To be + * on safer side we send 1096 bytes to the SoC. + */ + memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE, + QCA_CRASHBYTE_PACKET_LEN); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + bt_dev_info(hu->hdev, "crash the soc to collect controller dump"); + skb_queue_tail(&qca->txq, skb); + hci_uart_tx_wakeup(hu); + + return 0; +} + +static void qca_wait_for_dump_collection(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + + wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, + TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS); + + clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); +} + +static void qca_hw_error(struct hci_dev *hdev, u8 code) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + + set_bit(QCA_SSR_TRIGGERED, &qca->flags); + set_bit(QCA_HW_ERROR_EVENT, &qca->flags); + bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); + + if (qca->memdump_state == QCA_MEMDUMP_IDLE) { + /* If hardware error event received for other than QCA + * soc memory dump event, then we need to crash the SOC + * and wait here for 8 seconds to get the dump packets. + * This will block main thread to be on hold until we + * collect dump. + */ + set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); + qca_send_crashbuffer(hu); + qca_wait_for_dump_collection(hdev); + } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { + /* Let us wait here until memory dump collected or + * memory dump timer expired. 
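+ * qca_wait_for_dump_collection() blocks on the QCA_MEMDUMP_COLLECTION
+ * bit for at most MEMDUMP_TIMEOUT_MS.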
+ */ + bt_dev_info(hdev, "waiting for dump to complete"); + qca_wait_for_dump_collection(hdev); + } + + mutex_lock(&qca->hci_memdump_lock); + if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { + bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout"); + hci_devcd_abort(hu->hdev); + if (qca->qca_memdump) { + kfree(qca->qca_memdump); + qca->qca_memdump = NULL; + } + qca->memdump_state = QCA_MEMDUMP_TIMEOUT; + cancel_delayed_work(&qca->ctrl_memdump_timeout); + } + mutex_unlock(&qca->hci_memdump_lock); + + if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || + qca->memdump_state == QCA_MEMDUMP_COLLECTED) { + cancel_work_sync(&qca->ctrl_memdump_evt); + skb_queue_purge(&qca->rx_memdump_q); + } + + clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); +} + +static void qca_cmd_timeout(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + + set_bit(QCA_SSR_TRIGGERED, &qca->flags); + if (qca->memdump_state == QCA_MEMDUMP_IDLE) { + set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); + qca_send_crashbuffer(hu); + qca_wait_for_dump_collection(hdev); + } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { + /* Let us wait here until memory dump collected or + * memory dump timer expired. + */ + bt_dev_info(hdev, "waiting for dump to complete"); + qca_wait_for_dump_collection(hdev); + } + + mutex_lock(&qca->hci_memdump_lock); + if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { + qca->memdump_state = QCA_MEMDUMP_TIMEOUT; + if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { + /* Inject hw error event to reset the device + * and driver. + */ + hci_reset_dev(hu->hdev); + } + } + mutex_unlock(&qca->hci_memdump_lock); +} + +static bool qca_wakeup(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + bool wakeup; + + /* BT SoC attached through the serial bus is handled by the serdev driver. + * So we need to use the device handle of the serdev driver to get the + * status of device may wakeup. + */ + wakeup = device_may_wakeup(&hu->serdev->ctrl->dev); + bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup); + + return wakeup; +} + +static int qca_regulator_init(struct hci_uart *hu) +{ + enum qca_btsoc_type soc_type = qca_soc_type(hu); + struct qca_serdev *qcadev; + int ret; + bool sw_ctrl_state; + + /* Check for vregs status, may be hci down has turned + * off the voltage regulator. + */ + qcadev = serdev_device_get_drvdata(hu->serdev); + if (!qcadev->bt_power->vregs_on) { + serdev_device_close(hu->serdev); + ret = qca_regulator_enable(qcadev); + if (ret) + return ret; + + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hu->hdev, "failed to open port"); + return ret; + } + } + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + /* Forcefully enable wcn399x to enter in to boot mode. 
*/ + host_set_baudrate(hu, 2400); + ret = qca_send_power_pulse(hu, false); + if (ret) + return ret; + break; + + default: + break; + } + + /* For wcn6750 need to enable gpio bt_en */ + if (qcadev->bt_en) { + gpiod_set_value_cansleep(qcadev->bt_en, 0); + msleep(50); + gpiod_set_value_cansleep(qcadev->bt_en, 1); + msleep(50); + if (qcadev->sw_ctrl) { + sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); + bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); + } + } + + qca_set_speed(hu, QCA_INIT_SPEED); + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + ret = qca_send_power_pulse(hu, true); + if (ret) + return ret; + break; + + default: + break; + } + + /* Now the device is in ready state to communicate with host. + * To sync host with device we need to reopen port. + * Without this, we will have RTS and CTS synchronization + * issues. + */ + serdev_device_close(hu->serdev); + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hu->hdev, "failed to open port"); + return ret; + } + + hci_uart_set_flow_control(hu, false); + + return 0; +} + +static int qca_power_on(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + enum qca_btsoc_type soc_type = qca_soc_type(hu); + struct qca_serdev *qcadev; + struct qca_data *qca = hu->priv; + int ret = 0; + + /* Non-serdev device usually is powered by external power + * and don't need additional action in driver for power on + */ + if (!hu->serdev) + return 0; + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + ret = qca_regulator_init(hu); + break; + + default: + qcadev = serdev_device_get_drvdata(hu->serdev); + if (qcadev->bt_en) { + gpiod_set_value_cansleep(qcadev->bt_en, 1); + /* Controller needs time to bootup. */ + msleep(150); + } + } + + clear_bit(QCA_BT_OFF, &qca->flags); + return ret; +} + +static void hci_coredump_qca(struct hci_dev *hdev) +{ + static const u8 param[] = { 0x26 }; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb)); + kfree_skb(skb); +} + +static int qca_setup(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + struct qca_data *qca = hu->priv; + unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; + unsigned int retries = 0; + enum qca_btsoc_type soc_type = qca_soc_type(hu); + const char *firmware_name = qca_get_firmware_name(hu); + int ret; + struct qca_btsoc_version ver; + const char *soc_name; + + ret = qca_check_speeds(hu); + if (ret) + return ret; + + clear_bit(QCA_ROM_FW, &qca->flags); + /* Patch downloading has to be done without IBS mode */ + set_bit(QCA_IBS_DISABLED, &qca->flags); + + /* Enable controller to do both LE scan and BR/EDR inquiry + * simultaneously. 
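+ * (HCI_QUIRK_SIMULTANEOUS_DISCOVERY lets the core run both discovery
+ * procedures at the same time instead of back to back.)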
+ */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + soc_name = "wcn399x"; + break; + + case QCA_WCN6750: + soc_name = "wcn6750"; + break; + + case QCA_WCN6855: + soc_name = "wcn6855"; + break; + + case QCA_WCN7850: + soc_name = "wcn7850"; + break; + + default: + soc_name = "ROME/QCA6390"; + } + bt_dev_info(hdev, "setting up %s", soc_name); + + qca->memdump_state = QCA_MEMDUMP_IDLE; + +retry: + ret = qca_power_on(hdev); + if (ret) + goto out; + + clear_bit(QCA_SSR_TRIGGERED, &qca->flags); + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + hci_set_aosp_capable(hdev); + + ret = qca_read_soc_version(hdev, &ver, soc_type); + if (ret) + goto out; + break; + + default: + qca_set_speed(hu, QCA_INIT_SPEED); + } + + /* Setup user speed if needed */ + speed = qca_get_speed(hu, QCA_OPER_SPEED); + if (speed) { + ret = qca_set_speed(hu, QCA_OPER_SPEED); + if (ret) + goto out; + + qca_baudrate = qca_get_baudrate_value(speed); + } + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + break; + + default: + /* Get QCA version information */ + ret = qca_read_soc_version(hdev, &ver, soc_type); + if (ret) + goto out; + } + + /* Setup patch / NVM configurations */ + ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver, + firmware_name); + if (!ret) { + clear_bit(QCA_IBS_DISABLED, &qca->flags); + qca_debugfs_init(hdev); + hu->hdev->hw_error = qca_hw_error; + hu->hdev->cmd_timeout = qca_cmd_timeout; + if (device_can_wakeup(hu->serdev->ctrl->dev.parent)) + hu->hdev->wakeup = qca_wakeup; + } else if (ret == -ENOENT) { + /* No patch/nvm-config found, run with original fw/config */ + set_bit(QCA_ROM_FW, &qca->flags); + ret = 0; + } else if (ret == -EAGAIN) { + /* + * Userspace firmware loader will return -EAGAIN in case no + * patch/nvm-config is found, so run with original fw/config. 
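+ * This mirrors the -ENOENT case above: flag QCA_ROM_FW and carry on
+ * with the ROM firmware.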
+ */ + set_bit(QCA_ROM_FW, &qca->flags); + ret = 0; + } + +out: + if (ret && retries < MAX_INIT_RETRIES) { + bt_dev_warn(hdev, "Retry BT power ON:%d", retries); + qca_power_shutdown(hu); + if (hu->serdev) { + serdev_device_close(hu->serdev); + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hdev, "failed to open port"); + return ret; + } + } + retries++; + goto retry; + } + + /* Setup bdaddr */ + if (soc_type == QCA_ROME) + hu->hdev->set_bdaddr = qca_set_bdaddr_rome; + else + hu->hdev->set_bdaddr = qca_set_bdaddr; + qca->fw_version = le16_to_cpu(ver.patch_ver); + qca->controller_id = le16_to_cpu(ver.rom_ver); + hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL); + + return ret; +} + +static const struct hci_uart_proto qca_proto = { + .id = HCI_UART_QCA, + .name = "QCA", + .manufacturer = 29, + .init_speed = 115200, + .oper_speed = 3000000, + .open = qca_open, + .close = qca_close, + .flush = qca_flush, + .setup = qca_setup, + .recv = qca_recv, + .enqueue = qca_enqueue, + .dequeue = qca_dequeue, +}; + +static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = { + .soc_type = QCA_WCN3988, + .vregs = (struct qca_vreg []) { + { "vddio", 15000 }, + { "vddxo", 80000 }, + { "vddrf", 300000 }, + { "vddch0", 450000 }, + }, + .num_vregs = 4, +}; + +static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = { + .soc_type = QCA_WCN3990, + .vregs = (struct qca_vreg []) { + { "vddio", 15000 }, + { "vddxo", 80000 }, + { "vddrf", 300000 }, + { "vddch0", 450000 }, + }, + .num_vregs = 4, +}; + +static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = { + .soc_type = QCA_WCN3991, + .vregs = (struct qca_vreg []) { + { "vddio", 15000 }, + { "vddxo", 80000 }, + { "vddrf", 300000 }, + { "vddch0", 450000 }, + }, + .num_vregs = 4, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + +static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = { + .soc_type = QCA_WCN3998, + .vregs = (struct qca_vreg []) { + { "vddio", 10000 }, + { "vddxo", 80000 }, + { "vddrf", 300000 }, + { "vddch0", 450000 }, + }, + .num_vregs = 4, +}; + +static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = { + .soc_type = QCA_QCA6390, + .num_vregs = 0, +}; + +static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = { + .soc_type = QCA_WCN6750, + .vregs = (struct qca_vreg []) { + { "vddio", 5000 }, + { "vddaon", 26000 }, + { "vddbtcxmx", 126000 }, + { "vddrfacmn", 12500 }, + { "vddrfa0p8", 102000 }, + { "vddrfa1p7", 302000 }, + { "vddrfa1p2", 257000 }, + { "vddrfa2p2", 1700000 }, + { "vddasd", 200 }, + }, + .num_vregs = 9, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + +static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = { + .soc_type = QCA_WCN6855, + .vregs = (struct qca_vreg []) { + { "vddio", 5000 }, + { "vddbtcxmx", 126000 }, + { "vddrfacmn", 12500 }, + { "vddrfa0p8", 102000 }, + { "vddrfa1p7", 302000 }, + { "vddrfa1p2", 257000 }, + }, + .num_vregs = 6, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + +static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = { + .soc_type = QCA_WCN7850, + .vregs = (struct qca_vreg []) { + { "vddio", 5000 }, + { "vddaon", 26000 }, + { "vdddig", 126000 }, + { "vddrfa0p8", 102000 }, + { "vddrfa1p2", 257000 }, + { "vddrfa1p9", 302000 }, + }, + .num_vregs = 6, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + +static void qca_power_shutdown(struct 
hci_uart *hu) +{ + struct qca_serdev *qcadev; + struct qca_data *qca = hu->priv; + unsigned long flags; + enum qca_btsoc_type soc_type = qca_soc_type(hu); + bool sw_ctrl_state; + + /* From this point we go into power off state. But serial port is + * still open, stop queueing the IBS data and flush all the buffered + * data in skb's. + */ + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); + qca_flush(hu); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Non-serdev device usually is powered by external power + * and don't need additional action in driver for power down + */ + if (!hu->serdev) + return; + + qcadev = serdev_device_get_drvdata(hu->serdev); + + switch (soc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + host_set_baudrate(hu, 2400); + qca_send_power_pulse(hu, false); + qca_regulator_disable(qcadev); + break; + + case QCA_WCN6750: + case QCA_WCN6855: + gpiod_set_value_cansleep(qcadev->bt_en, 0); + msleep(100); + qca_regulator_disable(qcadev); + if (qcadev->sw_ctrl) { + sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); + bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); + } + break; + + default: + gpiod_set_value_cansleep(qcadev->bt_en, 0); + } + + set_bit(QCA_BT_OFF, &qca->flags); +} + +static int qca_power_off(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + enum qca_btsoc_type soc_type = qca_soc_type(hu); + + hu->hdev->hw_error = NULL; + hu->hdev->cmd_timeout = NULL; + + del_timer_sync(&qca->wake_retrans_timer); + del_timer_sync(&qca->tx_idle_timer); + + /* Stop sending shutdown command if soc crashes. */ + if (soc_type != QCA_ROME + && qca->memdump_state == QCA_MEMDUMP_IDLE) { + qca_send_pre_shutdown_cmd(hdev); + usleep_range(8000, 10000); + } + + qca_power_shutdown(hu); + return 0; +} + +static int qca_regulator_enable(struct qca_serdev *qcadev) +{ + struct qca_power *power = qcadev->bt_power; + int ret; + + /* Already enabled */ + if (power->vregs_on) + return 0; + + BT_DBG("enabling %d regulators)", power->num_vregs); + + ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk); + if (ret) + return ret; + + power->vregs_on = true; + + ret = clk_prepare_enable(qcadev->susclk); + if (ret) + qca_regulator_disable(qcadev); + + return ret; +} + +static void qca_regulator_disable(struct qca_serdev *qcadev) +{ + struct qca_power *power; + + if (!qcadev) + return; + + power = qcadev->bt_power; + + /* Already disabled? 
*/ + if (!power->vregs_on) + return; + + regulator_bulk_disable(power->num_vregs, power->vreg_bulk); + power->vregs_on = false; + + clk_disable_unprepare(qcadev->susclk); +} + +static int qca_init_regulators(struct qca_power *qca, + const struct qca_vreg *vregs, size_t num_vregs) +{ + struct regulator_bulk_data *bulk; + int ret; + int i; + + bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); + if (!bulk) + return -ENOMEM; + + for (i = 0; i < num_vregs; i++) + bulk[i].supply = vregs[i].name; + + ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); + if (ret < 0) + return ret; + + for (i = 0; i < num_vregs; i++) { + ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA); + if (ret) + return ret; + } + + qca->vreg_bulk = bulk; + qca->num_vregs = num_vregs; + + return 0; +} + +static int qca_serdev_probe(struct serdev_device *serdev) +{ + struct qca_serdev *qcadev; + struct hci_dev *hdev; + const struct qca_device_data *data; + int err; + bool power_ctrl_enabled = true; + + qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); + if (!qcadev) + return -ENOMEM; + + qcadev->serdev_hu.serdev = serdev; + data = device_get_match_data(&serdev->dev); + serdev_device_set_drvdata(serdev, qcadev); + device_property_read_string(&serdev->dev, "firmware-name", + &qcadev->firmware_name); + device_property_read_u32(&serdev->dev, "max-speed", + &qcadev->oper_speed); + if (!qcadev->oper_speed) + BT_DBG("UART will pick default operating speed"); + + if (data) + qcadev->btsoc_type = data->soc_type; + else + qcadev->btsoc_type = QCA_ROME; + + switch (qcadev->btsoc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + qcadev->bt_power = devm_kzalloc(&serdev->dev, + sizeof(struct qca_power), + GFP_KERNEL); + if (!qcadev->bt_power) + return -ENOMEM; + + qcadev->bt_power->dev = &serdev->dev; + err = qca_init_regulators(qcadev->bt_power, data->vregs, + data->num_vregs); + if (err) { + BT_ERR("Failed to init regulators:%d", err); + return err; + } + + qcadev->bt_power->vregs_on = false; + + qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR_OR_NULL(qcadev->bt_en) && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855)) { + dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); + power_ctrl_enabled = false; + } + + qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", + GPIOD_IN); + if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855 || + data->soc_type == QCA_WCN7850)) + dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n"); + + qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); + if (IS_ERR(qcadev->susclk)) { + dev_err(&serdev->dev, "failed to acquire clk\n"); + return PTR_ERR(qcadev->susclk); + } + + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); + if (err) { + BT_ERR("wcn3990 serdev registration failed"); + return err; + } + break; + + default: + qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR_OR_NULL(qcadev->bt_en)) { + dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); + power_ctrl_enabled = false; + } + + qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); + if (IS_ERR(qcadev->susclk)) { + dev_warn(&serdev->dev, "failed to acquire clk\n"); + return PTR_ERR(qcadev->susclk); + } + err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); + if (err) + return err; + + 
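/* Non-WCN default path: the sleep clock is programmed to SUSCLK_RATE_32KHZ
+ * and enabled before the HCI UART device is registered below. */ +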
err = clk_prepare_enable(qcadev->susclk); + if (err) + return err; + + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); + if (err) { + BT_ERR("Rome serdev registration failed"); + clk_disable_unprepare(qcadev->susclk); + return err; + } + } + + hdev = qcadev->serdev_hu.hdev; + + if (power_ctrl_enabled) { + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + hdev->shutdown = qca_power_off; + } + + if (data) { + /* Wideband speech support must be set per driver since it can't + * be queried via hci. Same with the valid le states quirk. + */ + if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH) + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + &hdev->quirks); + + if (data->capabilities & QCA_CAP_VALID_LE_STATES) + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + } + + return 0; +} + +static void qca_serdev_remove(struct serdev_device *serdev) +{ + struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + struct qca_power *power = qcadev->bt_power; + + switch (qcadev->btsoc_type) { + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: + case QCA_WCN6855: + case QCA_WCN7850: + if (power->vregs_on) { + qca_power_shutdown(&qcadev->serdev_hu); + break; + } + fallthrough; + + default: + if (qcadev->susclk) + clk_disable_unprepare(qcadev->susclk); + } + + hci_uart_unregister_device(&qcadev->serdev_hu); +} + +static void qca_serdev_shutdown(struct device *dev) +{ + int ret; + int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); + struct serdev_device *serdev = to_serdev_device(dev); + struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &qcadev->serdev_hu; + struct hci_dev *hdev = hu->hdev; + struct qca_data *qca = hu->priv; + const u8 ibs_wake_cmd[] = { 0xFD }; + const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; + + if (qcadev->btsoc_type == QCA_QCA6390) { + if (test_bit(QCA_BT_OFF, &qca->flags) || + !test_bit(HCI_RUNNING, &hdev->flags)) + return; + + serdev_device_write_flush(serdev); + ret = serdev_device_write_buf(serdev, ibs_wake_cmd, + sizeof(ibs_wake_cmd)); + if (ret < 0) { + BT_ERR("QCA send IBS_WAKE_IND error: %d", ret); + return; + } + serdev_device_wait_until_sent(serdev, timeout); + usleep_range(8000, 10000); + + serdev_device_write_flush(serdev); + ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd, + sizeof(edl_reset_soc_cmd)); + if (ret < 0) { + BT_ERR("QCA send EDL_RESET_REQ error: %d", ret); + return; + } + serdev_device_wait_until_sent(serdev, timeout); + usleep_range(8000, 10000); + } +} + +static int __maybe_unused qca_suspend(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &qcadev->serdev_hu; + struct qca_data *qca = hu->priv; + unsigned long flags; + bool tx_pending = false; + int ret = 0; + u8 cmd; + u32 wait_timeout = 0; + + set_bit(QCA_SUSPENDING, &qca->flags); + + /* if BT SoC is running with default firmware then it does not + * support in-band sleep + */ + if (test_bit(QCA_ROM_FW, &qca->flags)) + return 0; + + /* During SSR after memory dump collection, controller will be + * powered off and then powered on.If controller is powered off + * during SSR then we should wait until SSR is completed. 
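+ * (QCA_BT_OFF with QCA_SSR_TRIGGERED clear means an ordinary power-off,
+ * so suspend returns early with nothing to wait for.)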
+ */ + if (test_bit(QCA_BT_OFF, &qca->flags) && + !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) + return 0; + + if (test_bit(QCA_IBS_DISABLED, &qca->flags) || + test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { + wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? + IBS_DISABLE_SSR_TIMEOUT_MS : + FW_DOWNLOAD_TIMEOUT_MS; + + /* QCA_IBS_DISABLED flag is set to true, During FW download + * and during memory dump collection. It is reset to false, + * After FW download complete. + */ + wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, + TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout)); + + if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { + bt_dev_err(hu->hdev, "SSR or FW download time out"); + ret = -ETIMEDOUT; + goto error; + } + } + + cancel_work_sync(&qca->ws_awake_device); + cancel_work_sync(&qca->ws_awake_rx); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_WAKING: + del_timer(&qca->wake_retrans_timer); + fallthrough; + case HCI_IBS_TX_AWAKE: + del_timer(&qca->tx_idle_timer); + + serdev_device_write_flush(hu->serdev); + cmd = HCI_IBS_SLEEP_IND; + ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); + + if (ret < 0) { + BT_ERR("Failed to send SLEEP to device"); + break; + } + + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->ibs_sent_slps++; + tx_pending = true; + break; + + case HCI_IBS_TX_ASLEEP: + break; + + default: + BT_ERR("Spurious tx state %d", qca->tx_ibs_state); + ret = -EINVAL; + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + if (ret < 0) + goto error; + + if (tx_pending) { + serdev_device_wait_until_sent(hu->serdev, + msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); + } + + /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going + * to sleep, so that the packet does not wake the system later. 
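+ * device_want_to_sleep() moves rx_ibs_state to HCI_IBS_RX_ASLEEP and
+ * wakes suspend_wait_q, which is what the wait below checks.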
+ */ + ret = wait_event_interruptible_timeout(qca->suspend_wait_q, + qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, + msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS)); + if (ret == 0) { + ret = -ETIMEDOUT; + goto error; + } + + return 0; + +error: + clear_bit(QCA_SUSPENDING, &qca->flags); + + return ret; +} + +static int __maybe_unused qca_resume(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &qcadev->serdev_hu; + struct qca_data *qca = hu->priv; + + clear_bit(QCA_SUSPENDING, &qca->flags); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume); + +#ifdef CONFIG_OF +static const struct of_device_id qca_bluetooth_of_match[] = { + { .compatible = "qcom,qca6174-bt" }, + { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390}, + { .compatible = "qcom,qca9377-bt" }, + { .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988}, + { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990}, + { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991}, + { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998}, + { .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750}, + { .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855}, + { .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id qca_bluetooth_acpi_match[] = { + { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 }, + { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 }, + { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 }, + { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match); +#endif + +#ifdef CONFIG_DEV_COREDUMP +static void hciqca_coredump(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &qcadev->serdev_hu; + struct hci_dev *hdev = hu->hdev; + + if (hdev->dump.coredump) + hdev->dump.coredump(hdev); +} +#endif + +static struct serdev_device_driver qca_serdev_driver = { + .probe = qca_serdev_probe, + .remove = qca_serdev_remove, + .driver = { + .name = "hci_uart_qca", + .of_match_table = of_match_ptr(qca_bluetooth_of_match), + .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match), + .shutdown = qca_serdev_shutdown, + .pm = &qca_pm_ops, +#ifdef CONFIG_DEV_COREDUMP + .coredump = hciqca_coredump, +#endif + }, +}; + +int __init qca_init(void) +{ + serdev_device_driver_register(&qca_serdev_driver); + + return hci_uart_register_proto(&qca_proto); +} + +int __exit qca_deinit(void) +{ + serdev_device_driver_unregister(&qca_serdev_driver); + + return hci_uart_unregister_proto(&qca_proto); +} diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c new file mode 100644 index 000000000..f16fd79bc --- /dev/null +++ b/drivers/bluetooth/hci_serdev.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Bluetooth HCI serdev driver lib + * + * Copyright (C) 2017 Linaro, Ltd., Rob Herring + * + * Based on hci_ldisc.c: + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + */ + +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +static inline void 
hci_uart_tx_complete(struct hci_uart *hu, int pkt_type) +{ + struct hci_dev *hdev = hu->hdev; + + /* Update HCI stat counters */ + switch (pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } +} + +static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) +{ + struct sk_buff *skb = hu->tx_skb; + + if (!skb) { + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + skb = hu->proto->dequeue(hu); + } else + hu->tx_skb = NULL; + + return skb; +} + +static void hci_uart_write_work(struct work_struct *work) +{ + struct hci_uart *hu = container_of(work, struct hci_uart, write_work); + struct serdev_device *serdev = hu->serdev; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + + /* REVISIT: + * should we cope with bad skbs or ->write() returning an error value? + */ + do { + clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + + while ((skb = hci_uart_dequeue(hu))) { + int len; + + len = serdev_device_write_buf(serdev, + skb->data, skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len) { + hu->tx_skb = skb; + break; + } + + hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); + kfree_skb(skb); + } + + clear_bit(HCI_UART_SENDING, &hu->tx_state); + } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); +} + +/* ------- Interface to HCI layer ------ */ + +/* Reset device */ +static int hci_uart_flush(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hdev %p serdev %p", hdev, hu->serdev); + + if (hu->tx_skb) { + kfree_skb(hu->tx_skb); hu->tx_skb = NULL; + } + + /* Flush any pending characters in the driver and discipline. */ + serdev_device_write_flush(hu->serdev); + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + hu->proto->flush(hu); + + return 0; +} + +/* Initialize device */ +static int hci_uart_open(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + int err; + + BT_DBG("%s %p", hdev->name, hdev); + + /* When Quirk HCI_QUIRK_NON_PERSISTENT_SETUP is set by + * driver, BT SoC is completely turned OFF during + * BT OFF. Upon next BT ON UART port should be opened. + */ + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + err = serdev_device_open(hu->serdev); + if (err) + return err; + set_bit(HCI_UART_PROTO_READY, &hu->flags); + } + + /* Undo clearing this from hci_uart_close() */ + hdev->flush = hci_uart_flush; + + return 0; +} + +/* Close device */ +static int hci_uart_close(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hdev %p", hdev); + + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + return 0; + + hci_uart_flush(hdev); + hdev->flush = NULL; + + /* When QUIRK HCI_QUIRK_NON_PERSISTENT_SETUP is set by driver, + * BT SOC is completely powered OFF during BT OFF, holding port + * open may drain the battery. 
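+ * Clearing HCI_UART_PROTO_READY here also makes the next hci_uart_open()
+ * reopen the serdev port.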
+ */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + serdev_device_close(hu->serdev); + } + + return 0; +} + +/* Send frames from HCI layer */ +static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); + + hu->proto->enqueue(hu, skb); + + hci_uart_tx_wakeup(hu); + + return 0; +} + +static int hci_uart_setup(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct hci_rp_read_local_version *ver; + struct sk_buff *skb; + unsigned int speed; + int err; + + /* Init speed if any */ + if (hu->init_speed) + speed = hu->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + else + speed = 0; + + if (speed) + serdev_device_set_baudrate(hu->serdev, speed); + + /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + + if (hu->proto->set_baudrate && speed) { + err = hu->proto->set_baudrate(hu, speed); + if (err) + bt_dev_err(hdev, "Failed to set baudrate"); + else + serdev_device_set_baudrate(hu->serdev, speed); + } + + if (hu->proto->setup) + return hu->proto->setup(hu); + + if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags)) + return 0; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading local version info failed (%ld)", + PTR_ERR(skb)); + return 0; + } + + if (skb->len != sizeof(*ver)) + bt_dev_err(hdev, "Event length mismatch for version info"); + + kfree_skb(skb); + return 0; +} + +/* Check if the device is wakeable */ +static bool hci_uart_wakeup(struct hci_dev *hdev) +{ + /* HCI UART devices are assumed to be wakeable by default. + * Implement wakeup callback to override this behavior. + */ + return true; +} + +/** hci_uart_write_wakeup - transmit buffer wakeup + * @serdev: serial device + * + * This function is called by the serdev framework when it accepts + * more data being sent. + */ +static void hci_uart_write_wakeup(struct serdev_device *serdev) +{ + struct hci_uart *hu = serdev_device_get_drvdata(serdev); + + BT_DBG(""); + + if (!hu || serdev != hu->serdev) { + WARN_ON(1); + return; + } + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + hci_uart_tx_wakeup(hu); +} + +/** hci_uart_receive_buf - receive buffer wakeup + * @serdev: serial device + * @data: pointer to received data + * @count: count of received data in bytes + * + * This function is called by the serdev framework when it received data + * in the RX buffer. 
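+ * The received bytes are forwarded to the active protocol's recv handler.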
+ * + * Return: number of processed bytes + */ +static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data, + size_t count) +{ + struct hci_uart *hu = serdev_device_get_drvdata(serdev); + + if (!hu || serdev != hu->serdev) { + WARN_ON(1); + return 0; + } + + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + return 0; + + /* It does not need a lock here as it is already protected by a mutex in + * tty caller + */ + hu->proto->recv(hu, data, count); + + if (hu->hdev) + hu->hdev->stat.byte_rx += count; + + return count; +} + +static const struct serdev_device_ops hci_serdev_client_ops = { + .receive_buf = hci_uart_receive_buf, + .write_wakeup = hci_uart_write_wakeup, +}; + +int hci_uart_register_device(struct hci_uart *hu, + const struct hci_uart_proto *p) +{ + int err; + struct hci_dev *hdev; + + BT_DBG(""); + + serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); + + if (percpu_init_rwsem(&hu->proto_lock)) + return -ENOMEM; + + err = serdev_device_open(hu->serdev); + if (err) + goto err_rwsem; + + err = p->open(hu); + if (err) + goto err_open; + + hu->proto = p; + set_bit(HCI_UART_PROTO_READY, &hu->flags); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + err = -ENOMEM; + goto err_alloc; + } + + hu->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, hu); + + INIT_WORK(&hu->init_ready, hci_uart_init_work); + INIT_WORK(&hu->write_work, hci_uart_write_work); + + /* Only when vendor specific setup callback is provided, consider + * the manufacturer information valid. This avoids filling in the + * value for Ericsson when nothing is specified. + */ + if (hu->proto->setup) + hdev->manufacturer = hu->proto->manufacturer; + + hdev->open = hci_uart_open; + hdev->close = hci_uart_close; + hdev->flush = hci_uart_flush; + hdev->send = hci_uart_send_frame; + hdev->setup = hci_uart_setup; + if (!hdev->wakeup) + hdev->wakeup = hci_uart_wakeup; + SET_HCIDEV_DEV(hdev, &hu->serdev->dev); + + if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags)) + set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks); + + if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags)) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + + if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) + set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); + + if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) + hdev->dev_type = HCI_AMP; + else + hdev->dev_type = HCI_PRIMARY; + + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) + return 0; + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + err = -ENODEV; + goto err_register; + } + + set_bit(HCI_UART_REGISTERED, &hu->flags); + + return 0; + +err_register: + hci_free_dev(hdev); +err_alloc: + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + p->close(hu); +err_open: + serdev_device_close(hu->serdev); +err_rwsem: + percpu_free_rwsem(&hu->proto_lock); + return err; +} +EXPORT_SYMBOL_GPL(hci_uart_register_device); + +void hci_uart_unregister_device(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + + cancel_work_sync(&hu->init_ready); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + cancel_work_sync(&hu->write_work); + + hu->proto->close(hu); + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + serdev_device_close(hu->serdev); + } + percpu_free_rwsem(&hu->proto_lock); +} +EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git 
a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h new file mode 100644 index 000000000..fb4a2d0d8 --- /dev/null +++ b/drivers/bluetooth/hci_uart.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + */ + +#ifndef N_HCI +#define N_HCI 15 +#endif + +/* Ioctls */ +#define HCIUARTSETPROTO _IOW('U', 200, int) +#define HCIUARTGETPROTO _IOR('U', 201, int) +#define HCIUARTGETDEVICE _IOR('U', 202, int) +#define HCIUARTSETFLAGS _IOW('U', 203, int) +#define HCIUARTGETFLAGS _IOR('U', 204, int) + +/* UART protocols */ +#define HCI_UART_MAX_PROTO 12 + +#define HCI_UART_H4 0 +#define HCI_UART_BCSP 1 +#define HCI_UART_3WIRE 2 +#define HCI_UART_H4DS 3 +#define HCI_UART_LL 4 +#define HCI_UART_ATH3K 5 +#define HCI_UART_INTEL 6 +#define HCI_UART_BCM 7 +#define HCI_UART_QCA 8 +#define HCI_UART_AG6XX 9 +#define HCI_UART_NOKIA 10 +#define HCI_UART_MRVL 11 + +#define HCI_UART_RAW_DEVICE 0 +#define HCI_UART_RESET_ON_INIT 1 +#define HCI_UART_CREATE_AMP 2 +#define HCI_UART_INIT_PENDING 3 +#define HCI_UART_EXT_CONFIG 4 +#define HCI_UART_VND_DETECT 5 + +struct hci_uart; +struct serdev_device; + +struct hci_uart_proto { + unsigned int id; + const char *name; + unsigned int manufacturer; + unsigned int init_speed; + unsigned int oper_speed; + int (*open)(struct hci_uart *hu); + int (*close)(struct hci_uart *hu); + int (*flush)(struct hci_uart *hu); + int (*setup)(struct hci_uart *hu); + int (*set_baudrate)(struct hci_uart *hu, unsigned int speed); + int (*recv)(struct hci_uart *hu, const void *data, int len); + int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb); + struct sk_buff *(*dequeue)(struct hci_uart *hu); +}; + +struct hci_uart { + struct tty_struct *tty; + struct serdev_device *serdev; + struct hci_dev *hdev; + unsigned long flags; + unsigned long hdev_flags; + + struct work_struct init_ready; + struct work_struct write_work; + + const struct hci_uart_proto *proto; + struct percpu_rw_semaphore proto_lock; /* Stop work for proto close */ + void *priv; + + struct sk_buff *tx_skb; + unsigned long tx_state; + + unsigned int init_speed; + unsigned int oper_speed; + + u8 alignment; + u8 padding; +}; + +/* HCI_UART proto flag bits */ +#define HCI_UART_PROTO_SET 0 +#define HCI_UART_REGISTERED 1 +#define HCI_UART_PROTO_READY 2 +#define HCI_UART_NO_SUSPEND_NOTIFIER 3 + +/* TX states */ +#define HCI_UART_SENDING 1 +#define HCI_UART_TX_WAKEUP 2 + +int hci_uart_register_proto(const struct hci_uart_proto *p); +int hci_uart_unregister_proto(const struct hci_uart_proto *p); +int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p); +void hci_uart_unregister_device(struct hci_uart *hu); + +int hci_uart_tx_wakeup(struct hci_uart *hu); +int hci_uart_wait_until_sent(struct hci_uart *hu); +int hci_uart_init_ready(struct hci_uart *hu); +void hci_uart_init_work(struct work_struct *work); +void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); +bool hci_uart_has_flow_control(struct hci_uart *hu); +void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); +void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, + unsigned int oper_speed); + +#ifdef CONFIG_BT_HCIUART_H4 +int h4_init(void); +int h4_deinit(void); + +struct h4_recv_pkt { + u8 type; /* Packet type */ + u8 hlen; /* Header length */ + u8 loff; /* Data length offset in header */ + u8 lsize; /* Data 
length field size */ + u16 maxlen; /* Max overall packet length */ + int (*recv)(struct hci_dev *hdev, struct sk_buff *skb); +}; + +#define H4_RECV_ACL \ + .type = HCI_ACLDATA_PKT, \ + .hlen = HCI_ACL_HDR_SIZE, \ + .loff = 2, \ + .lsize = 2, \ + .maxlen = HCI_MAX_FRAME_SIZE \ + +#define H4_RECV_SCO \ + .type = HCI_SCODATA_PKT, \ + .hlen = HCI_SCO_HDR_SIZE, \ + .loff = 2, \ + .lsize = 1, \ + .maxlen = HCI_MAX_SCO_SIZE + +#define H4_RECV_EVENT \ + .type = HCI_EVENT_PKT, \ + .hlen = HCI_EVENT_HDR_SIZE, \ + .loff = 1, \ + .lsize = 1, \ + .maxlen = HCI_MAX_EVENT_SIZE + +#define H4_RECV_ISO \ + .type = HCI_ISODATA_PKT, \ + .hlen = HCI_ISO_HDR_SIZE, \ + .loff = 2, \ + .lsize = 2, \ + .maxlen = HCI_MAX_FRAME_SIZE \ + +struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, + const unsigned char *buffer, int count, + const struct h4_recv_pkt *pkts, int pkts_count); +#endif + +#ifdef CONFIG_BT_HCIUART_BCSP +int bcsp_init(void); +int bcsp_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_LL +int ll_init(void); +int ll_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_ATH3K +int ath_init(void); +int ath_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_3WIRE +int h5_init(void); +int h5_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_INTEL +int intel_init(void); +int intel_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_BCM +int bcm_init(void); +int bcm_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_QCA +int qca_init(void); +int qca_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_AG6XX +int ag6xx_init(void); +int ag6xx_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_MRVL +int mrvl_init(void); +int mrvl_deinit(void); +#endif diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c new file mode 100644 index 000000000..572d68d52 --- /dev/null +++ b/drivers/bluetooth/hci_vhci.c @@ -0,0 +1,710 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * Bluetooth virtual HCI driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2006 Marcel Holtmann + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#define VERSION "1.5" + +static bool amp; + +struct vhci_data { + struct hci_dev *hdev; + + wait_queue_head_t read_wait; + struct sk_buff_head readq; + + struct mutex open_mutex; + struct delayed_work open_timeout; + struct work_struct suspend_work; + + bool suspended; + bool wakeup; + __u16 msft_opcode; + bool aosp_capable; + atomic_t initialized; +}; + +static int vhci_open_dev(struct hci_dev *hdev) +{ + return 0; +} + +static int vhci_close_dev(struct hci_dev *hdev) +{ + struct vhci_data *data = hci_get_drvdata(hdev); + + skb_queue_purge(&data->readq); + + return 0; +} + +static int vhci_flush(struct hci_dev *hdev) +{ + struct vhci_data *data = hci_get_drvdata(hdev); + + skb_queue_purge(&data->readq); + + return 0; +} + +static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct vhci_data *data = hci_get_drvdata(hdev); + + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + + skb_queue_tail(&data->readq, skb); + + if (atomic_read(&data->initialized)) + wake_up_interruptible(&data->read_wait); + return 0; +} + +static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) +{ + *data_path_id = 0; + return 0; +} + +static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, + struct bt_codec *codec, __u8 *vnd_len, + __u8 **vnd_data) +{ 
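+ /* Only eSCO links are supported here; no vendor-specific codec data is provided. */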
+ if (type != ESCO_LINK) + return -EINVAL; + + *vnd_len = 0; + *vnd_data = NULL; + return 0; +} + +static bool vhci_wakeup(struct hci_dev *hdev) +{ + struct vhci_data *data = hci_get_drvdata(hdev); + + return data->wakeup; +} + +static ssize_t force_suspend_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + char buf[3]; + + buf[0] = data->suspended ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static void vhci_suspend_work(struct work_struct *work) +{ + struct vhci_data *data = container_of(work, struct vhci_data, + suspend_work); + + if (data->suspended) + hci_suspend_dev(data->hdev); + else + hci_resume_dev(data->hdev); +} + +static ssize_t force_suspend_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (data->suspended == enable) + return -EALREADY; + + data->suspended = enable; + + schedule_work(&data->suspend_work); + + return count; +} + +static const struct file_operations force_suspend_fops = { + .open = simple_open, + .read = force_suspend_read, + .write = force_suspend_write, + .llseek = default_llseek, +}; + +static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + char buf[3]; + + buf[0] = data->wakeup ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_wakeup_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (data->wakeup == enable) + return -EALREADY; + + data->wakeup = enable; + + return count; +} + +static const struct file_operations force_wakeup_fops = { + .open = simple_open, + .read = force_wakeup_read, + .write = force_wakeup_write, + .llseek = default_llseek, +}; + +static int msft_opcode_set(void *data, u64 val) +{ + struct vhci_data *vhci = data; + + if (val > 0xffff || hci_opcode_ogf(val) != 0x3f) + return -EINVAL; + + if (vhci->msft_opcode) + return -EALREADY; + + vhci->msft_opcode = val; + + return 0; +} + +static int msft_opcode_get(void *data, u64 *val) +{ + struct vhci_data *vhci = data; + + *val = vhci->msft_opcode; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set, + "%llu\n"); + +static ssize_t aosp_capable_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *vhci = file->private_data; + char buf[3]; + + buf[0] = vhci->aosp_capable ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t aosp_capable_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct vhci_data *vhci = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (!enable) + return -EINVAL; + + if (vhci->aosp_capable) + return -EALREADY; + + vhci->aosp_capable = enable; + + return count; +} + +static const struct file_operations aosp_capable_fops = { + .open = simple_open, + .read = aosp_capable_read, + .write = aosp_capable_write, + .llseek = default_llseek, +}; + +static int vhci_setup(struct hci_dev *hdev) +{ + struct vhci_data *vhci = hci_get_drvdata(hdev); + + if (vhci->msft_opcode) + hci_set_msft_opcode(hdev, vhci->msft_opcode); + + if (vhci->aosp_capable) + hci_set_aosp_capable(hdev); + + return 0; +} + +static void vhci_coredump(struct hci_dev *hdev) +{ + /* No need to do anything */ +} + +static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + char buf[80]; + + snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: vhci_drv\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: vhci\n"); + skb_put_data(skb, buf, strlen(buf)); +} + +#define MAX_COREDUMP_LINE_LEN 40 + +struct devcoredump_test_data { + enum devcoredump_state state; + unsigned int timeout; + char data[MAX_COREDUMP_LINE_LEN]; +}; + +static inline void force_devcd_timeout(struct hci_dev *hdev, + unsigned int timeout) +{ +#ifdef CONFIG_DEV_COREDUMP + hdev->dump.timeout = msecs_to_jiffies(timeout * 1000); +#endif +} + +static ssize_t force_devcd_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + struct hci_dev *hdev = data->hdev; + struct sk_buff *skb = NULL; + struct devcoredump_test_data dump_data; + size_t data_size; + int ret; + + if (count < offsetof(struct devcoredump_test_data, data) || + count > sizeof(dump_data)) + return -EINVAL; + + if (copy_from_user(&dump_data, user_buf, count)) + return -EFAULT; + + data_size = count - offsetof(struct devcoredump_test_data, data); + skb = alloc_skb(data_size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + skb_put_data(skb, &dump_data.data, data_size); + + hci_devcd_register(hdev, vhci_coredump, vhci_coredump_hdr, NULL); + + /* Force the devcoredump timeout */ + if (dump_data.timeout) + force_devcd_timeout(hdev, dump_data.timeout); + + ret = hci_devcd_init(hdev, skb->len); + if (ret) { + BT_ERR("Failed to generate devcoredump"); + kfree_skb(skb); + return ret; + } + + hci_devcd_append(hdev, skb); + + switch (dump_data.state) { + case HCI_DEVCOREDUMP_DONE: + hci_devcd_complete(hdev); + break; + case HCI_DEVCOREDUMP_ABORT: + hci_devcd_abort(hdev); + break; + case HCI_DEVCOREDUMP_TIMEOUT: + /* Do nothing */ + break; + default: + return -EINVAL; + } + + return count; +} + +static const struct file_operations force_devcoredump_fops = { + .open = simple_open, + .write = force_devcd_write, +}; + +static int __vhci_create_device(struct vhci_data *data, __u8 opcode) +{ + struct hci_dev *hdev; + struct sk_buff *skb; + __u8 dev_type; + + if (data->hdev) + return -EBADFD; + + /* bits 0-1 are dev_type (Primary or AMP) */ + dev_type = opcode & 
0x03; + + if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP) + return -EINVAL; + + /* bits 2-5 are reserved (must be zero) */ + if (opcode & 0x3c) + return -EINVAL; + + skb = bt_skb_alloc(4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdev = hci_alloc_dev(); + if (!hdev) { + kfree_skb(skb); + return -ENOMEM; + } + + data->hdev = hdev; + + hdev->bus = HCI_VIRTUAL; + hdev->dev_type = dev_type; + hci_set_drvdata(hdev, data); + + hdev->open = vhci_open_dev; + hdev->close = vhci_close_dev; + hdev->flush = vhci_flush; + hdev->send = vhci_send_frame; + hdev->get_data_path_id = vhci_get_data_path_id; + hdev->get_codec_config_data = vhci_get_codec_config_data; + hdev->wakeup = vhci_wakeup; + hdev->setup = vhci_setup; + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + + /* bit 6 is for external configuration */ + if (opcode & 0x40) + set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); + + /* bit 7 is for raw device */ + if (opcode & 0x80) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + hci_free_dev(hdev); + data->hdev = NULL; + kfree_skb(skb); + return -EBUSY; + } + + debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, + &force_suspend_fops); + + debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, + &force_wakeup_fops); + + if (IS_ENABLED(CONFIG_BT_MSFTEXT)) + debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, + &msft_opcode_fops); + + if (IS_ENABLED(CONFIG_BT_AOSPEXT)) + debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, + &aosp_capable_fops); + + debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, + &force_devcoredump_fops); + + hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; + + skb_put_u8(skb, 0xff); + skb_put_u8(skb, opcode); + put_unaligned_le16(hdev->id, skb_put(skb, 2)); + skb_queue_head(&data->readq, skb); + atomic_inc(&data->initialized); + + wake_up_interruptible(&data->read_wait); + return 0; +} + +static int vhci_create_device(struct vhci_data *data, __u8 opcode) +{ + int err; + + mutex_lock(&data->open_mutex); + err = __vhci_create_device(data, opcode); + mutex_unlock(&data->open_mutex); + + return err; +} + +static inline ssize_t vhci_get_user(struct vhci_data *data, + struct iov_iter *from) +{ + size_t len = iov_iter_count(from); + struct sk_buff *skb; + __u8 pkt_type, opcode; + int ret; + + if (len < 2 || len > HCI_MAX_FRAME_SIZE) + return -EINVAL; + + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + if (!copy_from_iter_full(skb_put(skb, len), len, from)) { + kfree_skb(skb); + return -EFAULT; + } + + pkt_type = *((__u8 *) skb->data); + skb_pull(skb, 1); + + switch (pkt_type) { + case HCI_EVENT_PKT: + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + if (!data->hdev) { + kfree_skb(skb); + return -ENODEV; + } + + hci_skb_pkt_type(skb) = pkt_type; + + ret = hci_recv_frame(data->hdev, skb); + break; + + case HCI_VENDOR_PKT: + cancel_delayed_work_sync(&data->open_timeout); + + opcode = *((__u8 *) skb->data); + skb_pull(skb, 1); + + if (skb->len > 0) { + kfree_skb(skb); + return -EINVAL; + } + + kfree_skb(skb); + + ret = vhci_create_device(data, opcode); + break; + + default: + kfree_skb(skb); + return -EINVAL; + } + + return (ret < 0) ? 
ret : len; +} + +static inline ssize_t vhci_put_user(struct vhci_data *data, + struct sk_buff *skb, + char __user *buf, int count) +{ + char __user *ptr = buf; + int len; + + len = min_t(unsigned int, skb->len, count); + + if (copy_to_user(ptr, skb->data, len)) + return -EFAULT; + + if (!data->hdev) + return len; + + data->hdev->stat.byte_tx += len; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + data->hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + data->hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + data->hdev->stat.sco_tx++; + break; + } + + return len; +} + +static ssize_t vhci_read(struct file *file, + char __user *buf, size_t count, loff_t *pos) +{ + struct vhci_data *data = file->private_data; + struct sk_buff *skb; + ssize_t ret = 0; + + while (count) { + skb = skb_dequeue(&data->readq); + if (skb) { + ret = vhci_put_user(data, skb, buf, count); + if (ret < 0) + skb_queue_head(&data->readq, skb); + else + kfree_skb(skb); + break; + } + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + ret = wait_event_interruptible(data->read_wait, + !skb_queue_empty(&data->readq)); + if (ret < 0) + break; + } + + return ret; +} + +static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct vhci_data *data = file->private_data; + + return vhci_get_user(data, from); +} + +static __poll_t vhci_poll(struct file *file, poll_table *wait) +{ + struct vhci_data *data = file->private_data; + + poll_wait(file, &data->read_wait, wait); + + if (!skb_queue_empty(&data->readq)) + return EPOLLIN | EPOLLRDNORM; + + return EPOLLOUT | EPOLLWRNORM; +} + +static void vhci_open_timeout(struct work_struct *work) +{ + struct vhci_data *data = container_of(work, struct vhci_data, + open_timeout.work); + + vhci_create_device(data, amp ? 
HCI_AMP : HCI_PRIMARY); +} + +static int vhci_open(struct inode *inode, struct file *file) +{ + struct vhci_data *data; + + data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + skb_queue_head_init(&data->readq); + init_waitqueue_head(&data->read_wait); + + mutex_init(&data->open_mutex); + INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); + INIT_WORK(&data->suspend_work, vhci_suspend_work); + + file->private_data = data; + nonseekable_open(inode, file); + + schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000)); + + return 0; +} + +static int vhci_release(struct inode *inode, struct file *file) +{ + struct vhci_data *data = file->private_data; + struct hci_dev *hdev; + + cancel_delayed_work_sync(&data->open_timeout); + flush_work(&data->suspend_work); + + hdev = data->hdev; + + if (hdev) { + hci_unregister_dev(hdev); + hci_free_dev(hdev); + } + + skb_queue_purge(&data->readq); + file->private_data = NULL; + kfree(data); + + return 0; +} + +static const struct file_operations vhci_fops = { + .owner = THIS_MODULE, + .read = vhci_read, + .write_iter = vhci_write, + .poll = vhci_poll, + .open = vhci_open, + .release = vhci_release, + .llseek = no_llseek, +}; + +static struct miscdevice vhci_miscdev = { + .name = "vhci", + .fops = &vhci_fops, + .minor = VHCI_MINOR, +}; +module_misc_device(vhci_miscdev); + +module_param(amp, bool, 0644); +MODULE_PARM_DESC(amp, "Create AMP controller device"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("devname:vhci"); +MODULE_ALIAS_MISCDEV(VHCI_MINOR); diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c new file mode 100644 index 000000000..2ac70b560 --- /dev/null +++ b/drivers/bluetooth/virtio_bt.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define VERSION "0.1" + +enum { + VIRTBT_VQ_TX, + VIRTBT_VQ_RX, + VIRTBT_NUM_VQS, +}; + +struct virtio_bluetooth { + struct virtio_device *vdev; + struct virtqueue *vqs[VIRTBT_NUM_VQS]; + struct work_struct rx; + struct hci_dev *hdev; +}; + +static int virtbt_add_inbuf(struct virtio_bluetooth *vbt) +{ + struct virtqueue *vq = vbt->vqs[VIRTBT_VQ_RX]; + struct scatterlist sg[1]; + struct sk_buff *skb; + int err; + + skb = alloc_skb(1000, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + sg_init_one(sg, skb->data, 1000); + + err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL); + if (err < 0) { + kfree_skb(skb); + return err; + } + + return 0; +} + +static int virtbt_open(struct hci_dev *hdev) +{ + return 0; +} + +static int virtbt_open_vdev(struct virtio_bluetooth *vbt) +{ + if (virtbt_add_inbuf(vbt) < 0) + return -EIO; + + virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); + return 0; +} + +static int virtbt_close(struct hci_dev *hdev) +{ + return 0; +} + +static int virtbt_close_vdev(struct virtio_bluetooth *vbt) +{ + int i; + + cancel_work_sync(&vbt->rx); + + for (i = 0; i < ARRAY_SIZE(vbt->vqs); i++) { + struct virtqueue *vq = vbt->vqs[i]; + struct sk_buff *skb; + + while ((skb = virtqueue_detach_unused_buf(vq))) + kfree_skb(skb); + cond_resched(); + } + + return 0; +} + +static int virtbt_flush(struct hci_dev *hdev) +{ + return 0; +} + +static int virtbt_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct virtio_bluetooth *vbt = hci_get_drvdata(hdev); + struct scatterlist sg[1]; + int err; + + memcpy(skb_push(skb, 
1), &hci_skb_pkt_type(skb), 1); + + sg_init_one(sg, skb->data, skb->len); + err = virtqueue_add_outbuf(vbt->vqs[VIRTBT_VQ_TX], sg, 1, skb, + GFP_KERNEL); + if (err) { + kfree_skb(skb); + return err; + } + + virtqueue_kick(vbt->vqs[VIRTBT_VQ_TX]); + return 0; +} + +static int virtbt_setup_zephyr(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Build Information */ + skb = __hci_cmd_sync(hdev, 0xfc08, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); + + hci_set_fw_info(hdev, "%s", skb->data + 1); + + kfree_skb(skb); + return 0; +} + +static int virtbt_set_bdaddr_zephyr(struct hci_dev *hdev, + const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + + /* Write BD_ADDR */ + skb = __hci_cmd_sync(hdev, 0xfc06, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int virtbt_setup_intel(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Intel Read Version */ + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int virtbt_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + + /* Intel Write BD Address */ + skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int virtbt_setup_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read ROM Version */ + skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + bt_dev_info(hdev, "ROM version %u", *((__u8 *) (skb->data + 1))); + + kfree_skb(skb); + return 0; +} + +static int virtbt_shutdown_generic(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Reset */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb) +{ + __u8 pkt_type; + + pkt_type = *((__u8 *) skb->data); + skb_pull(skb, 1); + + switch (pkt_type) { + case HCI_EVENT_PKT: + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_ISODATA_PKT: + hci_skb_pkt_type(skb) = pkt_type; + hci_recv_frame(vbt->hdev, skb); + break; + default: + kfree_skb(skb); + break; + } +} + +static void virtbt_rx_work(struct work_struct *work) +{ + struct virtio_bluetooth *vbt = container_of(work, + struct virtio_bluetooth, rx); + struct sk_buff *skb; + unsigned int len; + + skb = virtqueue_get_buf(vbt->vqs[VIRTBT_VQ_RX], &len); + if (!skb) + return; + + skb_put(skb, len); + virtbt_rx_handle(vbt, skb); + + if (virtbt_add_inbuf(vbt) < 0) + return; + + virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); +} + +static void virtbt_tx_done(struct virtqueue *vq) +{ + struct sk_buff *skb; + unsigned int len; + + while ((skb = virtqueue_get_buf(vq, &len))) + kfree_skb(skb); +} + +static void virtbt_rx_done(struct virtqueue *vq) +{ + struct virtio_bluetooth *vbt = vq->vdev->priv; + + schedule_work(&vbt->rx); +} + +static int virtbt_probe(struct virtio_device *vdev) +{ + vq_callback_t *callbacks[VIRTBT_NUM_VQS] = { + [VIRTBT_VQ_TX] = virtbt_tx_done, + [VIRTBT_VQ_RX] = virtbt_rx_done, + }; + const char *names[VIRTBT_NUM_VQS] = { + [VIRTBT_VQ_TX] = "tx", + [VIRTBT_VQ_RX] = "rx", + }; + struct virtio_bluetooth *vbt; + struct hci_dev *hdev; + int err; + __u8 type; + + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) + return 
-ENODEV; + + type = virtio_cread8(vdev, offsetof(struct virtio_bt_config, type)); + + switch (type) { + case VIRTIO_BT_CONFIG_TYPE_PRIMARY: + case VIRTIO_BT_CONFIG_TYPE_AMP: + break; + default: + return -EINVAL; + } + + vbt = kzalloc(sizeof(*vbt), GFP_KERNEL); + if (!vbt) + return -ENOMEM; + + vdev->priv = vbt; + vbt->vdev = vdev; + + INIT_WORK(&vbt->rx, virtbt_rx_work); + + err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, callbacks, + names, NULL); + if (err) + return err; + + hdev = hci_alloc_dev(); + if (!hdev) { + err = -ENOMEM; + goto failed; + } + + vbt->hdev = hdev; + + hdev->bus = HCI_VIRTIO; + hdev->dev_type = type; + hci_set_drvdata(hdev, vbt); + + hdev->open = virtbt_open; + hdev->close = virtbt_close; + hdev->flush = virtbt_flush; + hdev->send = virtbt_send_frame; + + if (virtio_has_feature(vdev, VIRTIO_BT_F_VND_HCI)) { + __u16 vendor; + + if (virtio_has_feature(vdev, VIRTIO_BT_F_CONFIG_V2)) + virtio_cread(vdev, struct virtio_bt_config_v2, + vendor, &vendor); + else + virtio_cread(vdev, struct virtio_bt_config, + vendor, &vendor); + + switch (vendor) { + case VIRTIO_BT_CONFIG_VENDOR_ZEPHYR: + hdev->manufacturer = 1521; + hdev->setup = virtbt_setup_zephyr; + hdev->shutdown = virtbt_shutdown_generic; + hdev->set_bdaddr = virtbt_set_bdaddr_zephyr; + break; + + case VIRTIO_BT_CONFIG_VENDOR_INTEL: + hdev->manufacturer = 2; + hdev->setup = virtbt_setup_intel; + hdev->shutdown = virtbt_shutdown_generic; + hdev->set_bdaddr = virtbt_set_bdaddr_intel; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + break; + + case VIRTIO_BT_CONFIG_VENDOR_REALTEK: + hdev->manufacturer = 93; + hdev->setup = virtbt_setup_realtek; + hdev->shutdown = virtbt_shutdown_generic; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + break; + } + } + + if (virtio_has_feature(vdev, VIRTIO_BT_F_MSFT_EXT)) { + __u16 msft_opcode; + + if (virtio_has_feature(vdev, VIRTIO_BT_F_CONFIG_V2)) + virtio_cread(vdev, struct virtio_bt_config_v2, + msft_opcode, &msft_opcode); + else + virtio_cread(vdev, struct virtio_bt_config, + msft_opcode, &msft_opcode); + + hci_set_msft_opcode(hdev, msft_opcode); + } + + if (virtio_has_feature(vdev, VIRTIO_BT_F_AOSP_EXT)) + hci_set_aosp_capable(hdev); + + if (hci_register_dev(hdev) < 0) { + hci_free_dev(hdev); + err = -EBUSY; + goto failed; + } + + virtio_device_ready(vdev); + err = virtbt_open_vdev(vbt); + if (err) + goto open_failed; + + return 0; + +open_failed: + hci_free_dev(hdev); +failed: + vdev->config->del_vqs(vdev); + return err; +} + +static void virtbt_remove(struct virtio_device *vdev) +{ + struct virtio_bluetooth *vbt = vdev->priv; + struct hci_dev *hdev = vbt->hdev; + + hci_unregister_dev(hdev); + virtio_reset_device(vdev); + virtbt_close_vdev(vbt); + + hci_free_dev(hdev); + vbt->hdev = NULL; + + vdev->config->del_vqs(vdev); + kfree(vbt); +} + +static struct virtio_device_id virtbt_table[] = { + { VIRTIO_ID_BT, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +MODULE_DEVICE_TABLE(virtio, virtbt_table); + +static const unsigned int virtbt_features[] = { + VIRTIO_BT_F_VND_HCI, + VIRTIO_BT_F_MSFT_EXT, + VIRTIO_BT_F_AOSP_EXT, + VIRTIO_BT_F_CONFIG_V2, +}; + +static struct virtio_driver virtbt_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .feature_table = virtbt_features, + .feature_table_size = ARRAY_SIZE(virtbt_features), + .id_table = virtbt_table, + 
.probe = virtbt_probe, + .remove = virtbt_remove, +}; + +module_virtio_driver(virtbt_driver); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_DESCRIPTION("Generic Bluetooth VIRTIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); -- cgit v1.2.3