Diffstat (limited to 'drivers/char/tpm')
-rw-r--r--  drivers/char/tpm/Kconfig                |  214
-rw-r--r--  drivers/char/tpm/Makefile               |   44
-rw-r--r--  drivers/char/tpm/eventlog/acpi.c        |  171
-rw-r--r--  drivers/char/tpm/eventlog/common.c      |  189
-rw-r--r--  drivers/char/tpm/eventlog/common.h      |   35
-rw-r--r--  drivers/char/tpm/eventlog/efi.c         |  125
-rw-r--r--  drivers/char/tpm/eventlog/of.c          |  108
-rw-r--r--  drivers/char/tpm/eventlog/tpm1.c        |  296
-rw-r--r--  drivers/char/tpm/eventlog/tpm2.c        |  159
-rw-r--r--  drivers/char/tpm/st33zp24/Kconfig       |   30
-rw-r--r--  drivers/char/tpm/st33zp24/Makefile      |   13
-rw-r--r--  drivers/char/tpm/st33zp24/i2c.c         |  173
-rw-r--r--  drivers/char/tpm/st33zp24/spi.c         |  290
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c    |  588
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.h    |   41
-rw-r--r--  drivers/char/tpm/tpm-chip.c             |  679
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c       |  284
-rw-r--r--  drivers/char/tpm/tpm-dev.c              |   68
-rw-r--r--  drivers/char/tpm/tpm-dev.h              |   33
-rw-r--r--  drivers/char/tpm/tpm-interface.c        |  530
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c            |  529
-rw-r--r--  drivers/char/tpm/tpm.h                  |  320
-rw-r--r--  drivers/char/tpm/tpm1-cmd.c             |  813
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c             |  789
-rw-r--r--  drivers/char/tpm/tpm2-space.c           |  641
-rw-r--r--  drivers/char/tpm/tpm_atmel.c            |  235
-rw-r--r--  drivers/char/tpm/tpm_atmel.h            |  140
-rw-r--r--  drivers/char/tpm/tpm_crb.c              |  858
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c         |  419
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.h         |   40
-rw-r--r--  drivers/char/tpm/tpm_i2c_atmel.c        |  219
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c     |  732
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c      |  666
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c          |  757
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.h          |   73
-rw-r--r--  drivers/char/tpm/tpm_infineon.c         |  625
-rw-r--r--  drivers/char/tpm/tpm_nsc.c              |  416
-rw-r--r--  drivers/char/tpm/tpm_ppi.c              |  389
-rw-r--r--  drivers/char/tpm/tpm_tis.c              |  434
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c         | 1367
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h         |  228
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c.c          |  404
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c     |  795
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.h          |   49
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_cr50.c     |  342
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_main.c     |  359
-rw-r--r--  drivers/char/tpm/tpm_tis_synquacer.c    |  167
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c       |  717
-rw-r--r--  drivers/char/tpm/tpmrm-dev.c            |   55
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c         |  441
50 files changed, 18089 insertions, 0 deletions
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
new file mode 100644
index 0000000000..927088b2c3
--- /dev/null
+++ b/drivers/char/tpm/Kconfig
@@ -0,0 +1,214 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# TPM device configuration
+#
+
+menuconfig TCG_TPM
+ tristate "TPM Hardware Support"
+ depends on HAS_IOMEM
+ imply SECURITYFS
+ select CRYPTO
+ select CRYPTO_HASH_INFO
+ help
+ If you have a TPM security chip in your system, which
+ implements the Trusted Computing Group's specification,
+ say Yes and it will be accessible from within Linux. For
+ more information see <http://www.trustedcomputinggroup.org>.
+ An implementation of the Trusted Software Stack (TSS), the
+ userspace enablement piece of the specification, can be
+ obtained at: <http://sourceforge.net/projects/trousers>. To
+ compile this driver as a module, choose M here; the module
+ will be called tpm. If unsure, say N.
+ Notes:
+ 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+ and CONFIG_PNPACPI.
+ 2) Without ACPI enabled, the BIOS event log won't be accessible,
+ which is required to validate the PCR 0-7 values.
+
+if TCG_TPM
+
+config HW_RANDOM_TPM
+ bool "TPM HW Random Number Generator support"
+ depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+ default y
+ help
+ This setting exposes the TPM's Random Number Generator as a hwrng
+ device. This allows the kernel to collect randomness from the TPM at
+ boot, and provides the TPM randomness in /dev/hwrng.
+
+ If unsure, say Y.
+
+config TCG_TIS_CORE
+ tristate
+ help
+ TCG TIS TPM core driver. It implements the TPM TCG TIS logic and hooks
+ into the TPM kernel APIs. Physical layers will register against it.
+
+config TCG_TIS
+ tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
+ depends on X86 || OF
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux. To compile this driver as a module, choose M here;
+ the module will be called tpm_tis.
+
+config TCG_TIS_SPI
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (SPI)"
+ depends on SPI
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip which is connected to a regular,
+ non-tcg SPI master (i.e. most embedded platforms) that is compliant with the
+ TCG TIS 1.3 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux. To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_spi.
+
+config TCG_TIS_SPI_CR50
+ bool "Cr50 SPI Interface"
+ depends on TCG_TIS_SPI
+ help
+ If you have an H1 secure module running Cr50 firmware on an SPI bus,
+ say Yes and it will be accessible from within Linux.
+
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
+config TCG_TIS_SYNQUACER
+ tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
+ depends on ARCH_SYNQUACER || COMPILE_TEST
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux on Socionext SynQuacer platform.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_synquacer.
+
+config TCG_TIS_I2C_CR50
+ tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
+ depends on I2C
+ help
+ This is a driver for the Google cr50 I2C TPM interface which is a
+ custom microcontroller and requires a custom i2c protocol interface
+ to handle the limitations of the hardware. To compile this driver
+ as a module, choose M here; the module will be called tcg_tis_i2c_cr50.
+
+config TCG_TIS_I2C_ATMEL
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
+ depends on I2C
+ help
+ If you have an Atmel I2C TPM security chip say Yes and it will be
+ accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will
+ be called tpm_tis_i2c_atmel.
+
+config TCG_TIS_I2C_INFINEON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
+ depends on I2C
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification and Infineon's I2C Protocol Stack
+ Specification 0.20 say Yes and it will be accessible from within
+ Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_infineon.
+
+config TCG_TIS_I2C_NUVOTON
+ tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)"
+ depends on I2C
+ help
+ If you have a TPM security chip with an I2C interface from
+ Nuvoton Technology Corp. say Yes and it will be accessible
+ from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_i2c_nuvoton.
+
+config TCG_NSC
+ tristate "National Semiconductor TPM Interface"
+ depends on X86
+ help
+ If you have a TPM security chip from National Semiconductor
+ say Yes and it will be accessible from within Linux. To
+ compile this driver as a module, choose M here; the module
+ will be called tpm_nsc.
+
+config TCG_ATMEL
+ tristate "Atmel TPM Interface"
+ depends on PPC64 || HAS_IOPORT_MAP
+ help
+ If you have a TPM security chip from Atmel say Yes and it
+ will be accessible from within Linux. To compile this driver
+ as a module, choose M here; the module will be called tpm_atmel.
+
+config TCG_INFINEON
+ tristate "Infineon Technologies TPM Interface"
+ depends on PNP
+ help
+ If you have a TPM security chip from Infineon Technologies
+ (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
+ will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_infineon.
+ Further information on this driver and the supported hardware
+ can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+
+config TCG_IBMVTPM
+ tristate "IBM VTPM Interface"
+ depends on PPC_PSERIES
+ help
+ If you have IBM virtual TPM (VTPM) support say Yes and it
+ will be accessible from within Linux. To compile this driver
+ as a module, choose M here; the module will be called tpm_ibmvtpm.
+
+config TCG_XEN
+ tristate "XEN TPM Interface"
+ depends on TCG_TPM && XEN
+ select XEN_XENBUS_FRONTEND
+ help
+ If you want to make TPM support available to a Xen user domain,
+ say Yes and it will be accessible from within Linux. See
+ the manpages for xl, xl.conf, and docs/misc/vtpm.txt in
+ the Xen source repository for more details.
+ To compile this driver as a module, choose M here; the module
+ will be called xen-tpmfront.
+
+config TCG_CRB
+ tristate "TPM 2.0 CRB Interface"
+ depends on ACPI
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG CRB 2.0 TPM specification say Yes and it will be accessible
+ from within Linux. To compile this driver as a module, choose
+ M here; the module will be called tpm_crb.
+
+config TCG_VTPM_PROXY
+ tristate "VTPM Proxy Interface"
+ depends on TCG_TPM
+ help
+ This driver proxies for an emulated TPM (vTPM) running in userspace.
+ A device /dev/vtpmx is provided that creates a device pair
+ /dev/vtpmX and a server-side file descriptor on which the vTPM
+ can receive commands.
+
+config TCG_FTPM_TEE
+ tristate "TEE based fTPM Interface"
+ depends on TEE && OPTEE
+ help
+ This driver proxies for firmware TPM running in TEE.
+
+source "drivers/char/tpm/st33zp24/Kconfig"
+endif # TCG_TPM
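
The HW_RANDOM_TPM option above only covers the hwrng bridge; other kernel code can also pull entropy from the default TPM directly through the exported tpm_get_random() helper. A minimal sketch, assuming the tpm_get_random(chip, out, max) prototype from include/linux/tpm.h, where a NULL chip selects the default chip and the return value is the number of bytes obtained or a negative errno:

#include <linux/errno.h>
#include <linux/tpm.h>
#include <linux/types.h>

/* Sketch only: seed a caller-provided buffer from the default TPM chip. */
static int example_seed_from_tpm(u8 *buf, size_t len)
{
	int rc = tpm_get_random(NULL, buf, len);

	if (rc < 0)
		return rc;			/* no TPM, or the command failed */
	return (size_t)rc == len ? 0 : -EIO;	/* treat a short read as failure */
}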
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
new file mode 100644
index 0000000000..0222b1ddb3
--- /dev/null
+++ b/drivers/char/tpm/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the kernel tpm device drivers.
+#
+obj-$(CONFIG_TCG_TPM) += tpm.o
+tpm-y := tpm-chip.o
+tpm-y += tpm-dev-common.o
+tpm-y += tpm-dev.o
+tpm-y += tpm-interface.o
+tpm-y += tpm1-cmd.o
+tpm-y += tpm2-cmd.o
+tpm-y += tpmrm-dev.o
+tpm-y += tpm2-space.o
+tpm-y += tpm-sysfs.o
+tpm-y += eventlog/common.o
+tpm-y += eventlog/tpm1.o
+tpm-y += eventlog/tpm2.o
+
+tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
+tpm-$(CONFIG_EFI) += eventlog/efi.o
+tpm-$(CONFIG_OF) += eventlog/of.o
+obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
+obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_SYNQUACER) += tpm_tis_synquacer.o
+
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+tpm_tis_spi-y := tpm_tis_spi_main.o
+tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
+
+obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
+obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
+obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
+obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
+obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
+obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
+obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
+obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
+obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
new file mode 100644
index 0000000000..bd757d836c
--- /dev/null
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the event log extended by the TCG BIOS of PC platform
+ */
+
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+struct acpi_tcpa {
+ struct acpi_table_header hdr;
+ u16 platform_class;
+ union {
+ struct client_hdr {
+ u32 log_max_len __packed;
+ u64 log_start_addr __packed;
+ } client;
+ struct server_hdr {
+ u16 reserved;
+ u64 log_max_len __packed;
+ u64 log_start_addr __packed;
+ } server;
+ };
+};
+
+/* Check that the given log is indeed a TPM2 log. */
+static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+{
+ struct tcg_efi_specid_event_head *efispecid;
+ struct tcg_pcr_event *event_header;
+ int n;
+
+ if (len < sizeof(*event_header))
+ return false;
+ len -= sizeof(*event_header);
+ event_header = bios_event_log;
+
+ if (len < sizeof(*efispecid))
+ return false;
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+ n = memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG));
+ return n == 0;
+}
+
+/* read binary bios log */
+int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ struct acpi_tcpa *buff;
+ acpi_status status;
+ void __iomem *virt;
+ u64 len, start;
+ struct tpm_bios_log *log;
+ struct acpi_table_tpm2 *tbl;
+ struct acpi_tpm2_phy *tpm2_phy;
+ int format;
+ int ret;
+
+ log = &chip->log;
+
+ /* Unfortunately, ACPI does not associate the event log with a specific
+ * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+ */
+ if (!chip->acpi_dev_handle)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ status = acpi_get_table("TPM2", 1,
+ (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (tbl->header.length <
+ sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return -ENODEV;
+ }
+
+ tpm2_phy = (void *)tbl + sizeof(*tbl);
+ len = tpm2_phy->log_area_minimum_length;
+
+ start = tpm2_phy->log_area_start_address;
+ if (!start || !len) {
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return -ENODEV;
+ }
+
+ acpi_put_table((struct acpi_table_header *)tbl);
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ } else {
+ /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+ status = acpi_get_table(ACPI_SIG_TCPA, 1,
+ (struct acpi_table_header **)&buff);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ switch (buff->platform_class) {
+ case BIOS_SERVER:
+ len = buff->server.log_max_len;
+ start = buff->server.log_start_addr;
+ break;
+ case BIOS_CLIENT:
+ default:
+ len = buff->client.log_max_len;
+ start = buff->client.log_start_addr;
+ break;
+ }
+
+ acpi_put_table((struct acpi_table_header *)buff);
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ }
+
+ if (!len) {
+ dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
+ return -EIO;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ if (!log->bios_event_log)
+ return -ENOMEM;
+
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ ret = -EIO;
+ virt = acpi_os_map_iomem(start, len);
+ if (!virt) {
+ dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
+ /* try EFI log next */
+ ret = -ENODEV;
+ goto err;
+ }
+
+ memcpy_fromio(log->bios_event_log, virt, len);
+
+ acpi_os_unmap_iomem(virt, len);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
+ !tpm_is_tpm2_log(log->bios_event_log, len)) {
+ /* try EFI log next */
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return format;
+
+err:
+ devm_kfree(&chip->dev, log->bios_event_log);
+ log->bios_event_log = NULL;
+ return ret;
+}
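
tpm_is_tpm2_log() above distinguishes the two log formats by looking for the TCG "Spec ID Event03" signature inside the first event. A standalone sketch of the same probe, assuming the first entry uses the TPM 1.2-style header (4 + 4 + 20 + 4 = 32 bytes) with the 16-byte, NUL-terminated signature immediately after it:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Sketch: does a raw firmware log buffer look like a TCG 2.0 (crypto-agile) log? */
static bool looks_like_tpm2_log(const unsigned char *buf, size_t len)
{
	static const char sig[16] = "Spec ID Event03";	/* TCG_SPECID_SIG, NUL included */

	return len >= 32 + sizeof(sig) && memcmp(buf + 32, sig, sizeof(sig)) == 0;
}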
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
new file mode 100644
index 0000000000..639c3f395a
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ * Kent Yoder <key@linux.vnet.ibm.com>
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct seq_file *seq;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
+
+ /* now register seq file */
+ err = seq_open(file, seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = chip;
+ }
+
+ return err;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct tpm_chip *chip = seq->private;
+
+ put_device(&chip->dev);
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int tpm_read_log(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ return tpm_read_log_of(chip);
+}
+
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ */
+void tpm_bios_log_setup(struct tpm_chip *chip)
+{
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int log_version;
+ int rc = 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
+ return;
+
+ rc = tpm_read_log(chip);
+ if (rc < 0)
+ return;
+ log_version = rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ chip->bin_log_seqops.seqops =
+ &tpm2_binary_b_measurements_seqops;
+ else
+ chip->bin_log_seqops.seqops =
+ &tpm1_binary_b_measurements_seqops;
+
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("binary_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops =
+ &tpm1_ascii_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("ascii_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+ }
+
+ return;
+
+err:
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return;
+}
+
+void tpm_bios_log_teardown(struct tpm_chip *chip)
+{
+ int i;
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added by making i_private = NULL here during removal
+ * and to check it during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
+}
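
The securityfs files set up above are meant to be read directly from userspace and fed to a TCG event-log parser. A minimal sketch, assuming securityfs is mounted at /sys/kernel/security and the chip registered as tpm0; the TPM 1.2 layout of these bytes is sketched after eventlog/tpm1.c below:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/security/tpm0/binary_bios_measurements";
	unsigned char buf[4096];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* pipe into a TCG log parser */
	fclose(f);
	return EXIT_SUCCESS;
}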
diff --git a/drivers/char/tpm/eventlog/common.h b/drivers/char/tpm/eventlog/common.h
new file mode 100644
index 0000000000..47ff8136ce
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.h
@@ -0,0 +1,35 @@
+#ifndef __TPM_EVENTLOG_COMMON_H__
+#define __TPM_EVENTLOG_COMMON_H__
+
+#include "../tpm.h"
+
+extern const struct seq_operations tpm1_ascii_b_measurements_seqops;
+extern const struct seq_operations tpm1_binary_b_measurements_seqops;
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
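
The #if/#else stubs above are what keep tpm_read_log() in common.c free of ifdefs: a compiled-out backend collapses to a constant -ENODEV and the fallback chain simply moves on to the next reader. The same pattern in generic form (hypothetical names, sketch only):

#include <linux/errno.h>

struct example_chip;	/* hypothetical device type, for illustration only */

#ifdef CONFIG_EXAMPLE_BACKEND
int example_read_log(struct example_chip *chip);
#else
static inline int example_read_log(struct example_chip *chip)
{
	return -ENODEV;	/* caller treats this as "try the next backend" */
}
#endif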
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
new file mode 100644
index 0000000000..4e9d7c2bf3
--- /dev/null
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ * Thiebaud Weksteen <tweek@google.com>
+ */
+
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+
+ struct efi_tcg2_final_events_table *final_tbl = NULL;
+ int final_events_log_size = efi_tpm_final_log_size;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+ u8 tpm_log_version;
+ void *tmp;
+ int ret;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ log = &chip->log;
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table !\n");
+ return -ENOMEM;
+ }
+
+ log_size = log_tbl->size;
+ memunmap(log_tbl);
+
+ if (!log_size) {
+ pr_warn("UEFI TPM log area empty\n");
+ return -EIO;
+ }
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table payload!\n");
+ return -ENOMEM;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL);
+ if (!log->bios_event_log) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ log->bios_event_log_end = log->bios_event_log + log_size;
+ tpm_log_version = log_tbl->version;
+
+ ret = tpm_log_version;
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+ final_events_log_size == 0 ||
+ tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ goto out;
+
+ final_tbl = memremap(efi.tpm_final_log,
+ sizeof(*final_tbl) + final_events_log_size,
+ MEMREMAP_WB);
+ if (!final_tbl) {
+ pr_err("Could not map UEFI TPM final log\n");
+ devm_kfree(&chip->dev, log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * The 'final events log' size excludes the 'final events preboot log'
+ * at its beginning.
+ */
+ final_events_log_size -= log_tbl->final_events_preboot_size;
+
+ /*
+ * Allocate memory for the 'combined log' where we will append the
+ * 'final events log' to.
+ */
+ tmp = devm_krealloc(&chip->dev, log->bios_event_log,
+ log_size + final_events_log_size,
+ GFP_KERNEL);
+ if (!tmp) {
+ devm_kfree(&chip->dev, log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ log->bios_event_log = tmp;
+
+ /*
+ * Append any of the 'final events log' that didn't also end up in the
+ * 'main log'. Events can be logged in both if events are generated
+ * between GetEventLog() and ExitBootServices().
+ */
+ memcpy((void *)log->bios_event_log + log_size,
+ final_tbl->events + log_tbl->final_events_preboot_size,
+ final_events_log_size);
+ /*
+ * The size of the 'combined log' is the size of the 'main log' plus
+ * the size of the 'final events log'.
+ */
+ log->bios_event_log_end = log->bios_event_log +
+ log_size + final_events_log_size;
+
+out:
+ memunmap(final_tbl);
+ memunmap(log_tbl);
+ return ret;
+}
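
The size bookkeeping in tpm_read_log_efi() is the subtle part: only the tail of the final-events table that was not already replayed into the main log gets appended. A worked example with made-up sizes, purely to make the arithmetic concrete:

#include <stddef.h>

/* Made-up numbers, chosen only to illustrate the combined-log sizing above. */
static size_t example_combined_log_size(void)
{
	size_t log_size           = 65536;	/* log_tbl->size (main log)           */
	size_t final_log_size     = 8192;	/* efi_tpm_final_log_size             */
	size_t final_preboot_size = 6144;	/* already replayed into the main log */

	size_t appended = final_log_size - final_preboot_size;	/* 2048 bytes copied  */

	return log_size + appended;			/* 67584-byte combined log */
}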
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
new file mode 100644
index 0000000000..930fe43d5d
--- /dev/null
+++ b/drivers/char/tpm/eventlog/of.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Read the event log created by the firmware on PPC64
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_read_log_memory_region(struct tpm_chip *chip)
+{
+ struct device_node *node;
+ struct resource res;
+ int rc;
+
+ node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
+ if (!node)
+ return -ENODEV;
+
+ rc = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (rc)
+ return rc;
+
+ chip->log.bios_event_log = devm_memremap(&chip->dev, res.start, resource_size(&res),
+ MEMREMAP_WB);
+ if (IS_ERR(chip->log.bios_event_log))
+ return -ENOMEM;
+
+ chip->log.bios_event_log_end = chip->log.bios_event_log + resource_size(&res);
+
+ return chip->flags & TPM_CHIP_FLAG_TPM2 ? EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 :
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
+
+int tpm_read_log_of(struct tpm_chip *chip)
+{
+ struct device_node *np;
+ const u32 *sizep;
+ const u64 *basep;
+ struct tpm_bios_log *log;
+ u32 size;
+ u64 base;
+
+ log = &chip->log;
+ if (chip->dev.parent && chip->dev.parent->of_node)
+ np = chip->dev.parent->of_node;
+ else
+ return -ENODEV;
+
+ if (of_property_read_bool(np, "powered-while-suspended"))
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+
+ sizep = of_get_property(np, "linux,sml-size", NULL);
+ basep = of_get_property(np, "linux,sml-base", NULL);
+ if (sizep == NULL && basep == NULL)
+ return tpm_read_log_memory_region(chip);
+ if (sizep == NULL || basep == NULL)
+ return -EIO;
+
+ /*
+ * For both vtpm/tpm, firmware has log addr and log size in big
+ * endian format. But in case of vtpm, there is a method called
+ * sml-handover which is run during kernel init even before
+ * device tree is setup. This sml-handover function takes care
+ * of endianness and writes to sml-base and sml-size in little
+ * endian format. For this reason, vtpm doesn't need conversion
+ * but physical tpm needs the conversion.
+ */
+ if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0 &&
+ of_property_match_string(np, "compatible", "IBM,vtpm20") < 0) {
+ size = be32_to_cpup((__force __be32 *)sizep);
+ base = be64_to_cpup((__force __be64 *)basep);
+ } else {
+ size = *sizep;
+ base = *basep;
+ }
+
+ if (size == 0) {
+ dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+ return -EIO;
+ }
+
+ log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL);
+ if (!log->bios_event_log)
+ return -ENOMEM;
+
+ log->bios_event_log_end = log->bios_event_log + size;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
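
The endianness comment in tpm_read_log_of() is easy to gloss over: firmware stores linux,sml-size and linux,sml-base big-endian, while the vtpm sml-handover path has already rewritten them in native order, so only the physical-TPM case converts. A standalone illustration with hypothetical property bytes:

#include <stdint.h>

/* raw = { 0x00, 0x01, 0x00, 0x00 } is 65536 in big endian; a raw
 * little-endian load would read it as 256, hence be32_to_cpup() above. */
static uint32_t sml_size_from_be_property(const uint8_t raw[4])
{
	return ((uint32_t)raw[0] << 24) | ((uint32_t)raw[1] << 16) |
	       ((uint32_t)raw[2] <<  8) |  (uint32_t)raw[3];
}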
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
new file mode 100644
index 0000000000..12ee42a31c
--- /dev/null
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ * Kent Yoder <key@linux.vnet.ibm.com>
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ */
+
+#include <linux/seq_file.h>
+#include <linux/efi.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+
+static const char* tcpa_event_type_strings[] = {
+ "PREBOOT",
+ "POST CODE",
+ "",
+ "NO ACTION",
+ "SEPARATOR",
+ "ACTION",
+ "EVENT TAG",
+ "S-CRTM Contents",
+ "S-CRTM Version",
+ "CPU Microcode",
+ "Platform Config Flags",
+ "Table of Devices",
+ "Compact Hash",
+ "IPL",
+ "IPL Partition Data",
+ "Non-Host Code",
+ "Non-Host Config",
+ "Non-Host Info"
+};
+
+static const char* tcpa_pc_event_id_strings[] = {
+ "",
+ "SMBIOS",
+ "BIS Certificate",
+ "POST BIOS ",
+ "ESCD ",
+ "CMOS",
+ "NVRAM",
+ "Option ROM",
+ "Option ROM config",
+ "",
+ "Option ROM microcode ",
+ "S-CRTM Version",
+ "S-CRTM Contents ",
+ "POST Contents ",
+ "Table of Devices",
+};
+
+/* returns pointer to start of pos. entry of tcg log */
+static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t i = 0;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcpa_event *event;
+ u32 converted_event_size;
+ u32 converted_event_type;
+
+ /* read over *pos measurements */
+ do {
+ event = addr;
+
+ /* check if current entry is valid */
+ if (addr + sizeof(struct tcpa_event) > limit)
+ return NULL;
+
+ converted_event_size =
+ do_endian_conversion(event->event_size);
+ converted_event_type =
+ do_endian_conversion(event->event_type);
+
+ if (((converted_event_type == 0) && (converted_event_size == 0))
+ || ((addr + sizeof(struct tcpa_event) + converted_event_size)
+ > limit))
+ return NULL;
+
+ if (i++ == *pos)
+ break;
+
+ addr += (sizeof(struct tcpa_event) + converted_event_size);
+ } while (1);
+
+ return addr;
+}
+
+static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcpa_event *event = v;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *limit = log->bios_event_log_end;
+ u32 converted_event_size;
+ u32 converted_event_type;
+
+ (*pos)++;
+ converted_event_size = do_endian_conversion(event->event_size);
+
+ v += sizeof(struct tcpa_event) + converted_event_size;
+
+ /* now check if current entry is valid */
+ if ((v + sizeof(struct tcpa_event)) > limit)
+ return NULL;
+
+ event = v;
+
+ converted_event_size = do_endian_conversion(event->event_size);
+ converted_event_type = do_endian_conversion(event->event_type);
+
+ if (((converted_event_type == 0) && (converted_event_size == 0)) ||
+ ((v + sizeof(struct tcpa_event) + converted_event_size) > limit))
+ return NULL;
+
+ return v;
+}
+
+static void tpm1_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int get_event_name(char *dest, struct tcpa_event *event,
+ unsigned char * event_entry)
+{
+ const char *name = "";
+ /* 41 so there is room for 40 data and 1 nul */
+ char data[41] = "";
+ int i, n_len = 0, d_len = 0;
+ struct tcpa_pc_event *pc_event;
+
+ switch (do_endian_conversion(event->event_type)) {
+ case PREBOOT:
+ case POST_CODE:
+ case UNUSED:
+ case NO_ACTION:
+ case SCRTM_CONTENTS:
+ case SCRTM_VERSION:
+ case CPU_MICROCODE:
+ case PLATFORM_CONFIG_FLAGS:
+ case TABLE_OF_DEVICES:
+ case COMPACT_HASH:
+ case IPL:
+ case IPL_PARTITION_DATA:
+ case NONHOST_CODE:
+ case NONHOST_CONFIG:
+ case NONHOST_INFO:
+ name = tcpa_event_type_strings[do_endian_conversion
+ (event->event_type)];
+ n_len = strlen(name);
+ break;
+ case SEPARATOR:
+ case ACTION:
+ if (MAX_TEXT_EVENT >
+ do_endian_conversion(event->event_size)) {
+ name = event_entry;
+ n_len = do_endian_conversion(event->event_size);
+ }
+ break;
+ case EVENT_TAG:
+ pc_event = (struct tcpa_pc_event *)event_entry;
+
+ /* ToDo: Raw data -> Base64 */
+
+ switch (do_endian_conversion(pc_event->event_id)) {
+ case SMBIOS:
+ case BIS_CERT:
+ case CMOS:
+ case NVRAM:
+ case OPTION_ROM_EXEC:
+ case OPTION_ROM_CONFIG:
+ case S_CRTM_VERSION:
+ name = tcpa_pc_event_id_strings[do_endian_conversion
+ (pc_event->event_id)];
+ n_len = strlen(name);
+ break;
+ /* hash data */
+ case POST_BIOS_ROM:
+ case ESCD:
+ case OPTION_ROM_MICROCODE:
+ case S_CRTM_CONTENTS:
+ case POST_CONTENTS:
+ name = tcpa_pc_event_id_strings[do_endian_conversion
+ (pc_event->event_id)];
+ n_len = strlen(name);
+ for (i = 0; i < 20; i++)
+ d_len += sprintf(&data[2*i], "%02x",
+ pc_event->event_data[i]);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
+ n_len, name, d_len, data);
+
+}
+
+static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+ struct tcpa_event *event = v;
+ struct tcpa_event temp_event;
+ char *temp_ptr;
+ int i;
+
+ memcpy(&temp_event, event, sizeof(struct tcpa_event));
+
+ /* convert raw integers for endianness */
+ temp_event.pcr_index = do_endian_conversion(event->pcr_index);
+ temp_event.event_type = do_endian_conversion(event->event_type);
+ temp_event.event_size = do_endian_conversion(event->event_size);
+
+ temp_ptr = (char *) &temp_event;
+
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
+
+ return 0;
+
+}
+
+static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
+{
+ char *eventname;
+ struct tcpa_event *event = v;
+ unsigned char *event_entry =
+ (unsigned char *)(v + sizeof(struct tcpa_event));
+
+ eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+ if (!eventname) {
+ printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* 1st: PCR */
+ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
+
+ /* 2nd: SHA1 */
+ seq_printf(m, "%20phN", event->pcr_value);
+
+ /* 3rd: event type identifier */
+ seq_printf(m, " %02x", do_endian_conversion(event->event_type));
+
+ get_event_name(eventname, event, event_entry);
+
+ /* 4th: eventname <= max + \'0' delimiter */
+ seq_printf(m, " %s\n", eventname);
+
+ kfree(eventname);
+ return 0;
+}
+
+const struct seq_operations tpm1_ascii_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_ascii_bios_measurements_show,
+};
+
+const struct seq_operations tpm1_binary_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_binary_bios_measurements_show,
+};
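
The start/next handlers above walk the in-kernel copy of the TPM 1.2 log entry by entry; the same walk works in userspace over bytes read from binary_bios_measurements (see the sketch after eventlog/common.c). A sketch assuming the usual struct tcpa_event layout (pcr_index, event_type, 20-byte SHA-1 digest, event_size, then event_size bytes of data) stored in native byte order:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed mirror of struct tcpa_event: 4 + 4 + 20 + 4 = 32 header bytes. */
struct tcpa_event_hdr {
	uint32_t pcr_index;
	uint32_t event_type;
	uint8_t  pcr_value[20];		/* SHA-1 digest */
	uint32_t event_size;		/* event_size bytes of data follow */
} __attribute__((packed));

static void walk_tpm1_log(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct tcpa_event_hdr) <= len) {
		struct tcpa_event_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.event_type == 0 && hdr.event_size == 0)
			break;	/* same termination condition as the driver */
		if (off + sizeof(hdr) + hdr.event_size > len)
			break;	/* truncated entry */
		printf("PCR %2u type 0x%02x size %u\n",
		       (unsigned)hdr.pcr_index, (unsigned)hdr.event_type,
		       (unsigned)hdr.event_size);
		off += sizeof(hdr) + hdr.event_size;
	}
}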
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
new file mode 100644
index 0000000000..37a0580098
--- /dev/null
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to TPM 2.0 event log as written by Firmware.
+ * It assumes that writer of event log has followed TCG Specification
+ * for Family "2.0" and written the event data in little endian.
+ * With that, it doesn't need any endian conversion for structure
+ * content.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+/*
+ * calc_tpm2_event_size() - calculate the event size, where event
+ * is an entry in the TPM 2.0 event log. The event is of type Crypto
+ * Agile Log Entry Format as defined in TCG EFI Protocol Specification
+ * Family "2.0".
+ *
+ * @event: event whose size is to be calculated.
+ * @event_header: the first event in the event log.
+ *
+ * Returns size of the event. If it is an invalid event, returns 0.
+ */
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ struct tcg_pcr_event *event_header)
+{
+ return __calc_tpm2_event_size(event, event_header, false);
+}
+
+static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcg_pcr_event *event_header;
+ struct tcg_pcr_event2_head *event;
+ size_t size;
+ int i;
+
+ event_header = addr;
+ size = struct_size(event_header, event, event_header->event_size);
+
+ if (*pos == 0) {
+ if (addr + size < limit) {
+ if ((event_header->event_type == 0) &&
+ (event_header->event_size == 0))
+ return NULL;
+ return SEQ_START_TOKEN;
+ }
+ }
+
+ if (*pos > 0) {
+ addr += size;
+ event = addr;
+ size = calc_tpm2_event_size(event, event_header);
+ if ((addr + size >= limit) || (size == 0))
+ return NULL;
+ }
+
+ for (i = 0; i < (*pos - 1); i++) {
+ event = addr;
+ size = calc_tpm2_event_size(event, event_header);
+
+ if ((addr + size >= limit) || (size == 0))
+ return NULL;
+ addr += size;
+ }
+
+ return addr;
+}
+
+static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcg_pcr_event *event_header;
+ struct tcg_pcr_event2_head *event;
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ void *limit = log->bios_event_log_end;
+ size_t event_size;
+ void *marker;
+
+ (*pos)++;
+ event_header = log->bios_event_log;
+
+ if (v == SEQ_START_TOKEN) {
+ event_size = struct_size(event_header, event,
+ event_header->event_size);
+ marker = event_header;
+ } else {
+ event = v;
+ event_size = calc_tpm2_event_size(event, event_header);
+ if (event_size == 0)
+ return NULL;
+ marker = event;
+ }
+
+ marker = marker + event_size;
+ if (marker >= limit)
+ return NULL;
+ v = marker;
+ event = v;
+
+ event_size = calc_tpm2_event_size(event, event_header);
+ if (((v + event_size) >= limit) || (event_size == 0))
+ return NULL;
+
+ return v;
+}
+
+static void tpm2_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+ struct tpm_chip *chip = m->private;
+ struct tpm_bios_log *log = &chip->log;
+ struct tcg_pcr_event *event_header = log->bios_event_log;
+ struct tcg_pcr_event2_head *event = v;
+ void *temp_ptr;
+ size_t size;
+
+ if (v == SEQ_START_TOKEN) {
+ size = struct_size(event_header, event,
+ event_header->event_size);
+ temp_ptr = event_header;
+
+ if (size > 0)
+ seq_write(m, temp_ptr, size);
+ } else {
+ size = calc_tpm2_event_size(event, event_header);
+ temp_ptr = event;
+ if (size > 0)
+ seq_write(m, temp_ptr, size);
+ }
+
+ return 0;
+}
+
+const struct seq_operations tpm2_binary_b_measurements_seqops = {
+ .start = tpm2_bios_measurements_start,
+ .next = tpm2_bios_measurements_next,
+ .stop = tpm2_bios_measurements_stop,
+ .show = tpm2_binary_bios_measurements_show,
+};
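
One detail worth spelling out: the log header (the first event) is still in the TPM 1.2 SHA-1 format, which is why the code above sizes it with struct_size(event_header, event, event_header->event_size) rather than with calc_tpm2_event_size(). A plain-C sketch of what that struct_size() call computes, minus the overflow checking the macro adds:

#include <linux/tpm_eventlog.h>

/* Sketch only: equivalent arithmetic for the flexible-array header event. */
static size_t tpm2_log_header_size(const struct tcg_pcr_event *event_header)
{
	return sizeof(*event_header) + event_header->event_size;
}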
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
new file mode 100644
index 0000000000..601c2ae5bf
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config TCG_TIS_ST33ZP24
+ tristate
+ help
+ STMicroelectronics ST33ZP24 core driver. It implements the core
+ TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will
+ register against it.
+
+ To compile this driver as a module, choose m here. The module will be called
+ tpm_st33zp24.
+
+config TCG_TIS_ST33ZP24_I2C
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (I2C)"
+ depends on I2C
+ select TCG_TIS_ST33ZP24
+ help
+ This module adds support for the STMicroelectronics TPM security chip
+ ST33ZP24 with i2c interface.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24_i2c.
+
+config TCG_TIS_ST33ZP24_SPI
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (SPI)"
+ depends on SPI
+ select TCG_TIS_ST33ZP24
+ help
+ This module adds support for the STMicroelectronics TPM security chip
+ ST33ZP24 with spi interface.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_st33zp24_spi.
diff --git a/drivers/char/tpm/st33zp24/Makefile b/drivers/char/tpm/st33zp24/Makefile
new file mode 100644
index 0000000000..649e41107d
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for ST33ZP24 TPM 1.2 driver
+#
+
+tpm_st33zp24-objs = st33zp24.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24) += tpm_st33zp24.o
+
+tpm_st33zp24_i2c-objs = i2c.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_I2C) += tpm_st33zp24_i2c.o
+
+tpm_st33zp24_spi-objs = spi.o
+obj-$(CONFIG_TCG_TIS_ST33ZP24_SPI) += tpm_st33zp24_spi.o
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
new file mode 100644
index 0000000000..661574bb0a
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/acpi.h>
+#include <linux/tpm.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_DUMMY_BYTE 0xAA
+
+struct st33zp24_i2c_phy {
+ struct i2c_client *client;
+ u8 buf[ST33ZP24_BUFSIZE + 1];
+};
+
+/*
+ * write8_reg
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: Returns negative errno, or else the number of bytes written.
+ */
+static int write8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ struct st33zp24_i2c_phy *phy = phy_id;
+
+ phy->buf[0] = tpm_register;
+ memcpy(phy->buf + 1, tpm_data, tpm_size);
+ return i2c_master_send(phy->client, phy->buf, tpm_size + 1);
+} /* write8_reg() */
+
+/*
+ * read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, tpm TPM response size to read.
+ * @return: number of byte read successfully: should be one if success.
+ */
+static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+{
+ struct st33zp24_i2c_phy *phy = phy_id;
+ u8 status = 0;
+ u8 data;
+
+ data = TPM_DUMMY_BYTE;
+ status = write8_reg(phy, tpm_register, &data, 1);
+ if (status == 2)
+ status = i2c_master_recv(phy->client, tpm_data, tpm_size);
+ return status;
+} /* read8_reg() */
+
+/*
+ * st33zp24_i2c_send
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, the length of the data
+ * @return: number of bytes written successfully; should be one on success.
+ */
+static int st33zp24_i2c_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ return write8_reg(phy_id, tpm_register | TPM_WRITE_DIRECTION, tpm_data,
+ tpm_size);
+}
+
+/*
+ * st33zp24_i2c_recv
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read successfully; should be one on success.
+ */
+static int st33zp24_i2c_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ return read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+}
+
+static const struct st33zp24_phy_ops i2c_phy_ops = {
+ .send = st33zp24_i2c_send,
+ .recv = st33zp24_i2c_recv,
+};
+
+/*
+ * st33zp24_i2c_probe() - initialize the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @return: 0 in case of success.
+ * or a negative value describing the error.
+ */
+static int st33zp24_i2c_probe(struct i2c_client *client)
+{
+ struct st33zp24_i2c_phy *phy;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_info(&client->dev, "client not i2c capable\n");
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct st33zp24_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->client = client;
+
+ return st33zp24_probe(phy, &i2c_phy_ops, &client->dev, client->irq);
+}
+
+/*
+ * st33zp24_i2c_remove() - remove the TPM device
+ * @param: client, the i2c_client description (TPM I2C description).
+ */
+static void st33zp24_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ st33zp24_remove(chip);
+}
+
+static const struct i2c_device_id st33zp24_i2c_id[] = {
+ {TPM_ST33_I2C, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
+
+static const struct of_device_id of_st33zp24_i2c_match[] __maybe_unused = {
+ { .compatible = "st,st33zp24-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
+
+static const struct acpi_device_id st33zp24_i2c_acpi_match[] __maybe_unused = {
+ {"SMO3324"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match);
+
+static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend,
+ st33zp24_pm_resume);
+
+static struct i2c_driver st33zp24_i2c_driver = {
+ .driver = {
+ .name = TPM_ST33_I2C,
+ .pm = &st33zp24_i2c_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_i2c_match),
+ .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
+ },
+ .probe = st33zp24_i2c_probe,
+ .remove = st33zp24_i2c_remove,
+ .id_table = st33zp24_i2c_id
+};
+
+module_i2c_driver(st33zp24_i2c_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
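
The two callbacks registered in i2c_phy_ops above are the whole contract between st33zp24.c and a bus backend. A hypothetical loopback phy wired up with the same send/recv signatures makes that split visible (sketch only; the name loopback_phy and the register-file dimensions are invented for illustration):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#include "st33zp24.h"

struct loopback_phy {
	u8 regs[256][64];	/* invented: 64 bytes of backing store per register index */
};

static int loopback_send(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
{
	struct loopback_phy *phy = phy_id;

	memcpy(phy->regs[tpm_register], tpm_data, min(tpm_size, 64));
	return tpm_size;
}

static int loopback_recv(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
{
	struct loopback_phy *phy = phy_id;

	memcpy(tpm_data, phy->regs[tpm_register], min(tpm_size, 64));
	return tpm_size;
}

static const struct st33zp24_phy_ops loopback_phy_ops = {
	.send = loopback_send,
	.recv = loopback_recv,
};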
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
new file mode 100644
index 0000000000..f5811b301d
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+#include <linux/acpi.h>
+#include <linux/tpm.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_DATA_FIFO 0x24
+#define TPM_INTF_CAPABILITY 0x14
+
+#define TPM_DUMMY_BYTE 0x00
+
+#define MAX_SPI_LATENCY 15
+#define LOCALITY0 0
+
+#define ST33ZP24_OK 0x5A
+#define ST33ZP24_UNDEFINED_ERR 0x80
+#define ST33ZP24_BADLOCALITY 0x81
+#define ST33ZP24_TISREGISTER_UNKNOWN 0x82
+#define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83
+#define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84
+#define ST33ZP24_BAD_COMMAND_ORDER 0x85
+#define ST33ZP24_INCORECT_RECEIVED_LENGTH 0x86
+#define ST33ZP24_TPM_FIFO_OVERFLOW 0x89
+#define ST33ZP24_UNEXPECTED_READ_FIFO 0x8A
+#define ST33ZP24_UNEXPECTED_WRITE_FIFO 0x8B
+#define ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END 0x90
+#define ST33ZP24_DUMMY_BYTES 0x00
+
+/*
+ * A TPM command can be up to 2048 bytes and a TPM response up to
+ * 1024 bytes.
+ * Between command and response there are latency bytes (up to 15;
+ * on the st33zp24, 2 are usually enough).
+ *
+ * Overall, when sending a command and expecting an answer, the worst
+ * case is 2048 bytes (for the TPM command) plus 1024 bytes (for the
+ * TPM answer), plus up to 15 latency bytes before the answer is
+ * available: 2048 + 1024 + 15.
+ */
+#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
+ MAX_SPI_LATENCY)
+
+
+struct st33zp24_spi_phy {
+ struct spi_device *spi_device;
+
+ u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+ u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE];
+
+ int latency;
+};
+
+static int st33zp24_status_to_errno(u8 code)
+{
+ switch (code) {
+ case ST33ZP24_OK:
+ return 0;
+ case ST33ZP24_UNDEFINED_ERR:
+ case ST33ZP24_BADLOCALITY:
+ case ST33ZP24_TISREGISTER_UNKNOWN:
+ case ST33ZP24_LOCALITY_NOT_ACTIVATED:
+ case ST33ZP24_HASH_END_BEFORE_HASH_START:
+ case ST33ZP24_BAD_COMMAND_ORDER:
+ case ST33ZP24_UNEXPECTED_READ_FIFO:
+ case ST33ZP24_UNEXPECTED_WRITE_FIFO:
+ case ST33ZP24_CMDRDY_SET_WHEN_PROCESSING_HASH_END:
+ return -EPROTO;
+ case ST33ZP24_INCORECT_RECEIVED_LENGTH:
+ case ST33ZP24_TPM_FIFO_OVERFLOW:
+ return -EMSGSIZE;
+ case ST33ZP24_DUMMY_BYTES:
+ return -ENOSYS;
+ }
+ return code;
+}
+
+/*
+ * st33zp24_spi_send
+ * Send byte to the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: should be zero if success else a negative error code.
+ */
+static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int total_length = 0, ret = 0;
+ struct st33zp24_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_device;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ };
+
+ /* Pre-Header */
+ phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0;
+ phy->tx_buf[total_length++] = tpm_register;
+
+ if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) {
+ phy->tx_buf[total_length++] = tpm_size >> 8;
+ phy->tx_buf[total_length++] = tpm_size;
+ }
+
+ memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size);
+ total_length += tpm_size;
+
+ memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency);
+
+ spi_xfer.len = total_length + phy->latency;
+
+ ret = spi_sync_transfer(dev, &spi_xfer, 1);
+ if (ret == 0)
+ ret = phy->rx_buf[total_length + phy->latency - 1];
+
+ return st33zp24_status_to_errno(ret);
+} /* st33zp24_spi_send() */
+
+/*
+ * st33zp24_spi_read8_reg
+ * Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: should be zero if success else a negative error code.
+ */
+static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int total_length = 0, ret;
+ struct st33zp24_spi_phy *phy = phy_id;
+ struct spi_device *dev = phy->spi_device;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ };
+
+ /* Pre-Header */
+ phy->tx_buf[total_length++] = LOCALITY0;
+ phy->tx_buf[total_length++] = tpm_register;
+
+ memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE,
+ phy->latency + tpm_size);
+
+ spi_xfer.len = total_length + phy->latency + tpm_size;
+
+ /* header + status byte + size of the data + status byte */
+ ret = spi_sync_transfer(dev, &spi_xfer, 1);
+ if (tpm_size > 0 && ret == 0) {
+ ret = phy->rx_buf[total_length + phy->latency - 1];
+
+ memcpy(tpm_data, phy->rx_buf + total_length + phy->latency,
+ tpm_size);
+ }
+
+ return ret;
+} /* st33zp24_spi_read8_reg() */
+
+/*
+ * st33zp24_spi_recv
+ * Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
+ * @param: phy_id, the phy description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: number of bytes read successfully; should be one on success.
+ */
+static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
+{
+ int ret;
+
+ ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+ if (!st33zp24_status_to_errno(ret))
+ return tpm_size;
+ return ret;
+} /* st33zp24_spi_recv() */
+
+static int st33zp24_spi_evaluate_latency(void *phy_id)
+{
+ struct st33zp24_spi_phy *phy = phy_id;
+ int latency = 1, status = 0;
+ u8 data = 0;
+
+ while (!status && latency < MAX_SPI_LATENCY) {
+ phy->latency = latency;
+ status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY,
+ &data, 1);
+ latency++;
+ }
+ if (status < 0)
+ return status;
+ if (latency == MAX_SPI_LATENCY)
+ return -ENODEV;
+
+ return latency - 1;
+} /* evaluate_latency() */
+
+static const struct st33zp24_phy_ops spi_phy_ops = {
+ .send = st33zp24_spi_send,
+ .recv = st33zp24_spi_recv,
+};
+
+/*
+ * st33zp24_spi_probe initialize the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ * @return: 0 in case of success.
+ * or a negative value describing the error.
+ */
+static int st33zp24_spi_probe(struct spi_device *dev)
+{
+ struct st33zp24_spi_phy *phy;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct st33zp24_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->spi_device = dev;
+
+ phy->latency = st33zp24_spi_evaluate_latency(phy);
+ if (phy->latency <= 0)
+ return -ENODEV;
+
+ return st33zp24_probe(phy, &spi_phy_ops, &dev->dev, dev->irq);
+}
+
+/*
+ * st33zp24_spi_remove() - remove the TPM device
+ * @param: dev, the spi_device description (TPM SPI description).
+ */
+static void st33zp24_spi_remove(struct spi_device *dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+
+ st33zp24_remove(chip);
+}
+
+static const struct spi_device_id st33zp24_spi_id[] = {
+ {TPM_ST33_SPI, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, st33zp24_spi_id);
+
+static const struct of_device_id of_st33zp24_spi_match[] __maybe_unused = {
+ { .compatible = "st,st33zp24-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match);
+
+static const struct acpi_device_id st33zp24_spi_acpi_match[] __maybe_unused = {
+ {"SMO3324"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match);
+
+static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
+ st33zp24_pm_resume);
+
+static struct spi_driver st33zp24_spi_driver = {
+ .driver = {
+ .name = "st33zp24-spi",
+ .pm = &st33zp24_spi_ops,
+ .of_match_table = of_match_ptr(of_st33zp24_spi_match),
+ .acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match),
+ },
+ .probe = st33zp24_spi_probe,
+ .remove = st33zp24_spi_remove,
+ .id_table = st33zp24_spi_id,
+};
+
+module_spi_driver(st33zp24_spi_driver);
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
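
The worst-case sizing comment above boils down to a single constant, and the frame built by st33zp24_spi_send() explains where the latency bytes go. A sketch of the arithmetic, assuming ST33ZP24_BUFSIZE is the 2048-byte command limit defined in st33zp24.h:

/* Frame layout built by st33zp24_spi_send() for a TPM_DATA_FIFO write:
 *   [ dir|locality ][ reg ][ len_hi ][ len_lo ][ payload ... ][ latency dummies ]
 * The chip answers its status byte at index total_length + latency - 1 of rx_buf.
 */
#define EXAMPLE_BUFSIZE		2048			/* largest TPM command (assumed) */
#define EXAMPLE_ANSWER		(EXAMPLE_BUFSIZE / 2)	/* largest TPM response          */
#define EXAMPLE_MAX_LATENCY	15			/* dummy bytes before the answer */

#define EXAMPLE_SPI_BUFFER_SIZE	\
	(EXAMPLE_BUFSIZE + EXAMPLE_ANSWER + EXAMPLE_MAX_LATENCY)

_Static_assert(EXAMPLE_SPI_BUFFER_SIZE == 3087, "2048 + 1024 + 15");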
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
new file mode 100644
index 0000000000..a5b554cd47
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/freezer.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "../tpm.h"
+#include "st33zp24.h"
+
+#define TPM_ACCESS 0x0
+#define TPM_STS 0x18
+#define TPM_DATA_FIFO 0x24
+#define TPM_INTF_CAPABILITY 0x14
+#define TPM_INT_STATUS 0x10
+#define TPM_INT_ENABLE 0x08
+
+#define LOCALITY0 0
+
+enum st33zp24_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum st33zp24_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum st33zp24_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
+ TPM_INTF_WAKE_UP_READY_INT = 0x020,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
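+/* Default TIS timeouts, in milliseconds (used below via msecs_to_jiffies()). */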
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750,
+ TIS_LONG_TIMEOUT = 2000,
+};
+
+/*
+ * clear the pending interrupt.
+ */
+static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
+{
+ u8 interrupt;
+
+ tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
+ return interrupt;
+}
+
+/*
+ * cancel the current command execution or set STS to COMMAND READY.
+ */
+static void st33zp24_cancel(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+
+ data = TPM_STS_COMMAND_READY;
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+}
+
+/*
+ * return the TPM_STS register
+ */
+static u8 st33zp24_status(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+
+ tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
+ return data;
+}
+
+/*
+ * check whether the locality is active
+ */
+static bool check_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+ u8 status;
+
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+ if (status && (data &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return true;
+
+ return false;
+}
+
+static int request_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ long ret;
+ u8 data;
+
+ if (check_locality(chip))
+ return tpm_dev->locality;
+
+ data = TPM_ACCESS_REQUEST_USE;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ stop = jiffies + chip->timeout_a;
+
+ /* The locality is usually granted right after the request */
+ do {
+ if (check_locality(chip))
+ return tpm_dev->locality;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+
+ /* could not get locality */
+ return -EACCES;
+}
+
+static void release_locality(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u8 data;
+
+ data = TPM_ACCESS_ACTIVE_LOCALITY;
+
+ tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
+}
+
+/*
+ * get_burstcount returns the burst count, i.e. how many bytes the TPM FIFO
+ * can accept or provide without waiting (read from TPM_STS + 1 and + 2).
+ */
+static int get_burstcount(struct tpm_chip *chip)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int burstcnt, status;
+ u8 temp;
+
+ stop = jiffies + chip->timeout_d;
+ do {
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1,
+ &temp, 1);
+ if (status < 0)
+ return -EBUSY;
+
+ burstcnt = temp;
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2,
+ &temp, 1);
+ if (status < 0)
+ return -EBUSY;
+
+ burstcnt |= temp << 8;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * wait for a TPM_STS value
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue, bool check_cancel)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int ret = 0;
+ bool canceled = false;
+ bool condition;
+ u32 cur_intrs;
+ u8 status;
+
+ /* check current status */
+ status = st33zp24_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ cur_intrs = tpm_dev->intrs;
+ clear_interruption(tpm_dev);
+ enable_irq(tpm_dev->irq);
+
+ do {
+ if (ret == -ERESTARTSYS && freezing(current))
+ clear_thread_flag(TIF_SIGPENDING);
+
+ timeout = stop - jiffies;
+ if ((long) timeout <= 0)
+ return -1;
+
+ ret = wait_event_interruptible_timeout(*queue,
+ cur_intrs != tpm_dev->intrs,
+ timeout);
+ clear_interruption(tpm_dev);
+ condition = wait_for_tpm_stat_cond(chip, mask,
+ check_cancel, &canceled);
+ if (ret >= 0 && condition) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ } while (ret == -ERESTARTSYS && freezing(current));
+
+ disable_irq_nosync(tpm_dev->irq);
+
+ } else {
+ do {
+ msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+
+ return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ int size = 0, burstcnt, len, ret;
+
+ while (size < count &&
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->timeout_c,
+ &tpm_dev->read_queue, true) == 0) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ len = min_t(int, burstcnt, count - size);
+ ret = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + size, len);
+ if (ret < 0)
+ return ret;
+
+ size += len;
+ }
+ return size;
+}
+
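+/*
+ * Interrupt handler: count the interrupt, wake any waiter sleeping in
+ * wait_for_stat() and mask the line until the next wait re-enables it.
+ */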
+static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+
+ tpm_dev->intrs++;
+ wake_up_interruptible(&tpm_dev->read_queue);
+ disable_irq_nosync(tpm_dev->irq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * send a TPM command through the underlying bus (I2C or SPI phy).
+ */
+static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
+ size_t len)
+{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ u32 status, i, size, ordinal;
+ int burstcnt = 0;
+ int ret;
+ u8 data;
+
+ if (len < TPM_HEADER_SIZE)
+ return -EBUSY;
+
+ ret = request_locality(chip);
+ if (ret < 0)
+ return ret;
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ st33zp24_cancel(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+ &tpm_dev->read_queue, false) < 0) {
+ ret = -ETIME;
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < len - 1;) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ size = min_t(int, len - i - 1, burstcnt);
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + i, size);
+ if (ret < 0)
+ goto out_err;
+
+ i += size;
+ }
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_DATA_FIFO,
+ buf + len - 1, 1);
+ if (ret < 0)
+ goto out_err;
+
+ status = st33zp24_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ data = TPM_STS_GO;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
+ if (ret < 0)
+ goto out_err;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ tpm_calc_ordinal_duration(chip, ordinal),
+ &tpm_dev->read_queue, false);
+ if (ret < 0)
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+ return ret;
+}
+
+static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ size_t count)
+{
+ int size = 0;
+ u32 expected;
+
+ if (!chip)
+ return -EBUSY;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *)(buf + 2));
+ if (expected > count || expected < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ }
+
+out:
+ st33zp24_cancel(chip);
+ release_locality(chip);
+ return size;
+}
+
+static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops st33zp24_tpm = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .send = st33zp24_send,
+ .recv = st33zp24_recv,
+ .cancel = st33zp24_cancel,
+ .status = st33zp24_status,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = st33zp24_req_canceled,
+};
+
+static const struct acpi_gpio_params lpcpd_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_st33zp24_gpios[] = {
+ { "lpcpd-gpios", &lpcpd_gpios, 1 },
+ { },
+};
+
+/*
+ * initialize the TPM device
+ */
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+ struct device *dev, int irq)
+{
+ int ret;
+ u8 intmask = 0;
+ struct tpm_chip *chip;
+ struct st33zp24_dev *tpm_dev;
+
+ chip = tpmm_chip_alloc(dev, &st33zp24_tpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ tpm_dev = devm_kzalloc(dev, sizeof(struct st33zp24_dev),
+ GFP_KERNEL);
+ if (!tpm_dev)
+ return -ENOMEM;
+
+ tpm_dev->phy_id = phy_id;
+ tpm_dev->ops = ops;
+ dev_set_drvdata(&chip->dev, tpm_dev);
+
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ tpm_dev->locality = LOCALITY0;
+
+ if (ACPI_COMPANION(dev)) {
+ ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Get the LPCPD GPIO. It is not an issue if the lpcpd pin is not
+ * specified, as power management can also be handled through TPM
+ * specific commands.
+ */
+ tpm_dev->io_lpcpd = devm_gpiod_get_optional(dev, "lpcpd",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(tpm_dev->io_lpcpd);
+ if (ret) {
+ dev_err(dev, "failed to request lpcpd gpio: %d\n", ret);
+ return ret;
+ }
+
+ if (irq) {
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&tpm_dev->read_queue);
+ tpm_dev->intrs = 0;
+
+ if (request_locality(chip) != LOCALITY0) {
+ ret = -ENODEV;
+ goto _tpm_clean_answer;
+ }
+
+ clear_interruption(tpm_dev);
+ ret = devm_request_irq(dev, irq, tpm_ioserirq_handler,
+ IRQF_TRIGGER_HIGH, "TPM SERIRQ management",
+ chip);
+ if (ret < 0) {
+ dev_err(&chip->dev, "TPM SERIRQ signals %d not available\n",
+ irq);
+ goto _tpm_clean_answer;
+ }
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_STS_VALID_INT
+ | TPM_INTF_DATA_AVAIL_INT;
+
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_ENABLE,
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
+
+ intmask = TPM_GLOBAL_INT_ENABLE;
+ ret = tpm_dev->ops->send(tpm_dev->phy_id, (TPM_INT_ENABLE + 3),
+ &intmask, 1);
+ if (ret < 0)
+ goto _tpm_clean_answer;
+
+ tpm_dev->irq = irq;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+
+ disable_irq_nosync(tpm_dev->irq);
+ }
+
+ return tpm_chip_register(chip);
+_tpm_clean_answer:
+ dev_info(&chip->dev, "TPM initialization fail\n");
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_probe);
+
+void st33zp24_remove(struct tpm_chip *chip)
+{
+ tpm_chip_unregister(chip);
+}
+EXPORT_SYMBOL(st33zp24_remove);
+
+#ifdef CONFIG_PM_SLEEP
+int st33zp24_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+
+ int ret = 0;
+
+ if (tpm_dev->io_lpcpd)
+ gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 0);
+ else
+ ret = tpm_pm_suspend(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_pm_suspend);
+
+int st33zp24_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ if (tpm_dev->io_lpcpd) {
+ gpiod_set_value_cansleep(tpm_dev->io_lpcpd, 1);
+ ret = wait_for_stat(chip,
+ TPM_STS_VALID, chip->timeout_b,
+ &tpm_dev->read_queue, false);
+ } else {
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm1_do_selftest(chip);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(st33zp24_pm_resume);
+#endif
+
+MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
+MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver");
+MODULE_VERSION("1.3.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
new file mode 100644
index 0000000000..5acc85f711
--- /dev/null
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * STMicroelectronics TPM Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009 - 2016 STMicroelectronics
+ */
+
+#ifndef __LOCAL_ST33ZP24_H__
+#define __LOCAL_ST33ZP24_H__
+
+#define TPM_ST33_I2C "st33zp24-i2c"
+#define TPM_ST33_SPI "st33zp24-spi"
+
+#define TPM_WRITE_DIRECTION 0x80
+#define ST33ZP24_BUFSIZE 2048
+
+struct st33zp24_dev {
+ struct tpm_chip *chip;
+ void *phy_id;
+ const struct st33zp24_phy_ops *ops;
+ int locality;
+ int irq;
+ u32 intrs;
+ struct gpio_desc *io_lpcpd;
+ wait_queue_head_t read_queue;
+};
+
+
+struct st33zp24_phy_ops {
+ int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+ int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
+};
+
+#ifdef CONFIG_PM_SLEEP
+int st33zp24_pm_suspend(struct device *dev);
+int st33zp24_pm_resume(struct device *dev);
+#endif
+
+int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
+ struct device *dev, int irq);
+void st33zp24_remove(struct tpm_chip *chip);
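+
+/*
+ * Illustrative sketch (not part of the driver): a hypothetical phy driver
+ * wires its bus accessors (which send/receive tpm_size bytes at
+ * tpm_register) into st33zp24_phy_ops and hands them to st33zp24_probe(),
+ * mirroring the SPI phy above. The my_* names are placeholders.
+ *
+ *    static const struct st33zp24_phy_ops my_phy_ops = {
+ *            .send = my_phy_send,
+ *            .recv = my_phy_recv,
+ *    };
+ *
+ *    static int my_phy_probe(struct my_bus_device *dev)
+ *    {
+ *            struct my_phy *phy;
+ *
+ *            phy = devm_kzalloc(&dev->dev, sizeof(*phy), GFP_KERNEL);
+ *            if (!phy)
+ *                    return -ENOMEM;
+ *            return st33zp24_probe(phy, &my_phy_ops, &dev->dev, dev->irq);
+ *    }
+ */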
+#endif /* __LOCAL_ST33ZP24_H__ */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
new file mode 100644
index 0000000000..42b1062e33
--- /dev/null
+++ b/drivers/char/tpm/tpm-chip.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * TPM chip management routines.
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
+#include "tpm.h"
+
+DEFINE_IDR(dev_nums_idr);
+static DEFINE_MUTEX(idr_lock);
+
+const struct class tpm_class = {
+ .name = "tpm",
+ .shutdown_pre = tpm_class_shutdown,
+};
+const struct class tpmrm_class = {
+ .name = "tpmrm",
+};
+dev_t tpm_devt;
+
+static int tpm_request_locality(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!chip->ops->request_locality)
+ return 0;
+
+ rc = chip->ops->request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
+ chip->locality = rc;
+ return 0;
+}
+
+static void tpm_relinquish_locality(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!chip->ops->relinquish_locality)
+ return;
+
+ rc = chip->ops->relinquish_locality(chip, chip->locality);
+ if (rc)
+ dev_err(&chip->dev, "%s: : error %d\n", __func__, rc);
+
+ chip->locality = -1;
+}
+
+static int tpm_cmd_ready(struct tpm_chip *chip)
+{
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip)
+{
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
+static void tpm_clk_enable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, true);
+}
+
+static void tpm_clk_disable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, false);
+}
+
+/**
+ * tpm_chip_start() - power on the TPM
+ * @chip: a TPM chip to use
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ */
+int tpm_chip_start(struct tpm_chip *chip)
+{
+ int ret;
+
+ tpm_clk_enable(chip);
+
+ if (chip->locality == -1) {
+ ret = tpm_request_locality(chip);
+ if (ret) {
+ tpm_clk_disable(chip);
+ return ret;
+ }
+ }
+
+ ret = tpm_cmd_ready(chip);
+ if (ret) {
+ tpm_relinquish_locality(chip);
+ tpm_clk_disable(chip);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_start);
+
+/**
+ * tpm_chip_stop() - power off the TPM
+ * @chip: a TPM chip to use
+ */
+void tpm_chip_stop(struct tpm_chip *chip)
+{
+ tpm_go_idle(chip);
+ tpm_relinquish_locality(chip);
+ tpm_clk_disable(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_stop);
+
+/**
+ * tpm_try_get_ops() - Get a ref to the tpm_chip
+ * @chip: Chip to ref
+ *
+ * The caller must already have some kind of locking to ensure that chip is
+ * valid. This function will lock the chip so that the ops member can be
+ * accessed safely. The locking prevents tpm_chip_unregister from
+ * completing, so it should not be held for long periods.
+ *
+ * Return: 0 on success, -errno if the chip could not be reserved.
+ */
+int tpm_try_get_ops(struct tpm_chip *chip)
+{
+ int rc = -EIO;
+
+ get_device(&chip->dev);
+
+ down_read(&chip->ops_sem);
+ if (!chip->ops)
+ goto out_ops;
+
+ mutex_lock(&chip->tpm_mutex);
+ rc = tpm_chip_start(chip);
+ if (rc)
+ goto out_lock;
+
+ return 0;
+out_lock:
+ mutex_unlock(&chip->tpm_mutex);
+out_ops:
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_try_get_ops);
+
+/**
+ * tpm_put_ops() - Release a ref to the tpm_chip
+ * @chip: Chip to put
+ *
+ * This is the opposite pair to tpm_try_get_ops(). After this returns chip may
+ * be kfree'd.
+ */
+void tpm_put_ops(struct tpm_chip *chip)
+{
+ tpm_chip_stop(chip);
+ mutex_unlock(&chip->tpm_mutex);
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_put_ops);
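+
+/*
+ * Illustrative sketch (not part of this file): in-kernel callers bracket
+ * any use of the chip with tpm_try_get_ops()/tpm_put_ops() so that the
+ * chip cannot be unregistered underneath them. tpm_do_something() is a
+ * placeholder for any TPM operation.
+ *
+ *    rc = tpm_try_get_ops(chip);
+ *    if (rc)
+ *            return rc;
+ *    rc = tpm_do_something(chip);
+ *    tpm_put_ops(chip);
+ *    return rc;
+ */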
+
+/**
+ * tpm_default_chip() - find a TPM chip and get a reference to it
+ */
+struct tpm_chip *tpm_default_chip(void)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_num = 0;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip) {
+ get_device(&chip->dev);
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+
+ mutex_unlock(&idr_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tpm_default_chip);
+
+/**
+ * tpm_find_get_ops() - find and reserve a TPM chip
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ *
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_put_ops() after use.
+ * This function is for internal use only. It supports existing TPM callers
+ * by accepting NULL, but those callers should be converted to pass in a chip
+ * directly.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
+ */
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip) {
+ if (!tpm_try_get_ops(chip))
+ return chip;
+ return NULL;
+ }
+
+ chip = tpm_default_chip();
+ if (!chip)
+ return NULL;
+ rc = tpm_try_get_ops(chip);
+ /* release additional reference we got from tpm_default_chip() */
+ put_device(&chip->dev);
+ if (rc)
+ return NULL;
+ return chip;
+}
+
+/**
+ * tpm_dev_release() - free chip memory and the device number
+ * @dev: the character device for the TPM chip
+ *
+ * This is used as the release function for the character device.
+ */
+static void tpm_dev_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ mutex_lock(&idr_lock);
+ idr_remove(&dev_nums_idr, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ kfree(chip->work_space.context_buf);
+ kfree(chip->work_space.session_buf);
+ kfree(chip->allocated_banks);
+ kfree(chip);
+}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec. Then, calls bus- and device-specific shutdown code.
+ *
+ * Return: always 0 (i.e. success)
+ */
+int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ down_write(&chip->ops_sem);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
+ }
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+
+ return 0;
+}
+
+/**
+ * tpm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: device to which the chip is associated
+ * At this point pdev must be initialized, but does not have to
+ * be registered
+ * @ops: struct tpm_class_ops instance
+ *
+ * Allocates a new struct tpm_chip instance and assigns a free
+ * device number for it. Must be paired with put_device(&chip->dev).
+ */
+struct tpm_chip *tpm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&chip->tpm_mutex);
+ init_rwsem(&chip->ops_sem);
+
+ chip->ops = ops;
+
+ mutex_lock(&idr_lock);
+ rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
+ mutex_unlock(&idr_lock);
+ if (rc < 0) {
+ dev_err(pdev, "No available tpm device numbers\n");
+ kfree(chip);
+ return ERR_PTR(rc);
+ }
+ chip->dev_num = rc;
+
+ device_initialize(&chip->dev);
+
+ chip->dev.class = &tpm_class;
+ chip->dev.release = tpm_dev_release;
+ chip->dev.parent = pdev;
+ chip->dev.groups = chip->groups;
+
+ if (chip->dev_num == 0)
+ chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
+ else
+ chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
+
+ rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
+ if (rc)
+ goto out;
+
+ if (!pdev)
+ chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
+
+ cdev_init(&chip->cdev, &tpm_fops);
+ chip->cdev.owner = THIS_MODULE;
+
+ rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ chip->locality = -1;
+ return chip;
+
+out:
+ put_device(&chip->dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_alloc);
+
+static void tpm_put_device(void *dev)
+{
+ put_device(dev);
+}
+
+/**
+ * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: parent device to which the chip is associated
+ * @ops: struct tpm_class_ops instance
+ *
+ * Same as tpm_chip_alloc except devm is used to do the put_device
+ */
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_alloc(pdev, ops);
+ if (IS_ERR(chip))
+ return chip;
+
+ rc = devm_add_action_or_reset(pdev,
+ tpm_put_device,
+ &chip->dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_set_drvdata(pdev, chip);
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
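+
+/*
+ * Illustrative sketch (not part of this file): a typical driver probe
+ * allocates the chip with tpmm_chip_alloc() and then registers it; no
+ * explicit put_device() is needed on the error paths because devm drops
+ * the reference. my_probe/my_ops are placeholder names.
+ *
+ *    static int my_probe(struct platform_device *pdev)
+ *    {
+ *            struct tpm_chip *chip;
+ *
+ *            chip = tpmm_chip_alloc(&pdev->dev, &my_ops);
+ *            if (IS_ERR(chip))
+ *                    return PTR_ERR(chip);
+ *            return tpm_chip_register(chip);
+ *    }
+ */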
+
+static int tpm_add_char_device(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = cdev_device_add(&chip->cdev, &chip->dev);
+ if (rc) {
+ dev_err(&chip->dev,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->dev), MAJOR(chip->dev.devt),
+ MINOR(chip->dev.devt), rc);
+ return rc;
+ }
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) {
+ rc = tpm_devs_add(chip);
+ if (rc)
+ goto err_del_cdev;
+ }
+
+ /* Make the chip available. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, chip, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ return 0;
+
+err_del_cdev:
+ cdev_device_del(&chip->cdev, &chip->dev);
+ return rc;
+}
+
+static void tpm_del_char_device(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdev, &chip->dev);
+
+ /* Make the chip unavailable. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, NULL, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ /* Make the driver uncallable. */
+ down_write(&chip->ops_sem);
+
+ /*
+ * Check if chip->ops is still valid: if the controller driver
+ * unregisters the controller from its own shutdown handler, we are
+ * called twice and chip->ops has already been set to NULL.
+ */
+ if (chip->ops) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
+ }
+ chip->ops = NULL;
+ }
+ up_write(&chip->ops_sem);
+}
+
+static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
+{
+ struct attribute **i;
+
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
+ return;
+
+ sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
+
+ for (i = chip->groups[0]->attrs; *i != NULL; ++i)
+ sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name);
+}
+
+/* For compatibility with legacy sysfs paths we provide symlinks from the
+ * parent dev directory to selected names within the tpm chip directory. Old
+ * kernel versions created these files directly under the parent.
+ */
+static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+{
+ struct attribute **i;
+ int rc;
+
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
+ return 0;
+
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL);
+ if (rc && rc != -ENOENT)
+ return rc;
+
+ /* All the names from tpm-sysfs */
+ for (i = chip->groups[0]->attrs; *i != NULL; ++i) {
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL);
+ if (rc) {
+ tpm_del_legacy_sysfs(chip);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+ /* Give back zero bytes, as the TPM chip has not yet fully resumed: */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ return 0;
+
+ return tpm_get_random(chip, data, max);
+}
+
+static bool tpm_is_hwrng_enabled(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return false;
+ if (tpm_is_firmware_upgrade(chip))
+ return false;
+ if (chip->flags & TPM_CHIP_FLAG_HWRNG_DISABLED)
+ return false;
+ return true;
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+ if (!tpm_is_hwrng_enabled(chip))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+ "tpm-rng-%d", chip->dev_num);
+ chip->hwrng.name = chip->hwrng_name;
+ chip->hwrng.read = tpm_hwrng_read;
+ return hwrng_register(&chip->hwrng);
+}
+
+static int tpm_get_pcr_allocation(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (tpm_is_firmware_upgrade(chip))
+ return 0;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
+ tpm2_get_pcr_allocation(chip) :
+ tpm1_get_pcr_allocation(chip);
+
+ if (rc > 0)
+ return -ENODEV;
+
+ return rc;
+}
+
+/*
+ * tpm_chip_bootstrap() - Bootstrap TPM chip after power on
+ * @chip: TPM chip to use.
+ *
+ * Initialize TPM chip after power on. This is a one-shot function: subsequent
+ * calls will have no effect.
+ */
+int tpm_chip_bootstrap(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->flags & TPM_CHIP_FLAG_BOOTSTRAPPED)
+ return 0;
+
+ rc = tpm_chip_start(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_auto_startup(chip);
+ if (rc)
+ goto stop;
+
+ rc = tpm_get_pcr_allocation(chip);
+stop:
+ tpm_chip_stop(chip);
+
+ /*
+ * Unconditionally set, as driver initialization should cease when the
+ * bootstrapping process fails.
+ */
+ chip->flags |= TPM_CHIP_FLAG_BOOTSTRAPPED;
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_bootstrap);
+
+/*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+ *
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
+ *
+ * This function should be only called after the chip initialization is
+ * complete.
+ */
+int tpm_chip_register(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ return rc;
+
+ tpm_sysfs_add_device(chip);
+
+ tpm_bios_log_setup(chip);
+
+ tpm_add_ppi(chip);
+
+ rc = tpm_add_hwrng(chip);
+ if (rc)
+ goto out_ppi;
+
+ rc = tpm_add_char_device(chip);
+ if (rc)
+ goto out_hwrng;
+
+ rc = tpm_add_legacy_sysfs(chip);
+ if (rc) {
+ tpm_chip_unregister(chip);
+ return rc;
+ }
+
+ return 0;
+
+out_hwrng:
+ if (tpm_is_hwrng_enabled(chip))
+ hwrng_unregister(&chip->hwrng);
+out_ppi:
+ tpm_bios_log_teardown(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_chip_register);
+
+/*
+ * tpm_chip_unregister() - release the TPM driver
+ * @chip: TPM chip to use.
+ *
+ * Takes the chip first away from the list of available TPM chips and then
+ * cleans up all the resources reserved by tpm_chip_register().
+ *
+ * Once this function returns, the driver callbacks in 'ops' will not be
+ * running and will no longer start.
+ *
+ * NOTE: This function should only be called before deinitializing chip
+ * resources.
+ */
+void tpm_chip_unregister(struct tpm_chip *chip)
+{
+ tpm_del_legacy_sysfs(chip);
+ if (tpm_is_hwrng_enabled(chip))
+ hwrng_unregister(&chip->hwrng);
+ tpm_bios_log_teardown(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+ tpm_devs_remove(chip);
+ tpm_del_char_device(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
new file mode 100644
index 0000000000..30b4c288c1
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ */
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include "tpm.h"
+#include "tpm-dev.h"
+
+static struct workqueue_struct *tpm_dev_wq;
+
+static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ u8 *buf, size_t bufsiz)
+{
+ struct tpm_header *header = (void *)buf;
+ ssize_t ret, len;
+
+ ret = tpm2_prepare_space(chip, space, buf, bufsiz);
+ /* If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (ret == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ ret = sizeof(*header);
+ }
+ if (ret)
+ goto out_rc;
+
+ len = tpm_transmit(chip, buf, bufsiz);
+ if (len < 0)
+ ret = len;
+
+ if (!ret)
+ ret = tpm2_commit_space(chip, space, buf, &len);
+
+out_rc:
+ return ret ? ret : len;
+}
+
+static void tpm_dev_async_work(struct work_struct *work)
+{
+ struct file_priv *priv =
+ container_of(work, struct file_priv, async_work);
+ ssize_t ret;
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->command_enqueued = false;
+ ret = tpm_try_get_ops(priv->chip);
+ if (ret) {
+ priv->response_length = ret;
+ goto out;
+ }
+
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
+ tpm_put_ops(priv->chip);
+
+ /*
+ * If ret is > 0 then tpm_dev_transmit returned the size of the
+ * response. If ret is < 0 then tpm_dev_transmit failed and
+ * returned an error code.
+ */
+ if (ret != 0) {
+ priv->response_length = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ }
+out:
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
+static void user_reader_timeout(struct timer_list *t)
+{
+ struct file_priv *priv = from_timer(priv, t, user_read_timer);
+
+ pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
+ task_tgid_nr(current));
+
+ schedule_work(&priv->timeout_work);
+}
+
+static void tpm_timeout_work(struct work_struct *work)
+{
+ struct file_priv *priv = container_of(work, struct file_priv,
+ timeout_work);
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->response_read = true;
+ priv->response_length = 0;
+ memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
+void tpm_common_open(struct file *file, struct tpm_chip *chip,
+ struct file_priv *priv, struct tpm_space *space)
+{
+ priv->chip = chip;
+ priv->space = space;
+ priv->response_read = true;
+
+ mutex_init(&priv->buffer_mutex);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
+ INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+ INIT_WORK(&priv->async_work, tpm_dev_async_work);
+ init_waitqueue_head(&priv->async_wait);
+ file->private_data = priv;
+}
+
+ssize_t tpm_common_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct file_priv *priv = file->private_data;
+ ssize_t ret_size = 0;
+ int rc;
+
+ mutex_lock(&priv->buffer_mutex);
+
+ if (priv->response_length) {
+ priv->response_read = true;
+
+ ret_size = min_t(ssize_t, size, priv->response_length);
+ if (ret_size <= 0) {
+ priv->response_length = 0;
+ goto out;
+ }
+
+ rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
+ if (rc) {
+ memset(priv->data_buffer, 0, TPM_BUFSIZE);
+ priv->response_length = 0;
+ ret_size = -EFAULT;
+ } else {
+ memset(priv->data_buffer + *off, 0, ret_size);
+ priv->response_length -= ret_size;
+ *off += ret_size;
+ }
+ }
+
+out:
+ if (!priv->response_length) {
+ *off = 0;
+ del_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->timeout_work);
+ }
+ mutex_unlock(&priv->buffer_mutex);
+ return ret_size;
+}
+
+ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct file_priv *priv = file->private_data;
+ int ret = 0;
+
+ if (size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
+ /* Cannot perform a write until the read has cleared either via
+ * tpm_read or a user_read_timer timeout. This also prevents split
+ * buffered writes from blocking here.
+ */
+ if ((!priv->response_read && priv->response_length) ||
+ priv->command_enqueued) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (copy_from_user(priv->data_buffer, buf, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (size < 6 ||
+ size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->response_length = 0;
+ priv->response_read = false;
+ *off = 0;
+
+ /*
+ * If in nonblocking mode, schedule an async job to send the command
+ * and return the size. In case of error, the error code will be
+ * returned in the subsequent read call.
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ priv->command_enqueued = true;
+ queue_work(tpm_dev_wq, &priv->async_work);
+ mutex_unlock(&priv->buffer_mutex);
+ return size;
+ }
+
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
+ tpm_put_ops(priv->chip);
+
+ if (ret > 0) {
+ priv->response_length = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ ret = size;
+ }
+out:
+ mutex_unlock(&priv->buffer_mutex);
+ return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+ struct file_priv *priv = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &priv->async_wait, wait);
+ mutex_lock(&priv->buffer_mutex);
+
+ /*
+ * The response_length indicates if there is still a response
+ * (or part of it) to be consumed. Partial reads decrease it
+ * by the number of bytes read, and a write resets it to zero.
+ */
+ if (priv->response_length)
+ mask = EPOLLIN | EPOLLRDNORM;
+ else
+ mask = EPOLLOUT | EPOLLWRNORM;
+
+ mutex_unlock(&priv->buffer_mutex);
+ return mask;
+}
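+
+/*
+ * Illustrative sketch (not part of this file): seen from user space, the
+ * non-blocking flow against /dev/tpm0 is: write the marshaled command,
+ * poll() until EPOLLIN reports that the response is ready, then read it
+ * back. fd, cmd, cmd_len and rsp are placeholders.
+ *
+ *    struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *    write(fd, cmd, cmd_len);
+ *    poll(&pfd, 1, -1);
+ *    read(fd, rsp, sizeof(rsp));
+ */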
+
+/*
+ * Called on file close
+ */
+void tpm_common_release(struct file *file, struct file_priv *priv)
+{
+ flush_work(&priv->async_work);
+ del_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->timeout_work);
+ file->private_data = NULL;
+ priv->response_length = 0;
+}
+
+int __init tpm_dev_common_init(void)
+{
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+ return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+ if (tpm_dev_wq) {
+ destroy_workqueue(tpm_dev_wq);
+ tpm_dev_wq = NULL;
+ }
+}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
new file mode 100644
index 0000000000..e2c0baa69f
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Device file system interface to the TPM
+ */
+#include <linux/slab.h>
+#include "tpm-dev.h"
+
+static int tpm_open(struct inode *inode, struct file *file)
+{
+ struct tpm_chip *chip;
+ struct file_priv *priv;
+
+ chip = container_of(inode->i_cdev, struct tpm_chip, cdev);
+
+ /* The chip can only be opened once at a time; exclusivity is
+ * enforced by atomically testing and setting the is_open bit. */
+ if (test_and_set_bit(0, &chip->is_open)) {
+ dev_dbg(&chip->dev, "Another process owns this TPM\n");
+ return -EBUSY;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ goto out;
+
+ tpm_common_open(file, chip, priv, NULL);
+
+ return 0;
+
+ out:
+ clear_bit(0, &chip->is_open);
+ return -ENOMEM;
+}
+
+/*
+ * Called on file close
+ */
+static int tpm_release(struct inode *inode, struct file *file)
+{
+ struct file_priv *priv = file->private_data;
+
+ tpm_common_release(file, priv);
+ clear_bit(0, &priv->chip->is_open);
+ kfree(priv);
+
+ return 0;
+}
+
+const struct file_operations tpm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_common_read,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
+ .release = tpm_release,
+};
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
new file mode 100644
index 0000000000..f3742bcc73
--- /dev/null
+++ b/drivers/char/tpm/tpm-dev.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TPM_DEV_H
+#define _TPM_DEV_H
+
+#include <linux/poll.h>
+#include "tpm.h"
+
+struct file_priv {
+ struct tpm_chip *chip;
+ struct tpm_space *space;
+
+ struct mutex buffer_mutex;
+ struct timer_list user_read_timer; /* user needs to claim result */
+ struct work_struct timeout_work;
+ struct work_struct async_work;
+ wait_queue_head_t async_wait;
+ ssize_t response_length;
+ bool response_read;
+ bool command_enqueued;
+
+ u8 data_buffer[TPM_BUFSIZE];
+};
+
+void tpm_common_open(struct file *file, struct tpm_chip *chip,
+ struct file_priv *priv, struct tpm_space *space);
+ssize_t tpm_common_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off);
+ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off);
+__poll_t tpm_common_poll(struct file *file, poll_table *wait);
+
+void tpm_common_release(struct file *file, struct file_priv *priv);
+#endif
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
new file mode 100644
index 0000000000..66b16d26ee
--- /dev/null
+++ b/drivers/char/tpm/tpm-interface.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Note, the TPM chip is not interrupt driven (only polling)
+ * and can have very long timeouts (minutes!). Hence the unusual
+ * calls to msleep.
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/suspend.h>
+#include <linux/freezer.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+/*
+ * Bug workaround - some TPMs don't flush the most
+ * recently changed PCR on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile PCR.
+ */
+static u32 tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+ "PCR to use for dummy writes to facilitate flush on suspend.");
+
+/**
+ * tpm_calc_ordinal_duration() - calculate the maximum command duration
+ * @chip: TPM chip to use.
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns the maximum amount of time the chip could take
+ * to return the result for a particular ordinal in jiffies.
+ *
+ * Return: A maximal duration time for an ordinal in jiffies.
+ */
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return tpm2_calc_ordinal_duration(chip, ordinal);
+ else
+ return tpm1_calc_ordinal_duration(chip, ordinal);
+}
+EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+
+static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
+{
+ struct tpm_header *header = buf;
+ int rc;
+ ssize_t len = 0;
+ u32 count, ordinal;
+ unsigned long stop;
+
+ if (bufsiz < TPM_HEADER_SIZE)
+ return -EINVAL;
+
+ if (bufsiz > TPM_BUFSIZE)
+ bufsiz = TPM_BUFSIZE;
+
+ count = be32_to_cpu(header->length);
+ ordinal = be32_to_cpu(header->ordinal);
+ if (count == 0)
+ return -ENODATA;
+ if (count > bufsiz) {
+ dev_err(&chip->dev,
+ "invalid count value %x %zx\n", count, bufsiz);
+ return -E2BIG;
+ }
+
+ rc = chip->ops->send(chip, buf, count);
+ if (rc < 0) {
+ if (rc != -EPIPE)
+ dev_err(&chip->dev,
+ "%s: send(): error %d\n", __func__, rc);
+ return rc;
+ }
+
+ /* A sanity check. send() should just return zero on success, i.e.
+ * not the command length.
+ */
+ if (rc > 0) {
+ dev_warn(&chip->dev,
+ "%s: send(): invalid value %d\n", __func__, rc);
+ rc = 0;
+ }
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ goto out_recv;
+
+ stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
+ do {
+ u8 status = chip->ops->status(chip);
+ if ((status & chip->ops->req_complete_mask) ==
+ chip->ops->req_complete_val)
+ goto out_recv;
+
+ if (chip->ops->req_canceled(chip, status)) {
+ dev_err(&chip->dev, "Operation Canceled\n");
+ return -ECANCELED;
+ }
+
+ tpm_msleep(TPM_TIMEOUT_POLL);
+ rmb();
+ } while (time_before(jiffies, stop));
+
+ chip->ops->cancel(chip);
+ dev_err(&chip->dev, "Operation Timed out\n");
+ return -ETIME;
+
+out_recv:
+ len = chip->ops->recv(chip, buf, bufsiz);
+ if (len < 0) {
+ rc = len;
+ dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
+ } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
+ rc = -EFAULT;
+
+ return rc ? rc : len;
+}
+
+/**
+ * tpm_transmit - Internal kernel interface to transmit TPM commands.
+ * @chip: a TPM chip to use
+ * @buf: a TPM command buffer
+ * @bufsiz: length of the TPM command buffer
+ *
+ * A wrapper around tpm_try_transmit() that handles TPM2_RC_RETRY returns from
+ * the TPM and retransmits the command after a delay up to a maximum wait of
+ * TPM2_DURATION_LONG.
+ *
+ * Note that TPM 1.x never returns TPM2_RC_RETRY so the retry logic is TPM 2.0
+ * only.
+ *
+ * Return:
+ * * The response length - OK
+ * * -errno - A system error
+ */
+ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz)
+{
+ struct tpm_header *header = (struct tpm_header *)buf;
+ /* space for header and handles */
+ u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
+ unsigned int delay_msec = TPM2_DURATION_SHORT;
+ u32 rc = 0;
+ ssize_t ret;
+ const size_t save_size = min(sizeof(save), bufsiz);
+ /* the command code is where the return code will be */
+ u32 cc = be32_to_cpu(header->return_code);
+
+ /*
+ * Subtlety here: if we have a space, the handles will be
+ * transformed, so when we restore the header we also have to
+ * restore the handles.
+ */
+ memcpy(save, buf, save_size);
+
+ for (;;) {
+ ret = tpm_try_transmit(chip, buf, bufsiz);
+ if (ret < 0)
+ break;
+ rc = be32_to_cpu(header->return_code);
+ if (rc != TPM2_RC_RETRY && rc != TPM2_RC_TESTING)
+ break;
+ /*
+ * Return immediately if the self test reports that it is
+ * still running, to shorten boot time.
+ */
+ if (rc == TPM2_RC_TESTING && cc == TPM2_CC_SELF_TEST)
+ break;
+
+ if (delay_msec > TPM2_DURATION_LONG) {
+ if (rc == TPM2_RC_RETRY)
+ dev_err(&chip->dev, "in retry loop\n");
+ else
+ dev_err(&chip->dev,
+ "self test is still running\n");
+ break;
+ }
+ tpm_msleep(delay_msec);
+ delay_msec *= 2;
+ memcpy(buf, save, save_size);
+ }
+ return ret;
+}
+
+/**
+ * tpm_transmit_cmd - send a tpm command to the device
+ * @chip: a TPM chip to use
+ * @buf: a TPM command buffer
+ * @min_rsp_body_length: minimum expected length of response body
+ * @desc: command description used in the error message
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
+ */
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc)
+{
+ const struct tpm_header *header = (struct tpm_header *)buf->data;
+ int err;
+ ssize_t len;
+
+ len = tpm_transmit(chip, buf->data, PAGE_SIZE);
+ if (len < 0)
+ return len;
+
+ err = be32_to_cpu(header->return_code);
+ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+ && err != TPM2_RC_TESTING && desc)
+ dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
+ desc);
+ if (err)
+ return err;
+
+ if (len < min_rsp_body_length + TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
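+
+/*
+ * Illustrative sketch (not part of this file): internal callers build the
+ * command with the tpm_buf helpers and hand it to tpm_transmit_cmd(), as
+ * pubek_show() in tpm-sysfs.c does. TPM_ORD_EXAMPLE and payload are
+ * placeholders.
+ *
+ *    struct tpm_buf buf;
+ *
+ *    rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_EXAMPLE);
+ *    if (rc)
+ *            return rc;
+ *    tpm_buf_append(&buf, payload, sizeof(payload));
+ *    rc = tpm_transmit_cmd(chip, &buf, 0, "attempting example command");
+ *    tpm_buf_destroy(&buf);
+ */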
+
+int tpm_get_timeouts(struct tpm_chip *chip)
+{
+ if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+ return 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return tpm2_get_timeouts(chip);
+ else
+ return tpm1_get_timeouts(chip);
+}
+EXPORT_SYMBOL_GPL(tpm_get_timeouts);
+
+/**
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ *
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
+ */
+int tpm_is_tpm2(struct tpm_chip *chip)
+{
+ int rc;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
+
+ tpm_put_ops(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_is_tpm2);
+
+/**
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @digest: the PCR bank and buffer current PCR value is written to
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest)
+{
+ int rc;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
+ else
+ rc = tpm1_pcr_read(chip, pcr_idx, digest->digest);
+
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_read);
+
+/**
+ * tpm_pcr_extend - extend a PCR value in SHA1 bank.
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @digests: array of tpm_digest structures used to extend PCRs
+ *
+ * Note: callers must pass a digest for every allocated PCR bank, in the same
+ * order of the banks in chip->allocated_banks.
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests)
+{
+ int rc;
+ int i;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_pcr_extend(chip, pcr_idx, digests);
+ goto out;
+ }
+
+ rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest,
+ "attempting extend a PCR value");
+
+out:
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_pcr_extend);
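+
+/*
+ * Illustrative sketch (not part of this file): since a digest must be
+ * supplied for every allocated bank, callers typically size the array from
+ * chip->nr_allocated_banks and fill in the per-bank algorithm IDs before
+ * extending. 'data' and PCR index 10 are placeholders, and 'chip' is
+ * assumed to have been obtained e.g. via tpm_default_chip().
+ *
+ *    struct tpm_digest *digests;
+ *    int i;
+ *
+ *    digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ *                      GFP_KERNEL);
+ *    if (!digests)
+ *            return -ENOMEM;
+ *    for (i = 0; i < chip->nr_allocated_banks; i++) {
+ *            digests[i].alg_id = chip->allocated_banks[i].alg_id;
+ *            memcpy(digests[i].digest, data, SHA1_DIGEST_SIZE);
+ *    }
+ *    rc = tpm_pcr_extend(chip, 10, digests);
+ *    kfree(digests);
+ */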
+
+/**
+ * tpm_send - send a TPM command
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd: a TPM command buffer
+ * @buflen: the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ buf.data = cmd;
+	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to send a command");
+
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_send);
+
+int tpm_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (!(chip->ops->flags & TPM_OPS_AUTO_STARTUP))
+ return 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_auto_startup(chip);
+ else
+ rc = tpm1_auto_startup(chip);
+
+ return rc;
+}
+
+/*
+ * We are about to suspend. Save the TPM state
+ * so that it can be restored.
+ */
+int tpm_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int rc = 0;
+
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
+ goto suspended;
+
+ if ((chip->flags & TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED) &&
+ !pm_suspend_via_firmware())
+ goto suspended;
+
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ else
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+ tpm_put_ops(chip);
+ }
+
+suspended:
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+
+ if (rc)
+ dev_err(dev, "Ignoring error %d while suspending\n", rc);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_suspend);
+
+/*
+ * Resume from a power save. The BIOS already restored
+ * the TPM state.
+ */
+int tpm_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip == NULL)
+ return -ENODEV;
+
+ chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
+
+ /*
+ * Guarantee that SUSPENDED is written last, so that hwrng does not
+ * activate before the chip has been fully resumed.
+ */
+ wmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_pm_resume);
+
+/**
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
+ *
+ * Return: number of random bytes read or a negative error value.
+ */
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
+{
+ int rc;
+
+ if (!out || max > TPM_MAX_RNG_DATA)
+ return -EINVAL;
+
+ chip = tpm_find_get_ops(chip);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_get_random(chip, out, max);
+ else
+ rc = tpm1_get_random(chip, out, max);
+
+ tpm_put_ops(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_get_random);
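+
+/*
+ * Illustrative sketch (not part of this file): kernel users can pull
+ * entropy from the TPM RNG with a single call; passing a NULL chip selects
+ * the default chip, and a short read is possible, so check the returned
+ * length.
+ *
+ *    u8 seed[32];
+ *    int n;
+ *
+ *    n = tpm_get_random(NULL, seed, sizeof(seed));
+ *    if (n < 0)
+ *            return n;
+ */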
+
+static int __init tpm_init(void)
+{
+ int rc;
+
+ rc = class_register(&tpm_class);
+ if (rc) {
+ pr_err("couldn't create tpm class\n");
+ return rc;
+ }
+
+ rc = class_register(&tpmrm_class);
+ if (rc) {
+ pr_err("couldn't create tpmrm class\n");
+ goto out_destroy_tpm_class;
+ }
+
+ rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm");
+ if (rc < 0) {
+ pr_err("tpm: failed to allocate char dev region\n");
+ goto out_destroy_tpmrm_class;
+ }
+
+ rc = tpm_dev_common_init();
+ if (rc) {
+ pr_err("tpm: failed to allocate char dev region\n");
+ goto out_unreg_chrdev;
+ }
+
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES);
+out_destroy_tpmrm_class:
+ class_unregister(&tpmrm_class);
+out_destroy_tpm_class:
+ class_unregister(&tpm_class);
+
+ return rc;
+}
+
+static void __exit tpm_exit(void)
+{
+ idr_destroy(&dev_nums_idr);
+ class_unregister(&tpm_class);
+ class_unregister(&tpmrm_class);
+ unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
+ tpm_dev_common_exit();
+}
+
+subsys_initcall(tpm_init);
+module_exit(tpm_exit);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
new file mode 100644
index 0000000000..54c71473aa
--- /dev/null
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Copyright (C) 2013 Obsidian Research Corp
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * sysfs filesystem inspection interface to the TPM
+ */
+#include <linux/device.h>
+#include "tpm.h"
+
+struct tpm_readpubek_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12];
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+} __packed;
+
+#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
+#define TPM_ORD_READPUBEK 124
+
+static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_buf tpm_buf;
+ struct tpm_readpubek_out *out;
+ int i;
+ char *str = buf;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ char anti_replay[20];
+
+ memset(&anti_replay, 0, sizeof(anti_replay));
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
+ goto out_ops;
+
+ tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+ if (tpm_transmit_cmd(chip, &tpm_buf, READ_PUBEK_RESULT_MIN_BODY_SIZE,
+ "attempting to read the PUBEK"))
+ goto out_buf;
+
+ out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
+ str +=
+ sprintf(str,
+ "Algorithm: %4ph\n"
+ "Encscheme: %2ph\n"
+ "Sigscheme: %2ph\n"
+ "Parameters: %12ph\n"
+ "Modulus length: %d\n"
+ "Modulus:\n",
+ out->algorithm,
+ out->encscheme,
+ out->sigscheme,
+ out->parameters,
+ be32_to_cpu(out->keysize));
+
+ for (i = 0; i < 256; i += 16)
+ str += sprintf(str, "%16ph\n", &out->modulus[i]);
+
+out_buf:
+ tpm_buf_destroy(&tpm_buf);
+out_ops:
+ tpm_put_ops(chip);
+ return str - buf;
+}
+static DEVICE_ATTR_RO(pubek);
+
+static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ cap_t cap;
+ u8 digest[TPM_DIGEST_SIZE];
+ u32 i, j, num_pcrs;
+ char *str = buf;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+ "attempting to determine the number of PCRS",
+ sizeof(cap.num_pcrs))) {
+ tpm_put_ops(chip);
+ return 0;
+ }
+
+ num_pcrs = be32_to_cpu(cap.num_pcrs);
+ for (i = 0; i < num_pcrs; i++) {
+ if (tpm1_pcr_read(chip, i, digest)) {
+ str = buf;
+ break;
+ }
+ str += sprintf(str, "PCR-%02d: ", i);
+ for (j = 0; j < TPM_DIGEST_SIZE; j++)
+ str += sprintf(str, "%02X ", digest[j]);
+ str += sprintf(str, "\n");
+ }
+ tpm_put_ops(chip);
+ return str - buf;
+}
+static DEVICE_ATTR_RO(pcrs);
+
+static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state",
+ sizeof(cap.perm_flags)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(enabled);
+
+static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state",
+ sizeof(cap.perm_flags)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(active);
+
+static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+	if (tpm1_getcap(chip, TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state",
+ sizeof(cap.owned)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", cap.owned);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(owned);
+
+static ssize_t temp_deactivated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t rc = 0;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+	if (tpm1_getcap(chip, TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state",
+ sizeof(cap.stclear_flags)))
+ goto out_ops;
+
+ rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(temp_deactivated);
+
+static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm1_version *version;
+ ssize_t rc = 0;
+ char *str = buf;
+ cap_t cap;
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ if (tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+ "attempting to determine the manufacturer",
+ sizeof(cap.manufacturer_id)))
+ goto out_ops;
+
+ str += sprintf(str, "Manufacturer: 0x%x\n",
+ be32_to_cpu(cap.manufacturer_id));
+
+ /* TPM 1.2 */
+ if (!tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2))) {
+ version = &cap.version2.version;
+ goto out_print;
+ }
+
+ /* TPM 1.1 */
+ if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1))) {
+ goto out_ops;
+ }
+
+ version = &cap.version1;
+
+out_print:
+ str += sprintf(str,
+ "TCG version: %d.%d\nFirmware version: %d.%d\n",
+ version->major, version->minor,
+ version->rev_major, version->rev_minor);
+
+ rc = str - buf;
+
+out_ops:
+ tpm_put_ops(chip);
+ return rc;
+}
+static DEVICE_ATTR_RO(caps);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ if (tpm_try_get_ops(chip))
+ return 0;
+
+ chip->ops->cancel(chip);
+ tpm_put_ops(chip);
+ return count;
+}
+static DEVICE_ATTR_WO(cancel);
+
+static ssize_t durations_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ if (chip->duration[TPM_LONG] == 0)
+ return 0;
+
+ return sprintf(buf, "%d %d %d [%s]\n",
+ jiffies_to_usecs(chip->duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->duration[TPM_LONG]),
+ chip->duration_adjusted
+ ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(durations);
+
+static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%d %d %d %d [%s]\n",
+ jiffies_to_usecs(chip->timeout_a),
+ jiffies_to_usecs(chip->timeout_b),
+ jiffies_to_usecs(chip->timeout_c),
+ jiffies_to_usecs(chip->timeout_d),
+ chip->timeout_adjusted
+ ? "adjusted" : "original");
+}
+static DEVICE_ATTR_RO(timeouts);
+
+static ssize_t tpm_version_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+ ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ &dev_attr_tpm_version_major.attr,
+ NULL,
+};
+
+static struct attribute *tpm2_dev_attrs[] = {
+ &dev_attr_tpm_version_major.attr,
+ NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+ .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+ .attrs = tpm2_dev_attrs,
+};
+
+struct tpm_pcr_attr {
+ int alg_id;
+ int pcr;
+ struct device_attribute attr;
+};
+
+#define to_tpm_pcr_attr(a) container_of(a, struct tpm_pcr_attr, attr)
+
+static ssize_t pcr_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_pcr_attr *ha = to_tpm_pcr_attr(attr);
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ struct tpm_digest digest;
+ int i;
+ int digest_size = 0;
+ int rc;
+ char *str = buf;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ if (ha->alg_id == chip->allocated_banks[i].alg_id)
+ digest_size = chip->allocated_banks[i].digest_size;
+ /* should never happen */
+ if (!digest_size)
+ return -EINVAL;
+
+ digest.alg_id = ha->alg_id;
+ rc = tpm_pcr_read(chip, ha->pcr, &digest);
+ if (rc)
+ return rc;
+ for (i = 0; i < digest_size; i++)
+ str += sprintf(str, "%02X", digest.digest[i]);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+/*
+ * The following set of defines represents all the magic to build
+ * the per hash attribute groups for displaying each bank of PCRs.
+ * The only slight problem with this approach is that every PCR is
+ * hard coded to be present, so you don't know if a PCR is missing
+ * until a cat of the file returns -EINVAL.
+ *
+ * Also note you must ignore checkpatch warnings in this macro
+ * code. This is deep macro magic that checkpatch.pl doesn't
+ * understand.
+ */
+
+/* Note, this must match TPM2_PLATFORM_PCR which is fixed at 24. */
+#define _TPM_HELPER(_alg, _hash, F) \
+ F(_alg, _hash, 0) \
+ F(_alg, _hash, 1) \
+ F(_alg, _hash, 2) \
+ F(_alg, _hash, 3) \
+ F(_alg, _hash, 4) \
+ F(_alg, _hash, 5) \
+ F(_alg, _hash, 6) \
+ F(_alg, _hash, 7) \
+ F(_alg, _hash, 8) \
+ F(_alg, _hash, 9) \
+ F(_alg, _hash, 10) \
+ F(_alg, _hash, 11) \
+ F(_alg, _hash, 12) \
+ F(_alg, _hash, 13) \
+ F(_alg, _hash, 14) \
+ F(_alg, _hash, 15) \
+ F(_alg, _hash, 16) \
+ F(_alg, _hash, 17) \
+ F(_alg, _hash, 18) \
+ F(_alg, _hash, 19) \
+ F(_alg, _hash, 20) \
+ F(_alg, _hash, 21) \
+ F(_alg, _hash, 22) \
+ F(_alg, _hash, 23)
+
+/* ignore checkpatch warning about trailing ; in macro. */
+#define PCR_ATTR(_alg, _hash, _pcr) \
+ static struct tpm_pcr_attr dev_attr_pcr_##_hash##_##_pcr = { \
+ .alg_id = _alg, \
+ .pcr = _pcr, \
+ .attr = { \
+ .attr = { \
+ .name = __stringify(_pcr), \
+ .mode = 0444 \
+ }, \
+ .show = pcr_value_show \
+ } \
+ };
+
+#define PCR_ATTRS(_alg, _hash) \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR)
+
+/* ignore checkpatch warning about trailing , in macro. */
+#define PCR_ATTR_VAL(_alg, _hash, _pcr) \
+ &dev_attr_pcr_##_hash##_##_pcr.attr.attr,
+
+#define PCR_ATTR_GROUP_ARRAY(_alg, _hash) \
+ static struct attribute *pcr_group_attrs_##_hash[] = { \
+ _TPM_HELPER(_alg, _hash, PCR_ATTR_VAL) \
+ NULL \
+ }
+
+#define PCR_ATTR_GROUP(_alg, _hash) \
+ static struct attribute_group pcr_group_##_hash = { \
+ .name = "pcr-" __stringify(_hash), \
+ .attrs = pcr_group_attrs_##_hash \
+ }
+
+#define PCR_ATTR_BUILD(_alg, _hash) \
+ PCR_ATTRS(_alg, _hash) \
+ PCR_ATTR_GROUP_ARRAY(_alg, _hash); \
+ PCR_ATTR_GROUP(_alg, _hash)
+/*
+ * End of macro structure to build an attribute group containing 24
+ * PCR value files for each supported hash algorithm
+ */
+
+/*
+ * The next set of macros implements the cleverness for each hash to
+ * build a static attribute group called pcr_group_<hash> which can be
+ * added to chip->groups[].
+ *
+ * The first argument is the TPM algorithm id and the second is the
+ * hash used as both the suffix and the group name. Note: the group
+ * name is a directory in the top level tpm class with the name
+ * pcr-<hash>, so it must not clash with any other names already
+ * in the sysfs directory.
+ */
+PCR_ATTR_BUILD(TPM_ALG_SHA1, sha1);
+PCR_ATTR_BUILD(TPM_ALG_SHA256, sha256);
+PCR_ATTR_BUILD(TPM_ALG_SHA384, sha384);
+PCR_ATTR_BUILD(TPM_ALG_SHA512, sha512);
+PCR_ATTR_BUILD(TPM_ALG_SM3_256, sm3);
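+/*
+ * Each invocation above yields a sysfs directory such as
+ * /sys/class/tpm/tpm0/pcr-sha256 containing one file per PCR (0-23),
+ * so a PCR value can be read with e.g. "cat .../pcr-sha256/10".
+ */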
+
+void tpm_sysfs_add_device(struct tpm_chip *chip)
+{
+ int i;
+
+ WARN_ON(chip->groups_cnt != 0);
+
+ if (tpm_is_firmware_upgrade(chip))
+ return;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+ else
+ chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
+
+ /* add one group for each bank hash */
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ switch (chip->allocated_banks[i].alg_id) {
+ case TPM_ALG_SHA1:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha1;
+ break;
+ case TPM_ALG_SHA256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha256;
+ break;
+ case TPM_ALG_SHA384:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha384;
+ break;
+ case TPM_ALG_SHA512:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sha512;
+ break;
+ case TPM_ALG_SM3_256:
+ chip->groups[chip->groups_cnt++] = &pcr_group_sm3;
+ break;
+ default:
+			/*
+			 * If this triggers, send a patch adding both a
+			 * PCR_ATTR_BUILD() macro above for the missing
+			 * algorithm and an additional case in this switch
+			 * statement.
+			 */
+ dev_err(&chip->dev,
+ "TPM with unsupported bank algorithm 0x%04x",
+ chip->allocated_banks[i].alg_id);
+ break;
+ }
+ }
+
+ /*
+ * This will only trigger if someone has added an additional
+ * hash to the tpm_algorithms enum without incrementing
+ * TPM_MAX_HASHES.
+ */
+ WARN_ON(chip->groups_cnt > TPM_MAX_HASHES + 1);
+}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
new file mode 100644
index 0000000000..61445f1dc4
--- /dev/null
+++ b/drivers/char/tpm/tpm.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#ifndef __TPM_H__
+#define __TPM_H__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/tpm.h>
+#include <linux/tpm_eventlog.h>
+
+#ifdef CONFIG_X86
+#include <asm/intel-family.h>
+#endif
+
+#define TPM_MINOR 224 /* officially assigned */
+#define TPM_BUFSIZE 4096
+#define TPM_NUM_DEVICES 65536
+#define TPM_RETRY 50
+
+enum tpm_timeout {
+ TPM_TIMEOUT = 5, /* msecs */
+ TPM_TIMEOUT_RETRY = 100, /* msecs */
+ TPM_TIMEOUT_RANGE_US = 300, /* usecs */
+ TPM_TIMEOUT_POLL = 1, /* msecs */
+ TPM_TIMEOUT_USECS_MIN = 100, /* usecs */
+ TPM_TIMEOUT_USECS_MAX = 500 /* usecs */
+};
+
+/* TPM addresses */
+enum tpm_addr {
+ TPM_SUPERIO_ADDR = 0x2E,
+ TPM_ADDR = 0x4E,
+};
+
+#define TPM_WARN_RETRY 0x800
+#define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED 0x6
+#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
+#define TPM_ERR_INVALID_POSTINIT 38
+
+#define TPM_TAG_RQU_COMMAND 193
+
+/* TPM2 specific constants. */
+#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
+
+struct stclear_flags_t {
+ __be16 tag;
+ u8 deactivated;
+ u8 disableForceClear;
+ u8 physicalPresence;
+ u8 physicalPresenceLock;
+ u8 bGlobalLock;
+} __packed;
+
+struct tpm1_version {
+ u8 major;
+ u8 minor;
+ u8 rev_major;
+ u8 rev_minor;
+} __packed;
+
+struct tpm1_version2 {
+ __be16 tag;
+ struct tpm1_version version;
+} __packed;
+
+struct timeout_t {
+ __be32 a;
+ __be32 b;
+ __be32 c;
+ __be32 d;
+} __packed;
+
+struct duration_t {
+ __be32 tpm_short;
+ __be32 tpm_medium;
+ __be32 tpm_long;
+} __packed;
+
+struct permanent_flags_t {
+ __be16 tag;
+ u8 disable;
+ u8 ownership;
+ u8 deactivated;
+ u8 readPubek;
+ u8 disableOwnerClear;
+ u8 allowMaintenance;
+ u8 physicalPresenceLifetimeLock;
+ u8 physicalPresenceHWEnable;
+ u8 physicalPresenceCMDEnable;
+ u8 CEKPUsed;
+ u8 TPMpost;
+ u8 TPMpostLock;
+ u8 FIPS;
+ u8 operator;
+ u8 enableRevokeEK;
+ u8 nvLocked;
+ u8 readSRKPub;
+ u8 tpmEstablished;
+ u8 maintenanceDone;
+ u8 disableFullDALogicInfo;
+} __packed;
+
+typedef union {
+ struct permanent_flags_t perm_flags;
+ struct stclear_flags_t stclear_flags;
+ __u8 owned;
+ __be32 num_pcrs;
+ struct tpm1_version version1;
+ struct tpm1_version2 version2;
+ __be32 manufacturer_id;
+ struct timeout_t timeout;
+ struct duration_t duration;
+} cap_t;
+
+enum tpm_capabilities {
+ TPM_CAP_FLAG = 4,
+ TPM_CAP_PROP = 5,
+ TPM_CAP_VERSION_1_1 = 0x06,
+ TPM_CAP_VERSION_1_2 = 0x1A,
+};
+
+enum tpm_sub_capabilities {
+ TPM_CAP_PROP_PCR = 0x101,
+ TPM_CAP_PROP_MANUFACTURER = 0x103,
+ TPM_CAP_FLAG_PERM = 0x108,
+ TPM_CAP_FLAG_VOL = 0x109,
+ TPM_CAP_PROP_OWNER = 0x111,
+ TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+ TPM_CAP_PROP_TIS_DURATION = 0x120,
+};
+
+enum tpm2_pt_props {
+ TPM2_PT_NONE = 0x00000000,
+ TPM2_PT_GROUP = 0x00000100,
+ TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
+ TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
+ TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
+ TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
+ TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
+ TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
+ TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
+ TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
+ TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
+ TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
+ TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
+ TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
+ TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
+ TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
+ TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
+ TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
+ TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
+ TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
+ TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
+ TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
+ TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
+ TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
+ TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
+ TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
+ TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
+ TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
+ TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
+ TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
+ TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
+ TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
+ TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
+ TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
+ TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
+ TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
+ TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
+ TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
+ TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
+ TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
+ TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
+ TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
+ TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
+ TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
+ TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
+ TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
+ TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
+ TPM2_PT_MODES = TPM2_PT_FIXED + 45,
+ TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
+ TPM2_PT_VAR = TPM2_PT_GROUP * 2,
+ TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
+ TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
+ TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
+ TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
+ TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
+ TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
+ TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
+ TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
+ TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
+ TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
+ TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
+ TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
+ TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
+ TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
+ TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
+ TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
+ TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
+ TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
+ TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
+ TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
+ TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
+};
+
+/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
+ * bytes, but 128 is still a relatively large number of random bytes and
+ * anything much bigger causes users of struct tpm_cmd_t to start getting
+ * compiler warnings about stack frame size. */
+#define TPM_MAX_RNG_DATA 128
+
+extern const struct class tpm_class;
+extern const struct class tpmrm_class;
+extern dev_t tpm_devt;
+extern const struct file_operations tpm_fops;
+extern const struct file_operations tpmrm_fops;
+extern struct idr dev_nums_idr;
+
+ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
+int tpm_get_timeouts(struct tpm_chip *);
+int tpm_auto_startup(struct tpm_chip *chip);
+
+int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr);
+int tpm1_auto_startup(struct tpm_chip *chip);
+int tpm1_do_selftest(struct tpm_chip *chip);
+int tpm1_get_timeouts(struct tpm_chip *chip);
+unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash,
+ const char *log_msg);
+int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
+ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+ const char *desc, size_t min_cap_length);
+int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm1_get_pcr_allocation(struct tpm_chip *chip);
+unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm_pm_suspend(struct device *dev);
+int tpm_pm_resume(struct device *dev);
+int tpm_class_shutdown(struct device *dev);
+
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+ usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+ delay_msec * 1000);
+}
+
+int tpm_chip_bootstrap(struct tpm_chip *chip);
+int tpm_chip_start(struct tpm_chip *chip);
+void tpm_chip_stop(struct tpm_chip *chip);
+struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
+
+struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops);
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops);
+int tpm_chip_register(struct tpm_chip *chip);
+void tpm_chip_unregister(struct tpm_chip *chip);
+
+void tpm_sysfs_add_device(struct tpm_chip *chip);
+
+#ifdef CONFIG_ACPI
+extern void tpm_add_ppi(struct tpm_chip *chip);
+#else
+static inline void tpm_add_ppi(struct tpm_chip *chip)
+{
+}
+#endif
+
+int tpm2_get_timeouts(struct tpm_chip *chip);
+int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest, u16 *digest_size_ptr);
+int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests);
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max);
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
+ u32 *value, const char *desc);
+
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
+int tpm2_auto_startup(struct tpm_chip *chip);
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+int tpm2_probe(struct tpm_chip *chip);
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
+int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
+void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
+void tpm2_flush_space(struct tpm_chip *chip);
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ size_t cmdsiz);
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
+ size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
+
+void tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
+int tpm_dev_common_init(void);
+void tpm_dev_common_exit(void);
+#endif
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
new file mode 100644
index 0000000000..cf64c73851
--- /dev/null
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+#define TPM_MAX_ORDINAL 243
+
+/*
+ * Array with one entry per ordinal defining the maximum amount
+ * of time the chip could take to return the result. The ordinal
+ * designation of short, medium or long is defined in a table in
+ * TCG Specification TPM Main Part 2 TPM Structures Section 17. The
+ * values of the SHORT, MEDIUM, and LONG durations are retrieved
+ * from the chip during initialization with a call to tpm_get_timeouts.
+ */
+static const u8 tpm1_ordinal_duration[TPM_MAX_ORDINAL] = {
+ TPM_UNDEFINED, /* 0 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 5 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 10 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_LONG,
+ TPM_MEDIUM, /* 15 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT, /* 20 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT, /* 25 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 30 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 35 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 40 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 45 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_MEDIUM, /* 50 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 55 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 60 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 65 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 70 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 75 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 80 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 85 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 90 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 95 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 100 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 105 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 110 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 115 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 120 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 125 */
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 130 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 135 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 140 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 145 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 150 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 155 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 160 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 165 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 170 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 175 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 180 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM, /* 185 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 190 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 195 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 200 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 205 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 210 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 215 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 220 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 225 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 230 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 235 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 240 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+};
+
+/**
+ * tpm1_calc_ordinal_duration() - calculate the maximum command duration
+ * @chip: TPM chip to use.
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns the maximum amount of time the chip could take
+ * to return the result for a particular ordinal in jiffies.
+ *
+ * Return: A maximal duration time for an ordinal in jiffies.
+ */
+unsigned long tpm1_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ int duration_idx = TPM_UNDEFINED;
+ int duration = 0;
+
+ /*
+ * We only have a duration table for protected commands, where the upper
+ * 16 bits are 0. For the few other ordinals the fallback will be used.
+ */
+ if (ordinal < TPM_MAX_ORDINAL)
+ duration_idx = tpm1_ordinal_duration[ordinal];
+
+ if (duration_idx != TPM_UNDEFINED)
+ duration = chip->duration[duration_idx];
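+	/* Fall back to a conservative two minutes when no duration is known. */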
+ if (duration <= 0)
+ return 2 * 60 * HZ;
+ else
+ return duration;
+}
+
+#define TPM_ORD_STARTUP 153
+#define TPM_ST_CLEAR 1
+
+/**
+ * tpm1_startup() - turn on the TPM
+ * @chip: TPM chip to use
+ *
+ * Normally the firmware should start the TPM. This function is provided as a
+ * workaround if this does not happen. A legitimate case for this is, for
+ * example, when a TPM emulator is used.
+ *
+ * Return: same as tpm_transmit_cmd()
+ */
+static int tpm1_startup(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ dev_info(&chip->dev, "starting up the TPM manually\n");
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP);
+ if (rc < 0)
+ return rc;
+
+ tpm_buf_append_u16(&buf, TPM_ST_CLEAR);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM");
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+int tpm1_get_timeouts(struct tpm_chip *chip)
+{
+ cap_t cap;
+ unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+ unsigned long durations[3];
+ ssize_t rc;
+
+ rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
+ sizeof(cap.timeout));
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ if (tpm1_startup(chip))
+ return rc;
+
+ rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+ "attempting to determine the timeouts",
+ sizeof(cap.timeout));
+ }
+
+ if (rc) {
+ dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n",
+ rc);
+ return rc;
+ }
+
+ timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
+ timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
+ timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
+ timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
+ timeout_chip[0] = be32_to_cpu(cap.timeout.a);
+ timeout_chip[1] = be32_to_cpu(cap.timeout.b);
+ timeout_chip[2] = be32_to_cpu(cap.timeout.c);
+ timeout_chip[3] = be32_to_cpu(cap.timeout.d);
+ memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
+
+ /*
+ * Provide ability for vendor overrides of timeout values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_timeouts)
+ chip->ops->update_timeouts(chip, timeout_eff);
+
+ if (!chip->timeout_adjusted) {
+ /* Restore default if chip reported 0 */
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
+ if (timeout_eff[i])
+ continue;
+
+ timeout_eff[i] = timeout_old[i];
+ chip->timeout_adjusted = true;
+ }
+
+ if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
+ /* timeouts in msec rather usec */
+ for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
+ timeout_eff[i] *= 1000;
+ chip->timeout_adjusted = true;
+ }
+ }
+
+ /* Report adjusted timeouts */
+ if (chip->timeout_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
+ timeout_chip[0], timeout_eff[0],
+ timeout_chip[1], timeout_eff[1],
+ timeout_chip[2], timeout_eff[2],
+ timeout_chip[3], timeout_eff[3]);
+ }
+
+ chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
+ chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
+ chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
+ chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
+
+ rc = tpm1_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+ "attempting to determine the durations",
+ sizeof(cap.duration));
+ if (rc)
+ return rc;
+
+ chip->duration[TPM_SHORT] =
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
+ chip->duration[TPM_MEDIUM] =
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
+ chip->duration[TPM_LONG] =
+ usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
+ chip->duration[TPM_LONG_LONG] = 0; /* not used under 1.2 */
+
+ /*
+ * Provide the ability for vendor overrides of duration values in case
+ * of misreporting.
+ */
+ if (chip->ops->update_durations)
+ chip->ops->update_durations(chip, durations);
+
+ if (chip->duration_adjusted) {
+ dev_info(&chip->dev, HW_ERR "Adjusting reported durations.");
+ chip->duration[TPM_SHORT] = durations[0];
+ chip->duration[TPM_MEDIUM] = durations[1];
+ chip->duration[TPM_LONG] = durations[2];
+ }
+
+ /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
+ * value wrong and apparently reports msecs rather than usecs. So we
+ * fix up the resulting too-small TPM_SHORT value to make things work.
+	 * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
+ */
+ if (chip->duration[TPM_SHORT] < (HZ / 100)) {
+ chip->duration[TPM_SHORT] = HZ;
+ chip->duration[TPM_MEDIUM] *= 1000;
+ chip->duration[TPM_LONG] *= 1000;
+ chip->duration_adjusted = true;
+ dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
+ }
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
+ return 0;
+}
+
+#define TPM_ORD_PCR_EXTEND 20
+int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash,
+ const char *log_msg)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+ tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE);
+
+ rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, log_msg);
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+#define TPM_ORD_GET_CAP 101
+ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+ const char *desc, size_t min_cap_length)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP);
+ if (rc)
+ return rc;
+
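+	/*
+	 * Version queries carry no sub-capability ID; all other queries pass
+	 * a 4-byte sub-capability under either TPM_CAP_FLAG or TPM_CAP_PROP.
+	 */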
+ if (subcap_id == TPM_CAP_VERSION_1_1 ||
+ subcap_id == TPM_CAP_VERSION_1_2) {
+ tpm_buf_append_u32(&buf, subcap_id);
+ tpm_buf_append_u32(&buf, 0);
+ } else {
+ if (subcap_id == TPM_CAP_FLAG_PERM ||
+ subcap_id == TPM_CAP_FLAG_VOL)
+ tpm_buf_append_u32(&buf, TPM_CAP_FLAG);
+ else
+ tpm_buf_append_u32(&buf, TPM_CAP_PROP);
+
+ tpm_buf_append_u32(&buf, 4);
+ tpm_buf_append_u32(&buf, subcap_id);
+ }
+ rc = tpm_transmit_cmd(chip, &buf, min_cap_length, desc);
+ if (!rc)
+ *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4];
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm1_getcap);
+
+#define TPM_ORD_GET_RANDOM 70
+struct tpm1_get_random_out {
+ __be32 rng_data_len;
+ u8 rng_data[TPM_MAX_RNG_DATA];
+} __packed;
+
+/**
+ * tpm1_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance
+ * @dest: destination buffer for the random bytes
+ * @max: the maximum number of bytes to write to @dest
+ *
+ * Return:
+ * * number of bytes read
+ * * -errno (positive TPM return codes are masked to -EIO)
+ */
+int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+{
+ struct tpm1_get_random_out *out;
+ u32 num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
+ struct tpm_buf buf;
+ u32 total = 0;
+ int retries = 5;
+ u32 recd;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM);
+ if (rc)
+ return rc;
+
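+	/*
+	 * The TPM may return fewer bytes than requested, so keep asking
+	 * (with a bounded number of retries) until enough bytes are gathered.
+	 */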
+ do {
+ tpm_buf_append_u32(&buf, num_bytes);
+
+ rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len),
+ "attempting get random");
+ if (rc) {
+ if (rc > 0)
+ rc = -EIO;
+ goto out;
+ }
+
+ out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE];
+
+ recd = be32_to_cpu(out->rng_data_len);
+ if (recd > num_bytes) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE +
+ sizeof(out->rng_data_len) + recd) {
+ rc = -EFAULT;
+ goto out;
+ }
+ memcpy(dest, out->rng_data, recd);
+
+ dest += recd;
+ total += recd;
+ num_bytes -= recd;
+
+ tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_RANDOM);
+ } while (retries-- && total < max);
+
+ rc = total ? (int)total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+#define TPM_ORD_PCRREAD 21
+int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCRREAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+
+ rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE,
+ "attempting to read a pcr value");
+ if (rc)
+ goto out;
+
+ if (tpm_buf_length(&buf) < TPM_DIGEST_SIZE) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(res_buf, &buf.data[TPM_HEADER_SIZE], TPM_DIGEST_SIZE);
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+#define TPM_ORD_CONTINUE_SELFTEST 83
+/**
+ * tpm1_continue_selftest() - run TPM's selftest
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+static int tpm1_continue_selftest(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_CONTINUE_SELFTEST);
+ if (rc)
+ return rc;
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "continue selftest");
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm1_do_selftest - have the TPM continue its selftest and wait until it
+ * can receive further commands
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+int tpm1_do_selftest(struct tpm_chip *chip)
+{
+ int rc;
+ unsigned int loops;
+ unsigned int delay_msec = 100;
+ unsigned long duration;
+ u8 dummy[TPM_DIGEST_SIZE];
+
+ duration = tpm1_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST);
+
+ loops = jiffies_to_msecs(duration) / delay_msec;
+
+ rc = tpm1_continue_selftest(chip);
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+ dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
+ }
+ /* This may fail if there was no TPM driver during a suspend/resume
+ * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+ */
+ if (rc)
+ return rc;
+
+ do {
+ /* Attempt to read a PCR value */
+ rc = tpm1_pcr_read(chip, 0, dummy);
+
+ /* Some buggy TPMs will not respond to tpm_tis_ready() for
+ * around 300ms while the self test is ongoing, keep trying
+ * until the self test duration expires.
+ */
+ if (rc == -ETIME) {
+ dev_info(&chip->dev, HW_ERR "TPM command timed out during continue self test");
+ tpm_msleep(delay_msec);
+ continue;
+ }
+
+ if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+ dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n",
+ rc);
+ /* TPM is disabled and/or deactivated; driver can
+ * proceed and TPM does handle commands for
+ * suspend/resume correctly
+ */
+ return 0;
+ }
+ if (rc != TPM_WARN_DOING_SELFTEST)
+ return rc;
+ tpm_msleep(delay_msec);
+ } while (--loops > 0);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm1_do_selftest);
+
+/**
+ * tpm1_auto_startup - Perform the standard automatic TPM initialization
+ * sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm1_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm1_get_timeouts(chip);
+ if (rc)
+ goto out;
+ rc = tpm1_do_selftest(chip);
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
+ dev_err(&chip->dev, "TPM self test failed\n");
+ goto out;
+ }
+
+ return rc;
+out:
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
+
+#define TPM_ORD_SAVESTATE 152
+
+/**
+ * tpm1_pm_suspend() - pm suspend handler
+ * @chip: TPM chip to use.
+ * @tpm_suspend_pcr: PCR index to extend with a dummy hash before suspending,
+ *                   used to flush PCRs on buggy chips; 0 skips the workaround.
+ *
+ * The function saves the TPM state so that it can be restored on resume.
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
+{
+ u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
+ struct tpm_buf buf;
+ unsigned int try;
+ int rc;
+
+ /* for buggy tpm, flush pcrs with extend to selected dummy */
+ if (tpm_suspend_pcr)
+ rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash,
+ "extending dummy pcr before suspend");
+
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE);
+ if (rc)
+ return rc;
+ /* now do the actual savestate */
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
+ /*
+ * If the TPM indicates that it is too busy to respond to
+ * this command then retry before giving up. It can take
+ * several seconds for this TPM to be ready.
+ *
+ * This can happen if the TPM has already been sent the
+ * SaveState command before the driver has loaded. TCG 1.2
+ * specification states that any communication after SaveState
+ * may cause the TPM to invalidate previously saved state.
+ */
+ if (rc != TPM_WARN_RETRY)
+ break;
+ tpm_msleep(TPM_TIMEOUT_RETRY);
+
+ tpm_buf_reset(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_SAVESTATE);
+ }
+
+ if (rc)
+ dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n",
+ rc);
+ else if (try > 0)
+ dev_warn(&chip->dev, "TPM savestate took %dms\n",
+ try * TPM_TIMEOUT_RETRY);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+/**
+ * tpm1_get_pcr_allocation() - initialize the allocated bank
+ * @chip: TPM chip to use.
+ *
+ * The function initializes the single allocated SHA1 bank used for PCR extends.
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_get_pcr_allocation(struct tpm_chip *chip)
+{
+ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks)
+ return -ENOMEM;
+
+ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+ chip->nr_allocated_banks = 1;
+
+ return 0;
+}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
new file mode 100644
index 0000000000..93545be190
--- /dev/null
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ */
+
+#include "tpm.h"
+#include <crypto/hash_info.h>
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+int tpm2_get_timeouts(struct tpm_chip *chip)
+{
+ /* Fixed timeouts for TPM2 */
+ chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
+ chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
+ chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
+ chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
+
+ /* PTP spec timeouts */
+ chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
+ chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
+ chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
+
+ /* Key creation commands long timeouts */
+ chip->duration[TPM_LONG_LONG] =
+ msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
+
+ chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
+
+ return 0;
+}
+
+/**
+ * tpm2_ordinal_duration_index() - returns an index to the chip duration table
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns an index to the chip duration table
+ * (enum tpm_duration), that describes the maximum amount of
+ * time the chip could take to return the result for a particular ordinal.
+ *
+ * The MEDIUM and LONG duration values are taken from the PC Client
+ * Profile (PTP) specification (750 and 2000 msec, respectively).
+ *
+ * LONG_LONG is for commands that generate keys, which empirically take
+ * longer on some systems.
+ *
+ * Return:
+ * * TPM_MEDIUM
+ * * TPM_LONG
+ * * TPM_LONG_LONG
+ * * TPM_UNDEFINED
+ */
+static u8 tpm2_ordinal_duration_index(u32 ordinal)
+{
+ switch (ordinal) {
+ /* Startup */
+ case TPM2_CC_STARTUP: /* 144 */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_SELF_TEST: /* 143 */
+ return TPM_LONG;
+
+ case TPM2_CC_GET_RANDOM: /* 17B */
+ return TPM_LONG;
+
+ case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
+ return TPM_MEDIUM;
+ case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
+ return TPM_MEDIUM;
+ case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
+ return TPM_MEDIUM;
+ case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
+ return TPM_LONG_LONG;
+
+ case TPM2_CC_PCR_EXTEND: /* 182 */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
+ return TPM_LONG;
+ case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
+ return TPM_LONG;
+
+ case TPM2_CC_GET_CAPABILITY: /* 17A */
+ return TPM_MEDIUM;
+
+ case TPM2_CC_NV_READ: /* 14E */
+ return TPM_LONG;
+
+ case TPM2_CC_CREATE_PRIMARY: /* 131 */
+ return TPM_LONG_LONG;
+ case TPM2_CC_CREATE: /* 153 */
+ return TPM_LONG_LONG;
+ case TPM2_CC_CREATE_LOADED: /* 191 */
+ return TPM_LONG_LONG;
+
+ default:
+ return TPM_UNDEFINED;
+ }
+}
+
+/**
+ * tpm2_calc_ordinal_duration() - calculate the maximum command duration
+ * @chip: TPM chip to use.
+ * @ordinal: TPM command ordinal.
+ *
+ * The function returns the maximum amount of time the chip could take
+ * to return the result for a particular ordinal in jiffies.
+ *
+ * Return: A maximal duration time for an ordinal in jiffies.
+ */
+unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+{
+ unsigned int index;
+
+ index = tpm2_ordinal_duration_index(ordinal);
+
+ if (index != TPM_UNDEFINED)
+ return chip->duration[index];
+ else
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+}
+
+struct tpm2_pcr_read_out {
+ __be32 update_cnt;
+ __be32 pcr_selects_cnt;
+ __be16 hash_alg;
+ u8 pcr_select_size;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN];
+ __be32 digests_cnt;
+ __be16 digest_size;
+ u8 digest[];
+} __packed;
+
+/**
+ * tpm2_pcr_read() - read a PCR value
+ * @chip: TPM chip to use.
+ * @pcr_idx: index of the PCR to read.
+ * @digest: PCR bank and buffer that the current PCR value is written to.
+ * @digest_size_ptr: pointer to variable that stores the digest size.
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest, u16 *digest_size_ptr)
+{
+ int i;
+ int rc;
+ struct tpm_buf buf;
+ struct tpm2_pcr_read_out *out;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0};
+ u16 digest_size;
+ u16 expected_digest_size = 0;
+
+ if (pcr_idx >= TPM2_PLATFORM_PCR)
+ return -EINVAL;
+
+ if (!digest_size_ptr) {
+ for (i = 0; i < chip->nr_allocated_banks &&
+ chip->allocated_banks[i].alg_id != digest->alg_id; i++)
+ ;
+
+ if (i == chip->nr_allocated_banks)
+ return -EINVAL;
+
+ expected_digest_size = chip->allocated_banks[i].digest_size;
+ }
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ);
+ if (rc)
+ return rc;
+
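+	/* Build a PCR selection bitmap with only the requested PCR bit set. */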
+ pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
+
+ tpm_buf_append_u32(&buf, 1);
+ tpm_buf_append_u16(&buf, digest->alg_id);
+ tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN);
+ tpm_buf_append(&buf, (const unsigned char *)pcr_select,
+ sizeof(pcr_select));
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to read a pcr value");
+ if (rc)
+ goto out;
+
+ out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE];
+ digest_size = be16_to_cpu(out->digest_size);
+ if (digest_size > sizeof(digest->digest) ||
+ (!digest_size_ptr && digest_size != expected_digest_size)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (digest_size_ptr)
+ *digest_size_ptr = digest_size;
+
+ memcpy(digest->digest, out->digest, digest_size);
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+struct tpm2_null_auth_area {
+ __be32 handle;
+ __be16 nonce_size;
+ u8 attributes;
+ __be16 auth_size;
+} __packed;
+
+/**
+ * tpm2_pcr_extend() - extend a PCR value
+ *
+ * @chip: TPM chip to use.
+ * @pcr_idx: index of the PCR.
+ * @digests: list of pcr banks and corresponding digest values to extend.
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ */
+int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digests)
+{
+ struct tpm_buf buf;
+ struct tpm2_null_auth_area auth_area;
+ int rc;
+ int i;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, pcr_idx);
+
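+	/* Authorize with an empty password session (TPM2_RS_PW). */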
+ auth_area.handle = cpu_to_be32(TPM2_RS_PW);
+ auth_area.nonce_size = 0;
+ auth_area.attributes = 0;
+ auth_area.auth_size = 0;
+
+ tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
+ tpm_buf_append(&buf, (const unsigned char *)&auth_area,
+ sizeof(auth_area));
+ tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
+
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ tpm_buf_append_u16(&buf, digests[i].alg_id);
+ tpm_buf_append(&buf, (const unsigned char *)&digests[i].digest,
+ chip->allocated_banks[i].digest_size);
+ }
+
+	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to extend a PCR value");
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+struct tpm2_get_random_out {
+ __be16 size;
+ u8 buffer[TPM_MAX_RNG_DATA];
+} __packed;
+
+/**
+ * tpm2_get_random() - get random bytes from the TPM RNG
+ *
+ * @chip: a &tpm_chip instance
+ * @dest: destination buffer
+ * @max: the max number of random bytes to pull
+ *
+ * Return:
+ * number of random bytes read on success,
+ * -errno otherwise (positive TPM return codes are masked to -EIO)
+ */
+int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+{
+ struct tpm2_get_random_out *out;
+ struct tpm_buf buf;
+ u32 recd;
+ u32 num_bytes = max;
+ int err;
+ int total = 0;
+ int retries = 5;
+ u8 *dest_ptr = dest;
+
+ if (!num_bytes || max > TPM_MAX_RNG_DATA)
+ return -EINVAL;
+
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err)
+ return err;
+
+ do {
+ tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_u16(&buf, num_bytes);
+ err = tpm_transmit_cmd(chip, &buf,
+ offsetof(struct tpm2_get_random_out,
+ buffer),
+ "attempting get random");
+ if (err) {
+ if (err > 0)
+ err = -EIO;
+ goto out;
+ }
+
+ out = (struct tpm2_get_random_out *)
+ &buf.data[TPM_HEADER_SIZE];
+ recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
+ if (tpm_buf_length(&buf) <
+ TPM_HEADER_SIZE +
+ offsetof(struct tpm2_get_random_out, buffer) +
+ recd) {
+ err = -EFAULT;
+ goto out;
+ }
+ memcpy(dest_ptr, out->buffer, recd);
+
+ dest_ptr += recd;
+ total += recd;
+ num_bytes -= recd;
+ } while (retries-- && total < max);
+
+ tpm_buf_destroy(&buf);
+ return total ? total : -EIO;
+out:
+ tpm_buf_destroy(&buf);
+ return err;
+}
+
+/**
+ * tpm2_flush_context() - execute a TPM2_FlushContext command
+ * @chip: TPM chip to use
+ * @handle: context handle
+ */
+void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
+ if (rc) {
+ dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
+ handle);
+ return;
+ }
+
+ tpm_buf_append_u32(&buf, handle);
+
+ tpm_transmit_cmd(chip, &buf, 0, "flushing context");
+ tpm_buf_destroy(&buf);
+}
+EXPORT_SYMBOL_GPL(tpm2_flush_context);
+
+struct tpm2_get_cap_out {
+ u8 more_data;
+ __be32 subcap_id;
+ __be32 property_cnt;
+ __be32 property_id;
+ __be32 value;
+} __packed;
+
+/**
+ * tpm2_get_tpm_pt() - get value of a TPM_CAP_TPM_PROPERTIES type property
+ * @chip: a &tpm_chip instance
+ * @property_id: property ID.
+ * @value: output variable.
+ * @desc: passed to tpm_transmit_cmd()
+ *
+ * Return:
+ * 0 on success,
+ * -errno or a TPM return code otherwise
+ */
+ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
+ const char *desc)
+{
+ struct tpm2_get_cap_out *out;
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, property_id);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+		/*
+		 * To avoid failing the boot on some systems, an Infineon
+		 * TPM 2.0 in field upgrade mode returns SUCCESS for
+		 * TPM2_Startup. The TPM2_GetCapability command also returns
+		 * a zero-length list in that mode.
+		 */
+ if (be32_to_cpu(out->property_cnt) > 0)
+ *value = be32_to_cpu(out->value);
+ else
+ rc = -ENODATA;
+ }
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
+
+/**
+ * tpm2_shutdown() - send a TPM shutdown command
+ *
+ * Sends a TPM shutdown command. The shutdown command is used in call
+ * sites where the system is going down. If it fails, there is not much
+ * that can be done except print an error message.
+ *
+ * @chip: a &tpm_chip instance
+ * @shutdown_type: TPM_SU_CLEAR or TPM_SU_STATE.
+ */
+void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SHUTDOWN);
+ if (rc)
+ return;
+ tpm_buf_append_u16(&buf, shutdown_type);
+ tpm_transmit_cmd(chip, &buf, 0, "stopping the TPM");
+ tpm_buf_destroy(&buf);
+}
+
+/**
+ * tpm2_do_selftest() - ensure that all self tests have passed
+ *
+ * @chip: TPM chip to use
+ *
+ * Return: Same as with tpm_transmit_cmd.
+ *
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests have passed, or it can choose to run the tests
+ * asynchronously and return RC_TESTING immediately while the self tests still
+ * execute in the background. This function handles both cases and waits until
+ * all tests have completed.
+ */
+static int tpm2_do_selftest(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int full;
+ int rc;
+
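+	/* Request an incremental self test first (full = 0), then a full one. */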
+ for (full = 0; full < 2; full++) {
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_SELF_TEST);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u8(&buf, full);
+ rc = tpm_transmit_cmd(chip, &buf, 0,
+ "attempting the self test");
+ tpm_buf_destroy(&buf);
+
+ if (rc == TPM2_RC_TESTING)
+ rc = TPM2_RC_SUCCESS;
+ if (rc == TPM2_RC_INITIALIZE || rc == TPM2_RC_SUCCESS)
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_probe() - probe for the TPM 2.0 protocol
+ * @chip: a &tpm_chip instance
+ *
+ * Send an idempotent TPM 2.0 command and check whether there is a TPM2 chip on
+ * the other end, based on the response tag. The flag TPM_CHIP_FLAG_TPM2 is set by
+ * this function if this is the case.
+ *
+ * Return:
+ * 0 on success,
+ * -errno otherwise
+ */
+int tpm2_probe(struct tpm_chip *chip)
+{
+ struct tpm_header *out;
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES);
+ tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS);
+ tpm_buf_append_u32(&buf, 1);
+ rc = tpm_transmit_cmd(chip, &buf, 0, NULL);
+ /* We ignore TPM return codes on purpose. */
+ if (rc >= 0) {
+ out = (struct tpm_header *)buf.data;
+ if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ }
+ tpm_buf_destroy(&buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm2_probe);
+
+static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
+{
+ struct tpm_bank_info *bank = chip->allocated_banks + bank_index;
+ struct tpm_digest digest = { .alg_id = bank->alg_id };
+ int i;
+
+	/*
+	 * First try to map the TPM algorithm to a crypto subsystem
+	 * identifier, which also gives the digest size; only fall back to
+	 * a PCR read when the algorithm is unknown to the crypto layer.
+	 */
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ enum hash_algo crypto_algo = tpm2_hash_map[i].crypto_id;
+
+ if (bank->alg_id != tpm2_hash_map[i].tpm_id)
+ continue;
+
+ bank->digest_size = hash_digest_size[crypto_algo];
+ bank->crypto_id = crypto_algo;
+ return 0;
+ }
+
+ bank->crypto_id = HASH_ALGO__LAST;
+
+ return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
+}
+
+struct tpm2_pcr_selection {
+ __be16 hash_alg;
+ u8 size_of_select;
+ u8 pcr_select[3];
+} __packed;
+
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+{
+ struct tpm2_pcr_selection pcr_selection;
+ struct tpm_buf buf;
+ void *marker;
+ void *end;
+ void *pcr_select_offset;
+ u32 sizeof_pcr_selection;
+ u32 nr_possible_banks;
+ u32 nr_alloc_banks = 0;
+ u16 hash_alg;
+ u32 rsp_len;
+ int rc;
+ int i = 0;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, TPM2_CAP_PCRS);
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u32(&buf, 1);
+
+ rc = tpm_transmit_cmd(chip, &buf, 9, "get tpm pcr allocation");
+ if (rc)
+ goto out;
+
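+	/*
+	 * The response body is: moreData (1 byte), the capability ID
+	 * (4 bytes), the bank count (4 bytes) and then an array of
+	 * TPMS_PCR_SELECTION entries, hence the offsets of 5 and 9 below.
+	 */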
+ nr_possible_banks = be32_to_cpup(
+ (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
+
+ chip->allocated_banks = kcalloc(nr_possible_banks,
+ sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ marker = &buf.data[TPM_HEADER_SIZE + 9];
+
+ rsp_len = be32_to_cpup((__be32 *)&buf.data[2]);
+ end = &buf.data[rsp_len];
+
+ for (i = 0; i < nr_possible_banks; i++) {
+ pcr_select_offset = marker +
+ offsetof(struct tpm2_pcr_selection, size_of_select);
+ if (pcr_select_offset >= end) {
+ rc = -EFAULT;
+ break;
+ }
+
+ memcpy(&pcr_selection, marker, sizeof(pcr_selection));
+ hash_alg = be16_to_cpu(pcr_selection.hash_alg);
+
+ pcr_select_offset = memchr_inv(pcr_selection.pcr_select, 0,
+ pcr_selection.size_of_select);
+ if (pcr_select_offset) {
+ chip->allocated_banks[nr_alloc_banks].alg_id = hash_alg;
+
+ rc = tpm2_init_bank_info(chip, nr_alloc_banks);
+ if (rc < 0)
+ break;
+
+ nr_alloc_banks++;
+ }
+
+ sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) +
+ sizeof(pcr_selection.size_of_select) +
+ pcr_selection.size_of_select;
+ marker = marker + sizeof_pcr_selection;
+ }
+
+ chip->nr_allocated_banks = nr_alloc_banks;
+out:
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ u32 nr_commands;
+ __be32 *attrs;
+ u32 cc;
+ int i;
+ int rc;
+
+ rc = tpm2_get_tpm_pt(chip, TPM_PT_TOTAL_COMMANDS, &nr_commands, NULL);
+ if (rc)
+ goto out;
+
+ if (nr_commands > 0xFFFFF) {
+ rc = -EFAULT;
+ goto out;
+ }
+
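+	/* Each entry is a 32-bit TPMA_CC command attributes word. */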
+ chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands,
+ GFP_KERNEL);
+ if (!chip->cc_attrs_tbl) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY);
+ if (rc)
+ goto out;
+
+ tpm_buf_append_u32(&buf, TPM2_CAP_COMMANDS);
+ tpm_buf_append_u32(&buf, TPM2_CC_FIRST);
+ tpm_buf_append_u32(&buf, nr_commands);
+
+ rc = tpm_transmit_cmd(chip, &buf, 9 + 4 * nr_commands, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ goto out;
+ }
+
+ if (nr_commands !=
+ be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+ rc = -EFAULT;
+ tpm_buf_destroy(&buf);
+ goto out;
+ }
+
+ chip->nr_commands = nr_commands;
+
+ attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
+ for (i = 0; i < nr_commands; i++, attrs++) {
+ chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
+ cc = chip->cc_attrs_tbl[i] & 0xFFFF;
+
+ if (cc == TPM2_CC_CONTEXT_SAVE || cc == TPM2_CC_FLUSH_CONTEXT) {
+ chip->cc_attrs_tbl[i] &=
+ ~(GENMASK(2, 0) << TPM2_CC_ATTR_CHANDLES);
+ chip->cc_attrs_tbl[i] |= 1 << TPM2_CC_ATTR_CHANDLES;
+ }
+ }
+
+ tpm_buf_destroy(&buf);
+
+out:
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm2_get_cc_attrs_tbl);
+
+/**
+ * tpm2_startup - turn on the TPM
+ * @chip: TPM chip to use
+ *
+ * Normally the firmware should start the TPM. This function is provided as a
+ * workaround for when that does not happen, for example when a TPM emulator
+ * is used.
+ *
+ * Return: same as tpm_transmit_cmd()
+ */
+
+static int tpm2_startup(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ dev_info(&chip->dev, "starting up the TPM manually\n");
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP);
+ if (rc < 0)
+ return rc;
+
+ tpm_buf_append_u16(&buf, TPM2_SU_CLEAR);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM");
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
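For reference, a hedged sketch of the twelve bytes that the tpm_buf calls above marshal for TPM2_Startup(TPM2_SU_CLEAR). The array name is illustrative, and the numeric command code comes from the TPM 2.0 specification rather than from this file.

static const u8 example_tpm2_startup_clear[] = {
	0x80, 0x01,		/* TPM2_ST_NO_SESSIONS */
	0x00, 0x00, 0x00, 0x0c,	/* commandSize = 12 */
	0x00, 0x00, 0x01, 0x44,	/* TPM2_CC_STARTUP */
	0x00, 0x00,		/* TPM2_SU_CLEAR */
};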
+
+/**
+ * tpm2_auto_startup - Perform the standard automatic TPM initialization
+ * sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm2_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm2_get_timeouts(chip);
+ if (rc)
+ goto out;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc && rc != TPM2_RC_INITIALIZE)
+ goto out;
+
+ if (rc == TPM2_RC_INITIALIZE) {
+ rc = tpm2_startup(chip);
+ if (rc)
+ goto out;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc)
+ goto out;
+ }
+
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
+
+out:
+ /*
+ * Infineon TPM in field upgrade mode will return no data for the number
+ * of supported commands.
+ */
+ if (rc == TPM2_RC_UPGRADE || rc == -ENODATA) {
+ dev_info(&chip->dev, "TPM in field upgrade mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
+
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
+
+int tpm2_find_cc(struct tpm_chip *chip, u32 cc)
+{
+ u32 cc_mask;
+ int i;
+
+ cc_mask = 1 << TPM2_CC_ATTR_VENDOR | GENMASK(15, 0);
+ for (i = 0; i < chip->nr_commands; i++)
+ if (cc == (chip->cc_attrs_tbl[i] & cc_mask))
+ return i;
+
+ return -1;
+}
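An illustrative decode of a single chip->cc_attrs_tbl entry, using only the bit fields this file already relies on (TPM2_CC_ATTR_CHANDLES and TPM2_CC_ATTR_RHANDLE); the function name is hypothetical.

static void example_decode_cc_attrs(struct tpm_chip *chip, int i)
{
	u32 attrs = chip->cc_attrs_tbl[i];
	u32 cc = attrs & GENMASK(15, 0);
	unsigned int chandles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
	bool rhandle = (attrs >> TPM2_CC_ATTR_RHANDLE) & 1;

	dev_dbg(&chip->dev, "cc 0x%04x: %u command handles, %s response handle\n",
		cc, chandles, rhandle ? "one" : "no");
}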
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
new file mode 100644
index 0000000000..363afdd4d1
--- /dev/null
+++ b/drivers/char/tpm/tpm2-space.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains TPM2 protocol implementations of the commands
+ * used by the kernel internally.
+ */
+
+#include <linux/gfp.h>
+#include <asm/unaligned.h>
+#include "tpm.h"
+
+enum tpm2_handle_types {
+ TPM2_HT_HMAC_SESSION = 0x02000000,
+ TPM2_HT_POLICY_SESSION = 0x03000000,
+ TPM2_HT_TRANSIENT = 0x80000000,
+};
+
+struct tpm2_context {
+ __be64 sequence;
+ __be32 saved_handle;
+ __be32 hierarchy;
+ __be16 blob_size;
+} __packed;
+
+static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+ if (space->session_tbl[i])
+ tpm2_flush_context(chip, space->session_tbl[i]);
+ }
+}
+
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
+{
+ space->context_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!space->context_buf)
+ return -ENOMEM;
+
+ space->session_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (space->session_buf == NULL) {
+ kfree(space->context_buf);
+ /* Prevent caller getting a dangling pointer. */
+ space->context_buf = NULL;
+ return -ENOMEM;
+ }
+
+ space->buf_size = buf_size;
+ return 0;
+}
+
+void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
+{
+
+ if (tpm_try_get_ops(chip) == 0) {
+ tpm2_flush_sessions(chip, space);
+ tpm_put_ops(chip);
+ }
+
+ kfree(space->context_buf);
+ kfree(space->session_buf);
+}
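A hedged sketch of how the init/teardown pair above is meant to be used (mirroring the space-backed /dev/tpmrm paths); the buffer size and function name are illustrative, and the space must start zeroed so the handle tables are empty.

static int example_space_lifecycle(struct tpm_chip *chip)
{
	struct tpm_space space = {};	/* zeroed: empty context/session tables */
	int rc;

	rc = tpm2_init_space(&space, 16384);	/* illustrative buffer size */
	if (rc)
		return rc;

	/* ... run commands against the space here ... */

	tpm2_del_space(chip, &space);	/* flushes tracked sessions, frees buffers */
	return 0;
}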
+
+static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ unsigned int *offset, u32 *handle)
+{
+ struct tpm_buf tbuf;
+ struct tpm2_context *ctx;
+ unsigned int body_size;
+ int rc;
+
+ rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_LOAD);
+ if (rc)
+ return rc;
+
+ ctx = (struct tpm2_context *)&buf[*offset];
+ body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size);
+ tpm_buf_append(&tbuf, &buf[*offset], body_size);
+
+ rc = tpm_transmit_cmd(chip, &tbuf, 4, NULL);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed with a system error %d\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ } else if (tpm2_rc_value(rc) == TPM2_RC_HANDLE ||
+ rc == TPM2_RC_REFERENCE_H0) {
+ /*
+ * TPM_RC_HANDLE means that the session context can't
+ * be loaded because of an internal counter mismatch
+ * that makes the TPM think there might have been a
+ * replay. This might happen if the context was saved
+ * and loaded outside the space.
+ *
+ * TPM_RC_REFERENCE_H0 means the session has been
+ * flushed outside the space
+ */
+ *handle = 0;
+ tpm_buf_destroy(&tbuf);
+ return -ENOENT;
+ } else if (rc > 0) {
+ dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ }
+
+ *handle = be32_to_cpup((__be32 *)&tbuf.data[TPM_HEADER_SIZE]);
+ *offset += body_size;
+
+ tpm_buf_destroy(&tbuf);
+ return 0;
+}
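A worked size example for the context record consumed above, with an invented blob size:

/*
 * If the saved context's blob_size field reads 0x037a, the record occupies
 * sizeof(struct tpm2_context) + 0x037a = 18 + 890 = 908 bytes, which is
 * exactly how far *offset advances after a successful load.
 */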
+
+static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+ unsigned int buf_size, unsigned int *offset)
+{
+ struct tpm_buf tbuf;
+ unsigned int body_size;
+ int rc;
+
+ rc = tpm_buf_init(&tbuf, TPM2_ST_NO_SESSIONS, TPM2_CC_CONTEXT_SAVE);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&tbuf, handle);
+
+ rc = tpm_transmit_cmd(chip, &tbuf, 0, NULL);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed with a system error %d\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ } else if (tpm2_rc_value(rc) == TPM2_RC_REFERENCE_H0) {
+ tpm_buf_destroy(&tbuf);
+ return -ENOENT;
+ } else if (rc) {
+ dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+ __func__, rc);
+ tpm_buf_destroy(&tbuf);
+ return -EFAULT;
+ }
+
+ body_size = tpm_buf_length(&tbuf) - TPM_HEADER_SIZE;
+ if ((*offset + body_size) > buf_size) {
+ dev_warn(&chip->dev, "%s: out of backing storage\n", __func__);
+ tpm_buf_destroy(&tbuf);
+ return -ENOMEM;
+ }
+
+ memcpy(&buf[*offset], &tbuf.data[TPM_HEADER_SIZE], body_size);
+ *offset += body_size;
+ tpm_buf_destroy(&tbuf);
+ return 0;
+}
+
+void tpm2_flush_space(struct tpm_chip *chip)
+{
+ struct tpm_space *space = &chip->work_space;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
+ if (space->context_tbl[i] && ~space->context_tbl[i])
+ tpm2_flush_context(chip, space->context_tbl[i]);
+
+ tpm2_flush_sessions(chip, space);
+}
+
+static int tpm2_load_space(struct tpm_chip *chip)
+{
+ struct tpm_space *space = &chip->work_space;
+ unsigned int offset;
+ int i;
+ int rc;
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+ if (!space->context_tbl[i])
+ continue;
+
+ /* sanity check, should never happen */
+ if (~space->context_tbl[i]) {
+ dev_err(&chip->dev, "context table is inconsistent");
+ return -EFAULT;
+ }
+
+ rc = tpm2_load_context(chip, space->context_buf, &offset,
+ &space->context_tbl[i]);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+ u32 handle;
+
+ if (!space->session_tbl[i])
+ continue;
+
+ rc = tpm2_load_context(chip, space->session_buf,
+ &offset, &handle);
+ if (rc == -ENOENT) {
+ /* load failed, just forget session */
+ space->session_tbl[i] = 0;
+ } else if (rc) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+ if (handle != space->session_tbl[i]) {
+ dev_warn(&chip->dev, "session restored to wrong handle\n");
+ tpm2_flush_space(chip);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static bool tpm2_map_to_phandle(struct tpm_space *space, void *handle)
+{
+ u32 vhandle = be32_to_cpup((__be32 *)handle);
+ u32 phandle;
+ int i;
+
+ i = 0xFFFFFF - (vhandle & 0xFFFFFF);
+ if (i >= ARRAY_SIZE(space->context_tbl) || !space->context_tbl[i])
+ return false;
+
+ phandle = space->context_tbl[i];
+ *((__be32 *)handle) = cpu_to_be32(phandle);
+ return true;
+}
+
+static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
+{
+ struct tpm_space *space = &chip->work_space;
+ unsigned int nr_handles;
+ u32 attrs;
+ __be32 *handle;
+ int i;
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0)
+ return -EINVAL;
+
+ attrs = chip->cc_attrs_tbl[i];
+ nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
+
+ handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
+ for (i = 0; i < nr_handles; i++, handle++) {
+ if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
+ if (!tpm2_map_to_phandle(space, handle))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int tpm_find_and_validate_cc(struct tpm_chip *chip,
+ struct tpm_space *space,
+ const void *cmd, size_t len)
+{
+ const struct tpm_header *header = (const void *)cmd;
+ int i;
+ u32 cc;
+ u32 attrs;
+ unsigned int nr_handles;
+
+ if (len < TPM_HEADER_SIZE || !chip->nr_commands)
+ return -EINVAL;
+
+ cc = be32_to_cpu(header->ordinal);
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0) {
+ dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
+ cc);
+ return -EOPNOTSUPP;
+ }
+
+ attrs = chip->cc_attrs_tbl[i];
+ nr_handles =
+ 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0));
+ if (len < TPM_HEADER_SIZE + 4 * nr_handles)
+ goto err_len;
+
+ return cc;
+err_len:
+ dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__,
+ len);
+ return -EINVAL;
+}
+
+int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ size_t cmdsiz)
+{
+ int rc;
+ int cc;
+
+ if (!space)
+ return 0;
+
+ cc = tpm_find_and_validate_cc(chip, space, cmd, cmdsiz);
+ if (cc < 0)
+ return cc;
+
+ memcpy(&chip->work_space.context_tbl, &space->context_tbl,
+ sizeof(space->context_tbl));
+ memcpy(&chip->work_space.session_tbl, &space->session_tbl,
+ sizeof(space->session_tbl));
+ memcpy(chip->work_space.context_buf, space->context_buf,
+ space->buf_size);
+ memcpy(chip->work_space.session_buf, space->session_buf,
+ space->buf_size);
+
+ rc = tpm2_load_space(chip);
+ if (rc) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+
+ rc = tpm2_map_command(chip, cc, cmd);
+ if (rc) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+
+ chip->last_cc = cc;
+ return 0;
+}
+
+static bool tpm2_add_session(struct tpm_chip *chip, u32 handle)
+{
+ struct tpm_space *space = &chip->work_space;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++)
+ if (space->session_tbl[i] == 0)
+ break;
+
+ if (i == ARRAY_SIZE(space->session_tbl))
+ return false;
+
+ space->session_tbl[i] = handle;
+ return true;
+}
+
+static u32 tpm2_map_to_vhandle(struct tpm_space *space, u32 phandle, bool alloc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+ if (alloc) {
+ if (!space->context_tbl[i]) {
+ space->context_tbl[i] = phandle;
+ break;
+ }
+ } else if (space->context_tbl[i] == phandle)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(space->context_tbl))
+ return 0;
+
+ return TPM2_HT_TRANSIENT | (0xFFFFFF - i);
+}
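A small illustration of the virtual-handle encoding shared by tpm2_map_to_vhandle() and tpm2_map_to_phandle(); the helper names below are hypothetical.

/* slot 0 <-> 0x80FFFFFF, slot 1 <-> 0x80FFFFFE, ... */
static u32 example_slot_to_vhandle(unsigned int slot)
{
	return TPM2_HT_TRANSIENT | (0xFFFFFF - slot);
}

static unsigned int example_vhandle_to_slot(u32 vhandle)
{
	return 0xFFFFFF - (vhandle & 0xFFFFFF);
}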
+
+static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,
+ size_t len)
+{
+ struct tpm_space *space = &chip->work_space;
+ struct tpm_header *header = (struct tpm_header *)rsp;
+ u32 phandle;
+ u32 phandle_type;
+ u32 vhandle;
+ u32 attrs;
+ int i;
+
+ if (be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS)
+ return 0;
+
+ i = tpm2_find_cc(chip, cc);
+ /* sanity check, should never happen */
+ if (i < 0)
+ return -EFAULT;
+
+ attrs = chip->cc_attrs_tbl[i];
+ if (!((attrs >> TPM2_CC_ATTR_RHANDLE) & 1))
+ return 0;
+
+ phandle = be32_to_cpup((__be32 *)&rsp[TPM_HEADER_SIZE]);
+ phandle_type = phandle & 0xFF000000;
+
+ switch (phandle_type) {
+ case TPM2_HT_TRANSIENT:
+ vhandle = tpm2_map_to_vhandle(space, phandle, true);
+ if (!vhandle)
+ goto out_no_slots;
+
+ *(__be32 *)&rsp[TPM_HEADER_SIZE] = cpu_to_be32(vhandle);
+ break;
+ case TPM2_HT_HMAC_SESSION:
+ case TPM2_HT_POLICY_SESSION:
+ if (!tpm2_add_session(chip, phandle))
+ goto out_no_slots;
+ break;
+ default:
+ dev_err(&chip->dev, "%s: unknown handle 0x%08X\n",
+ __func__, phandle);
+ break;
+ }
+
+ return 0;
+out_no_slots:
+ tpm2_flush_context(chip, phandle);
+ dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
+ phandle);
+ return -ENOMEM;
+}
+
+struct tpm2_cap_handles {
+ u8 more_data;
+ __be32 capability;
+ __be32 count;
+ __be32 handles[];
+} __packed;
+
+static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
+ size_t len)
+{
+ struct tpm_space *space = &chip->work_space;
+ struct tpm_header *header = (struct tpm_header *)rsp;
+ struct tpm2_cap_handles *data;
+ u32 phandle;
+ u32 phandle_type;
+ u32 vhandle;
+ int i;
+ int j;
+
+ if (cc != TPM2_CC_GET_CAPABILITY ||
+ be32_to_cpu(header->return_code) != TPM2_RC_SUCCESS) {
+ return 0;
+ }
+
+ if (len < TPM_HEADER_SIZE + 9)
+ return -EFAULT;
+
+ data = (void *)&rsp[TPM_HEADER_SIZE];
+ if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
+ return 0;
+
+ if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4)
+ return -EFAULT;
+
+ if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
+ return -EFAULT;
+
+ for (i = 0, j = 0; i < be32_to_cpu(data->count); i++) {
+ phandle = be32_to_cpup((__be32 *)&data->handles[i]);
+ phandle_type = phandle & 0xFF000000;
+
+ switch (phandle_type) {
+ case TPM2_HT_TRANSIENT:
+ vhandle = tpm2_map_to_vhandle(space, phandle, false);
+ if (!vhandle)
+ break;
+
+ data->handles[j] = cpu_to_be32(vhandle);
+ j++;
+ break;
+
+ default:
+ data->handles[j] = cpu_to_be32(phandle);
+ j++;
+ break;
+ }
+
+ }
+
+ header->length = cpu_to_be32(TPM_HEADER_SIZE + 9 + 4 * j);
+ data->count = cpu_to_be32(j);
+ return 0;
+}
+
+static int tpm2_save_space(struct tpm_chip *chip)
+{
+ struct tpm_space *space = &chip->work_space;
+ unsigned int offset;
+ int i;
+ int rc;
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->context_tbl); i++) {
+ if (!(space->context_tbl[i] && ~space->context_tbl[i]))
+ continue;
+
+ rc = tpm2_save_context(chip, space->context_tbl[i],
+ space->context_buf, space->buf_size,
+ &offset);
+ if (rc == -ENOENT) {
+ space->context_tbl[i] = 0;
+ continue;
+ } else if (rc)
+ return rc;
+
+ tpm2_flush_context(chip, space->context_tbl[i]);
+ space->context_tbl[i] = ~0;
+ }
+
+ for (i = 0, offset = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
+ if (!space->session_tbl[i])
+ continue;
+
+ rc = tpm2_save_context(chip, space->session_tbl[i],
+ space->session_buf, space->buf_size,
+ &offset);
+ if (rc == -ENOENT) {
+ /* handle error saving session, just forget it */
+ space->session_tbl[i] = 0;
+ } else if (rc < 0) {
+ tpm2_flush_space(chip);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
+ void *buf, size_t *bufsiz)
+{
+ struct tpm_header *header = buf;
+ int rc;
+
+ if (!space)
+ return 0;
+
+ rc = tpm2_map_response_header(chip, chip->last_cc, buf, *bufsiz);
+ if (rc) {
+ tpm2_flush_space(chip);
+ goto out;
+ }
+
+ rc = tpm2_map_response_body(chip, chip->last_cc, buf, *bufsiz);
+ if (rc) {
+ tpm2_flush_space(chip);
+ goto out;
+ }
+
+ rc = tpm2_save_space(chip);
+ if (rc) {
+ tpm2_flush_space(chip);
+ goto out;
+ }
+
+ *bufsiz = be32_to_cpu(header->length);
+
+ memcpy(&space->context_tbl, &chip->work_space.context_tbl,
+ sizeof(space->context_tbl));
+ memcpy(&space->session_tbl, &chip->work_space.session_tbl,
+ sizeof(space->session_tbl));
+ memcpy(space->context_buf, chip->work_space.context_buf,
+ space->buf_size);
+ memcpy(space->session_buf, chip->work_space.session_buf,
+ space->buf_size);
+
+ return 0;
+out:
+ dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
+ return rc;
+}
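A hedged sketch of the transaction shape the two entry points above are written for; the transmit step is elided and the wrapper name is hypothetical.

static ssize_t example_space_transaction(struct tpm_chip *chip,
					 struct tpm_space *space,
					 u8 *buf, size_t bufsiz)
{
	int rc;

	rc = tpm2_prepare_space(chip, space, buf, bufsiz);
	if (rc)
		return rc;

	/* ... transmit buf to the device, read the response back into buf ... */

	rc = tpm2_commit_space(chip, space, buf, &bufsiz);
	if (rc)
		return rc;

	return bufsiz;
}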
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+ /* release the master device reference */
+ put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdevs, &chip->devs);
+ put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+ int rc;
+
+ device_initialize(&chip->devs);
+ chip->devs.parent = chip->dev.parent;
+ chip->devs.class = &tpmrm_class;
+
+ /*
+ * Get extra reference on main device to hold on behalf of devs.
+ * This holds the chip structure while cdevs is in use. The
+ * corresponding put is in the tpm_devs_release.
+ */
+ get_device(&chip->dev);
+ chip->devs.release = tpm_devs_release;
+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+ cdev_init(&chip->cdevs, &tpmrm_fops);
+ chip->cdevs.owner = THIS_MODULE;
+
+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+ if (rc)
+ goto err_put_devs;
+
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ goto err_put_devs;
+ }
+
+ return 0;
+
+err_put_devs:
+ put_device(&chip->devs);
+
+ return rc;
+}
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
new file mode 100644
index 0000000000..54a6750a67
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include "tpm.h"
+#include "tpm_atmel.h"
+
+/* write status bits */
+enum tpm_atmel_write_status {
+ ATML_STATUS_ABORT = 0x01,
+ ATML_STATUS_LASTBYTE = 0x04
+};
+/* read status bits */
+enum tpm_atmel_read_status {
+ ATML_STATUS_BUSY = 0x01,
+ ATML_STATUS_DATA_AVAIL = 0x02,
+ ATML_STATUS_REWRITE = 0x04,
+ ATML_STATUS_READY = 0x08
+};
+
+static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 status, *hdr = buf;
+ u32 size;
+ int i;
+ __be32 *native_size;
+
+ /* start reading header */
+ if (count < 6)
+ return -EIO;
+
+ for (i = 0; i < 6; i++) {
+ status = ioread8(priv->iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(&chip->dev, "error reading header\n");
+ return -EIO;
+ }
+ *buf++ = ioread8(priv->iobase);
+ }
+
+ /* size of the data received */
+ native_size = (__force __be32 *) (hdr + 2);
+ size = be32_to_cpu(*native_size);
+
+ if (count < size) {
+		dev_err(&chip->dev,
+			"Recv size(%d) exceeds the receive buffer\n", size);
+ for (; i < size; i++) { /* clear the waiting data anyway */
+ status = ioread8(priv->iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(&chip->dev, "error reading data\n");
+ return -EIO;
+ }
+ }
+ return -EIO;
+ }
+
+ /* read all the data available */
+ for (; i < size; i++) {
+ status = ioread8(priv->iobase + 1);
+ if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+ dev_err(&chip->dev, "error reading data\n");
+ return -EIO;
+ }
+ *buf++ = ioread8(priv->iobase);
+ }
+
+ /* make sure data available is gone */
+ status = ioread8(priv->iobase + 1);
+
+ if (status & ATML_STATUS_DATA_AVAIL) {
+ dev_err(&chip->dev, "data available is stuck\n");
+ return -EIO;
+ }
+
+ return size;
+}
+
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+ int i;
+
+ dev_dbg(&chip->dev, "tpm_atml_send:\n");
+ for (i = 0; i < count; i++) {
+ dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ iowrite8(buf[i], priv->iobase);
+ }
+
+ return 0;
+}
+
+static void tpm_atml_cancel(struct tpm_chip *chip)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ iowrite8(ATML_STATUS_ABORT, priv->iobase + 1);
+}
+
+static u8 tpm_atml_status(struct tpm_chip *chip)
+{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return ioread8(priv->iobase + 1);
+}
+
+static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == ATML_STATUS_READY);
+}
+
+static const struct tpm_class_ops tpm_atmel = {
+ .recv = tpm_atml_recv,
+ .send = tpm_atml_send,
+ .cancel = tpm_atml_cancel,
+ .status = tpm_atml_status,
+ .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+ .req_complete_val = ATML_STATUS_DATA_AVAIL,
+ .req_canceled = tpm_atml_req_canceled,
+};
+
+static struct platform_device *pdev;
+
+static void atml_plat_remove(void)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ tpm_chip_unregister(chip);
+ if (priv->have_region)
+ atmel_release_region(priv->base, priv->region_size);
+ atmel_put_base_addr(priv->iobase);
+ platform_device_unregister(pdev);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct platform_driver atml_drv = {
+ .driver = {
+ .name = "tpm_atmel",
+ .pm = &tpm_atml_pm,
+ },
+};
+
+static int __init init_atmel(void)
+{
+ int rc = 0;
+ void __iomem *iobase = NULL;
+ int have_region, region_size;
+ unsigned long base;
+ struct tpm_chip *chip;
+ struct tpm_atmel_priv *priv;
+
+ rc = platform_driver_register(&atml_drv);
+ if (rc)
+ return rc;
+
+ if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
+ rc = -ENODEV;
+ goto err_unreg_drv;
+ }
+
+ have_region =
+ (atmel_request_region
+ (base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
+
+ pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ rc = PTR_ERR(pdev);
+ goto err_rel_reg;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_unreg_dev;
+ }
+
+ priv->iobase = iobase;
+ priv->base = base;
+ priv->have_region = have_region;
+ priv->region_size = region_size;
+
+ chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto err_unreg_dev;
+ }
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_unreg_dev;
+
+ return 0;
+
+err_unreg_dev:
+ platform_device_unregister(pdev);
+err_rel_reg:
+ atmel_put_base_addr(iobase);
+ if (have_region)
+ atmel_release_region(base,
+ region_size);
+err_unreg_drv:
+ platform_driver_unregister(&atml_drv);
+ return rc;
+}
+
+static void __exit cleanup_atmel(void)
+{
+ platform_driver_unregister(&atml_drv);
+ atml_plat_remove();
+}
+
+module_init(init_atmel);
+module_exit(cleanup_atmel);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
new file mode 100644
index 0000000000..7ac3f69dcf
--- /dev/null
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * These differences are required on POWER because the device must be
+ * discovered through the device tree and iomap must be used to get
+ * around the need for holes in the io_page_mask. This does not happen
+ * automatically because the TPM is not a normal PCI device and lives
+ * under the root node.
+ */
+
+struct tpm_atmel_priv {
+ int region_size;
+ int have_region;
+ unsigned long base;
+ void __iomem *iobase;
+};
+
+#ifdef CONFIG_PPC64
+
+#include <linux/of.h>
+
+#define atmel_getb(priv, offset) readb(priv->iobase + offset)
+#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
+#define atmel_request_region request_mem_region
+#define atmel_release_region release_mem_region
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+ iounmap(iobase);
+}
+
+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ struct device_node *dn;
+ unsigned long address, size;
+ const unsigned int *reg;
+ int reglen;
+ int naddrc;
+ int nsizec;
+
+ dn = of_find_node_by_name(NULL, "tpm");
+
+ if (!dn)
+ return NULL;
+
+ if (!of_device_is_compatible(dn, "AT97SC3201")) {
+ of_node_put(dn);
+ return NULL;
+ }
+
+ reg = of_get_property(dn, "reg", &reglen);
+ naddrc = of_n_addr_cells(dn);
+ nsizec = of_n_size_cells(dn);
+
+ of_node_put(dn);
+
+
+ if (naddrc == 2)
+ address = ((unsigned long) reg[0] << 32) | reg[1];
+ else
+ address = reg[0];
+
+ if (nsizec == 2)
+ size =
+ ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
+ else
+ size = reg[naddrc];
+
+ *base = address;
+ *region_size = size;
+ return ioremap(*base, *region_size);
+}
+#else
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
+#define atmel_putb(val, chip, offset) \
+ outb(val, atmel_get_priv(chip)->base + offset)
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
+ TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+ tpm_read_index(TPM_ADDR, 5) != 'T' ||
+ tpm_read_index(TPM_ADDR, 6) != 'M' ||
+ tpm_read_index(TPM_ADDR, 7) != 'L')
+ return 1;
+
+ /* query chip for its version number */
+ if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+ tpm_read_index(TPM_ADDR, 0x01) != 1)
+ return 1;
+
+ /* This is an atmel supported part */
+ return 0;
+}
+
+static inline void atmel_put_base_addr(void __iomem *iobase)
+{
+}
+
+/* Determine where to talk to device */
+static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ int lo, hi;
+
+ if (atmel_verify_tpm11() != 0)
+ return NULL;
+
+ lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+ hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+ *base = (hi << 8) | lo;
+ *region_size = 2;
+
+ return ioport_map(*base, *region_size);
+}
+#endif
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
new file mode 100644
index 0000000000..ea085b14ab
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * Authors:
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG CRB 2.0 TPM specification.
+ */
+
+#include <linux/acpi.h>
+#include <linux/highmem.h>
+#include <linux/rculist.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#ifdef CONFIG_ARM64
+#include <linux/arm-smccc.h>
+#endif
+#include "tpm.h"
+
+#define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
+
+static const guid_t crb_acpi_start_guid =
+ GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
+
+enum crb_defaults {
+ CRB_ACPI_START_REVISION_ID = 1,
+ CRB_ACPI_START_INDEX = 1,
+};
+
+enum crb_loc_ctrl {
+ CRB_LOC_CTRL_REQUEST_ACCESS = BIT(0),
+ CRB_LOC_CTRL_RELINQUISH = BIT(1),
+};
+
+enum crb_loc_state {
+ CRB_LOC_STATE_LOC_ASSIGNED = BIT(1),
+ CRB_LOC_STATE_TPM_REG_VALID_STS = BIT(7),
+};
+
+enum crb_ctrl_req {
+ CRB_CTRL_REQ_CMD_READY = BIT(0),
+ CRB_CTRL_REQ_GO_IDLE = BIT(1),
+};
+
+enum crb_ctrl_sts {
+ CRB_CTRL_STS_ERROR = BIT(0),
+ CRB_CTRL_STS_TPM_IDLE = BIT(1),
+};
+
+enum crb_start {
+ CRB_START_INVOKE = BIT(0),
+};
+
+enum crb_cancel {
+ CRB_CANCEL_INVOKE = BIT(0),
+};
+
+struct crb_regs_head {
+ u32 loc_state;
+ u32 reserved1;
+ u32 loc_ctrl;
+ u32 loc_sts;
+ u8 reserved2[32];
+ u64 intf_id;
+ u64 ctrl_ext;
+} __packed;
+
+struct crb_regs_tail {
+ u32 ctrl_req;
+ u32 ctrl_sts;
+ u32 ctrl_cancel;
+ u32 ctrl_start;
+ u32 ctrl_int_enable;
+ u32 ctrl_int_sts;
+ u32 ctrl_cmd_size;
+ u32 ctrl_cmd_pa_low;
+ u32 ctrl_cmd_pa_high;
+ u32 ctrl_rsp_size;
+ u64 ctrl_rsp_pa;
+} __packed;
+
+enum crb_status {
+ CRB_DRV_STS_COMPLETE = BIT(0),
+};
+
+struct crb_priv {
+ u32 sm;
+ const char *hid;
+ struct crb_regs_head __iomem *regs_h;
+ struct crb_regs_tail __iomem *regs_t;
+ u8 __iomem *cmd;
+ u8 __iomem *rsp;
+ u32 cmd_size;
+ u32 smc_func_id;
+ u32 __iomem *pluton_start_addr;
+ u32 __iomem *pluton_reply_addr;
+};
+
+struct tpm2_crb_smc {
+ u32 interrupt;
+ u8 interrupt_flags;
+ u8 op_flags;
+ u16 reserved2;
+ u32 smc_func_id;
+};
+
+struct tpm2_crb_pluton {
+ u64 start_addr;
+ u64 reply_addr;
+};
+
+static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+ unsigned long timeout)
+{
+ ktime_t start;
+ ktime_t stop;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(timeout));
+
+ do {
+ if ((ioread32(reg) & mask) == value)
+ return true;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), stop));
+
+ return ((ioread32(reg) & mask) == value);
+}
+
+static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
+{
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
+ return -ETIME;
+
+ iowrite32(1, priv->pluton_start_addr);
+ if (wait_for_complete == false)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_start_addr,
+ 0xffffffff, 0, 200))
+ return -ETIME;
+
+ return 0;
+}
+
+/**
+ * __crb_go_idle - request tpm crb device to go to the idle state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ and poll until the device
+ * acknowledges it by clearing the bit. The device should respond within
+ * TIMEOUT_C.
+ *
+ * The function does nothing for devices with the ACPI-start method
+ * or the SMC-start method.
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+ int rc;
+
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
+ if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+ CRB_CTRL_REQ_GO_IDLE/* mask */,
+ 0, /* value */
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "goIdle timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
+/**
+ * __crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev: crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll till the device acknowledge it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+{
+ int rc;
+
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ return 0;
+
+ iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
+ if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
+ CRB_CTRL_REQ_CMD_READY /* mask */,
+ 0, /* value */
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "cmdReady timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
+static int __crb_request_locality(struct device *dev,
+ struct crb_priv *priv, int loc)
+{
+ u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
+ CRB_LOC_STATE_TPM_REG_VALID_STS;
+
+ if (!priv->regs_h)
+ return 0;
+
+ iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_request_locality(struct tpm_chip *chip, int loc)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return __crb_request_locality(&chip->dev, priv, loc);
+}
+
+static int __crb_relinquish_locality(struct device *dev,
+ struct crb_priv *priv, int loc)
+{
+ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
+ CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+
+ if (!priv->regs_h)
+ return 0;
+
+ iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
+ TPM2_TIMEOUT_C)) {
+ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return __crb_relinquish_locality(&chip->dev, priv, loc);
+}
+
+static u8 crb_status(struct tpm_chip *chip)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 sts = 0;
+
+ if ((ioread32(&priv->regs_t->ctrl_start) & CRB_START_INVOKE) !=
+ CRB_START_INVOKE)
+ sts |= CRB_DRV_STS_COMPLETE;
+
+ return sts;
+}
+
+static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ unsigned int expected;
+
+ /* A sanity check that the upper layer wants to get at least the header
+ * as that is the minimum size for any TPM response.
+ */
+ if (count < TPM_HEADER_SIZE)
+ return -EIO;
+
+	/* If this bit is set, according to the spec, the TPM is in an
+	 * unrecoverable condition.
+	 */
+ if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
+ return -EIO;
+
+ /* Read the first 8 bytes in order to get the length of the response.
+ * We read exactly a quad word in order to make sure that the remaining
+ * reads will be aligned.
+ */
+ memcpy_fromio(buf, priv->rsp, 8);
+
+ expected = be32_to_cpup((__be32 *)&buf[2]);
+ if (expected > count || expected < TPM_HEADER_SIZE)
+ return -EIO;
+
+ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
+
+ return expected;
+}
+
+static int crb_do_acpi_start(struct tpm_chip *chip)
+{
+ union acpi_object *obj;
+ int rc;
+
+ obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
+ &crb_acpi_start_guid,
+ CRB_ACPI_START_REVISION_ID,
+ CRB_ACPI_START_INDEX,
+ NULL);
+ if (!obj)
+ return -ENXIO;
+ rc = obj->integer.value == 0 ? 0 : -ENXIO;
+ ACPI_FREE(obj);
+ return rc;
+}
+
+#ifdef CONFIG_ARM64
+/*
+ * This is a TPM Command Response Buffer start method that invokes a
+ * Secure Monitor Call to request the firmware to execute or cancel
+ * a TPM 2.0 command.
+ */
+static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != 0) {
+ dev_err(dev,
+ FW_BUG "tpm_crb_smc_start() returns res.a0 = 0x%lx\n",
+ res.a0);
+ return -EIO;
+ }
+
+ return 0;
+}
+#else
+static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
+{
+ dev_err(dev, FW_BUG "tpm_crb: incorrect start method\n");
+ return -EINVAL;
+}
+#endif
+
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ /* Zero the cancel register so that the next command will not get
+ * canceled.
+ */
+ iowrite32(0, &priv->regs_t->ctrl_cancel);
+
+ if (len > priv->cmd_size) {
+ dev_err(&chip->dev, "invalid command count value %zd %d\n",
+ len, priv->cmd_size);
+ return -E2BIG;
+ }
+
+ /* Seems to be necessary for every command */
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ __crb_cmd_ready(&chip->dev, priv);
+
+ memcpy_toio(priv->cmd, buf, len);
+
+ /* Make sure that cmd is populated before issuing start. */
+ wmb();
+
+	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+	 * reports only the ACPI start method but in practice also seems to
+	 * require CRB start, hence the CRB start method is invoked as well
+	 * when hid == MSFT0101.
+	 */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+ (!strcmp(priv->hid, "MSFT0101")))
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ rc = crb_do_acpi_start(chip);
+
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
+ }
+
+ if (rc)
+ return rc;
+
+ return crb_try_pluton_doorbell(priv, false);
+}
+
+static void crb_cancel(struct tpm_chip *chip)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+ iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
+
+ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ crb_do_acpi_start(chip))
+ dev_err(&chip->dev, "ACPI Start failed\n");
+}
+
+static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ u32 cancel = ioread32(&priv->regs_t->ctrl_cancel);
+
+ return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
+}
+
+static const struct tpm_class_ops tpm_crb = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = crb_status,
+ .recv = crb_recv,
+ .send = crb_send,
+ .cancel = crb_cancel,
+ .req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
+ .request_locality = crb_request_locality,
+ .relinquish_locality = crb_relinquish_locality,
+ .req_complete_mask = CRB_DRV_STS_COMPLETE,
+ .req_complete_val = CRB_DRV_STS_COMPLETE,
+};
+
+static int crb_check_resource(struct acpi_resource *ares, void *data)
+{
+ struct resource *iores_array = data;
+ struct resource_win win;
+ struct resource *res = &(win.res);
+ int i;
+
+ if (acpi_dev_resource_memory(ares, res) ||
+ acpi_dev_resource_address_space(ares, &win)) {
+ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+ iores_array[i] = *res;
+ iores_array[i].name = NULL;
+ break;
+ }
+ }
+ }
+
+ return 1;
+}
+
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
+ void __iomem **iobase_ptr, u64 start, u32 size)
+{
+ struct resource new_res = {
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ /* Detect a 64 bit address on a 32 bit system */
+ if (start != new_res.start)
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ if (!iores)
+ return devm_ioremap_resource(dev, &new_res);
+
+ if (!*iobase_ptr) {
+ *iobase_ptr = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(*iobase_ptr))
+ return *iobase_ptr;
+ }
+
+ return *iobase_ptr + (new_res.start - iores->start);
+}
+
+/*
+ * Work around broken BIOSs that return inconsistent values from the ACPI
+ * region vs the registers. Trust the ACPI region. Such broken systems
+ * probably cannot send large TPM commands since the buffer will be truncated.
+ */
+static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
+ u64 start, u64 size)
+{
+ if (io_res->start > start || io_res->end < start)
+ return size;
+
+ if (start + size - 1 <= io_res->end)
+ return size;
+
+ dev_err(dev,
+ FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
+ io_res, start, size);
+
+ return io_res->end - start + 1;
+}
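A worked example of the clamp above, with made-up addresses:

/*
 * io_res = [0xfed40000, 0xfed40fff], start = 0xfed40080, size = 0x1000:
 * start + size - 1 = 0xfed4107f falls past io_res->end, so the size is
 * clamped to io_res->end - start + 1 = 0xf80 bytes.
 */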
+
+static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+{
+ struct list_head acpi_resource_list;
+ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
+ struct device *dev = &device->dev;
+ struct resource *iores;
+ void __iomem **iobase_ptr;
+ int i;
+ u32 pa_high, pa_low;
+ u64 cmd_pa;
+ u32 cmd_size;
+ __le64 __rsp_pa;
+ u64 rsp_pa;
+ u32 rsp_size;
+ int ret;
+
+ /*
+ * Pluton sometimes does not define ACPI memory regions.
+ * Mapping is then done in crb_map_pluton
+ */
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ INIT_LIST_HEAD(&acpi_resource_list);
+ ret = acpi_dev_get_resources(device, &acpi_resource_list,
+ crb_check_resource, iores_array);
+ if (ret < 0)
+ return ret;
+ acpi_dev_free_resource_list(&acpi_resource_list);
+
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ }
+ }
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (buf->control_address >= iores_array[i].start &&
+ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+ iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+ sizeof(struct crb_regs_tail));
+
+ if (IS_ERR(priv->regs_t))
+ return PTR_ERR(priv->regs_t);
+
+ /* The ACPI IO region starts at the head area and continues to include
+ * the control area, as one nice sane region except for some older
+ * stuff that puts the control area outside the ACPI IO region.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (iores &&
+ buf->control_address == iores->start +
+ sizeof(*priv->regs_h))
+ priv->regs_h = *iobase_ptr;
+ else
+ dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+ }
+
+ ret = __crb_request_locality(dev, priv, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+ */
+ ret = __crb_cmd_ready(dev, priv);
+ if (ret)
+ goto out_relinquish_locality;
+
+ pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+ pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
+ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; iores_array[i].end; ++i) {
+ if (cmd_pa >= iores_array[i].start &&
+ cmd_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
+ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+ }
+
+ memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
+ rsp_pa = le64_to_cpu(__rsp_pa);
+ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+ iores = NULL;
+ iobase_ptr = NULL;
+ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+ if (rsp_pa >= iores_array[i].start &&
+ rsp_pa <= iores_array[i].end) {
+ iores = iores_array + i;
+ iobase_ptr = iobase_array + i;
+ break;
+ }
+ }
+
+ if (iores)
+ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
+
+ if (cmd_pa != rsp_pa) {
+ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+ rsp_pa, rsp_size);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
+ }
+
+ /* According to the PTP specification, overlapping command and response
+ * buffer sizes must be identical.
+ */
+ if (cmd_size != rsp_size) {
+ dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->rsp = priv->cmd;
+
+out:
+ if (!ret)
+ priv->cmd_size = cmd_size;
+
+ __crb_go_idle(dev, priv);
+
+out_relinquish_locality:
+
+ __crb_relinquish_locality(dev, priv, 0);
+
+ return ret;
+}
+
+static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
+{
+ priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->start_addr, 4);
+ if (IS_ERR(priv->pluton_start_addr))
+ return PTR_ERR(priv->pluton_start_addr);
+
+ priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->reply_addr, 4);
+ if (IS_ERR(priv->pluton_reply_addr))
+ return PTR_ERR(priv->pluton_reply_addr);
+
+ return 0;
+}
+
+static int crb_acpi_add(struct acpi_device *device)
+{
+ struct acpi_table_tpm2 *buf;
+ struct crb_priv *priv;
+ struct tpm_chip *chip;
+ struct device *dev = &device->dev;
+ struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_pluton *crb_pluton;
+ acpi_status status;
+ u32 sm;
+ int rc;
+
+ status = acpi_get_table(ACPI_SIG_TPM2, 1,
+ (struct acpi_table_header **) &buf);
+ if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+ }
+
+ /* Should the FIFO driver handle this? */
+ sm = buf->start_method;
+ if (sm == ACPI_TPM2_MEMORY_MAPPED) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+ priv->smc_func_id = crb_smc->smc_func_id;
+ }
+
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
+ rc = crb_map_pluton(dev, priv, buf, crb_pluton);
+ if (rc)
+ goto out;
+ }
+
+ priv->sm = sm;
+ priv->hid = acpi_device_hid(device);
+
+ rc = crb_map_io(device, priv, buf);
+ if (rc)
+ goto out;
+
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto out;
+ }
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ goto out;
+
+#ifdef CONFIG_X86
+ /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ dev_info(dev, "Disabling hwrng\n");
+ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+ }
+#endif /* CONFIG_X86 */
+
+ rc = tpm_chip_register(chip);
+
+out:
+ acpi_put_table((struct acpi_table_header *)buf);
+ return rc;
+}
+
+static void crb_acpi_remove(struct acpi_device *device)
+{
+ struct device *dev = &device->dev;
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+}
+
+static const struct dev_pm_ops crb_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+};
+
+static const struct acpi_device_id crb_device_ids[] = {
+ {"MSFT0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, crb_device_ids);
+
+static struct acpi_driver crb_acpi_driver = {
+ .name = "tpm_crb",
+ .ids = crb_device_ids,
+ .ops = {
+ .add = crb_acpi_add,
+ .remove = crb_acpi_remove,
+ },
+ .drv = {
+ .pm = &crb_pm,
+ },
+};
+
+module_acpi_driver(crb_acpi_driver);
+MODULE_AUTHOR("Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>");
+MODULE_DESCRIPTION("TPM2 Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
new file mode 100644
index 0000000000..76adb10807
--- /dev/null
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Microsoft Corporation
+ *
+ * Implements a firmware TPM as described here:
+ * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+ *
+ * A reference implementation is available here:
+ * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM
+ */
+
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+#include "tpm.h"
+#include "tpm_ftpm_tee.h"
+
+/*
+ * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896
+ *
+ * Randomly generated, and must correspond to the GUID on the TA side.
+ * Defined here in the reference implementation:
+ * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42
+ */
+static const uuid_t ftpm_ta_uuid =
+ UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4,
+ 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
+
+/**
+ * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @buf: the buffer to store data.
+ * @count: the number of bytes to read.
+ *
+ * Return:
+ * In case of success the number of bytes received.
+ * On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t len;
+
+ len = pvt_data->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
+ __func__, count, len);
+ return -EIO;
+ }
+
+ memcpy(buf, pvt_data->resp_buf, len);
+ pvt_data->resp_len = 0;
+
+ return len;
+}
+
+/**
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @buf: the buffer to send.
+ * @len: the number of bytes to send.
+ *
+ * Return:
+ * In case of success, returns 0.
+ * On failure, -errno
+ */
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t resp_len;
+ int rc;
+ u8 *temp_buf;
+ struct tpm_header *resp_header;
+ struct tee_ioctl_invoke_arg transceive_args;
+ struct tee_param command_params[4];
+ struct tee_shm *shm = pvt_data->shm;
+
+ if (len > MAX_COMMAND_SIZE) {
+ dev_err(&chip->dev,
+ "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
+ __func__, len);
+ return -EIO;
+ }
+
+ memset(&transceive_args, 0, sizeof(transceive_args));
+ memset(command_params, 0, sizeof(command_params));
+ pvt_data->resp_len = 0;
+
+ /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
+ transceive_args = (struct tee_ioctl_invoke_arg) {
+ .func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
+ .session = pvt_data->session,
+ .num_params = 4,
+ };
+
+ /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
+ command_params[0] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
+ .u.memref = {
+ .shm = shm,
+ .size = len,
+ .shm_offs = 0,
+ },
+ };
+
+ temp_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+ memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
+ memcpy(temp_buf, buf, len);
+
+ command_params[1] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ .u.memref = {
+ .shm = shm,
+ .size = MAX_RESPONSE_SIZE,
+ .shm_offs = MAX_COMMAND_SIZE,
+ },
+ };
+
+ rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
+ command_params);
+ if ((rc < 0) || (transceive_args.ret != 0)) {
+ dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
+ __func__, transceive_args.ret);
+ return (rc < 0) ? rc : transceive_args.ret;
+ }
+
+ temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+
+ resp_header = (struct tpm_header *)temp_buf;
+ resp_len = be32_to_cpu(resp_header->length);
+
+ /* sanity check resp_len */
+ if (resp_len < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "%s: tpm response header too small\n",
+ __func__);
+ return -EIO;
+ }
+ if (resp_len > MAX_RESPONSE_SIZE) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
+ __func__, resp_len);
+ return -EIO;
+ }
+
+ /* sanity checks look good, cache the response */
+ memcpy(pvt_data->resp_buf, temp_buf, resp_len);
+ pvt_data->resp_len = resp_len;
+
+ return 0;
+}
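For orientation, the shared-memory layout implied by the two memref parameters above, stated only in terms of the offsets the code already uses:

/*
 * shm offset 0                .. MAX_COMMAND_SIZE - 1    : marshalled command
 * shm offset MAX_COMMAND_SIZE .. + MAX_RESPONSE_SIZE - 1 : fTPM TA response
 *
 * The response is cached in pvt_data->resp_buf here and handed to the core
 * by a later ftpm_tee_tpm_op_recv() call.
 */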
+
+static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
+{
+ return 0;
+}
+
+static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return false;
+}
+
+static const struct tpm_class_ops ftpm_tee_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = ftpm_tee_tpm_op_recv,
+ .send = ftpm_tee_tpm_op_send,
+ .cancel = ftpm_tee_tpm_op_cancel,
+ .status = ftpm_tee_tpm_op_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+ .req_canceled = ftpm_tee_tpm_req_canceled,
+};
+
+/*
+ * Check whether this driver supports the fTPM TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ /*
+	 * Currently this driver only supports the GP Compliant OP-TEE based
+	 * fTPM TA.
+ */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ftpm_tee_probe() - initialize the fTPM
+ * @dev: the device description.
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+static int ftpm_tee_probe(struct device *dev)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct ftpm_tee_private *pvt_data = NULL;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private),
+ GFP_KERNEL);
+ if (!pvt_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, pvt_data);
+
+ /* Open context with TEE driver */
+ pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data->ctx)) {
+ if (PTR_ERR(pvt_data->ctx) == -ENOENT)
+ return -EPROBE_DEFER;
+ dev_err(dev, "%s: tee_client_open_context failed\n", __func__);
+ return PTR_ERR(pvt_data->ctx);
+ }
+
+ /* Open a session with fTPM TA */
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ export_uuid(sess_arg.uuid, &ftpm_ta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "%s: tee_client_open_session failed, err=%x\n",
+ __func__, sess_arg.ret);
+ rc = -EINVAL;
+ goto out_tee_session;
+ }
+ pvt_data->session = sess_arg.session;
+
+ /* Allocate dynamic shared memory with fTPM TA */
+ pvt_data->shm = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_COMMAND_SIZE +
+ MAX_RESPONSE_SIZE);
+ if (IS_ERR(pvt_data->shm)) {
+ dev_err(dev, "%s: tee_shm_alloc_kernel_buf failed\n", __func__);
+ rc = -ENOMEM;
+ goto out_shm_alloc;
+ }
+
+ /* Allocate new struct tpm_chip instance */
+ chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops);
+ if (IS_ERR(chip)) {
+ dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__);
+ rc = PTR_ERR(chip);
+ goto out_chip_alloc;
+ }
+
+ pvt_data->chip = chip;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ /* Create a character device for the fTPM */
+ rc = tpm_chip_register(pvt_data->chip);
+ if (rc) {
+ dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n",
+ __func__, rc);
+ goto out_chip;
+ }
+
+ return 0;
+
+out_chip:
+ put_device(&pvt_data->chip->dev);
+out_chip_alloc:
+ tee_shm_free(pvt_data->shm);
+out_shm_alloc:
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+out_tee_session:
+ tee_client_close_context(pvt_data->ctx);
+
+ return rc;
+}
+
+static int ftpm_plat_tee_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ftpm_tee_probe(dev);
+}
+
+/**
+ * ftpm_tee_remove() - remove the TPM device
+ * @dev: the device description.
+ *
+ * Return:
+ * 0 always.
+ */
+static int ftpm_tee_remove(struct device *dev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(dev);
+
+ /* Release the chip */
+ tpm_chip_unregister(pvt_data->chip);
+
+ /* frees chip */
+ put_device(&pvt_data->chip->dev);
+
+ /* Free the shared memory pool */
+ tee_shm_free(pvt_data->shm);
+
+	/* close the existing session with fTPM TA */
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+
+ /* close the context with TEE driver */
+ tee_client_close_context(pvt_data->ctx);
+
+ /* memory allocated with devm_kzalloc() is freed automatically */
+
+ return 0;
+}
+
+static void ftpm_plat_tee_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ ftpm_tee_remove(dev);
+}
+
+/**
+ * ftpm_plat_tee_shutdown() - shutdown the TPM device
+ * @pdev: the platform_device description.
+ */
+static void ftpm_plat_tee_shutdown(struct platform_device *pdev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+ tee_shm_free(pvt_data->shm);
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+ tee_client_close_context(pvt_data->ctx);
+}
+
+static const struct of_device_id of_ftpm_tee_ids[] = {
+ { .compatible = "microsoft,ftpm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
+
+static struct platform_driver ftpm_tee_plat_driver = {
+ .driver = {
+ .name = "ftpm-tee",
+ .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ },
+ .shutdown = ftpm_plat_tee_shutdown,
+ .probe = ftpm_plat_tee_probe,
+ .remove_new = ftpm_plat_tee_remove,
+};
+
+/* UUID of the fTPM TA */
+static const struct tee_client_device_id optee_ftpm_id_table[] = {
+ {UUID_INIT(0xbc50d971, 0xd4c9, 0x42c4,
+ 0x82, 0xcb, 0x34, 0x3f, 0xb7, 0xf3, 0x78, 0x96)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_ftpm_id_table);
+
+static struct tee_client_driver ftpm_tee_driver = {
+ .id_table = optee_ftpm_id_table,
+ .driver = {
+ .name = "optee-ftpm",
+ .bus = &tee_bus_type,
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+ },
+};
+
+static int __init ftpm_mod_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&ftpm_tee_plat_driver);
+ if (rc)
+ return rc;
+
+ rc = driver_register(&ftpm_tee_driver.driver);
+ if (rc) {
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit ftpm_mod_exit(void)
+{
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ driver_unregister(&ftpm_tee_driver.driver);
+}
+
+module_init(ftpm_mod_init);
+module_exit(ftpm_mod_exit);
+
+MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
+MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
new file mode 100644
index 0000000000..f98daa7bf6
--- /dev/null
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Microsoft Corporation
+ */
+
+#ifndef __TPM_FTPM_TEE_H__
+#define __TPM_FTPM_TEE_H__
+
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+/* The IDs of the TA functions (TAFs) implemented in this TA */
+#define FTPM_OPTEE_TA_SUBMIT_COMMAND (0)
+#define FTPM_OPTEE_TA_EMULATE_PPI (1)
+
+/* max. buffer size supported by fTPM */
+#define MAX_COMMAND_SIZE 4096
+#define MAX_RESPONSE_SIZE 4096
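+
+/*
+ * Sizing sketch (illustrative): ftpm_tee_probe() allocates a single dynamic
+ * shared-memory buffer of MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE = 8192 bytes
+ * with tee_shm_alloc_kernel_buf(), i.e. large enough to hold both a maximal
+ * command and a maximal response for one invocation of the fTPM TA.
+ */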
+
+/**
+ * struct ftpm_tee_private - fTPM's private data
+ * @chip: struct tpm_chip instance registered with tpm framework.
+ * @session: fTPM TA session identifier.
+ * @resp_len: cached response buffer length.
+ * @resp_buf: cached response buffer.
+ * @ctx: TEE context handler.
+ * @shm: Memory pool shared with fTPM TA in TEE.
+ */
+struct ftpm_tee_private {
+ struct tpm_chip *chip;
+ u32 session;
+ size_t resp_len;
+ u8 resp_buf[MAX_RESPONSE_SIZE];
+ struct tee_context *ctx;
+ struct tee_shm *shm;
+};
+
+#endif /* __TPM_FTPM_TEE_H__ */
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
new file mode 100644
index 0000000000..301a95b373
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ATMEL I2C TPM AT97SC3204T
+ *
+ * Copyright (C) 2012 V Lab Technologies
+ * Teddy Reed <teddy@prosauce.org>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ * Device driver for ATMEL I2C TPMs.
+ *
+ * Teddy Reed determined the basic I2C command flow: unlike other I2C TPM
+ * devices, the raw TCG formatted TPM command data is written via I2C and then
+ * the raw TCG formatted TPM response data is returned via I2C.
+ *
+ * TCG status/locality/etc functions seen in the LPC implementation do not
+ * seem to be present.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "tpm.h"
+
+#define I2C_DRIVER_NAME "tpm_i2c_atmel"
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+#define ATMEL_STS_OK 1
+
+struct priv_data {
+ size_t len;
+ /* This is the amount we read on the first try. 25 was chosen to fit a
+ * fair number of read responses in the buffer so a 2nd retry can be
+ * avoided in small message cases. */
+ u8 buffer[sizeof(struct tpm_header) + 25];
+};
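+
+/*
+ * Worked example of the sizing above (illustrative sketch; assumes the
+ * 10-byte struct tpm_header of tag + length + return code):
+ *
+ *	sizeof(struct tpm_header) + 25 == 10 + 25 == 35 bytes
+ *
+ * so any response whose payload is at most 25 bytes is captured whole by the
+ * single i2c_master_recv() in i2c_atmel_read_status() below, and
+ * i2c_atmel_recv() can then return it straight from priv->buffer without a
+ * second bus transfer.
+ */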
+
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ s32 status;
+
+ priv->len = 0;
+
+ if (len <= 2)
+ return -EIO;
+
+ status = i2c_master_send(client, buf, len);
+
+ dev_dbg(&chip->dev,
+ "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
+ (int)min_t(size_t, 64, len), buf, len, status);
+
+ if (status < 0)
+ return status;
+
+ /* The upper layer does not support incomplete sends. */
+ if (status != len)
+ return -E2BIG;
+
+ return 0;
+}
+
+static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct tpm_header *hdr = (struct tpm_header *)priv->buffer;
+ u32 expected_len;
+ int rc;
+
+ if (priv->len == 0)
+ return -EIO;
+
+	/* Get the message size from the message header. If we didn't get the
+	 * whole message in read_status then we need to re-read the
+	 * message. */
+ expected_len = be32_to_cpu(hdr->length);
+ if (expected_len > count)
+ return -ENOMEM;
+
+ if (priv->len >= expected_len) {
+ dev_dbg(&chip->dev,
+ "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ memcpy(buf, priv->buffer, expected_len);
+ return expected_len;
+ }
+
+ rc = i2c_master_recv(client, buf, expected_len);
+ dev_dbg(&chip->dev,
+ "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
+ (int)min_t(size_t, 64, expected_len), buf, count,
+ expected_len);
+ return rc;
+}
+
+static void i2c_atmel_cancel(struct tpm_chip *chip)
+{
+ dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
+}
+
+static u8 i2c_atmel_read_status(struct tpm_chip *chip)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ int rc;
+
+ /* The TPM fails the I2C read until it is ready, so we do the entire
+ * transfer here and buffer it locally. This way the common code can
+ * properly handle the timeouts. */
+ priv->len = 0;
+ memset(priv->buffer, 0, sizeof(priv->buffer));
+
+	/* Once the TPM has completed the command, the response remains readable
+	 * until another command is issued. */
+ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
+ dev_dbg(&chip->dev,
+ "%s: sts=%d", __func__, rc);
+ if (rc <= 0)
+ return 0;
+
+ priv->len = rc;
+
+ return ATMEL_STS_OK;
+}
+
+static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return false;
+}
+
+static const struct tpm_class_ops i2c_atmel = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = i2c_atmel_read_status,
+ .recv = i2c_atmel_recv,
+ .send = i2c_atmel_send,
+ .cancel = i2c_atmel_cancel,
+ .req_complete_mask = ATMEL_STS_OK,
+ .req_complete_val = ATMEL_STS_OK,
+ .req_canceled = i2c_atmel_req_canceled,
+};
+
+static int i2c_atmel_probe(struct i2c_client *client)
+{
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ struct priv_data *priv;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &i2c_atmel);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ /* There is no known way to probe for this device, and all version
+ * information seems to be read via TPM commands. Thus we rely on the
+ * TPM startup process in the common code to detect the device. */
+
+ return tpm_chip_register(chip);
+}
+
+static void i2c_atmel_remove(struct i2c_client *client)
+{
+ struct device *dev = &(client->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ tpm_chip_unregister(chip);
+}
+
+static const struct i2c_device_id i2c_atmel_id[] = {
+ {I2C_DRIVER_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_atmel_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_atmel_of_match[] = {
+ {.compatible = "atmel,at97sc3204t"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_atmel_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_atmel_driver = {
+ .id_table = i2c_atmel_id,
+ .probe = i2c_atmel_probe,
+ .remove = i2c_atmel_remove,
+ .driver = {
+ .name = I2C_DRIVER_NAME,
+ .pm = &i2c_atmel_pm_ops,
+ .of_match_table = of_match_ptr(i2c_atmel_of_match),
+ },
+};
+
+module_i2c_driver(i2c_atmel_driver);
+
+MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>");
+MODULE_DESCRIPTION("Atmel TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
new file mode 100644
index 0000000000..81d8a78dc6
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012,2013 Infineon Technologies
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0 and the
+ * Infineon I2C Protocol Stack Specification v0.20.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Dorn and Kyleen Hall.
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include "tpm.h"
+
+#define TPM_I2C_INFINEON_BUFSIZE 1260
+
+/* max. number of iterations after I2C NAK */
+#define MAX_COUNT 3
+
+#define SLEEP_DURATION_LOW 55
+#define SLEEP_DURATION_HI 65
+
+/* max. number of iterations after I2C NAK for 'long' commands
+ * we need this especially for sending TPM_READY, since the cleanup after the
+ * transition to the ready state may take some time, but it is unpredictable
+ * how long it will take.
+ */
+#define MAX_COUNT_LONG 50
+
+#define SLEEP_DURATION_LONG_LOW 200
+#define SLEEP_DURATION_LONG_HI 220
+
+/* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */
+#define SLEEP_DURATION_RESET_LOW 2400
+#define SLEEP_DURATION_RESET_HI 2600
+
+/* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */
+#define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000)
+#define TPM_TIMEOUT_US_HI (TPM_TIMEOUT_US_LOW + 2000)
+
+/* expected value for DIDVID register */
+#define TPM_TIS_I2C_DID_VID_9635 0xd1150b00L
+#define TPM_TIS_I2C_DID_VID_9645 0x001a15d1L
+
+enum i2c_chip_type {
+ SLB9635,
+ SLB9645,
+ UNKNOWN,
+};
+
+struct tpm_inf_dev {
+ struct i2c_client *client;
+ int locality;
+ /* In addition to the data itself, the buffer must fit the 7-bit I2C
+ * address and the direction bit.
+ */
+ u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
+ struct tpm_chip *chip;
+ enum i2c_chip_type chip_type;
+ unsigned int adapterlimit;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+/*
+ * iic_tpm_read() - read from TPM register
+ * @addr: register address to read from
+ * @buffer: provided by caller
+ * @len: number of bytes to read
+ *
+ * Read len bytes from TPM register and put them into
+ * buffer (little-endian format, i.e. first byte is put into buffer[0]).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: Unfortunately we can't use the combined read/write functions
+ * provided by the i2c core, as the TPM currently does not support the
+ * repeated start condition and has other special requirements.
+ * The i2c_smbus* functions do not work for this chip.
+ *
+ * Return -EIO on error, 0 on success.
+ */
+static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
+{
+
+ struct i2c_msg msg1 = {
+ .addr = tpm_dev.client->addr,
+ .len = 1,
+ .buf = &addr
+ };
+ struct i2c_msg msg2 = {
+ .addr = tpm_dev.client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buffer
+ };
+ struct i2c_msg msgs[] = {msg1, msg2};
+
+ int rc = 0;
+ int count;
+ unsigned int msglen = len;
+
+ /* Lock the adapter for the duration of the whole sequence. */
+ if (!tpm_dev.client->adapter->algo->master_xfer)
+ return -EOPNOTSUPP;
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+
+ if (tpm_dev.chip_type == SLB9645) {
+ /* use a combined read for newer chips
+ * unfortunately the smbus functions are not suitable due to
+ * the 32 byte limit of the smbus.
+ * retries should usually not be needed, but are kept just to
+ * be on the safe side.
+ */
+ for (count = 0; count < MAX_COUNT; count++) {
+ rc = __i2c_transfer(tpm_dev.client->adapter, msgs, 2);
+ if (rc > 0)
+ break; /* break here to skip sleep */
+ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+ }
+ } else {
+ /* Expect to send one command message and one data message, but
+ * support looping over each or both if necessary.
+ */
+ while (len > 0) {
+ /* slb9635 protocol should work in all cases */
+ for (count = 0; count < MAX_COUNT; count++) {
+ rc = __i2c_transfer(tpm_dev.client->adapter,
+ &msg1, 1);
+ if (rc > 0)
+ break; /* break here to skip sleep */
+
+ usleep_range(SLEEP_DURATION_LOW,
+ SLEEP_DURATION_HI);
+ }
+
+ if (rc <= 0)
+ goto out;
+
+ /* After the TPM has successfully received the register
+ * address it needs some time, thus we're sleeping here
+ * again, before retrieving the data
+ */
+ for (count = 0; count < MAX_COUNT; count++) {
+ if (tpm_dev.adapterlimit) {
+ msglen = min_t(unsigned int,
+ tpm_dev.adapterlimit,
+ len);
+ msg2.len = msglen;
+ }
+ usleep_range(SLEEP_DURATION_LOW,
+ SLEEP_DURATION_HI);
+ rc = __i2c_transfer(tpm_dev.client->adapter,
+ &msg2, 1);
+ if (rc > 0) {
+ /* Since len is unsigned, make doubly
+ * sure we do not underflow it.
+ */
+ if (msglen > len)
+ len = 0;
+ else
+ len -= msglen;
+ msg2.buf += msglen;
+ break;
+ }
+ /* If the I2C adapter rejected the request (e.g
+ * when the quirk read_max_len < len) fall back
+ * to a sane minimum value and try again.
+ */
+ if (rc == -EOPNOTSUPP)
+ tpm_dev.adapterlimit =
+ I2C_SMBUS_BLOCK_MAX;
+ }
+
+ if (rc <= 0)
+ goto out;
+ }
+ }
+
+out:
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+ /* take care of 'guard time' */
+ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+ /* __i2c_transfer returns the number of successfully transferred
+ * messages.
+ * So rc should be greater than 0 here otherwise we have an error.
+ */
+ if (rc <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
+ unsigned int sleep_low,
+ unsigned int sleep_hi, u8 max_count)
+{
+ int rc = -EIO;
+ int count;
+
+ struct i2c_msg msg1 = {
+ .addr = tpm_dev.client->addr,
+ .len = len + 1,
+ .buf = tpm_dev.buf
+ };
+
+ if (len > TPM_I2C_INFINEON_BUFSIZE)
+ return -EINVAL;
+
+ if (!tpm_dev.client->adapter->algo->master_xfer)
+ return -EOPNOTSUPP;
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+
+ /* prepend the 'register address' to the buffer */
+ tpm_dev.buf[0] = addr;
+ memcpy(&(tpm_dev.buf[1]), buffer, len);
+
+ /*
+ * NOTE: We have to use these special mechanisms here and unfortunately
+ * cannot rely on the standard behavior of i2c_transfer.
+ * Even for newer chips the smbus functions are not
+ * suitable due to the 32 byte limit of the smbus.
+ */
+ for (count = 0; count < max_count; count++) {
+ rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
+ if (rc > 0)
+ break;
+ usleep_range(sleep_low, sleep_hi);
+ }
+
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
+ /* take care of 'guard time' */
+ usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+ /* __i2c_transfer returns the number of successfully transferred
+ * messages.
+ * So rc should be greater than 0 here otherwise we have an error.
+ */
+ if (rc <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * iic_tpm_write() - write to TPM register
+ * @addr: register address to write to
+ * @buffer: containing data to be written
+ * @len: number of bytes to write
+ *
+ * Write len bytes from provided buffer to TPM register (little
+ * endian format, i.e. buffer[0] is written as first byte).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: use this function instead of the iic_tpm_write_generic function.
+ *
+ * Return -EIO on error, 0 on success
+ */
+static int iic_tpm_write(u8 addr, u8 *buffer, size_t len)
+{
+ return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW,
+ SLEEP_DURATION_HI, MAX_COUNT);
+}
+
+/*
+ * This function is needed especially for the cleanup situation after
+ * sending TPM_READY
+ */
+static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len)
+{
+ return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW,
+ SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG);
+}
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750, /* ms */
+ TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+};
+
+#define TPM_ACCESS(l) (0x0000 | ((l) << 4))
+#define TPM_STS(l) (0x0001 | ((l) << 4))
+#define TPM_DATA_FIFO(l) (0x0005 | ((l) << 4))
+#define TPM_DID_VID(l) (0x0006 | ((l) << 4))
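+
+/*
+ * Worked example of the locality encoding above (illustrative only): the
+ * locality index lands in bits 7:4 of the register address, so
+ *
+ *	TPM_ACCESS(0) == 0x00,  TPM_ACCESS(1) == 0x10,
+ *	TPM_STS(0)    == 0x01,  TPM_STS(1)    == 0x11,
+ *	TPM_DATA_FIFO(0) == 0x05 and TPM_DID_VID(0) == 0x06.
+ *
+ * This is also why get_burstcount() reads from TPM_STS(locality) + 1: the
+ * three little-endian burstCount bytes follow the status byte.
+ */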
+
+static bool check_locality(struct tpm_chip *chip, int loc)
+{
+ u8 buf;
+ int rc;
+
+ rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1);
+ if (rc < 0)
+ return false;
+
+ if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ tpm_dev.locality = loc;
+ return true;
+ }
+
+ return false;
+}
+
+/* implementation similar to tpm_tis */
+static void release_locality(struct tpm_chip *chip, int loc, int force)
+{
+ u8 buf;
+ if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0)
+ return;
+
+ if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) {
+ buf = TPM_ACCESS_ACTIVE_LOCALITY;
+ iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+ }
+}
+
+static int request_locality(struct tpm_chip *chip, int loc)
+{
+ unsigned long stop;
+ u8 buf = TPM_ACCESS_REQUEST_USE;
+
+ if (check_locality(chip, loc))
+ return loc;
+
+ iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+
+ /* wait for burstcount */
+ stop = jiffies + chip->timeout_a;
+ do {
+ if (check_locality(chip, loc))
+ return loc;
+ usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+ } while (time_before(jiffies, stop));
+
+ return -ETIME;
+}
+
+static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
+{
+ /* NOTE: since I2C read may fail, return 0 in this case --> time-out */
+ u8 buf = 0xFF;
+ u8 i = 0;
+
+ do {
+ if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0)
+ return 0;
+
+ i++;
+		/* if locality is set, STS should not be 0xFF */
+ } while ((buf == 0xFF) && i < 10);
+
+ return buf;
+}
+
+static void tpm_tis_i2c_ready(struct tpm_chip *chip)
+{
+ /* this causes the current command to be aborted */
+ u8 buf = TPM_STS_COMMAND_READY;
+ iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1);
+}
+
+static ssize_t get_burstcount(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ ssize_t burstcnt;
+ u8 buf[3];
+
+ /* wait for burstcount */
+ /* which timeout value, spec has 2 answers (c & d) */
+ stop = jiffies + chip->timeout_d;
+ do {
+ /* Note: STS is little endian */
+ if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0)
+ burstcnt = 0;
+ else
+ burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0];
+
+ if (burstcnt)
+ return burstcnt;
+
+ usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ int *status)
+{
+ unsigned long stop;
+
+ /* check current status */
+ *status = tpm_tis_i2c_status(chip);
+ if ((*status != 0xFF) && (*status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+ do {
+ /* since we just checked the status, give the TPM some time */
+ usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+ *status = tpm_tis_i2c_status(chip);
+ if ((*status & mask) == mask)
+ return 0;
+
+ } while (time_before(jiffies, stop));
+
+ return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ size_t size = 0;
+ ssize_t burstcnt;
+ u8 retries = 0;
+ int rc;
+
+ while (size < count) {
+ burstcnt = get_burstcount(chip);
+
+ /* burstcnt < 0 = TPM is busy */
+ if (burstcnt < 0)
+ return burstcnt;
+
+ /* limit received data to max. left */
+ if (burstcnt > (count - size))
+ burstcnt = count - size;
+
+ rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality),
+ &(buf[size]), burstcnt);
+ if (rc == 0)
+ size += burstcnt;
+ else if (rc < 0)
+ retries++;
+
+ /* avoid endless loop in case of broken HW */
+ if (retries > MAX_COUNT_LONG)
+ return -EIO;
+ }
+ return size;
+}
+
+static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0;
+ int status;
+ u32 expected;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ /* read first 10 bytes, including tag, paramsize, and result */
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *)(buf + 2));
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+ wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
+ if (status & TPM_STS_DATA_AVAIL) { /* retry? */
+ dev_err(&chip->dev, "Error left over data\n");
+ size = -EIO;
+ goto out;
+ }
+
+out:
+ tpm_tis_i2c_ready(chip);
+ /* The TPM needs some time to clean up here,
+ * so we sleep rather than keeping the bus busy
+ */
+ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+ release_locality(chip, tpm_dev.locality, 0);
+ return size;
+}
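+
+/*
+ * Response framing handled by tpm_tis_i2c_recv() above, as an illustrative
+ * sketch (assumes the standard 10-byte TPM header): bytes 0-1 carry the tag,
+ * bytes 2-5 the big-endian total length and bytes 6-9 the return code, hence
+ *
+ *	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+ *
+ * e.g. the header-only response 00 c4 00 00 00 0a 00 00 00 00 decodes to
+ * expected == 0x0a == TPM_HEADER_SIZE, so no further FIFO reads are needed.
+ */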
+
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, status;
+ ssize_t burstcnt;
+ size_t count = 0;
+ u8 retries = 0;
+ u8 sts = TPM_STS_GO;
+
+ if (len > TPM_I2C_INFINEON_BUFSIZE)
+ return -E2BIG;
+
+ if (request_locality(chip, 0) < 0)
+ return -EBUSY;
+
+ status = tpm_tis_i2c_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_i2c_ready(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY,
+ chip->timeout_b, &status) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while (count < len - 1) {
+ burstcnt = get_burstcount(chip);
+
+ /* burstcnt < 0 = TPM is busy */
+ if (burstcnt < 0)
+ return burstcnt;
+
+ if (burstcnt > (len - 1 - count))
+ burstcnt = len - 1 - count;
+
+ rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality),
+ &(buf[count]), burstcnt);
+ if (rc == 0)
+ count += burstcnt;
+ else if (rc < 0)
+ retries++;
+
+ /* avoid endless loop in case of broken HW */
+ if (retries > MAX_COUNT_LONG) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ wait_for_stat(chip, TPM_STS_VALID,
+ chip->timeout_c, &status);
+
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
+ /* write last byte */
+ iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1);
+ wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* go and do it */
+ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
+
+ return 0;
+out_err:
+ tpm_tis_i2c_ready(chip);
+ /* The TPM needs some time to clean up here,
+ * so we sleep rather than keeping the bus busy
+ */
+ usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+ release_locality(chip, tpm_dev.locality, 0);
+ return rc;
+}
+
+static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops tpm_tis_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = tpm_tis_i2c_status,
+ .recv = tpm_tis_i2c_recv,
+ .send = tpm_tis_i2c_send,
+ .cancel = tpm_tis_i2c_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_i2c_req_canceled,
+};
+
+static int tpm_tis_i2c_init(struct device *dev)
+{
+ u32 vendor;
+ int rc = 0;
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_tis_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ if (request_locality(chip, 0) != 0) {
+ dev_err(dev, "could not request locality\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* read four bytes from DID_VID register */
+ if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) {
+ dev_err(dev, "could not read vendor id\n");
+ rc = -EIO;
+ goto out_release;
+ }
+
+ if (vendor == TPM_TIS_I2C_DID_VID_9645) {
+ tpm_dev.chip_type = SLB9645;
+ } else if (vendor == TPM_TIS_I2C_DID_VID_9635) {
+ tpm_dev.chip_type = SLB9635;
+ } else {
+ dev_err(dev, "vendor id did not match! ID was %08x\n", vendor);
+ rc = -ENODEV;
+ goto out_release;
+ }
+
+ dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16);
+
+ tpm_dev.chip = chip;
+
+ return tpm_chip_register(chip);
+out_release:
+ release_locality(chip, tpm_dev.locality, 1);
+ tpm_dev.client = NULL;
+out_err:
+ return rc;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_table[] = {
+ {"tpm_i2c_infineon"},
+ {"slb9635tt"},
+ {"slb9645tt"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tpm_tis_i2c_of_match[] = {
+ {.compatible = "infineon,tpm_i2c_infineon"},
+ {.compatible = "infineon,slb9635tt"},
+ {.compatible = "infineon,slb9645tt"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static int tpm_tis_i2c_probe(struct i2c_client *client)
+{
+ int rc;
+ struct device *dev = &(client->dev);
+
+ if (tpm_dev.client != NULL) {
+ dev_err(dev, "This driver only supports one client at a time\n");
+ return -EBUSY; /* We only support one client */
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "no algorithms associated to the i2c bus\n");
+ return -ENODEV;
+ }
+
+ tpm_dev.client = client;
+ rc = tpm_tis_i2c_init(&client->dev);
+ if (rc != 0) {
+ tpm_dev.client = NULL;
+ rc = -ENODEV;
+ }
+ return rc;
+}
+
+static void tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = tpm_dev.chip;
+
+ tpm_chip_unregister(chip);
+ release_locality(chip, tpm_dev.locality, 1);
+ tpm_dev.client = NULL;
+}
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .id_table = tpm_tis_i2c_table,
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .driver = {
+ .name = "tpm_i2c_infineon",
+ .pm = &tpm_tis_i2c_ops,
+ .of_match_table = of_match_ptr(tpm_tis_i2c_of_match),
+ },
+};
+
+module_i2c_driver(tpm_tis_i2c_driver);
+MODULE_AUTHOR("Peter Huewe <peter.huewe@infineon.com>");
+MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver");
+MODULE_VERSION("2.2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
new file mode 100644
index 0000000000..d7be03c410
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX,
+ * based on the TCG TPM Interface Spec version 1.2.
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2011, Nuvoton Technology Corporation.
+ * Dan Morav <dan.morav@nuvoton.com>
+ * Copyright (C) 2013, Obsidian Research Corp.
+ * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+ *
+ * Nuvoton contact information: APC.Support@nuvoton.com
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include "tpm.h"
+
+/* I2C interface offsets */
+#define TPM_STS 0x00
+#define TPM_BURST_COUNT 0x01
+#define TPM_DATA_FIFO_W 0x20
+#define TPM_DATA_FIFO_R 0x40
+#define TPM_VID_DID_RID 0x60
+#define TPM_I2C_RETRIES 5
+/*
+ * I2C bus device maximum buffer size w/o counting I2C address or command,
+ * i.e. the max size required for an I2C write is 34 = addr + command + 32 bytes of data
+ */
+#define TPM_I2C_MAX_BUF_SIZE 32
+#define TPM_I2C_RETRY_COUNT 32
+#define TPM_I2C_BUS_DELAY 1000 /* usec */
+#define TPM_I2C_RETRY_DELAY_SHORT (2 * 1000) /* usec */
+#define TPM_I2C_RETRY_DELAY_LONG (10 * 1000) /* usec */
+#define TPM_I2C_DELAY_RANGE 300 /* usec */
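+
+/*
+ * Worked example of the 34-byte limit noted above (illustrative only): one
+ * I2C block write on the wire is
+ *
+ *	1 (I2C address) + 1 (command/offset) + 32 (data) = 34 bytes,
+ *
+ * which is why i2c_nuvoton_get_burstcount() clamps the advertised burstCount
+ * with min_t(u8, TPM_I2C_MAX_BUF_SIZE, data): commands and responses longer
+ * than 32 bytes are simply moved through the FIFO in 32-byte bursts.
+ */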
+
+#define OF_IS_TPM2 ((void *)1)
+#define I2C_IS_TPM2 1
+
+struct priv_data {
+ int irq;
+ unsigned int intrs;
+ wait_queue_head_t read_queue;
+};
+
+static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_read_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
+ u8 *data)
+{
+ s32 status;
+
+ status = i2c_smbus_write_i2c_block_data(client, offset, size, data);
+ dev_dbg(&client->dev,
+ "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__,
+ offset, size, (int)size, data, status);
+ return status;
+}
+
+#define TPM_STS_VALID 0x80
+#define TPM_STS_COMMAND_READY 0x40
+#define TPM_STS_GO 0x20
+#define TPM_STS_DATA_AVAIL 0x10
+#define TPM_STS_EXPECT 0x08
+#define TPM_STS_RESPONSE_RETRY 0x02
+#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */
+
+#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */
+#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */
+
+/* read TPM_STS register */
+static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ s32 status;
+ u8 data;
+
+ status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
+ if (status <= 0) {
+ dev_err(&chip->dev, "%s() error return %d\n", __func__,
+ status);
+ data = TPM_STS_ERR_VAL;
+ }
+
+ return data;
+}
+
+/* write byte to TPM_STS register */
+static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
+{
+ s32 status;
+ int i;
+
+ /* this causes the current command to be aborted */
+ for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
+ status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
+ if (status < 0)
+ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+ + TPM_I2C_DELAY_RANGE);
+ }
+ return status;
+}
+
+/* write commandReady to TPM_STS register */
+static void i2c_nuvoton_ready(struct tpm_chip *chip)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ s32 status;
+
+ /* this causes the current command to be aborted */
+ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
+ if (status < 0)
+ dev_err(&chip->dev,
+ "%s() fail to write TPM_STS.commandReady\n", __func__);
+}
+
+/* read burstCount field from TPM_STS register
+ * return -1 on failure to read */
+static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
+ struct tpm_chip *chip)
+{
+ unsigned long stop = jiffies + chip->timeout_d;
+ s32 status;
+ int burst_count = -1;
+ u8 data;
+
+ /* wait for burstcount to be non-zero */
+ do {
+ /* in I2C burstCount is 1 byte */
+ status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1,
+ &data);
+ if (status > 0 && data > 0) {
+ burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
+ break;
+ }
+ usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+ + TPM_I2C_DELAY_RANGE);
+ } while (time_before(jiffies, stop));
+
+ return burst_count;
+}
+
+/*
+ * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail; any call to this
+ * function that is not waiting for dataAvail should pass a NULL queue to
+ * avoid waiting for the interrupt.
+ */
+static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
+{
+ u8 status = i2c_nuvoton_read_status(chip);
+ return (status != TPM_STS_ERR_VAL) && ((status & mask) == value);
+}
+
+static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
+ u32 timeout, wait_queue_head_t *queue)
+{
+ if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) {
+ s32 rc;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned int cur_intrs = priv->intrs;
+
+ enable_irq(priv->irq);
+ rc = wait_event_interruptible_timeout(*queue,
+ cur_intrs != priv->intrs,
+ timeout);
+ if (rc > 0)
+ return 0;
+ /* At this point we know that the SINT pin is asserted, so we
+ * do not need to do i2c_nuvoton_check_status */
+ } else {
+ unsigned long ten_msec, stop;
+ bool status_valid;
+
+ /* check current status */
+ status_valid = i2c_nuvoton_check_status(chip, mask, value);
+ if (status_valid)
+ return 0;
+
+ /* use polling to wait for the event */
+ ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+ stop = jiffies + timeout;
+ do {
+ if (time_before(jiffies, ten_msec))
+ usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
+ TPM_I2C_RETRY_DELAY_SHORT
+ + TPM_I2C_DELAY_RANGE);
+ else
+ usleep_range(TPM_I2C_RETRY_DELAY_LONG,
+ TPM_I2C_RETRY_DELAY_LONG
+ + TPM_I2C_DELAY_RANGE);
+ status_valid = i2c_nuvoton_check_status(chip, mask,
+ value);
+ if (status_valid)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ value);
+ return -ETIMEDOUT;
+}
+
+/* wait for dataAvail field to be set in the TPM_STS register */
+static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
+ wait_queue_head_t *queue)
+{
+ return i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ timeout, queue);
+}
+
+/* Read @count bytes into @buf from TPM_RD_FIFO register */
+static int i2c_nuvoton_recv_data(struct i2c_client *client,
+ struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ s32 rc;
+ int burst_count, bytes2read, size = 0;
+
+ while (size < count &&
+ i2c_nuvoton_wait_for_data_avail(chip,
+ chip->timeout_c,
+ &priv->read_queue) == 0) {
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(&chip->dev,
+ "%s() fail to read burstCount=%d\n", __func__,
+ burst_count);
+ return -EIO;
+ }
+ bytes2read = min_t(size_t, burst_count, count - size);
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
+ bytes2read, &buf[size]);
+ if (rc < 0) {
+ dev_err(&chip->dev,
+ "%s() fail on i2c_nuvoton_read_buf()=%d\n",
+ __func__, rc);
+ return -EIO;
+ }
+ dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
+ size += bytes2read;
+ }
+
+ return size;
+}
+
+/* Read TPM command results */
+static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct device *dev = chip->dev.parent;
+ struct i2c_client *client = to_i2c_client(dev);
+ s32 rc;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;
+
+ if (count < TPM_HEADER_SIZE) {
+ i2c_nuvoton_ready(chip); /* return to idle */
+ dev_err(dev, "%s() count < header size\n", __func__);
+ return -EIO;
+ }
+ for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
+ if (retries > 0) {
+ /* if this is not the first trial, set responseRetry */
+ i2c_nuvoton_write_status(client,
+ TPM_STS_RESPONSE_RETRY);
+ }
+ /*
+ * read first available (> 10 bytes), including:
+ * tag, paramsize, and result
+ */
+ status = i2c_nuvoton_wait_for_data_avail(
+ chip, chip->timeout_c, &priv->read_queue);
+ if (status != 0) {
+ dev_err(dev, "%s() timeout on dataAvail\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ burst_count = i2c_nuvoton_get_burstcount(client, chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail to get burstCount\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ size = i2c_nuvoton_recv_data(client, chip, buf,
+ burst_count);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(dev, "%s() fail to read header\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ /*
+ * convert number of expected bytes field from big endian 32 bit
+ * to machine native
+ */
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count || expected < size) {
+ dev_err(dev, "%s() expected > count\n", __func__);
+ size = -EIO;
+ continue;
+ }
+ rc = i2c_nuvoton_recv_data(client, chip, &buf[size],
+ expected - size);
+ size += rc;
+ if (rc < 0 || size < expected) {
+ dev_err(dev, "%s() fail to read remainder of result\n",
+ __func__);
+ size = -EIO;
+ continue;
+ }
+ if (i2c_nuvoton_wait_for_stat(
+ chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
+ TPM_STS_VALID, chip->timeout_c,
+ NULL)) {
+ dev_err(dev, "%s() error left over data\n", __func__);
+ size = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ i2c_nuvoton_ready(chip);
+ dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
+ return size;
+}
+
+/*
+ * Send TPM command.
+ *
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct device *dev = chip->dev.parent;
+ struct i2c_client *client = to_i2c_client(dev);
+ u32 ordinal;
+ unsigned long duration;
+ size_t count = 0;
+ int burst_count, bytes2write, retries, rc = -EIO;
+
+ for (retries = 0; retries < TPM_RETRY; retries++) {
+ i2c_nuvoton_ready(chip);
+ if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->timeout_b, NULL)) {
+ dev_err(dev, "%s() timeout on commandReady\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ rc = 0;
+ while (count < len - 1) {
+ burst_count = i2c_nuvoton_get_burstcount(client,
+ chip);
+ if (burst_count < 0) {
+ dev_err(dev, "%s() fail get burstCount\n",
+ __func__);
+ rc = -EIO;
+ break;
+ }
+ bytes2write = min_t(size_t, burst_count,
+ len - 1 - count);
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W,
+ bytes2write, &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail i2cWriteBuf\n",
+ __func__);
+ break;
+ }
+ dev_dbg(dev, "%s(%d):", __func__, bytes2write);
+ count += bytes2write;
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ TPM_STS_VALID |
+ TPM_STS_EXPECT,
+ chip->timeout_c,
+ NULL);
+ if (rc < 0) {
+ dev_err(dev, "%s() timeout on Expect\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (rc < 0)
+ continue;
+
+ /* write last byte */
+ rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1,
+ &buf[count]);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write last byte\n",
+ __func__);
+ rc = -EIO;
+ continue;
+ }
+ dev_dbg(dev, "%s(last): %02x", __func__, buf[count]);
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_VALID | TPM_STS_EXPECT,
+ TPM_STS_VALID,
+ chip->timeout_c, NULL);
+ if (rc) {
+ dev_err(dev, "%s() timeout on Expect to clear\n",
+ __func__);
+ rc = -ETIMEDOUT;
+ continue;
+ }
+ break;
+ }
+ if (rc < 0) {
+ /* retries == TPM_RETRY */
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ /* execute the TPM command */
+ rc = i2c_nuvoton_write_status(client, TPM_STS_GO);
+ if (rc < 0) {
+ dev_err(dev, "%s() fail to write Go\n", __func__);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
+ if (rc) {
+ dev_err(dev, "%s() timeout command duration %ld\n",
+ __func__, duration);
+ i2c_nuvoton_ready(chip);
+ return rc;
+ }
+
+ dev_dbg(dev, "%s() -> %zd\n", __func__, len);
+ return 0;
+}
+
+static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct tpm_class_ops tpm_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = i2c_nuvoton_read_status,
+ .recv = i2c_nuvoton_recv,
+ .send = i2c_nuvoton_send,
+ .cancel = i2c_nuvoton_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = i2c_nuvoton_req_canceled,
+};
+
+/* The only purpose for the handler is to signal to any waiting threads that
+ * the interrupt is currently being asserted. The driver does not do any
+ * processing triggered by interrupts, and the chip provides no way to mask at
+ * the source (plus that would be slow over I2C). Run the IRQ as a one-shot;
+ * this means it cannot be shared. */
+static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ priv->intrs++;
+ wake_up(&priv->read_queue);
+ disable_irq_nosync(priv->irq);
+ return IRQ_HANDLED;
+}
+
+static int get_vid(struct i2c_client *client, u32 *res)
+{
+ static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe };
+ u32 temp;
+ s32 rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+ rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp);
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) {
+ /*
+ * f/w rev 2.81 has an issue where the VID_DID_RID is not
+		 * reporting the right value, so give it another chance at
+ * offset 0x20 (FIFO_W).
+ */
+ rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4,
+ (u8 *) (&temp));
+ if (rc < 0)
+ return rc;
+
+ /* check WPCT301 values - ignore RID */
+ if (memcmp(&temp, vid_did_rid_value,
+ sizeof(vid_did_rid_value)))
+ return -ENODEV;
+ }
+
+ *res = temp;
+ return 0;
+}
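+
+/*
+ * Illustrative reading of the check above: the expected ID bytes are
+ * { 0x50, 0x10, 0xfe }, i.e. a little-endian 16-bit VID of 0x1050 followed
+ * by a DID of 0xfe; memcmp() covers only sizeof(vid_did_rid_value) == 3
+ * bytes, so the trailing RID byte is deliberately ignored. The probe below
+ * prints the same fields as (u16)vid, (u8)(vid >> 16) and (u8)(vid >> 24).
+ */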
+
+static int i2c_nuvoton_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &client->dev;
+ struct priv_data *priv;
+ u32 vid = 0;
+
+ rc = get_vid(client, &vid);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid,
+ (u8) (vid >> 16), (u8) (vid >> 24));
+
+ chip = tpmm_chip_alloc(dev, &tpm_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (dev->of_node) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(dev->driver->of_match_table, dev);
+ if (of_id && of_id->data == OF_IS_TPM2)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ } else
+ if (id->driver_data == I2C_IS_TPM2)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ init_waitqueue_head(&priv->read_queue);
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ /*
+	 * I2C intfcaps (interrupt capabilities) in the chip are hard coded to:
+	 *   TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT.
+	 * The IRQ should be set in the i2c_board_info (which is done
+	 * automatically by of_i2c_register_devices() for device tree users). */
+ priv->irq = client->irq;
+ if (client->irq) {
+ dev_dbg(dev, "%s() priv->irq\n", __func__);
+ rc = devm_request_irq(dev, client->irq,
+ i2c_nuvoton_int_handler,
+ IRQF_TRIGGER_LOW,
+ dev_name(&chip->dev),
+ chip);
+ if (rc) {
+ dev_err(dev, "%s() Unable to request irq: %d for use\n",
+ __func__, priv->irq);
+ priv->irq = 0;
+ } else {
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+ /* Clear any pending interrupt */
+ i2c_nuvoton_ready(chip);
+ /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
+ rc = i2c_nuvoton_wait_for_stat(chip,
+ TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->timeout_b,
+ NULL);
+ if (rc == 0) {
+ /*
+ * TIS is in ready state
+ * write dummy byte to enter reception state
+ * TPM_DATA_FIFO_W <- rc (0)
+ */
+ rc = i2c_nuvoton_write_buf(client,
+ TPM_DATA_FIFO_W,
+ 1, (u8 *) (&rc));
+ if (rc < 0)
+ return rc;
+ /* TPM_STS <- 0x40 (commandReady) */
+ i2c_nuvoton_ready(chip);
+ } else {
+ /*
+ * timeout_b reached - command was
+ * aborted. TIS should now be in idle state -
+ * only TPM_STS_VALID should be set
+ */
+ if (i2c_nuvoton_read_status(chip) !=
+ TPM_STS_VALID)
+ return -EIO;
+ }
+ }
+ }
+
+ return tpm_chip_register(chip);
+}
+
+static void i2c_nuvoton_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+}
+
+static const struct i2c_device_id i2c_nuvoton_id[] = {
+ {"tpm_i2c_nuvoton"},
+ {"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_nuvoton_of_match[] = {
+ {.compatible = "nuvoton,npct501"},
+ {.compatible = "winbond,wpct301"},
+ {.compatible = "nuvoton,npct601", .data = OF_IS_TPM2},
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver i2c_nuvoton_driver = {
+ .id_table = i2c_nuvoton_id,
+ .probe = i2c_nuvoton_probe,
+ .remove = i2c_nuvoton_remove,
+ .driver = {
+ .name = "tpm_i2c_nuvoton",
+ .pm = &i2c_nuvoton_pm_ops,
+ .of_match_table = of_match_ptr(i2c_nuvoton_of_match),
+ },
+};
+
+module_i2c_driver(i2c_nuvoton_driver);
+
+MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
+MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
new file mode 100644
index 0000000000..d3989b257f
--- /dev/null
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012-2020 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <asm/vio.h>
+#include <asm/irq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <asm/prom.h>
+
+#include "tpm.h"
+#include "tpm_ibmvtpm.h"
+
+static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
+
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
+ { "IBM,vtpm", "IBM,vtpm"},
+ { "IBM,vtpm", "IBM,vtpm20"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
+
+/**
+ * ibmvtpm_send_crq_word() - Send a CRQ request
+ * @vdev: vio device struct
+ * @w1: pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ * 0 - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ * ibmvtpm_send_crq() - Send a CRQ request
+ *
+ * @vdev: vio device struct
+ * @valid: Valid field
+ * @msg: Type field
+ * @len: Length field
+ * @data: Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type | Length | Data
+ * -----------------------------------------------------------------------
+ * Word1 | Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on a big-endian host):
+ *
+ * struct ibmvtpm_crq {
+ * u8 valid;
+ * u8 msg;
+ * __be16 len;
+ * __be32 data;
+ * __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register, so just compute the numeric
+ * value to load into the register, avoiding the byteswap altogether.
+ * Endianness only affects memory loads and stores; registers are internally
+ * represented the same.
+ *
+ * Return:
+ * 0 (H_SUCCESS) - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+ u8 valid, u8 msg, u16 len, u32 data)
+{
+ u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+ (u64)data;
+ return ibmvtpm_send_crq_word(vdev, w1);
+}
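+
+/*
+ * Worked example of the packing above (illustrative; the field values are
+ * made up, only the shifts mirror the code): for valid = 0x80, msg = 0x01,
+ * len = 0x0200 and data = 0x12345678 the single register word is
+ *
+ *	w1 = (0x80ULL << 56) | (0x01ULL << 48) | (0x0200ULL << 32) | 0x12345678
+ *	   = 0x8001020012345678
+ *
+ * so, on the big-endian host, byte 0 holds the valid flag, byte 1 the type,
+ * bytes 2-3 the length and bytes 4-7 the data - exactly the layout in the
+ * table, with no byteswap because the value never round-trips through memory.
+ */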
+
+/**
+ * tpm_ibmvtpm_recv - Receive data after send
+ *
+ * @chip: tpm chip struct
+ * @buf: buffer to read
+ * @count: size of buffer
+ *
+ * Return:
+ * Number of bytes read
+ */
+static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ u16 len;
+
+ if (!ibmvtpm->rtce_buf) {
+ dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+ return 0;
+ }
+
+ len = ibmvtpm->res_len;
+
+ if (count < len) {
+ dev_err(ibmvtpm->dev,
+ "Invalid size in recv: count=%zd, crq_size=%d\n",
+ count, len);
+ return -EIO;
+ }
+
+ spin_lock(&ibmvtpm->rtce_lock);
+ memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+ memset(ibmvtpm->rtce_buf, 0, len);
+ ibmvtpm->res_len = 0;
+ spin_unlock(&ibmvtpm->rtce_lock);
+ return len;
+}
+
+/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "%s failed rc=%d\n", __func__, rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev: device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_ENABLE_CRQ,
+ ibmvtpm->vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc) {
+ dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = ibmvtpm_crq_send_init(ibmvtpm);
+ if (rc)
+ dev_err(dev, "Error send_init rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_send() - Send a TPM command
+ * @chip: tpm chip struct
+ * @buf: buffer contains data to send
+ * @count: size of buffer
+ *
+ * Return:
+ * 0 on success,
+ * -errno on error
+ */
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ bool retry = true;
+ int rc, sig;
+
+ if (!ibmvtpm->rtce_buf) {
+ dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+ return 0;
+ }
+
+ if (count > ibmvtpm->rtce_size) {
+ dev_err(ibmvtpm->dev,
+ "Invalid size in send: count=%zd, rtce_size=%d\n",
+ count, ibmvtpm->rtce_size);
+ return -EIO;
+ }
+
+ if (ibmvtpm->tpm_processing_cmd) {
+ dev_info(ibmvtpm->dev,
+ "Need to wait for TPM to finish\n");
+ /* wait for previous command to finish */
+ sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
+ if (sig)
+ return -EINTR;
+ }
+
+ spin_lock(&ibmvtpm->rtce_lock);
+ ibmvtpm->res_len = 0;
+ memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+
+ /*
+ * set the processing flag before the Hcall, since we may get the
+ * result (interrupt) before even being able to check rc.
+ */
+ ibmvtpm->tpm_processing_cmd = 1;
+
+again:
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+ count, ibmvtpm->rtce_dma_handle);
+ if (rc != H_SUCCESS) {
+ /*
+ * H_CLOSED can be returned after LPM resume. Call
+ * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
+ * ibmvtpm_send_crq() once before failing.
+ */
+ if (rc == H_CLOSED && retry) {
+ tpm_ibmvtpm_resume(ibmvtpm->dev);
+ retry = false;
+ goto again;
+ }
+ dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ ibmvtpm->tpm_processing_cmd = 0;
+ }
+
+ spin_unlock(&ibmvtpm->rtce_lock);
+ return 0;
+}
+
+static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
+{
+ return;
+}
+
+static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
+{
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+
+ return ibmvtpm->tpm_processing_cmd;
+}
+
+/**
+ * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
+ *
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
+ * - Note that this is the vtpm version and not the tpm version
+ *
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_get_version failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc;
+
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * tpm_ibmvtpm_remove - ibm vtpm remove entry point
+ * @vdev: vio device struct
+ */
+static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ tpm_chip_unregister(chip);
+
+ free_irq(vdev->irq, ibmvtpm);
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
+ CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
+
+ if (ibmvtpm->rtce_buf) {
+ dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
+ ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
+ kfree(ibmvtpm->rtce_buf);
+ }
+
+ kfree(ibmvtpm);
+ /* For tpm_ibmvtpm_get_desired_dma */
+ dev_set_drvdata(&vdev->dev, NULL);
+}
+
+/**
+ * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
+ * @vdev: vio device struct
+ *
+ * Return:
+ * Number of bytes the driver needs to DMA map.
+ */
+static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+ struct ibmvtpm_dev *ibmvtpm;
+
+ /*
+ * ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K is required
+ * for the TCE-mapped buffer in addition to the CRQ.
+ */
+ if (chip)
+ ibmvtpm = dev_get_drvdata(&chip->dev);
+ else
+ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+
+ return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+}
+
+/**
+ * tpm_ibmvtpm_suspend - Suspend
+ * @dev: device struct
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int tpm_ibmvtpm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvtpm_reset_crq - Reset CRQ
+ *
+ * @ibmvtpm: ibm vtpm struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc = 0;
+
+ do {
+ if (rc)
+ msleep(100);
+ rc = plpar_hcall_norets(H_FREE_CRQ,
+ ibmvtpm->vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
+ ibmvtpm->crq_queue.index = 0;
+
+ return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
+ ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+}
+
+static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == 0);
+}
+
+static const struct tpm_class_ops tpm_ibmvtpm = {
+ .recv = tpm_ibmvtpm_recv,
+ .send = tpm_ibmvtpm_send,
+ .cancel = tpm_ibmvtpm_cancel,
+ .status = tpm_ibmvtpm_status,
+ .req_complete_mask = 1,
+ .req_complete_val = 0,
+ .req_canceled = tpm_ibmvtpm_req_canceled,
+};
+
+static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
+ .suspend = tpm_ibmvtpm_suspend,
+ .resume = tpm_ibmvtpm_resume,
+};
+
+/**
+ * ibmvtpm_crq_get_next - Get next responded crq
+ *
+ * @ibmvtpm: vtpm device struct
+ *
+ * Return: vtpm crq pointer or NULL.
+ */
+static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
+{
+ struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
+ struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];
+
+ if (crq->valid & VTPM_MSG_RES) {
+ if (++crq_q->index == crq_q->num_entry)
+ crq_q->index = 0;
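+		/* Read the rest of the entry only after its valid bit has been seen. */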
+ smp_rmb();
+ } else
+ crq = NULL;
+ return crq;
+}
+
+/**
+ * ibmvtpm_crq_process - Process responded crq
+ *
+ * @crq: crq to be processed
+ * @ibmvtpm: vtpm device struct
+ *
+ */
+static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ struct ibmvtpm_dev *ibmvtpm)
+{
+ int rc = 0;
+
+ switch (crq->valid) {
+ case VALID_INIT_CRQ:
+ switch (crq->msg) {
+ case INIT_CRQ_RES:
+ dev_info(ibmvtpm->dev, "CRQ initialized\n");
+ rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
+ if (rc)
+ dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
+ return;
+ case INIT_CRQ_COMP_RES:
+ dev_info(ibmvtpm->dev,
+ "CRQ initialization completed\n");
+ return;
+ default:
+ dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
+ return;
+ }
+ case IBMVTPM_VALID_CMD:
+ switch (crq->msg) {
+ case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+ if (be16_to_cpu(crq->len) <= 0) {
+ dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+ return;
+ }
+ ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+ ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+ GFP_ATOMIC);
+ if (!ibmvtpm->rtce_buf) {
+ dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
+ return;
+ }
+
+ ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
+ ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ibmvtpm->dev,
+ ibmvtpm->rtce_dma_handle)) {
+ kfree(ibmvtpm->rtce_buf);
+ ibmvtpm->rtce_buf = NULL;
+ dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
+ }
+
+ return;
+ case VTPM_GET_VERSION_RES:
+ ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
+ return;
+ case VTPM_TPM_COMMAND_RES:
+ /* len of the data in rtce buffer */
+ ibmvtpm->res_len = be16_to_cpu(crq->len);
+ ibmvtpm->tpm_processing_cmd = 0;
+ wake_up_interruptible(&ibmvtpm->wq);
+ return;
+ default:
+ return;
+ }
+ }
+ return;
+}
+
+/**
+ * ibmvtpm_interrupt - Interrupt handler
+ *
+ * @irq: irq number to handle
+ * @vtpm_instance: vtpm that received interrupt
+ *
+ * Returns:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+{
+ struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
+ struct ibmvtpm_crq *crq;
+
+ /* while loop is needed for initial setup (get version and
+ * get rtce_size). There should be only one tpm request at any
+ * given time.
+ */
+ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+ ibmvtpm_crq_process(crq, ibmvtpm);
+ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
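+		/* Mark the entry as consumed so its slot can be reused. */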
+ crq->valid = 0;
+ smp_wmb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
+ *
+ * @vio_dev: vio device struct
+ * @id: vio device id struct
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ const struct vio_device_id *id)
+{
+ struct ibmvtpm_dev *ibmvtpm;
+ struct device *dev = &vio_dev->dev;
+ struct ibmvtpm_crq_queue *crq_q;
+ struct tpm_chip *chip;
+ int rc = -ENOMEM, rc1;
+
+ chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
+ if (!ibmvtpm) {
+ dev_err(dev, "kzalloc for ibmvtpm failed\n");
+ goto cleanup;
+ }
+
+ ibmvtpm->dev = dev;
+ ibmvtpm->vdev = vio_dev;
+
+ crq_q = &ibmvtpm->crq_queue;
+ crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
+ if (!crq_q->crq_addr) {
+ dev_err(dev, "Unable to allocate memory for crq_addr\n");
+ goto cleanup;
+ }
+
+ crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+ init_waitqueue_head(&crq_q->wq);
+ ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+ CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
+ dev_err(dev, "dma mapping failed\n");
+ goto cleanup;
+ }
+
+ rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
+ ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+ if (rc == H_RESOURCE)
+ rc = ibmvtpm_reset_crq(ibmvtpm);
+
+ if (rc) {
+ dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
+ goto reg_crq_cleanup;
+ }
+
+ rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
+ tpm_ibmvtpm_driver_name, ibmvtpm);
+ if (rc) {
+ dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
+ goto init_irq_cleanup;
+ }
+
+ rc = vio_enable_interrupts(vio_dev);
+ if (rc) {
+ dev_err(dev, "Error %d enabling interrupts\n", rc);
+ goto init_irq_cleanup;
+ }
+
+ init_waitqueue_head(&ibmvtpm->wq);
+
+ crq_q->index = 0;
+
+ dev_set_drvdata(&chip->dev, ibmvtpm);
+
+ spin_lock_init(&ibmvtpm->rtce_lock);
+
+ rc = ibmvtpm_crq_send_init(ibmvtpm);
+ if (rc)
+ goto init_irq_cleanup;
+
+ rc = ibmvtpm_crq_get_version(ibmvtpm);
+ if (rc)
+ goto init_irq_cleanup;
+
+ rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
+ if (rc)
+ goto init_irq_cleanup;
+
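+	/* The rtce buffer is allocated from the CRQ interrupt handler; wait for it. */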
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
+ rc = -ENODEV;
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
+
+ if (!strcmp(id->compat, "IBM,vtpm20"))
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ rc = tpm_get_timeouts(chip);
+ if (rc)
+ goto init_irq_cleanup;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc)
+ goto init_irq_cleanup;
+ }
+
+ return tpm_chip_register(chip);
+init_irq_cleanup:
+ do {
+ rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
+ } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
+reg_crq_cleanup:
+ dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+cleanup:
+ if (ibmvtpm) {
+ if (crq_q->crq_addr)
+ free_page((unsigned long)crq_q->crq_addr);
+ kfree(ibmvtpm);
+ }
+
+ return rc;
+}
+
+static struct vio_driver ibmvtpm_driver = {
+ .id_table = tpm_ibmvtpm_device_table,
+ .probe = tpm_ibmvtpm_probe,
+ .remove = tpm_ibmvtpm_remove,
+ .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
+ .name = tpm_ibmvtpm_driver_name,
+ .pm = &tpm_ibmvtpm_pm_ops,
+};
+
+/**
+ * ibmvtpm_module_init - Initialize ibm vtpm module.
+ *
+ *
+ * Return:
+ * 0 on success.
+ * Non-zero on failure.
+ */
+static int __init ibmvtpm_module_init(void)
+{
+ return vio_register_driver(&ibmvtpm_driver);
+}
+
+/**
+ * ibmvtpm_module_exit - Tear down ibm vtpm module.
+ */
+static void __exit ibmvtpm_module_exit(void)
+{
+ vio_unregister_driver(&ibmvtpm_driver);
+}
+
+module_init(ibmvtpm_module_init);
+module_exit(ibmvtpm_module_exit);
+
+MODULE_AUTHOR("adlai@us.ibm.com");
+MODULE_DESCRIPTION("IBM vTPM Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
new file mode 100644
index 0000000000..51198b1374
--- /dev/null
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <ashleydlai@gmail.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#ifndef __TPM_IBMVTPM_H__
+#define __TPM_IBMVTPM_H__
+
+/* vTPM Message Format 1 */
+struct ibmvtpm_crq {
+ u8 valid;
+ u8 msg;
+ __be16 len;
+ __be32 data;
+ __be64 reserved;
+} __attribute__((packed, aligned(8)));
+
+struct ibmvtpm_crq_queue {
+ struct ibmvtpm_crq *crq_addr;
+ u32 index;
+ u32 num_entry;
+ wait_queue_head_t wq;
+};
+
+struct ibmvtpm_dev {
+ struct device *dev;
+ struct vio_dev *vdev;
+ struct ibmvtpm_crq_queue crq_queue;
+ dma_addr_t crq_dma_handle;
+ u32 rtce_size;
+ void __iomem *rtce_buf;
+ dma_addr_t rtce_dma_handle;
+ spinlock_t rtce_lock;
+ wait_queue_head_t wq;
+ u16 res_len;
+ u32 vtpm_version;
+ u8 tpm_processing_cmd;
+};
+
+#define CRQ_RES_BUF_SIZE PAGE_SIZE
+
+/* Initialize CRQ */
+#define INIT_CRQ_CMD 0xC001000000000000LL /* Init cmd */
+#define INIT_CRQ_COMP_CMD 0xC002000000000000LL /* Init complete cmd */
+#define INIT_CRQ_RES 0x01 /* Init response */
+#define INIT_CRQ_COMP_RES 0x02 /* Init complete response */
+#define VALID_INIT_CRQ 0xC0 /* Valid command for init crq */
+
+/* vTPM CRQ response is the message type | 0x80 */
+#define VTPM_MSG_RES 0x80
+#define IBMVTPM_VALID_CMD 0x80
+
+/* vTPM CRQ message types */
+#define VTPM_GET_VERSION 0x01
+#define VTPM_GET_VERSION_RES (0x01 | VTPM_MSG_RES)
+
+#define VTPM_TPM_COMMAND 0x02
+#define VTPM_TPM_COMMAND_RES (0x02 | VTPM_MSG_RES)
+
+#define VTPM_GET_RTCE_BUFFER_SIZE 0x03
+#define VTPM_GET_RTCE_BUFFER_SIZE_RES (0x03 | VTPM_MSG_RES)
+
+#define VTPM_PREPARE_TO_SUSPEND 0x04
+#define VTPM_PREPARE_TO_SUSPEND_RES (0x04 | VTPM_MSG_RES)
+
+#endif
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
new file mode 100644
index 0000000000..9c924a1440
--- /dev/null
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Description:
+ * Device Driver for the Infineon Technologies
+ * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
+ * Sirrix AG - security technologies <tpmdd@sirrix.com> and
+ * Applied Data Security Group, Ruhr-University Bochum, Germany
+ * Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include "tpm.h"
+
+/* Infineon specific definitions */
+/* maximum number of WTX packets */
+#define TPM_MAX_WTX_PACKAGES 50
+/* msleep time between WTX packets */
+#define TPM_WTX_MSLEEP_TIME 20
+/* msleep time, i.e. polling interval for the status register */
+#define TPM_MSLEEP_TIME 3
+/* maximum number of msleep() calls before a timeout is reported */
+#define TPM_MAX_TRIES 5000
+#define TPM_INFINEON_DEV_VEN_VALUE 0x15D1
+
+#define TPM_INF_IO_PORT 0x0
+#define TPM_INF_IO_MEM 0x1
+
+#define TPM_INF_ADDR 0x0
+#define TPM_INF_DATA 0x1
+
+struct tpm_inf_dev {
+ int iotype;
+
+ void __iomem *mem_base; /* MMIO ioremap'd addr */
+ unsigned long map_base; /* phys MMIO base */
+ unsigned long map_size; /* MMIO region size */
+ unsigned int index_off; /* index register offset */
+
+ unsigned int data_regs; /* Data registers */
+ unsigned int data_size;
+
+ unsigned int config_port; /* IO Port config index reg */
+ unsigned int config_size;
+};
+
+static struct tpm_inf_dev tpm_dev;
+
+static inline void tpm_data_out(unsigned char data, unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ outb(data, tpm_dev.data_regs + offset);
+ else
+ writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline unsigned char tpm_data_in(unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ return inb(tpm_dev.data_regs + offset);
+ else
+ return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
+}
+
+static inline void tpm_config_out(unsigned char data, unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ outb(data, tpm_dev.config_port + offset);
+ else
+ writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+static inline unsigned char tpm_config_in(unsigned char offset)
+{
+ if (tpm_dev.iotype == TPM_INF_IO_PORT)
+ return inb(tpm_dev.config_port + offset);
+ else
+ return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
+}
+
+/* TPM header definitions */
+enum infineon_tpm_header {
+ TPM_VL_VER = 0x01,
+ TPM_VL_CHANNEL_CONTROL = 0x07,
+ TPM_VL_CHANNEL_PERSONALISATION = 0x0A,
+ TPM_VL_CHANNEL_TPM = 0x0B,
+ TPM_VL_CONTROL = 0x00,
+ TPM_INF_NAK = 0x15,
+ TPM_CTRL_WTX = 0x10,
+ TPM_CTRL_WTX_ABORT = 0x18,
+ TPM_CTRL_WTX_ABORT_ACK = 0x18,
+ TPM_CTRL_ERROR = 0x20,
+ TPM_CTRL_CHAININGACK = 0x40,
+ TPM_CTRL_CHAINING = 0x80,
+ TPM_CTRL_DATA = 0x04,
+ TPM_CTRL_DATA_CHA = 0x84,
+ TPM_CTRL_DATA_CHA_ACK = 0xC4
+};
+
+enum infineon_tpm_register {
+ WRFIFO = 0x00,
+ RDFIFO = 0x01,
+ STAT = 0x02,
+ CMD = 0x03
+};
+
+enum infineon_tpm_command_bits {
+ CMD_DIS = 0x00,
+ CMD_LP = 0x01,
+ CMD_RES = 0x02,
+ CMD_IRQC = 0x06
+};
+
+enum infineon_tpm_status_bits {
+ STAT_XFE = 0x00,
+ STAT_LPA = 0x01,
+ STAT_FOK = 0x02,
+ STAT_TOK = 0x03,
+ STAT_IRQA = 0x06,
+ STAT_RDA = 0x07
+};
+
+/* some outgoing values */
+enum infineon_tpm_values {
+ CHIP_ID1 = 0x20,
+ CHIP_ID2 = 0x21,
+ TPM_DAR = 0x30,
+ RESET_LP_IRQC_DISABLE = 0x41,
+ ENABLE_REGISTER_PAIR = 0x55,
+ IOLIMH = 0x60,
+ IOLIML = 0x61,
+ DISABLE_REGISTER_PAIR = 0xAA,
+ IDVENL = 0xF1,
+ IDVENH = 0xF2,
+ IDPDL = 0xF3,
+ IDPDH = 0xF4
+};
+
+static int number_of_wtx;
+
+static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo)
+{
+ int status;
+ int check = 0;
+ int i;
+
+ if (clear_wrfifo) {
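+		/* Drain the write FIFO; stop once 0xff is read repeatedly or after 4096 attempts. */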
+ for (i = 0; i < 4096; i++) {
+ status = tpm_data_in(WRFIFO);
+ if (status == 0xff) {
+ if (check == 5)
+ break;
+ else
+ check++;
+ }
+ }
+ }
+	/* Note: Any values still in the TPM's read FIFO are thrown away,
+	   since there is no use for them. This is normally harmless: the TPM
+	   either answers immediately or the command is aborted, so the
+	   leftover data is stale anyway. We still have to drain it, because
+	   stale data left in the read FIFO would corrupt the next
+	   communication with the TPM.
+	 */
+ i = 0;
+ do {
+ status = tpm_data_in(RDFIFO);
+ status = tpm_data_in(STAT);
+ i++;
+ if (i == TPM_MAX_TRIES)
+ return -EIO;
+ } while ((status & (1 << STAT_RDA)) != 0);
+ return 0;
+}
+
+static int wait(struct tpm_chip *chip, int wait_for_bit)
+{
+ int status;
+ int i;
+ for (i = 0; i < TPM_MAX_TRIES; i++) {
+ status = tpm_data_in(STAT);
+ /* check the status-register if wait_for_bit is set */
+ if (status & 1 << wait_for_bit)
+ break;
+ tpm_msleep(TPM_MSLEEP_TIME);
+ }
+ if (i == TPM_MAX_TRIES) { /* timeout occurs */
+ if (wait_for_bit == STAT_XFE)
+ dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
+ if (wait_for_bit == STAT_RDA)
+ dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
+{
+ wait(chip, STAT_XFE);
+ tpm_data_out(sendbyte, WRFIFO);
+}
+
+ /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more
+ calculation time, it sends a WTX-package, which has to be acknowledged
+ or aborted. This usually occurs if you are hammering the TPM with key
+ creation. Set the maximum number of WTX-packages in the definitions
+ above; if that number is reached, the waiting-time extension is denied
+ and the TPM command has to be resent.
+ */
+
+static void tpm_wtx(struct tpm_chip *chip)
+{
+ number_of_wtx++;
+ dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
+ number_of_wtx, TPM_MAX_WTX_PACKAGES);
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_WTX);
+ wait_and_send(chip, 0x00);
+ wait_and_send(chip, 0x00);
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static void tpm_wtx_abort(struct tpm_chip *chip)
+{
+ dev_info(&chip->dev, "Aborting WTX\n");
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_WTX_ABORT);
+ wait_and_send(chip, 0x00);
+ wait_and_send(chip, 0x00);
+ number_of_wtx = 0;
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
+}
+
+static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ int i;
+ int ret;
+ u32 size = 0;
+ number_of_wtx = 0;
+
+recv_begin:
+ /* start receiving header */
+ for (i = 0; i < 4; i++) {
+ ret = wait(chip, STAT_RDA);
+ if (ret)
+ return -EIO;
+ buf[i] = tpm_data_in(RDFIFO);
+ }
+
+ if (buf[0] != TPM_VL_VER) {
+ dev_err(&chip->dev,
+ "Wrong transport protocol implementation!\n");
+ return -EIO;
+ }
+
+ if (buf[1] == TPM_CTRL_DATA) {
+ /* size of the data received */
+ size = ((buf[2] << 8) | buf[3]);
+
+ for (i = 0; i < size; i++) {
+ wait(chip, STAT_RDA);
+ buf[i] = tpm_data_in(RDFIFO);
+ }
+
+ if ((size == 0x6D00) && (buf[1] == 0x80)) {
+ dev_err(&chip->dev, "Error handling on vendor layer!\n");
+ return -EIO;
+ }
+
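+		/* Strip the 6-byte data header so buf holds only the TPM response. */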
+ for (i = 0; i < size; i++)
+ buf[i] = buf[i + 6];
+
+ size = size - 6;
+ return size;
+ }
+
+ if (buf[1] == TPM_CTRL_WTX) {
+ dev_info(&chip->dev, "WTX-package received\n");
+ if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
+ tpm_wtx(chip);
+ goto recv_begin;
+ } else {
+ tpm_wtx_abort(chip);
+ goto recv_begin;
+ }
+ }
+
+ if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
+ dev_info(&chip->dev, "WTX-abort acknowledged\n");
+ return size;
+ }
+
+ if (buf[1] == TPM_CTRL_ERROR) {
+ dev_err(&chip->dev, "ERROR-package received:\n");
+ if (buf[4] == TPM_INF_NAK)
+ dev_err(&chip->dev,
+ "-> Negative acknowledgement"
+ " - retransmit command!\n");
+ return -EIO;
+ }
+ return -EIO;
+}
+
+static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ int i;
+ int ret;
+ u8 count_high, count_low, count_4, count_3, count_2, count_1;
+
+ /* Disabling Reset, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+ ret = empty_fifo(chip, 1);
+ if (ret) {
+ dev_err(&chip->dev, "Timeout while clearing FIFO\n");
+ return -EIO;
+ }
+
+ ret = wait(chip, STAT_XFE);
+ if (ret)
+ return -EIO;
+
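+	/*
+	 * Split the lengths into single bytes: count_4..count_1 carry the raw
+	 * TPM command length for the data header, while count_high/count_low
+	 * carry that length plus the 6-byte data header for the transport
+	 * header.
+	 */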
+ count_4 = (count & 0xff000000) >> 24;
+ count_3 = (count & 0x00ff0000) >> 16;
+ count_2 = (count & 0x0000ff00) >> 8;
+ count_1 = (count & 0x000000ff);
+ count_high = ((count + 6) & 0xffffff00) >> 8;
+ count_low = ((count + 6) & 0x000000ff);
+
+ /* Sending Header */
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_CTRL_DATA);
+ wait_and_send(chip, count_high);
+ wait_and_send(chip, count_low);
+
+ /* Sending Data Header */
+ wait_and_send(chip, TPM_VL_VER);
+ wait_and_send(chip, TPM_VL_CHANNEL_TPM);
+ wait_and_send(chip, count_4);
+ wait_and_send(chip, count_3);
+ wait_and_send(chip, count_2);
+ wait_and_send(chip, count_1);
+
+ /* Sending Data */
+ for (i = 0; i < count; i++) {
+ wait_and_send(chip, buf[i]);
+ }
+ return 0;
+}
+
+static void tpm_inf_cancel(struct tpm_chip *chip)
+{
+ /*
+ Since we are using the legacy mode to communicate
+ with the TPM, we have no cancel functions, but have
+ a workaround for interrupting the TPM through WTX.
+ */
+}
+
+static u8 tpm_inf_status(struct tpm_chip *chip)
+{
+ return tpm_data_in(STAT);
+}
+
+static const struct tpm_class_ops tpm_inf = {
+ .recv = tpm_inf_recv,
+ .send = tpm_inf_send,
+ .cancel = tpm_inf_cancel,
+ .status = tpm_inf_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+};
+
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
+ /* Infineon TPMs */
+ {"IFX0101", 0},
+ {"IFX0102", 0},
+ {"", 0}
+};
+
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
+
+static int tpm_inf_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ int rc = 0;
+ u8 iol, ioh;
+ int vendorid[2];
+ int version[2];
+ int productid[2];
+ const char *chipname;
+ struct tpm_chip *chip;
+
+ /* read IO-ports through PnP */
+ if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+ tpm_dev.iotype = TPM_INF_IO_PORT;
+
+ tpm_dev.config_port = pnp_port_start(dev, 0);
+ tpm_dev.config_size = pnp_port_len(dev, 0);
+ tpm_dev.data_regs = pnp_port_start(dev, 1);
+ tpm_dev.data_size = pnp_port_len(dev, 1);
+ if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ dev_info(&dev->dev, "Found %s with ID %s\n",
+ dev->name, dev_id->id);
+ if (!((tpm_dev.data_regs >> 8) & 0xff)) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ /* publish my base address and request region */
+ if (request_region(tpm_dev.data_regs, tpm_dev.data_size,
+ "tpm_infineon0") == NULL) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+ if (request_region(tpm_dev.config_port, tpm_dev.config_size,
+ "tpm_infineon0") == NULL) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ rc = -EINVAL;
+ goto err_last;
+ }
+ } else if (pnp_mem_valid(dev, 0) &&
+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+ tpm_dev.iotype = TPM_INF_IO_MEM;
+
+ tpm_dev.map_base = pnp_mem_start(dev, 0);
+ tpm_dev.map_size = pnp_mem_len(dev, 0);
+
+ dev_info(&dev->dev, "Found %s with ID %s\n",
+ dev->name, dev_id->id);
+
+ /* publish my base address and request region */
+ if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size,
+ "tpm_infineon0") == NULL) {
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size);
+ if (tpm_dev.mem_base == NULL) {
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+ /*
+ * The only known MMIO based Infineon TPM system provides
+ * a single large mem region with the device config
+ * registers at the default TPM_ADDR. The data registers
+ * seem like they could be placed anywhere within the MMIO
+ * region, but let's just put them at zero offset.
+ */
+ tpm_dev.index_off = TPM_ADDR;
+ tpm_dev.data_regs = 0x0;
+ } else {
+ rc = -EINVAL;
+ goto err_last;
+ }
+
+	/* query the chip for its vendor ID, product ID and chip version */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IDVENL, TPM_INF_ADDR);
+ vendorid[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDVENH, TPM_INF_ADDR);
+ vendorid[0] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDPDL, TPM_INF_ADDR);
+ productid[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IDPDH, TPM_INF_ADDR);
+ productid[0] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(CHIP_ID1, TPM_INF_ADDR);
+ version[1] = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(CHIP_ID2, TPM_INF_ADDR);
+ version[0] = tpm_config_in(TPM_INF_DATA);
+
+ switch ((productid[0] << 8) | productid[1]) {
+ case 6:
+ chipname = " (SLD 9630 TT 1.1)";
+ break;
+ case 11:
+ chipname = " (SLB 9635 TT 1.2)";
+ break;
+ default:
+ chipname = " (unknown chip)";
+ break;
+ }
+
+ if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) {
+
+ /* configure TPM with IO-ports */
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+
+		/* verify that the IO ports were set correctly */
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ ioh = tpm_config_in(TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ iol = tpm_config_in(TPM_INF_DATA);
+
+ if ((ioh << 8 | iol) != tpm_dev.data_regs) {
+ dev_err(&dev->dev,
+ "Could not set IO-data registers to 0x%x\n",
+ tpm_dev.data_regs);
+ rc = -EIO;
+ goto err_release_region;
+ }
+
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+
+		/* Finally, we're done; print some info */
+ dev_info(&dev->dev, "TPM found: "
+ "config base 0x%lx, "
+ "data base 0x%lx, "
+ "chip version 0x%02x%02x, "
+ "vendor id 0x%x%x (Infineon), "
+ "product id 0x%02x%02x"
+ "%s\n",
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+ tpm_dev.config_port :
+ tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+ tpm_dev.data_regs :
+ tpm_dev.map_base + tpm_dev.data_regs,
+ version[0], version[1],
+ vendorid[0], vendorid[1],
+ productid[0], productid[1], chipname);
+
+ chip = tpmm_chip_alloc(&dev->dev, &tpm_inf);
+ if (IS_ERR(chip)) {
+ rc = PTR_ERR(chip);
+ goto err_release_region;
+ }
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_release_region;
+
+ return 0;
+ } else {
+ rc = -ENODEV;
+ goto err_release_region;
+ }
+
+err_release_region:
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port, tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
+
+err_last:
+ return rc;
+}
+
+static void tpm_inf_pnp_remove(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+
+ if (tpm_dev.iotype == TPM_INF_IO_PORT) {
+ release_region(tpm_dev.data_regs, tpm_dev.data_size);
+ release_region(tpm_dev.config_port,
+ tpm_dev.config_size);
+ } else {
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tpm_inf_resume(struct device *dev)
+{
+ /* Re-configure TPM after suspending */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+ return tpm_pm_resume(dev);
+}
+#endif
+static SIMPLE_DEV_PM_OPS(tpm_inf_pm, tpm_pm_suspend, tpm_inf_resume);
+
+static struct pnp_driver tpm_inf_pnp_driver = {
+ .name = "tpm_inf_pnp",
+ .id_table = tpm_inf_pnp_tbl,
+ .probe = tpm_inf_pnp_probe,
+ .remove = tpm_inf_pnp_remove,
+ .driver = {
+ .pm = &tpm_inf_pm,
+ }
+};
+
+module_pnp_driver(tpm_inf_pnp_driver);
+
+MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
+MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
+MODULE_VERSION("1.9.2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
new file mode 100644
index 0000000000..038701d483
--- /dev/null
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "tpm.h"
+
+/* National definitions */
+enum tpm_nsc_addr{
+ TPM_NSC_IRQ = 0x07,
+ TPM_NSC_BASE0_HI = 0x60,
+ TPM_NSC_BASE0_LO = 0x61,
+ TPM_NSC_BASE1_HI = 0x62,
+ TPM_NSC_BASE1_LO = 0x63
+};
+
+enum tpm_nsc_index {
+ NSC_LDN_INDEX = 0x07,
+ NSC_SID_INDEX = 0x20,
+ NSC_LDC_INDEX = 0x30,
+ NSC_DIO_INDEX = 0x60,
+ NSC_CIO_INDEX = 0x62,
+ NSC_IRQ_INDEX = 0x70,
+ NSC_ITS_INDEX = 0x71
+};
+
+enum tpm_nsc_status_loc {
+ NSC_STATUS = 0x01,
+ NSC_COMMAND = 0x01,
+ NSC_DATA = 0x00
+};
+
+/* status bits */
+enum tpm_nsc_status {
+ NSC_STATUS_OBF = 0x01, /* output buffer full */
+ NSC_STATUS_IBF = 0x02, /* input buffer full */
+ NSC_STATUS_F0 = 0x04, /* F0 */
+ NSC_STATUS_A2 = 0x08, /* A2 */
+ NSC_STATUS_RDY = 0x10, /* ready to receive command */
+ NSC_STATUS_IBR = 0x20 /* ready to receive data */
+};
+
+/* command bits */
+enum tpm_nsc_cmd_mode {
+ NSC_COMMAND_NORMAL = 0x01, /* normal mode */
+ NSC_COMMAND_EOC = 0x03,
+ NSC_COMMAND_CANCEL = 0x22
+};
+
+struct tpm_nsc_priv {
+ unsigned long base;
+};
+
+/*
+ * Wait for a certain status to appear
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+
+ /* status immediately available check */
+ *data = inb(priv->base + NSC_STATUS);
+ if ((*data & mask) == val)
+ return 0;
+
+ /* wait for status */
+ stop = jiffies + 10 * HZ;
+ do {
+ msleep(TPM_TIMEOUT);
+		*data = inb(priv->base + NSC_STATUS);
+ if ((*data & mask) == val)
+ return 0;
+ }
+ while (time_before(jiffies, stop));
+
+ return -EBUSY;
+}
+
+static int nsc_wait_for_ready(struct tpm_chip *chip)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ int status;
+ unsigned long stop;
+
+ /* status immediately available check */
+ status = inb(priv->base + NSC_STATUS);
+ if (status & NSC_STATUS_OBF)
+ status = inb(priv->base + NSC_DATA);
+ if (status & NSC_STATUS_RDY)
+ return 0;
+
+ /* wait for status */
+ stop = jiffies + 100;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = inb(priv->base + NSC_STATUS);
+ if (status & NSC_STATUS_OBF)
+ status = inb(priv->base + NSC_DATA);
+ if (status & NSC_STATUS_RDY)
+ return 0;
+ }
+ while (time_before(jiffies, stop));
+
+ dev_info(&chip->dev, "wait for ready failed\n");
+ return -EBUSY;
+}
+
+
+static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 *buffer = buf;
+ u8 data, *p;
+ u32 size;
+ __be32 *native_size;
+
+ if (count < 6)
+ return -EIO;
+
+ if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
+ dev_err(&chip->dev, "F0 timeout\n");
+ return -EIO;
+ }
+
+ data = inb(priv->base + NSC_DATA);
+ if (data != NSC_COMMAND_NORMAL) {
+ dev_err(&chip->dev, "not in normal mode (0x%x)\n",
+ data);
+ return -EIO;
+ }
+
+ /* read the whole packet */
+ for (p = buffer; p < &buffer[count]; p++) {
+ if (wait_for_stat
+ (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
+ dev_err(&chip->dev,
+ "OBF timeout (while reading data)\n");
+ return -EIO;
+ }
+ if (data & NSC_STATUS_F0)
+ break;
+ *p = inb(priv->base + NSC_DATA);
+ }
+
+ if ((data & NSC_STATUS_F0) == 0 &&
+ (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
+ dev_err(&chip->dev, "F0 not set\n");
+ return -EIO;
+ }
+
+ data = inb(priv->base + NSC_DATA);
+ if (data != NSC_COMMAND_EOC) {
+ dev_err(&chip->dev,
+ "expected end of command(0x%x)\n", data);
+ return -EIO;
+ }
+
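+	/* The total response length is a big-endian u32 at offset 2 of the TPM header. */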
+ native_size = (__force __be32 *) (buf + 2);
+ size = be32_to_cpu(*native_size);
+
+ if (count < size)
+ return -EIO;
+
+ return size;
+}
+
+static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+ u8 data;
+ int i;
+
+ /*
+	 * If we hit the chip with back-to-back commands it locks up
+	 * and never sets IBF. Hitting it with this "hammer" seems to
+	 * fix it. Not sure why this is needed; we followed the flow
+ * chart in the manual to the letter.
+ */
+ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
+
+ if (nsc_wait_for_ready(chip) != 0)
+ return -EIO;
+
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(&chip->dev, "IBF timeout\n");
+ return -EIO;
+ }
+
+ outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND);
+ if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
+ dev_err(&chip->dev, "IBR timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(&chip->dev,
+ "IBF timeout (while writing data)\n");
+ return -EIO;
+ }
+ outb(buf[i], priv->base + NSC_DATA);
+ }
+
+ if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+ dev_err(&chip->dev, "IBF timeout\n");
+ return -EIO;
+ }
+ outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
+
+ return 0;
+}
+
+static void tpm_nsc_cancel(struct tpm_chip *chip)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
+}
+
+static u8 tpm_nsc_status(struct tpm_chip *chip)
+{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return inb(priv->base + NSC_STATUS);
+}
+
+static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == NSC_STATUS_RDY);
+}
+
+static const struct tpm_class_ops tpm_nsc = {
+ .recv = tpm_nsc_recv,
+ .send = tpm_nsc_send,
+ .cancel = tpm_nsc_cancel,
+ .status = tpm_nsc_status,
+ .req_complete_mask = NSC_STATUS_OBF,
+ .req_complete_val = NSC_STATUS_OBF,
+ .req_canceled = tpm_nsc_req_canceled,
+};
+
+static struct platform_device *pdev = NULL;
+
+static void tpm_nsc_remove(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ tpm_chip_unregister(chip);
+ release_region(priv->base, 2);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct platform_driver nsc_drv = {
+ .driver = {
+ .name = "tpm_nsc",
+ .pm = &tpm_nsc_pm,
+ },
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base+1) & 0xFF;
+}
+
+static inline void tpm_write_index(int base, int index, int value)
+{
+ outb(index, base);
+ outb(value & 0xFF, base+1);
+}
+
+static int __init init_nsc(void)
+{
+ int rc = 0;
+ int lo, hi, err;
+ int nscAddrBase = TPM_ADDR;
+ struct tpm_chip *chip;
+ unsigned long base;
+ struct tpm_nsc_priv *priv;
+
+ /* verify that it is a National part (SID) */
+ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
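+		/* Not at the default address; read the base from the SuperIO config registers and check the SID there. */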
+ nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
+ (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
+ if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
+ return -ENODEV;
+ }
+
+ err = platform_driver_register(&nsc_drv);
+ if (err)
+ return err;
+
+ hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
+ lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
+ base = (hi<<8) | lo;
+
+ /* enable the DPM module */
+ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
+
+ pdev = platform_device_alloc("tpm_nscl0", -1);
+ if (!pdev) {
+ rc = -ENOMEM;
+ goto err_unreg_drv;
+ }
+
+ pdev->num_resources = 0;
+ pdev->dev.driver = &nsc_drv.driver;
+ pdev->dev.release = tpm_nsc_remove;
+
+ if ((rc = platform_device_add(pdev)) < 0)
+ goto err_put_dev;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_del_dev;
+ }
+
+ priv->base = base;
+
+ if (request_region(base, 2, "tpm_nsc0") == NULL ) {
+ rc = -EBUSY;
+ goto err_del_dev;
+ }
+
+ chip = tpmm_chip_alloc(&pdev->dev, &tpm_nsc);
+ if (IS_ERR(chip)) {
+ rc = -ENODEV;
+ goto err_rel_reg;
+ }
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto err_rel_reg;
+
+ dev_dbg(&pdev->dev, "NSC TPM detected\n");
+ dev_dbg(&pdev->dev,
+ "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
+ tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
+ tpm_read_index(nscAddrBase,0x27));
+ dev_dbg(&pdev->dev,
+ "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
+ tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
+ tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
+ dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
+ (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
+ dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
+ (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
+ dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
+ tpm_read_index(nscAddrBase,0x70));
+ dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
+ tpm_read_index(nscAddrBase,0x71));
+ dev_dbg(&pdev->dev,
+ "NSC DMA channel select0 0x%x, select1 0x%x\n",
+ tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
+ dev_dbg(&pdev->dev,
+ "NSC Config "
+ "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
+ tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3),
+ tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5),
+ tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
+ tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
+
+ dev_info(&pdev->dev,
+ "NSC TPM revision %d\n",
+ tpm_read_index(nscAddrBase, 0x27) & 0x1F);
+
+ return 0;
+
+err_rel_reg:
+ release_region(base, 2);
+err_del_dev:
+ platform_device_del(pdev);
+err_put_dev:
+ platform_device_put(pdev);
+err_unreg_drv:
+ platform_driver_unregister(&nsc_drv);
+ return rc;
+}
+
+static void __exit cleanup_nsc(void)
+{
+ if (pdev) {
+ tpm_nsc_remove(&pdev->dev);
+ platform_device_unregister(pdev);
+ }
+
+ platform_driver_unregister(&nsc_drv);
+}
+
+module_init(init_nsc);
+module_exit(cleanup_nsc);
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
new file mode 100644
index 0000000000..bc7b1b4501
--- /dev/null
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012-2014 Intel Corporation
+ *
+ * Authors:
+ * Xiaoyan Zhang <xiaoyan.zhang@intel.com>
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * This file contains implementation of the sysfs interface for PPI.
+ */
+
+
+#include <linux/acpi.h>
+#include "tpm.h"
+
+#define TPM_PPI_REVISION_ID_1 1
+#define TPM_PPI_REVISION_ID_2 2
+#define TPM_PPI_FN_VERSION 1
+#define TPM_PPI_FN_SUBREQ 2
+#define TPM_PPI_FN_GETREQ 3
+#define TPM_PPI_FN_GETACT 4
+#define TPM_PPI_FN_GETRSP 5
+#define TPM_PPI_FN_SUBREQ2 7
+#define TPM_PPI_FN_GETOPR 8
+#define PPI_TPM_REQ_MAX 101 /* PPI 1.3 for TPM 2 */
+#define PPI_VS_REQ_START 128
+#define PPI_VS_REQ_END 255
+
+static const guid_t tpm_ppi_guid =
+ GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
+ 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+
+static bool tpm_ppi_req_has_parameter(u64 req)
+{
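+	/* Operation 23 (SetPCRBanks) carries an additional operation parameter. */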
+ return req == 23;
+}
+
+static inline union acpi_object *
+tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
+ union acpi_object *argv4, u64 rev)
+{
+ BUG_ON(!ppi_handle);
+ return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
+ rev, func, argv4, type);
+}
+
+static ssize_t tpm_show_ppi_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+}
+
+static ssize_t tpm_show_ppi_request(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t size = -EINVAL;
+ union acpi_object *obj;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ u64 rev = TPM_PPI_REVISION_ID_2;
+ u64 req;
+
+ if (strcmp(chip->ppi_version, "1.2") < 0)
+ rev = TPM_PPI_REVISION_ID_1;
+
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ,
+ ACPI_TYPE_PACKAGE, NULL, rev);
+ if (!obj)
+ return -ENXIO;
+
+ /*
+	 * output.pointer should be of package type, containing two or three
+	 * integers. The first is the function return code: 0 means success and
+	 * 1 means error. The second is the pending TPM operation requested by
+	 * the OS: 0 means none, >0 is the operation value. A third integer, if
+	 * present, carries the operation's parameter.
+ */
+ if (obj->package.count == 3 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[2].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
+ else {
+ req = obj->package.elements[1].integer.value;
+ if (tpm_ppi_req_has_parameter(req))
+ size = scnprintf(buf, PAGE_SIZE,
+ "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
+ else
+ size = scnprintf(buf, PAGE_SIZE,
+ "%llu\n", req);
+ }
+ } else if (obj->package.count == 2 &&
+ obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
+ obj->package.elements[1].type == ACPI_TYPE_INTEGER) {
+ if (obj->package.elements[0].integer.value)
+ size = -EFAULT;
+ else
+ size = scnprintf(buf, PAGE_SIZE, "%llu\n",
+ obj->package.elements[1].integer.value);
+ }
+
+ ACPI_FREE(obj);
+
+ return size;
+}
+
+static ssize_t tpm_store_ppi_request(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 req;
+ u64 ret;
+ int func = TPM_PPI_FN_SUBREQ;
+ union acpi_object *obj, tmp[2];
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(2, tmp);
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ u64 rev = TPM_PPI_REVISION_ID_1;
+
+ /*
+	 * Since PPI version 1.1, the function index used to submit a TPM
+	 * operation request to the pre-OS environment has moved from SUBREQ
+	 * to SUBREQ2.
+ */
+ if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
+ TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_SUBREQ2))
+ func = TPM_PPI_FN_SUBREQ2;
+
+ /*
+	 * The PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. Some BIOSes
+	 * accept buffer/string/integer types, while others accept buffer/
+	 * string/package types. For PPI versions 1.0 and 1.1 use the buffer
+	 * type for compatibility; from version 1.2 onwards use the package
+	 * type, as the spec requires.
+ */
+ if (strcmp(chip->ppi_version, "1.3") == 0) {
+ if (sscanf(buf, "%llu %llu", &tmp[0].integer.value,
+ &tmp[1].integer.value) != 2)
+ goto ppi12;
+ rev = TPM_PPI_REVISION_ID_2;
+ tmp[0].type = ACPI_TYPE_INTEGER;
+ tmp[1].type = ACPI_TYPE_INTEGER;
+ } else if (strcmp(chip->ppi_version, "1.2") < 0) {
+ if (sscanf(buf, "%d", &req) != 1)
+ return -EINVAL;
+ argv4.type = ACPI_TYPE_BUFFER;
+ argv4.buffer.length = sizeof(req);
+ argv4.buffer.pointer = (u8 *)&req;
+ } else {
+ppi12:
+ argv4.package.count = 1;
+ tmp[0].type = ACPI_TYPE_INTEGER;
+ if (sscanf(buf, "%llu", &tmp[0].integer.value) != 1)
+ return -EINVAL;
+ }
+
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER,
+ &argv4, rev);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ if (ret == 0)
+ return (acpi_status)count;
+
+ return (ret == 1) ? -EPERM : -EFAULT;
+}
+
+static ssize_t tpm_show_ppi_transition_action(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 ret;
+ acpi_status status;
+ union acpi_object *obj = NULL;
+ union acpi_object tmp = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = 0,
+ .buffer.pointer = NULL
+ };
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ static char *info[] = {
+ "None",
+ "Shutdown",
+ "Reboot",
+ "OS Vendor-specific",
+ "Error",
+ };
+
+ /*
+ * PPI spec defines params[3].type as empty package, but some platforms
+ * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
+ * compatibility, define params[3].type as buffer, if PPI version < 1.2
+ */
+ if (strcmp(chip->ppi_version, "1.2") < 0)
+ obj = &tmp;
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT,
+ ACPI_TYPE_INTEGER, obj, TPM_PPI_REVISION_ID_1);
+ if (!obj) {
+ return -ENXIO;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ if (ret < ARRAY_SIZE(info) - 1)
+ status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+ else
+ status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
+ info[ARRAY_SIZE(info)-1]);
+ return status;
+}
+
+static ssize_t tpm_show_ppi_response(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ acpi_status status = -EINVAL;
+ union acpi_object *obj, *ret_obj;
+ u64 req, res;
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP,
+ ACPI_TYPE_PACKAGE, NULL, TPM_PPI_REVISION_ID_1);
+ if (!obj)
+ return -ENXIO;
+
+ /*
+ * parameter output.pointer should be of package type, including
+ * 3 integers. The first means function return code, the second means
+ * most recent TPM operation request, and the last means response to
+ * the most recent TPM operation request. Only if the first is 0, and
+ * the second integer is not 0, the response makes sense.
+ */
+ ret_obj = obj->package.elements;
+ if (obj->package.count < 3 ||
+ ret_obj[0].type != ACPI_TYPE_INTEGER ||
+ ret_obj[1].type != ACPI_TYPE_INTEGER ||
+ ret_obj[2].type != ACPI_TYPE_INTEGER)
+ goto cleanup;
+
+ if (ret_obj[0].integer.value) {
+ status = -EFAULT;
+ goto cleanup;
+ }
+
+ req = ret_obj[1].integer.value;
+ res = ret_obj[2].integer.value;
+ if (req) {
+ if (res == 0)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+ "0: Success");
+ else if (res == 0xFFFFFFF0)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+ "0xFFFFFFF0: User Abort");
+ else if (res == 0xFFFFFFF1)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+ "0xFFFFFFF1: BIOS Failure");
+ else if (res >= 1 && res <= 0x00000FFF)
+ status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+ req, res, "Corresponding TPM error");
+ else
+ status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+ req, res, "Error");
+ } else {
+ status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
+ req, "No Recent Request");
+ }
+
+cleanup:
+ ACPI_FREE(obj);
+ return status;
+}
+
+static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
+ u32 end)
+{
+ int i;
+ u32 ret;
+ char *str = buf;
+ union acpi_object *obj, tmp;
+ union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ static char *info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by BIOS",
+ "User required",
+ "User not required",
+ };
+
+ if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
+ 1 << TPM_PPI_FN_GETOPR))
+ return -EPERM;
+
+ tmp.integer.type = ACPI_TYPE_INTEGER;
+ for (i = start; i <= end; i++) {
+ tmp.integer.value = i;
+ obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
+ ACPI_TYPE_INTEGER, &argv,
+ TPM_PPI_REVISION_ID_1);
+ if (!obj) {
+ return -ENOMEM;
+ } else {
+ ret = obj->integer.value;
+ ACPI_FREE(obj);
+ }
+
+ if (ret > 0 && ret < ARRAY_SIZE(info))
+ str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
+ i, ret, info[ret]);
+ }
+
+ return str - buf;
+}
+
+static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
+ PPI_TPM_REQ_MAX);
+}
+
+static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
+ PPI_VS_REQ_END);
+}
+
+static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
+static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP,
+ tpm_show_ppi_request, tpm_store_ppi_request);
+static DEVICE_ATTR(transition_action, S_IRUGO,
+ tpm_show_ppi_transition_action, NULL);
+static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL);
+static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL);
+static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL);
+
+static struct attribute *ppi_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_request.attr,
+ &dev_attr_transition_action.attr,
+ &dev_attr_response.attr,
+ &dev_attr_tcg_operations.attr,
+ &dev_attr_vs_operations.attr, NULL,
+};
+static const struct attribute_group ppi_attr_grp = {
+ .name = "ppi",
+ .attrs = ppi_attrs
+};
+
+void tpm_add_ppi(struct tpm_chip *chip)
+{
+ union acpi_object *obj;
+
+ if (!chip->acpi_dev_handle)
+ return;
+
+ if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
+ TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_VERSION))
+ return;
+
+ /* Cache PPI version string. */
+ obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
+ TPM_PPI_REVISION_ID_1,
+ TPM_PPI_FN_VERSION,
+ NULL, ACPI_TYPE_STRING);
+ if (obj) {
+ strscpy(chip->ppi_version, obj->string.pointer,
+ sizeof(chip->ppi_version));
+ ACPI_FREE(obj);
+ }
+
+ chip->groups[chip->groups_cnt++] = &ppi_attr_grp;
+}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
new file mode 100644
index 0000000000..2c52b7905b
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+struct tpm_info {
+ struct resource res;
+ /* irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+ int irq;
+};
+
+struct tpm_tis_tcg_phy {
+ struct tpm_tis_data priv;
+ void __iomem *iobase;
+};
+
+static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_tcg_phy, priv);
+}
+
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * Flush previous write operations with a dummy read operation to the
+ * TPM MMIO base address.
+ */
+static inline void tpm_tis_flush(void __iomem *iobase)
+{
+ ioread8(iobase + TPM_ACCESS(0));
+}
+#else
+#define tpm_tis_flush(iobase) do { } while (0)
+#endif
+
+/*
+ * Write a single byte to the TPM MMIO address, and flush the write queue.
+ * The flush ensures that the data is sent immediately over the bus and not
+ * aggregated with further requests and transferred later in a batch. The large
+ * write requests can lead to unwanted latency spikes by blocking the CPU until
+ * the complete batch has been transferred.
+ */
+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
+{
+ iowrite8(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
+/*
+ * Write a 32-bit word to the TPM MMIO address, and flush the write queue.
+ * The flush ensures that the data is sent immediately over the bus and not
+ * aggregated with further requests and transferred later in a batch. The large
+ * write requests can lead to unwanted latency spikes by blocking the CPU until
+ * the complete batch has been transferred.
+ */
+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
+{
+ iowrite32(b, iobase + addr);
+ tpm_tis_flush(iobase);
+}
+
+static bool interrupts;
+module_param(interrupts, bool, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static bool itpm;
+module_param(itpm, bool, 0444);
+MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
+
+static bool force;
+#ifdef CONFIG_X86
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+#endif
+
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
+static int has_hid(struct acpi_device *dev, const char *hid)
+{
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &dev->pnp.ids, list)
+ if (!strcmp(hid, id->id))
+ return 1;
+
+ return 0;
+}
+
+static inline int is_itpm(struct acpi_device *dev)
+{
+ if (!dev)
+ return 0;
+ return has_hid(dev, "INTC0102");
+}
+#else
+static inline int is_itpm(struct acpi_device *dev)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_ACPI)
+#define DEVICE_IS_TPM2 1
+
+static const struct acpi_device_id tpm_acpi_tbl[] = {
+ {"MSFT0101", DEVICE_IS_TPM2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
+
+static int check_acpi_tpm2(struct device *dev)
+{
+ const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
+ int ret = 0;
+
+ if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+ return 0;
+
+ /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+ * table is mandatory
+ */
+ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+ }
+
+ /* The tpm2_crb driver handles this device */
+ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+ ret = -ENODEV;
+
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
+}
+#else
+static int check_acpi_tpm2(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ __le16 result_le16;
+ __le32 result_le32;
+
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ result_le16 = cpu_to_le16(ioread16(phy->iobase + addr));
+ memcpy(result, &result_le16, sizeof(u16));
+ break;
+ case TPM_TIS_PHYS_32:
+ result_le32 = cpu_to_le32(ioread32(phy->iobase + addr));
+ memcpy(result, &result_le32, sizeof(u32));
+ break;
+ }
+
+ return 0;
+}
+
+static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ tpm_tis_iowrite8(*value++, phy->iobase, addr);
+ break;
+ case TPM_TIS_PHYS_16:
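+		/* 16-bit writes are not supported. */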
+ return -EINVAL;
+ case TPM_TIS_PHYS_32:
+ tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tpm_tis_phy_ops tpm_tcg = {
+ .read_bytes = tpm_tcg_read_bytes,
+ .write_bytes = tpm_tcg_write_bytes,
+};
+
+static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
+{
+ struct tpm_tis_tcg_phy *phy;
+ int irq = -1;
+ int rc;
+
+ rc = check_acpi_tpm2(dev);
+ if (rc)
+ return rc;
+
+ phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL);
+ if (phy == NULL)
+ return -ENOMEM;
+
+ phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(phy->iobase))
+ return PTR_ERR(phy->iobase);
+
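+	/* IRQs stay disabled (irq == -1) unless the "interrupts" module parameter is set. */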
+ if (interrupts)
+ irq = tpm_info->irq;
+
+ if (itpm || is_itpm(ACPI_COMPANION(dev)))
+ set_bit(TPM_TIS_ITPM_WORKAROUND, &phy->priv.flags);
+
+ return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
+ ACPI_HANDLE(dev));
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
+ const struct pnp_device_id *pnp_id)
+{
+ struct tpm_info tpm_info = {};
+ struct resource *res;
+
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ tpm_info.res = *res;
+
+ if (pnp_irq_valid(pnp_dev, 0))
+ tpm_info.irq = pnp_irq(pnp_dev, 0);
+ else
+ tpm_info.irq = -1;
+
+ return tpm_tis_init(&pnp_dev->dev, &tpm_info);
+}
+
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). This commit added IFX0102 device ID, which is also used by
+ * tpm_infineon, but no quirk was added to determine which driver ought to
+ * probe the device.
+ */
+
+static struct pnp_device_id tpm_pnp_tbl[] = {
+ {"PNP0C31", 0}, /* TPM */
+ {"ATM1200", 0}, /* Atmel */
+ {"IFX0102", 0}, /* Infineon */
+ {"BCM0101", 0}, /* Broadcom */
+ {"BCM0102", 0}, /* Broadcom */
+ {"NSC1200", 0}, /* National */
+ {"ICO0102", 0}, /* Intel */
+ /* Add new here */
+ {"", 0}, /* User Specified */
+ {"", 0} /* Terminator */
+};
+MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+
+static void tpm_tis_pnp_remove(struct pnp_dev *dev)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static struct pnp_driver tis_pnp_driver = {
+ .name = "tpm_tis",
+ .id_table = tpm_pnp_tbl,
+ .probe = tpm_tis_pnp_init,
+ .remove = tpm_tis_pnp_remove,
+ .driver = {
+ .pm = &tpm_tis_pm,
+ },
+};
+
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
+module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
+ sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
+MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
+
+static struct platform_device *force_pdev;
+
+static int tpm_tis_plat_probe(struct platform_device *pdev)
+{
+ struct tpm_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ tpm_info.irq = platform_get_irq_optional(pdev, 0);
+ if (tpm_info.irq <= 0) {
+ if (pdev != force_pdev)
+ tpm_info.irq = -1;
+ else
+			/* When forcing, auto-probe the IRQ */
+ tpm_info.irq = 0;
+ }
+
+ return tpm_tis_init(&pdev->dev, &tpm_info);
+}
+
+static void tpm_tis_plat_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+ {.compatible = "tcg,tpm-tis-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
+static struct platform_driver tis_drv = {
+ .probe = tpm_tis_plat_probe,
+ .remove_new = tpm_tis_plat_remove,
+ .driver = {
+ .name = "tpm_tis",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(tis_of_platform_match),
+ .acpi_match_table = ACPI_PTR(tpm_acpi_tbl),
+ },
+};
+
+static int tpm_tis_force_device(void)
+{
+ struct platform_device *pdev;
+ static const struct resource x86_resources[] = {
+ DEFINE_RES_MEM(0xFED40000, TIS_MEM_LEN)
+ };
+
+ if (!force)
+ return 0;
+
+ /* The driver core will match the name tpm_tis of the device to
+ * the tpm_tis platform driver and complete the setup via
+ * tpm_tis_plat_probe
+ */
+ pdev = platform_device_register_simple("tpm_tis", -1, x86_resources,
+ ARRAY_SIZE(x86_resources));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ force_pdev = pdev;
+
+ return 0;
+}
+
+static int __init init_tis(void)
+{
+ int rc;
+
+ rc = tpm_tis_force_device();
+ if (rc)
+ goto err_force;
+
+ rc = platform_driver_register(&tis_drv);
+ if (rc)
+ goto err_platform;
+
+ if (IS_ENABLED(CONFIG_PNP)) {
+ rc = pnp_register_driver(&tis_pnp_driver);
+ if (rc)
+ goto err_pnp;
+ }
+
+ return 0;
+
+err_pnp:
+ platform_driver_unregister(&tis_drv);
+err_platform:
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+err_force:
+ return rc;
+}
+
+static void __exit cleanup_tis(void)
+{
+ pnp_unregister_driver(&tis_pnp_driver);
+ platform_driver_unregister(&tis_drv);
+
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+}
+
+module_init(init_tis);
+module_exit(cleanup_tis);
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
new file mode 100644
index 0000000000..1b350412d8
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include <linux/dmi.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+#define TPM_TIS_MAX_UNHANDLED_IRQS 1000
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static u8 tpm_tis_filter_sts_mask(u8 int_mask, u8 sts_mask)
+{
+ if (!(int_mask & TPM_INTF_STS_VALID_INT))
+ sts_mask &= ~TPM_STS_VALID;
+
+ if (!(int_mask & TPM_INTF_DATA_AVAIL_INT))
+ sts_mask &= ~TPM_STS_DATA_AVAIL;
+
+ if (!(int_mask & TPM_INTF_CMD_READY_INT))
+ sts_mask &= ~TPM_STS_COMMAND_READY;
+
+ return sts_mask;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+ u8 sts_mask;
+ int ret = 0;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ sts_mask = mask & (TPM_STS_VALID | TPM_STS_DATA_AVAIL |
+ TPM_STS_COMMAND_READY);
+ /* check what status changes can be handled by irqs */
+ sts_mask = tpm_tis_filter_sts_mask(priv->int_mask, sts_mask);
+
+ stop = jiffies + timeout;
+ /* process status changes with irq support */
+ if (sts_mask) {
+ ret = -ETIME;
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, sts_mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ ret = 0;
+ }
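+		/* A wait interrupted only by the freezer is retried transparently. */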
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ mask &= ~sts_mask;
+ if (!mask) /* all done */
+ return 0;
+ /* process status changes without irq support */
+ do {
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ usleep_range(priv->timeout_min,
+ priv->timeout_max);
+ } while (time_before(jiffies, stop));
+ return -ETIME;
+}
+
+/* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+ * correct values in the other bits.'
+ */
+static int wait_startup(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop = jiffies + chip->timeout_a;
+
+ do {
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return rc;
+
+ if (access & TPM_ACCESS_VALID)
+ return 0;
+ tpm_msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -1;
+}
+
+static bool check_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return false;
+
+ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
+ | TPM_ACCESS_REQUEST_USE)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ priv->locality = l;
+ return true;
+ }
+
+ return false;
+}
+
+static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l)
+{
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+ return 0;
+}
+
+static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ mutex_lock(&priv->locality_count_mutex);
+ priv->locality_count--;
+ if (priv->locality_count == 0)
+ __tpm_tis_relinquish_locality(priv, l);
+ mutex_unlock(&priv->locality_count_mutex);
+
+ return 0;
+}
+
+static int __tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop, timeout;
+ long rc;
+
+ if (check_locality(chip, l))
+ return l;
+
+ rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE);
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
+ rc = wait_event_interruptible_timeout(priv->int_queue,
+ (check_locality
+ (chip, l)),
+ timeout);
+ if (rc > 0)
+ return l;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ /* wait for burstcount */
+ do {
+ if (check_locality(chip, l))
+ return l;
+ tpm_msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ return -1;
+}
+
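+/*
+ * Localities are reference-counted: the hardware locality is requested only
+ * for the first user and relinquished again only when the count drops back
+ * to zero, so nested request/relinquish pairs are safe.
+ */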
+static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ mutex_lock(&priv->locality_count_mutex);
+ if (priv->locality_count == 0)
+ ret = __tpm_tis_request_locality(chip, l);
+ if (!ret)
+ priv->locality_count++;
+ mutex_unlock(&priv->locality_count_mutex);
+ return ret;
+}
+
+static u8 tpm_tis_status(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 status;
+
+ rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status);
+ if (rc < 0)
+ return 0;
+
+ if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
+ if (!test_and_set_bit(TPM_TIS_INVALID_STATUS, &priv->flags)) {
+ /*
+ * If this trips, the chances are the read is
+ * returning 0xff because the locality hasn't been
+ * acquired. Usually because tpm_try_get_ops() hasn't
+ * been called before doing a TPM operation.
+ */
+ dev_err(&chip->dev, "invalid TPM_STS.x 0x%02x, dumping stack for forensics\n",
+ status);
+
+ /*
+ * Dump stack for forensics, as invalid TPM_STS.x could be
+ * potentially triggered by impaired tpm_try_get_ops() or
+ * tpm_find_get_ops().
+ */
+ dump_stack();
+ }
+
+ return 0;
+ }
+
+ return status;
+}
+
+static void tpm_tis_ready(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* this causes the current command to be aborted */
+ tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY);
+}
+
+static int get_burstcount(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int burstcnt, rc;
+ u32 value;
+
+ /* wait for burstcount */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ stop = jiffies + chip->timeout_a;
+ else
+ stop = jiffies + chip->timeout_d;
+ do {
+ rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
+ if (rc < 0)
+ return rc;
+
+ burstcnt = (value >> 8) & 0xFFFF;
+ if (burstcnt)
+ return burstcnt;
+ usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int size = 0, burstcnt, rc;
+
+ while (size < count) {
+ rc = wait_for_tpm_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->timeout_c,
+ &priv->read_queue, true);
+ if (rc < 0)
+ return rc;
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ return burstcnt;
+ }
+ burstcnt = min_t(int, burstcnt, count - size);
+
+ rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
+ burstcnt, buf + size);
+ if (rc < 0)
+ return rc;
+
+ size += burstcnt;
+ }
+ return size;
+}
+
+static int tpm_tis_try_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int size = 0;
+ int status;
+ u32 expected;
+ int rc;
+
+	/* read first 10 bytes, including tag, paramsize, and result */
+	size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count || expected < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (rc < 0) {
+ size = rc;
+ goto out;
+ }
+ size += rc;
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ size = -ETIME;
+ goto out;
+ }
+ status = tpm_tis_status(chip);
+ if (status & TPM_STS_DATA_AVAIL) {
+ dev_err(&chip->dev, "Error left over data\n");
+ size = -EIO;
+ goto out;
+ }
+
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
+out:
+ return size;
+}
+
+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned int try;
+ int rc = 0;
+
+ if (count < TPM_HEADER_SIZE)
+ return -EIO;
+
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_try_recv(chip, buf, count);
+
+ if (rc == -EIO)
+ /* Data transfer errors, indicated by EIO, can be
+ * recovered by rereading the response.
+ */
+ tpm_tis_write8(priv, TPM_STS(priv->locality),
+ TPM_STS_RESPONSE_RETRY);
+ else
+ break;
+ }
+
+ tpm_tis_ready(chip);
+
+ return rc;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc, status, burstcnt;
+ size_t count = 0;
+ bool itpm = test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+
+ status = tpm_tis_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_ready(chip);
+ if (wait_for_tpm_stat
+ (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while (count < len - 1) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ dev_err(&chip->dev, "Unable to read burstcount\n");
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
+ rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
+ burstcnt, buf + count);
+ if (rc < 0)
+ goto out_err;
+
+ count += burstcnt;
+
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
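+	/*
+	 * The TPM must keep TPM_STS_DATA_EXPECT set until the last byte has
+	 * been written and clear it afterwards; sending the final byte outside
+	 * the burst loop lets both conditions be checked.
+	 */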
+ /* write last byte */
+ rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]);
+ if (rc < 0)
+ goto out_err;
+
+ if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_tis_ready(chip);
+ return rc;
+}
+
+static void __tpm_tis_disable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 int_mask = 0;
+
+ tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &int_mask);
+ int_mask &= ~TPM_GLOBAL_INT_ENABLE;
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), int_mask);
+
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+}
+
+static void tpm_tis_disable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq == 0)
+ return;
+
+ __tpm_tis_disable_interrupts(chip);
+
+ devm_free_irq(chip->dev.parent, priv->irq, chip);
+ priv->irq = 0;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u32 ordinal;
+ unsigned long dur;
+ unsigned int try;
+
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc >= 0)
+ /* Data transfer done successfully */
+ break;
+ else if (rc != -EIO)
+ /* Data transfer failed, not recoverable */
+ return rc;
+ }
+
+ /* go and do it */
+ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
+ if (rc < 0)
+ goto out_err;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ dur = tpm_calc_ordinal_duration(chip, ordinal);
+ if (wait_for_tpm_stat
+ (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
+ &priv->read_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+ return 0;
+out_err:
+ tpm_tis_ready(chip);
+ return rc;
+}
+
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, irq;
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ) ||
+ test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ return tpm_tis_send_main(chip, buf, len);
+
+ /* Verify receipt of the expected IRQ */
+ irq = priv->irq;
+ priv->irq = 0;
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+ rc = tpm_tis_send_main(chip, buf, len);
+ priv->irq = irq;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
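+	/*
+	 * The interrupt handler sets TPM_TIS_IRQ_TESTED when the IRQ fires;
+	 * give it a brief moment and fall back to polling if it never does.
+	 */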
+ if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ tpm_msleep(1);
+ if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
+ tpm_tis_disable_interrupts(chip);
+ set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ return rc;
+}
+
+struct tis_vendor_durations_override {
+ u32 did_vid;
+ struct tpm1_version version;
+ unsigned long durations[3];
+};
+
+static const struct tis_vendor_durations_override vendor_dur_overrides[] = {
+ /* STMicroelectronics 0x104a */
+ { 0x0000104a,
+ { 1, 2, 8, 28 },
+ { (2 * 60 * HZ), (2 * 60 * HZ), (2 * 60 * HZ) } },
+};
+
+static void tpm_tis_update_durations(struct tpm_chip *chip,
+ unsigned long *duration_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ struct tpm1_version *version;
+ u32 did_vid;
+ int i, rc;
+ cap_t cap;
+
+ chip->duration_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid. %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Try to get a TPM version 1.2 or 1.1 TPM_CAP_VERSION_INFO */
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version",
+ sizeof(cap.version2));
+ if (!rc) {
+ version = &cap.version2.version;
+ } else {
+ rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+ "attempting to determine the 1.1 version",
+ sizeof(cap.version1));
+
+ if (rc)
+ goto out;
+
+ version = &cap.version1;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_dur_overrides); i++) {
+ if (vendor_dur_overrides[i].did_vid != did_vid)
+ continue;
+
+ if ((version->major ==
+ vendor_dur_overrides[i].version.major) &&
+ (version->minor ==
+ vendor_dur_overrides[i].version.minor) &&
+ (version->rev_major ==
+ vendor_dur_overrides[i].version.rev_major) &&
+ (version->rev_minor ==
+ vendor_dur_overrides[i].version.rev_minor)) {
+
+ memcpy(duration_cap,
+ vendor_dur_overrides[i].durations,
+ sizeof(vendor_dur_overrides[i].durations));
+
+ chip->duration_adjusted = true;
+ goto out;
+ }
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+}
+
+struct tis_vendor_timeout_override {
+ u32 did_vid;
+ unsigned long timeout_us[4];
+};
+
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
+ /* Atmel 3204 */
+ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
+ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
+};
+
+static void tpm_tis_update_timeouts(struct tpm_chip *chip,
+ unsigned long *timeout_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int i, rc;
+ u32 did_vid;
+
+ chip->timeout_adjusted = false;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0) {
+ dev_warn(&chip->dev, "%s: failed to read did_vid: %d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+ if (vendor_timeout_overrides[i].did_vid != did_vid)
+ continue;
+ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+ sizeof(vendor_timeout_overrides[i].timeout_us));
+ chip->timeout_adjusted = true;
+ }
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return;
+}
+
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending a command without the itpm flag set and, if that
+ * fails, repeat with the itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+ static const u8 cmd_getticks[] = {
+ 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0xf1
+ };
+ size_t len = sizeof(cmd_getticks);
+ u16 vendor;
+
+ if (test_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags))
+ return 0;
+
+ rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ return rc;
+
+	/* probe only iTPMs */
+ if (vendor != TPM_VID_INTEL)
+ return 0;
+
+ if (tpm_tis_request_locality(chip, 0) != 0)
+ return -EBUSY;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ goto out;
+
+ tpm_tis_ready(chip);
+
+ set_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ dev_info(&chip->dev, "Detected an iTPM.\n");
+ else {
+ clear_bit(TPM_TIS_ITPM_WORKAROUND, &priv->flags);
+ rc = -EFAULT;
+ }
+
+out:
+ tpm_tis_ready(chip);
+ tpm_tis_relinquish_locality(chip, priv->locality);
+
+ return rc;
+}
+
+static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) {
+ switch (priv->manufacturer_id) {
+ case TPM_VID_WINBOND:
+ return ((status == TPM_STS_VALID) ||
+ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+ case TPM_VID_STM:
+ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+ default:
+ break;
+ }
+ }
+
+ return status == TPM_STS_COMMAND_READY;
+}
+
+static irqreturn_t tpm_tis_revert_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ const char *product;
+ const char *vendor;
+
+ dev_warn(&chip->dev, FW_BUG
+ "TPM interrupt storm detected, polling instead\n");
+
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ product = dmi_get_system_info(DMI_PRODUCT_VERSION);
+
+ if (vendor && product) {
+ dev_info(&chip->dev,
+ "Consider adding the following entry to tpm_tis_dmi_table:\n");
+ dev_info(&chip->dev, "\tDMI_SYS_VENDOR: %s\n", vendor);
+ dev_info(&chip->dev, "\tDMI_PRODUCT_VERSION: %s\n", product);
+ }
+
+ if (tpm_tis_request_locality(chip, 0) != 0)
+ return IRQ_NONE;
+
+ __tpm_tis_disable_interrupts(chip);
+ tpm_tis_relinquish_locality(chip, 0);
+
+ schedule_work(&priv->free_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tpm_tis_update_unhandled_irqs(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ irqreturn_t irqret = IRQ_HANDLED;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+ return IRQ_HANDLED;
+
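+	/*
+	 * Count unhandled interrupts that arrive within 100 ms of each other;
+	 * once more than TPM_TIS_MAX_UNHANDLED_IRQS pile up, revert to polling.
+	 */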
+ if (time_after(jiffies, priv->last_unhandled_irq + HZ/10))
+ priv->unhandled_irqs = 1;
+ else
+ priv->unhandled_irqs++;
+
+ priv->last_unhandled_irq = jiffies;
+
+ if (priv->unhandled_irqs > TPM_TIS_MAX_UNHANDLED_IRQS)
+ irqret = tpm_tis_revert_interrupts(chip);
+
+ return irqret;
+}
+
+static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 interrupt;
+ int rc;
+
+ rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+ if (rc < 0)
+ goto err;
+
+ if (interrupt == 0)
+ goto err;
+
+ set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+ if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ wake_up_interruptible(&priv->read_queue);
+
+ if (interrupt &
+ (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
+ TPM_INTF_CMD_READY_INT))
+ wake_up_interruptible(&priv->int_queue);
+
+ /* Clear interrupts handled with TPM_EOI */
+ tpm_tis_request_locality(chip, 0);
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
+ tpm_tis_relinquish_locality(chip, 0);
+ if (rc < 0)
+ goto err;
+
+ tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+ return IRQ_HANDLED;
+
+err:
+ return tpm_tis_update_unhandled_irqs(chip);
+}
+
+static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
+{
+ const char *desc = "attempting to generate an interrupt";
+ u32 cap2;
+ cap_t cap;
+ int ret;
+
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ else
+ ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+
+ if (ret)
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+}
+
+static void tpm_tis_free_irq_func(struct work_struct *work)
+{
+ struct tpm_tis_data *priv = container_of(work, typeof(*priv), free_irq_work);
+ struct tpm_chip *chip = priv->chip;
+
+ devm_free_irq(chip->dev.parent, priv->irq, chip);
+ priv->irq = 0;
+}
+
+/* Register the IRQ and issue a command that will cause an interrupt. If an
+ * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
+ * everything and leave in polling mode. Returns 0 on success.
+ */
+static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ int flags, int irq)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u8 original_int_vec;
+ int rc;
+ u32 int_status;
+
+ INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
+
+ rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
+ tis_int_handler, IRQF_ONESHOT | flags,
+ dev_name(&chip->dev), chip);
+ if (rc) {
+ dev_info(&chip->dev, "Unable to request irq: %d for probe\n",
+ irq);
+ return -1;
+ }
+ priv->irq = irq;
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ &original_int_vec);
+ if (rc < 0) {
+ tpm_tis_relinquish_locality(chip, priv->locality);
+ return rc;
+ }
+
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
+ if (rc < 0)
+ goto restore_irqs;
+
+ rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
+ if (rc < 0)
+ goto restore_irqs;
+
+ /* Clear all existing */
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
+ if (rc < 0)
+ goto restore_irqs;
+ /* Turn on */
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
+ intmask | TPM_GLOBAL_INT_ENABLE);
+ if (rc < 0)
+ goto restore_irqs;
+
+ clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
+
+ /* Generate an interrupt by having the core call through to
+ * tpm_tis_send
+ */
+ tpm_tis_gen_interrupt(chip);
+
+restore_irqs:
+ /* tpm_tis_send will either confirm the interrupt is working or it
+ * will call disable_irq which undoes all of the above.
+ */
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+		tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+			       original_int_vec);
+ rc = -1;
+ }
+
+ tpm_tis_relinquish_locality(chip, priv->locality);
+
+ return rc;
+}
+
+/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
+ * do not have ACPI/etc. We typically expect the interrupt to be declared if
+ * present.
+ */
+static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u8 original_int_vec;
+ int i, rc;
+
+ rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ &original_int_vec);
+ if (rc < 0)
+ return;
+
+ if (!original_int_vec) {
+ if (IS_ENABLED(CONFIG_X86))
+ for (i = 3; i <= 15; i++)
+ if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+ i))
+ return;
+ } else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+ original_int_vec))
+ return;
+}
+
+void tpm_tis_remove(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 reg = TPM_INT_ENABLE(priv->locality);
+ u32 interrupt;
+ int rc;
+
+ tpm_tis_clkrun_enable(chip, true);
+
+ rc = tpm_tis_read32(priv, reg, &interrupt);
+ if (rc < 0)
+ interrupt = 0;
+
+ tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+ flush_work(&priv->free_irq_work);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+ if (priv->ilb_base_addr)
+ iounmap(priv->ilb_base_addr);
+}
+EXPORT_SYMBOL_GPL(tpm_tis_remove);
+
+/**
+ * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
+ * of a single TPM command
+ * @chip: TPM chip to use
+ * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
+ * 0 - Enable CLKRUN protocol
+ * Call this function directly in tpm_tis_remove() in error or driver removal
+ * path, since the chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ u32 clkrun_val;
+
+ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+ !data->ilb_base_addr)
+ return;
+
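+	/* clkrun_enabled is a nesting count; only touch the hardware on the outermost calls */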
+ if (value) {
+ data->clkrun_enabled++;
+ if (data->clkrun_enabled > 1)
+ return;
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+		 * Write an arbitrary value to port 0x80, which sits on the LPC
+		 * bus, to make sure the LPC clock is running before sending
+		 * any TPM command.
+ */
+ outb(0xCC, 0x80);
+ } else {
+ data->clkrun_enabled--;
+ if (data->clkrun_enabled)
+ return;
+
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+		 * Write an arbitrary value to port 0x80, which sits on the LPC
+		 * bus, to make sure the LPC clock is running before sending
+		 * any TPM command.
+ */
+ outb(0xCC, 0x80);
+ }
+}
+
+static const struct tpm_class_ops tpm_tis = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = tpm_tis_status,
+ .recv = tpm_tis_recv,
+ .send = tpm_tis_send,
+ .cancel = tpm_tis_ready,
+ .update_timeouts = tpm_tis_update_timeouts,
+ .update_durations = tpm_tis_update_durations,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_req_canceled,
+ .request_locality = tpm_tis_request_locality,
+ .relinquish_locality = tpm_tis_relinquish_locality,
+ .clk_enable = tpm_tis_clkrun_enable,
+};
+
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
+ acpi_handle acpi_dev_handle)
+{
+ u32 vendor;
+ u32 intfcaps;
+ u32 intmask;
+ u32 clkrun_val;
+ u8 rid;
+ int rc, probe;
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_tis);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+#ifdef CONFIG_ACPI
+ chip->acpi_dev_handle = acpi_dev_handle;
+#endif
+
+ chip->hwrng.quality = priv->rng_quality;
+
+ /* Maximum timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
+ chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
+ chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
+ chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+ priv->chip = chip;
+ priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
+ priv->phy_ops = phy_ops;
+ priv->locality_count = 0;
+ mutex_init(&priv->locality_count_mutex);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ return rc;
+
+ priv->manufacturer_id = vendor;
+
+ if (priv->manufacturer_id == TPM_VID_ATML &&
+ !(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ priv->timeout_min = TIS_TIMEOUT_MIN_ATML;
+ priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
+ }
+
+ if (is_bsw()) {
+ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+ if (!priv->ilb_base_addr)
+ return -ENOMEM;
+
+ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+		/* Check whether CLKRUN# is already disabled on the LPC bus */
+ if (!(clkrun_val & LPC_CLKRUN_EN)) {
+ iounmap(priv->ilb_base_addr);
+ priv->ilb_base_addr = NULL;
+ }
+ }
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* Take control of the TPM's interrupt hardware and shut it off */
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+ goto out_err;
+
+ /* Figure out the capabilities */
+ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+ if (rc < 0)
+ goto out_err;
+
+ dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+ intfcaps);
+ if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+ dev_dbg(dev, "\tBurst Count Static\n");
+ if (intfcaps & TPM_INTF_CMD_READY_INT) {
+ intmask |= TPM_INTF_CMD_READY_INT;
+ dev_dbg(dev, "\tCommand Ready Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+ dev_dbg(dev, "\tInterrupt Edge Falling\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+ dev_dbg(dev, "\tInterrupt Edge Rising\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+ dev_dbg(dev, "\tInterrupt Level Low\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+ dev_dbg(dev, "\tInterrupt Level High\n");
+ if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) {
+ intmask |= TPM_INTF_LOCALITY_CHANGE_INT;
+ dev_dbg(dev, "\tLocality Change Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_STS_VALID_INT) {
+ intmask |= TPM_INTF_STS_VALID_INT;
+ dev_dbg(dev, "\tSts Valid Int Support\n");
+ }
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT) {
+ intmask |= TPM_INTF_DATA_AVAIL_INT;
+ dev_dbg(dev, "\tData Avail Int Support\n");
+ }
+
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ tpm_tis_relinquish_locality(chip, 0);
+
+ rc = tpm_chip_start(chip);
+ if (rc)
+ goto out_err;
+ rc = tpm2_probe(chip);
+ tpm_chip_stop(chip);
+ if (rc)
+ goto out_err;
+
+ rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
+ if (rc < 0)
+ goto out_err;
+
+ dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
+ vendor >> 16, rid);
+
+ probe = probe_itpm(chip);
+ if (probe < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&priv->read_queue);
+ init_waitqueue_head(&priv->int_queue);
+
+ rc = tpm_chip_bootstrap(chip);
+ if (rc)
+ goto out_err;
+
+ if (irq != -1) {
+ /*
+ * Before doing irq testing issue a command to the TPM in polling mode
+ * to make sure it works. May as well use that command to set the
+ * proper timeouts for the driver.
+ */
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+
+ rc = tpm_get_timeouts(chip);
+
+ tpm_tis_relinquish_locality(chip, 0);
+
+ if (rc) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (irq)
+ tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ irq);
+ else
+ tpm_tis_probe_irq(chip, intmask);
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ priv->int_mask = intmask;
+ } else {
+ dev_err(&chip->dev, FW_BUG
+ "TPM interrupt not working, polling instead\n");
+
+ rc = tpm_tis_request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+ tpm_tis_disable_interrupts(chip);
+ tpm_tis_relinquish_locality(chip, 0);
+ }
+ }
+
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto out_err;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return 0;
+out_err:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ tpm_tis_remove(chip);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_core_init);
+
+#ifdef CONFIG_PM_SLEEP
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 intmask;
+ int rc;
+
+ /*
+ * Re-enable interrupts that device may have lost or BIOS/firmware may
+ * have disabled.
+ */
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Setting IRQ failed.\n");
+ return;
+ }
+
+ intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ if (rc < 0)
+ dev_err(&chip->dev, "Enabling interrupts failed.\n");
+}
+
+int tpm_tis_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int ret;
+
+ ret = tpm_chip_start(chip);
+ if (ret)
+ return ret;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ tpm_tis_reenable_interrupts(chip);
+
+ /*
+	 * TPM 1.2 requires a self-test on resume. tpm1_do_selftest() actually
+	 * returns an error code, but for an unknown reason it isn't handled.
+ */
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ tpm1_do_selftest(chip);
+
+ tpm_chip_stop(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_resume);
+#endif
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
new file mode 100644
index 0000000000..13e99cf65e
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ */
+
+#ifndef __TPM_TIS_CORE_H__
+#define __TPM_TIS_CORE_H__
+
+#include "tpm.h"
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+ TPM_STS_RESPONSE_RETRY = 0x02,
+ TPM_STS_READ_ZERO = 0x23, /* bits that must be zero on read */
+};
+
+enum tis_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80000000,
+ TPM_INTF_BURST_COUNT_STATIC = 0x100,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_INT_EDGE_FALLING = 0x040,
+ TPM_INTF_INT_EDGE_RISING = 0x020,
+ TPM_INTF_INT_LEVEL_LOW = 0x010,
+ TPM_INTF_INT_LEVEL_HIGH = 0x008,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_MEM_LEN = 0x5000,
+ TIS_SHORT_TIMEOUT = 750, /* ms */
+ TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
+ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
+};
+
+/* Some timeout values are needed before it is known whether the chip is
+ * TPM 1.2 or TPM 2.0.
+ */
+#define TIS_TIMEOUT_A_MAX max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
+#define TIS_TIMEOUT_B_MAX max_t(int, TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
+#define TIS_TIMEOUT_C_MAX max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
+#define TIS_TIMEOUT_D_MAX max_t(int, TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
+
+#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
+#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
+#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
+#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
+#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
+#define TPM_STS(l) (0x0018 | ((l) << 12))
+#define TPM_STS3(l) (0x001b | ((l) << 12))
+#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
+
+#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
+#define TPM_RID(l) (0x0F04 | ((l) << 12))
+
+#define LPC_CNTRL_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+
+enum tpm_tis_flags {
+ TPM_TIS_ITPM_WORKAROUND = 0,
+ TPM_TIS_INVALID_STATUS = 1,
+ TPM_TIS_DEFAULT_CANCELLATION = 2,
+ TPM_TIS_IRQ_TESTED = 3,
+};
+
+struct tpm_tis_data {
+ struct tpm_chip *chip;
+ u16 manufacturer_id;
+ struct mutex locality_count_mutex;
+ unsigned int locality_count;
+ int locality;
+ int irq;
+ struct work_struct free_irq_work;
+ unsigned long last_unhandled_irq;
+ unsigned int unhandled_irqs;
+ unsigned int int_mask;
+ unsigned long flags;
+ void __iomem *ilb_base_addr;
+ u16 clkrun_enabled;
+ wait_queue_head_t int_queue;
+ wait_queue_head_t read_queue;
+ const struct tpm_tis_phy_ops *phy_ops;
+ unsigned short rng_quality;
+ unsigned int timeout_min; /* usecs */
+ unsigned int timeout_max; /* usecs */
+};
+
+/*
+ * IO modes to indicate how many bytes should be read/written at once in the
+ * tpm_tis_phy_ops read_bytes/write_bytes calls. Use TPM_TIS_PHYS_8 to
+ * receive/transmit byte-wise, TPM_TIS_PHYS_16 for two bytes etc.
+ */
+enum tpm_tis_io_mode {
+ TPM_TIS_PHYS_8,
+ TPM_TIS_PHYS_16,
+ TPM_TIS_PHYS_32,
+};
+
+struct tpm_tis_phy_ops {
+ /* data is passed in little endian */
+ int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode mode);
+ int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
+};
+
+static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ return data->phy_ops->read_bytes(data, addr, len, result,
+ TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result)
+{
+ return data->phy_ops->read_bytes(data, addr, 1, result, TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr,
+ u16 *result)
+{
+ __le16 result_le;
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le, TPM_TIS_PHYS_16);
+ if (!rc)
+ *result = le16_to_cpu(result_le);
+
+ return rc;
+}
+
+static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
+ u32 *result)
+{
+ __le32 result_le;
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le, TPM_TIS_PHYS_32);
+ if (!rc)
+ *result = le32_to_cpu(result_le);
+
+ return rc;
+}
+
+static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value)
+{
+ return data->phy_ops->write_bytes(data, addr, len, value,
+ TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value)
+{
+ return data->phy_ops->write_bytes(data, addr, 1, &value,
+ TPM_TIS_PHYS_8);
+}
+
+static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
+ u32 value)
+{
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le, TPM_TIS_PHYS_32);
+ return rc;
+}
+
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
+}
+
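+/* Intel Braswell (Atom Airmont) needs the LPC CLKRUN# workaround. */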
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT;
+#else
+ return false;
+#endif
+}
+
+void tpm_tis_remove(struct tpm_chip *chip);
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
+ acpi_handle acpi_dev_handle);
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_resume(struct device *dev);
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 0000000000..a897402cc3
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF60
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ u16 read = 0;
+
+ while (read < len) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result + read;
+ msg.len = len - read;
+ msg.flags = I2C_M_RD;
+ if (msg.len > I2C_SMBUS_BLOCK_MAX)
+ msg.len = I2C_SMBUS_BLOCK_MAX;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+ read += msg.len;
+ }
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+ u16 wrote = 0;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ phy->io_buf[0] = reg;
+ msg.buf = phy->io_buf;
+ while (wrote < len) {
+ /* write register and data in one go */
+ msg.len = sizeof(reg) + len - wrote;
+ if (msg.len > I2C_SMBUS_BLOCK_MAX)
+ msg.len = I2C_SMBUS_BLOCK_MAX;
+
+ memcpy(phy->io_buf + sizeof(reg), value + wrote,
+ msg.len - sizeof(reg));
+
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+ wrote += msg.len - sizeof(reg);
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM failed to send an I2C NACK),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags);
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static void tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
new file mode 100644
index 0000000000..e70abd69e1
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Based on Infineon TPM driver by Peter Huewe.
+ *
+ * cr50 is a firmware for H1 secure modules that requires special
+ * handling for the I2C interface.
+ *
+ * - Use an interrupt for transaction status instead of hardcoded delays.
+ * - Must use write+wait+read read protocol.
+ * - All 4 bytes of status register must be read/written at once.
+ * - Burst count max is 63 bytes, and burst count behaves slightly differently
+ *   than on other I2C TPMs.
+ * - When reading from FIFO the full burstcnt must be read instead of just
+ * reading header and determining the remainder.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+
+#define TPM_CR50_MAX_BUFSIZE 64
+#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
+#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
+#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
+#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
+#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+
+#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
+#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
+#define TPM_I2C_DATA_FIFO(l) (0x0005 | ((l) << 4))
+#define TPM_I2C_DID_VID(l) (0x0006 | ((l) << 4))
+
+/**
+ * struct tpm_i2c_cr50_priv_data - Driver private data.
+ * @irq: Irq number used for this chip.
+ * If irq <= 0, then a fixed timeout is used instead of waiting for irq.
+ * @tpm_ready: Struct used by irq handler to signal R/W readiness.
+ * @buf: Buffer used for i2c writes, with i2c address prepended to content.
+ *
+ * Private driver struct used by kernel threads and interrupt context.
+ */
+struct tpm_i2c_cr50_priv_data {
+ int irq;
+ struct completion tpm_ready;
+ u8 buf[TPM_CR50_MAX_BUFSIZE];
+};
+
+/**
+ * tpm_cr50_i2c_int_handler() - cr50 interrupt handler.
+ * @dummy: Unused parameter.
+ * @tpm_info: TPM chip information.
+ *
+ * The cr50 interrupt handler signals waiting threads that the
+ * interrupt has been asserted. It does not do any interrupt triggered
+ * processing but is instead used to avoid fixed delays.
+ *
+ * Return:
+ * IRQ_HANDLED signifies irq was handled by this device.
+ */
+static irqreturn_t tpm_cr50_i2c_int_handler(int dummy, void *tpm_info)
+{
+ struct tpm_chip *chip = tpm_info;
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ complete(&priv->tpm_ready);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * tpm_cr50_i2c_wait_tpm_ready() - Wait for tpm to signal ready.
+ * @chip: A TPM chip.
+ *
+ * Wait for completion interrupt if available, otherwise use a fixed
+ * delay for the TPM to be ready.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_wait_tpm_ready(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* Use a safe fixed delay if interrupt is not supported */
+ if (priv->irq <= 0) {
+ msleep(TPM_CR50_TIMEOUT_NOIRQ_MS);
+ return 0;
+ }
+
+ /* Wait for interrupt to indicate TPM is ready to respond */
+ if (!wait_for_completion_timeout(&priv->tpm_ready, chip->timeout_a)) {
+ dev_warn(&chip->dev, "Timeout waiting for TPM ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_enable_tpm_irq() - Enable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_enable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0) {
+ reinit_completion(&priv->tpm_ready);
+ enable_irq(priv->irq);
+ }
+}
+
+/**
+ * tpm_cr50_i2c_disable_tpm_irq() - Disable TPM irq.
+ * @chip: A TPM chip.
+ */
+static void tpm_cr50_i2c_disable_tpm_irq(struct tpm_chip *chip)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (priv->irq > 0)
+ disable_irq(priv->irq);
+}
+
+/**
+ * tpm_cr50_i2c_transfer_message() - Transfer a message over i2c.
+ * @dev: Device information.
+ * @adapter: I2C adapter.
+ * @msg: Message to transfer.
+ *
+ * Call unlocked i2c transfer routine with the provided parameters and
+ * retry in case of bus errors.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_transfer_message(struct device *dev,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msg)
+{
+ unsigned int try;
+ int rc;
+
+ for (try = 0; try < TPM_CR50_I2C_MAX_RETRIES; try++) {
+ rc = __i2c_transfer(adapter, msg, 1);
+ if (rc == 1)
+ return 0; /* Successfully transferred the message */
+ if (try)
+ dev_warn(dev, "i2c transfer failed (attempt %d/%d): %d\n",
+ try + 1, TPM_CR50_I2C_MAX_RETRIES, rc);
+ usleep_range(TPM_CR50_I2C_RETRY_DELAY_LO, TPM_CR50_I2C_RETRY_DELAY_HI);
+ }
+
+ /* No i2c message transferred */
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_i2c_read() - Read from TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to read from.
+ * @buffer: Read destination, provided by caller.
+ * @len: Number of bytes to read.
+ *
+ * Sends the register address byte to the TPM, then waits until TPM
+ * is ready via interrupt signal or timeout expiration, then 'len'
+ * bytes are read from TPM response into the provided 'buffer'.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t len)
+{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg_reg_addr = {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &addr
+ };
+ struct i2c_msg msg_response = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buffer
+ };
+ int rc;
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send the register address byte to the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_reg_addr);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready with response data */
+ rc = tpm_cr50_i2c_wait_tpm_ready(chip);
+ if (rc < 0)
+ goto out;
+
+ /* Read response data from the TPM */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg_response);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_i2c_write()- Write to TPM register.
+ * @chip: A TPM chip.
+ * @addr: Register address to write to.
+ * @buffer: Data to write.
+ * @len: Number of bytes to write.
+ *
+ * The provided address is prepended to the data in 'buffer', the
+ * combined address+data is sent to the TPM, and the function then
+ * waits for the TPM to indicate it is done writing.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
+ size_t len)
+{
+ struct tpm_i2c_cr50_priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .len = len + 1,
+ .buf = priv->buf
+ };
+ int rc;
+
+ if (len > TPM_CR50_MAX_BUFSIZE - 1)
+ return -EINVAL;
+
+ /* Prepend the 'register address' to the buffer */
+ priv->buf[0] = addr;
+ memcpy(priv->buf + 1, buffer, len);
+
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ /* Prepare for completion interrupt */
+ tpm_cr50_i2c_enable_tpm_irq(chip);
+
+ /* Send write request buffer with address */
+ rc = tpm_cr50_i2c_transfer_message(&chip->dev, client->adapter, &msg);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for TPM to be ready, ignore timeout */
+ tpm_cr50_i2c_wait_tpm_ready(chip);
+
+out:
+ tpm_cr50_i2c_disable_tpm_irq(chip);
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_check_locality(struct tpm_chip *chip)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
+ u8 buf;
+ int rc;
+
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ if ((buf & mask) == mask)
+ return 0;
+
+ return -EIO;
+}
+
+/**
+ * tpm_cr50_release_locality() - Release TPM locality.
+ * @chip: A TPM chip.
+ * @force: Flag to force release if set.
+ */
+static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+{
+ u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
+ u8 addr = TPM_I2C_ACCESS(0);
+ u8 buf;
+
+ if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
+ return;
+
+ if (force || (buf & mask) == mask) {
+ buf = TPM_ACCESS_ACTIVE_LOCALITY;
+ tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ }
+}
+
+/**
+ * tpm_cr50_request_locality() - Request TPM locality 0.
+ * @chip: A TPM chip.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_request_locality(struct tpm_chip *chip)
+{
+ u8 buf = TPM_ACCESS_REQUEST_USE;
+ unsigned long stop;
+ int rc;
+
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+ do {
+ if (!tpm_cr50_check_locality(chip))
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_status() - Read cr50 tis status.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be read.
+ *
+ * Return:
+ * TPM status byte.
+ */
+static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
+{
+ u8 buf[4];
+
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ return 0;
+
+ return buf[0];
+}
+
+/**
+ * tpm_cr50_i2c_tis_set_ready() - Set status register to ready.
+ * @chip: A TPM chip.
+ *
+ * cr50 requires all 4 bytes of status register to be written.
+ */
+static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
+{
+ u8 buf[4] = { TPM_STS_COMMAND_READY };
+
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+}
+
+/**
+ * tpm_cr50_i2c_get_burst_and_status() - Get burst count and status.
+ * @chip: A TPM chip.
+ * @mask: Status mask.
+ * @burst: Return value for burst.
+ * @status: Return value for status.
+ *
+ * cr50 uses bytes 3:2 of status register for burst count and
+ * all 4 bytes must be read.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
+ size_t *burst, u32 *status)
+{
+ unsigned long stop;
+ u8 buf[4];
+
+ *status = 0;
+
+ /* wait for burstcount */
+ stop = jiffies + chip->timeout_b;
+
+ do {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ continue;
+ }
+
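+		/* Byte 0 carries the status bits; bytes 1-2 hold the little-endian burst count */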
+ *status = *buf;
+ *burst = le16_to_cpup((__le16 *)(buf + 1));
+
+ if ((*status & mask) == mask &&
+ *burst > 0 && *burst <= TPM_CR50_MAX_BUFSIZE - 1)
+ return 0;
+
+ msleep(TPM_CR50_TIMEOUT_SHORT_MS);
+ } while (time_before(jiffies, stop));
+
+ dev_err(&chip->dev, "Timeout reading burst and status\n");
+ return -ETIMEDOUT;
+}
+
+/**
+ * tpm_cr50_i2c_tis_recv() - TPM reception callback.
+ * @chip: A TPM chip.
+ * @buf: Reception buffer.
+ * @buf_len: Buffer length to read.
+ *
+ * Return:
+ * - >= 0: Number of read bytes.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
+{
+ u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
+ size_t burstcnt, cur, len, expected;
+ u8 addr = TPM_I2C_DATA_FIFO(0);
+ u32 status;
+ int rc;
+
+ if (buf_len < TPM_HEADER_SIZE)
+ return -EINVAL;
+
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ if (burstcnt > buf_len || burstcnt < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev,
+ "Unexpected burstcnt: %zu (max=%zu, min=%d)\n",
+ burstcnt, buf_len, TPM_HEADER_SIZE);
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Read first chunk of burstcnt bytes */
+ rc = tpm_cr50_i2c_read(chip, addr, buf, burstcnt);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read of first chunk failed\n");
+ goto out_err;
+ }
+
+ /* Determine expected data in the return buffer */
+ expected = be32_to_cpup((__be32 *)(buf + 2));
+ if (expected > buf_len) {
+ dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
+ rc = -E2BIG;
+ goto out_err;
+ }
+
+ /* Now read the rest of the data */
+ cur = burstcnt;
+ while (cur < expected) {
+ /* Read updated burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ len = min_t(size_t, burstcnt, expected - cur);
+ rc = tpm_cr50_i2c_read(chip, addr, buf + cur, len);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Read failed\n");
+ goto out_err;
+ }
+
+ cur += len;
+ }
+
+ /* Ensure TPM is done reading data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_AVAIL) {
+ dev_err(&chip->dev, "Data still available\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ tpm_cr50_release_locality(chip, false);
+ return cur;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_tis_send() - TPM transmission callback.
+ * @chip: A TPM chip.
+ * @buf: Buffer to send.
+ * @len: Buffer length.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ size_t burstcnt, limit, sent = 0;
+ u8 tpm_go[4] = { TPM_STS_GO };
+ unsigned long stop;
+ u32 status;
+ int rc;
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0)
+ return rc;
+
+ /* Wait until TPM is ready for a command */
+ stop = jiffies + chip->timeout_b;
+ while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
+ if (time_after(jiffies, stop)) {
+ rc = -ETIMEDOUT;
+ goto out_err;
+ }
+
+ tpm_cr50_i2c_tis_set_ready(chip);
+ }
+
+ while (len > 0) {
+ u8 mask = TPM_STS_VALID;
+
+ /* Wait for data if this is not the first chunk */
+ if (sent > 0)
+ mask |= TPM_STS_DATA_EXPECT;
+
+ /* Read burst count and check status */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, mask, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+
+ /*
+ * Use burstcnt - 1 to account for the address byte
+ * that is inserted by tpm_cr50_i2c_write()
+ */
+ limit = min_t(size_t, burstcnt - 1, len);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ if (rc < 0) {
+ dev_err(&chip->dev, "Write failed\n");
+ goto out_err;
+ }
+
+ sent += limit;
+ len -= limit;
+ }
+
+ /* Ensure TPM is not expecting more data */
+ rc = tpm_cr50_i2c_get_burst_and_status(chip, TPM_STS_VALID, &burstcnt, &status);
+ if (rc < 0)
+ goto out_err;
+ if (status & TPM_STS_DATA_EXPECT) {
+ dev_err(&chip->dev, "Data still expected\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /* Start the TPM command */
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ sizeof(tpm_go));
+ if (rc < 0) {
+ dev_err(&chip->dev, "Start command failed\n");
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ /* Abort current transaction if still pending */
+ if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
+ tpm_cr50_i2c_tis_set_ready(chip);
+
+ tpm_cr50_release_locality(chip, false);
+ return rc;
+}
+
+/**
+ * tpm_cr50_i2c_req_canceled() - Callback to notify a request cancel.
+ * @chip: A TPM chip.
+ * @status: Status given by the cancel callback.
+ *
+ * Return:
+ * True if command is ready, False otherwise.
+ */
+static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status == TPM_STS_COMMAND_READY;
+}
+
+static bool tpm_cr50_i2c_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+ /* This flag should default true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
+static const struct tpm_class_ops cr50_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = &tpm_cr50_i2c_tis_status,
+ .recv = &tpm_cr50_i2c_tis_recv,
+ .send = &tpm_cr50_i2c_tis_send,
+ .cancel = &tpm_cr50_i2c_tis_set_ready,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = &tpm_cr50_i2c_req_canceled,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cr50_i2c_acpi_id[] = {
+ { "GOOG0005", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cr50_i2c_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cr50_i2c_match[] = {
+ { .compatible = "google,cr50", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
+#endif
+
+/**
+ * tpm_cr50_i2c_probe() - Driver probe function.
+ * @client: I2C client information.
+ * @id: I2C device id.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
+ */
+static int tpm_cr50_i2c_probe(struct i2c_client *client)
+{
+ struct tpm_i2c_cr50_priv_data *priv;
+ struct device *dev = &client->dev;
+ struct tpm_chip *chip;
+ u32 vendor;
+ u8 buf[4];
+ int rc;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ chip = tpmm_chip_alloc(dev, &cr50_i2c);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* cr50 is a TPM 2.0 chip */
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ if (tpm_cr50_i2c_is_firmware_power_managed(dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ /* Default timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
+ init_completion(&priv->tpm_ready);
+
+ if (client->irq > 0) {
+ rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
+ dev->driver->name, chip);
+ if (rc < 0) {
+ dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
+ return rc;
+ }
+
+ priv->irq = client->irq;
+ } else {
+ dev_warn(dev, "No IRQ, will use %ums delay for TPM ready\n",
+ TPM_CR50_TIMEOUT_NOIRQ_MS);
+ }
+
+ rc = tpm_cr50_request_locality(chip);
+ if (rc < 0) {
+ dev_err(dev, "Could not request locality\n");
+ return rc;
+ }
+
+ /* Read four bytes from DID_VID register */
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ if (rc < 0) {
+ dev_err(dev, "Could not read vendor id\n");
+ tpm_cr50_release_locality(chip, true);
+ return rc;
+ }
+
+ vendor = le32_to_cpup((__le32 *)buf);
+ if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) {
+ dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
+ tpm_cr50_release_locality(chip, true);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
+ vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50",
+ client->addr, client->irq, vendor >> 16);
+ return tpm_chip_register(chip);
+}
+
+/**
+ * tpm_cr50_i2c_remove() - Driver remove function.
+ * @client: I2C client information.
+ */
+static void tpm_cr50_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ if (!chip) {
+ dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n");
+ return;
+ }
+
+ tpm_chip_unregister(chip);
+ tpm_cr50_release_locality(chip, true);
+}
+
+static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
+
+static struct i2c_driver cr50_i2c_driver = {
+ .probe = tpm_cr50_i2c_probe,
+ .remove = tpm_cr50_i2c_remove,
+ .driver = {
+ .name = "cr50_i2c",
+ .pm = &cr50_i2c_pm,
+ .acpi_match_table = ACPI_PTR(cr50_i2c_acpi_id),
+ .of_match_table = of_match_ptr(of_cr50_i2c_match),
+ },
+};
+
+module_i2c_driver(cr50_i2c_driver);
+
+MODULE_DESCRIPTION("cr50 TPM I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h
new file mode 100644
index 0000000000..d0f66f6f19
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ */
+
+#ifndef TPM_TIS_SPI_H
+#define TPM_TIS_SPI_H
+
+#include "tpm_tis_core.h"
+
+struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+ int (*flow_control)(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *xfer);
+ struct completion ready;
+ unsigned long wake_after;
+
+ u8 *iobuf;
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops);
+
+extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out);
+
+#ifdef CONFIG_TCG_TIS_SPI_CR50
+extern int cr50_spi_probe(struct spi_device *spi);
+#else
+static inline int cr50_spi_probe(struct spi_device *spi)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_TCG_TIS_SPI_CR50)
+extern int tpm_tis_spi_resume(struct device *dev);
+#else
+#define tpm_tis_spi_resume NULL
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
new file mode 100644
index 0000000000..f4937280e9
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This device driver implements a TCG PTP FIFO interface over SPI for chips
+ * with Cr50 firmware.
+ * It is based on tpm_tis_spi driver by Peter Huewe and Christophe Ricard.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+/*
+ * Cr50 timing constants:
+ * - may go to sleep no earlier than CR50_SLEEP_DELAY_MSEC after the last access.
+ * - needs up to CR50_WAKE_START_DELAY_USEC to wake after sleep.
+ * - requires waiting for "ready" IRQ, if supported; or waiting for at least
+ * CR50_NOIRQ_ACCESS_DELAY_MSEC between transactions, if IRQ is not supported.
+ * - waits for up to CR50_FLOW_CONTROL for flow control 'ready' indication.
+ */
+#define CR50_SLEEP_DELAY_MSEC 1000
+#define CR50_WAKE_START_DELAY_USEC 1000
+#define CR50_NOIRQ_ACCESS_DELAY msecs_to_jiffies(2)
+#define CR50_READY_IRQ_TIMEOUT msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define CR50_FLOW_CONTROL msecs_to_jiffies(TPM2_TIMEOUT_A)
+#define MAX_IRQ_CONFIRMATION_ATTEMPTS 3
+
+#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
+#define TPM_CR50_MAX_FW_VER_LEN 64
+
+/* Default quality for hwrng. */
+#define TPM_CR50_DEFAULT_RNG_QUALITY 700
+
+struct cr50_spi_phy {
+ struct tpm_tis_spi_phy spi_phy;
+
+ struct mutex time_track_mutex;
+ unsigned long last_access;
+
+ unsigned long access_delay;
+
+ unsigned int irq_confirmation_attempt;
+ bool irq_needs_confirmation;
+ bool irq_confirmed;
+};
+
+static inline struct cr50_spi_phy *to_cr50_spi_phy(struct tpm_tis_spi_phy *phy)
+{
+ return container_of(phy, struct cr50_spi_phy, spi_phy);
+}
+
+/*
+ * The cr50 interrupt handler just signals waiting threads that the
+ * interrupt was asserted. It does not do any processing triggered
+ * by interrupts but is instead used to avoid fixed delays.
+ */
+static irqreturn_t cr50_spi_irq_handler(int dummy, void *dev_id)
+{
+ struct cr50_spi_phy *cr50_phy = dev_id;
+
+ cr50_phy->irq_confirmed = true;
+ complete(&cr50_phy->spi_phy.ready);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Cr50 needs to have at least some delay between consecutive
+ * transactions. Make sure we wait.
+ */
+static void cr50_ensure_access_delay(struct cr50_spi_phy *phy)
+{
+ unsigned long allowed_access = phy->last_access + phy->access_delay;
+ unsigned long time_now = jiffies;
+ struct device *dev = &phy->spi_phy.spi_device->dev;
+
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll have an unneeded short delay,
+ * which is fine.
+ */
+ if (time_in_range_open(time_now, phy->last_access, allowed_access)) {
+ unsigned long remaining, timeout = allowed_access - time_now;
+
+ remaining = wait_for_completion_timeout(&phy->spi_phy.ready,
+ timeout);
+ if (!remaining && phy->irq_confirmed)
+ dev_warn(dev, "Timeout waiting for TPM ready IRQ\n");
+ }
+
+ if (phy->irq_needs_confirmation) {
+ unsigned int attempt = ++phy->irq_confirmation_attempt;
+
+ if (phy->irq_confirmed) {
+ phy->irq_needs_confirmation = false;
+ phy->access_delay = CR50_READY_IRQ_TIMEOUT;
+ dev_info(dev, "TPM ready IRQ confirmed on attempt %u\n",
+ attempt);
+ } else if (attempt > MAX_IRQ_CONFIRMATION_ATTEMPTS) {
+ phy->irq_needs_confirmation = false;
+ dev_warn(dev, "IRQ not confirmed - will use delays\n");
+ }
+ }
+}
+
+/*
+ * Cr50 might go to sleep if there is no SPI activity for some time and
+ * miss the first few bits/bytes on the bus. In such case, wake it up
+ * by asserting CS and give it time to start up.
+ */
+static bool cr50_needs_waking(struct cr50_spi_phy *phy)
+{
+ /*
+ * Note: There is a small chance, if Cr50 is not accessed in a few days,
+ * that time_in_range will not provide the correct result after the wrap
+ * around for jiffies. In this case, we'll probably timeout or read
+ * incorrect value from TPM_STS and just retry the operation.
+ */
+ return !time_in_range_open(jiffies, phy->last_access,
+ phy->spi_phy.wake_after);
+}
+
+static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
+{
+ struct tpm_tis_spi_phy *phy = &cr50_phy->spi_phy;
+
+ if (cr50_needs_waking(cr50_phy)) {
+ /* Assert CS, wait 1 msec, deassert CS */
+ struct spi_transfer spi_cs_wake = {
+ .delay = {
+ .value = 1000,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
+ };
+
+ spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
+ /* Wait for it to fully wake */
+ usleep_range(CR50_WAKE_START_DELAY_USEC,
+ CR50_WAKE_START_DELAY_USEC * 2);
+ }
+
+ /* Reset the time when we need to wake Cr50 again */
+ phy->wake_after = jiffies + msecs_to_jiffies(CR50_SLEEP_DELAY_MSEC);
+}
+
+/*
+ * Flow control: clock the bus and wait for cr50 to set LSB before
+ * sending/receiving data. TCG PTP spec allows it to happen during
+ * the last byte of header, but cr50 never does that in practice,
+ * and earlier versions had a bug when it was set too early, so don't
+ * check for it during header transfer.
+ */
+static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct device *dev = &phy->spi_device->dev;
+ unsigned long timeout = jiffies + CR50_FLOW_CONTROL;
+ struct spi_message m;
+ int ret;
+
+ spi_xfer->len = 1;
+
+ do {
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(dev, "Timeout during flow control\n");
+ return -EBUSY;
+ }
+ } while (!(phy->iobuf[0] & 0x01));
+
+ return 0;
+}
+
+static bool tpm_cr50_spi_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+ /* This flag should default true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
+static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct cr50_spi_phy *cr50_phy = to_cr50_spi_phy(phy);
+ int ret;
+
+ mutex_lock(&cr50_phy->time_track_mutex);
+ /*
+ * Do this outside of spi_bus_lock in case cr50 is not the
+ * only device on that spi bus.
+ */
+ cr50_ensure_access_delay(cr50_phy);
+ cr50_wake_if_needed(cr50_phy);
+
+ ret = tpm_tis_spi_transfer(data, addr, len, in, out);
+
+ cr50_phy->last_access = jiffies;
+ mutex_unlock(&cr50_phy->time_track_mutex);
+
+ return ret;
+}
+
+static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = {
+ .read_bytes = tpm_tis_spi_cr50_read_bytes,
+ .write_bytes = tpm_tis_spi_cr50_write_bytes,
+};
+
+static void cr50_print_fw_version(struct tpm_tis_data *data)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int i, len = 0;
+ char fw_ver[TPM_CR50_MAX_FW_VER_LEN + 1];
+ char fw_ver_block[4];
+
+ /*
+ * Write anything to TPM_CR50_FW_VER to start from the beginning
+ * of the version string
+ */
+ tpm_tis_write8(data, TPM_CR50_FW_VER(data->locality), 0);
+
+ /* Read the string, 4 bytes at a time, until we get '\0' */
+ do {
+ tpm_tis_read_bytes(data, TPM_CR50_FW_VER(data->locality), 4,
+ fw_ver_block);
+ for (i = 0; i < 4 && fw_ver_block[i]; ++len, ++i)
+ fw_ver[len] = fw_ver_block[i];
+ } while (i == 4 && len < TPM_CR50_MAX_FW_VER_LEN);
+ fw_ver[len] = '\0';
+
+ dev_info(&phy->spi_device->dev, "Cr50 firmware version: %s\n", fw_ver);
+}
+
+int cr50_spi_probe(struct spi_device *spi)
+{
+ struct tpm_tis_spi_phy *phy;
+ struct cr50_spi_phy *cr50_phy;
+ int ret;
+ struct tpm_chip *chip;
+
+ cr50_phy = devm_kzalloc(&spi->dev, sizeof(*cr50_phy), GFP_KERNEL);
+ if (!cr50_phy)
+ return -ENOMEM;
+
+ phy = &cr50_phy->spi_phy;
+ phy->flow_control = cr50_spi_flow_control;
+ phy->wake_after = jiffies;
+ phy->priv.rng_quality = TPM_CR50_DEFAULT_RNG_QUALITY;
+ init_completion(&phy->ready);
+
+ cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
+ cr50_phy->last_access = jiffies;
+ mutex_init(&cr50_phy->time_track_mutex);
+
+ if (spi->irq > 0) {
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ cr50_spi_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "cr50_spi", cr50_phy);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&spi->dev, "Requesting IRQ %d failed: %d\n",
+ spi->irq, ret);
+ /*
+ * This is not fatal, the driver will fall back to
+ * delays automatically, since ready will never
+ * be completed without a registered irq handler.
+ * So, just fall through.
+ */
+ } else {
+ /*
+ * IRQ requested, let's verify that it is actually
+ * triggered, before relying on it.
+ */
+ cr50_phy->irq_needs_confirmation = true;
+ }
+ } else {
+ dev_warn(&spi->dev,
+ "No IRQ - will use delays between transactions.\n");
+ }
+
+ ret = tpm_tis_spi_init(spi, phy, -1, &tpm_spi_cr50_phy_ops);
+ if (ret)
+ return ret;
+
+ cr50_print_fw_version(&phy->priv);
+
+ chip = dev_get_drvdata(&spi->dev);
+ if (tpm_cr50_spi_is_firmware_power_managed(&spi->dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_spi_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ /*
+ * Jiffies not increased during suspend, so we need to reset
+ * the time to wake Cr50 after resume.
+ */
+ phy->wake_after = jiffies;
+
+ return tpm_tis_resume(dev);
+}
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
new file mode 100644
index 0000000000..c5c3197ee2
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ * Christophe Ricard <christophe-h.ricard@st.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
+ * SPI access_.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Doorn, Kylene Hall and Jarkko Sakkinen.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <linux/tpm.h>
+
+#include "tpm.h"
+#include "tpm_tis_core.h"
+#include "tpm_tis_spi.h"
+
+#define MAX_SPI_FRAMESIZE 64
+
+/*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+ * keep trying to read from the device until MISO goes high indicating the
+ * wait state has ended.
+ *
+ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+ */
+static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct spi_message m;
+ int ret, i;
+
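+	/* The LSB of the last header byte signals readiness; 0 means the TPM inserted a wait state */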
+ if ((phy->iobuf[3] & 0x01) == 0) {
+ // handle SPI wait states
+ for (i = 0; i < TPM_RETRY; i++) {
+ spi_xfer->len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+ if (phy->iobuf[0] & 0x01)
+ break;
+ }
+
+ if (i == TPM_RETRY)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Half-duplex controllers with support for TPM wait state detection, such
+ * as Tegra QSPI, need CMD, ADDR and DATA sent in a single message to manage
+ * HW flow control. Each phase is sent in a separate transfer so that the
+ * controller can identify the phase.
+ */
+static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_transfer spi_xfer[3];
+ struct spi_message m;
+ u8 transfer_len;
+ int ret;
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+ spi_message_init(&m);
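+		/* Build the 4-byte TIS SPI header: direction bit and length in byte 0, then the 0xD4xxxx register address */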
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+
+ spi_xfer[0].tx_buf = phy->iobuf;
+ spi_xfer[0].len = 1;
+ spi_message_add_tail(&spi_xfer[0], &m);
+
+ spi_xfer[1].tx_buf = phy->iobuf + 1;
+ spi_xfer[1].len = 3;
+ spi_message_add_tail(&spi_xfer[1], &m);
+
+ if (out) {
+ spi_xfer[2].tx_buf = &phy->iobuf[4];
+ spi_xfer[2].rx_buf = NULL;
+ memcpy(&phy->iobuf[4], out, transfer_len);
+ out += transfer_len;
+ }
+
+ if (in) {
+ spi_xfer[2].tx_buf = NULL;
+ spi_xfer[2].rx_buf = &phy->iobuf[4];
+ }
+
+ spi_xfer[2].len = transfer_len;
+ spi_message_add_tail(&spi_xfer[2], &m);
+
+ reinit_completion(&phy->ready);
+
+ ret = spi_sync(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (in) {
+ memcpy(in, &phy->iobuf[4], transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+ return ret;
+}
+
+static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int ret = 0;
+ struct spi_message m;
+ struct spi_transfer spi_xfer;
+ u8 transfer_len;
+
+ spi_bus_lock(phy->spi_device->master);
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
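+		/* Send the 4-byte header first; cs_change keeps chip select asserted for the flow-control and data transfers that follow */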
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
+ spi_xfer.len = 4;
+ spi_xfer.cs_change = 1;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ /* Flow control transfers are receive only */
+ spi_xfer.tx_buf = NULL;
+ ret = phy->flow_control(phy, &spi_xfer);
+ if (ret < 0)
+ goto exit;
+
+ spi_xfer.cs_change = 0;
+ spi_xfer.len = transfer_len;
+ spi_xfer.delay.value = 5;
+ spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
+
+ if (out) {
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ reinit_completion(&phy->ready);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+exit:
+ if (ret < 0) {
+ /* Deactivate chip select */
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ spi_sync_locked(phy->spi_device, &m);
+ }
+
+ spi_bus_unlock(phy->spi_device->master);
+ return ret;
+}
+
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_controller *ctlr = phy->spi_device->controller;
+
+ /*
+	 * TPM flow control over SPI requires full duplex support.
+	 * For a half duplex controller, send the entire message so that
+	 * wait-state polling can be handled in the controller itself
+	 * (TPM HW flow control); otherwise use the full duplex path.
+ */
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ return tpm_tis_spi_transfer_half(data, addr, len, in, out);
+ else
+ return tpm_tis_spi_transfer_full(data, addr, len, in, out);
+}
+
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
+}
+
+static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
+{
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
+}
+
+int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops)
+{
+ phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
+ phy->spi_device = spi;
+
+ return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
+}
+
+static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
+ .read_bytes = tpm_tis_spi_read_bytes,
+ .write_bytes = tpm_tis_spi_write_bytes,
+};
+
+static int tpm_tis_spi_probe(struct spi_device *dev)
+{
+ struct tpm_tis_spi_phy *phy;
+ int irq;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->flow_control = tpm_tis_spi_flow_control;
+
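+	/* Half-duplex controllers detect TPM wait states themselves, so request HW flow control mode */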
+ if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ dev->mode |= SPI_TPM_HW_FLOW;
+
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ init_completion(&phy->ready);
+ return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
+}
+
+typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
+
+static int tpm_tis_spi_driver_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
+ tpm_tis_spi_probe_func probe_func;
+
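+	/* Prefer the OF match data, fall back to the SPI device ID's driver_data, then to the generic TIS probe */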
+ probe_func = of_device_get_match_data(&spi->dev);
+ if (!probe_func) {
+ if (spi_dev_id) {
+ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
+ if (!probe_func)
+ return -ENODEV;
+ } else
+ probe_func = tpm_tis_spi_probe;
+ }
+
+ return probe_func(spi);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
+
+static void tpm_tis_spi_remove(struct spi_device *dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static const struct spi_device_id tpm_tis_spi_id[] = {
+ { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
+ { "slb9670", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
+ { "cr50", (unsigned long)cr50_spi_probe },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
+
+static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
+ { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
+ { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
+ { .compatible = "google,cr50", .data = cr50_spi_probe },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_spi_match);
+
+static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
+ {"SMO0768", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
+
+static struct spi_driver tpm_tis_spi_driver = {
+ .driver = {
+ .name = "tpm_tis_spi",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_spi_match),
+ .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = tpm_tis_spi_driver_probe,
+ .remove = tpm_tis_spi_remove,
+ .id_table = tpm_tis_spi_id,
+};
+module_spi_driver(tpm_tis_spi_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native SPI access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
new file mode 100644
index 0000000000..0621ebec53
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * This device driver implements the MMIO TPM interface on the SynQuacer platform.
+ */
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+/*
+ * irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+struct tpm_tis_synquacer_info {
+ struct resource res;
+ int irq;
+};
+
+struct tpm_tis_synquacer_phy {
+ struct tpm_tis_data priv;
+ void __iomem *iobase;
+};
+
+static inline struct tpm_tis_synquacer_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_synquacer_phy, priv);
+}
+
+static int tpm_tis_synquacer_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
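+
+	/* Like the write path below, wide reads are done byte-wise in descending order due to the SynQuacer bus limitation */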
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ result[1] = ioread8(phy->iobase + addr + 1);
+ result[0] = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_32:
+ result[3] = ioread8(phy->iobase + addr + 3);
+ result[2] = ioread8(phy->iobase + addr + 2);
+ result[1] = ioread8(phy->iobase + addr + 1);
+ result[0] = ioread8(phy->iobase + addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_synquacer_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+	struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+
+	switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ return -EINVAL;
+ case TPM_TIS_PHYS_32:
+ /*
+		 * Due to a limitation of the SPI controller on SynQuacer,
+		 * 16/32-bit accesses must be done byte-wise and in descending order.
+ */
+ iowrite8(value[3], phy->iobase + addr + 3);
+ iowrite8(value[2], phy->iobase + addr + 2);
+ iowrite8(value[1], phy->iobase + addr + 1);
+ iowrite8(value[0], phy->iobase + addr);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tpm_tis_phy_ops tpm_tcg_bw = {
+ .read_bytes = tpm_tis_synquacer_read_bytes,
+ .write_bytes = tpm_tis_synquacer_write_bytes,
+};
+
+static int tpm_tis_synquacer_init(struct device *dev,
+ struct tpm_tis_synquacer_info *tpm_info)
+{
+ struct tpm_tis_synquacer_phy *phy;
+
+ phy = devm_kzalloc(dev, sizeof(struct tpm_tis_synquacer_phy), GFP_KERNEL);
+ if (phy == NULL)
+ return -ENOMEM;
+
+ phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(phy->iobase))
+ return PTR_ERR(phy->iobase);
+
+ return tpm_tis_core_init(dev, &phy->priv, tpm_info->irq, &tpm_tcg_bw,
+ ACPI_HANDLE(dev));
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_synquacer_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_synquacer_probe(struct platform_device *pdev)
+{
+ struct tpm_tis_synquacer_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ tpm_info.irq = -1;
+
+ return tpm_tis_synquacer_init(&pdev->dev, &tpm_info);
+}
+
+static void tpm_tis_synquacer_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tis_synquacer_of_platform_match[] = {
+ {.compatible = "socionext,synquacer-tpm-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_synquacer_of_platform_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tpm_synquacer_acpi_tbl[] = {
+ { "SCX0009" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
+#endif
+
+static struct platform_driver tis_synquacer_drv = {
+ .probe = tpm_tis_synquacer_probe,
+ .remove_new = tpm_tis_synquacer_remove,
+ .driver = {
+ .name = "tpm_tis_synquacer",
+ .pm = &tpm_tis_synquacer_pm,
+ .of_match_table = of_match_ptr(tis_synquacer_of_platform_match),
+ .acpi_match_table = ACPI_PTR(tpm_synquacer_acpi_tbl),
+ },
+};
+
+module_platform_driver(tis_synquacer_drv);
+
+MODULE_DESCRIPTION("TPM MMIO Driver for Socionext SynQuacer platform");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
new file mode 100644
index 0000000000..30e953988c
--- /dev/null
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Stefan Berger <stefanb@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for vTPM (vTPM proxy driver)
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/vtpm_proxy.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/compat.h>
+
+#include "tpm.h"
+
+#define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0)
+
+struct proxy_dev {
+ struct tpm_chip *chip;
+
+ u32 flags; /* public API flags */
+
+ wait_queue_head_t wq;
+
+ struct mutex buf_lock; /* protect buffer and flags */
+
+ long state; /* internal state */
+#define STATE_OPENED_FLAG BIT(0)
+#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG BIT(2)
+#define STATE_DRIVER_COMMAND BIT(3) /* sending a driver specific command */
+
+ size_t req_len; /* length of queued TPM request */
+ size_t resp_len; /* length of queued TPM response */
+ u8 buffer[TPM_BUFSIZE]; /* request/response buffer */
+
+ struct work_struct work; /* task that retrieves TPM timeouts */
+};
+
+/* all supported flags */
+#define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2)
+
+static struct workqueue_struct *workqueue;
+
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);
+
+/*
+ * Functions related to 'server side'
+ */
+
+/**
+ * vtpm_proxy_fops_read - Read TPM commands on 'server side'
+ *
+ * @filp: file pointer
+ * @buf: read buffer
+ * @count: number of bytes to read
+ * @off: offset
+ *
+ * Return:
+ * Number of bytes read or negative error code
+ */
+static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+ size_t len;
+ int sig, rc;
+
+ sig = wait_event_interruptible(proxy_dev->wq,
+ proxy_dev->req_len != 0 ||
+ !(proxy_dev->state & STATE_OPENED_FLAG));
+ if (sig)
+ return -EINTR;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ len = proxy_dev->req_len;
+
+ if (count < len || len > sizeof(proxy_dev->buffer)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
+ count, len);
+ return -EIO;
+ }
+
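+	/* Hand the queued request to the server side and clear it from the shared buffer */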
+ rc = copy_to_user(buf, proxy_dev->buffer, len);
+ memset(proxy_dev->buffer, 0, len);
+ proxy_dev->req_len = 0;
+
+ if (!rc)
+ proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ if (rc)
+ return -EFAULT;
+
+ return len;
+}
+
+/**
+ * vtpm_proxy_fops_write - Write TPM responses on 'server side'
+ *
+ * @filp: file pointer
+ * @buf: write buffer
+ * @count: number of bytes to write
+ * @off: offset
+ *
+ * Return:
+ * Number of bytes written or negative error value
+ */
+static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ if (count > sizeof(proxy_dev->buffer) ||
+ !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EIO;
+ }
+
+ proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+ proxy_dev->req_len = 0;
+
+ if (copy_from_user(proxy_dev->buffer, buf, count)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EFAULT;
+ }
+
+ proxy_dev->resp_len = count;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+ return count;
+}
+
+/*
+ * vtpm_proxy_fops_poll - Poll status on 'server side'
+ *
+ * @filp: file pointer
+ * @wait: poll table
+ *
+ * Return: Poll flags
+ */
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+ __poll_t ret;
+
+ poll_wait(filp, &proxy_dev->wq, wait);
+
+ ret = EPOLLOUT;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (proxy_dev->req_len)
+ ret |= EPOLLIN | EPOLLRDNORM;
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG))
+ ret |= EPOLLHUP;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return ret;
+}
+
+/*
+ * vtpm_proxy_fops_open - Open vTPM device on 'server side'
+ *
+ * @filp: file pointer
+ *
+ * Called when setting up the anonymous file descriptor
+ */
+static void vtpm_proxy_fops_open(struct file *filp)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ proxy_dev->state |= STATE_OPENED_FLAG;
+}
+
+/**
+ * vtpm_proxy_fops_undo_open - counterpart to vtpm_proxy_fops_open
+ * @proxy_dev: tpm proxy device
+ *
+ * Undo the effects of vtpm_proxy_fops_open().
+ */
+static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
+{
+ mutex_lock(&proxy_dev->buf_lock);
+
+ proxy_dev->state &= ~STATE_OPENED_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ /* no more TPM responses -- wake up anyone waiting for them */
+ wake_up_interruptible(&proxy_dev->wq);
+}
+
+/*
+ * vtpm_proxy_fops_release - Close 'server side'
+ *
+ * @inode: inode
+ * @filp: file pointer
+ * Return:
+ * Always returns 0.
+ */
+static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ filp->private_data = NULL;
+
+ vtpm_proxy_delete_device(proxy_dev);
+
+ return 0;
+}
+
+static const struct file_operations vtpm_proxy_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = vtpm_proxy_fops_read,
+ .write = vtpm_proxy_fops_write,
+ .poll = vtpm_proxy_fops_poll,
+ .release = vtpm_proxy_fops_release,
+};
+
+/*
+ * Functions invoked by the core TPM driver to send TPM commands to
+ * 'server side' and receive responses from there.
+ */
+
+/*
+ * Called when core TPM driver reads TPM responses from 'server side'
+ *
+ * @chip: tpm chip to use
+ * @buf: receive buffer
+ * @count: bytes to read
+ * Return:
+ * Number of TPM response bytes read, negative error value otherwise
+ */
+static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ size_t len;
+
+ /* process gone ? */
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ len = proxy_dev->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "Invalid size in recv: count=%zd, resp_len=%zd\n",
+ count, len);
+ len = -EIO;
+ goto out;
+ }
+
+ memcpy(buf, proxy_dev->buffer, len);
+ proxy_dev->resp_len = 0;
+
+out:
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return len;
+}
+
+static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
+ u8 *buf, size_t count)
+{
+ struct tpm_header *hdr = (struct tpm_header *)buf;
+
+ if (count < sizeof(struct tpm_header))
+ return 0;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ switch (be32_to_cpu(hdr->ordinal)) {
+ case TPM2_CC_SET_LOCALITY:
+ return 1;
+ }
+ } else {
+ switch (be32_to_cpu(hdr->ordinal)) {
+ case TPM_ORD_SET_LOCALITY:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Called when core TPM driver forwards TPM requests to 'server side'.
+ *
+ * @chip: tpm chip to use
+ * @buf: send buffer
+ * @count: bytes to send
+ *
+ * Return:
+ * 0 in case of success, negative error value otherwise.
+ */
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (count > sizeof(proxy_dev->buffer)) {
+ dev_err(&chip->dev,
+ "Invalid size in send: count=%zd, buffer size=%zd\n",
+ count, sizeof(proxy_dev->buffer));
+ return -EIO;
+ }
+
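+	/* Reject driver-internal ordinals (e.g. SET_LOCALITY) unless the driver itself queued the command */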
+ if (!(proxy_dev->state & STATE_DRIVER_COMMAND) &&
+ vtpm_proxy_is_driver_command(chip, buf, count))
+ return -EFAULT;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ proxy_dev->resp_len = 0;
+
+ proxy_dev->req_len = count;
+ memcpy(proxy_dev->buffer, buf, count);
+
+ proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+ return 0;
+}
+
+static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (proxy_dev->resp_len)
+ return VTPM_PROXY_REQ_COMPLETE_FLAG;
+
+ return 0;
+}
+
+static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ bool ret;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ ret = !(proxy_dev->state & STATE_OPENED_FLAG);
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return ret;
+}
+
+static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
+{
+ struct tpm_buf buf;
+ int rc;
+ const struct tpm_header *header;
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS,
+ TPM2_CC_SET_LOCALITY);
+ else
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND,
+ TPM_ORD_SET_LOCALITY);
+ if (rc)
+ return rc;
+ tpm_buf_append_u8(&buf, locality);
+
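+	/* Flag this as a driver command so vtpm_proxy_tpm_op_send() lets it through */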
+ proxy_dev->state |= STATE_DRIVER_COMMAND;
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality");
+
+ proxy_dev->state &= ~STATE_DRIVER_COMMAND;
+
+ if (rc < 0) {
+ locality = rc;
+ goto out;
+ }
+
+ header = (const struct tpm_header *)buf.data;
+ rc = be32_to_cpu(header->return_code);
+ if (rc)
+ locality = -1;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ return locality;
+}
+
+static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = vtpm_proxy_tpm_op_recv,
+ .send = vtpm_proxy_tpm_op_send,
+ .cancel = vtpm_proxy_tpm_op_cancel,
+ .status = vtpm_proxy_tpm_op_status,
+ .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
+ .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
+ .req_canceled = vtpm_proxy_tpm_req_canceled,
+ .request_locality = vtpm_proxy_request_locality,
+};
+
+/*
+ * Code related to the startup of the TPM 2 and startup of TPM 1.2 +
+ * retrieval of timeouts and durations.
+ */
+
+static void vtpm_proxy_work(struct work_struct *work)
+{
+ struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
+ work);
+ int rc;
+
+ rc = tpm_chip_register(proxy_dev->chip);
+ if (rc)
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ else
+ proxy_dev->state |= STATE_REGISTERED_FLAG;
+}
+
+/*
+ * vtpm_proxy_work_stop: make sure the work has finished
+ *
+ * This function is useful when user space closed the fd
+ * while the driver still determines timeouts.
+ */
+static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
+{
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ flush_work(&proxy_dev->work);
+}
+
+/*
+ * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization
+ */
+static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
+{
+ queue_work(workqueue, &proxy_dev->work);
+}
+
+/*
+ * Code related to creation and deletion of device pairs
+ */
+static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
+{
+ struct proxy_dev *proxy_dev;
+ struct tpm_chip *chip;
+ int err;
+
+ proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
+ if (proxy_dev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&proxy_dev->wq);
+ mutex_init(&proxy_dev->buf_lock);
+ INIT_WORK(&proxy_dev->work, vtpm_proxy_work);
+
+ chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
+ if (IS_ERR(chip)) {
+ err = PTR_ERR(chip);
+ goto err_proxy_dev_free;
+ }
+ dev_set_drvdata(&chip->dev, proxy_dev);
+
+ proxy_dev->chip = chip;
+
+ return proxy_dev;
+
+err_proxy_dev_free:
+ kfree(proxy_dev);
+
+ return ERR_PTR(err);
+}
+
+/*
+ * Undo what has been done in vtpm_proxy_create_proxy_dev()
+ */
+static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
+{
+ put_device(&proxy_dev->chip->dev); /* frees chip */
+ kfree(proxy_dev);
+}
+
+/*
+ * Create a /dev/tpm%d and 'server side' file descriptor pair
+ *
+ * Return:
+ * Returns a file pointer on success, an ERR_PTR-encoded error otherwise
+ */
+static struct file *vtpm_proxy_create_device(
+ struct vtpm_proxy_new_dev *vtpm_new_dev)
+{
+ struct proxy_dev *proxy_dev;
+ int rc, fd;
+ struct file *file;
+
+ if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ proxy_dev = vtpm_proxy_create_proxy_dev();
+ if (IS_ERR(proxy_dev))
+ return ERR_CAST(proxy_dev);
+
+ proxy_dev->flags = vtpm_new_dev->flags;
+
+ /* setup an anonymous file for the server-side */
+ fd = get_unused_fd_flags(O_RDWR);
+ if (fd < 0) {
+ rc = fd;
+ goto err_delete_proxy_dev;
+ }
+
+ file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
+ O_RDWR);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ goto err_put_unused_fd;
+ }
+
+ /* from now on we can unwind with put_unused_fd() + fput() */
+ /* simulate an open() on the server side */
+ vtpm_proxy_fops_open(file);
+
+ if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
+ proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ vtpm_proxy_work_start(proxy_dev);
+
+ vtpm_new_dev->fd = fd;
+ vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
+ vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
+ vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;
+
+ return file;
+
+err_put_unused_fd:
+ put_unused_fd(fd);
+
+err_delete_proxy_dev:
+ vtpm_proxy_delete_proxy_dev(proxy_dev);
+
+ return ERR_PTR(rc);
+}
+
+/*
+ * Counterpart to vtpm_proxy_create_device().
+ */
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
+{
+ vtpm_proxy_work_stop(proxy_dev);
+
+	/*
+	 * A client may hold the 'ops' lock, so let it know that the server
+	 * side is shutting down before we try to grab the 'ops' lock when
+	 * unregistering the chip.
+	 */
+ vtpm_proxy_fops_undo_open(proxy_dev);
+
+ if (proxy_dev->state & STATE_REGISTERED_FLAG)
+ tpm_chip_unregister(proxy_dev->chip);
+
+ vtpm_proxy_delete_proxy_dev(proxy_dev);
+}
+
+/*
+ * Code related to the control device /dev/vtpmx
+ */
+
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file: /dev/vtpmx
+ * @ioctl: the ioctl number
+ * @arg: pointer to the struct vtpm_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to the TPM-emulating process. The
+ * caller is provided with a file descriptor to communicate with the clients
+ * and with the major and minor numbers of the TPM device.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *vtpm_file;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vtpm_new_dev_p = argp;
+
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+
+ vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(vtpm_file))
+ return PTR_ERR(vtpm_file);
+
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(vtpm_file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, vtpm_file);
+ return 0;
+}
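The ioctl documented above is the single entry point of /dev/vtpmx. A minimal userspace
sketch of driving it (illustrative only, not part of this patch, and assuming the uapi
definitions exported through <linux/vtpm_proxy.h>: struct vtpm_proxy_new_dev,
VTPM_PROXY_FLAG_TPM2, VTPM_PROXY_IOC_NEW_DEV) could look like this:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	struct vtpm_proxy_new_dev new_dev;
	int vtpmx_fd;

	vtpmx_fd = open("/dev/vtpmx", O_RDWR);
	if (vtpmx_fd < 0) {
		perror("open /dev/vtpmx");
		return 1;
	}

	memset(&new_dev, 0, sizeof(new_dev));
	new_dev.flags = VTPM_PROXY_FLAG_TPM2;	/* request a TPM 2.0 device */

	if (ioctl(vtpmx_fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0) {
		perror("VTPM_PROXY_IOC_NEW_DEV");
		close(vtpmx_fd);
		return 1;
	}

	/*
	 * new_dev.fd is the 'server side' created by
	 * vtpm_proxy_create_device(): the emulator reads marshalled TPM
	 * commands from it and writes responses back, while clients use
	 * the newly registered /dev/tpm<tpm_num> device.
	 */
	printf("server fd %u, device /dev/tpm%u (major %u, minor %u)\n",
	       new_dev.fd, new_dev.tpm_num, new_dev.major, new_dev.minor);

	/* ... the emulator's read/write loop on new_dev.fd would go here ... */

	close(new_dev.fd);
	close(vtpmx_fd);
	return 0;
}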
+
+/*
+ * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
+ *
+ * Return:
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ switch (ioctl) {
+ case VTPM_PROXY_IOC_NEW_DEV:
+ return vtpmx_ioc_new_dev(f, ioctl, arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations vtpmx_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = vtpmx_fops_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vtpmx_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vtpmx",
+ .fops = &vtpmx_fops,
+};
+
+static int __init vtpm_module_init(void)
+{
+ int rc;
+
+ workqueue = create_workqueue("tpm-vtpm");
+ if (!workqueue) {
+ pr_err("couldn't create workqueue\n");
+ return -ENOMEM;
+ }
+
+ rc = misc_register(&vtpmx_miscdev);
+ if (rc) {
+ pr_err("couldn't create vtpmx device\n");
+ destroy_workqueue(workqueue);
+ }
+
+ return rc;
+}
+
+static void __exit vtpm_module_exit(void)
+{
+ destroy_workqueue(workqueue);
+ misc_deregister(&vtpmx_miscdev);
+}
+
+module_init(vtpm_module_init);
+module_exit(vtpm_module_exit);
+
+MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
+MODULE_DESCRIPTION("vTPM Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
new file mode 100644
index 0000000000..eef0fb06ea
--- /dev/null
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 James.Bottomley@HansenPartnership.com
+ */
+#include <linux/slab.h>
+#include "tpm-dev.h"
+
+struct tpmrm_priv {
+ struct file_priv priv;
+ struct tpm_space space;
+};
+
+static int tpmrm_open(struct inode *inode, struct file *file)
+{
+ struct tpm_chip *chip;
+ struct tpmrm_priv *priv;
+ int rc;
+
+ chip = container_of(inode->i_cdev, struct tpm_chip, cdevs);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
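+	/* give each open of /dev/tpmrm<N> its own TPM2 handle space, managed by the kernel resource manager */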
+ rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ tpm_common_open(file, chip, &priv->priv, &priv->space);
+
+ return 0;
+}
+
+static int tpmrm_release(struct inode *inode, struct file *file)
+{
+ struct file_priv *fpriv = file->private_data;
+ struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
+
+ tpm_common_release(file, fpriv);
+ tpm2_del_space(fpriv->chip, &priv->space);
+ kfree(priv);
+
+ return 0;
+}
+
+const struct file_operations tpmrm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpmrm_open,
+ .read = tpm_common_read,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
+ .release = tpmrm_release,
+};
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
new file mode 100644
index 0000000000..80cca3b83b
--- /dev/null
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the Xen vTPM device frontend
+ *
+ * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ */
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/freezer.h>
+#include <xen/xen.h>
+#include <xen/events.h>
+#include <xen/interface/io/tpmif.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+#include <xen/page.h>
+#include "tpm.h"
+#include <xen/platform_pci.h>
+
+struct tpm_private {
+ struct tpm_chip *chip;
+ struct xenbus_device *dev;
+
+ struct vtpm_shared_page *shr;
+
+ unsigned int evtchn;
+ int ring_ref;
+ domid_t backend_id;
+ int irq;
+ wait_queue_head_t read_queue;
+};
+
+enum status_bits {
+ VTPM_STATUS_RUNNING = 0x1,
+ VTPM_STATUS_IDLE = 0x2,
+ VTPM_STATUS_RESULT = 0x4,
+ VTPM_STATUS_CANCELED = 0x8,
+};
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
+static u8 vtpm_status(struct tpm_chip *chip)
+{
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+
+ switch (priv->shr->state) {
+ case VTPM_STATE_IDLE:
+ return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
+ case VTPM_STATE_FINISH:
+ return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
+ case VTPM_STATE_SUBMIT:
+ case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
+ return VTPM_STATUS_RUNNING;
+ default:
+ return 0;
+ }
+}
+
+static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status & VTPM_STATUS_CANCELED;
+}
+
+static void vtpm_cancel(struct tpm_chip *chip)
+{
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+
+ priv->shr->state = VTPM_STATE_CANCEL;
+ wmb();
+ notify_remote_via_evtchn(priv->evtchn);
+}
+
+static size_t shr_data_offset(struct vtpm_shared_page *shr)
+{
+ return struct_size(shr, extra_pages, shr->nr_extra_pages);
+}
+
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ struct vtpm_shared_page *shr = priv->shr;
+ size_t offset = shr_data_offset(shr);
+
+ u32 ordinal;
+ unsigned long duration;
+
+ if (offset > PAGE_SIZE)
+ return -EINVAL;
+
+ if (offset + count > PAGE_SIZE)
+ return -EINVAL;
+
+ /* Wait for completion of any existing command or cancellation */
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
+ &priv->read_queue, true) < 0) {
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ memcpy(offset + (u8 *)shr, buf, count);
+ shr->length = count;
+ barrier();
+ shr->state = VTPM_STATE_SUBMIT;
+ wmb();
+ notify_remote_via_evtchn(priv->evtchn);
+
+ ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal);
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
+ &priv->read_queue, true) < 0) {
+ /* got a signal or timeout, try to cancel */
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+ struct vtpm_shared_page *shr = priv->shr;
+ size_t offset = shr_data_offset(shr);
+ size_t length = shr->length;
+
+ if (shr->state == VTPM_STATE_IDLE)
+ return -ECANCELED;
+
+ /* In theory the wait at the end of _send makes this one unnecessary */
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
+ &priv->read_queue, true) < 0) {
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ if (offset > PAGE_SIZE)
+ return -EIO;
+
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ if (length > count)
+ length = count;
+
+ memcpy(buf, offset + (u8 *)shr, length);
+
+ return length;
+}
+
+static const struct tpm_class_ops tpm_vtpm = {
+ .status = vtpm_status,
+ .recv = vtpm_recv,
+ .send = vtpm_send,
+ .cancel = vtpm_cancel,
+ .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+ .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+ .req_canceled = vtpm_req_canceled,
+};
+
+static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
+{
+ struct tpm_private *priv = dev_id;
+
+ switch (priv->shr->state) {
+ case VTPM_STATE_IDLE:
+ case VTPM_STATE_FINISH:
+ wake_up_interruptible(&priv->read_queue);
+ break;
+ case VTPM_STATE_SUBMIT:
+ case VTPM_STATE_CANCEL:
+ default:
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static int setup_chip(struct device *dev, struct tpm_private *priv)
+{
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_vtpm);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ init_waitqueue_head(&priv->read_queue);
+
+ priv->chip = chip;
+ dev_set_drvdata(&chip->dev, priv);
+
+ return 0;
+}
+
+/* caller must clean up in case of errors */
+static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
+{
+ struct xenbus_transaction xbt;
+ const char *message = NULL;
+ int rv;
+
+ rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1,
+ &priv->ring_ref);
+ if (rv < 0)
+ return rv;
+
+ rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
+ if (rv)
+ return rv;
+
+ rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
+ "tpmif", priv);
+ if (rv <= 0) {
+ xenbus_dev_fatal(dev, rv, "allocating TPM irq");
+ return rv;
+ }
+ priv->irq = rv;
+
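+	/* publish ring-ref and event-channel to the backend via xenstore */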
+ again:
+ rv = xenbus_transaction_start(&xbt);
+ if (rv) {
+ xenbus_dev_fatal(dev, rv, "starting transaction");
+ return rv;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename,
+ "ring-ref", "%u", priv->ring_ref);
+ if (rv) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ priv->evtchn);
+ if (rv) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
+ if (rv) {
+ message = "writing feature-protocol-v2";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_transaction_end(xbt, 0);
+ if (rv == -EAGAIN)
+ goto again;
+ if (rv) {
+ xenbus_dev_fatal(dev, rv, "completing transaction");
+ return rv;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_error(dev, rv, "%s", message);
+
+ return rv;
+}
+
+static void ring_free(struct tpm_private *priv)
+{
+ if (!priv)
+ return;
+
+ xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref);
+
+ if (priv->irq)
+ unbind_from_irqhandler(priv->irq, priv);
+
+ kfree(priv);
+}
+
+static int tpmfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct tpm_private *priv;
+ int rv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
+ return -ENOMEM;
+ }
+
+ rv = setup_chip(&dev->dev, priv);
+ if (rv) {
+ kfree(priv);
+ return rv;
+ }
+
+ rv = setup_ring(dev, priv);
+ if (rv) {
+ ring_free(priv);
+ return rv;
+ }
+
+ tpm_get_timeouts(priv->chip);
+
+ return tpm_chip_register(priv->chip);
+}
+
+static void tpmfront_remove(struct xenbus_device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
+
+ tpm_chip_unregister(chip);
+ ring_free(priv);
+ dev_set_drvdata(&chip->dev, NULL);
+}
+
+static int tpmfront_resume(struct xenbus_device *dev)
+{
+ /* A suspend/resume/migrate will interrupt a vTPM anyway */
+ tpmfront_remove(dev);
+ return tpmfront_probe(dev, NULL);
+}
+
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ if (dev->state == XenbusStateConnected)
+ break;
+
+ if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+ 0)) {
+ xenbus_dev_fatal(dev, -EINVAL,
+ "vTPM protocol 2 required");
+ return;
+ }
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ case XenbusStateClosed:
+ device_unregister(&dev->dev);
+ xenbus_frontend_closed(dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct xenbus_device_id tpmfront_ids[] = {
+ { "vtpm" },
+ { "" }
+};
+MODULE_ALIAS("xen:vtpm");
+
+static struct xenbus_driver tpmfront_driver = {
+ .ids = tpmfront_ids,
+ .probe = tpmfront_probe,
+ .remove = tpmfront_remove,
+ .resume = tpmfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init xen_tpmfront_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
+ return xenbus_register_frontend(&tpmfront_driver);
+}
+module_init(xen_tpmfront_init);
+
+static void __exit xen_tpmfront_exit(void)
+{
+ xenbus_unregister_driver(&tpmfront_driver);
+}
+module_exit(xen_tpmfront_exit);
+
+MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("Xen vTPM Driver");
+MODULE_LICENSE("GPL");