author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit:    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree:      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/nvmem
parent:    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat: 45 files changed, 11655 insertions, 0 deletions
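For orientation before the diff itself: the framework added here is consumed in-kernel through the nvmem-consumer API. A minimal sketch of that path (the "calibration" cell name and the consumer device are hypothetical placeholders, not part of this patch):

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/*
 * Minimal consumer sketch: look up a cell by name (the "calibration" cell
 * is hypothetical), read its raw bytes, then free the returned copy. With
 * devm_nvmem_cell_get() the cell reference itself is dropped automatically
 * when the consumer device unbinds.
 */
static int example_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = devm_nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);	/* may be -EPROBE_DEFER */

	buf = nvmem_cell_read(cell, &len);	/* kmalloc'ed copy of the cell */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	dev_info(dev, "read %zu calibration bytes\n", len);
	kfree(buf);
	return 0;
}
```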
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
new file mode 100644
index 0000000000..5bc9c4874f
--- /dev/null
+++ b/drivers/nvmem/Kconfig
@@ -0,0 +1,420 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NVMEM
+	bool "NVMEM Support"
+	help
+	  Support for NVMEM (Non-Volatile Memory) devices like EEPROMs, eFuses...
+
+	  This framework is designed to provide a generic interface to NVMEM
+	  from both the Linux kernel and userspace.
+
+	  If unsure, say no.
+
+if NVMEM
+
+config NVMEM_SYSFS
+	bool "/sys/bus/nvmem/devices/*/nvmem (sysfs interface)"
+	depends on SYSFS
+	default y
+	help
+	  Say Y here to add a sysfs interface for NVMEM.
+
+	  This interface is mostly used by userspace applications to
+	  read/write directly into nvmem.
+
+# Layouts
+
+source "drivers/nvmem/layouts/Kconfig"
+
+# Devices
+
+config NVMEM_APPLE_EFUSES
+	tristate "Apple eFuse support"
+	depends on ARCH_APPLE || COMPILE_TEST
+	default ARCH_APPLE
+	help
+	  Say y here to enable support for reading eFuses on Apple SoCs
+	  such as the M1. These are e.g. used to store factory-programmed
+	  calibration data required for the PCIe or the USB-C PHY.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called nvmem-apple-efuses.
+
+config NVMEM_BCM_OCOTP
+	tristate "Broadcom On-Chip OTP Controller support"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM
+	default ARCH_BCM_IPROC
+	help
+	  Say y here to enable read/write access to the Broadcom OTP
+	  controller.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-bcm-ocotp.
+
+config NVMEM_BRCM_NVRAM
+	tristate "Broadcom's NVRAM support"
+	depends on ARCH_BCM_5301X || COMPILE_TEST
+	depends on HAS_IOMEM
+	select GENERIC_NET_UTILS
+	help
+	  This driver provides support for Broadcom's NVRAM that can be accessed
+	  using I/O mapping.
+
+config NVMEM_IMX_IIM
+	tristate "i.MX IC Identification Module support"
+	depends on ARCH_MXC || COMPILE_TEST
+	help
+	  This is a driver for the IC Identification Module (IIM) available on
+	  i.MX SoCs, providing access to 4 Kbits of programmable
+	  eFuses.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-imx-iim.
+
+config NVMEM_IMX_OCOTP
+	tristate "i.MX 6/7/8 On-Chip OTP Controller support"
+	depends on ARCH_MXC || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a driver for the On-Chip OTP Controller (OCOTP) available on
+	  i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
+	  eFuses.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-imx-ocotp.
+
+config NVMEM_IMX_OCOTP_ELE
+	tristate "i.MX On-Chip OTP Controller support"
+	depends on ARCH_MXC || COMPILE_TEST
+	depends on HAS_IOMEM
+	depends on OF
+	help
+	  This is a driver for the On-Chip OTP Controller (OCOTP)
+	  available on i.MX SoCs that have an ELE.
+
+config NVMEM_IMX_OCOTP_SCU
+	tristate "i.MX8 SCU On-Chip OTP Controller support"
+	depends on IMX_SCU
+	depends on HAVE_ARM_SMCCC
+	help
+	  This is a driver for the SCU On-Chip OTP Controller (OCOTP)
+	  available on i.MX8 SoCs.
+
+config NVMEM_JZ4780_EFUSE
+	tristate "JZ4780 EFUSE Memory Support"
+	depends on MACH_INGENIC || COMPILE_TEST
+	depends on HAS_IOMEM
+	depends on OF
+	select REGMAP_MMIO
+	help
+	  Say Y here to include support for JZ4780 efuse memory found on
+	  all JZ4780 SoC based devices.
+	  To compile this driver as a module, choose M here: the module
+	  will be called nvmem_jz4780_efuse.
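The NVMEM_SYSFS option above boils down to plain file I/O from userspace. A minimal sketch of reading the exposed blob (the nvmem0 entry name is a placeholder; the real name depends on the provider's nvmem_config name and id):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Read the first 16 bytes a provider exposes through the sysfs blob.
 * "/sys/bus/nvmem/devices/nvmem0/nvmem" is a placeholder path; reads must
 * respect the device's stride/word_size, as enforced by the framework's
 * bin_attr_nvmem_read() later in this patch.
 */
int main(void)
{
	unsigned char buf[16];
	int fd = open("/sys/bus/nvmem/devices/nvmem0/nvmem", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = pread(fd, buf, sizeof(buf), 0);
	if (n < 0) {
		perror("pread");
		close(fd);
		return 1;
	}
	for (ssize_t i = 0; i < n; i++)
		printf("%02x%c", buf[i], i == n - 1 ? '\n' : ' ');
	close(fd);
	return 0;
}
```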
+
+config NVMEM_LAN9662_OTPC
+	tristate "Microchip LAN9662 OTP controller support"
+	depends on SOC_LAN966 || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This driver enables the OTP controller available on Microchip LAN9662
+	  SoCs. It controls access to the OTP memory connected to it.
+
+config NVMEM_LAYERSCAPE_SFP
+	tristate "Layerscape SFP (Security Fuse Processor) support"
+	depends on ARCH_LAYERSCAPE || COMPILE_TEST
+	depends on HAS_IOMEM
+	select REGMAP_MMIO
+	help
+	  This driver provides support to read the eFuses on Freescale
+	  Layerscape SoCs. For example, the vendor provides a per-part
+	  unique ID there.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called layerscape-sfp.
+
+config NVMEM_LPC18XX_EEPROM
+	tristate "NXP LPC18XX EEPROM Memory Support"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  Say Y here to include support for NXP LPC18xx EEPROM memory found in
+	  NXP LPC185x/3x and LPC435x/3x/2x/1x devices.
+	  To compile this driver as a module, choose M here: the module
+	  will be called nvmem_lpc18xx_eeprom.
+
+config NVMEM_LPC18XX_OTP
+	tristate "NXP LPC18XX OTP Memory Support"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  Say Y here to include support for NXP LPC18xx OTP memory found on
+	  all LPC18xx and LPC43xx devices.
+	  To compile this driver as a module, choose M here: the module
+	  will be called nvmem_lpc18xx_otp.
+
+config NVMEM_MESON_EFUSE
+	tristate "Amlogic Meson GX eFuse Support"
+	depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+	help
+	  This is a driver to retrieve specific values from the eFuse found on
+	  the Amlogic Meson GX SoCs.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_meson_efuse.
+
+config NVMEM_MESON_MX_EFUSE
+	tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+	depends on ARCH_MESON || COMPILE_TEST
+	help
+	  This is a driver to retrieve specific values from the eFuse found on
+	  the Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_meson_mx_efuse.
+
+config NVMEM_MICROCHIP_OTPC
+	tristate "Microchip OTPC support"
+	depends on ARCH_AT91 || COMPILE_TEST
+	help
+	  This driver enables the OTP controller available on Microchip SAMA7G5
+	  SoCs. It controls access to the OTP memory connected to it.
+
+config NVMEM_MTK_EFUSE
+	tristate "MediaTek SoCs EFUSE support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a driver to access hardware-related data like sensor
+	  calibration, HDMI impedance, etc.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called efuse-mtk.
+
+config NVMEM_MXS_OCOTP
+	tristate "Freescale MXS On-Chip OTP Memory Support"
+	depends on ARCH_MXS || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  If you say Y here, you will get read-only access to the
+	  One Time Programmable memory pages that are stored
+	  on the Freescale i.MX23/i.MX28 processor.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-mxs-ocotp.
+
+config NVMEM_NINTENDO_OTP
+	tristate "Nintendo Wii and Wii U OTP Support"
+	depends on WII || COMPILE_TEST
+	help
+	  This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
+
+	  This memory contains common and per-console keys, signatures and
+	  related data required to access peripherals.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-nintendo-otp.
+
+config NVMEM_QCOM_QFPROM
+	tristate "QCOM QFPROM Support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  Say y here to enable QFPROM support. The QFPROM provides access
+	  functions for QFPROM data to the rest of the drivers via the nvmem
+	  interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_qfprom.
+
+config NVMEM_QCOM_SEC_QFPROM
+	tristate "QCOM SECURE QFPROM Support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on HAS_IOMEM
+	depends on OF
+	select QCOM_SCM
+	help
+	  Say y here to enable secure QFPROM support. The secure QFPROM provides
+	  access functions for QFPROM data to the rest of the drivers via the
+	  nvmem interface.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called nvmem_sec_qfprom.
+
+config NVMEM_RAVE_SP_EEPROM
+	tristate "Rave SP EEPROM Support"
+	depends on RAVE_SP_CORE
+	help
+	  Say y here to enable Rave SP EEPROM support.
+
+config NVMEM_RMEM
+	tristate "Reserved Memory Based Driver Support"
+	depends on HAS_IOMEM
+	help
+	  This driver maps reserved memory into an nvmem device. It might be
+	  useful to expose information left by firmware in memory.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-rmem.
+
+config NVMEM_ROCKCHIP_EFUSE
+	tristate "Rockchip eFuse Support"
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a simple driver to dump specified values of Rockchip SoCs
+	  from eFuse, such as cpu-leakage.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_rockchip_efuse.
+
+config NVMEM_ROCKCHIP_OTP
+	tristate "Rockchip OTP controller support"
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a simple driver to dump specified values of Rockchip SoCs
+	  from OTP, such as cpu-leakage.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_rockchip_otp.
+
+config NVMEM_SC27XX_EFUSE
+	tristate "Spreadtrum SC27XX eFuse Support"
+	depends on MFD_SC27XX_PMIC || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a simple driver to dump specified values of Spreadtrum
+	  SC27XX PMICs from eFuse.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-sc27xx-efuse.
+
+config NVMEM_SNVS_LPGPR
+	tristate "Support for Low Power General Purpose Register"
+	depends on ARCH_MXC || COMPILE_TEST
+	help
+	  This is a driver for the Low Power General Purpose Register (LPGPR)
+	  available in the Secure Non-Volatile Storage (SNVS) block of i.MX6
+	  and i.MX7 SoCs.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-snvs-lpgpr.
+
+config NVMEM_SPMI_SDAM
+	tristate "SPMI SDAM Support"
+	depends on SPMI
+	help
+	  This driver supports the Shared Direct Access Memory Module on
+	  Qualcomm Technologies, Inc. PMICs. It provides the clients
+	  an interface to read/write to the SDAM module's shared memory.
+
+config NVMEM_SPRD_EFUSE
+	tristate "Spreadtrum SoC eFuse Support"
+	depends on ARCH_SPRD || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a simple driver to dump specified values of Spreadtrum
+	  SoCs from eFuse.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-sprd-efuse.
+
+config NVMEM_STM32_BSEC_OPTEE_TA
+	def_bool NVMEM_STM32_ROMEM && OPTEE
+	help
+	  Say y here to enable access to STM32MP SoC OTPs through the OP-TEE
+	  trusted application STM32MP BSEC.
+
+	  This library is used by the stm32-romem driver and is included in the
+	  module called nvmem-stm32-romem.
+
+config NVMEM_STM32_ROMEM
+	tristate "STMicroelectronics STM32 factory-programmed memory support"
+	depends on ARCH_STM32 || COMPILE_TEST
+	depends on OPTEE || !OPTEE
+	help
+	  Say y here to enable read-only access to the STMicroelectronics
+	  STM32 factory-programmed memory area.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-stm32-romem.
+
+config NVMEM_SUNPLUS_OCOTP
+	tristate "Sunplus SoC OTP support"
+	depends on SOC_SP7021 || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a driver for the On-chip OTP controller (OCOTP) available
+	  on Sunplus SoCs. It provides access to 128 bytes of one-time
+	  programmable eFuse.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-sunplus-ocotp.
+
+config NVMEM_SUNXI_SID
+	tristate "Allwinner SoCs SID support"
+	depends on ARCH_SUNXI
+	help
+	  This is a driver for the 'security ID' available on various Allwinner
+	  devices.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_sunxi_sid.
+
+config NVMEM_U_BOOT_ENV
+	tristate "U-Boot environment variables support"
+	depends on OF && MTD
+	select CRC32
+	select GENERIC_NET_UTILS
+	help
+	  U-Boot stores its setup as environment variables. This driver adds
+	  support for verifying & exporting such data. It also exposes variables
+	  as NVMEM cells so they can be referenced by other drivers.
+
+	  Currently this driver works only with env variables on top of MTD.
+
+	  If compiled as a module, it will be called nvmem_u-boot-env.
+
+config NVMEM_UNIPHIER_EFUSE
+	tristate "UniPhier SoCs eFuse support"
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a simple driver to dump specified values of UniPhier SoCs
+	  from eFuse.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-uniphier-efuse.
+
+config NVMEM_VF610_OCOTP
+	tristate "VF610 SoC OCOTP support"
+	depends on SOC_VF610 || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This is a driver for the 'OCOTP' peripheral available on Vybrid
+	  devices like VF5xx and VF6xx.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called nvmem-vf610-ocotp.
+
+config NVMEM_ZYNQMP
+	tristate "Xilinx ZYNQMP SoC nvmem firmware support"
+	depends on ARCH_ZYNQMP
+	help
+	  This is a driver to access hardware-related data like SoC revision,
+	  IDCODE, etc. via the firmware interface.
+
+	  If sure, say yes. If unsure, say no.
+
+config NVMEM_QORIQ_EFUSE
+	tristate "NXP QorIQ eFuse support"
+	depends on PPC_85xx || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This driver provides read support for the eFuses (SFP) on NXP QorIQ
+	  series SoCs. This includes secure boot settings, the globally unique
+	  NXP ID 'FUIDR' and the OEM unique ID 'OUIDR'.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_qoriq_efuse.
+
+endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
new file mode 100644
index 0000000000..423baf0895
--- /dev/null
+++ b/drivers/nvmem/Makefile
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for nvmem drivers.
+#
+
+obj-$(CONFIG_NVMEM) += nvmem_core.o
+nvmem_core-y := core.o
+obj-y += layouts/
+
+# Devices
+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
+nvmem-apple-efuses-y := apple-efuses.o
+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y := bcm-ocotp.o
+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
+nvmem_brcm_nvram-y := brcm_nvram.o
+obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
+nvmem-imx-iim-y := imx-iim.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
+nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP_ELE) += nvmem-imx-ocotp-ele.o
+nvmem-imx-ocotp-ele-y := imx-ocotp-ele.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
+nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
+obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
+nvmem_jz4780_efuse-y := jz4780-efuse.o
+obj-$(CONFIG_NVMEM_LAN9662_OTPC) += nvmem-lan9662-otpc.o
+nvmem-lan9662-otpc-y := lan9662-otpc.o
+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
+nvmem-layerscape-sfp-y := layerscape-sfp.o
+obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y := lpc18xx_otp.o
+obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
+nvmem-microchip-otpc-y := microchip-otpc.o
+obj-$(CONFIG_NVMEM_MTK_EFUSE) += nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y := mtk-efuse.o
+obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
+nvmem-mxs-ocotp-y := mxs-ocotp.o
+obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o
+nvmem-nintendo-otp-y := nintendo-otp.o
+obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o
+nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_QCOM_SEC_QFPROM) += nvmem_sec_qfprom.o
+nvmem_sec_qfprom-y := sec-qfprom.o
+obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
+nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
+obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
+nvmem-rmem-y := rmem.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
+nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
+obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
+nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
+obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
+obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
+nvmem_stm32_romem-y := stm32-romem.o
+nvmem_stm32_romem-$(CONFIG_NVMEM_STM32_BSEC_OPTEE_TA) += stm32-bsec-optee-ta.o
+obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
+nvmem_sunplus_ocotp-y := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o
+nvmem_u-boot-env-y := u-boot-env.o
+obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
+obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
+nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_NVMEM_QORIQ_EFUSE) += nvmem-qoriq-efuse.o
+nvmem-qoriq-efuse-y := qoriq-efuse.o
diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
new file mode 100644
index 0000000000..9b7c871021
--- /dev/null
+++ b/drivers/nvmem/apple-efuses.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC eFuse driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct apple_efuses_priv {
+	void __iomem *fuses;
+};
+
+static int apple_efuses_read(void *context, unsigned int offset, void *val,
+			     size_t bytes)
+{
+	struct apple_efuses_priv *priv = context;
+	u32 *dst = val;
+
+	while (bytes >= sizeof(u32)) {
+		*dst++ = readl_relaxed(priv->fuses + offset);
+		bytes -= sizeof(u32);
+		offset += sizeof(u32);
+	}
+
+	return 0;
+}
+
+static int apple_efuses_probe(struct platform_device *pdev)
+{
+	struct apple_efuses_priv *priv;
+	struct resource *res;
+	struct nvmem_config config = {
+		.dev = &pdev->dev,
+		.read_only = true,
+		.reg_read = apple_efuses_read,
+		.stride = sizeof(u32),
+		.word_size = sizeof(u32),
+		.name = "apple_efuses_nvmem",
+		.id = NVMEM_DEVID_AUTO,
+		.root_only = true,
+	};
+
+	priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(priv->fuses))
+		return PTR_ERR(priv->fuses);
+
+	config.priv = priv;
+	config.size = resource_size(res);
+
+	return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
+}
+
+static const struct of_device_id apple_efuses_of_match[] = {
+	{ .compatible = "apple,efuses", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, apple_efuses_of_match);
+
+static struct platform_driver apple_efuses_driver = {
+	.driver = {
+		.name = "apple_efuses",
+		.of_match_table = apple_efuses_of_match,
+	},
+	.probe = apple_efuses_probe,
+};
+
+module_platform_driver(apple_efuses_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
new file mode 100644
index 0000000000..2490f44caa
--- /dev/null
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2016 Broadcom
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/*
+ * # of tries for OTP Status. The time to execute a command varies. The slowest
+ * commands are writes which also vary based on the # of bits turned on. Writing
+ * 0xffffffff takes ~3800 us.
+ */
+#define OTPC_RETRIES 5000
+
+/* Sequence to enable OTP program */
+#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd }
+
+/* OTPC Commands */
+#define OTPC_CMD_READ 0x0
+#define OTPC_CMD_OTP_PROG_ENABLE 0x2
+#define OTPC_CMD_OTP_PROG_DISABLE 0x3
+#define OTPC_CMD_PROGRAM 0x8
+
+/* OTPC Status Bits */
+#define OTPC_STAT_CMD_DONE BIT(1)
+#define OTPC_STAT_PROG_OK BIT(2)
+
+/* OTPC register definition */
+#define OTPC_MODE_REG_OFFSET 0x0
+#define OTPC_MODE_REG_OTPC_MODE 0
+#define OTPC_COMMAND_OFFSET 0x4
+#define OTPC_COMMAND_COMMAND_WIDTH 6
+#define OTPC_CMD_START_OFFSET 0x8
+#define OTPC_CMD_START_START 0
+#define OTPC_CPU_STATUS_OFFSET 0xc
+#define OTPC_CPUADDR_REG_OFFSET 0x28
+#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
+#define OTPC_CPU_WRITE_REG_OFFSET 0x2c
+
+#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
+#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
+
+struct otpc_map {
+	/* in words. */
+	u32 otpc_row_size;
+	/* 128 bit row / 4 words support. */
+	u16 data_r_offset[4];
+	/* 128 bit row / 4 words support. */
+	u16 data_w_offset[4];
+};
+
+static struct otpc_map otp_map = {
+	.otpc_row_size = 1,
+	.data_r_offset = {0x10},
+	.data_w_offset = {0x2c},
+};
+
+static struct otpc_map otp_map_v2 = {
+	.otpc_row_size = 2,
+	.data_r_offset = {0x10, 0x5c},
+	.data_w_offset = {0x2c, 0x64},
+};
+
+struct otpc_priv {
+	struct device *dev;
+	void __iomem *base;
+	const struct otpc_map *map;
+	struct nvmem_config *config;
+};
+
+static inline void set_command(void __iomem *base, u32 command)
+{
+	writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET);
+}
+
+static inline void set_cpu_address(void __iomem *base, u32 addr)
+{
+	writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET);
+}
+
+static inline void set_start_bit(void __iomem *base)
+{
+	writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void reset_start_bit(void __iomem *base)
+{
+	writel(0, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void write_cpu_data(void __iomem *base, u32 value)
+{
+	writel(value, base + OTPC_CPU_WRITE_REG_OFFSET);
+}
+
+static int poll_cpu_status(void __iomem *base, u32 value)
+{
+	u32 status;
+	u32 retries;
+
+	for (retries = 0; retries < OTPC_RETRIES; retries++) {
+		status = readl(base + OTPC_CPU_STATUS_OFFSET);
+		if (status & value)
+			break;
+		udelay(1);
+	}
+	if (retries == OTPC_RETRIES)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int enable_ocotp_program(void __iomem *base)
+{
+	static const u32 vals[] = OTPC_PROG_EN_SEQ;
+	int i;
+	int ret;
+
+	/* Write the magic sequence to enable programming */
+	set_command(base, OTPC_CMD_OTP_PROG_ENABLE);
+	for (i = 0; i < ARRAY_SIZE(vals); i++) {
+		write_cpu_data(base, vals[i]);
+		set_start_bit(base);
+		ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE);
+		reset_start_bit(base);
+		if (ret)
+			return ret;
+	}
+
+	return poll_cpu_status(base, OTPC_STAT_PROG_OK);
+}
+
+static int disable_ocotp_program(void __iomem *base)
+{
+	int ret;
+
+	set_command(base, OTPC_CMD_OTP_PROG_DISABLE);
+	set_start_bit(base);
+	ret = poll_cpu_status(base, OTPC_STAT_PROG_OK);
+	reset_start_bit(base);
+
+	return ret;
+}
+
+static int bcm_otpc_read(void *context, unsigned int offset, void *val,
+			 size_t bytes)
+{
+	struct otpc_priv *priv = context;
+	u32 *buf = val;
+	u32 bytes_read;
+	u32 address = offset / priv->config->word_size;
+	int i, ret;
+
+	for (bytes_read = 0; bytes_read < bytes;) {
+		set_command(priv->base, OTPC_CMD_READ);
+		set_cpu_address(priv->base, address++);
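+		/*
+		 * Trigger the queued READ command; poll_cpu_status() below
+		 * busy-waits for the controller to signal completion.
+		 */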
+		set_start_bit(priv->base);
+		ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+		if (ret) {
+			dev_err(priv->dev, "otp read error: 0x%x", ret);
+			return -EIO;
+		}
+
+		for (i = 0; i < priv->map->otpc_row_size; i++) {
+			*buf++ = readl(priv->base +
+				       priv->map->data_r_offset[i]);
+			bytes_read += sizeof(*buf);
+		}
+
+		reset_start_bit(priv->base);
+	}
+
+	return 0;
+}
+
+static int bcm_otpc_write(void *context, unsigned int offset, void *val,
+			  size_t bytes)
+{
+	struct otpc_priv *priv = context;
+	u32 *buf = val;
+	u32 bytes_written;
+	u32 address = offset / priv->config->word_size;
+	int i, ret;
+
+	if (offset % priv->config->word_size)
+		return -EINVAL;
+
+	ret = enable_ocotp_program(priv->base);
+	if (ret)
+		return -EIO;
+
+	for (bytes_written = 0; bytes_written < bytes;) {
+		set_command(priv->base, OTPC_CMD_PROGRAM);
+		set_cpu_address(priv->base, address++);
+		for (i = 0; i < priv->map->otpc_row_size; i++) {
+			writel(*buf, priv->base + priv->map->data_w_offset[i]);
+			buf++;
+			bytes_written += sizeof(*buf);
+		}
+		set_start_bit(priv->base);
+		ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+		reset_start_bit(priv->base);
+		if (ret) {
+			dev_err(priv->dev, "otp write error: 0x%x", ret);
+			return -EIO;
+		}
+	}
+
+	disable_ocotp_program(priv->base);
+
+	return 0;
+}
+
+static struct nvmem_config bcm_otpc_nvmem_config = {
+	.name = "bcm-ocotp",
+	.read_only = false,
+	.word_size = 4,
+	.stride = 4,
+	.reg_read = bcm_otpc_read,
+	.reg_write = bcm_otpc_write,
+};
+
+static const struct of_device_id bcm_otpc_dt_ids[] = {
+	{ .compatible = "brcm,ocotp", .data = &otp_map },
+	{ .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+
+static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = {
+	{ .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
+	{ .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
+
+static int bcm_otpc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct otpc_priv *priv;
+	struct nvmem_device *nvmem;
+	int err;
+	u32 num_words;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->map = device_get_match_data(dev);
+	if (!priv->map)
+		return -ENODEV;
+
+	/* Get OTP base address register. */
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "unable to map I/O memory\n");
+		return PTR_ERR(priv->base);
+	}
+
+	/* Enable CPU access to OTPC. */
+	writel(readl(priv->base + OTPC_MODE_REG_OFFSET) |
+	       BIT(OTPC_MODE_REG_OTPC_MODE),
+	       priv->base + OTPC_MODE_REG_OFFSET);
+	reset_start_bit(priv->base);
+
+	/* Read size of memory in words. */
+	err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
+	if (err) {
+		dev_err(dev, "size parameter not specified\n");
+		return -EINVAL;
+	} else if (num_words == 0) {
+		dev_err(dev, "size must be > 0\n");
+		return -EINVAL;
+	}
+
+	bcm_otpc_nvmem_config.size = 4 * num_words;
+	bcm_otpc_nvmem_config.dev = dev;
+	bcm_otpc_nvmem_config.priv = priv;
+
+	if (priv->map == &otp_map_v2) {
+		bcm_otpc_nvmem_config.word_size = 8;
+		bcm_otpc_nvmem_config.stride = 8;
+	}
+
+	priv->config = &bcm_otpc_nvmem_config;
+
+	nvmem = devm_nvmem_register(dev, &bcm_otpc_nvmem_config);
+	if (IS_ERR(nvmem)) {
+		dev_err(dev, "error registering nvmem config\n");
+		return PTR_ERR(nvmem);
+	}
+
+	return 0;
+}
+
+static struct platform_driver bcm_otpc_driver = {
+	.probe = bcm_otpc_probe,
+	.driver = {
+		.name = "brcm-otpc",
+		.of_match_table = bcm_otpc_dt_ids,
+		.acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
+	},
+};
+module_platform_driver(bcm_otpc_driver);
+
+MODULE_DESCRIPTION("Broadcom OTPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
new file mode 100644
index 0000000000..5cdf339cfb
--- /dev/null
+++ b/drivers/nvmem/brcm_nvram.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define NVRAM_MAGIC "FLSH"
+
+/**
+ * struct brcm_nvram - driver state internal struct
+ *
+ * @dev: NVMEM device pointer
+ * @nvmem_size: Size of the whole space available for NVRAM
+ * @data: NVRAM data copy stored to avoid poking the underlying flash controller
+ * @data_len: NVRAM data size
+ * @padding_byte: Padding value used to fill remaining space
+ * @cells: Array of discovered NVMEM cells
+ * @ncells: Number of elements in cells
+ */
+struct brcm_nvram {
+	struct device *dev;
+	size_t nvmem_size;
+	uint8_t *data;
+	size_t data_len;
+	uint8_t padding_byte;
+	struct nvmem_cell_info *cells;
+	int ncells;
+};
+
+struct brcm_nvram_header {
+	char magic[4];
+	__le32 len;
+	__le32 crc_ver_init;	/* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+	__le32 config_refresh;	/* 0:15 sdram_config, 16:31 sdram_refresh */
+	__le32 config_ncdl;	/* ncdl values for memc */
+};
+
+static int brcm_nvram_read(void *context, unsigned int offset, void *val,
+			   size_t bytes)
+{
+	struct brcm_nvram *priv = context;
+	size_t to_copy;
+
+	if (offset + bytes > priv->data_len)
+		to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0);
+	else
+		to_copy = bytes;
+
+	memcpy(val, priv->data + offset, to_copy);
+
+	memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy);
+
+	return 0;
+}
+
+static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev)
+{
+	struct resource *res;
+	void __iomem *base;
+
+	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	priv->nvmem_size = resource_size(res);
+
+	priv->padding_byte = readb(base + priv->nvmem_size - 1);
+	for (priv->data_len = priv->nvmem_size;
+	     priv->data_len;
+	     priv->data_len--) {
+		if (readb(base + priv->data_len - 1) != priv->padding_byte)
+			break;
+	}
+	WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n",
+	     priv->data_len);
+
+	priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL);
+	if (!priv->data)
+		return -ENOMEM;
+
+	memcpy_fromio(priv->data, base, priv->data_len);
+
+	bcm47xx_nvram_init_from_iomem(base, priv->data_len);
+
+	return 0;
+}
+
+static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, int index,
+						unsigned int offset, void *buf, size_t bytes)
+{
+	u8 mac[ETH_ALEN];
+
+	if (bytes != 3 * ETH_ALEN - 1)
+		return -EINVAL;
+
+	if (!mac_pton(buf, mac))
+		return -EINVAL;
+
+	if (index)
+		eth_addr_add(mac, index);
+
+	ether_addr_copy(buf, mac);
+
+	return 0;
+}
+
+static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
+				size_t len)
+{
+	struct device *dev = priv->dev;
+	char *var, *value;
+	uint8_t tmp;
+	int idx;
+	int err = 0;
+
+	tmp = priv->data[len - 1];
+	priv->data[len - 1] = '\0';
+
+	priv->ncells = 0;
+	for (var = data + sizeof(struct brcm_nvram_header);
+	     var < (char *)data + len && *var;
+	     var += strlen(var) + 1) {
+		priv->ncells++;
+	}
+
+	priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+	if (!priv->cells) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (var = data + sizeof(struct brcm_nvram_header), idx = 0;
+	     var < (char *)data + len && *var;
+	     var = value + strlen(value) + 1, idx++) {
+		char *eq, *name;
+
+		eq = strchr(var, '=');
+		if (!eq)
+			break;
+		*eq = '\0';
+		name = devm_kstrdup(dev, var, GFP_KERNEL);
+		*eq = '=';
+		if (!name) {
+			err = -ENOMEM;
+			goto out;
+		}
+		value = eq + 1;
+
+		priv->cells[idx].name = name;
+		priv->cells[idx].offset = value - (char *)data;
+		priv->cells[idx].bytes = strlen(value);
+		priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+		if (!strcmp(name, "et0macaddr") ||
+		    !strcmp(name, "et1macaddr") ||
+		    !strcmp(name, "et2macaddr")) {
+			priv->cells[idx].raw_len = strlen(value);
+			priv->cells[idx].bytes = ETH_ALEN;
+			priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
+		}
+	}
+
+out:
+	priv->data[len - 1] = tmp;
+	return err;
+}
+
+static int brcm_nvram_parse(struct brcm_nvram *priv)
+{
+	struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data;
+	struct device *dev = priv->dev;
+	size_t len;
+	int err;
+
+	if (memcmp(header->magic, NVRAM_MAGIC, 4)) {
+		dev_err(dev, "Invalid NVRAM magic\n");
+		return -EINVAL;
+	}
+
+	len = le32_to_cpu(header->len);
+	if (len > priv->nvmem_size) {
+		dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len,
+			priv->nvmem_size);
+		return -EINVAL;
+	}
+
+	err = brcm_nvram_add_cells(priv, priv->data, len);
+	if (err)
+		dev_err(dev, "Failed to add cells: %d\n", err);
+
+	return 0;
+}
+
+static int brcm_nvram_probe(struct platform_device *pdev)
+{
+	struct nvmem_config config = {
+		.name = "brcm-nvram",
+		.reg_read = brcm_nvram_read,
+	};
+	struct device *dev = &pdev->dev;
+	struct brcm_nvram *priv;
+	int err;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = dev;
+
+	err = brcm_nvram_copy_data(priv, pdev);
+	if (err)
+		return err;
+
+	err = brcm_nvram_parse(priv);
+	if (err)
+		return err;
+
+	config.dev = dev;
+	config.cells = priv->cells;
+	config.ncells = priv->ncells;
+	config.priv = priv;
+	config.size = priv->nvmem_size;
+
+	return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id brcm_nvram_of_match_table[] = {
+	{ .compatible = "brcm,nvram", },
+	{},
+};
+
+static struct platform_driver brcm_nvram_driver = {
+	.probe = brcm_nvram_probe,
+	.driver = {
"brcm_nvram", + .of_match_table = brcm_nvram_of_match_table, + }, +}; + +static int __init brcm_nvram_init(void) +{ + return platform_driver_register(&brcm_nvram_driver); +} + +subsys_initcall_sync(brcm_nvram_init); + +MODULE_AUTHOR("Rafał Miłecki"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c new file mode 100644 index 0000000000..5b3955ad40 --- /dev/null +++ b/drivers/nvmem/core.c @@ -0,0 +1,2148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * nvmem framework core. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/kref.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/slab.h> + +struct nvmem_device { + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; +}; + +#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) + +#define FLAG_COMPAT BIT(0) +struct nvmem_cell_entry { + const char *name; + int offset; + size_t raw_len; + int bytes; + int bit_offset; + int nbits; + nvmem_cell_post_process_t read_post_process; + void *priv; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +struct nvmem_cell { + struct nvmem_cell_entry *entry; + const char *id; + int index; +}; + +static DEFINE_MUTEX(nvmem_mutex); +static DEFINE_IDA(nvmem_ida); + +static DEFINE_MUTEX(nvmem_cell_mutex); +static LIST_HEAD(nvmem_cell_tables); + +static DEFINE_MUTEX(nvmem_lookup_mutex); +static LIST_HEAD(nvmem_lookup_list); + +static BLOCKING_NOTIFIER_HEAD(nvmem_notifier); + +static DEFINE_SPINLOCK(nvmem_layout_lock); +static LIST_HEAD(nvmem_layouts); + +static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + if (nvmem->reg_read) + return nvmem->reg_read(nvmem->priv, offset, val, bytes); + + return -EINVAL; +} + +static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + int ret; + + if (nvmem->reg_write) { + gpiod_set_value_cansleep(nvmem->wp_gpio, 0); + ret = nvmem->reg_write(nvmem->priv, offset, val, bytes); + gpiod_set_value_cansleep(nvmem->wp_gpio, 1); + return ret; + } + + return -EINVAL; +} + +static int nvmem_access_with_keepouts(struct nvmem_device *nvmem, + unsigned int offset, void *val, + size_t bytes, int write) +{ + + unsigned int end = offset + bytes; + unsigned int kend, ksize; + const struct nvmem_keepout *keepout = nvmem->keepout; + const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout; + int rc; + + /* + * Skip all keepouts before the range being accessed. + * Keepouts are sorted. + */ + while ((keepout < keepoutend) && (keepout->end <= offset)) + keepout++; + + while ((offset < end) && (keepout < keepoutend)) { + /* Access the valid portion before the keepout. 
+		/* Access the valid portion before the keepout. */
+		if (offset < keepout->start) {
+			kend = min(end, keepout->start);
+			ksize = kend - offset;
+			if (write)
+				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
+			else
+				rc = __nvmem_reg_read(nvmem, offset, val, ksize);
+
+			if (rc)
+				return rc;
+
+			offset += ksize;
+			val += ksize;
+		}
+
+		/*
+		 * Now we're aligned to the start of this keepout zone. Go
+		 * through it.
+		 */
+		kend = min(end, keepout->end);
+		ksize = kend - offset;
+		if (!write)
+			memset(val, keepout->value, ksize);
+
+		val += ksize;
+		offset += ksize;
+		keepout++;
+	}
+
+	/*
+	 * If we ran out of keepouts but there's still stuff to do, send it
+	 * down directly
+	 */
+	if (offset < end) {
+		ksize = end - offset;
+		if (write)
+			return __nvmem_reg_write(nvmem, offset, val, ksize);
+		else
+			return __nvmem_reg_read(nvmem, offset, val, ksize);
+	}
+
+	return 0;
+}
+
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+			  void *val, size_t bytes)
+{
+	if (!nvmem->nkeepout)
+		return __nvmem_reg_read(nvmem, offset, val, bytes);
+
+	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+			   void *val, size_t bytes)
+{
+	if (!nvmem->nkeepout)
+		return __nvmem_reg_write(nvmem, offset, val, bytes);
+
+	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
+}
+
+#ifdef CONFIG_NVMEM_SYSFS
+static const char * const nvmem_type_str[] = {
+	[NVMEM_TYPE_UNKNOWN] = "Unknown",
+	[NVMEM_TYPE_EEPROM] = "EEPROM",
+	[NVMEM_TYPE_OTP] = "OTP",
+	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
+	[NVMEM_TYPE_FRAM] = "FRAM",
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
+static ssize_t type_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *nvmem_attrs[] = {
+	&dev_attr_type.attr,
+	NULL,
+};
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+				   struct bin_attribute *attr, char *buf,
+				   loff_t pos, size_t count)
+{
+	struct device *dev;
+	struct nvmem_device *nvmem;
+	int rc;
+
+	if (attr->private)
+		dev = attr->private;
+	else
+		dev = kobj_to_dev(kobj);
+	nvmem = to_nvmem_device(dev);
+
+	/* Stop the user from reading */
+	if (pos >= nvmem->size)
+		return 0;
+
+	if (!IS_ALIGNED(pos, nvmem->stride))
+		return -EINVAL;
+
+	if (count < nvmem->word_size)
+		return -EINVAL;
+
+	if (pos + count > nvmem->size)
+		count = nvmem->size - pos;
+
+	count = round_down(count, nvmem->word_size);
+
+	if (!nvmem->reg_read)
+		return -EPERM;
+
+	rc = nvmem_reg_read(nvmem, pos, buf, count);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *attr, char *buf,
+				    loff_t pos, size_t count)
+{
+	struct device *dev;
+	struct nvmem_device *nvmem;
+	int rc;
+
+	if (attr->private)
+		dev = attr->private;
+	else
+		dev = kobj_to_dev(kobj);
+	nvmem = to_nvmem_device(dev);
+
+	/* Stop the user from writing */
+	if (pos >= nvmem->size)
+		return -EFBIG;
+
+	if (!IS_ALIGNED(pos, nvmem->stride))
+		return -EINVAL;
+
+	if (count < nvmem->word_size)
+		return -EINVAL;
+
+	if (pos + count > nvmem->size)
+		count = nvmem->size - pos;
+
+	count = round_down(count, nvmem->word_size);
+
+	if (!nvmem->reg_write)
+		return -EPERM;
+
+	rc = nvmem_reg_write(nvmem, pos, buf, count);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
count; +} + +static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem) +{ + umode_t mode = 0400; + + if (!nvmem->root_only) + mode |= 0044; + + if (!nvmem->read_only) + mode |= 0200; + + if (!nvmem->reg_write) + mode &= ~0200; + + if (!nvmem->reg_read) + mode &= ~0444; + + return mode; +} + +static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, + struct bin_attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct nvmem_device *nvmem = to_nvmem_device(dev); + + attr->size = nvmem->size; + + return nvmem_bin_attr_get_umode(nvmem); +} + +/* default read/write permissions */ +static struct bin_attribute bin_attr_rw_nvmem = { + .attr = { + .name = "nvmem", + .mode = 0644, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_attributes[] = { + &bin_attr_rw_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_group = { + .bin_attrs = nvmem_bin_attributes, + .attrs = nvmem_attrs, + .is_bin_visible = nvmem_bin_attr_is_visible, +}; + +static const struct attribute_group *nvmem_dev_groups[] = { + &nvmem_bin_group, + NULL, +}; + +static struct bin_attribute bin_attr_nvmem_eeprom_compat = { + .attr = { + .name = "eeprom", + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +/* + * nvmem_setup_compat() - Create an additional binary entry in + * drivers sys directory, to be backwards compatible with the older + * drivers/misc/eeprom drivers. + */ +static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + int rval; + + if (!config->compat) + return 0; + + if (!config->base_dev) + return -EINVAL; + + if (config->type == NVMEM_TYPE_FRAM) + bin_attr_nvmem_eeprom_compat.attr.name = "fram"; + + nvmem->eeprom = bin_attr_nvmem_eeprom_compat; + nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem); + nvmem->eeprom.size = nvmem->size; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + nvmem->eeprom.attr.key = &eeprom_lock_key; +#endif + nvmem->eeprom.private = &nvmem->dev; + nvmem->base_dev = config->base_dev; + + rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); + if (rval) { + dev_err(&nvmem->dev, + "Failed to create eeprom binary file %d\n", rval); + return rval; + } + + nvmem->flags |= FLAG_COMPAT; + + return 0; +} + +static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + if (config->compat) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); +} + +#else /* CONFIG_NVMEM_SYSFS */ + +static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + return -ENOSYS; +} +static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ +} + +#endif /* CONFIG_NVMEM_SYSFS */ + +static void nvmem_release(struct device *dev) +{ + struct nvmem_device *nvmem = to_nvmem_device(dev); + + ida_free(&nvmem_ida, nvmem->id); + gpiod_put(nvmem->wp_gpio); + kfree(nvmem); +} + +static const struct device_type nvmem_provider_type = { + .release = nvmem_release, +}; + +static struct bus_type nvmem_bus_type = { + .name = "nvmem", +}; + +static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell) +{ + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell); + mutex_lock(&nvmem_mutex); + list_del(&cell->node); + mutex_unlock(&nvmem_mutex); + of_node_put(cell->np); + kfree_const(cell->name); + kfree(cell); +} + +static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) +{ + 
+static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
+{
+	struct nvmem_cell_entry *cell, *p;
+
+	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
+		nvmem_cell_entry_drop(cell);
+}
+
+static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
+{
+	mutex_lock(&nvmem_mutex);
+	list_add_tail(&cell->node, &cell->nvmem->cells);
+	mutex_unlock(&nvmem_mutex);
+	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
+}
+
+static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
+						     const struct nvmem_cell_info *info,
+						     struct nvmem_cell_entry *cell)
+{
+	cell->nvmem = nvmem;
+	cell->offset = info->offset;
+	cell->raw_len = info->raw_len ?: info->bytes;
+	cell->bytes = info->bytes;
+	cell->name = info->name;
+	cell->read_post_process = info->read_post_process;
+	cell->priv = info->priv;
+
+	cell->bit_offset = info->bit_offset;
+	cell->nbits = info->nbits;
+	cell->np = info->np;
+
+	if (cell->nbits)
+		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE);
+
+	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+		dev_err(&nvmem->dev,
+			"cell %s unaligned to nvmem stride %d\n",
+			cell->name ?: "<unknown>", nvmem->stride);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
+					       const struct nvmem_cell_info *info,
+					       struct nvmem_cell_entry *cell)
+{
+	int err;
+
+	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
+	if (err)
+		return err;
+
+	cell->name = kstrdup_const(info->name, GFP_KERNEL);
+	if (!cell->name)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * nvmem_add_one_cell() - Add one cell information to an nvmem device
+ *
+ * @nvmem: nvmem device to add cells to.
+ * @info: nvmem cell info to add to the device
+ *
+ * Return: 0 or negative error code on failure.
+ */
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+		       const struct nvmem_cell_info *info)
+{
+	struct nvmem_cell_entry *cell;
+	int rval;
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell)
+		return -ENOMEM;
+
+	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
+	if (rval) {
+		kfree(cell);
+		return rval;
+	}
+
+	nvmem_cell_entry_add(cell);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
+
+/**
+ * nvmem_add_cells() - Add cell information to an nvmem device
+ *
+ * @nvmem: nvmem device to add cells to.
+ * @info: nvmem cell info to add to the device
+ * @ncells: number of cells in info
+ *
+ * Return: 0 or negative error code on failure.
+ */
+static int nvmem_add_cells(struct nvmem_device *nvmem,
+			   const struct nvmem_cell_info *info,
+			   int ncells)
+{
+	int i, rval;
+
+	for (i = 0; i < ncells; i++) {
+		rval = nvmem_add_one_cell(nvmem, &info[i]);
+		if (rval)
+			return rval;
+	}
+
+	return 0;
+}
+
+/**
+ * nvmem_register_notifier() - Register a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be called on nvmem events.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_register_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_register_notifier);
+
+/**
+ * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be unregistered.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
+
+static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
+{
+	const struct nvmem_cell_info *info;
+	struct nvmem_cell_table *table;
+	struct nvmem_cell_entry *cell;
+	int rval = 0, i;
+
+	mutex_lock(&nvmem_cell_mutex);
+	list_for_each_entry(table, &nvmem_cell_tables, node) {
+		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
+			for (i = 0; i < table->ncells; i++) {
+				info = &table->cells[i];
+
+				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+				if (!cell) {
+					rval = -ENOMEM;
+					goto out;
+				}
+
+				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
+				if (rval) {
+					kfree(cell);
+					goto out;
+				}
+
+				nvmem_cell_entry_add(cell);
+			}
+		}
+	}
+
+out:
+	mutex_unlock(&nvmem_cell_mutex);
+	return rval;
+}
+
+static struct nvmem_cell_entry *
+nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
+{
+	struct nvmem_cell_entry *iter, *cell = NULL;
+
+	mutex_lock(&nvmem_mutex);
+	list_for_each_entry(iter, &nvmem->cells, node) {
+		if (strcmp(cell_id, iter->name) == 0) {
+			cell = iter;
+			break;
+		}
+	}
+	mutex_unlock(&nvmem_mutex);
+
+	return cell;
+}
+
+static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
+{
+	unsigned int cur = 0;
+	const struct nvmem_keepout *keepout = nvmem->keepout;
+	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
+
+	while (keepout < keepoutend) {
+		/* Ensure keepouts are sorted and don't overlap. */
+		if (keepout->start < cur) {
+			dev_err(&nvmem->dev,
+				"Keepout regions aren't sorted or overlap.\n");
+
+			return -ERANGE;
+		}
+
+		if (keepout->end < keepout->start) {
+			dev_err(&nvmem->dev,
+				"Invalid keepout region.\n");
+
+			return -EINVAL;
+		}
+
+		/*
+		 * Validate keepouts (and holes between) don't violate
+		 * word_size constraints.
+		 */
+		if ((keepout->end - keepout->start < nvmem->word_size) ||
+		    ((keepout->start != cur) &&
+		     (keepout->start - cur < nvmem->word_size))) {
+
+			dev_err(&nvmem->dev,
+				"Keepout regions violate word_size constraints.\n");
+
+			return -ERANGE;
+		}
+
+		/* Validate keepouts don't violate stride (alignment). */
+		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
+		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
+
+			dev_err(&nvmem->dev,
+				"Keepout regions violate stride.\n");
+
+			return -EINVAL;
+		}
+
+		cur = keepout->end;
+		keepout++;
+	}
+
+	return 0;
+}
+
+static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
+{
+	struct nvmem_layout *layout = nvmem->layout;
+	struct device *dev = &nvmem->dev;
+	struct device_node *child;
+	const __be32 *addr;
+	int len, ret;
+
+	for_each_child_of_node(np, child) {
+		struct nvmem_cell_info info = {0};
+
+		addr = of_get_property(child, "reg", &len);
+		if (!addr)
+			continue;
+		if (len < 2 * sizeof(u32)) {
+			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		info.offset = be32_to_cpup(addr++);
+		info.bytes = be32_to_cpup(addr);
+		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
+
+		addr = of_get_property(child, "bits", &len);
+		if (addr && len == (2 * sizeof(u32))) {
+			info.bit_offset = be32_to_cpup(addr++);
+			info.nbits = be32_to_cpup(addr);
+		}
+
+		info.np = of_node_get(child);
+
+		if (layout && layout->fixup_cell_info)
+			layout->fixup_cell_info(nvmem, layout, &info);
+
+		ret = nvmem_add_one_cell(nvmem, &info);
+		kfree(info.name);
+		if (ret) {
+			of_node_put(child);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
+{
+	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
+}
+
+static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
+{
+	struct device_node *layout_np;
+	int err = 0;
+
+	layout_np = of_nvmem_layout_get_container(nvmem);
+	if (!layout_np)
+		return 0;
+
+	if (of_device_is_compatible(layout_np, "fixed-layout"))
+		err = nvmem_add_cells_from_dt(nvmem, layout_np);
+
+	of_node_put(layout_np);
+
+	return err;
+}
+
+int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
+{
+	layout->owner = owner;
+
+	spin_lock(&nvmem_layout_lock);
+	list_add(&layout->node, &nvmem_layouts);
+	spin_unlock(&nvmem_layout_lock);
+
+	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__nvmem_layout_register);
+
+void nvmem_layout_unregister(struct nvmem_layout *layout)
+{
+	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
+
+	spin_lock(&nvmem_layout_lock);
+	list_del(&layout->node);
+	spin_unlock(&nvmem_layout_lock);
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
+
+static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
+{
+	struct device_node *layout_np;
+	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
+
+	layout_np = of_nvmem_layout_get_container(nvmem);
+	if (!layout_np)
+		return NULL;
+
+	/* Fixed layouts don't have a matching driver */
+	if (of_device_is_compatible(layout_np, "fixed-layout")) {
+		of_node_put(layout_np);
+		return NULL;
+	}
+
+	/*
+	 * In case the nvmem device was built-in while the layout was built as a
+	 * module, we shall manually request the layout driver loading otherwise
+	 * we'll never have any match.
+	 */
+	of_request_module(layout_np);
+
+	spin_lock(&nvmem_layout_lock);
+
+	list_for_each_entry(l, &nvmem_layouts, node) {
+		if (of_match_node(l->of_match_table, layout_np)) {
+			if (try_module_get(l->owner))
+				layout = l;
+
+			break;
+		}
+	}
+
+	spin_unlock(&nvmem_layout_lock);
+	of_node_put(layout_np);
+
+	return layout;
+}
+
+static void nvmem_layout_put(struct nvmem_layout *layout)
+{
+	if (layout)
+		module_put(layout->owner);
+}
+
+static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
+{
+	struct nvmem_layout *layout = nvmem->layout;
+	int ret;
+
+	if (layout && layout->add_cells) {
+		ret = layout->add_cells(&nvmem->dev, nvmem, layout);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_layout_get_container() - Get OF node to layout container.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
+}
+EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
+#endif
+
+const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
+					struct nvmem_layout *layout)
+{
+	struct device_node __maybe_unused *layout_np;
+	const struct of_device_id *match;
+
+	layout_np = of_nvmem_layout_get_container(nvmem);
+	match = of_match_node(layout->of_match_table, layout_np);
+
+	return match ? match->data : NULL;
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
+
+/**
+ * nvmem_register() - Register a nvmem device for given nvmem_config.
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ *
+ * @config: nvmem device configuration with which nvmem device is created.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
+ * on success.
+ */
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+{
+	struct nvmem_device *nvmem;
+	int rval;
+
+	if (!config->dev)
+		return ERR_PTR(-EINVAL);
+
+	if (!config->reg_read && !config->reg_write)
+		return ERR_PTR(-EINVAL);
+
+	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
+	if (!nvmem)
+		return ERR_PTR(-ENOMEM);
+
+	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
+	if (rval < 0) {
+		kfree(nvmem);
+		return ERR_PTR(rval);
+	}
+
+	nvmem->id = rval;
+
+	nvmem->dev.type = &nvmem_provider_type;
+	nvmem->dev.bus = &nvmem_bus_type;
+	nvmem->dev.parent = config->dev;
+
+	device_initialize(&nvmem->dev);
+
+	if (!config->ignore_wp)
+		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+						    GPIOD_OUT_HIGH);
+	if (IS_ERR(nvmem->wp_gpio)) {
+		rval = PTR_ERR(nvmem->wp_gpio);
+		nvmem->wp_gpio = NULL;
+		goto err_put_device;
+	}
+
+	kref_init(&nvmem->refcnt);
+	INIT_LIST_HEAD(&nvmem->cells);
+
+	nvmem->owner = config->owner;
+	if (!nvmem->owner && config->dev->driver)
+		nvmem->owner = config->dev->driver->owner;
+	nvmem->stride = config->stride ?: 1;
+	nvmem->word_size = config->word_size ?: 1;
+	nvmem->size = config->size;
+	nvmem->root_only = config->root_only;
+	nvmem->priv = config->priv;
+	nvmem->type = config->type;
+	nvmem->reg_read = config->reg_read;
+	nvmem->reg_write = config->reg_write;
+	nvmem->keepout = config->keepout;
+	nvmem->nkeepout = config->nkeepout;
+	if (config->of_node)
+		nvmem->dev.of_node = config->of_node;
+	else if (!config->no_of_node)
+		nvmem->dev.of_node = config->dev->of_node;
+
+	switch (config->id) {
+	case NVMEM_DEVID_NONE:
+		rval = dev_set_name(&nvmem->dev, "%s", config->name);
+		break;
+	case NVMEM_DEVID_AUTO:
+		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+		break;
+	default:
+		rval = dev_set_name(&nvmem->dev, "%s%d",
+				    config->name ? : "nvmem",
+				    config->name ? config->id : nvmem->id);
+		break;
+	}
+
+	if (rval)
+		goto err_put_device;
+
+	nvmem->read_only = device_property_present(config->dev, "read-only") ||
+			   config->read_only || !nvmem->reg_write;
+
+#ifdef CONFIG_NVMEM_SYSFS
+	nvmem->dev.groups = nvmem_dev_groups;
+#endif
+
+	if (nvmem->nkeepout) {
+		rval = nvmem_validate_keepouts(nvmem);
+		if (rval)
+			goto err_put_device;
+	}
+
+	if (config->compat) {
+		rval = nvmem_sysfs_setup_compat(nvmem, config);
+		if (rval)
+			goto err_put_device;
+	}
+
+	/*
+	 * If the driver supplied a layout by config->layout, the module
+	 * pointer will be NULL and nvmem_layout_put() will be a noop.
+	 */
+ */ + nvmem->layout = config->layout ?: nvmem_layout_get(nvmem); + if (IS_ERR(nvmem->layout)) { + rval = PTR_ERR(nvmem->layout); + nvmem->layout = NULL; + + if (rval == -EPROBE_DEFER) + goto err_teardown_compat; + } + + if (config->cells) { + rval = nvmem_add_cells(nvmem, config->cells, config->ncells); + if (rval) + goto err_remove_cells; + } + + rval = nvmem_add_cells_from_table(nvmem); + if (rval) + goto err_remove_cells; + + rval = nvmem_add_cells_from_legacy_of(nvmem); + if (rval) + goto err_remove_cells; + + rval = nvmem_add_cells_from_fixed_layout(nvmem); + if (rval) + goto err_remove_cells; + + rval = nvmem_add_cells_from_layout(nvmem); + if (rval) + goto err_remove_cells; + + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) + goto err_remove_cells; + + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); + + return nvmem; + +err_remove_cells: + nvmem_device_remove_all_cells(nvmem); + nvmem_layout_put(nvmem->layout); +err_teardown_compat: + if (config->compat) + nvmem_sysfs_remove_compat(nvmem, config); +err_put_device: + put_device(&nvmem->dev); + + return ERR_PTR(rval); +} +EXPORT_SYMBOL_GPL(nvmem_register); + +static void nvmem_device_release(struct kref *kref) +{ + struct nvmem_device *nvmem; + + nvmem = container_of(kref, struct nvmem_device, refcnt); + + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem); + + if (nvmem->flags & FLAG_COMPAT) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); + + nvmem_device_remove_all_cells(nvmem); + nvmem_layout_put(nvmem->layout); + device_unregister(&nvmem->dev); +} + +/** + * nvmem_unregister() - Unregister previously registered nvmem device + * + * @nvmem: Pointer to previously registered nvmem device. + */ +void nvmem_unregister(struct nvmem_device *nvmem) +{ + if (nvmem) + kref_put(&nvmem->refcnt, nvmem_device_release); +} +EXPORT_SYMBOL_GPL(nvmem_unregister); + +static void devm_nvmem_unregister(void *nvmem) +{ + nvmem_unregister(nvmem); +} + +/** + * devm_nvmem_register() - Register a managed nvmem device for given + * nvmem_config. + * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @dev: Device that uses the nvmem device. + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. 
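+ *
+ * Typical provider probe tail (sketch; "config" stands for a filled-in
+ * struct nvmem_config as above):
+ *
+ *	nvmem = devm_nvmem_register(dev, &config);
+ *	return PTR_ERR_OR_ZERO(nvmem);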
+ */ +struct nvmem_device *devm_nvmem_register(struct device *dev, + const struct nvmem_config *config) +{ + struct nvmem_device *nvmem; + int ret; + + nvmem = nvmem_register(config); + if (IS_ERR(nvmem)) + return nvmem; + + ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem); + if (ret) + return ERR_PTR(ret); + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_register); + +static struct nvmem_device *__nvmem_device_get(void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct nvmem_device *nvmem = NULL; + struct device *dev; + + mutex_lock(&nvmem_mutex); + dev = bus_find_device(&nvmem_bus_type, NULL, data, match); + if (dev) + nvmem = to_nvmem_device(dev); + mutex_unlock(&nvmem_mutex); + if (!nvmem) + return ERR_PTR(-EPROBE_DEFER); + + if (!try_module_get(nvmem->owner)) { + dev_err(&nvmem->dev, + "could not increase module refcount for cell %s\n", + nvmem_dev_name(nvmem)); + + put_device(&nvmem->dev); + return ERR_PTR(-EINVAL); + } + + kref_get(&nvmem->refcnt); + + return nvmem; +} + +static void __nvmem_device_put(struct nvmem_device *nvmem) +{ + put_device(&nvmem->dev); + module_put(nvmem->owner); + kref_put(&nvmem->refcnt, nvmem_device_release); +} + +#if IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_device_get() - Get nvmem device from a given id + * + * @np: Device tree node that uses the nvmem device. + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) +{ + + struct device_node *nvmem_np; + struct nvmem_device *nvmem; + int index = 0; + + if (id) + index = of_property_match_string(np, "nvmem-names", id); + + nvmem_np = of_parse_phandle(np, "nvmem", index); + if (!nvmem_np) + return ERR_PTR(-ENOENT); + + nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); + of_node_put(nvmem_np); + return nvmem; +} +EXPORT_SYMBOL_GPL(of_nvmem_device_get); +#endif + +/** + * nvmem_device_get() - Get nvmem device from a given id + * + * @dev: Device that uses the nvmem device. + * @dev_name: name of the requested nvmem device. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) +{ + if (dev->of_node) { /* try dt first */ + struct nvmem_device *nvmem; + + nvmem = of_nvmem_device_get(dev->of_node, dev_name); + + if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) + return nvmem; + + } + + return __nvmem_device_get((void *)dev_name, device_match_name); +} +EXPORT_SYMBOL_GPL(nvmem_device_get); + +/** + * nvmem_device_find() - Find nvmem device with matching function + * + * @data: Data to pass to match function + * @match: Callback function to check device + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. 
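+ *
+ * A minimal sketch of a match callback (example_match() and the
+ * "ocotp0" name are hypothetical, not part of this file):
+ *
+ *	static int example_match(struct device *dev, const void *data)
+ *	{
+ *		return !strcmp(dev_name(dev), data);
+ *	}
+ *
+ *	nvmem = nvmem_device_find("ocotp0", example_match);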
+ */
+struct nvmem_device *nvmem_device_find(void *data,
+			int (*match)(struct device *dev, const void *data))
+{
+	return __nvmem_device_get(data, match);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_find);
+
+static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_device **nvmem = res;
+
+	if (WARN_ON(!nvmem || !*nvmem))
+		return 0;
+
+	return *nvmem == data;
+}
+
+static void devm_nvmem_device_release(struct device *dev, void *res)
+{
+	nvmem_device_put(*(struct nvmem_device **)res);
+}
+
+/**
+ * devm_nvmem_device_put() - put an already-obtained nvmem device
+ *
+ * @dev: Device that uses the nvmem device.
+ * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
+ * that needs to be released.
+ */
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_device_release,
+			     devm_nvmem_device_match, nvmem);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
+
+/**
+ * nvmem_device_put() - put an already-obtained nvmem device
+ *
+ * @nvmem: pointer to nvmem device that needs to be released.
+ */
+void nvmem_device_put(struct nvmem_device *nvmem)
+{
+	__nvmem_device_put(nvmem);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+/**
+ * devm_nvmem_device_get() - Get nvmem device of device from a given id
+ *
+ * @dev: Device that requests the nvmem device.
+ * @id: name id for the requested nvmem device.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be released automatically once the
+ * device is freed.
+ */
+struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+{
+	struct nvmem_device **ptr, *nvmem;
+
+	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	nvmem = nvmem_device_get(dev, id);
+	if (!IS_ERR(nvmem)) {
+		*ptr = nvmem;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return nvmem;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
+
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
+					    const char *id, int index)
+{
+	struct nvmem_cell *cell;
+	const char *name = NULL;
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell)
+		return ERR_PTR(-ENOMEM);
+
+	if (id) {
+		name = kstrdup_const(id, GFP_KERNEL);
+		if (!name) {
+			kfree(cell);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	cell->id = name;
+	cell->entry = entry;
+	cell->index = index;
+
+	return cell;
+}
+
+static struct nvmem_cell *
+nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
+{
+	struct nvmem_cell_entry *cell_entry;
+	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
+	struct nvmem_cell_lookup *lookup;
+	struct nvmem_device *nvmem;
+	const char *dev_id;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	dev_id = dev_name(dev);
+
+	mutex_lock(&nvmem_lookup_mutex);
+
+	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
+		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
+		    (strcmp(lookup->con_id, con_id) == 0)) {
+			/* This is the right entry. */
+			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
+						   device_match_name);
+			if (IS_ERR(nvmem)) {
+				/* Provider may not be registered yet.
*/ + cell = ERR_CAST(nvmem); + break; + } + + cell_entry = nvmem_find_cell_entry_by_name(nvmem, + lookup->cell_name); + if (!cell_entry) { + __nvmem_device_put(nvmem); + cell = ERR_PTR(-ENOENT); + } else { + cell = nvmem_create_cell(cell_entry, con_id, 0); + if (IS_ERR(cell)) + __nvmem_device_put(nvmem); + } + break; + } + } + + mutex_unlock(&nvmem_lookup_mutex); + return cell; +} + +#if IS_ENABLED(CONFIG_OF) +static struct nvmem_cell_entry * +nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np) +{ + struct nvmem_cell_entry *iter, *cell = NULL; + + mutex_lock(&nvmem_mutex); + list_for_each_entry(iter, &nvmem->cells, node) { + if (np == iter->np) { + cell = iter; + break; + } + } + mutex_unlock(&nvmem_mutex); + + return cell; +} + +/** + * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id + * + * @np: Device tree node that uses the nvmem cell. + * @id: nvmem cell name from nvmem-cell-names property, or NULL + * for the cell at index 0 (the lone cell with no accompanying + * nvmem-cell-names property). + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). + */ +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) +{ + struct device_node *cell_np, *nvmem_np; + struct nvmem_device *nvmem; + struct nvmem_cell_entry *cell_entry; + struct nvmem_cell *cell; + struct of_phandle_args cell_spec; + int index = 0; + int cell_index = 0; + int ret; + + /* if cell name exists, find index to the name */ + if (id) + index = of_property_match_string(np, "nvmem-cell-names", id); + + ret = of_parse_phandle_with_optional_args(np, "nvmem-cells", + "#nvmem-cell-cells", + index, &cell_spec); + if (ret) + return ERR_PTR(-ENOENT); + + if (cell_spec.args_count > 1) + return ERR_PTR(-EINVAL); + + cell_np = cell_spec.np; + if (cell_spec.args_count) + cell_index = cell_spec.args[0]; + + nvmem_np = of_get_parent(cell_np); + if (!nvmem_np) { + of_node_put(cell_np); + return ERR_PTR(-EINVAL); + } + + /* nvmem layouts produce cells within the nvmem-layout container */ + if (of_node_name_eq(nvmem_np, "nvmem-layout")) { + nvmem_np = of_get_next_parent(nvmem_np); + if (!nvmem_np) { + of_node_put(cell_np); + return ERR_PTR(-EINVAL); + } + } + + nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); + of_node_put(nvmem_np); + if (IS_ERR(nvmem)) { + of_node_put(cell_np); + return ERR_CAST(nvmem); + } + + cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np); + of_node_put(cell_np); + if (!cell_entry) { + __nvmem_device_put(nvmem); + return ERR_PTR(-ENOENT); + } + + cell = nvmem_create_cell(cell_entry, id, cell_index); + if (IS_ERR(cell)) + __nvmem_device_put(nvmem); + + return cell; +} +EXPORT_SYMBOL_GPL(of_nvmem_cell_get); +#endif + +/** + * nvmem_cell_get() - Get nvmem cell of device form a given cell name + * + * @dev: Device that requests the nvmem cell. + * @id: nvmem cell name to get (this corresponds with the name from the + * nvmem-cell-names property for DT systems and with the con_id from + * the lookup entry for non-DT systems). + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). 
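+ *
+ * Typical consumer usage (sketch; "calibration" is an illustrative cell
+ * name, not one defined by this file):
+ *
+ *	struct nvmem_cell *cell = nvmem_cell_get(dev, "calibration");
+ *
+ *	if (IS_ERR(cell))
+ *		return PTR_ERR(cell);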
+ */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
+{
+	struct nvmem_cell *cell;
+
+	if (dev->of_node) { /* try dt first */
+		cell = of_nvmem_cell_get(dev->of_node, id);
+		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+			return cell;
+	}
+
+	/* NULL cell id only allowed for device tree; invalid otherwise */
+	if (!id)
+		return ERR_PTR(-EINVAL);
+
+	return nvmem_cell_get_from_lookup(dev, id);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_get);
+
+static void devm_nvmem_cell_release(struct device *dev, void *res)
+{
+	nvmem_cell_put(*(struct nvmem_cell **)res);
+}
+
+/**
+ * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name id to get.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell. The nvmem_cell will be freed
+ * automatically once the device is freed.
+ */
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
+{
+	struct nvmem_cell **ptr, *cell;
+
+	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	cell = nvmem_cell_get(dev, id);
+	if (!IS_ERR(cell)) {
+		*ptr = cell;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return cell;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
+
+static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_cell **c = res;
+
+	if (WARN_ON(!c || !*c))
+		return 0;
+
+	return *c == data;
+}
+
+/**
+ * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
+ * obtained with devm_nvmem_cell_get().
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
+ */
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_cell_release,
+			     devm_nvmem_cell_match, cell);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_nvmem_cell_put);
+
+/**
+ * nvmem_cell_put() - Release previously allocated nvmem cell.
+ *
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get().
+ */ +void nvmem_cell_put(struct nvmem_cell *cell) +{ + struct nvmem_device *nvmem = cell->entry->nvmem; + + if (cell->id) + kfree_const(cell->id); + + kfree(cell); + __nvmem_device_put(nvmem); +} +EXPORT_SYMBOL_GPL(nvmem_cell_put); + +static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf) +{ + u8 *p, *b; + int i, extra, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { + /* First shift */ + *b++ >>= bit_offset; + + /* setup rest of the bytes if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get bits from next byte and shift them towards msb */ + *p |= *b << (BITS_PER_BYTE - bit_offset); + + p = b; + *b++ >>= bit_offset; + } + } else { + /* point to the msb */ + p += cell->bytes - 1; + } + + /* result fits in less bytes */ + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); + while (--extra >= 0) + *p-- = 0; + + /* clear msb bits if any leftover in the last byte */ + if (cell->nbits % BITS_PER_BYTE) + *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); +} + +static int __nvmem_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_entry *cell, + void *buf, size_t *len, const char *id, int index) +{ + int rc; + + rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len); + + if (rc) + return rc; + + /* shift bits in-place */ + if (cell->bit_offset || cell->nbits) + nvmem_shift_read_buffer_in_place(cell, buf); + + if (cell->read_post_process) { + rc = cell->read_post_process(cell->priv, id, index, + cell->offset, buf, cell->raw_len); + if (rc) + return rc; + } + + if (len) + *len = cell->bytes; + + return 0; +} + +/** + * nvmem_cell_read() - Read a given nvmem cell + * + * @cell: nvmem cell to be read. + * @len: pointer to length of cell which will be populated on successful read; + * can be NULL. + * + * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The + * buffer should be freed by the consumer with a kfree(). 
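+ *
+ * Sketch of the read/free pattern (continuing the nvmem_cell_get()
+ * example above):
+ *
+ *	size_t len;
+ *	void *buf = nvmem_cell_read(cell, &len);
+ *
+ *	nvmem_cell_put(cell);
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);
+ *	... use len bytes at buf ...
+ *	kfree(buf);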
+ */ +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + struct nvmem_cell_entry *entry = cell->entry; + struct nvmem_device *nvmem = entry->nvmem; + u8 *buf; + int rc; + + if (!nvmem) + return ERR_PTR(-EINVAL); + + buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index); + if (rc) { + kfree(buf); + return ERR_PTR(rc); + } + + return buf; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read); + +static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell, + u8 *_buf, int len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int i, rc, nbits, bit_offset = cell->bit_offset; + u8 v, *p, *buf, *b, pbyte, pbits; + + nbits = cell->nbits; + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + memcpy(buf, _buf, len); + p = b = buf; + + if (bit_offset) { + pbyte = *b; + *b <<= bit_offset; + + /* setup the first byte with lsb bits from nvmem */ + rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); + if (rc) + goto err; + *b++ |= GENMASK(bit_offset - 1, 0) & v; + + /* setup rest of the byte if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get last byte bits and shift them towards lsb */ + pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); + pbyte = *b; + p = b; + *b <<= bit_offset; + *b++ |= pbits; + } + } + + /* if it's not end on byte boundary */ + if ((nbits + bit_offset) % BITS_PER_BYTE) { + /* setup the last byte with msb bits from nvmem */ + rc = nvmem_reg_read(nvmem, + cell->offset + cell->bytes - 1, &v, 1); + if (rc) + goto err; + *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; + + } + + return buf; +err: + kfree(buf); + return ERR_PTR(rc); +} + +static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int rc; + + if (!nvmem || nvmem->read_only || + (cell->bit_offset == 0 && len != cell->bytes)) + return -EINVAL; + + /* + * Any cells which have a read_post_process hook are read-only because + * we cannot reverse the operation and it might affect other cells, + * too. + */ + if (cell->read_post_process) + return -EINVAL; + + if (cell->bit_offset || cell->nbits) { + buf = nvmem_cell_prepare_write_buffer(cell, buf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + } + + rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes); + + /* free the tmp buffer */ + if (cell->bit_offset || cell->nbits) + kfree(buf); + + if (rc) + return rc; + + return len; +} + +/** + * nvmem_cell_write() - Write to a given nvmem cell + * + * @cell: nvmem cell to be written. + * @buf: Buffer to be written. + * @len: length of buffer to be written to nvmem cell. + * + * Return: length of bytes written or negative on failure. 
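+ *
+ * Sketch (assumes a writable provider and a 4-byte cell):
+ *
+ *	u32 val = 0x12345678;
+ *	int ret = nvmem_cell_write(cell, &val, sizeof(val));
+ *
+ *	if (ret != sizeof(val))
+ *		return ret < 0 ? ret : -EIO;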
+ */ +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) +{ + return __nvmem_cell_entry_write(cell->entry, buf, len); +} + +EXPORT_SYMBOL_GPL(nvmem_cell_write); + +static int nvmem_cell_read_common(struct device *dev, const char *cell_id, + void *val, size_t count) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + + cell = nvmem_cell_get(dev, cell_id); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) { + nvmem_cell_put(cell); + return PTR_ERR(buf); + } + if (len != count) { + kfree(buf); + nvmem_cell_put(cell); + return -EINVAL; + } + memcpy(val, buf, count); + kfree(buf); + nvmem_cell_put(cell); + + return 0; +} + +/** + * nvmem_cell_read_u8() - Read a cell value as a u8 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u8); + +/** + * nvmem_cell_read_u16() - Read a cell value as a u16 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u16); + +/** + * nvmem_cell_read_u32() - Read a cell value as a u32 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); + +/** + * nvmem_cell_read_u64() - Read a cell value as a u64 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u64); + +static const void *nvmem_cell_read_variable_common(struct device *dev, + const char *cell_id, + size_t max_len, size_t *len) +{ + struct nvmem_cell *cell; + int nbits; + void *buf; + + cell = nvmem_cell_get(dev, cell_id); + if (IS_ERR(cell)) + return cell; + + nbits = cell->entry->nbits; + buf = nvmem_cell_read(cell, len); + nvmem_cell_put(cell); + if (IS_ERR(buf)) + return buf; + + /* + * If nbits is set then nvmem_cell_read() can significantly exaggerate + * the length of the real data. Throw away the extra junk. + */ + if (nbits) + *len = DIV_ROUND_UP(nbits, 8); + + if (*len > max_len) { + kfree(buf); + return ERR_PTR(-ERANGE); + } + + return buf; +} + +/** + * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number. + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. 
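+ *
+ * Sketch ("speed-grade" is an illustrative cell name):
+ *
+ *	u32 speed;
+ *	int err = nvmem_cell_read_variable_le_u32(dev, "speed-grade", &speed);
+ *
+ *	if (err)
+ *		return err;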
+ */ +int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id, + u32 *val) +{ + size_t len; + const u8 *buf; + int i; + + buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* Copy w/ implicit endian conversion */ + *val = 0; + for (i = 0; i < len; i++) + *val |= buf[i] << (8 * i); + + kfree(buf); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32); + +/** + * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number. + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id, + u64 *val) +{ + size_t len; + const u8 *buf; + int i; + + buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* Copy w/ implicit endian conversion */ + *val = 0; + for (i = 0; i < len; i++) + *val |= (uint64_t)buf[i] << (8 * i); + + kfree(buf); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64); + +/** + * nvmem_device_cell_read() - Read a given nvmem device and cell + * + * @nvmem: nvmem device to read from. + * @info: nvmem cell info to be read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell_entry cell; + int rc; + ssize_t len; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); + if (rc) + return rc; + + rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0); + if (rc) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_read); + +/** + * nvmem_device_cell_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @info: nvmem cell info to be written. + * @buf: buffer to be written to cell. + * + * Return: length of bytes written or negative error code on failure. + */ +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell_entry cell; + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); + if (rc) + return rc; + + return __nvmem_cell_entry_write(&cell, buf, cell.bytes); +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_write); + +/** + * nvmem_device_read() - Read from a given nvmem device + * + * @nvmem: nvmem device to read from. + * @offset: offset in nvmem device. + * @bytes: number of bytes to read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_reg_read(nvmem, offset, buf, bytes); + + if (rc) + return rc; + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_read); + +/** + * nvmem_device_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @offset: offset in nvmem device. + * @bytes: number of bytes to write. + * @buf: buffer to be written. 
+ * + * Return: length of bytes written or negative error code on failure. + */ +int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_reg_write(nvmem, offset, buf, bytes); + + if (rc) + return rc; + + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_write); + +/** + * nvmem_add_cell_table() - register a table of cell info entries + * + * @table: table of cell info entries + */ +void nvmem_add_cell_table(struct nvmem_cell_table *table) +{ + mutex_lock(&nvmem_cell_mutex); + list_add_tail(&table->node, &nvmem_cell_tables); + mutex_unlock(&nvmem_cell_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_add_cell_table); + +/** + * nvmem_del_cell_table() - remove a previously registered cell info table + * + * @table: table of cell info entries + */ +void nvmem_del_cell_table(struct nvmem_cell_table *table) +{ + mutex_lock(&nvmem_cell_mutex); + list_del(&table->node); + mutex_unlock(&nvmem_cell_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_del_cell_table); + +/** + * nvmem_add_cell_lookups() - register a list of cell lookup entries + * + * @entries: array of cell lookup entries + * @nentries: number of cell lookup entries in the array + */ +void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) +{ + int i; + + mutex_lock(&nvmem_lookup_mutex); + for (i = 0; i < nentries; i++) + list_add_tail(&entries[i].node, &nvmem_lookup_list); + mutex_unlock(&nvmem_lookup_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups); + +/** + * nvmem_del_cell_lookups() - remove a list of previously added cell lookup + * entries + * + * @entries: array of cell lookup entries + * @nentries: number of cell lookup entries in the array + */ +void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) +{ + int i; + + mutex_lock(&nvmem_lookup_mutex); + for (i = 0; i < nentries; i++) + list_del(&entries[i].node); + mutex_unlock(&nvmem_lookup_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups); + +/** + * nvmem_dev_name() - Get the name of a given nvmem device. + * + * @nvmem: nvmem device. + * + * Return: name of the nvmem device. 
+ */
+const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+	return dev_name(&nvmem->dev);
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
+static int __init nvmem_init(void)
+{
+	return bus_register(&nvmem_bus_type);
+}
+
+static void __exit nvmem_exit(void)
+{
+	bus_unregister(&nvmem_bus_type);
+}
+
+subsys_initcall(nvmem_init);
+module_exit(nvmem_exit);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("nvmem Driver Core");
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
new file mode 100644
index 0000000000..f13bbd1640
--- /dev/null
+++ b/drivers/nvmem/imx-iim.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i.MX IIM driver
+ *
+ * Copyright (c) 2017 Pengutronix, Michael Grzeschik <m.grzeschik@pengutronix.de>
+ *
+ * Based on the barebox iim driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
+ *	Orex Computed Radiography
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define IIM_BANK_BASE(n)	(0x800 + 0x400 * (n))
+
+struct imx_iim_drvdata {
+	unsigned int nregs;
+};
+
+struct iim_priv {
+	void __iomem *base;
+	struct clk *clk;
+};
+
+static int imx_iim_read(void *context, unsigned int offset,
+			void *buf, size_t bytes)
+{
+	struct iim_priv *iim = context;
+	int i, ret;
+	u8 *buf8 = buf;
+
+	ret = clk_prepare_enable(iim->clk);
+	if (ret)
+		return ret;
+
+	for (i = offset; i < offset + bytes; i++) {
+		int bank = i >> 5;
+		int reg = i & 0x1f;
+
+		*buf8++ = readl(iim->base + IIM_BANK_BASE(bank) + reg * 4);
+	}
+
+	clk_disable_unprepare(iim->clk);
+
+	return 0;
+}
+
+static struct imx_iim_drvdata imx27_drvdata = {
+	.nregs = 2 * 32,
+};
+
+static struct imx_iim_drvdata imx25_imx31_imx35_drvdata = {
+	.nregs = 3 * 32,
+};
+
+static struct imx_iim_drvdata imx51_drvdata = {
+	.nregs = 4 * 32,
+};
+
+static struct imx_iim_drvdata imx53_drvdata = {
+	.nregs = 4 * 32 + 16,
+};
+
+static const struct of_device_id imx_iim_dt_ids[] = {
+	{
+		.compatible = "fsl,imx25-iim",
+		.data = &imx25_imx31_imx35_drvdata,
+	}, {
+		.compatible = "fsl,imx27-iim",
+		.data = &imx27_drvdata,
+	}, {
+		.compatible = "fsl,imx31-iim",
+		.data = &imx25_imx31_imx35_drvdata,
+	}, {
+		.compatible = "fsl,imx35-iim",
+		.data = &imx25_imx31_imx35_drvdata,
+	}, {
+		.compatible = "fsl,imx51-iim",
+		.data = &imx51_drvdata,
+	}, {
+		.compatible = "fsl,imx53-iim",
+		.data = &imx53_drvdata,
+	}, {
+		/* sentinel */
+	},
+};
+MODULE_DEVICE_TABLE(of, imx_iim_dt_ids);
+
+static int imx_iim_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct iim_priv *iim;
+	struct nvmem_device *nvmem;
+	struct nvmem_config cfg = {};
+	const struct imx_iim_drvdata *drvdata = NULL;
+
+	iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
+	if (!iim)
+		return -ENOMEM;
+
+	iim->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(iim->base))
+		return PTR_ERR(iim->base);
+
+	drvdata = of_device_get_match_data(&pdev->dev);
+
+	iim->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(iim->clk))
+		return PTR_ERR(iim->clk);
+
+	cfg.name = "imx-iim";
+	cfg.read_only = true;
+	cfg.word_size = 1;
+	cfg.stride = 1;
+	cfg.reg_read = imx_iim_read;
+	cfg.dev = dev;
+	cfg.size = drvdata->nregs;
+	cfg.priv = iim;
+
+	nvmem = devm_nvmem_register(dev, &cfg);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static
struct platform_driver imx_iim_driver = { + .probe = imx_iim_probe, + .driver = { + .name = "imx-iim", + .of_match_table = imx_iim_dt_ids, + }, +}; +module_platform_driver(imx_iim_driver); + +MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>"); +MODULE_DESCRIPTION("i.MX IIM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c new file mode 100644 index 0000000000..cf920542f9 --- /dev/null +++ b/drivers/nvmem/imx-ocotp-ele.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * i.MX9 OCOTP fusebox driver + * + * Copyright 2023 NXP + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +enum fuse_type { + FUSE_FSB = 1, + FUSE_ELE = 2, + FUSE_INVALID = -1 +}; + +struct ocotp_map_entry { + u32 start; /* start word */ + u32 num; /* num words */ + enum fuse_type type; +}; + +struct ocotp_devtype_data { + u32 reg_off; + char *name; + u32 size; + u32 num_entry; + u32 flag; + nvmem_reg_read_t reg_read; + struct ocotp_map_entry entry[]; +}; + +struct imx_ocotp_priv { + struct device *dev; + void __iomem *base; + struct nvmem_config config; + struct mutex lock; + const struct ocotp_devtype_data *data; +}; + +static enum fuse_type imx_ocotp_fuse_type(void *context, u32 index) +{ + struct imx_ocotp_priv *priv = context; + const struct ocotp_devtype_data *data = priv->data; + u32 start, end; + int i; + + for (i = 0; i < data->num_entry; i++) { + start = data->entry[i].start; + end = data->entry[i].start + data->entry[i].num; + + if (index >= start && index < end) + return data->entry[i].type; + } + + return FUSE_INVALID; +} + +static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, size_t bytes) +{ + struct imx_ocotp_priv *priv = context; + void __iomem *reg = priv->base + priv->data->reg_off; + u32 count, index, num_bytes; + enum fuse_type type; + u32 *buf; + void *p; + int i; + + index = offset; + num_bytes = round_up(bytes, 4); + count = num_bytes >> 2; + + if (count > ((priv->data->size >> 2) - index)) + count = (priv->data->size >> 2) - index; + + p = kzalloc(num_bytes, GFP_KERNEL); + if (!p) + return -ENOMEM; + + mutex_lock(&priv->lock); + + buf = p; + + for (i = index; i < (index + count); i++) { + type = imx_ocotp_fuse_type(context, i); + if (type == FUSE_INVALID || type == FUSE_ELE) { + *buf++ = 0; + continue; + } + + *buf++ = readl_relaxed(reg + (i << 2)); + } + + memcpy(val, (u8 *)p, bytes); + + mutex_unlock(&priv->lock); + + kfree(p); + + return 0; +}; + +static int imx_ele_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct imx_ocotp_priv *priv; + struct nvmem_device *nvmem; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->data = of_device_get_match_data(dev); + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->config.dev = dev; + priv->config.name = "ELE-OCOTP"; + priv->config.id = NVMEM_DEVID_AUTO; + priv->config.owner = THIS_MODULE; + priv->config.size = priv->data->size; + priv->config.reg_read = priv->data->reg_read; + priv->config.word_size = 4; + priv->config.stride = 1; + priv->config.priv = priv; + priv->config.read_only = true; + mutex_init(&priv->lock); + + nvmem = devm_nvmem_register(dev, &priv->config); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + return 0; +} + 
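+/*
+ * i.MX93 fuse map. Only FSB-owned words are readable through this
+ * driver; words owned by the ELE (or outside any entry) are skipped by
+ * imx_ocotp_reg_read() above and read back as zero.
+ */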
+static const struct ocotp_devtype_data imx93_ocotp_data = { + .reg_off = 0x8000, + .reg_read = imx_ocotp_reg_read, + .size = 2048, + .num_entry = 6, + .entry = { + { 0, 52, FUSE_FSB }, + { 63, 1, FUSE_ELE}, + { 128, 16, FUSE_ELE }, + { 182, 1, FUSE_ELE }, + { 188, 1, FUSE_ELE }, + { 312, 200, FUSE_FSB } + }, +}; + +static const struct of_device_id imx_ele_ocotp_dt_ids[] = { + { .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, }, + {}, +}; +MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids); + +static struct platform_driver imx_ele_ocotp_driver = { + .driver = { + .name = "imx_ele_ocotp", + .of_match_table = imx_ele_ocotp_dt_ids, + }, + .probe = imx_ele_ocotp_probe, +}; +module_platform_driver(imx_ele_ocotp_driver); + +MODULE_DESCRIPTION("i.MX OCOTP/ELE driver"); +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c new file mode 100644 index 0000000000..c38d9c1c3f --- /dev/null +++ b/drivers/nvmem/imx-ocotp-scu.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * i.MX8 OCOTP fusebox driver + * + * Copyright 2019 NXP + * + * Peng Fan <peng.fan@nxp.com> + */ + +#include <linux/arm-smccc.h> +#include <linux/firmware/imx/sci.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define IMX_SIP_OTP_WRITE 0xc200000B + +enum ocotp_devtype { + IMX8QXP, + IMX8QM, +}; + +#define ECC_REGION BIT(0) +#define HOLE_REGION BIT(1) + +struct ocotp_region { + u32 start; + u32 end; + u32 flag; +}; + +struct ocotp_devtype_data { + int devtype; + int nregs; + u32 num_region; + struct ocotp_region region[]; +}; + +struct ocotp_priv { + struct device *dev; + const struct ocotp_devtype_data *data; + struct imx_sc_ipc *nvmem_ipc; +}; + +struct imx_sc_msg_misc_fuse_read { + struct imx_sc_rpc_msg hdr; + u32 word; +} __packed; + +static DEFINE_MUTEX(scu_ocotp_mutex); + +static struct ocotp_devtype_data imx8qxp_data = { + .devtype = IMX8QXP, + .nregs = 800, + .num_region = 3, + .region = { + {0x10, 0x10f, ECC_REGION}, + {0x110, 0x21F, HOLE_REGION}, + {0x220, 0x31F, ECC_REGION}, + }, +}; + +static struct ocotp_devtype_data imx8qm_data = { + .devtype = IMX8QM, + .nregs = 800, + .num_region = 2, + .region = { + {0x10, 0x10f, ECC_REGION}, + {0x1a0, 0x1ff, ECC_REGION}, + }, +}; + +static bool in_hole(void *context, u32 index) +{ + struct ocotp_priv *priv = context; + const struct ocotp_devtype_data *data = priv->data; + int i; + + for (i = 0; i < data->num_region; i++) { + if (data->region[i].flag & HOLE_REGION) { + if ((index >= data->region[i].start) && + (index <= data->region[i].end)) + return true; + } + } + + return false; +} + +static bool in_ecc(void *context, u32 index) +{ + struct ocotp_priv *priv = context; + const struct ocotp_devtype_data *data = priv->data; + int i; + + for (i = 0; i < data->num_region; i++) { + if (data->region[i].flag & ECC_REGION) { + if ((index >= data->region[i].start) && + (index <= data->region[i].end)) + return true; + } + } + + return false; +} + +static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word, + u32 *val) +{ + struct imx_sc_msg_misc_fuse_read msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_MISC; + hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ; + hdr->size = 2; + + msg.word = word; + + ret = imx_scu_call_rpc(ipc, &msg, true); + if (ret) + return ret; + + *val = msg.word; + + return 0; +} 
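+
+/*
+ * Read path: fuse words are fetched one 32-bit word at a time over the
+ * SCU RPC via imx_sc_misc_otp_fuse_read(); words inside a HOLE_REGION
+ * are not read and are returned as zero.
+ */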
+ +static int imx_scu_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct ocotp_priv *priv = context; + u32 count, index, num_bytes; + u32 *buf; + void *p; + int i, ret; + + index = offset; + num_bytes = round_up(bytes, 4); + count = num_bytes >> 2; + + if (count > (priv->data->nregs - index)) + count = priv->data->nregs - index; + + p = kzalloc(num_bytes, GFP_KERNEL); + if (!p) + return -ENOMEM; + + mutex_lock(&scu_ocotp_mutex); + + buf = p; + + for (i = index; i < (index + count); i++) { + if (in_hole(context, i)) { + *buf++ = 0; + continue; + } + + ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf); + if (ret) { + mutex_unlock(&scu_ocotp_mutex); + kfree(p); + return ret; + } + buf++; + } + + memcpy(val, (u8 *)p, bytes); + + mutex_unlock(&scu_ocotp_mutex); + + kfree(p); + + return 0; +} + +static int imx_scu_ocotp_write(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct ocotp_priv *priv = context; + struct arm_smccc_res res; + u32 *buf = val; + u32 tmp; + u32 index; + int ret; + + /* allow only writing one complete OTP word at a time */ + if (bytes != 4) + return -EINVAL; + + index = offset; + + if (in_hole(context, index)) + return -EINVAL; + + if (in_ecc(context, index)) { + pr_warn("ECC region, only program once\n"); + mutex_lock(&scu_ocotp_mutex); + ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp); + mutex_unlock(&scu_ocotp_mutex); + if (ret) + return ret; + if (tmp) { + pr_warn("ECC region, already has value: %x\n", tmp); + return -EIO; + } + } + + mutex_lock(&scu_ocotp_mutex); + + arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res); + + mutex_unlock(&scu_ocotp_mutex); + + return res.a0; +} + +static struct nvmem_config imx_scu_ocotp_nvmem_config = { + .name = "imx-scu-ocotp", + .read_only = false, + .word_size = 4, + .stride = 1, + .owner = THIS_MODULE, + .reg_read = imx_scu_ocotp_read, + .reg_write = imx_scu_ocotp_write, +}; + +static const struct of_device_id imx_scu_ocotp_dt_ids[] = { + { .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data }, + { .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids); + +static int imx_scu_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ocotp_priv *priv; + struct nvmem_device *nvmem; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = imx_scu_get_handle(&priv->nvmem_ipc); + if (ret) + return ret; + + priv->data = of_device_get_match_data(dev); + priv->dev = dev; + imx_scu_ocotp_nvmem_config.size = 4 * priv->data->nregs; + imx_scu_ocotp_nvmem_config.dev = dev; + imx_scu_ocotp_nvmem_config.priv = priv; + nvmem = devm_nvmem_register(dev, &imx_scu_ocotp_nvmem_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver imx_scu_ocotp_driver = { + .probe = imx_scu_ocotp_probe, + .driver = { + .name = "imx_scu_ocotp", + .of_match_table = imx_scu_ocotp_dt_ids, + }, +}; +module_platform_driver(imx_scu_ocotp_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("i.MX8 SCU OCOTP fuse box driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c new file mode 100644 index 0000000000..e8b6f19492 --- /dev/null +++ b/drivers/nvmem/imx-ocotp.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * i.MX6 OCOTP fusebox driver + * + * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de> 
+ * + * Copyright 2019 NXP + * + * Based on the barebox ocotp driver, + * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>, + * Orex Computed Radiography + * + * Write support based on the fsl_otp driver, + * Copyright (C) 2010-2013 Freescale Semiconductor, Inc + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/delay.h> + +#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the + * OTP Bank0 Word0 + */ +#define IMX_OCOTP_OFFSET_PER_WORD 0x10 /* Offset between the start addr + * of two consecutive OTP words. + */ + +#define IMX_OCOTP_ADDR_CTRL 0x0000 +#define IMX_OCOTP_ADDR_CTRL_SET 0x0004 +#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008 +#define IMX_OCOTP_ADDR_TIMING 0x0010 +#define IMX_OCOTP_ADDR_DATA0 0x0020 +#define IMX_OCOTP_ADDR_DATA1 0x0030 +#define IMX_OCOTP_ADDR_DATA2 0x0040 +#define IMX_OCOTP_ADDR_DATA3 0x0050 + +#define IMX_OCOTP_BM_CTRL_ADDR 0x000000FF +#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100 +#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200 +#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400 + +#define IMX_OCOTP_BM_CTRL_ADDR_8MP 0x000001FF +#define IMX_OCOTP_BM_CTRL_BUSY_8MP 0x00000200 +#define IMX_OCOTP_BM_CTRL_ERROR_8MP 0x00000400 +#define IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP 0x00000800 + +#define IMX_OCOTP_BM_CTRL_DEFAULT \ + { \ + .bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \ + .bm_busy = IMX_OCOTP_BM_CTRL_BUSY, \ + .bm_error = IMX_OCOTP_BM_CTRL_ERROR, \ + .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\ + } + +#define IMX_OCOTP_BM_CTRL_8MP \ + { \ + .bm_addr = IMX_OCOTP_BM_CTRL_ADDR_8MP, \ + .bm_busy = IMX_OCOTP_BM_CTRL_BUSY_8MP, \ + .bm_error = IMX_OCOTP_BM_CTRL_ERROR_8MP, \ + .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP,\ + } + +#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */ +#define TIMING_STROBE_READ_NS 37 /* Min time before read */ +#define TIMING_RELAX_NS 17 +#define DEF_FSOURCE 1001 /* > 1000 ns */ +#define DEF_STROBE_PROG 10000 /* IPG clocks */ +#define IMX_OCOTP_WR_UNLOCK 0x3E770000 +#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA + +static DEFINE_MUTEX(ocotp_mutex); + +struct ocotp_priv { + struct device *dev; + struct clk *clk; + void __iomem *base; + const struct ocotp_params *params; + struct nvmem_config *config; +}; + +struct ocotp_ctrl_reg { + u32 bm_addr; + u32 bm_busy; + u32 bm_error; + u32 bm_rel_shadows; +}; + +struct ocotp_params { + unsigned int nregs; + unsigned int bank_address_words; + void (*set_timing)(struct ocotp_priv *priv); + struct ocotp_ctrl_reg ctrl; +}; + +static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags) +{ + int count; + u32 c, mask; + u32 bm_ctrl_busy, bm_ctrl_error; + void __iomem *base = priv->base; + + bm_ctrl_busy = priv->params->ctrl.bm_busy; + bm_ctrl_error = priv->params->ctrl.bm_error; + + mask = bm_ctrl_busy | bm_ctrl_error | flags; + + for (count = 10000; count >= 0; count--) { + c = readl(base + IMX_OCOTP_ADDR_CTRL); + if (!(c & mask)) + break; + cpu_relax(); + } + + if (count < 0) { + /* HW_OCOTP_CTRL[ERROR] will be set under the following + * conditions: + * - A write is performed to a shadow register during a shadow + * reload (essentially, while HW_OCOTP_CTRL[RELOAD_SHADOWS] is + * set. In addition, the contents of the shadow register shall + * not be updated. + * - A write is performed to a shadow register which has been + * locked. 
+	 * - A read is performed from a shadow register which has
+	 *   been read locked.
+	 * - A program is performed to a fuse word which has been locked.
+	 * - A read is performed from a fuse word which has been read
+	 *   locked.
+	 */
+		if (c & bm_ctrl_error)
+			return -EPERM;
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv)
+{
+	u32 c, bm_ctrl_error;
+	void __iomem *base = priv->base;
+
+	bm_ctrl_error = priv->params->ctrl.bm_error;
+
+	c = readl(base + IMX_OCOTP_ADDR_CTRL);
+	if (!(c & bm_ctrl_error))
+		return;
+
+	writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR);
+}
+
+static int imx_ocotp_read(void *context, unsigned int offset,
+			  void *val, size_t bytes)
+{
+	struct ocotp_priv *priv = context;
+	unsigned int count;
+	u8 *buf, *p;
+	int i, ret;
+	u32 index, num_bytes;
+
+	index = offset >> 2;
+	num_bytes = round_up((offset % 4) + bytes, 4);
+	count = num_bytes >> 2;
+
+	if (count > (priv->params->nregs - index))
+		count = priv->params->nregs - index;
+
+	p = kzalloc(num_bytes, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	mutex_lock(&ocotp_mutex);
+
+	buf = p;
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret < 0) {
+		mutex_unlock(&ocotp_mutex);
+		dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+		kfree(p);
+		return ret;
+	}
+
+	ret = imx_ocotp_wait_for_busy(priv, 0);
+	if (ret < 0) {
+		dev_err(priv->dev, "timeout during read setup\n");
+		goto read_end;
+	}
+
+	for (i = index; i < (index + count); i++) {
+		*(u32 *)buf = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 +
+				    i * IMX_OCOTP_OFFSET_PER_WORD);
+
+		/* 47.3.1.2
+		 * For "read locked" registers 0xBADABADA will be returned and
+		 * HW_OCOTP_CTRL[ERROR] will be set. It must be cleared by
+		 * software before any new write, read or reload access can be
+		 * issued.
+		 */
+		if (*((u32 *)buf) == IMX_OCOTP_READ_LOCKED_VAL)
+			imx_ocotp_clr_err_if_set(priv);
+
+		buf += 4;
+	}
+
+	index = offset % 4;
+	memcpy(val, &p[index], bytes);
+
+read_end:
+	clk_disable_unprepare(priv->clk);
+	mutex_unlock(&ocotp_mutex);
+
+	kfree(p);
+
+	return ret;
+}
+
+static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+			     unsigned int offset, void *data, size_t bytes)
+{
+	u8 *buf = data;
+	int i;
+
+	/* Deal with some post processing of nvmem cell data */
+	if (id && !strcmp(id, "mac-address"))
+		for (i = 0; i < bytes / 2; i++)
+			swap(buf[i], buf[bytes - i - 1]);
+
+	return 0;
+}
+
+static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
+{
+	unsigned long clk_rate;
+	unsigned long strobe_read, relax, strobe_prog;
+	u32 timing;
+
+	/* 47.3.1.3.1
+	 * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
+	 * fields with timing values to match the current frequency of the
+	 * ipg_clk. OTP writes will work at maximum bus frequencies as long
+	 * as the HW_OCOTP_TIMING parameters are set correctly.
+	 *
+	 * Note: there are minimum timings required to ensure an OTP fuse burns
+	 * correctly that are independent of the ipg_clk. Those values are not
+	 * formally documented anywhere; however, working from the minimum
+	 * timings given in U-Boot we can say:
+	 *
+	 * - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10
+	 *   microseconds feels about right as representative of a minimum time
+	 *   to physically burn out a fuse.
+	 *
+	 * - Minimum STROBE_READ, i.e. the time to wait post OTP fuse burn
+	 *   before performing another read, is 37 nanoseconds.
+	 *
+	 * - Minimum RELAX timing is 17 nanoseconds.
This final RELAX minimum + * timing is not entirely clear the documentation says "This + * count value specifies the time to add to all default timing + * parameters other than the Tpgm and Trd. It is given in number + * of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG + * and STROBE_READ respectively. What the other timing parameters + * are though, is not specified. Experience shows a zero RELAX + * value will mess up a re-load of the shadow registers post OTP + * burn. + */ + clk_rate = clk_get_rate(priv->clk); + + relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1; + strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS, + 1000000000); + strobe_read += 2 * (relax + 1) - 1; + strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US, + 1000000); + strobe_prog += 2 * (relax + 1) - 1; + + timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000; + timing |= strobe_prog & 0x00000FFF; + timing |= (relax << 12) & 0x0000F000; + timing |= (strobe_read << 16) & 0x003F0000; + + writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING); +} + +static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv) +{ + unsigned long clk_rate; + u64 fsource, strobe_prog; + u32 timing; + + /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1 + * 6.4.3.3 + */ + clk_rate = clk_get_rate(priv->clk); + fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE, + NSEC_PER_SEC) + 1; + strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG, + NSEC_PER_SEC) + 1; + + timing = strobe_prog & 0x00000FFF; + timing |= (fsource << 12) & 0x000FF000; + + writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING); +} + +static int imx_ocotp_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct ocotp_priv *priv = context; + u32 *buf = val; + int ret; + + u32 ctrl; + u8 waddr; + u8 word = 0; + + /* allow only writing one complete OTP word at a time */ + if ((bytes != priv->config->word_size) || + (offset % priv->config->word_size)) + return -EINVAL; + + mutex_lock(&ocotp_mutex); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + mutex_unlock(&ocotp_mutex); + dev_err(priv->dev, "failed to prepare/enable ocotp clk\n"); + return ret; + } + + /* Setup the write timing values */ + priv->params->set_timing(priv); + + /* 47.3.1.3.2 + * Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear. + * Overlapped accesses are not supported by the controller. Any pending + * write or reload must be completed before a write access can be + * requested. + */ + ret = imx_ocotp_wait_for_busy(priv, 0); + if (ret < 0) { + dev_err(priv->dev, "timeout during timing setup\n"); + goto write_end; + } + + /* 47.3.1.3.3 + * Write the requested address to HW_OCOTP_CTRL[ADDR] and program the + * unlock code into HW_OCOTP_CTRL[WR_UNLOCK]. This must be programmed + * for each write access. The lock code is documented in the register + * description. Both the unlock code and address can be written in the + * same operation. + */ + if (priv->params->bank_address_words != 0) { + /* + * In banked/i.MX7 mode the OTP register bank goes into waddr + * see i.MX 7Solo Applications Processor Reference Manual, Rev. + * 0.1 section 6.4.3.1 + */ + offset = offset / priv->config->word_size; + waddr = offset / priv->params->bank_address_words; + word = offset & (priv->params->bank_address_words - 1); + } else { + /* + * Non-banked i.MX6 mode. 
+ * OTP write/read address specifies one of 128 word address + * locations + */ + waddr = offset / 4; + } + + ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL); + ctrl &= ~priv->params->ctrl.bm_addr; + ctrl |= waddr & priv->params->ctrl.bm_addr; + ctrl |= IMX_OCOTP_WR_UNLOCK; + + writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL); + + /* 47.3.1.3.4 + * Write the data to the HW_OCOTP_DATA register. This will automatically + * set HW_OCOTP_CTRL[BUSY] and clear HW_OCOTP_CTRL[WR_UNLOCK]. To + * protect programming same OTP bit twice, before program OCOTP will + * automatically read fuse value in OTP and use read value to mask + * program data. The controller will use masked program data to program + * a 32-bit word in the OTP per the address in HW_OCOTP_CTRL[ADDR]. Bit + * fields with 1's will result in that OTP bit being programmed. Bit + * fields with 0's will be ignored. At the same time that the write is + * accepted, the controller makes an internal copy of + * HW_OCOTP_CTRL[ADDR] which cannot be updated until the next write + * sequence is initiated. This copy guarantees that erroneous writes to + * HW_OCOTP_CTRL[ADDR] will not affect an active write operation. It + * should also be noted that during the programming HW_OCOTP_DATA will + * shift right (with zero fill). This shifting is required to program + * the OTP serially. During the write operation, HW_OCOTP_DATA cannot be + * modified. + * Note: on i.MX7 there are four data fields to write for banked write + * with the fuse blowing operation only taking place after data0 + * has been written. This is why data0 must always be the last + * register written. + */ + if (priv->params->bank_address_words != 0) { + /* Banked/i.MX7 mode */ + switch (word) { + case 0: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 1: + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 2: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 3: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + } + } else { + /* Non-banked i.MX6 mode */ + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0); + } + + /* 47.4.1.4.5 + * Once complete, the controller will clear BUSY. A write request to a + * protected or locked region will result in no OTP access and no + * setting of HW_OCOTP_CTRL[BUSY]. In addition HW_OCOTP_CTRL[ERROR] will + * be set. It must be cleared by software before any new write access + * can be issued. + */ + ret = imx_ocotp_wait_for_busy(priv, 0); + if (ret < 0) { + if (ret == -EPERM) { + dev_err(priv->dev, "failed write to locked region"); + imx_ocotp_clr_err_if_set(priv); + } else { + dev_err(priv->dev, "timeout during data write\n"); + } + goto write_end; + } + + /* 47.3.1.4 + * Write Postamble: Due to internal electrical characteristics of the + * OTP during writes, all OTP operations following a write must be + * separated by 2 us after the clearing of HW_OCOTP_CTRL_BUSY following + * the write. 
+ */ + udelay(2); + + /* reload all shadow registers */ + writel(priv->params->ctrl.bm_rel_shadows, + priv->base + IMX_OCOTP_ADDR_CTRL_SET); + ret = imx_ocotp_wait_for_busy(priv, + priv->params->ctrl.bm_rel_shadows); + if (ret < 0) + dev_err(priv->dev, "timeout during shadow register reload\n"); + +write_end: + clk_disable_unprepare(priv->clk); + mutex_unlock(&ocotp_mutex); + return ret < 0 ? ret : bytes; +} + +static struct nvmem_config imx_ocotp_nvmem_config = { + .name = "imx-ocotp", + .read_only = false, + .word_size = 4, + .stride = 1, + .reg_read = imx_ocotp_read, + .reg_write = imx_ocotp_write, +}; + +static const struct ocotp_params imx6q_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6sl_params = { + .nregs = 64, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6sll_params = { + .nregs = 80, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6sx_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6ul_params = { + .nregs = 144, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6ull_params = { + .nregs = 80, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx7d_params = { + .nregs = 64, + .bank_address_words = 4, + .set_timing = imx_ocotp_set_imx7_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx7ulp_params = { + .nregs = 256, + .bank_address_words = 0, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mq_params = { + .nregs = 256, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mm_params = { + .nregs = 256, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mn_params = { + .nregs = 256, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mp_params = { + .nregs = 384, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_8MP, +}; + +static const struct of_device_id imx_ocotp_dt_ids[] = { + { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params }, + { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params }, + { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params }, + { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params }, + { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params }, + { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params }, + { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params }, + { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params }, + { .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params }, + { .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params }, + { .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params }, + { .compatible = "fsl,imx8mp-ocotp", .data = 
&imx8mp_params },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
+
+static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem,
+				      struct nvmem_layout *layout,
+				      struct nvmem_cell_info *cell)
+{
+	cell->read_post_process = imx_ocotp_cell_pp;
+}
+
+static struct nvmem_layout imx_ocotp_layout = {
+	.fixup_cell_info = imx_ocotp_fixup_cell_info,
+};
+
+static int imx_ocotp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ocotp_priv *priv;
+	struct nvmem_device *nvmem;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	priv->params = of_device_get_match_data(&pdev->dev);
+	imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
+	imx_ocotp_nvmem_config.dev = dev;
+	imx_ocotp_nvmem_config.priv = priv;
+	imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
+
+	priv->config = &imx_ocotp_nvmem_config;
+
+	clk_prepare_enable(priv->clk);
+	imx_ocotp_clr_err_if_set(priv);
+	clk_disable_unprepare(priv->clk);
+
+	nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver imx_ocotp_driver = {
+	.probe	= imx_ocotp_probe,
+	.driver = {
+		.name	= "imx_ocotp",
+		.of_match_table = imx_ocotp_dt_ids,
+	},
+};
+module_platform_driver(imx_ocotp_driver);
+
+MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c
new file mode 100644
index 0000000000..0b01b840ed
--- /dev/null
+++ b/drivers/nvmem/jz4780-efuse.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * JZ4780 EFUSE Memory Support driver
+ *
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 H. Nikolaus Schaller <hns@goldelico.com>
+ */
+
+/*
+ * Currently supports the JZ4780 efuse, which has 8K programmable bits.
+ * The efuse is separated into seven segments as below:
+ *
+ * -----------------------------------------------------------------------
+ * | 64 bit | 128 bit | 128 bit | 3520 bit | 8 bit | 2296 bit | 2048 bit |
+ * -----------------------------------------------------------------------
+ *
+ * The ROM itself is accessed using a 9 bit address line and an 8 word wide bus
+ * which reads/writes based on strobes. The strobe is configured in the config
+ * register and is based on the number of cycles of the bus clock.
+ *
+ * The driver supports read only, as the writes are done in the factory.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/timer.h>
+
+#define JZ_EFUCTRL		(0x0)	/* Control Register */
+#define JZ_EFUCFG		(0x4)	/* Configure Register */
+#define JZ_EFUSTATE		(0x8)	/* Status Register */
+#define JZ_EFUDATA(n)		(0xC + (n) * 4)
+
+/* We read 32 byte chunks to avoid complexity in the driver.
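+ * For example (illustrative values), a 6-byte read at offset 30 is served
+ * by two bus reads: bytes 30..31 of the 32-byte window starting at 0, then
+ * bytes 32..35 of the window starting at 32; see the loop in
+ * jz4780_efuse_read() below.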
*/ +#define JZ_EFU_READ_SIZE 32 + +#define EFUCTRL_ADDR_MASK 0x3FF +#define EFUCTRL_ADDR_SHIFT 21 +#define EFUCTRL_LEN_MASK 0x1F +#define EFUCTRL_LEN_SHIFT 16 +#define EFUCTRL_PG_EN BIT(15) +#define EFUCTRL_WR_EN BIT(1) +#define EFUCTRL_RD_EN BIT(0) + +#define EFUCFG_INT_EN BIT(31) +#define EFUCFG_RD_ADJ_MASK 0xF +#define EFUCFG_RD_ADJ_SHIFT 20 +#define EFUCFG_RD_STR_MASK 0xF +#define EFUCFG_RD_STR_SHIFT 16 +#define EFUCFG_WR_ADJ_MASK 0xF +#define EFUCFG_WR_ADJ_SHIFT 12 +#define EFUCFG_WR_STR_MASK 0xFFF +#define EFUCFG_WR_STR_SHIFT 0 + +#define EFUSTATE_WR_DONE BIT(1) +#define EFUSTATE_RD_DONE BIT(0) + +struct jz4780_efuse { + struct device *dev; + struct regmap *map; + struct clk *clk; +}; + +/* main entry point */ +static int jz4780_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct jz4780_efuse *efuse = context; + + while (bytes > 0) { + size_t start = offset & ~(JZ_EFU_READ_SIZE - 1); + size_t chunk = min(bytes, (start + JZ_EFU_READ_SIZE) + - offset); + char buf[JZ_EFU_READ_SIZE]; + unsigned int tmp; + u32 ctrl; + int ret; + + ctrl = (start << EFUCTRL_ADDR_SHIFT) + | ((JZ_EFU_READ_SIZE - 1) << EFUCTRL_LEN_SHIFT) + | EFUCTRL_RD_EN; + + regmap_update_bits(efuse->map, JZ_EFUCTRL, + (EFUCTRL_ADDR_MASK << EFUCTRL_ADDR_SHIFT) | + (EFUCTRL_LEN_MASK << EFUCTRL_LEN_SHIFT) | + EFUCTRL_PG_EN | EFUCTRL_WR_EN | + EFUCTRL_RD_EN, + ctrl); + + ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE, + tmp, tmp & EFUSTATE_RD_DONE, + 1 * MSEC_PER_SEC, + 50 * MSEC_PER_SEC); + if (ret < 0) { + dev_err(efuse->dev, "Time out while reading efuse data"); + return ret; + } + + ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0), + buf, JZ_EFU_READ_SIZE / sizeof(u32)); + if (ret < 0) + return ret; + + memcpy(val, &buf[offset - start], chunk); + + val += chunk; + offset += chunk; + bytes -= chunk; + } + + return 0; +} + +static struct nvmem_config jz4780_efuse_nvmem_config = { + .name = "jz4780-efuse", + .size = 1024, + .word_size = 1, + .stride = 1, + .owner = THIS_MODULE, + .reg_read = jz4780_efuse_read, +}; + +static const struct regmap_config jz4780_efuse_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = JZ_EFUDATA(7), +}; + +static void clk_disable_unprepare_helper(void *clock) +{ + clk_disable_unprepare(clock); +} + +static int jz4780_efuse_probe(struct platform_device *pdev) +{ + struct nvmem_device *nvmem; + struct jz4780_efuse *efuse; + struct nvmem_config cfg; + unsigned long clk_rate; + unsigned long rd_adj; + unsigned long rd_strobe; + struct device *dev = &pdev->dev; + void __iomem *regs; + int ret; + + efuse = devm_kzalloc(dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + efuse->map = devm_regmap_init_mmio(dev, regs, + &jz4780_efuse_regmap_config); + if (IS_ERR(efuse->map)) + return PTR_ERR(efuse->map); + + efuse->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(efuse->clk)) + return PTR_ERR(efuse->clk); + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, + clk_disable_unprepare_helper, + efuse->clk); + if (ret < 0) + return ret; + + clk_rate = clk_get_rate(efuse->clk); + + efuse->dev = dev; + + /* + * rd_adj and rd_strobe are 4 bit values + * conditions: + * bus clk_period * (rd_adj + 1) > 6.5ns + * bus clk_period * (rd_adj + 5 + rd_strobe) > 35ns + * i.e. rd_adj >= 6.5ns / clk_period + * i.e. 
rd_strobe >= 35 ns / clk_period - 5 - rd_adj + 1
+	 * constants:
+	 *   1 / 6.5ns == 153846154 Hz
+	 *   1 / 35ns == 28571429 Hz
+	 * e.g. for a 200 MHz bus clock this gives rd_adj = 1 and
+	 * rd_strobe = 2, well within the 4-bit field limits checked below.
+	 */
+
+	rd_adj = clk_rate / 153846154;
+	rd_strobe = clk_rate / 28571429 - 5 - rd_adj + 1;
+
+	if (rd_adj > EFUCFG_RD_ADJ_MASK ||
+	    rd_strobe > EFUCFG_RD_STR_MASK) {
+		dev_err(&pdev->dev, "Cannot set clock configuration\n");
+		return -EINVAL;
+	}
+
+	regmap_update_bits(efuse->map, JZ_EFUCFG,
+			   (EFUCFG_RD_ADJ_MASK << EFUCFG_RD_ADJ_SHIFT) |
+			   (EFUCFG_RD_STR_MASK << EFUCFG_RD_STR_SHIFT),
+			   (rd_adj << EFUCFG_RD_ADJ_SHIFT) |
+			   (rd_strobe << EFUCFG_RD_STR_SHIFT));
+
+	cfg = jz4780_efuse_nvmem_config;
+	cfg.dev = &pdev->dev;
+	cfg.priv = efuse;
+
+	nvmem = devm_nvmem_register(dev, &cfg);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id jz4780_efuse_match[] = {
+	{ .compatible = "ingenic,jz4780-efuse" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jz4780_efuse_match);
+
+static struct platform_driver jz4780_efuse_driver = {
+	.probe  = jz4780_efuse_probe,
+	.driver = {
+		.name = "jz4780-efuse",
+		.of_match_table = jz4780_efuse_match,
+	},
+};
+module_platform_driver(jz4780_efuse_driver);
+
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4780 efuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lan9662-otpc.c b/drivers/nvmem/lan9662-otpc.c
new file mode 100644
index 0000000000..56fc19f092
--- /dev/null
+++ b/drivers/nvmem/lan9662-otpc.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define OTP_OTP_PWR_DN(t)			(t + 0x00)
+#define OTP_OTP_PWR_DN_OTP_PWRDN_N		BIT(0)
+#define OTP_OTP_ADDR_HI(t)			(t + 0x04)
+#define OTP_OTP_ADDR_LO(t)			(t + 0x08)
+#define OTP_OTP_PRGM_DATA(t)			(t + 0x10)
+#define OTP_OTP_PRGM_MODE(t)			(t + 0x14)
+#define OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE	BIT(0)
+#define OTP_OTP_RD_DATA(t)			(t + 0x18)
+#define OTP_OTP_FUNC_CMD(t)			(t + 0x20)
+#define OTP_OTP_FUNC_CMD_OTP_PROGRAM		BIT(1)
+#define OTP_OTP_FUNC_CMD_OTP_READ		BIT(0)
+#define OTP_OTP_CMD_GO(t)			(t + 0x28)
+#define OTP_OTP_CMD_GO_OTP_GO			BIT(0)
+#define OTP_OTP_PASS_FAIL(t)			(t + 0x2c)
+#define OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED	BIT(3)
+#define OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED	BIT(2)
+#define OTP_OTP_PASS_FAIL_OTP_FAIL		BIT(0)
+#define OTP_OTP_STATUS(t)			(t + 0x30)
+#define OTP_OTP_STATUS_OTP_CPUMPEN		BIT(1)
+#define OTP_OTP_STATUS_OTP_BUSY			BIT(0)
+
+#define OTP_MEM_SIZE 8192
+#define OTP_SLEEP_US 10
+#define OTP_TIMEOUT_US 500000
+
+struct lan9662_otp {
+	struct device *dev;
+	void __iomem *base;
+};
+
+static int lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
+{
+	u32 val;
+
+	return readl_poll_timeout(reg, val, !(val & flag),
+				  OTP_SLEEP_US, OTP_TIMEOUT_US);
+}
+
+static int lan9662_otp_power(struct lan9662_otp *otp, bool up)
+{
+	void __iomem *pwrdn = OTP_OTP_PWR_DN(otp->base);
+
+	if (up) {
+		writel(readl(pwrdn) & ~OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+		if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+						OTP_OTP_STATUS_OTP_CPUMPEN))
+			return -ETIMEDOUT;
+	} else {
+		writel(readl(pwrdn) | OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+	}
+
+	return 0;
+}
+
+static int lan9662_otp_execute(struct lan9662_otp *otp)
+{
+	if (lan9662_otp_wait_flag_clear(OTP_OTP_CMD_GO(otp->base),
+					OTP_OTP_CMD_GO_OTP_GO))
+		return -ETIMEDOUT;
+
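+	/*
+	 * The command has been accepted once OTP_GO clears; the controller
+	 * then keeps OTP_BUSY asserted until the operation itself completes,
+	 * so poll for that flag as well.
+	 */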
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base), + OTP_OTP_STATUS_OTP_BUSY)) + return -ETIMEDOUT; + + return 0; +} + +static void lan9662_otp_set_address(struct lan9662_otp *otp, u32 offset) +{ + writel(0xff & (offset >> 8), OTP_OTP_ADDR_HI(otp->base)); + writel(0xff & offset, OTP_OTP_ADDR_LO(otp->base)); +} + +static int lan9662_otp_read_byte(struct lan9662_otp *otp, u32 offset, u8 *dst) +{ + u32 pass; + int rc; + + lan9662_otp_set_address(otp, offset); + writel(OTP_OTP_FUNC_CMD_OTP_READ, OTP_OTP_FUNC_CMD(otp->base)); + writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base)); + rc = lan9662_otp_execute(otp); + if (!rc) { + pass = readl(OTP_OTP_PASS_FAIL(otp->base)); + if (pass & OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED) + return -EACCES; + *dst = (u8) readl(OTP_OTP_RD_DATA(otp->base)); + } + return rc; +} + +static int lan9662_otp_write_byte(struct lan9662_otp *otp, u32 offset, u8 data) +{ + u32 pass; + int rc; + + lan9662_otp_set_address(otp, offset); + writel(OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE, OTP_OTP_PRGM_MODE(otp->base)); + writel(data, OTP_OTP_PRGM_DATA(otp->base)); + writel(OTP_OTP_FUNC_CMD_OTP_PROGRAM, OTP_OTP_FUNC_CMD(otp->base)); + writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base)); + + rc = lan9662_otp_execute(otp); + if (!rc) { + pass = readl(OTP_OTP_PASS_FAIL(otp->base)); + if (pass & OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED) + return -EACCES; + if (pass & OTP_OTP_PASS_FAIL_OTP_FAIL) + return -EIO; + } + return rc; +} + +static int lan9662_otp_read(void *context, unsigned int offset, + void *_val, size_t bytes) +{ + struct lan9662_otp *otp = context; + u8 *val = _val; + uint8_t data; + int i, rc = 0; + + lan9662_otp_power(otp, true); + for (i = 0; i < bytes; i++) { + rc = lan9662_otp_read_byte(otp, offset + i, &data); + if (rc < 0) + break; + *val++ = data; + } + lan9662_otp_power(otp, false); + + return rc; +} + +static int lan9662_otp_write(void *context, unsigned int offset, + void *_val, size_t bytes) +{ + struct lan9662_otp *otp = context; + u8 *val = _val; + u8 data, newdata; + int i, rc = 0; + + lan9662_otp_power(otp, true); + for (i = 0; i < bytes; i++) { + /* Skip zero bytes */ + if (val[i]) { + rc = lan9662_otp_read_byte(otp, offset + i, &data); + if (rc < 0) + break; + + newdata = data | val[i]; + if (newdata == data) + continue; + + rc = lan9662_otp_write_byte(otp, offset + i, + newdata); + if (rc < 0) + break; + } + } + lan9662_otp_power(otp, false); + + return rc; +} + +static struct nvmem_config otp_config = { + .name = "lan9662-otp", + .stride = 1, + .word_size = 1, + .reg_read = lan9662_otp_read, + .reg_write = lan9662_otp_write, + .size = OTP_MEM_SIZE, +}; + +static int lan9662_otp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nvmem_device *nvmem; + struct lan9662_otp *otp; + + otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + otp->dev = dev; + otp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + otp_config.priv = otp; + otp_config.dev = dev; + + nvmem = devm_nvmem_register(dev, &otp_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id lan9662_otp_match[] = { + { .compatible = "microchip,lan9662-otpc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, lan9662_otp_match); + +static struct platform_driver lan9662_otp_driver = { + .probe = lan9662_otp_probe, + .driver = { + .name = "lan9662-otp", + .of_match_table = lan9662_otp_match, + }, +}; +module_platform_driver(lan9662_otp_driver); + 
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>"); +MODULE_DESCRIPTION("lan9662 OTP driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/layerscape-sfp.c b/drivers/nvmem/layerscape-sfp.c new file mode 100644 index 0000000000..e2b4245619 --- /dev/null +++ b/drivers/nvmem/layerscape-sfp.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Layerscape SFP driver + * + * Copyright (c) 2022 Michael Walle <michael@walle.cc> + * + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> + +#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200 + +struct layerscape_sfp_priv { + struct regmap *regmap; +}; + +struct layerscape_sfp_data { + int size; + enum regmap_endian endian; +}; + +static int layerscape_sfp_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct layerscape_sfp_priv *priv = context; + + return regmap_bulk_read(priv->regmap, + LAYERSCAPE_SFP_OTP_OFFSET + offset, val, + bytes / 4); +} + +static struct nvmem_config layerscape_sfp_nvmem_config = { + .name = "fsl-sfp", + .reg_read = layerscape_sfp_read, + .word_size = 4, + .stride = 4, +}; + +static int layerscape_sfp_probe(struct platform_device *pdev) +{ + const struct layerscape_sfp_data *data; + struct layerscape_sfp_priv *priv; + struct nvmem_device *nvmem; + struct regmap_config config = { 0 }; + void __iomem *base; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + data = device_get_match_data(&pdev->dev); + config.reg_bits = 32; + config.reg_stride = 4; + config.val_bits = 32; + config.val_format_endian = data->endian; + config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4; + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + layerscape_sfp_nvmem_config.size = data->size; + layerscape_sfp_nvmem_config.dev = &pdev->dev; + layerscape_sfp_nvmem_config.priv = priv; + + nvmem = devm_nvmem_register(&pdev->dev, &layerscape_sfp_nvmem_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct layerscape_sfp_data ls1021a_data = { + .size = 0x88, + .endian = REGMAP_ENDIAN_BIG, +}; + +static const struct layerscape_sfp_data ls1028a_data = { + .size = 0x88, + .endian = REGMAP_ENDIAN_LITTLE, +}; + +static const struct of_device_id layerscape_sfp_dt_ids[] = { + { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data }, + { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, layerscape_sfp_dt_ids); + +static struct platform_driver layerscape_sfp_driver = { + .probe = layerscape_sfp_probe, + .driver = { + .name = "layerscape_sfp", + .of_match_table = layerscape_sfp_dt_ids, + }, +}; +module_platform_driver(layerscape_sfp_driver); + +MODULE_AUTHOR("Michael Walle <michael@walle.cc>"); +MODULE_DESCRIPTION("Layerscape Security Fuse Processor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig new file mode 100644 index 0000000000..7ff1ee1c1f --- /dev/null +++ b/drivers/nvmem/layouts/Kconfig @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "Layout Types" + +config NVMEM_LAYOUT_SL28_VPD + tristate "Kontron sl28 VPD layout support" + select CRC8 + help + Say Y here if you 
want to support the VPD layout of the Kontron
+	  SMARC-sAL28 boards.
+
+	  If unsure, say N.
+
+config NVMEM_LAYOUT_ONIE_TLV
+	tristate "ONIE tlv support"
+	select CRC32
+	help
+	  Say Y here if you want to support the Open Compute Project ONIE
+	  Type-Length-Value standard table.
+
+	  If unsure, say N.
+
+endmenu
diff --git a/drivers/nvmem/layouts/Makefile b/drivers/nvmem/layouts/Makefile
new file mode 100644
index 0000000000..2974bd7d33
--- /dev/null
+++ b/drivers/nvmem/layouts/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for nvmem layouts.
+#
+
+obj-$(CONFIG_NVMEM_LAYOUT_SL28_VPD)	+= sl28vpd.o
+obj-$(CONFIG_NVMEM_LAYOUT_ONIE_TLV)	+= onie-tlv.o
diff --git a/drivers/nvmem/layouts/onie-tlv.c b/drivers/nvmem/layouts/onie-tlv.c
new file mode 100644
index 0000000000..59fc87ccfc
--- /dev/null
+++ b/drivers/nvmem/layouts/onie-tlv.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ONIE tlv NVMEM cells provider
+ *
+ * Copyright (C) 2022 Open Compute Group ONIE
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ * Based on the nvmem driver written by: Vadym Kochan <vadym.kochan@plvision.eu>
+ * Inspired by the first layout written by: Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+
+#define ONIE_TLV_MAX_LEN	2048
+#define ONIE_TLV_CRC_FIELD_SZ	6
+#define ONIE_TLV_CRC_SZ		4
+#define ONIE_TLV_HDR_ID		"TlvInfo"
+
+struct onie_tlv_hdr {
+	u8 id[8];
+	u8 version;
+	__be16 data_len;
+} __packed;
+
+struct onie_tlv {
+	u8 type;
+	u8 len;
+} __packed;
+
+static const char *onie_tlv_cell_name(u8 type)
+{
+	switch (type) {
+	case 0x21:
+		return "product-name";
+	case 0x22:
+		return "part-number";
+	case 0x23:
+		return "serial-number";
+	case 0x24:
+		return "mac-address";
+	case 0x25:
+		return "manufacture-date";
+	case 0x26:
+		return "device-version";
+	case 0x27:
+		return "label-revision";
+	case 0x28:
+		return "platform-name";
+	case 0x29:
+		return "onie-version";
+	case 0x2A:
+		return "num-macs";
+	case 0x2B:
+		return "manufacturer";
+	case 0x2C:
+		return "country-code";
+	case 0x2D:
+		return "vendor";
+	case 0x2E:
+		return "diag-version";
+	case 0x2F:
+		return "service-tag";
+	case 0xFD:
+		return "vendor-extension";
+	case 0xFE:
+		return "crc32";
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+static int onie_tlv_mac_read_cb(void *priv, const char *id, int index,
+				unsigned int offset, void *buf,
+				size_t bytes)
+{
+	eth_addr_add(buf, index);
+
+	return 0;
+}
+
+static nvmem_cell_post_process_t onie_tlv_read_cb(u8 type, u8 *buf)
+{
+	switch (type) {
+	case 0x24:
+		return &onie_tlv_mac_read_cb;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+static int onie_tlv_add_cells(struct device *dev, struct nvmem_device *nvmem,
+			      size_t data_len, u8 *data)
+{
+	struct nvmem_cell_info cell = {};
+	struct device_node *layout;
+	struct onie_tlv tlv;
+	unsigned int hdr_len = sizeof(struct onie_tlv_hdr);
+	unsigned int offset = 0;
+	int ret;
+
+	layout = of_nvmem_layout_get_container(nvmem);
+	if (!layout)
+		return -ENOENT;
+
+	while (offset < data_len) {
+		memcpy(&tlv, data + offset, sizeof(tlv));
+		if (offset + tlv.len >= data_len) {
+			dev_err(dev, "Out of bounds field (0x%x bytes at 0x%x)\n",
+				tlv.len, hdr_len + offset);
+			break;
+		}
+
+		cell.name = onie_tlv_cell_name(tlv.type);
+		if (!cell.name) {
+			/* Unknown field type: skip it, or we would loop forever. */
+			offset += sizeof(tlv) + tlv.len;
+			continue;
+		}
+
+		cell.offset = hdr_len + offset + sizeof(tlv.type) + sizeof(tlv.len);
+		cell.bytes = tlv.len;
+		cell.np =
of_get_child_by_name(layout, cell.name); + cell.read_post_process = onie_tlv_read_cb(tlv.type, data + offset + sizeof(tlv)); + + ret = nvmem_add_one_cell(nvmem, &cell); + if (ret) { + of_node_put(layout); + return ret; + } + + offset += sizeof(tlv) + tlv.len; + } + + of_node_put(layout); + + return 0; +} + +static bool onie_tlv_hdr_is_valid(struct device *dev, struct onie_tlv_hdr *hdr) +{ + if (memcmp(hdr->id, ONIE_TLV_HDR_ID, sizeof(hdr->id))) { + dev_err(dev, "Invalid header\n"); + return false; + } + + if (hdr->version != 0x1) { + dev_err(dev, "Invalid version number\n"); + return false; + } + + return true; +} + +static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *table) +{ + struct onie_tlv crc_hdr; + u32 read_crc, calc_crc; + __be32 crc_be; + + memcpy(&crc_hdr, table + table_len - ONIE_TLV_CRC_FIELD_SZ, sizeof(crc_hdr)); + if (crc_hdr.type != 0xfe || crc_hdr.len != ONIE_TLV_CRC_SZ) { + dev_err(dev, "Invalid CRC field\n"); + return false; + } + + /* The table contains a JAMCRC, which is XOR'ed compared to the original + * CRC32 implementation as known in the Ethernet world. + */ + memcpy(&crc_be, table + table_len - ONIE_TLV_CRC_SZ, ONIE_TLV_CRC_SZ); + read_crc = be32_to_cpu(crc_be); + calc_crc = crc32(~0, table, table_len - ONIE_TLV_CRC_SZ) ^ 0xFFFFFFFF; + if (read_crc != calc_crc) { + dev_err(dev, "Invalid CRC read: 0x%08x, expected: 0x%08x\n", + read_crc, calc_crc); + return false; + } + + return true; +} + +static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem, + struct nvmem_layout *layout) +{ + struct onie_tlv_hdr hdr; + size_t table_len, data_len, hdr_len; + u8 *table, *data; + int ret; + + ret = nvmem_device_read(nvmem, 0, sizeof(hdr), &hdr); + if (ret < 0) + return ret; + + if (!onie_tlv_hdr_is_valid(dev, &hdr)) { + dev_err(dev, "Invalid ONIE TLV header\n"); + return -EINVAL; + } + + hdr_len = sizeof(hdr.id) + sizeof(hdr.version) + sizeof(hdr.data_len); + data_len = be16_to_cpu(hdr.data_len); + table_len = hdr_len + data_len; + if (table_len > ONIE_TLV_MAX_LEN) { + dev_err(dev, "Invalid ONIE TLV data length\n"); + return -EINVAL; + } + + table = devm_kmalloc(dev, table_len, GFP_KERNEL); + if (!table) + return -ENOMEM; + + ret = nvmem_device_read(nvmem, 0, table_len, table); + if (ret != table_len) + return ret; + + if (!onie_tlv_crc_is_valid(dev, table_len, table)) + return -EINVAL; + + data = table + hdr_len; + ret = onie_tlv_add_cells(dev, nvmem, data_len, data); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id onie_tlv_of_match_table[] = { + { .compatible = "onie,tlv-layout", }, + {}, +}; +MODULE_DEVICE_TABLE(of, onie_tlv_of_match_table); + +static struct nvmem_layout onie_tlv_layout = { + .name = "ONIE tlv layout", + .of_match_table = onie_tlv_of_match_table, + .add_cells = onie_tlv_parse_table, +}; +module_nvmem_layout_driver(onie_tlv_layout); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>"); +MODULE_DESCRIPTION("NVMEM layout driver for Onie TLV table parsing"); diff --git a/drivers/nvmem/layouts/sl28vpd.c b/drivers/nvmem/layouts/sl28vpd.c new file mode 100644 index 0000000000..05671371f6 --- /dev/null +++ b/drivers/nvmem/layouts/sl28vpd.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/crc8.h> +#include <linux/etherdevice.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <uapi/linux/if_ether.h> + +#define SL28VPD_MAGIC 'V' + +struct sl28vpd_header { + u8 magic; + u8 version; 
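+	/* version-specific data follows; version 1 is struct sl28vpd_v1 below */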
+} __packed; + +struct sl28vpd_v1 { + struct sl28vpd_header header; + char serial_number[15]; + u8 base_mac_address[ETH_ALEN]; + u8 crc8; +} __packed; + +static int sl28vpd_mac_address_pp(void *priv, const char *id, int index, + unsigned int offset, void *buf, + size_t bytes) +{ + if (bytes != ETH_ALEN) + return -EINVAL; + + if (index < 0) + return -EINVAL; + + if (!is_valid_ether_addr(buf)) + return -EINVAL; + + eth_addr_add(buf, index); + + return 0; +} + +static const struct nvmem_cell_info sl28vpd_v1_entries[] = { + { + .name = "serial-number", + .offset = offsetof(struct sl28vpd_v1, serial_number), + .bytes = sizeof_field(struct sl28vpd_v1, serial_number), + }, + { + .name = "base-mac-address", + .offset = offsetof(struct sl28vpd_v1, base_mac_address), + .bytes = sizeof_field(struct sl28vpd_v1, base_mac_address), + .read_post_process = sl28vpd_mac_address_pp, + }, +}; + +static int sl28vpd_v1_check_crc(struct device *dev, struct nvmem_device *nvmem) +{ + struct sl28vpd_v1 data_v1; + u8 table[CRC8_TABLE_SIZE]; + int ret; + u8 crc; + + crc8_populate_msb(table, 0x07); + + ret = nvmem_device_read(nvmem, 0, sizeof(data_v1), &data_v1); + if (ret < 0) + return ret; + else if (ret != sizeof(data_v1)) + return -EIO; + + crc = crc8(table, (void *)&data_v1, sizeof(data_v1) - 1, 0); + + if (crc != data_v1.crc8) { + dev_err(dev, + "Checksum is invalid (got %02x, expected %02x).\n", + crc, data_v1.crc8); + return -EINVAL; + } + + return 0; +} + +static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem, + struct nvmem_layout *layout) +{ + const struct nvmem_cell_info *pinfo; + struct nvmem_cell_info info = {0}; + struct device_node *layout_np; + struct sl28vpd_header hdr; + int ret, i; + + /* check header */ + ret = nvmem_device_read(nvmem, 0, sizeof(hdr), &hdr); + if (ret < 0) + return ret; + else if (ret != sizeof(hdr)) + return -EIO; + + if (hdr.magic != SL28VPD_MAGIC) { + dev_err(dev, "Invalid magic value (%02x)\n", hdr.magic); + return -EINVAL; + } + + if (hdr.version != 1) { + dev_err(dev, "Version %d is unsupported.\n", hdr.version); + return -EINVAL; + } + + ret = sl28vpd_v1_check_crc(dev, nvmem); + if (ret) + return ret; + + layout_np = of_nvmem_layout_get_container(nvmem); + if (!layout_np) + return -ENOENT; + + for (i = 0; i < ARRAY_SIZE(sl28vpd_v1_entries); i++) { + pinfo = &sl28vpd_v1_entries[i]; + + info.name = pinfo->name; + info.offset = pinfo->offset; + info.bytes = pinfo->bytes; + info.read_post_process = pinfo->read_post_process; + info.np = of_get_child_by_name(layout_np, pinfo->name); + + ret = nvmem_add_one_cell(nvmem, &info); + if (ret) { + of_node_put(layout_np); + return ret; + } + } + + of_node_put(layout_np); + + return 0; +} + +static const struct of_device_id sl28vpd_of_match_table[] = { + { .compatible = "kontron,sl28-vpd" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sl28vpd_of_match_table); + +static struct nvmem_layout sl28vpd_layout = { + .name = "sl28-vpd", + .of_match_table = sl28vpd_of_match_table, + .add_cells = sl28vpd_add_cells, +}; +module_nvmem_layout_driver(sl28vpd_layout); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michael Walle <michael@walle.cc>"); +MODULE_DESCRIPTION("NVMEM layout driver for the VPD of Kontron sl28 boards"); diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c new file mode 100644 index 0000000000..a0275b29af --- /dev/null +++ b/drivers/nvmem/lpc18xx_eeprom.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver + * + * Copyright (c) 2015 
Ariel D'Alessandro <ariel@vanguardiasur.com> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* Registers */ +#define LPC18XX_EEPROM_AUTOPROG 0x00c +#define LPC18XX_EEPROM_AUTOPROG_WORD 0x1 + +#define LPC18XX_EEPROM_CLKDIV 0x014 + +#define LPC18XX_EEPROM_PWRDWN 0x018 +#define LPC18XX_EEPROM_PWRDWN_NO 0x0 +#define LPC18XX_EEPROM_PWRDWN_YES 0x1 + +#define LPC18XX_EEPROM_INTSTAT 0xfe0 +#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2) + +#define LPC18XX_EEPROM_INTSTATCLR 0xfe8 +#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2) + +/* Fixed page size (bytes) */ +#define LPC18XX_EEPROM_PAGE_SIZE 0x80 + +/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */ +#define LPC18XX_EEPROM_CLOCK_HZ 1500000 + +/* EEPROM requires 3 ms of erase/program time between each writing */ +#define LPC18XX_EEPROM_PROGRAM_TIME 3 + +struct lpc18xx_eeprom_dev { + struct clk *clk; + void __iomem *reg_base; + void __iomem *mem_base; + struct nvmem_device *nvmem; + unsigned reg_bytes; + unsigned val_bytes; + int size; +}; + +static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, + u32 reg, u32 val) +{ + writel(val, eeprom->reg_base + reg); +} + +static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom, + u32 reg) +{ + return readl(eeprom->reg_base + reg); +} + +static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom) +{ + unsigned long end; + u32 val; + + /* Wait until EEPROM program operation has finished */ + end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10); + + while (time_is_after_jiffies(end)) { + val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT); + + if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) { + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR, + LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST); + return 0; + } + + usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC, + (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC); + } + + return -ETIMEDOUT; +} + +static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg, + void *val, size_t bytes) +{ + struct lpc18xx_eeprom_dev *eeprom = context; + unsigned int offset = reg; + int ret; + + /* + * The last page contains the EEPROM initialization data and is not + * writable. 
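+	 * For example, on a part with a 16 KiB EEPROM (an illustrative
+	 * figure; the real size comes from the "mem" resource at probe
+	 * time), only offsets below 16384 - 128 bytes are accepted here.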
+ */ + if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) || + (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE)) + return -EINVAL; + + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_NO); + + /* Wait 100 us while the EEPROM wakes up */ + usleep_range(100, 200); + + while (bytes) { + writel(*(u32 *)val, eeprom->mem_base + offset); + ret = lpc18xx_eeprom_busywait_until_prog(eeprom); + if (ret < 0) + return ret; + + bytes -= eeprom->val_bytes; + val += eeprom->val_bytes; + offset += eeprom->val_bytes; + } + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + return 0; +} + +static int lpc18xx_eeprom_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct lpc18xx_eeprom_dev *eeprom = context; + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_NO); + + /* Wait 100 us while the EEPROM wakes up */ + usleep_range(100, 200); + + while (bytes) { + *(u32 *)val = readl(eeprom->mem_base + offset); + bytes -= eeprom->val_bytes; + val += eeprom->val_bytes; + offset += eeprom->val_bytes; + } + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + return 0; +} + + +static struct nvmem_config lpc18xx_nvmem_config = { + .name = "lpc18xx-eeprom", + .stride = 4, + .word_size = 4, + .reg_read = lpc18xx_eeprom_read, + .reg_write = lpc18xx_eeprom_gather_write, +}; + +static int lpc18xx_eeprom_probe(struct platform_device *pdev) +{ + struct lpc18xx_eeprom_dev *eeprom; + struct device *dev = &pdev->dev; + struct reset_control *rst; + unsigned long clk_rate; + struct resource *res; + int ret; + + eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + eeprom->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(eeprom->reg_base)) + return PTR_ERR(eeprom->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + eeprom->mem_base = devm_ioremap_resource(dev, res); + if (IS_ERR(eeprom->mem_base)) + return PTR_ERR(eeprom->mem_base); + + eeprom->clk = devm_clk_get(&pdev->dev, "eeprom"); + if (IS_ERR(eeprom->clk)) { + dev_err(&pdev->dev, "failed to get eeprom clock\n"); + return PTR_ERR(eeprom->clk); + } + + ret = clk_prepare_enable(eeprom->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret); + return ret; + } + + rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(rst)) { + dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst)); + ret = PTR_ERR(rst); + goto err_clk; + } + + ret = reset_control_assert(rst); + if (ret < 0) { + dev_err(dev, "failed to assert reset: %d\n", ret); + goto err_clk; + } + + eeprom->val_bytes = 4; + eeprom->reg_bytes = 4; + + /* + * Clock rate is generated by dividing the system bus clock by the + * division factor, contained in the divider register (minus 1 encoded). 
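+	 * For example, with a 120 MHz bus clock the value written below is
+	 * DIV_ROUND_UP(120000000, 1500000) - 1 = 79, i.e. a 1.5 MHz EEPROM
+	 * clock.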
+	 */
+	clk_rate = clk_get_rate(eeprom->clk);
+	clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
+
+	/*
+	 * Writing a single word to the page will start the erase/program cycle
+	 * automatically.
+	 */
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
+			      LPC18XX_EEPROM_AUTOPROG_WORD);
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_YES);
+
+	eeprom->size = resource_size(res);
+	lpc18xx_nvmem_config.size = resource_size(res);
+	lpc18xx_nvmem_config.dev = dev;
+	lpc18xx_nvmem_config.priv = eeprom;
+
+	eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
+	if (IS_ERR(eeprom->nvmem)) {
+		ret = PTR_ERR(eeprom->nvmem);
+		goto err_clk;
+	}
+
+	platform_set_drvdata(pdev, eeprom);
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(eeprom->clk);
+
+	return ret;
+}
+
+static int lpc18xx_eeprom_remove(struct platform_device *pdev)
+{
+	struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(eeprom->clk);
+
+	return 0;
+}
+
+static const struct of_device_id lpc18xx_eeprom_of_match[] = {
+	{ .compatible = "nxp,lpc1857-eeprom" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
+
+static struct platform_driver lpc18xx_eeprom_driver = {
+	.probe = lpc18xx_eeprom_probe,
+	.remove = lpc18xx_eeprom_remove,
+	.driver = {
+		.name = "lpc18xx-eeprom",
+		.of_match_table = lpc18xx_eeprom_of_match,
+	},
+};
+
+module_platform_driver(lpc18xx_eeprom_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
new file mode 100644
index 0000000000..adc9948e7b
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NXP LPC18xx/43xx OTP memory NVMEM driver
+ *
+ * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on the imx ocotp driver,
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * TODO: add support for writing OTP registers via the boot ROM API.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts
+ * at offset 0 from the base.
+ *
+ * Bank 0 contains the part ID for Flashless devices and is reserved for
+ * devices with Flash.
+ * Bank 1/2 is general purpose or AES key storage for secure devices.
+ * Bank 3 contains control data, USB ID and general purpose words.
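+ * Word w of bank b is thus read below at byte offset (b * 4 + w) * 4
+ * from the controller base.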
+ */
+#define LPC18XX_OTP_NUM_BANKS		4
+#define LPC18XX_OTP_WORDS_PER_BANK	4
+#define LPC18XX_OTP_WORD_SIZE		sizeof(u32)
+#define LPC18XX_OTP_SIZE		(LPC18XX_OTP_NUM_BANKS * \
+					 LPC18XX_OTP_WORDS_PER_BANK * \
+					 LPC18XX_OTP_WORD_SIZE)
+
+struct lpc18xx_otp {
+	void __iomem *base;
+};
+
+static int lpc18xx_otp_read(void *context, unsigned int offset,
+			    void *val, size_t bytes)
+{
+	struct lpc18xx_otp *otp = context;
+	unsigned int count = bytes >> 2;
+	u32 index = offset >> 2;
+	u32 *buf = val;
+	int i;
+
+	/* Clamp to the number of remaining words, not bytes. */
+	if (count > (LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index))
+		count = LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index;
+
+	for (i = index; i < (index + count); i++)
+		*buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
+
+	return 0;
+}
+
+static struct nvmem_config lpc18xx_otp_nvmem_config = {
+	.name = "lpc18xx-otp",
+	.read_only = true,
+	.word_size = LPC18XX_OTP_WORD_SIZE,
+	.stride = LPC18XX_OTP_WORD_SIZE,
+	.reg_read = lpc18xx_otp_read,
+};
+
+static int lpc18xx_otp_probe(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem;
+	struct lpc18xx_otp *otp;
+
+	otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+	if (!otp)
+		return -ENOMEM;
+
+	otp->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(otp->base))
+		return PTR_ERR(otp->base);
+
+	lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
+	lpc18xx_otp_nvmem_config.dev = &pdev->dev;
+	lpc18xx_otp_nvmem_config.priv = otp;
+
+	nvmem = devm_nvmem_register(&pdev->dev, &lpc18xx_otp_nvmem_config);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id lpc18xx_otp_dt_ids[] = {
+	{ .compatible = "nxp,lpc1850-otp" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
+
+static struct platform_driver lpc18xx_otp_driver = {
+	.probe = lpc18xx_otp_probe,
+	.driver = {
+		.name = "lpc18xx_otp",
+		.of_match_table = lpc18xx_otp_dt_ids,
+	},
+};
+module_platform_driver(lpc18xx_otp_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
new file mode 100644
index 0000000000..d6b533497c
--- /dev/null
+++ b/drivers/nvmem/meson-efuse.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Meson GX eFuse Driver
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com> + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <linux/firmware/meson/meson_sm.h> + +static int meson_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct meson_sm_firmware *fw = context; + + return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); +} + +static int meson_efuse_write(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct meson_sm_firmware *fw = context; + + return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, + bytes, 0, 0, 0); +} + +static const struct of_device_id meson_efuse_match[] = { + { .compatible = "amlogic,meson-gxbb-efuse", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_efuse_match); + +static int meson_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct meson_sm_firmware *fw; + struct device_node *sm_np; + struct nvmem_device *nvmem; + struct nvmem_config *econfig; + struct clk *clk; + unsigned int size; + int ret; + + sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0); + if (!sm_np) { + dev_err(&pdev->dev, "no secure-monitor node\n"); + return -ENODEV; + } + + fw = meson_sm_get(sm_np); + of_node_put(sm_np); + if (!fw) + return -EPROBE_DEFER; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get efuse gate"); + return ret; + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "failed to enable gate"); + return ret; + } + + ret = devm_add_action_or_reset(dev, + (void(*)(void *))clk_disable_unprepare, + clk); + if (ret) { + dev_err(dev, "failed to add disable callback"); + return ret; + } + + if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) { + dev_err(dev, "failed to get max user"); + return -EINVAL; + } + + econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); + if (!econfig) + return -ENOMEM; + + econfig->dev = dev; + econfig->name = dev_name(dev); + econfig->stride = 1; + econfig->word_size = 1; + econfig->reg_read = meson_efuse_read; + econfig->reg_write = meson_efuse_write; + econfig->size = size; + econfig->priv = fw; + + nvmem = devm_nvmem_register(&pdev->dev, econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver meson_efuse_driver = { + .probe = meson_efuse_probe, + .driver = { + .name = "meson-efuse", + .of_match_table = meson_efuse_match, + }, +}; + +module_platform_driver(meson_efuse_driver); + +MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); +MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c new file mode 100644 index 0000000000..d6d7aeda31 --- /dev/null +++ b/drivers/nvmem/meson-mx-efuse.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Amlogic Meson6, Meson8 and Meson8b eFuse Driver + * + * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#define MESON_MX_EFUSE_CNTL1 0x04 +#define 
MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24) +#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12) +#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11) +#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0) + +#define MESON_MX_EFUSE_CNTL2 0x08 + +#define MESON_MX_EFUSE_CNTL4 0x10 +#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10) + +struct meson_mx_efuse_platform_data { + const char *name; + unsigned int word_size; +}; + +struct meson_mx_efuse { + void __iomem *base; + struct clk *core_clk; + struct nvmem_device *nvmem; + struct nvmem_config config; +}; + +static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg, + u32 mask, u32 set) +{ + u32 data; + + data = readl(efuse->base + reg); + data &= ~mask; + data |= (set & mask); + + writel(data, efuse->base + reg); +} + +static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse) +{ + int err; + + err = clk_prepare_enable(efuse->core_clk); + if (err) + return err; + + /* power up the efuse */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0); + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4, + MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0); + + return 0; +} + +static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse) +{ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_PD_ENABLE, + MESON_MX_EFUSE_CNTL1_PD_ENABLE); + + clk_disable_unprepare(efuse->core_clk); +} + +static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse, + unsigned int addr, u32 *value) +{ + int err; + u32 regval; + + /* write the address to read */ + regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval); + + /* inform the hardware that we changed the address */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0); + + /* start the read process */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0); + + /* + * perform a dummy read to ensure that the HW has the RD_BUSY bit set + * when polling for the status below. 
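+	 * Without the dummy read the first poll could still sample the stale
+	 * pre-start register value and report completion too early.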
+ */ + readl(efuse->base + MESON_MX_EFUSE_CNTL1); + + err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1, + regval, + (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)), + 1, 1000); + if (err) { + dev_err(efuse->config.dev, + "Timeout while reading efuse address %u\n", addr); + return err; + } + + *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2); + + return 0; +} + +static int meson_mx_efuse_read(void *context, unsigned int offset, + void *buf, size_t bytes) +{ + struct meson_mx_efuse *efuse = context; + u32 tmp; + int err, i, addr; + + err = meson_mx_efuse_hw_enable(efuse); + if (err) + return err; + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE); + + for (i = 0; i < bytes; i += efuse->config.word_size) { + addr = (offset + i) / efuse->config.word_size; + + err = meson_mx_efuse_read_addr(efuse, addr, &tmp); + if (err) + break; + + memcpy(buf + i, &tmp, + min_t(size_t, bytes - i, efuse->config.word_size)); + } + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0); + + meson_mx_efuse_hw_disable(efuse); + + return err; +} + +static const struct meson_mx_efuse_platform_data meson6_efuse_data = { + .name = "meson6-efuse", + .word_size = 1, +}; + +static const struct meson_mx_efuse_platform_data meson8_efuse_data = { + .name = "meson8-efuse", + .word_size = 4, +}; + +static const struct meson_mx_efuse_platform_data meson8b_efuse_data = { + .name = "meson8b-efuse", + .word_size = 4, +}; + +static const struct of_device_id meson_mx_efuse_match[] = { + { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data }, + { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data }, + { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_mx_efuse_match); + +static int meson_mx_efuse_probe(struct platform_device *pdev) +{ + const struct meson_mx_efuse_platform_data *drvdata; + struct meson_mx_efuse *efuse; + + drvdata = of_device_get_match_data(&pdev->dev); + if (!drvdata) + return -EINVAL; + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + efuse->config.name = drvdata->name; + efuse->config.owner = THIS_MODULE; + efuse->config.dev = &pdev->dev; + efuse->config.priv = efuse; + efuse->config.stride = drvdata->word_size; + efuse->config.word_size = drvdata->word_size; + efuse->config.size = SZ_512; + efuse->config.read_only = true; + efuse->config.reg_read = meson_mx_efuse_read; + + efuse->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(efuse->core_clk)) { + dev_err(&pdev->dev, "Failed to get core clock\n"); + return PTR_ERR(efuse->core_clk); + } + + efuse->nvmem = devm_nvmem_register(&pdev->dev, &efuse->config); + + return PTR_ERR_OR_ZERO(efuse->nvmem); +} + +static struct platform_driver meson_mx_efuse_driver = { + .probe = meson_mx_efuse_probe, + .driver = { + .name = "meson-mx-efuse", + .of_match_table = meson_mx_efuse_match, + }, +}; + +module_platform_driver(meson_mx_efuse_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/microchip-otpc.c b/drivers/nvmem/microchip-otpc.c new file mode 100644 index 0000000000..436e0dc4f3 --- /dev/null +++ 
b/drivers/nvmem/microchip-otpc.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OTP Memory controller
+ *
+ * Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define MCHP_OTPC_CR				(0x0)
+#define MCHP_OTPC_CR_READ			BIT(6)
+#define MCHP_OTPC_MR				(0x4)
+#define MCHP_OTPC_MR_ADDR			GENMASK(31, 16)
+#define MCHP_OTPC_AR				(0x8)
+#define MCHP_OTPC_SR				(0xc)
+#define MCHP_OTPC_SR_READ			BIT(6)
+#define MCHP_OTPC_HR				(0x20)
+#define MCHP_OTPC_HR_SIZE			GENMASK(15, 8)
+#define MCHP_OTPC_DR				(0x24)
+
+#define MCHP_OTPC_NAME				"mchp-otpc"
+#define MCHP_OTPC_SIZE				(11 * 1024)
+
+/**
+ * struct mchp_otpc - OTPC private data structure
+ * @base: base address
+ * @dev: struct device pointer
+ * @packets: list of packets in OTP memory
+ * @npackets: number of packets in OTP memory
+ */
+struct mchp_otpc {
+	void __iomem *base;
+	struct device *dev;
+	struct list_head packets;
+	u32 npackets;
+};
+
+/**
+ * struct mchp_otpc_packet - OTPC packet data structure
+ * @list: list head
+ * @id: packet ID
+ * @offset: packet offset (in words) in OTP memory
+ */
+struct mchp_otpc_packet {
+	struct list_head list;
+	u32 id;
+	u32 offset;
+};
+
+static struct mchp_otpc_packet *mchp_otpc_id_to_packet(struct mchp_otpc *otpc,
+						       u32 id)
+{
+	struct mchp_otpc_packet *packet;
+
+	if (id >= otpc->npackets)
+		return NULL;
+
+	list_for_each_entry(packet, &otpc->packets, list) {
+		if (packet->id == id)
+			return packet;
+	}
+
+	return NULL;
+}
+
+static int mchp_otpc_prepare_read(struct mchp_otpc *otpc,
+				  unsigned int offset)
+{
+	u32 tmp;
+
+	/* Set address. */
+	tmp = readl_relaxed(otpc->base + MCHP_OTPC_MR);
+	tmp &= ~MCHP_OTPC_MR_ADDR;
+	tmp |= FIELD_PREP(MCHP_OTPC_MR_ADDR, offset);
+	writel_relaxed(tmp, otpc->base + MCHP_OTPC_MR);
+
+	/* Set read. */
+	tmp = readl_relaxed(otpc->base + MCHP_OTPC_CR);
+	tmp |= MCHP_OTPC_CR_READ;
+	writel_relaxed(tmp, otpc->base + MCHP_OTPC_CR);
+
+	/* Wait for packet to be transferred into temporary buffers. */
+	return read_poll_timeout(readl_relaxed, tmp, !(tmp & MCHP_OTPC_SR_READ),
+				 10000, 2000, false, otpc->base + MCHP_OTPC_SR);
+}
+
+/*
+ * OTPC memory is organized into packets. Each packet contains a header and
+ * a payload. The header is 4 bytes long and contains the size of the payload.
+ * Payload size varies. The memory footprint is as follows:
+ *
+ * Memory offset	Memory footprint	Packet ID
+ * -------------	----------------	---------
+ *
+ * 0x0			+------------+		<-- packet 0
+ *			|  header 0  |
+ * 0x4			+------------+
+ *			| payload 0  |
+ *			.            .
+ *			.    ...     .
+ *			.            .
+ * offset1		+------------+		<-- packet 1
+ *			|  header 1  |
+ * offset1 + 0x4	+------------+
+ *			| payload 1  |
+ *			.            .
+ *			.    ...     .
+ *			.            .
+ * offset2		+------------+		<-- packet 2
+ *			.            .
+ *			.    ...     .
+ *			.            .
+ * offsetN		+------------+		<-- packet N
+ *			|  header N  |
+ * offsetN + 0x4	+------------+
+ *			| payload N  |
+ *			.            .
+ *			.    ...     .
+ *			.            .
+ *			+------------+
+ *
+ * where offset1, offset2, offsetN depend on the sizes of payload 0,
+ * payload 1, payload N-1.
+ *
+ * Memory is accessed on a per-packet basis: the control registers are
+ * updated with an offset address (within a packet range) and the data
+ * registers are then updated by the controller with the information
+ * contained in that packet.
+ * E.g. if the control registers are updated with any address within
+ * the range [offset1, offset2), the data registers are updated by the
+ * controller with packet 1. Header data is accessible through the
+ * MCHP_OTPC_HR register. Payload data is accessible through the
+ * MCHP_OTPC_DR and MCHP_OTPC_AR registers. There is no direct mapping
+ * between the offset requested by software and the offset returned by
+ * hardware.
+ *
+ * Because of this, the read function returns the first requested bytes in
+ * the packet. The user must be aware of the memory footprint before making
+ * the read request.
+ */
+static int mchp_otpc_read(void *priv, unsigned int off, void *val,
+			  size_t bytes)
+{
+	struct mchp_otpc *otpc = priv;
+	struct mchp_otpc_packet *packet;
+	u32 *buf = val;
+	u32 offset;
+	size_t len = 0;
+	int ret, payload_size;
+
+	/*
+	 * We reach this point with off being a multiple of stride = 4 to
+	 * be able to cross the subsystem. Inside the driver we use
+	 * consecutive integer packet ids, thus divide off by 4
+	 * before passing it to mchp_otpc_id_to_packet().
+	 */
+	packet = mchp_otpc_id_to_packet(otpc, off / 4);
+	if (!packet)
+		return -EINVAL;
+	offset = packet->offset;
+
+	while (len < bytes) {
+		ret = mchp_otpc_prepare_read(otpc, offset);
+		if (ret)
+			return ret;
+
+		/* Read and save header content. */
+		*buf++ = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+		len += sizeof(*buf);
+		offset++;
+		if (len >= bytes)
+			break;
+
+		/* Read and save payload content. */
+		payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, *(buf - 1));
+		writel_relaxed(0UL, otpc->base + MCHP_OTPC_AR);
+		do {
+			*buf++ = readl_relaxed(otpc->base + MCHP_OTPC_DR);
+			len += sizeof(*buf);
+			offset++;
+			payload_size--;
+		} while (payload_size >= 0 && len < bytes);
+	}
+
+	return 0;
+}
+
+static int mchp_otpc_init_packets_list(struct mchp_otpc *otpc, u32 *size)
+{
+	struct mchp_otpc_packet *packet;
+	u32 word, word_pos = 0, id = 0, npackets = 0, payload_size;
+	int ret;
+
+	INIT_LIST_HEAD(&otpc->packets);
+	*size = 0;
+
+	while (*size < MCHP_OTPC_SIZE) {
+		ret = mchp_otpc_prepare_read(otpc, word_pos);
+		if (ret)
+			return ret;
+
+		word = readl_relaxed(otpc->base + MCHP_OTPC_HR);
+		payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, word);
+		if (!payload_size)
+			break;
+
+		packet = devm_kzalloc(otpc->dev, sizeof(*packet), GFP_KERNEL);
+		if (!packet)
+			return -ENOMEM;
+
+		packet->id = id++;
+		packet->offset = word_pos;
+		INIT_LIST_HEAD(&packet->list);
+		list_add_tail(&packet->list, &otpc->packets);
+
+		/* Count size by adding header and payload sizes. */
+		*size += 4 * (payload_size + 1);
+		/* Next word: this packet (header, payload) position + 1.
*/ + word_pos += payload_size + 2; + + npackets++; + } + + otpc->npackets = npackets; + + return 0; +} + +static struct nvmem_config mchp_nvmem_config = { + .name = MCHP_OTPC_NAME, + .type = NVMEM_TYPE_OTP, + .read_only = true, + .word_size = 4, + .stride = 4, + .reg_read = mchp_otpc_read, +}; + +static int mchp_otpc_probe(struct platform_device *pdev) +{ + struct nvmem_device *nvmem; + struct mchp_otpc *otpc; + u32 size; + int ret; + + otpc = devm_kzalloc(&pdev->dev, sizeof(*otpc), GFP_KERNEL); + if (!otpc) + return -ENOMEM; + + otpc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(otpc->base)) + return PTR_ERR(otpc->base); + + otpc->dev = &pdev->dev; + ret = mchp_otpc_init_packets_list(otpc, &size); + if (ret) + return ret; + + mchp_nvmem_config.dev = otpc->dev; + mchp_nvmem_config.size = size; + mchp_nvmem_config.priv = otpc; + nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id __maybe_unused mchp_otpc_ids[] = { + { .compatible = "microchip,sama7g5-otpc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, mchp_otpc_ids); + +static struct platform_driver mchp_otpc_driver = { + .probe = mchp_otpc_probe, + .driver = { + .name = MCHP_OTPC_NAME, + .of_match_table = of_match_ptr(mchp_otpc_ids), + }, +}; +module_platform_driver(mchp_otpc_driver); + +MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>"); +MODULE_DESCRIPTION("Microchip SAMA7G5 OTPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c new file mode 100644 index 0000000000..b36cd0dcc8 --- /dev/null +++ b/drivers/nvmem/mtk-efuse.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com> + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/io.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/property.h> + +struct mtk_efuse_pdata { + bool uses_post_processing; +}; + +struct mtk_efuse_priv { + void __iomem *base; +}; + +static int mtk_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct mtk_efuse_priv *priv = context; + void __iomem *addr = priv->base + reg; + u8 *val = _val; + int i; + + for (i = 0; i < bytes; i++, val++) + *val = readb(addr + i); + + return 0; +} + +static int mtk_efuse_gpu_speedbin_pp(void *context, const char *id, int index, + unsigned int offset, void *data, size_t bytes) +{ + u8 *val = data; + + if (val[0] < 8) + val[0] = BIT(val[0]); + + return 0; +} + +static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem, + struct nvmem_layout *layout, + struct nvmem_cell_info *cell) +{ + size_t sz = strlen(cell->name); + + /* + * On some SoCs, the GPU speedbin is not read as bitmask but as + * a number with range [0-7] (max 3 bits): post process to use + * it in OPP tables to describe supported-hw. 
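+	 * E.g. a fused value of 3 is converted to BIT(3) = 0x8 by
+	 * mtk_efuse_gpu_speedbin_pp() above.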
+ */ + if (cell->nbits <= 3 && + strncmp(cell->name, "gpu-speedbin", min(sz, strlen("gpu-speedbin"))) == 0) + cell->read_post_process = mtk_efuse_gpu_speedbin_pp; +} + +static struct nvmem_layout mtk_efuse_layout = { + .fixup_cell_info = mtk_efuse_fixup_cell_info, +}; + +static int mtk_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct nvmem_config econfig = {}; + struct mtk_efuse_priv *priv; + const struct mtk_efuse_pdata *pdata; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + pdata = device_get_match_data(dev); + econfig.stride = 1; + econfig.word_size = 1; + econfig.reg_read = mtk_reg_read; + econfig.size = resource_size(res); + econfig.priv = priv; + econfig.dev = dev; + if (pdata->uses_post_processing) + econfig.layout = &mtk_efuse_layout; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct mtk_efuse_pdata mtk_mt8186_efuse_pdata = { + .uses_post_processing = true, +}; + +static const struct mtk_efuse_pdata mtk_efuse_pdata = { + .uses_post_processing = false, +}; + +static const struct of_device_id mtk_efuse_of_match[] = { + { .compatible = "mediatek,mt8173-efuse", .data = &mtk_efuse_pdata }, + { .compatible = "mediatek,mt8186-efuse", .data = &mtk_mt8186_efuse_pdata }, + { .compatible = "mediatek,efuse", .data = &mtk_efuse_pdata }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, mtk_efuse_of_match); + +static struct platform_driver mtk_efuse_driver = { + .probe = mtk_efuse_probe, + .driver = { + .name = "mediatek,efuse", + .of_match_table = mtk_efuse_of_match, + }, +}; + +static int __init mtk_efuse_init(void) +{ + int ret; + + ret = platform_driver_register(&mtk_efuse_driver); + if (ret) { + pr_err("Failed to register efuse driver\n"); + return ret; + } + + return 0; +} + +static void __exit mtk_efuse_exit(void) +{ + return platform_driver_unregister(&mtk_efuse_driver); +} + +subsys_initcall(mtk_efuse_init); +module_exit(mtk_efuse_exit); + +MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>"); +MODULE_DESCRIPTION("Mediatek EFUSE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c new file mode 100644 index 0000000000..588ab56d75 --- /dev/null +++ b/drivers/nvmem/mxs-ocotp.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale MXS On-Chip OTP driver + * + * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com> + * + * Based on the driver from Huang Shijie and Christoph G. 
Baumann + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/stmp_device.h> + +/* OCOTP registers and bits */ + +#define BM_OCOTP_CTRL_RD_BANK_OPEN BIT(12) +#define BM_OCOTP_CTRL_ERROR BIT(9) +#define BM_OCOTP_CTRL_BUSY BIT(8) + +#define OCOTP_TIMEOUT 10000 +#define OCOTP_DATA_OFFSET 0x20 + +struct mxs_ocotp { + struct clk *clk; + void __iomem *base; + struct nvmem_device *nvmem; +}; + +static int mxs_ocotp_wait(struct mxs_ocotp *otp) +{ + int timeout = OCOTP_TIMEOUT; + unsigned int status = 0; + + while (timeout--) { + status = readl(otp->base); + + if (!(status & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR))) + break; + + cpu_relax(); + } + + if (status & BM_OCOTP_CTRL_BUSY) + return -EBUSY; + else if (status & BM_OCOTP_CTRL_ERROR) + return -EIO; + + return 0; +} + +static int mxs_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct mxs_ocotp *otp = context; + u32 *buf = val; + int ret; + + ret = clk_enable(otp->clk); + if (ret) + return ret; + + writel(BM_OCOTP_CTRL_ERROR, otp->base + STMP_OFFSET_REG_CLR); + + ret = mxs_ocotp_wait(otp); + if (ret) + goto disable_clk; + + /* open OCOTP banks for read */ + writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_SET); + + /* approximately wait 33 hclk cycles */ + udelay(1); + + ret = mxs_ocotp_wait(otp); + if (ret) + goto close_banks; + + while (bytes) { + if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { + /* fill up non-data register */ + *buf++ = 0; + } else { + *buf++ = readl(otp->base + offset); + } + + bytes -= 4; + offset += 4; + } + +close_banks: + /* close banks for power saving */ + writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_CLR); + +disable_clk: + clk_disable(otp->clk); + + return ret; +} + +static struct nvmem_config ocotp_config = { + .name = "mxs-ocotp", + .stride = 16, + .word_size = 4, + .reg_read = mxs_ocotp_read, +}; + +struct mxs_data { + int size; +}; + +static const struct mxs_data imx23_data = { + .size = 0x220, +}; + +static const struct mxs_data imx28_data = { + .size = 0x2a0, +}; + +static const struct of_device_id mxs_ocotp_match[] = { + { .compatible = "fsl,imx23-ocotp", .data = &imx23_data }, + { .compatible = "fsl,imx28-ocotp", .data = &imx28_data }, + { /* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, mxs_ocotp_match); + +static void mxs_ocotp_action(void *data) +{ + clk_unprepare(data); +} + +static int mxs_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct mxs_data *data; + struct mxs_ocotp *otp; + const struct of_device_id *match; + int ret; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match || !match->data) + return -EINVAL; + + otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + otp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + otp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(otp->clk)) + return PTR_ERR(otp->clk); + + ret = clk_prepare(otp->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare clk: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, mxs_ocotp_action, otp->clk); + if (ret) + return ret; + + data = match->data; + + ocotp_config.size = data->size; + ocotp_config.priv = otp; + ocotp_config.dev = 
dev;
+ otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
+ if (IS_ERR(otp->nvmem))
+ return PTR_ERR(otp->nvmem);
+
+ platform_set_drvdata(pdev, otp);
+
+ return 0;
+}
+
+static struct platform_driver mxs_ocotp_driver = {
+ .probe = mxs_ocotp_probe,
+ .driver = {
+ .name = "mxs-ocotp",
+ .of_match_table = mxs_ocotp_match,
+ },
+};
+
+module_platform_driver(mxs_ocotp_driver);
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
+MODULE_DESCRIPTION("Driver for OCOTP in i.MX23/i.MX28");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/nintendo-otp.c b/drivers/nvmem/nintendo-otp.c
new file mode 100644
index 0000000000..355e7f1fc6
--- /dev/null
+++ b/drivers/nvmem/nintendo-otp.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Nintendo Wii and Wii U OTP driver
+ *
+ * This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
+ *
+ * This memory contains common and per-console keys, signatures and
+ * related data required to access peripherals.
+ *
+ * Based on reversed documentation from https://wiiubrew.org/wiki/Hardware/OTP
+ *
+ * Copyright (C) 2021 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define HW_OTPCMD 0
+#define HW_OTPDATA 4
+#define OTP_READ 0x80000000
+#define BANK_SIZE 128
+#define WORD_SIZE 4
+
+struct nintendo_otp_priv {
+ void __iomem *regs;
+};
+
+struct nintendo_otp_devtype_data {
+ const char *name;
+ unsigned int num_banks;
+};
+
+static const struct nintendo_otp_devtype_data hollywood_otp_data = {
+ .name = "wii-otp",
+ .num_banks = 1,
+};
+
+static const struct nintendo_otp_devtype_data latte_otp_data = {
+ .name = "wiiu-otp",
+ .num_banks = 8,
+};
+
+static int nintendo_otp_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct nintendo_otp_priv *priv = context;
+ u32 *val = _val;
+ int words = bytes / WORD_SIZE;
+ u32 bank, addr;
+
+ while (words--) {
+ bank = (reg / BANK_SIZE) << 8;
+ addr = (reg / WORD_SIZE) % (BANK_SIZE / WORD_SIZE);
+ iowrite32be(OTP_READ | bank | addr, priv->regs + HW_OTPCMD);
+ *val++ = ioread32be(priv->regs + HW_OTPDATA);
+ reg += WORD_SIZE;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nintendo_otp_of_table[] = {
+ { .compatible = "nintendo,hollywood-otp", .data = &hollywood_otp_data },
+ { .compatible = "nintendo,latte-otp", .data = &latte_otp_data },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, nintendo_otp_of_table);
+
+static int nintendo_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id =
+ of_match_device(nintendo_otp_of_table, dev);
+ struct nvmem_device *nvmem;
+ struct nintendo_otp_priv *priv;
+
+ struct nvmem_config config = {
+ .stride = WORD_SIZE,
+ .word_size = WORD_SIZE,
+ .reg_read = nintendo_otp_reg_read,
+ .read_only = true,
+ .root_only = true,
+ };
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ if (of_id->data) {
+ const struct nintendo_otp_devtype_data *data = of_id->data;
+ config.name = data->name;
+ config.size = data->num_banks * BANK_SIZE;
+ }
+
+ config.dev = dev;
+ config.priv = priv;
+
+ nvmem = devm_nvmem_register(dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver
nintendo_otp_driver = { + .probe = nintendo_otp_probe, + .driver = { + .name = "nintendo-otp", + .of_match_table = nintendo_otp_of_table, + }, +}; +module_platform_driver(nintendo_otp_driver); +MODULE_AUTHOR("Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>"); +MODULE_DESCRIPTION("Nintendo Wii and Wii U OTP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c new file mode 100644 index 0000000000..70f2d4f2ef --- /dev/null +++ b/drivers/nvmem/qcom-spmi-sdam.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define SDAM_MEM_START 0x40 +#define REGISTER_MAP_ID 0x40 +#define REGISTER_MAP_VERSION 0x41 +#define SDAM_SIZE 0x44 +#define SDAM_PBS_TRIG_SET 0xE5 +#define SDAM_PBS_TRIG_CLR 0xE6 + +struct sdam_chip { + struct regmap *regmap; + struct nvmem_config sdam_config; + unsigned int base; + unsigned int size; +}; + +/* read only register offsets */ +static const u8 sdam_ro_map[] = { + REGISTER_MAP_ID, + REGISTER_MAP_VERSION, + SDAM_SIZE +}; + +static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset, + size_t len) +{ + unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1; + + if (!len) + return false; + + if (offset >= SDAM_MEM_START && offset <= sdam_mem_end + && (offset + len - 1) <= sdam_mem_end) + return true; + else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR) + && (len == 1)) + return true; + + return false; +} + +static bool sdam_is_ro(unsigned int offset, size_t len) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++) + if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i]) + return true; + + return false; +} + +static int sdam_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct sdam_chip *sdam = priv; + struct device *dev = sdam->sdam_config.dev; + int rc; + + if (!sdam_is_valid(sdam, offset, bytes)) { + dev_err(dev, "Invalid SDAM offset %#x len=%zd\n", + offset, bytes); + return -EINVAL; + } + + rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes); + if (rc < 0) + dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n", + offset, bytes, rc); + + return rc; +} + +static int sdam_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct sdam_chip *sdam = priv; + struct device *dev = sdam->sdam_config.dev; + int rc; + + if (!sdam_is_valid(sdam, offset, bytes)) { + dev_err(dev, "Invalid SDAM offset %#x len=%zd\n", + offset, bytes); + return -EINVAL; + } + + if (sdam_is_ro(offset, bytes)) { + dev_err(dev, "Invalid write offset %#x len=%zd\n", + offset, bytes); + return -EINVAL; + } + + rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes); + if (rc < 0) + dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n", + offset, bytes, rc); + + return rc; +} + +static int sdam_probe(struct platform_device *pdev) +{ + struct sdam_chip *sdam; + struct nvmem_device *nvmem; + unsigned int val; + int rc; + + sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL); + if (!sdam) + return -ENOMEM; + + sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!sdam->regmap) { + dev_err(&pdev->dev, "Failed to get regmap handle\n"); + return -ENXIO; + } + + rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base); + 
if (rc < 0) { + dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc); + return -EINVAL; + } + + rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc); + return -EINVAL; + } + sdam->size = val * 32; + + sdam->sdam_config.dev = &pdev->dev; + sdam->sdam_config.name = "spmi_sdam"; + sdam->sdam_config.id = NVMEM_DEVID_AUTO; + sdam->sdam_config.owner = THIS_MODULE; + sdam->sdam_config.stride = 1; + sdam->sdam_config.word_size = 1; + sdam->sdam_config.reg_read = sdam_read; + sdam->sdam_config.reg_write = sdam_write; + sdam->sdam_config.priv = sdam; + + nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, + "Failed to register SDAM nvmem device rc=%ld\n", + PTR_ERR(nvmem)); + return -ENXIO; + } + dev_dbg(&pdev->dev, + "SDAM base=%#x size=%u registered successfully\n", + sdam->base, sdam->size); + + return 0; +} + +static const struct of_device_id sdam_match_table[] = { + { .compatible = "qcom,spmi-sdam" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdam_match_table); + +static struct platform_driver sdam_driver = { + .driver = { + .name = "qcom,spmi-sdam", + .of_match_table = sdam_match_table, + }, + .probe = sdam_probe, +}; +module_platform_driver(sdam_driver); + +MODULE_DESCRIPTION("QCOM SPMI SDAM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c new file mode 100644 index 0000000000..14814cba2d --- /dev/null +++ b/drivers/nvmem/qfprom.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> + +/* Blow timer clock frequency in Mhz */ +#define QFPROM_BLOW_TIMER_OFFSET 0x03c + +/* Amount of time required to hold charge to blow fuse in micro-seconds */ +#define QFPROM_FUSE_BLOW_POLL_US 100 +#define QFPROM_FUSE_BLOW_TIMEOUT_US 10000 + +#define QFPROM_BLOW_STATUS_OFFSET 0x048 +#define QFPROM_BLOW_STATUS_BUSY 0x1 +#define QFPROM_BLOW_STATUS_READY 0x0 + +#define QFPROM_ACCEL_OFFSET 0x044 + +#define QFPROM_VERSION_OFFSET 0x0 +#define QFPROM_MAJOR_VERSION_SHIFT 28 +#define QFPROM_MAJOR_VERSION_MASK GENMASK(31, QFPROM_MAJOR_VERSION_SHIFT) +#define QFPROM_MINOR_VERSION_SHIFT 16 +#define QFPROM_MINOR_VERSION_MASK GENMASK(27, QFPROM_MINOR_VERSION_SHIFT) + +static bool read_raw_data; +module_param(read_raw_data, bool, 0644); +MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data"); + +/** + * struct qfprom_soc_data - config that varies from SoC to SoC. + * + * @accel_value: Should contain qfprom accel value. + * @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow. + * @qfprom_blow_set_freq: The frequency required to set when we start the + * fuse blowing. + * @qfprom_blow_uV: LDO voltage to be set when doing efuse blow + */ +struct qfprom_soc_data { + u32 accel_value; + u32 qfprom_blow_timer_value; + u32 qfprom_blow_set_freq; + int qfprom_blow_uV; +}; + +/** + * struct qfprom_priv - structure holding qfprom attributes + * + * @qfpraw: iomapped memory space for qfprom-efuse raw address space. 
+ * @qfpconf: iomapped memory space for qfprom-efuse configuration address
+ * space.
+ * @qfpcorrected: iomapped memory space for qfprom corrected address space.
+ * @qfpsecurity: iomapped memory space for qfprom security control space.
+ * @dev: qfprom device structure.
+ * @secclk: Clock supply.
+ * @vcc: Regulator supply.
+ * @soc_data: Data for things that vary from SoC to SoC.
+ */
+struct qfprom_priv {
+ void __iomem *qfpraw;
+ void __iomem *qfpconf;
+ void __iomem *qfpcorrected;
+ void __iomem *qfpsecurity;
+ struct device *dev;
+ struct clk *secclk;
+ struct regulator *vcc;
+ const struct qfprom_soc_data *soc_data;
+};
+
+/**
+ * struct qfprom_touched_values - saved values to restore after blowing
+ *
+ * @clk_rate: The rate the clock was at before blowing.
+ * @accel_val: The value of the accel reg before blowing.
+ * @timer_val: The value of the timer before blowing.
+ */
+struct qfprom_touched_values {
+ unsigned long clk_rate;
+ u32 accel_val;
+ u32 timer_val;
+};
+
+/**
+ * struct qfprom_soc_compatible_data - Data matched against the SoC
+ * compatible string.
+ *
+ * @keepout: Array of keepout regions for this SoC.
+ * @nkeepout: Number of elements in the keepout array.
+ */
+struct qfprom_soc_compatible_data {
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
+};
+
+static const struct nvmem_keepout sc7180_qfprom_keepout[] = {
+ {.start = 0x128, .end = 0x148},
+ {.start = 0x220, .end = 0x228}
+};
+
+static const struct qfprom_soc_compatible_data sc7180_qfprom = {
+ .keepout = sc7180_qfprom_keepout,
+ .nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout)
+};
+
+static const struct nvmem_keepout sc7280_qfprom_keepout[] = {
+ {.start = 0x128, .end = 0x148},
+ {.start = 0x238, .end = 0x248}
+};
+
+static const struct qfprom_soc_compatible_data sc7280_qfprom = {
+ .keepout = sc7280_qfprom_keepout,
+ .nkeepout = ARRAY_SIZE(sc7280_qfprom_keepout)
+};
+
+/**
+ * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
+ * @priv: Our driver data.
+ * @old: The data that was stashed from before fuse blowing.
+ *
+ * Resets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors but doesn't return an error code
+ * since there's not much we can do upon failure.
+ */
+static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
+ const struct qfprom_touched_values *old)
+{
+ int ret;
+
+ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
+
+ dev_pm_genpd_set_performance_state(priv->dev, 0);
+ pm_runtime_put(priv->dev);
+
+ /*
+ * This may be a shared rail and may be able to run at a lower rate
+ * when we're not blowing fuses. At the moment, the regulator framework
+ * applies voltage constraints even on disabled rails, so remove our
+ * constraints and allow the rail to be adjusted by other users.
+ */
+ ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
+ if (ret)
+ dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
+
+ ret = regulator_disable(priv->vcc);
+ if (ret)
+ dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
+
+ ret = clk_set_rate(priv->secclk, old->clk_rate);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to set clock rate for disable (ignoring)\n");
+
+ clk_disable_unprepare(priv->secclk);
+}
+
+/**
+ * qfprom_enable_fuse_blowing() - Enable fuse blowing.
+ * @priv: Our driver data.
+ * @old: We'll stash stuff here to use when disabling.
+ * + * Sets the value of the blow timer, accel register and the clock + * and voltage settings. + * + * Prints messages if there are errors so caller doesn't need to. + * + * Return: 0 or -err. + */ +static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv, + struct qfprom_touched_values *old) +{ + int ret; + int qfprom_blow_uV = priv->soc_data->qfprom_blow_uV; + + ret = clk_prepare_enable(priv->secclk); + if (ret) { + dev_err(priv->dev, "Failed to enable clock\n"); + return ret; + } + + old->clk_rate = clk_get_rate(priv->secclk); + ret = clk_set_rate(priv->secclk, priv->soc_data->qfprom_blow_set_freq); + if (ret) { + dev_err(priv->dev, "Failed to set clock rate for enable\n"); + goto err_clk_prepared; + } + + /* + * Hardware requires a minimum voltage for fuse blowing. + * This may be a shared rail so don't specify a maximum. + * Regulator constraints will cap to the actual maximum. + */ + ret = regulator_set_voltage(priv->vcc, qfprom_blow_uV, INT_MAX); + if (ret) { + dev_err(priv->dev, "Failed to set %duV\n", qfprom_blow_uV); + goto err_clk_rate_set; + } + + ret = regulator_enable(priv->vcc); + if (ret) { + dev_err(priv->dev, "Failed to enable regulator\n"); + goto err_clk_rate_set; + } + + ret = pm_runtime_resume_and_get(priv->dev); + if (ret < 0) { + dev_err(priv->dev, "Failed to enable power-domain\n"); + goto err_reg_enable; + } + dev_pm_genpd_set_performance_state(priv->dev, INT_MAX); + + old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); + old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET); + writel(priv->soc_data->qfprom_blow_timer_value, + priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); + writel(priv->soc_data->accel_value, + priv->qfpconf + QFPROM_ACCEL_OFFSET); + + return 0; + +err_reg_enable: + regulator_disable(priv->vcc); +err_clk_rate_set: + clk_set_rate(priv->secclk, old->clk_rate); +err_clk_prepared: + clk_disable_unprepare(priv->secclk); + return ret; +} + +/** + * qfprom_reg_write() - Write to fuses. + * @context: Our driver data. + * @reg: The offset to write at. + * @_val: Pointer to data to write. + * @bytes: The number of bytes to write. + * + * Writes to fuses. WARNING: THIS IS PERMANENT. + * + * Return: 0 or -err. + */ +static int qfprom_reg_write(void *context, unsigned int reg, void *_val, + size_t bytes) +{ + struct qfprom_priv *priv = context; + struct qfprom_touched_values old; + int words = bytes / 4; + u32 *value = _val; + u32 blow_status; + int ret; + int i; + + dev_dbg(priv->dev, + "Writing to raw qfprom region : %#010x of size: %zu\n", + reg, bytes); + + /* + * The hardware only allows us to write word at a time, but we can + * read byte at a time. Until the nvmem framework allows a separate + * word_size and stride for reading vs. writing, we'll enforce here. + */ + if (bytes % 4) { + dev_err(priv->dev, + "%zu is not an integral number of words\n", bytes); + return -EINVAL; + } + if (reg % 4) { + dev_err(priv->dev, + "Invalid offset: %#x. 
Must be word aligned\n", reg); + return -EINVAL; + } + + ret = qfprom_enable_fuse_blowing(priv, &old); + if (ret) + return ret; + + ret = readl_relaxed_poll_timeout( + priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET, + blow_status, blow_status == QFPROM_BLOW_STATUS_READY, + QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US); + + if (ret) { + dev_err(priv->dev, + "Timeout waiting for initial ready; aborting.\n"); + goto exit_enabled_fuse_blowing; + } + + for (i = 0; i < words; i++) + writel(value[i], priv->qfpraw + reg + (i * 4)); + + ret = readl_relaxed_poll_timeout( + priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET, + blow_status, blow_status == QFPROM_BLOW_STATUS_READY, + QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US); + + /* Give an error, but not much we can do in this case */ + if (ret) + dev_err(priv->dev, "Timeout waiting for finish.\n"); + +exit_enabled_fuse_blowing: + qfprom_disable_fuse_blowing(priv, &old); + + return ret; +} + +static int qfprom_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct qfprom_priv *priv = context; + u8 *val = _val; + int i = 0, words = bytes; + void __iomem *base = priv->qfpcorrected; + + if (read_raw_data && priv->qfpraw) + base = priv->qfpraw; + + while (words--) + *val++ = readb(base + reg + i++); + + return 0; +} + +static void qfprom_runtime_disable(void *data) +{ + pm_runtime_disable(data); +} + +static const struct qfprom_soc_data qfprom_7_8_data = { + .accel_value = 0xD10, + .qfprom_blow_timer_value = 25, + .qfprom_blow_set_freq = 4800000, + .qfprom_blow_uV = 1800000, +}; + +static const struct qfprom_soc_data qfprom_7_15_data = { + .accel_value = 0xD08, + .qfprom_blow_timer_value = 24, + .qfprom_blow_set_freq = 4800000, + .qfprom_blow_uV = 1900000, +}; + +static int qfprom_probe(struct platform_device *pdev) +{ + struct nvmem_config econfig = { + .name = "qfprom", + .stride = 1, + .word_size = 1, + .id = NVMEM_DEVID_AUTO, + .reg_read = qfprom_reg_read, + }; + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + const struct qfprom_soc_compatible_data *soc_data; + struct qfprom_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* The corrected section is always provided */ + priv->qfpcorrected = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->qfpcorrected)) + return PTR_ERR(priv->qfpcorrected); + + econfig.size = resource_size(res); + econfig.dev = dev; + econfig.priv = priv; + + priv->dev = dev; + soc_data = device_get_match_data(dev); + if (soc_data) { + econfig.keepout = soc_data->keepout; + econfig.nkeepout = soc_data->nkeepout; + } + + /* + * If more than one region is provided then the OS has the ability + * to write. 
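+ * Memory regions are indexed as: 0 = corrected data, 1 = raw fuses,
+ * 2 = blow configuration and 3 = security control, matching the
+ * lookups below.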
+ */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + u32 version; + int major_version, minor_version; + + priv->qfpraw = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->qfpraw)) + return PTR_ERR(priv->qfpraw); + priv->qfpconf = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(priv->qfpconf)) + return PTR_ERR(priv->qfpconf); + priv->qfpsecurity = devm_platform_ioremap_resource(pdev, 3); + if (IS_ERR(priv->qfpsecurity)) + return PTR_ERR(priv->qfpsecurity); + + version = readl(priv->qfpsecurity + QFPROM_VERSION_OFFSET); + major_version = (version & QFPROM_MAJOR_VERSION_MASK) >> + QFPROM_MAJOR_VERSION_SHIFT; + minor_version = (version & QFPROM_MINOR_VERSION_MASK) >> + QFPROM_MINOR_VERSION_SHIFT; + + if (major_version == 7 && minor_version == 8) + priv->soc_data = &qfprom_7_8_data; + else if (major_version == 7 && minor_version == 15) + priv->soc_data = &qfprom_7_15_data; + + priv->vcc = devm_regulator_get(&pdev->dev, "vcc"); + if (IS_ERR(priv->vcc)) + return PTR_ERR(priv->vcc); + + priv->secclk = devm_clk_get(dev, "core"); + if (IS_ERR(priv->secclk)) + return dev_err_probe(dev, PTR_ERR(priv->secclk), "Error getting clock\n"); + + /* Only enable writing if we have SoC data. */ + if (priv->soc_data) + econfig.reg_write = qfprom_reg_write; + } + + pm_runtime_enable(dev); + ret = devm_add_action_or_reset(dev, qfprom_runtime_disable, dev); + if (ret) + return ret; + + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id qfprom_of_match[] = { + { .compatible = "qcom,qfprom",}, + { .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom}, + { .compatible = "qcom,sc7280-qfprom", .data = &sc7280_qfprom}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, qfprom_of_match); + +static struct platform_driver qfprom_driver = { + .probe = qfprom_probe, + .driver = { + .name = "qcom,qfprom", + .of_match_table = qfprom_of_match, + }, +}; +module_platform_driver(qfprom_driver); +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm QFPROM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qoriq-efuse.c b/drivers/nvmem/qoriq-efuse.c new file mode 100644 index 0000000000..e7fd04d6dd --- /dev/null +++ b/drivers/nvmem/qoriq-efuse.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Westermo Network Technologies AB + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct qoriq_efuse_priv { + void __iomem *base; +}; + +static int qoriq_efuse_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct qoriq_efuse_priv *priv = context; + + /* .stride = 4 so offset is guaranteed to be aligned */ + __ioread32_copy(val, priv->base + offset, bytes / 4); + + /* Ignore trailing bytes (there shouldn't be any) */ + + return 0; +} + +static int qoriq_efuse_probe(struct platform_device *pdev) +{ + struct nvmem_config config = { + .dev = &pdev->dev, + .read_only = true, + .reg_read = qoriq_efuse_read, + .stride = sizeof(u32), + .word_size = sizeof(u32), + .name = "qoriq_efuse_read", + .id = NVMEM_DEVID_AUTO, + .root_only = true, + }; + struct qoriq_efuse_priv *priv; + struct nvmem_device *nvmem; + struct resource *res; + + priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + 
if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ config.size = resource_size(res);
+ config.priv = priv;
+ nvmem = devm_nvmem_register(config.dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id qoriq_efuse_of_match[] = {
+ { .compatible = "fsl,t1023-sfp", },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qoriq_efuse_of_match);
+
+static struct platform_driver qoriq_efuse_driver = {
+ .probe = qoriq_efuse_probe,
+ .driver = {
+ .name = "qoriq-efuse",
+ .of_match_table = qoriq_efuse_of_match,
+ },
+};
+module_platform_driver(qoriq_efuse_driver);
+
+MODULE_AUTHOR("Richard Alpe <richard.alpe@bit42.se>");
+MODULE_DESCRIPTION("NXP QorIQ Security Fuse Processor (SFP) Reader");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
new file mode 100644
index 0000000000..df6a1c594b
--- /dev/null
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * EEPROM driver for RAVE SP
+ *
+ * Copyright (C) 2018 Zodiac Inflight Innovations
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+/**
+ * enum rave_sp_eeprom_access_type - Supported types of EEPROM access
+ *
+ * @RAVE_SP_EEPROM_WRITE: EEPROM write
+ * @RAVE_SP_EEPROM_READ: EEPROM read
+ */
+enum rave_sp_eeprom_access_type {
+ RAVE_SP_EEPROM_WRITE = 0,
+ RAVE_SP_EEPROM_READ = 1,
+};
+
+/**
+ * enum rave_sp_eeprom_header_size - EEPROM command header sizes
+ *
+ * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (< 8K)
+ * @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K)
+ */
+enum rave_sp_eeprom_header_size {
+ RAVE_SP_EEPROM_HEADER_SMALL = 4U,
+ RAVE_SP_EEPROM_HEADER_BIG = 5U,
+};
+#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG
+
+#define RAVE_SP_EEPROM_PAGE_SIZE 32U
+
+/**
+ * struct rave_sp_eeprom_page - RAVE SP EEPROM page
+ *
+ * @type: Access type (see enum rave_sp_eeprom_access_type)
+ * @success: Success flag (Success = 1, Failure = 0)
+ * @data: Read data
+ *
+ * Note this structure corresponds to RSP_*_EEPROM payload from RAVE
+ * SP ICD
+ */
+struct rave_sp_eeprom_page {
+ u8 type;
+ u8 success;
+ u8 data[RAVE_SP_EEPROM_PAGE_SIZE];
+} __packed;
+
+/**
+ * struct rave_sp_eeprom - RAVE SP EEPROM device
+ *
+ * @sp: Pointer to parent RAVE SP device
+ * @mutex: Lock protecting access to EEPROM
+ * @address: EEPROM device address
+ * @header_size: Size of EEPROM command header for this device
+ * @dev: Pointer to corresponding struct device used for logging
+ */
+struct rave_sp_eeprom {
+ struct rave_sp *sp;
+ struct mutex mutex;
+ u8 address;
+ unsigned int header_size;
+ struct device *dev;
+};
+
+/**
+ * rave_sp_eeprom_io - Low-level part of EEPROM page access
+ *
+ * @eeprom: EEPROM device to write to
+ * @type: EEPROM access type (read or write)
+ * @idx: number of the EEPROM page
+ * @page: Data to write or buffer to store result (via page->data)
+ *
+ * This function does all of the low-level work required to perform an
+ * EEPROM access. This includes formatting the correct command payload,
+ * sending it and checking received results.
+ *
+ * Returns zero in case of success or negative error code in
+ * case of failure.
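+ *
+ * The command buffer is laid out as: EEPROM address, a zero byte, the
+ * access type, the page number LSB, then (on "big" devices) the page
+ * number MSB and, for writes, a full page of data.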
+ */
+static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom,
+ enum rave_sp_eeprom_access_type type,
+ u16 idx,
+ struct rave_sp_eeprom_page *page)
+{
+ const bool is_write = type == RAVE_SP_EEPROM_WRITE;
+ const unsigned int data_size = is_write ? sizeof(page->data) : 0;
+ const unsigned int cmd_size = eeprom->header_size + data_size;
+ const unsigned int rsp_size =
+ is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
+ unsigned int offset = 0;
+ u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)];
+ int ret;
+
+ if (WARN_ON(cmd_size > sizeof(cmd)))
+ return -EINVAL;
+
+ cmd[offset++] = eeprom->address;
+ cmd[offset++] = 0;
+ cmd[offset++] = type;
+ cmd[offset++] = idx;
+
+ /*
+ * If there's still room in this command's header it means we
+ * are talking to an EEPROM that uses 16-bit page numbers and we
+ * have to specify the index's MSB in the payload as well.
+ */
+ if (offset < eeprom->header_size)
+ cmd[offset++] = idx >> 8;
+ /*
+ * Copy our data to write to the command buffer first. In the case
+ * of a read, data_size is zero and the memcpy becomes a no-op.
+ */
+ memcpy(&cmd[offset], page->data, data_size);
+
+ ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size);
+ if (ret)
+ return ret;
+
+ if (page->type != type)
+ return -EPROTO;
+
+ if (!page->success)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * rave_sp_eeprom_page_access - Access single EEPROM page
+ *
+ * @eeprom: EEPROM device to access
+ * @type: Access type to perform (read or write)
+ * @offset: Offset within EEPROM to access
+ * @data: Data buffer
+ * @data_len: Size of the data buffer
+ *
+ * This function performs a generic access to a single page or a
+ * portion thereof. The requested access MUST NOT cross the EEPROM page
+ * boundary.
+ *
+ * Returns zero in case of success or negative error code in
+ * case of failure.
+ */
+static int
+rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom,
+ enum rave_sp_eeprom_access_type type,
+ unsigned int offset, u8 *data,
+ size_t data_len)
+{
+ const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;
+ const unsigned int page_nr = offset / RAVE_SP_EEPROM_PAGE_SIZE;
+ struct rave_sp_eeprom_page page;
+ int ret;
+
+ /*
+ * This function will not work if the data access we've been asked
+ * to do crosses an EEPROM page boundary. Normally this
+ * should never happen and getting here would indicate a bug
+ * in the code.
+ */
+ if (WARN_ON(data_len > sizeof(page.data) - page_offset))
+ return -EINVAL;
+
+ if (type == RAVE_SP_EEPROM_WRITE) {
+ /*
+ * If doing a partial write we need to do a read first
+ * to fill the rest of the page with correct data.
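+ * E.g. a 4-byte write at page offset 10 reads the whole
+ * 32-byte page, patches bytes 10..13 and writes the full
+ * page back.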
+ */
+ if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) {
+ ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ,
+ page_nr, &page);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(&page.data[page_offset], data, data_len);
+ }
+
+ ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page);
+ if (ret)
+ return ret;
+
+ /*
+ * Since we receive the result of the read via the 'page.data'
+ * buffer we need to copy that to 'data'.
+ */
+ if (type == RAVE_SP_EEPROM_READ)
+ memcpy(data, &page.data[page_offset], data_len);
+
+ return 0;
+}
+
+/**
+ * rave_sp_eeprom_access - Access EEPROM data
+ *
+ * @eeprom: EEPROM device to access
+ * @type: Access type to perform (read or write)
+ * @offset: Offset within EEPROM to access
+ * @data: Data buffer
+ * @data_len: Size of the data buffer
+ *
+ * This function performs a generic access (either read or write) at
+ * an arbitrary offset (not necessarily page aligned) of arbitrary
+ * length (not constrained by the EEPROM page size).
+ *
+ * Returns zero in case of success or negative error code in case of
+ * failure.
+ */
+static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom,
+ enum rave_sp_eeprom_access_type type,
+ unsigned int offset, u8 *data,
+ unsigned int data_len)
+{
+ unsigned int residue;
+ unsigned int chunk;
+ unsigned int head;
+ int ret;
+
+ mutex_lock(&eeprom->mutex);
+
+ head = offset % RAVE_SP_EEPROM_PAGE_SIZE;
+ residue = data_len;
+
+ do {
+ /*
+ * On the first iteration, if we are doing an access that
+ * is not 32-byte aligned, we need to access only data up
+ * to a page boundary to avoid crossing it in
+ * rave_sp_eeprom_page_access().
+ */
+ if (unlikely(head)) {
+ chunk = RAVE_SP_EEPROM_PAGE_SIZE - head;
+ /*
+ * This can only happen once per
+ * rave_sp_eeprom_access() call, so we set
+ * head to zero to process all the other
+ * iterations normally.
+ */
+ head = 0;
+ } else {
+ chunk = RAVE_SP_EEPROM_PAGE_SIZE;
+ }
+
+ /*
+ * We should never read more than 'residue' bytes.
+ */
+ chunk = min(chunk, residue);
+ ret = rave_sp_eeprom_page_access(eeprom, type, offset,
+ data, chunk);
+ if (ret)
+ goto out;
+
+ residue -= chunk;
+ offset += chunk;
+ data += chunk;
+ } while (residue);
+out:
+ mutex_unlock(&eeprom->mutex);
+ return ret;
+}
+
+static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset,
+ void *val, size_t bytes)
+{
+ return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ,
+ offset, val, bytes);
+}
+
+static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset,
+ void *val, size_t bytes)
+{
+ return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE,
+ offset, val, bytes);
+}
+
+static int rave_sp_eeprom_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct device_node *np = dev->of_node;
+ struct nvmem_config config = { 0 };
+ struct rave_sp_eeprom *eeprom;
+ struct nvmem_device *nvmem;
+ u32 reg[2], size;
+
+ if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) {
+ dev_err(dev, "Failed to parse \"reg\" property\n");
+ return -EINVAL;
+ }
+
+ size = reg[1];
+ /*
+ * Per ICD, we have no more than 2 bytes to specify the EEPROM
+ * page.
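+ * With 32-byte pages this caps the supported size at
+ * U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE bytes, i.e. just under 2 MiB.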
+ */
+ if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) {
+ dev_err(dev, "Specified size is too big\n");
+ return -EINVAL;
+ }
+
+ eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ eeprom->address = reg[0];
+ eeprom->sp = sp;
+ eeprom->dev = dev;
+
+ if (size > SZ_8K)
+ eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG;
+ else
+ eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL;
+
+ mutex_init(&eeprom->mutex);
+
+ config.id = -1;
+ of_property_read_string(np, "zii,eeprom-name", &config.name);
+ config.priv = eeprom;
+ config.dev = dev;
+ config.size = size;
+ config.reg_read = rave_sp_eeprom_reg_read;
+ config.reg_write = rave_sp_eeprom_reg_write;
+ config.word_size = 1;
+ config.stride = 1;
+
+ nvmem = devm_nvmem_register(dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id rave_sp_eeprom_of_match[] = {
+ { .compatible = "zii,rave-sp-eeprom" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match);
+
+static struct platform_driver rave_sp_eeprom_driver = {
+ .probe = rave_sp_eeprom_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_eeprom_of_match,
+ },
+};
+module_platform_driver(rave_sp_eeprom_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP EEPROM driver");
diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
new file mode 100644
index 0000000000..752d0bf444
--- /dev/null
+++ b/drivers/nvmem/rmem.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+
+struct rmem {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ struct reserved_mem *mem;
+
+ phys_addr_t size;
+};
+
+static int rmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rmem *priv = context;
+ size_t available = priv->mem->size;
+ loff_t off = offset;
+ void *addr;
+ int count;
+
+ /*
+ * Only map the reserved memory at this point to avoid potential rogue
+ * kernel threads inadvertently modifying it. Based on the current
+ * use cases for this driver, the performance hit isn't a concern.
+ * Nor is it likely to be, given the nature of the subsystem. Most
+ * nvmem devices operate over slow buses to begin with.
+ *
+ * An alternative would be setting the memory as RO, set_memory_ro(),
+ * but as of Dec 2020 this isn't possible on arm64.
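+ *
+ * The trade-off is a memremap()/memunmap() pair on every call, which
+ * is fine for the small, rarely read blobs this driver targets.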
+ */ + addr = memremap(priv->mem->base, available, MEMREMAP_WB); + if (!addr) { + dev_err(priv->dev, "Failed to remap memory region\n"); + return -ENOMEM; + } + + count = memory_read_from_buffer(val, bytes, &off, addr, available); + + memunmap(addr); + + return count; +} + +static int rmem_probe(struct platform_device *pdev) +{ + struct nvmem_config config = { }; + struct device *dev = &pdev->dev; + struct reserved_mem *mem; + struct rmem *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->dev = dev; + + mem = of_reserved_mem_lookup(dev->of_node); + if (!mem) { + dev_err(dev, "Failed to lookup reserved memory\n"); + return -EINVAL; + } + priv->mem = mem; + + config.dev = dev; + config.priv = priv; + config.name = "rmem"; + config.id = NVMEM_DEVID_AUTO; + config.size = mem->size; + config.reg_read = rmem_read; + + return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config)); +} + +static const struct of_device_id rmem_match[] = { + { .compatible = "nvmem-rmem", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rmem_match); + +static struct platform_driver rmem_driver = { + .probe = rmem_probe, + .driver = { + .name = "rmem", + .of_match_table = rmem_match, + }, +}; +module_platform_driver(rmem_driver); + +MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>"); +MODULE_DESCRIPTION("Reserved Memory Based nvmem Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c new file mode 100644 index 0000000000..4004c5bece --- /dev/null +++ b/drivers/nvmem/rockchip-efuse.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Rockchip eFuse Driver + * + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Caesar Wang <wxt@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#define RK3288_A_SHIFT 6 +#define RK3288_A_MASK 0x3ff +#define RK3288_PGENB BIT(3) +#define RK3288_LOAD BIT(2) +#define RK3288_STROBE BIT(1) +#define RK3288_CSB BIT(0) + +#define RK3328_SECURE_SIZES 96 +#define RK3328_INT_STATUS 0x0018 +#define RK3328_DOUT 0x0020 +#define RK3328_AUTO_CTRL 0x0024 +#define RK3328_INT_FINISH BIT(0) +#define RK3328_AUTO_ENB BIT(0) +#define RK3328_AUTO_RD BIT(1) + +#define RK3399_A_SHIFT 16 +#define RK3399_A_MASK 0x3ff +#define RK3399_NBYTES 4 +#define RK3399_STROBSFTSEL BIT(9) +#define RK3399_RSB BIT(7) +#define RK3399_PD BIT(5) +#define RK3399_PGENB BIT(3) +#define RK3399_LOAD BIT(2) +#define RK3399_STROBE BIT(1) +#define RK3399_CSB BIT(0) + +#define REG_EFUSE_CTRL 0x0000 +#define REG_EFUSE_DOUT 0x0004 + +struct rockchip_efuse_chip { + struct device *dev; + void __iomem *base; + struct clk *clk; +}; + +static int rockchip_rk3288_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + u8 *buf = val; + int ret; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL); + udelay(1); + while (bytes--) { + writel(readl(efuse->base + REG_EFUSE_CTRL) & + (~(RK3288_A_MASK << RK3288_A_SHIFT)), + efuse->base + REG_EFUSE_CTRL); + writel(readl(efuse->base + REG_EFUSE_CTRL) | + ((offset++ & RK3288_A_MASK) << 
RK3288_A_SHIFT), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + writel(readl(efuse->base + REG_EFUSE_CTRL) | + RK3288_STROBE, efuse->base + REG_EFUSE_CTRL); + udelay(1); + *buf++ = readb(efuse->base + REG_EFUSE_DOUT); + writel(readl(efuse->base + REG_EFUSE_CTRL) & + (~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL); + udelay(1); + } + + /* Switch to standby mode */ + writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL); + + clk_disable_unprepare(efuse->clk); + + return 0; +} + +static int rockchip_rk3328_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + unsigned int addr_start, addr_end, addr_offset, addr_len; + u32 out_value, status; + u8 *buf; + int ret, i = 0; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + /* 128 Byte efuse, 96 Byte for secure, 32 Byte for non-secure */ + offset += RK3328_SECURE_SIZES; + addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES; + addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES; + addr_offset = offset % RK3399_NBYTES; + addr_len = addr_end - addr_start; + + buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)), + GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto nomem; + } + + while (addr_len--) { + writel(RK3328_AUTO_RD | RK3328_AUTO_ENB | + ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT), + efuse->base + RK3328_AUTO_CTRL); + udelay(4); + status = readl(efuse->base + RK3328_INT_STATUS); + if (!(status & RK3328_INT_FINISH)) { + ret = -EIO; + goto err; + } + out_value = readl(efuse->base + RK3328_DOUT); + writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS); + + memcpy(&buf[i], &out_value, RK3399_NBYTES); + i += RK3399_NBYTES; + } + + memcpy(val, buf + addr_offset, bytes); +err: + kfree(buf); +nomem: + clk_disable_unprepare(efuse->clk); + + return ret; +} + +static int rockchip_rk3399_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + unsigned int addr_start, addr_end, addr_offset, addr_len; + u32 out_value; + u8 *buf; + int ret, i = 0; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES; + addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES; + addr_offset = offset % RK3399_NBYTES; + addr_len = addr_end - addr_start; + + buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)), + GFP_KERNEL); + if (!buf) { + clk_disable_unprepare(efuse->clk); + return -ENOMEM; + } + + writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB, + efuse->base + REG_EFUSE_CTRL); + udelay(1); + while (addr_len--) { + writel(readl(efuse->base + REG_EFUSE_CTRL) | RK3399_STROBE | + ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + out_value = readl(efuse->base + REG_EFUSE_DOUT); + writel(readl(efuse->base + REG_EFUSE_CTRL) & (~RK3399_STROBE), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + + memcpy(&buf[i], &out_value, RK3399_NBYTES); + i += RK3399_NBYTES; + } + + /* Switch to standby mode */ + writel(RK3399_PD | RK3399_CSB, efuse->base + REG_EFUSE_CTRL); + + memcpy(val, buf + addr_offset, bytes); + + kfree(buf); + + clk_disable_unprepare(efuse->clk); + + return 0; +} + +static struct nvmem_config econfig = { + .name = "rockchip-efuse", + .stride = 1, 
+ .word_size = 1, + .read_only = true, +}; + +static const struct of_device_id rockchip_efuse_match[] = { + /* deprecated but kept around for dts binding compatibility */ + { + .compatible = "rockchip,rockchip-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3066a-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3188-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3228-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3288-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3368-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3328-efuse", + .data = (void *)&rockchip_rk3328_efuse_read, + }, + { + .compatible = "rockchip,rk3399-efuse", + .data = (void *)&rockchip_rk3399_efuse_read, + }, + { /* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, rockchip_efuse_match); + +static int rockchip_efuse_probe(struct platform_device *pdev) +{ + struct resource *res; + struct nvmem_device *nvmem; + struct rockchip_efuse_chip *efuse; + const void *data; + struct device *dev = &pdev->dev; + + data = of_device_get_match_data(dev); + if (!data) { + dev_err(dev, "failed to get match data\n"); + return -EINVAL; + } + + efuse = devm_kzalloc(dev, sizeof(struct rockchip_efuse_chip), + GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + efuse->clk = devm_clk_get(dev, "pclk_efuse"); + if (IS_ERR(efuse->clk)) + return PTR_ERR(efuse->clk); + + efuse->dev = dev; + if (of_property_read_u32(dev->of_node, "rockchip,efuse-size", + &econfig.size)) + econfig.size = resource_size(res); + econfig.reg_read = data; + econfig.priv = efuse; + econfig.dev = efuse->dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver rockchip_efuse_driver = { + .probe = rockchip_efuse_probe, + .driver = { + .name = "rockchip-efuse", + .of_match_table = rockchip_efuse_match, + }, +}; + +module_platform_driver(rockchip_efuse_driver); +MODULE_DESCRIPTION("rockchip_efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c new file mode 100644 index 0000000000..cb9aa54283 --- /dev/null +++ b/drivers/nvmem/rockchip-otp.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Rockchip OTP Driver + * + * Copyright (c) 2018 Rockchip Electronics Co. Ltd. 
+ * Author: Finley Xiao <finley.xiao@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +/* OTP Register Offsets */ +#define OTPC_SBPI_CTRL 0x0020 +#define OTPC_SBPI_CMD_VALID_PRE 0x0024 +#define OTPC_SBPI_CS_VALID_PRE 0x0028 +#define OTPC_SBPI_STATUS 0x002C +#define OTPC_USER_CTRL 0x0100 +#define OTPC_USER_ADDR 0x0104 +#define OTPC_USER_ENABLE 0x0108 +#define OTPC_USER_Q 0x0124 +#define OTPC_INT_STATUS 0x0304 +#define OTPC_SBPI_CMD0_OFFSET 0x1000 +#define OTPC_SBPI_CMD1_OFFSET 0x1004 + +/* OTP Register bits and masks */ +#define OTPC_USER_ADDR_MASK GENMASK(31, 16) +#define OTPC_USE_USER BIT(0) +#define OTPC_USE_USER_MASK GENMASK(16, 16) +#define OTPC_USER_FSM_ENABLE BIT(0) +#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16) +#define OTPC_SBPI_DONE BIT(1) +#define OTPC_USER_DONE BIT(2) + +#define SBPI_DAP_ADDR 0x02 +#define SBPI_DAP_ADDR_SHIFT 8 +#define SBPI_DAP_ADDR_MASK GENMASK(31, 24) +#define SBPI_CMD_VALID_MASK GENMASK(31, 16) +#define SBPI_DAP_CMD_WRF 0xC0 +#define SBPI_DAP_REG_ECC 0x3A +#define SBPI_ECC_ENABLE 0x00 +#define SBPI_ECC_DISABLE 0x09 +#define SBPI_ENABLE BIT(0) +#define SBPI_ENABLE_MASK GENMASK(16, 16) + +#define OTPC_TIMEOUT 10000 + +/* RK3588 Register */ +#define RK3588_OTPC_AUTO_CTRL 0x04 +#define RK3588_OTPC_AUTO_EN 0x08 +#define RK3588_OTPC_INT_ST 0x84 +#define RK3588_OTPC_DOUT0 0x20 +#define RK3588_NO_SECURE_OFFSET 0x300 +#define RK3588_NBYTES 4 +#define RK3588_BURST_NUM 1 +#define RK3588_BURST_SHIFT 8 +#define RK3588_ADDR_SHIFT 16 +#define RK3588_AUTO_EN BIT(0) +#define RK3588_RD_DONE BIT(1) + +struct rockchip_data { + int size; + const char * const *clks; + int num_clks; + nvmem_reg_read_t reg_read; +}; + +struct rockchip_otp { + struct device *dev; + void __iomem *base; + struct clk_bulk_data *clks; + struct reset_control *rst; + const struct rockchip_data *data; +}; + +static int rockchip_otp_reset(struct rockchip_otp *otp) +{ + int ret; + + ret = reset_control_assert(otp->rst); + if (ret) { + dev_err(otp->dev, "failed to assert otp phy %d\n", ret); + return ret; + } + + udelay(2); + + ret = reset_control_deassert(otp->rst); + if (ret) { + dev_err(otp->dev, "failed to deassert otp phy %d\n", ret); + return ret; + } + + return 0; +} + +static int rockchip_otp_wait_status(struct rockchip_otp *otp, + unsigned int reg, u32 flag) +{ + u32 status = 0; + int ret; + + ret = readl_poll_timeout_atomic(otp->base + reg, status, + (status & flag), 1, OTPC_TIMEOUT); + if (ret) + return ret; + + /* clean int status */ + writel(flag, otp->base + reg); + + return 0; +} + +static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable) +{ + int ret = 0; + + writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT), + otp->base + OTPC_SBPI_CTRL); + + writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE); + writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC, + otp->base + OTPC_SBPI_CMD0_OFFSET); + if (enable) + writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET); + else + writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET); + + writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL); + + ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_SBPI_DONE); + if (ret < 0) + dev_err(otp->dev, "timeout during ecc_enable\n"); + + 
return ret; +} + +static int px30_otp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_otp *otp = context; + u8 *buf = val; + int ret; + + ret = rockchip_otp_reset(otp); + if (ret) { + dev_err(otp->dev, "failed to reset otp phy\n"); + return ret; + } + + ret = rockchip_otp_ecc_enable(otp, false); + if (ret < 0) { + dev_err(otp->dev, "rockchip_otp_ecc_enable err\n"); + return ret; + } + + writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL); + udelay(5); + while (bytes--) { + writel(offset++ | OTPC_USER_ADDR_MASK, + otp->base + OTPC_USER_ADDR); + writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK, + otp->base + OTPC_USER_ENABLE); + ret = rockchip_otp_wait_status(otp, OTPC_INT_STATUS, OTPC_USER_DONE); + if (ret < 0) { + dev_err(otp->dev, "timeout during read setup\n"); + goto read_end; + } + *buf++ = readb(otp->base + OTPC_USER_Q); + } + +read_end: + writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL); + + return ret; +} + +static int rk3588_otp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_otp *otp = context; + unsigned int addr_start, addr_end, addr_len; + int ret, i = 0; + u32 data; + u8 *buf; + + addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES; + addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES; + addr_len = addr_end - addr_start; + addr_start += RK3588_NO_SECURE_OFFSET; + + buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + while (addr_len--) { + writel((addr_start << RK3588_ADDR_SHIFT) | + (RK3588_BURST_NUM << RK3588_BURST_SHIFT), + otp->base + RK3588_OTPC_AUTO_CTRL); + writel(RK3588_AUTO_EN, otp->base + RK3588_OTPC_AUTO_EN); + + ret = rockchip_otp_wait_status(otp, RK3588_OTPC_INT_ST, + RK3588_RD_DONE); + if (ret < 0) { + dev_err(otp->dev, "timeout during read setup\n"); + goto read_end; + } + + data = readl(otp->base + RK3588_OTPC_DOUT0); + memcpy(&buf[i], &data, RK3588_NBYTES); + + i += RK3588_NBYTES; + addr_start++; + } + + memcpy(val, buf + offset % RK3588_NBYTES, bytes); + +read_end: + kfree(buf); + + return ret; +} + +static int rockchip_otp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_otp *otp = context; + int ret; + + if (!otp->data || !otp->data->reg_read) + return -EINVAL; + + ret = clk_bulk_prepare_enable(otp->data->num_clks, otp->clks); + if (ret < 0) { + dev_err(otp->dev, "failed to prepare/enable clks\n"); + return ret; + } + + ret = otp->data->reg_read(context, offset, val, bytes); + + clk_bulk_disable_unprepare(otp->data->num_clks, otp->clks); + + return ret; +} + +static struct nvmem_config otp_config = { + .name = "rockchip-otp", + .owner = THIS_MODULE, + .read_only = true, + .stride = 1, + .word_size = 1, + .reg_read = rockchip_otp_read, +}; + +static const char * const px30_otp_clocks[] = { + "otp", "apb_pclk", "phy", +}; + +static const struct rockchip_data px30_data = { + .size = 0x40, + .clks = px30_otp_clocks, + .num_clks = ARRAY_SIZE(px30_otp_clocks), + .reg_read = px30_otp_read, +}; + +static const char * const rk3588_otp_clocks[] = { + "otp", "apb_pclk", "phy", "arb", +}; + +static const struct rockchip_data rk3588_data = { + .size = 0x400, + .clks = rk3588_otp_clocks, + .num_clks = ARRAY_SIZE(rk3588_otp_clocks), + .reg_read = rk3588_otp_read, +}; + +static const struct of_device_id rockchip_otp_match[] = { + { + .compatible = "rockchip,px30-otp", + .data = &px30_data, + }, + { + .compatible = "rockchip,rk3308-otp", + .data 
= &px30_data, + }, + { + .compatible = "rockchip,rk3588-otp", + .data = &rk3588_data, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rockchip_otp_match); + +static int rockchip_otp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_otp *otp; + const struct rockchip_data *data; + struct nvmem_device *nvmem; + int ret, i; + + data = of_device_get_match_data(dev); + if (!data) + return dev_err_probe(dev, -EINVAL, "failed to get match data\n"); + + otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp), + GFP_KERNEL); + if (!otp) + return -ENOMEM; + + otp->data = data; + otp->dev = dev; + otp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(otp->base)) + return dev_err_probe(dev, PTR_ERR(otp->base), + "failed to ioremap resource\n"); + + otp->clks = devm_kcalloc(dev, data->num_clks, sizeof(*otp->clks), + GFP_KERNEL); + if (!otp->clks) + return -ENOMEM; + + for (i = 0; i < data->num_clks; ++i) + otp->clks[i].id = data->clks[i]; + + ret = devm_clk_bulk_get(dev, data->num_clks, otp->clks); + if (ret) + return dev_err_probe(dev, ret, "failed to get clocks\n"); + + otp->rst = devm_reset_control_array_get_exclusive(dev); + if (IS_ERR(otp->rst)) + return dev_err_probe(dev, PTR_ERR(otp->rst), + "failed to get resets\n"); + + otp_config.size = data->size; + otp_config.priv = otp; + otp_config.dev = dev; + + nvmem = devm_nvmem_register(dev, &otp_config); + if (IS_ERR(nvmem)) + return dev_err_probe(dev, PTR_ERR(nvmem), + "failed to register nvmem device\n"); + return 0; +} + +static struct platform_driver rockchip_otp_driver = { + .probe = rockchip_otp_probe, + .driver = { + .name = "rockchip-otp", + .of_match_table = rockchip_otp_match, + }, +}; + +module_platform_driver(rockchip_otp_driver); +MODULE_DESCRIPTION("Rockchip OTP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c new file mode 100644 index 0000000000..2210da40df --- /dev/null +++ b/drivers/nvmem/sc27xx-efuse.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Spreadtrum Communications Inc. 
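The read path below addresses this efuse as 32 blocks of 2 bytes each: a byte offset selects a block index and a byte lane within the 16-bit value read back from the controller. A minimal sketch of that arithmetic, mirroring what sc27xx_efuse_read() does further down (the helper name is illustrative only, not part of the driver):

static inline void sc27xx_offset_to_block(u32 offset, u32 *blk_index,
					  u32 *blk_shift)
{
	/* SC27XX_EFUSE_BLOCK_WIDTH is 2 bytes per block */
	*blk_index = offset / 2;	/* which block to select */
	*blk_shift = (offset % 2) * 8;	/* bit shift of the byte within it */
}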
+
+#include <linux/hwspinlock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/nvmem-provider.h>
+
+/* PMIC global registers definition */
+#define SC27XX_MODULE_EN 0xc08
+#define SC2730_MODULE_EN 0x1808
+#define SC27XX_EFUSE_EN BIT(6)
+
+/* Efuse controller registers definition */
+#define SC27XX_EFUSE_GLB_CTRL 0x0
+#define SC27XX_EFUSE_DATA_RD 0x4
+#define SC27XX_EFUSE_DATA_WR 0x8
+#define SC27XX_EFUSE_BLOCK_INDEX 0xc
+#define SC27XX_EFUSE_MODE_CTRL 0x10
+#define SC27XX_EFUSE_STATUS 0x14
+#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20
+#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24
+#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28
+
+/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */
+#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0)
+
+/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */
+#define SC27XX_EFUSE_PG_START BIT(0)
+#define SC27XX_EFUSE_RD_START BIT(1)
+#define SC27XX_EFUSE_CLR_RDDONE BIT(2)
+
+/* Bits definitions for SC27XX_EFUSE_STATUS register */
+#define SC27XX_EFUSE_PGM_BUSY BIT(0)
+#define SC27XX_EFUSE_READ_BUSY BIT(1)
+#define SC27XX_EFUSE_STANDBY BIT(2)
+#define SC27XX_EFUSE_GLOBAL_PROT BIT(3)
+#define SC27XX_EFUSE_RD_DONE BIT(4)
+
+/* Block number and block width (bytes) definitions */
+#define SC27XX_EFUSE_BLOCK_MAX 32
+#define SC27XX_EFUSE_BLOCK_WIDTH 2
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000
+
+/* Timeout (us) of polling the status */
+#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
+#define SC27XX_EFUSE_POLL_DELAY_US 10000
+
+/*
+ * Different PMICs of the SC27xx series can have different register
+ * addresses, so the address is kept in the per-variant device data.
+ */
+struct sc27xx_efuse_variant_data {
+	u32 module_en;
+};
+
+struct sc27xx_efuse {
+	struct device *dev;
+	struct regmap *regmap;
+	struct hwspinlock *hwlock;
+	struct mutex mutex;
+	u32 base;
+	const struct sc27xx_efuse_variant_data *var_data;
+};
+
+static const struct sc27xx_efuse_variant_data sc2731_edata = {
+	.module_en = SC27XX_MODULE_EN,
+};
+
+static const struct sc27xx_efuse_variant_data sc2730_edata = {
+	.module_en = SC2730_MODULE_EN,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems may access the single efuse
+ * controller, so one hardware spinlock is needed to synchronize between
+ * them.
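+ * Within the kernel, sc27xx_efuse_lock() below additionally takes a mutex,
+ * so the hwspinlock arbitrates with the other subsystems while the mutex
+ * serializes concurrent kernel callers.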
+ */ +static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse) +{ + int ret; + + mutex_lock(&efuse->mutex); + + ret = hwspin_lock_timeout_raw(efuse->hwlock, + SC27XX_EFUSE_HWLOCK_TIMEOUT); + if (ret) { + dev_err(efuse->dev, "timeout to get the hwspinlock\n"); + mutex_unlock(&efuse->mutex); + return ret; + } + + return 0; +} + +static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse) +{ + hwspin_unlock_raw(efuse->hwlock); + mutex_unlock(&efuse->mutex); +} + +static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits) +{ + int ret; + u32 val; + + ret = regmap_read_poll_timeout(efuse->regmap, + efuse->base + SC27XX_EFUSE_STATUS, + val, (val & bits), + SC27XX_EFUSE_POLL_DELAY_US, + SC27XX_EFUSE_POLL_TIMEOUT); + if (ret) { + dev_err(efuse->dev, "timeout to update the efuse status\n"); + return ret; + } + + return 0; +} + +static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes) +{ + struct sc27xx_efuse *efuse = context; + u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH; + u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE; + int ret; + + if (blk_index > SC27XX_EFUSE_BLOCK_MAX || + bytes > SC27XX_EFUSE_BLOCK_WIDTH) + return -EINVAL; + + ret = sc27xx_efuse_lock(efuse); + if (ret) + return ret; + + /* Enable the efuse controller. */ + ret = regmap_update_bits(efuse->regmap, efuse->var_data->module_en, + SC27XX_EFUSE_EN, SC27XX_EFUSE_EN); + if (ret) + goto unlock_efuse; + + /* + * Before reading, we should ensure the efuse controller is in + * standby state. + */ + ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY); + if (ret) + goto disable_efuse; + + /* Set the block address to be read. */ + ret = regmap_write(efuse->regmap, + efuse->base + SC27XX_EFUSE_BLOCK_INDEX, + blk_index & SC27XX_EFUSE_BLOCK_MASK); + if (ret) + goto disable_efuse; + + /* Start reading process from efuse memory. */ + ret = regmap_update_bits(efuse->regmap, + efuse->base + SC27XX_EFUSE_MODE_CTRL, + SC27XX_EFUSE_RD_START, + SC27XX_EFUSE_RD_START); + if (ret) + goto disable_efuse; + + /* + * Polling the read done status to make sure the reading process + * is completed, that means the data can be read out now. + */ + ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE); + if (ret) + goto disable_efuse; + + /* Read data from efuse memory. */ + ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD, + &buf); + if (ret) + goto disable_efuse; + + /* Clear the read done flag. */ + ret = regmap_update_bits(efuse->regmap, + efuse->base + SC27XX_EFUSE_MODE_CTRL, + SC27XX_EFUSE_CLR_RDDONE, + SC27XX_EFUSE_CLR_RDDONE); + +disable_efuse: + /* Disable the efuse controller after reading. 
*/ + regmap_update_bits(efuse->regmap, efuse->var_data->module_en, SC27XX_EFUSE_EN, 0); +unlock_efuse: + sc27xx_efuse_unlock(efuse); + + if (!ret) { + buf >>= blk_offset; + memcpy(val, &buf, bytes); + } + + return ret; +} + +static int sc27xx_efuse_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct nvmem_config econfig = { }; + struct nvmem_device *nvmem; + struct sc27xx_efuse *efuse; + int ret; + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!efuse->regmap) { + dev_err(&pdev->dev, "failed to get efuse regmap\n"); + return -ENODEV; + } + + ret = of_property_read_u32(np, "reg", &efuse->base); + if (ret) { + dev_err(&pdev->dev, "failed to get efuse base address\n"); + return ret; + } + + ret = of_hwspin_lock_get_id(np, 0); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get hwspinlock id\n"); + return ret; + } + + efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret); + if (!efuse->hwlock) { + dev_err(&pdev->dev, "failed to request hwspinlock\n"); + return -ENXIO; + } + + mutex_init(&efuse->mutex); + efuse->dev = &pdev->dev; + efuse->var_data = of_device_get_match_data(&pdev->dev); + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = true; + econfig.name = "sc27xx-efuse"; + econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH; + econfig.reg_read = sc27xx_efuse_read; + econfig.priv = efuse; + econfig.dev = &pdev->dev; + nvmem = devm_nvmem_register(&pdev->dev, &econfig); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, "failed to register nvmem config\n"); + return PTR_ERR(nvmem); + } + + return 0; +} + +static const struct of_device_id sc27xx_efuse_of_match[] = { + { .compatible = "sprd,sc2731-efuse", .data = &sc2731_edata}, + { .compatible = "sprd,sc2730-efuse", .data = &sc2730_edata}, + { } +}; + +static struct platform_driver sc27xx_efuse_driver = { + .probe = sc27xx_efuse_probe, + .driver = { + .name = "sc27xx-efuse", + .of_match_table = sc27xx_efuse_of_match, + }, +}; + +module_platform_driver(sc27xx_efuse_driver); + +MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>"); +MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sec-qfprom.c b/drivers/nvmem/sec-qfprom.c new file mode 100644 index 0000000000..e48c2dc0c4 --- /dev/null +++ b/drivers/nvmem/sec-qfprom.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +/** + * struct sec_qfprom - structure holding secure qfprom attributes + * + * @base: starting physical address for secure qfprom corrected address space. + * @dev: qfprom device structure. 
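+ *
+ * Reads are forwarded to secure firmware through qcom_scm_io_readl(),
+ * one aligned 32-bit word at a time, since this fuse region is only
+ * accessible via secure I/O.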
+ */ +struct sec_qfprom { + phys_addr_t base; + struct device *dev; +}; + +static int sec_qfprom_reg_read(void *context, unsigned int reg, void *_val, size_t bytes) +{ + struct sec_qfprom *priv = context; + unsigned int i; + u8 *val = _val; + u32 read_val; + u8 *tmp; + + for (i = 0; i < bytes; i++, reg++) { + if (i == 0 || reg % 4 == 0) { + if (qcom_scm_io_readl(priv->base + (reg & ~3), &read_val)) { + dev_err(priv->dev, "Couldn't access fuse register\n"); + return -EINVAL; + } + tmp = (u8 *)&read_val; + } + + val[i] = tmp[reg & 3]; + } + + return 0; +} + +static int sec_qfprom_probe(struct platform_device *pdev) +{ + struct nvmem_config econfig = { + .name = "sec-qfprom", + .stride = 1, + .word_size = 1, + .id = NVMEM_DEVID_AUTO, + .reg_read = sec_qfprom_reg_read, + }; + struct device *dev = &pdev->dev; + struct nvmem_device *nvmem; + struct sec_qfprom *priv; + struct resource *res; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + priv->base = res->start; + + econfig.size = resource_size(res); + econfig.dev = dev; + econfig.priv = priv; + + priv->dev = dev; + + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id sec_qfprom_of_match[] = { + { .compatible = "qcom,sec-qfprom" }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sec_qfprom_of_match); + +static struct platform_driver qfprom_driver = { + .probe = sec_qfprom_probe, + .driver = { + .name = "qcom_sec_qfprom", + .of_match_table = sec_qfprom_of_match, + }, +}; +module_platform_driver(qfprom_driver); +MODULE_DESCRIPTION("Qualcomm Secure QFPROM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c new file mode 100644 index 0000000000..89c2711232 --- /dev/null +++ b/drivers/nvmem/snvs_lpgpr.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de> + * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> + */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define IMX6Q_SNVS_HPLR 0x00 +#define IMX6Q_SNVS_LPLR 0x34 +#define IMX6Q_SNVS_LPGPR 0x68 + +#define IMX7D_SNVS_HPLR 0x00 +#define IMX7D_SNVS_LPLR 0x34 +#define IMX7D_SNVS_LPGPR 0x90 + +#define IMX_GPR_SL BIT(5) +#define IMX_GPR_HL BIT(5) + +struct snvs_lpgpr_cfg { + int offset; + int offset_hplr; + int offset_lplr; + int size; +}; + +struct snvs_lpgpr_priv { + struct device_d *dev; + struct regmap *regmap; + struct nvmem_config cfg; + const struct snvs_lpgpr_cfg *dcfg; +}; + +static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = { + .offset = IMX6Q_SNVS_LPGPR, + .offset_hplr = IMX6Q_SNVS_HPLR, + .offset_lplr = IMX6Q_SNVS_LPLR, + .size = 4, +}; + +static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx7d = { + .offset = IMX7D_SNVS_LPGPR, + .offset_hplr = IMX7D_SNVS_HPLR, + .offset_lplr = IMX7D_SNVS_LPLR, + .size = 16, +}; + +static int snvs_lpgpr_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct snvs_lpgpr_priv *priv = context; + const struct snvs_lpgpr_cfg *dcfg = priv->dcfg; + unsigned int lock_reg; + int ret; + + ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg); + if (ret < 0) + return ret; + + if (lock_reg & IMX_GPR_SL) + return -EPERM; + + ret = 
regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg); + if (ret < 0) + return ret; + + if (lock_reg & IMX_GPR_HL) + return -EPERM; + + return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val, + bytes / 4); +} + +static int snvs_lpgpr_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct snvs_lpgpr_priv *priv = context; + const struct snvs_lpgpr_cfg *dcfg = priv->dcfg; + + return regmap_bulk_read(priv->regmap, dcfg->offset + offset, + val, bytes / 4); +} + +static int snvs_lpgpr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *syscon_node; + struct snvs_lpgpr_priv *priv; + struct nvmem_config *cfg; + struct nvmem_device *nvmem; + const struct snvs_lpgpr_cfg *dcfg; + + if (!node) + return -ENOENT; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dcfg = of_device_get_match_data(dev); + if (!dcfg) + return -EINVAL; + + syscon_node = of_get_parent(node); + if (!syscon_node) + return -ENODEV; + + priv->regmap = syscon_node_to_regmap(syscon_node); + of_node_put(syscon_node); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + priv->dcfg = dcfg; + + cfg = &priv->cfg; + cfg->priv = priv; + cfg->name = dev_name(dev); + cfg->dev = dev; + cfg->stride = 4; + cfg->word_size = 4; + cfg->size = dcfg->size; + cfg->owner = THIS_MODULE; + cfg->reg_read = snvs_lpgpr_read; + cfg->reg_write = snvs_lpgpr_write; + + nvmem = devm_nvmem_register(dev, cfg); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id snvs_lpgpr_dt_ids[] = { + { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q }, + { .compatible = "fsl,imx6ul-snvs-lpgpr", + .data = &snvs_lpgpr_cfg_imx6q }, + { .compatible = "fsl,imx7d-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx7d }, + { }, +}; +MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids); + +static struct platform_driver snvs_lpgpr_driver = { + .probe = snvs_lpgpr_probe, + .driver = { + .name = "snvs_lpgpr", + .of_match_table = snvs_lpgpr_dt_ids, + }, +}; +module_platform_driver(snvs_lpgpr_driver); + +MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>"); +MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 and i.MX7 Secure Non-Volatile Storage"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c new file mode 100644 index 0000000000..7e6e31db4b --- /dev/null +++ b/drivers/nvmem/sprd-efuse.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Spreadtrum Communications Inc. 
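In the read and write paths below, a byte offset into the exposed nvmem space is mapped onto a hardware block: blocks are 4 bytes wide and the kernel-visible "normal" blocks start at a fixed offset into the array. A small sketch of the mapping used by sprd_efuse_read() (the helper name is hypothetical, for illustration):

static inline u32 sprd_efuse_offset_to_block(u32 offset, u32 blk_offset)
{
	/* 4-byte blocks; "normal" blocks start at blk_offset (72 on UMS312) */
	return offset / 4 + blk_offset;
}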
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE 0x20
+#define SPRD_EFUSE_ERR_FLAG 0x24
+#define SPRD_EFUSE_ERR_CLR 0x28
+#define SPRD_EFUSE_MAGIC_NUM 0x2c
+#define SPRD_EFUSE_FW_CFG 0x50
+#define SPRD_EFUSE_PW_SWT 0x54
+#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON BIT(0)
+#define SPRD_EFUSE_ENK2_ON BIT(1)
+#define SPRD_EFUSE_PROG_EN BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH 4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
+ * and only the normal efuse is accessible from the kernel. So define the
+ * normal block offset index and normal block numbers.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
+
+/*
+ * Different Spreadtrum SoCs can have different normal block numbers and
+ * offsets. Some SoCs also support a block-double feature: when reading or
+ * writing efuse memory, the controller stores the data twice, in case one
+ * copy becomes corrupted over time.
+ *
+ * These parameters are therefore kept in the per-variant device data.
+ */
+struct sprd_efuse_variant_data {
+	u32 blk_nums;
+	u32 blk_offset;
+	bool blk_double;
+};
+
+struct sprd_efuse {
+	struct device *dev;
+	struct clk *clk;
+	struct hwspinlock *hwlock;
+	struct mutex mutex;
+	void __iomem *base;
+	const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+	.blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+	.blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+	.blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems may access the single efuse
+ * controller, so one hardware spinlock is needed to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+	int ret;
+
+	mutex_lock(&efuse->mutex);
+
+	ret = hwspin_lock_timeout_raw(efuse->hwlock,
+				      SPRD_EFUSE_HWLOCK_TIMEOUT);
+	if (ret) {
+		dev_err(efuse->dev, "timeout get the hwspinlock\n");
+		mutex_unlock(&efuse->mutex);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+	hwspin_unlock_raw(efuse->hwlock);
+	mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+	if (en)
+		val &= ~SPRD_EFUSE_ENK2_ON;
+	else
+		val &= ~SPRD_EFUSE_ENK1_ON;
+
+	writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after switching the efuse power so that it stabilizes. */
+	usleep_range(1000, 1200);
+
+	if (en)
+		val |= SPRD_EFUSE_ENK1_ON;
+	else
+		val |= SPRD_EFUSE_ENK2_ON;
+
+	writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after switching the efuse power so that it stabilizes. */
+	usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_VDD_EN;
+	else
+		val &= ~SPRD_EFUSE_VDD_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+	/* Wait 1000us after switching the efuse power so that it stabilizes. */
+	usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_LOCK_WR_EN;
+	else
+		val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_AUTO_CHECK_EN;
+	else
+		val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_DOUBLE_EN;
+	else
+		val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+	if (en)
+		val |= SPRD_EFUSE_PROG_EN;
+	else
+		val &= ~SPRD_EFUSE_PROG_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+			       bool lock, u32 *data)
+{
+	u32 status;
+	int ret = 0;
+
+	/*
+	 * We need to set the correct magic number before writing the efuse to
+	 * allow programming, and block other programming until we clear the
+	 * magic number.
+	 */
+	writel(SPRD_EFUSE_MAGIC_NUMBER,
+	       efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+	/*
+	 * Power on the efuse, enable programming and enable double data
+	 * if asked.
+	 */
+	sprd_efuse_set_prog_power(efuse, true);
+	sprd_efuse_set_prog_en(efuse, true);
+	sprd_efuse_set_data_double(efuse, doub);
+
+	/*
+	 * Enable the auto-check function to validate whether the programming
+	 * was successful.
+	 */
+	if (lock)
+		sprd_efuse_set_auto_check(efuse, true);
+
+	writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+	/* Disable auto-check and data double after programming */
+	if (lock)
+		sprd_efuse_set_auto_check(efuse, false);
+	sprd_efuse_set_data_double(efuse, false);
+
+	/*
+	 * Check the efuse error status; if the programming succeeded, lock
+	 * this efuse block to avoid it being programmed again.
+	 */
+	status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+	if (status) {
+		dev_err(efuse->dev,
+			"write error status %u of block %d\n", status, blk);
+
+		writel(SPRD_EFUSE_ERR_CLR_MASK,
+		       efuse->base + SPRD_EFUSE_ERR_CLR);
+		ret = -EBUSY;
+	} else if (lock) {
+		sprd_efuse_set_prog_lock(efuse, lock);
+		writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
+		sprd_efuse_set_prog_lock(efuse, false);
+	}
+
+	sprd_efuse_set_prog_power(efuse, false);
+	writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+	return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+			       bool doub)
+{
+	u32 status;
+
+	/*
+	 * The efuse must be powered on before reading data from it, and is
+	 * powered off again once the read has finished.
+	 */
+	sprd_efuse_set_read_power(efuse, true);
+
+	/* Enable double data if asked */
+	sprd_efuse_set_data_double(efuse, doub);
+
+	/* Start to read data from efuse block */
+	*val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+	/* Disable double data */
+	sprd_efuse_set_data_double(efuse, false);
+
+	/* Power off the efuse */
+	sprd_efuse_set_read_power(efuse, false);
+
+	/*
+	 * Check the efuse error status and clear any errors that occurred.
+	 */
+	status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+	if (status) {
+		dev_err(efuse->dev,
+			"read error status %d of block %d\n", status, blk);
+
+		writel(SPRD_EFUSE_ERR_CLR_MASK,
+		       efuse->base + SPRD_EFUSE_ERR_CLR);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+	struct sprd_efuse *efuse = context;
+	bool blk_double = efuse->data->blk_double;
+	u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+	u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+	u32 data;
+	int ret;
+
+	ret = sprd_efuse_lock(efuse);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(efuse->clk);
+	if (ret)
+		goto unlock;
+
+	ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+	if (!ret) {
+		data >>= blk_offset;
+		memcpy(val, &data, bytes);
+	}
+
+	clk_disable_unprepare(efuse->clk);
+
+unlock:
+	sprd_efuse_unlock(efuse);
+	return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+	struct sprd_efuse *efuse = context;
+	bool blk_double = efuse->data->blk_double;
+	bool lock;
+	int ret;
+
+	ret = sprd_efuse_lock(efuse);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(efuse->clk);
+	if (ret)
+		goto unlock;
+
+	/*
+	 * If the number of bytes written equals the block width, the whole
+	 * block will be programmed; in that case, lock the block so that it
+	 * cannot be programmed again.
+	 *
+	 * If the block was only programmed partially, leave it unlocked so
+	 * that it can be programmed again.
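	 *
	 * (sprd_efuse_raw_prog() performs the lock after a successful write
	 * by rewriting the block once more with SPRD_EFUSE_LOCK_WR_EN set.)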
+ */ + if (bytes < SPRD_EFUSE_BLOCK_WIDTH) + lock = false; + else + lock = true; + + ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val); + + clk_disable_unprepare(efuse->clk); + +unlock: + sprd_efuse_unlock(efuse); + return ret; +} + +static int sprd_efuse_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct nvmem_device *nvmem; + struct nvmem_config econfig = { }; + struct sprd_efuse *efuse; + const struct sprd_efuse_variant_data *pdata; + int ret; + + pdata = of_device_get_match_data(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "No matching driver data found\n"); + return -EINVAL; + } + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + ret = of_hwspin_lock_get_id(np, 0); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get hwlock id\n"); + return ret; + } + + efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret); + if (!efuse->hwlock) { + dev_err(&pdev->dev, "failed to request hwlock\n"); + return -ENXIO; + } + + efuse->clk = devm_clk_get(&pdev->dev, "enable"); + if (IS_ERR(efuse->clk)) { + dev_err(&pdev->dev, "failed to get enable clock\n"); + return PTR_ERR(efuse->clk); + } + + mutex_init(&efuse->mutex); + efuse->dev = &pdev->dev; + efuse->data = pdata; + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = false; + econfig.name = "sprd-efuse"; + econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH; + econfig.reg_read = sprd_efuse_read; + econfig.reg_write = sprd_efuse_write; + econfig.priv = efuse; + econfig.dev = &pdev->dev; + nvmem = devm_nvmem_register(&pdev->dev, &econfig); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, "failed to register nvmem\n"); + return PTR_ERR(nvmem); + } + + return 0; +} + +static const struct of_device_id sprd_efuse_of_match[] = { + { .compatible = "sprd,ums312-efuse", .data = &ums312_data }, + { } +}; + +static struct platform_driver sprd_efuse_driver = { + .probe = sprd_efuse_probe, + .driver = { + .name = "sprd-efuse", + .of_match_table = sprd_efuse_of_match, + }, +}; + +module_platform_driver(sprd_efuse_driver); + +MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>"); +MODULE_DESCRIPTION("Spreadtrum AP efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/stm32-bsec-optee-ta.c b/drivers/nvmem/stm32-bsec-optee-ta.c new file mode 100644 index 0000000000..f89ce791dd --- /dev/null +++ b/drivers/nvmem/stm32-bsec-optee-ta.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver + * + * Copyright (C) 2022, STMicroelectronics - All Rights Reserved + */ + +#include <linux/tee_drv.h> + +#include "stm32-bsec-optee-ta.h" + +/* + * Read OTP memory + * + * [in] value[0].a OTP start offset in byte + * [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock) + * [out] memref[1].buffer Output buffer to store read values + * [out] memref[1].size Size of OTP to be read + * + * Return codes: + * TEE_SUCCESS - Invoke command success + * TEE_ERROR_BAD_PARAMETERS - Incorrect input param + * TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller + */ +#define PTA_BSEC_READ_MEM 0x0 + +/* + * Write OTP memory + * + * [in] value[0].a OTP start offset in byte + * [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock) + * [in] memref[1].buffer Input buffer to read values + * [in] memref[1].size Size of OTP to be written + * 
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
+ */
+#define PTA_BSEC_WRITE_MEM 0x1
+
+/* value of PTA_BSEC access type = value[in] b */
+#define SHADOW_ACCESS 0
+#define FUSE_ACCESS 1
+#define LOCK_ACCESS 2
+
+/* Bitfield definition for LOCK status */
+#define LOCK_PERM BIT(30)
+
+/* OP-TEE STM32MP BSEC TA UUID */
+static const uuid_t stm32mp_bsec_ta_uuid =
+	UUID_INIT(0x94cf71ad, 0x80e6, 0x40b5,
+		  0xa7, 0xc6, 0x3d, 0xc5, 0x01, 0xeb, 0x28, 0x03);
+
+/*
+ * Check whether this driver supports the BSEC TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int stm32_bsec_optee_ta_match(struct tee_ioctl_version_data *ver,
+				     const void *data)
+{
+	/* Currently this driver only supports GP compliant, OP-TEE based TA */
+	if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+	    (ver->gen_caps & TEE_GEN_CAP_GP))
+		return 1;
+	else
+		return 0;
+}
+
+/* Open a session to OP-TEE for STM32MP BSEC TA */
+static int stm32_bsec_ta_open_session(struct tee_context *ctx, u32 *id)
+{
+	struct tee_ioctl_open_session_arg sess_arg;
+	int rc;
+
+	memset(&sess_arg, 0, sizeof(sess_arg));
+	export_uuid(sess_arg.uuid, &stm32mp_bsec_ta_uuid);
+	sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+	sess_arg.num_params = 0;
+
+	rc = tee_client_open_session(ctx, &sess_arg, NULL);
+	if ((rc < 0) || (sess_arg.ret != 0)) {
+		pr_err("%s: tee_client_open_session failed err:%#x, ret:%#x\n",
+		       __func__, sess_arg.ret, rc);
+		if (!rc)
+			rc = -EINVAL;
+	} else {
+		*id = sess_arg.session;
+	}
+
+	return rc;
+}
+
+/* Close a session to OP-TEE for STM32MP BSEC TA */
+static void stm32_bsec_ta_close_session(void *ctx, u32 id)
+{
+	tee_client_close_session(ctx, id);
+}
+
+/* stm32_bsec_optee_ta_open() - initialize the STM32MP BSEC TA */
+int stm32_bsec_optee_ta_open(struct tee_context **ctx)
+{
+	struct tee_context *tee_ctx;
+	u32 session_id;
+	int rc;
+
+	/* Open context with TEE driver */
+	tee_ctx = tee_client_open_context(NULL, stm32_bsec_optee_ta_match, NULL, NULL);
+	if (IS_ERR(tee_ctx)) {
+		rc = PTR_ERR(tee_ctx);
+		if (rc == -ENOENT)
+			return -EPROBE_DEFER;
+		pr_err("%s: tee_client_open_context failed (%d)\n", __func__, rc);
+
+		return rc;
+	}
+
+	/* Check STM32MP BSEC TA presence */
+	rc = stm32_bsec_ta_open_session(tee_ctx, &session_id);
+	if (rc) {
+		tee_client_close_context(tee_ctx);
+		return rc;
+	}
+
+	stm32_bsec_ta_close_session(tee_ctx, session_id);
+
+	*ctx = tee_ctx;
+
+	return 0;
+}
+
+/* stm32_bsec_optee_ta_close() - release the PTA STM32MP BSEC TA */
+void stm32_bsec_optee_ta_close(void *ctx)
+{
+	tee_client_close_context(ctx);
+}
+
+/* stm32_bsec_optee_ta_read() - nvmem read access using PTA client driver */
+int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
+			     void *buf, size_t bytes)
+{
+	struct tee_shm *shm;
+	struct tee_ioctl_invoke_arg arg;
+	struct tee_param param[2];
+	u8 *shm_buf;
+	u32 start, num_bytes;
+	int ret;
+	u32 session_id;
+
+	ret = stm32_bsec_ta_open_session(ctx, &session_id);
+	if (ret)
+		return ret;
+
+	memset(&arg, 0, sizeof(arg));
+	memset(&param, 0, sizeof(param));
+
+	arg.func = PTA_BSEC_READ_MEM;
+	arg.session = session_id;
+	arg.num_params = 2;
+
+	/* align access on 32 bits */
+	start = ALIGN_DOWN(offset, 4);
+	num_bytes = round_up(offset + bytes - start, 4);
+	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	param[0].u.value.a = start;
+	param[0].u.value.b = SHADOW_ACCESS;
+
+	shm = tee_shm_alloc_kernel_buf(ctx, num_bytes);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		goto out_tee_session;
+	}
+
+	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+	param[1].u.memref.shm = shm;
+	param[1].u.memref.size = num_bytes;
+
+	ret = tee_client_invoke_func(ctx, &arg, param);
+	if (ret < 0 || arg.ret != 0) {
+		pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n",
+		       arg.ret, ret);
+		if (!ret)
+			ret = -EIO;
+	}
+	if (!ret) {
+		shm_buf = tee_shm_get_va(shm, 0);
+		if (IS_ERR(shm_buf)) {
+			ret = PTR_ERR(shm_buf);
+			pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
+		} else {
+			/* read data from 32-bit aligned buffer */
+			memcpy(buf, &shm_buf[offset % 4], bytes);
+		}
+	}
+
+	tee_shm_free(shm);
+
+out_tee_session:
+	stm32_bsec_ta_close_session(ctx, session_id);
+
+	return ret;
+}
+
+/* stm32_bsec_optee_ta_write() - nvmem write access using PTA client driver */
+int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
+			      unsigned int offset, void *buf, size_t bytes)
+{
+	struct tee_shm *shm;
+	struct tee_ioctl_invoke_arg arg;
+	struct tee_param param[2];
+	u8 *shm_buf;
+	int ret;
+	u32 session_id;
+
+	ret = stm32_bsec_ta_open_session(ctx, &session_id);
+	if (ret)
+		return ret;
+
+	/* Allow only writing complete 32-bit aligned words */
+	if ((bytes % 4) || (offset % 4))
+		return -EINVAL;
+
+	memset(&arg, 0, sizeof(arg));
+	memset(&param, 0, sizeof(param));
+
+	arg.func = PTA_BSEC_WRITE_MEM;
+	arg.session = session_id;
+	arg.num_params = 2;
+
+	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	param[0].u.value.a = offset;
+	param[0].u.value.b = FUSE_ACCESS;
+
+	shm = tee_shm_alloc_kernel_buf(ctx, bytes);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		goto out_tee_session;
+	}
+
+	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+	param[1].u.memref.shm = shm;
+	param[1].u.memref.size = bytes;
+
+	shm_buf = tee_shm_get_va(shm, 0);
+	if (IS_ERR(shm_buf)) {
+		ret = PTR_ERR(shm_buf);
+		pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
+		tee_shm_free(shm);
+
+		goto out_tee_session;
+	}
+
+	memcpy(shm_buf, buf, bytes);
+
+	ret = tee_client_invoke_func(ctx, &arg, param);
+	if (ret < 0 || arg.ret != 0) {
+		pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
+		if (!ret)
+			ret = -EIO;
+	}
+	pr_debug("Write OTPs %d to %zu, ret=%d\n", offset / 4, (offset + bytes) / 4, ret);
+
+	/* Lock the upper OTPs with ECC protection, word programming only */
+	if (!ret && ((offset + bytes) >= (lower * 4))) {
+		u32 start, nb_lock;
+		u32 *lock = (u32 *)shm_buf;
+		int i;
+
+		/*
+		 * Don't lock the lower OTPs: they have no ECC protection and
+		 * use incremental bit programming, so a second write is
+		 * allowed.
+		 */
+		start = max_t(u32, offset, lower * 4);
+		nb_lock = (offset + bytes - start) / 4;
+
+		param[0].u.value.a = start;
+		param[0].u.value.b = LOCK_ACCESS;
+		param[1].u.memref.size = nb_lock * 4;
+
+		for (i = 0; i < nb_lock; i++)
+			lock[i] = LOCK_PERM;
+
+		ret = tee_client_invoke_func(ctx, &arg, param);
+		if (ret < 0 || arg.ret != 0) {
+			pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
+			if (!ret)
+				ret = -EIO;
+		}
+		pr_debug("Lock upper OTPs %d to %d, ret=%d\n",
+			 start / 4, start / 4 + nb_lock, ret);
+	}
+
+	tee_shm_free(shm);
+
+out_tee_session:
+	stm32_bsec_ta_close_session(ctx, session_id);
+
+	return ret;
+}
diff --git a/drivers/nvmem/stm32-bsec-optee-ta.h b/drivers/nvmem/stm32-bsec-optee-ta.h
new file mode 100644
index 0000000000..3966a05351
--- /dev/null
+++ b/drivers/nvmem/stm32-bsec-optee-ta.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#if IS_ENABLED(CONFIG_NVMEM_STM32_BSEC_OPTEE_TA)
+/**
+ * stm32_bsec_optee_ta_open() - initialize the STM32 BSEC TA
+ * @ctx: the OP-TEE context on success
+ *
+ * Return:
+ *	On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_open(struct tee_context **ctx);
+
+/**
+ * stm32_bsec_optee_ta_close() - release the STM32 BSEC TA
+ * @ctx: the OP-TEE context
+ *
+ * This function is used to release the OP-TEE resources initialized in
+ * stm32_bsec_optee_ta_open(); it can be used as a callback to
+ * devm_add_action_or_reset().
+ */
+void stm32_bsec_optee_ta_close(void *ctx);
+
+/**
+ * stm32_bsec_optee_ta_read() - nvmem read access using TA client driver
+ * @ctx: the OP-TEE context provided by stm32_bsec_optee_ta_open
+ * @offset: nvmem offset
+ * @buf: buffer to fill with nvmem values
+ * @bytes: number of bytes to read
+ *
+ * Return:
+ *	On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
+			     void *buf, size_t bytes);
+
+/**
+ * stm32_bsec_optee_ta_write() - nvmem write access using TA client driver
+ * @ctx: the OP-TEE context provided by stm32_bsec_optee_ta_open
+ * @lower: number of lower OTP words, not protected by ECC
+ * @offset: nvmem offset
+ * @buf: buffer with nvmem values
+ * @bytes: number of bytes to write
+ *
+ * Return:
+ *	On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
+			      unsigned int offset, void *buf, size_t bytes);
+
+#else
+
+static inline int stm32_bsec_optee_ta_open(struct tee_context **ctx)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void stm32_bsec_optee_ta_close(void *ctx)
+{
+}
+
+static inline int stm32_bsec_optee_ta_read(struct tee_context *ctx,
+					   unsigned int offset, void *buf,
+					   size_t bytes)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int stm32_bsec_optee_ta_write(struct tee_context *ctx,
+					    unsigned int lower,
+					    unsigned int offset, void *buf,
+					    size_t bytes)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_NVMEM_STM32_BSEC_OPTEE_TA */
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
new file mode 100644
index 0000000000..0f84044bd1
--- /dev/null
+++ b/drivers/nvmem/stm32-romem.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Factory-programmed memory read access driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com> for STMicroelectronics.
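+ *
+ * Fuse words are exposed through one of three access paths, selected at
+ * probe time: direct memory-mapped reads of the factory-programmed area,
+ * BSEC SMC calls into secure firmware, or the OP-TEE BSEC TA.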
+ */ + +#include <linux/arm-smccc.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/tee_drv.h> + +#include "stm32-bsec-optee-ta.h" + +/* BSEC secure service access from non-secure */ +#define STM32_SMC_BSEC 0x82001003 +#define STM32_SMC_READ_SHADOW 0x01 +#define STM32_SMC_PROG_OTP 0x02 +#define STM32_SMC_WRITE_SHADOW 0x03 +#define STM32_SMC_READ_OTP 0x04 + +/* shadow registers offset */ +#define STM32MP15_BSEC_DATA0 0x200 + +struct stm32_romem_cfg { + int size; + u8 lower; + bool ta; +}; + +struct stm32_romem_priv { + void __iomem *base; + struct nvmem_config cfg; + u8 lower; + struct tee_context *ctx; +}; + +static int stm32_romem_read(void *context, unsigned int offset, void *buf, + size_t bytes) +{ + struct stm32_romem_priv *priv = context; + u8 *buf8 = buf; + int i; + + for (i = offset; i < offset + bytes; i++) + *buf8++ = readb_relaxed(priv->base + i); + + return 0; +} + +static int stm32_bsec_smc(u8 op, u32 otp, u32 data, u32 *result) +{ +#if IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) + struct arm_smccc_res res; + + arm_smccc_smc(STM32_SMC_BSEC, op, otp, data, 0, 0, 0, 0, &res); + if (res.a0) + return -EIO; + + if (result) + *result = (u32)res.a1; + + return 0; +#else + return -ENXIO; +#endif +} + +static int stm32_bsec_read(void *context, unsigned int offset, void *buf, + size_t bytes) +{ + struct stm32_romem_priv *priv = context; + struct device *dev = priv->cfg.dev; + u32 roffset, rbytes, val; + u8 *buf8 = buf, *val8 = (u8 *)&val; + int i, j = 0, ret, skip_bytes, size; + + /* Round unaligned access to 32-bits */ + roffset = rounddown(offset, 4); + skip_bytes = offset & 0x3; + rbytes = roundup(bytes + skip_bytes, 4); + + if (roffset + rbytes > priv->cfg.size) + return -EINVAL; + + for (i = roffset; (i < roffset + rbytes); i += 4) { + u32 otp = i >> 2; + + if (otp < priv->lower) { + /* read lower data from shadow registers */ + val = readl_relaxed( + priv->base + STM32MP15_BSEC_DATA0 + i); + } else { + ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, otp, 0, + &val); + if (ret) { + dev_err(dev, "Can't read data%d (%d)\n", otp, + ret); + return ret; + } + } + /* skip first bytes in case of unaligned read */ + if (skip_bytes) + size = min(bytes, (size_t)(4 - skip_bytes)); + else + size = min(bytes, (size_t)4); + memcpy(&buf8[j], &val8[skip_bytes], size); + bytes -= size; + j += size; + skip_bytes = 0; + } + + return 0; +} + +static int stm32_bsec_write(void *context, unsigned int offset, void *buf, + size_t bytes) +{ + struct stm32_romem_priv *priv = context; + struct device *dev = priv->cfg.dev; + u32 *buf32 = buf; + int ret, i; + + /* Allow only writing complete 32-bits aligned words */ + if ((bytes % 4) || (offset % 4)) + return -EINVAL; + + for (i = offset; i < offset + bytes; i += 4) { + ret = stm32_bsec_smc(STM32_SMC_PROG_OTP, i >> 2, *buf32++, + NULL); + if (ret) { + dev_err(dev, "Can't write data%d (%d)\n", i >> 2, ret); + return ret; + } + } + + if (offset + bytes >= priv->lower * 4) + dev_warn(dev, "Update of upper OTPs with ECC protection (word programming, only once)\n"); + + return 0; +} + +static int stm32_bsec_pta_read(void *context, unsigned int offset, void *buf, + size_t bytes) +{ + struct stm32_romem_priv *priv = context; + + return stm32_bsec_optee_ta_read(priv->ctx, offset, buf, bytes); +} + +static int stm32_bsec_pta_write(void *context, unsigned int offset, void *buf, + size_t bytes) +{ + struct stm32_romem_priv *priv = context; + + return stm32_bsec_optee_ta_write(priv->ctx, priv->lower, 
offset, buf, bytes); +} + +static bool stm32_bsec_smc_check(void) +{ + u32 val; + int ret; + + /* check that the OP-TEE support the BSEC SMC (legacy mode) */ + ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, 0, 0, &val); + + return !ret; +} + +static bool optee_presence_check(void) +{ + struct device_node *np; + bool tee_detected = false; + + /* check that the OP-TEE node is present and available. */ + np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz"); + if (np && of_device_is_available(np)) + tee_detected = true; + of_node_put(np); + + return tee_detected; +} + +static int stm32_romem_probe(struct platform_device *pdev) +{ + const struct stm32_romem_cfg *cfg; + struct device *dev = &pdev->dev; + struct stm32_romem_priv *priv; + struct resource *res; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->cfg.name = "stm32-romem"; + priv->cfg.word_size = 1; + priv->cfg.stride = 1; + priv->cfg.dev = dev; + priv->cfg.priv = priv; + priv->cfg.owner = THIS_MODULE; + priv->cfg.type = NVMEM_TYPE_OTP; + + priv->lower = 0; + + cfg = (const struct stm32_romem_cfg *) + of_match_device(dev->driver->of_match_table, dev)->data; + if (!cfg) { + priv->cfg.read_only = true; + priv->cfg.size = resource_size(res); + priv->cfg.reg_read = stm32_romem_read; + } else { + priv->cfg.size = cfg->size; + priv->lower = cfg->lower; + if (cfg->ta || optee_presence_check()) { + rc = stm32_bsec_optee_ta_open(&priv->ctx); + if (rc) { + /* wait for OP-TEE client driver to be up and ready */ + if (rc == -EPROBE_DEFER) + return -EPROBE_DEFER; + /* BSEC PTA is required or SMC not supported */ + if (cfg->ta || !stm32_bsec_smc_check()) + return rc; + } + } + if (priv->ctx) { + rc = devm_add_action_or_reset(dev, stm32_bsec_optee_ta_close, priv->ctx); + if (rc) { + dev_err(dev, "devm_add_action_or_reset() failed (%d)\n", rc); + return rc; + } + priv->cfg.reg_read = stm32_bsec_pta_read; + priv->cfg.reg_write = stm32_bsec_pta_write; + } else { + priv->cfg.reg_read = stm32_bsec_read; + priv->cfg.reg_write = stm32_bsec_write; + } + } + + return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg)); +} + +/* + * STM32MP15/13 BSEC OTP regions: 4096 OTP bits (with 3072 effective bits) + * => 96 x 32-bits data words + * - Lower: 1K bits, 2:1 redundancy, incremental bit programming + * => 32 (x 32-bits) lower shadow registers = words 0 to 31 + * - Upper: 2K bits, ECC protection, word programming only + * => 64 (x 32-bits) = words 32 to 95 + */ +static const struct stm32_romem_cfg stm32mp15_bsec_cfg = { + .size = 384, + .lower = 32, + .ta = false, +}; + +static const struct stm32_romem_cfg stm32mp13_bsec_cfg = { + .size = 384, + .lower = 32, + .ta = true, +}; + +static const struct of_device_id stm32_romem_of_match[] __maybe_unused = { + { .compatible = "st,stm32f4-otp", }, { + .compatible = "st,stm32mp15-bsec", + .data = (void *)&stm32mp15_bsec_cfg, + }, { + .compatible = "st,stm32mp13-bsec", + .data = (void *)&stm32mp13_bsec_cfg, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, stm32_romem_of_match); + +static struct platform_driver stm32_romem_driver = { + .probe = stm32_romem_probe, + .driver = { + .name = "stm32-romem", + .of_match_table = of_match_ptr(stm32_romem_of_match), + }, +}; +module_platform_driver(stm32_romem_driver); + +MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 RO-MEM"); 
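The BSEC read path above (stm32_bsec_read()) splits accesses by OTP word index: words below the per-SoC "lower" bound are read straight from the memory-mapped shadow registers at STM32MP15_BSEC_DATA0, while higher words require an SMC into secure firmware. A compact restatement of that routing decision (the helper name is hypothetical, for illustration only):

static bool stm32_bsec_direct_shadow_read(unsigned int offset, u8 lower)
{
	u32 otp = offset >> 2;	/* 32-bit OTP word index */

	/* words below 'lower' (32 on STM32MP15/13) are readable via MMIO */
	return otp < lower;
}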
+MODULE_ALIAS("platform:nvmem-stm32-romem"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c new file mode 100644 index 0000000000..f3a18aa0a6 --- /dev/null +++ b/drivers/nvmem/sunplus-ocotp.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * The OCOTP driver for Sunplus SP7021 + * + * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved. + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +/* + * OTP memory + * Each bank contains 4 words (32 bits). + * Bank 0 starts at offset 0 from the base. + */ + +#define OTP_WORDS_PER_BANK 4 +#define OTP_WORD_SIZE sizeof(u32) +#define OTP_BIT_ADDR_OF_BANK (8 * OTP_WORD_SIZE * OTP_WORDS_PER_BANK) +#define QAC628_OTP_NUM_BANKS 8 +#define QAC628_OTP_SIZE (QAC628_OTP_NUM_BANKS * OTP_WORDS_PER_BANK * OTP_WORD_SIZE) +#define OTP_READ_TIMEOUT_US 200000 + +/* HB_GPIO */ +#define ADDRESS_8_DATA 0x20 + +/* OTP_RX */ +#define OTP_CONTROL_2 0x48 +#define OTP_RD_PERIOD GENMASK(15, 8) +#define OTP_RD_PERIOD_MASK ~GENMASK(15, 8) +#define CPU_CLOCK FIELD_PREP(OTP_RD_PERIOD, 30) +#define SEL_BAK_KEY2 BIT(5) +#define SEL_BAK_KEY2_MASK ~BIT(5) +#define SW_TRIM_EN BIT(4) +#define SW_TRIM_EN_MASK ~BIT(4) +#define SEL_BAK_KEY BIT(3) +#define SEL_BAK_KEY_MASK ~BIT(3) +#define OTP_READ BIT(2) +#define OTP_LOAD_SECURE_DATA BIT(1) +#define OTP_LOAD_SECURE_DATA_MASK ~BIT(1) +#define OTP_DO_CRC BIT(0) +#define OTP_DO_CRC_MASK ~BIT(0) +#define OTP_STATUS 0x4c +#define OTP_READ_DONE BIT(4) +#define OTP_READ_DONE_MASK ~BIT(4) +#define OTP_LOAD_SECURE_DONE_MASK ~BIT(2) +#define OTP_READ_ADDRESS 0x50 + +enum base_type { + HB_GPIO, + OTPRX, + BASEMAX, +}; + +struct sp_ocotp_priv { + struct device *dev; + void __iomem *base[BASEMAX]; + struct clk *clk; +}; + +struct sp_ocotp_data { + int size; +}; + +static const struct sp_ocotp_data sp_otp_v0 = { + .size = QAC628_OTP_SIZE, +}; + +static int sp_otp_read_real(struct sp_ocotp_priv *otp, int addr, char *value) +{ + unsigned int addr_data; + unsigned int byte_shift; + unsigned int status; + int ret; + + addr_data = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK); + addr_data = addr_data / OTP_WORD_SIZE; + + byte_shift = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK); + byte_shift = byte_shift % OTP_WORD_SIZE; + + addr = addr / (OTP_WORD_SIZE * OTP_WORDS_PER_BANK); + addr = addr * OTP_BIT_ADDR_OF_BANK; + + writel(readl(otp->base[OTPRX] + OTP_STATUS) & OTP_READ_DONE_MASK & + OTP_LOAD_SECURE_DONE_MASK, otp->base[OTPRX] + OTP_STATUS); + writel(addr, otp->base[OTPRX] + OTP_READ_ADDRESS); + writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) | OTP_READ, + otp->base[OTPRX] + OTP_CONTROL_2); + writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) & SEL_BAK_KEY2_MASK & SW_TRIM_EN_MASK + & SEL_BAK_KEY_MASK & OTP_LOAD_SECURE_DATA_MASK & OTP_DO_CRC_MASK, + otp->base[OTPRX] + OTP_CONTROL_2); + writel((readl(otp->base[OTPRX] + OTP_CONTROL_2) & OTP_RD_PERIOD_MASK) | CPU_CLOCK, + otp->base[OTPRX] + OTP_CONTROL_2); + + ret = readl_poll_timeout(otp->base[OTPRX] + OTP_STATUS, status, + status & OTP_READ_DONE, 10, OTP_READ_TIMEOUT_US); + + if (ret < 0) + return ret; + + *value = (readl(otp->base[HB_GPIO] + ADDRESS_8_DATA + addr_data * OTP_WORD_SIZE) + >> (8 * byte_shift)) & 0xff; + + return ret; +} + +static int sp_ocotp_read(void *priv, unsigned int 
offset, void *value, size_t bytes) +{ + struct sp_ocotp_priv *otp = priv; + unsigned int addr; + char *buf = value; + char val[4]; + int ret; + + ret = clk_enable(otp->clk); + if (ret) + return ret; + + *buf = 0; + for (addr = offset; addr < (offset + bytes); addr++) { + ret = sp_otp_read_real(otp, addr, val); + if (ret < 0) { + dev_err(otp->dev, "OTP read fail:%d at %d", ret, addr); + goto disable_clk; + } + + *buf++ = *val; + } + +disable_clk: + clk_disable(otp->clk); + + return ret; +} + +static struct nvmem_config sp_ocotp_nvmem_config = { + .name = "sp-ocotp", + .read_only = true, + .word_size = 1, + .size = QAC628_OTP_SIZE, + .stride = 1, + .reg_read = sp_ocotp_read, + .owner = THIS_MODULE, +}; + +static int sp_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nvmem_device *nvmem; + struct sp_ocotp_priv *otp; + struct resource *res; + int ret; + + otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + otp->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio"); + otp->base[HB_GPIO] = devm_ioremap_resource(dev, res); + if (IS_ERR(otp->base[HB_GPIO])) + return PTR_ERR(otp->base[HB_GPIO]); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx"); + otp->base[OTPRX] = devm_ioremap_resource(dev, res); + if (IS_ERR(otp->base[OTPRX])) + return PTR_ERR(otp->base[OTPRX]); + + otp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(otp->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(otp->clk), + "devm_clk_get fail\n"); + + ret = clk_prepare(otp->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare clk: %d\n", ret); + return ret; + } + + sp_ocotp_nvmem_config.priv = otp; + sp_ocotp_nvmem_config.dev = dev; + + nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config); + if (IS_ERR(nvmem)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(nvmem), + "register nvmem device fail\n"); + goto err; + } + + platform_set_drvdata(pdev, nvmem); + + dev_dbg(dev, "banks:%d x wpb:%d x wsize:%d = %d", + (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK, + (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE); + + return 0; +err: + clk_unprepare(otp->clk); + return ret; +} + +static const struct of_device_id sp_ocotp_dt_ids[] = { + { .compatible = "sunplus,sp7021-ocotp", .data = &sp_otp_v0 }, + { } +}; +MODULE_DEVICE_TABLE(of, sp_ocotp_dt_ids); + +static struct platform_driver sp_otp_driver = { + .probe = sp_ocotp_probe, + .driver = { + .name = "sunplus,sp7021-ocotp", + .of_match_table = sp_ocotp_dt_ids, + } +}; +module_platform_driver(sp_otp_driver); + +MODULE_AUTHOR("Vincent Shih <vincent.sunplus@gmail.com>"); +MODULE_DESCRIPTION("Sunplus On-Chip OTP driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c new file mode 100644 index 0000000000..5d364d8534 --- /dev/null +++ b/drivers/nvmem/sunxi_sid.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Allwinner sunXi SoCs Security ID support. 
+ * + * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> + * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com> + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/random.h> + +/* Registers and special values for doing register-based SID readout on H3 */ +#define SUN8I_SID_PRCTL 0x40 +#define SUN8I_SID_RDKEY 0x60 + +#define SUN8I_SID_OFFSET_MASK 0x1FF +#define SUN8I_SID_OFFSET_SHIFT 16 +#define SUN8I_SID_OP_LOCK (0xAC << 8) +#define SUN8I_SID_READ BIT(1) + +struct sunxi_sid_cfg { + u32 value_offset; + u32 size; + bool need_register_readout; +}; + +struct sunxi_sid { + void __iomem *base; + u32 value_offset; +}; + +static int sunxi_sid_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct sunxi_sid *sid = context; + u32 word; + + /* .stride = 4 so offset is guaranteed to be aligned */ + __ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4); + + val += round_down(bytes, 4); + offset += round_down(bytes, 4); + bytes = bytes % 4; + + if (!bytes) + return 0; + + /* Handle any trailing bytes */ + word = readl_relaxed(sid->base + sid->value_offset + offset); + memcpy(val, &word, bytes); + + return 0; +} + +static int sun8i_sid_register_readout(const struct sunxi_sid *sid, + const unsigned int offset, + u32 *out) +{ + u32 reg_val; + int ret; + + /* Set word, lock access, and set read command */ + reg_val = (offset & SUN8I_SID_OFFSET_MASK) + << SUN8I_SID_OFFSET_SHIFT; + reg_val |= SUN8I_SID_OP_LOCK | SUN8I_SID_READ; + writel(reg_val, sid->base + SUN8I_SID_PRCTL); + + ret = readl_poll_timeout(sid->base + SUN8I_SID_PRCTL, reg_val, + !(reg_val & SUN8I_SID_READ), 100, 250000); + if (ret) + return ret; + + if (out) + *out = readl(sid->base + SUN8I_SID_RDKEY); + + writel(0, sid->base + SUN8I_SID_PRCTL); + + return 0; +} + +/* + * On Allwinner H3, the value on the 0x200 offset of the SID controller seems + * to be not reliable at all. + * Read by the registers instead. 
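+ * (The register interface reads one 32-bit word per transaction: the
+ * target offset and the lock/read opcode are written to SUN8I_SID_PRCTL,
+ * and the result is fetched from SUN8I_SID_RDKEY.)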
+ */ +static int sun8i_sid_read_by_reg(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct sunxi_sid *sid = context; + u32 word; + int ret; + + /* .stride = 4 so offset is guaranteed to be aligned */ + while (bytes >= 4) { + ret = sun8i_sid_register_readout(sid, offset, val); + if (ret) + return ret; + + val += 4; + offset += 4; + bytes -= 4; + } + + if (!bytes) + return 0; + + /* Handle any trailing bytes */ + ret = sun8i_sid_register_readout(sid, offset, &word); + if (ret) + return ret; + + memcpy(val, &word, bytes); + + return 0; +} + +static int sunxi_sid_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nvmem_config *nvmem_cfg; + struct nvmem_device *nvmem; + struct sunxi_sid *sid; + int size; + char *randomness; + const struct sunxi_sid_cfg *cfg; + + sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); + if (!sid) + return -ENOMEM; + + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + sid->value_offset = cfg->value_offset; + + sid->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(sid->base)) + return PTR_ERR(sid->base); + + size = cfg->size; + + nvmem_cfg = devm_kzalloc(dev, sizeof(*nvmem_cfg), GFP_KERNEL); + if (!nvmem_cfg) + return -ENOMEM; + + nvmem_cfg->dev = dev; + nvmem_cfg->name = "sunxi-sid"; + nvmem_cfg->type = NVMEM_TYPE_OTP; + nvmem_cfg->read_only = true; + nvmem_cfg->size = cfg->size; + nvmem_cfg->word_size = 1; + nvmem_cfg->stride = 4; + nvmem_cfg->priv = sid; + if (cfg->need_register_readout) + nvmem_cfg->reg_read = sun8i_sid_read_by_reg; + else + nvmem_cfg->reg_read = sunxi_sid_read; + + nvmem = devm_nvmem_register(dev, nvmem_cfg); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + randomness = kzalloc(size, GFP_KERNEL); + if (!randomness) + return -ENOMEM; + + nvmem_cfg->reg_read(sid, 0, randomness, size); + add_device_randomness(randomness, size); + kfree(randomness); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static const struct sunxi_sid_cfg sun4i_a10_cfg = { + .size = 0x10, +}; + +static const struct sunxi_sid_cfg sun7i_a20_cfg = { + .size = 0x200, +}; + +static const struct sunxi_sid_cfg sun8i_h3_cfg = { + .value_offset = 0x200, + .size = 0x100, + .need_register_readout = true, +}; + +static const struct sunxi_sid_cfg sun50i_a64_cfg = { + .value_offset = 0x200, + .size = 0x100, +}; + +static const struct sunxi_sid_cfg sun50i_h6_cfg = { + .value_offset = 0x200, + .size = 0x200, +}; + +static const struct of_device_id sunxi_sid_of_match[] = { + { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, + { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, + { .compatible = "allwinner,sun20i-d1-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static struct platform_driver sunxi_sid_driver = { + .probe = sunxi_sid_probe, + .driver = { + .name = "eeprom-sunxi-sid", + .of_match_table = sunxi_sid_of_match, + }, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/u-boot-env.c 
new file mode 100644
index 0000000000..c4ae94af4a
--- /dev/null
+++ b/drivers/nvmem/u-boot-env.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum u_boot_env_format {
+	U_BOOT_FORMAT_SINGLE,
+	U_BOOT_FORMAT_REDUNDANT,
+	U_BOOT_FORMAT_BROADCOM,
+};
+
+struct u_boot_env {
+	struct device *dev;
+	enum u_boot_env_format format;
+
+	struct mtd_info *mtd;
+
+	/* Cells */
+	struct nvmem_cell_info *cells;
+	int ncells;
+};
+
+struct u_boot_env_image_single {
+	__le32 crc32;
+	uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_redundant {
+	__le32 crc32;
+	u8 mark;
+	uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_broadcom {
+	__le32 magic;
+	__le32 len;
+	__le32 crc32;
+	DECLARE_FLEX_ARRAY(uint8_t, data);
+} __packed;
+
+static int u_boot_env_read(void *context, unsigned int offset, void *val,
+			   size_t bytes)
+{
+	struct u_boot_env *priv = context;
+	struct device *dev = priv->dev;
+	size_t bytes_read;
+	int err;
+
+	err = mtd_read(priv->mtd, offset, bytes, &bytes_read, val);
+	if (err && !mtd_is_bitflip(err)) {
+		dev_err(dev, "Failed to read from mtd: %d\n", err);
+		return err;
+	}
+
+	if (bytes_read != bytes) {
+		dev_err(dev, "Failed to read %zu bytes\n", bytes);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
+						unsigned int offset, void *buf, size_t bytes)
+{
+	u8 mac[ETH_ALEN];
+
+	/* Expect a printable "xx:xx:xx:xx:xx:xx" MAC address */
+	if (bytes != 3 * ETH_ALEN - 1)
+		return -EINVAL;
+
+	if (!mac_pton(buf, mac))
+		return -EINVAL;
+
+	if (index)
+		eth_addr_add(mac, index);
+
+	ether_addr_copy(buf, mac);
+
+	return 0;
+}
+
+static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
+				size_t data_offset, size_t data_len)
+{
+	struct device *dev = priv->dev;
+	char *data = buf + data_offset;
+	char *var, *value, *eq;
+	int idx;
+
+	/* Environment data is a sequence of NUL-terminated "name=value" pairs */
+	priv->ncells = 0;
+	for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
+		priv->ncells++;
+
+	priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+	if (!priv->cells)
+		return -ENOMEM;
+
+	for (var = data, idx = 0;
+	     var < data + data_len && *var;
+	     var = value + strlen(value) + 1, idx++) {
+		eq = strchr(var, '=');
+		if (!eq)
+			break;
+		*eq = '\0';
+		value = eq + 1;
+
+		priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+		if (!priv->cells[idx].name)
+			return -ENOMEM;
+		priv->cells[idx].offset = data_offset + value - data;
+		priv->cells[idx].bytes = strlen(value);
+		priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+		if (!strcmp(var, "ethaddr")) {
+			priv->cells[idx].raw_len = strlen(value);
+			priv->cells[idx].bytes = ETH_ALEN;
+			priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
+		}
+	}
+
+	if (WARN_ON(idx != priv->ncells))
+		priv->ncells = idx;
+
+	return 0;
+}
+
+static int u_boot_env_parse(struct u_boot_env *priv)
+{
+	struct device *dev = priv->dev;
+	size_t crc32_data_offset;
+	size_t crc32_data_len;
+	size_t crc32_offset;
+	size_t data_offset;
+	size_t data_len;
+	uint32_t crc32;
+	uint32_t calc;
+	size_t bytes;
+	uint8_t *buf;
+	int err;
+
+	buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
+	if (!buf) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
+	if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
+		dev_err(dev, "Failed to read from mtd: %d\n", err);
+		goto err_kfree;
+	}
+
+	switch (priv->format) {
+	case U_BOOT_FORMAT_SINGLE:
+		crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
+		crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
+		data_offset = offsetof(struct u_boot_env_image_single, data);
+		break;
+	case U_BOOT_FORMAT_REDUNDANT:
+		crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
+		crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
+		data_offset = offsetof(struct u_boot_env_image_redundant, data);
+		break;
+	case U_BOOT_FORMAT_BROADCOM:
+		crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
+		crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+		data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+		break;
+	}
+	crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
+	crc32_data_len = priv->mtd->size - crc32_data_offset;
+	data_len = priv->mtd->size - data_offset;
+
+	calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+	if (calc != crc32) {
+		dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
+		err = -EINVAL;
+		goto err_kfree;
+	}
+
+	buf[priv->mtd->size - 1] = '\0';
+	err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
+	if (err)
+		dev_err(dev, "Failed to add cells: %d\n", err);
+
+err_kfree:
+	kfree(buf);
+err_out:
+	return err;
+}
+
+static int u_boot_env_probe(struct platform_device *pdev)
+{
+	struct nvmem_config config = {
+		.name = "u-boot-env",
+		.reg_read = u_boot_env_read,
+	};
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct u_boot_env *priv;
+	int err;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = dev;
+
+	priv->format = (uintptr_t)of_device_get_match_data(dev);
+
+	priv->mtd = of_get_mtd_device_by_node(np);
+	if (IS_ERR(priv->mtd)) {
+		dev_err_probe(dev, PTR_ERR(priv->mtd), "Failed to get %pOF MTD\n", np);
+		return PTR_ERR(priv->mtd);
+	}
+
+	err = u_boot_env_parse(priv);
+	if (err)
+		return err;
+
+	config.dev = dev;
+	config.cells = priv->cells;
+	config.ncells = priv->ncells;
+	config.priv = priv;
+	config.size = priv->mtd->size;
+
+	return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id u_boot_env_of_match_table[] = {
+	{ .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
+	{ .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+	{ .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+	{ .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, },
+	{},
+};
+
+static struct platform_driver u_boot_env_driver = {
+	.probe = u_boot_env_probe,
+	.driver = {
+		.name = "u_boot_env",
+		.of_match_table = u_boot_env_of_match_table,
+	},
+};
+module_platform_driver(u_boot_env_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
new file mode 100644
index 0000000000..0a1dbb8053
--- /dev/null
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UniPhier eFuse driver
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct uniphier_efuse_priv {
+	void __iomem *base;
+};
+
+static int uniphier_reg_read(void *context,
+			     unsigned int reg, void *_val, size_t bytes)
+{
+	struct uniphier_efuse_priv *priv = context;
+	u8 *val = _val;
+	int offs;
+
+	for (offs = 0; offs < bytes; offs += sizeof(u8))
+		*val++ = readb(priv->base + reg + offs);
+
+	return 0;
+}
+
+static int uniphier_efuse_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct nvmem_device *nvmem;
+	struct nvmem_config econfig = {};
+	struct uniphier_efuse_priv *priv;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	econfig.stride = 1;
+	econfig.word_size = 1;
+	econfig.read_only = true;
+	econfig.reg_read = uniphier_reg_read;
+	econfig.size = resource_size(res);
+	econfig.priv = priv;
+	econfig.dev = dev;
+	nvmem = devm_nvmem_register(dev, &econfig);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id uniphier_efuse_of_match[] = {
+	{ .compatible = "socionext,uniphier-efuse",},
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
+
+static struct platform_driver uniphier_efuse_driver = {
+	.probe = uniphier_efuse_probe,
+	.driver = {
+		.name = "uniphier-efuse",
+		.of_match_table = uniphier_efuse_of_match,
+	},
+};
+module_platform_driver(uniphier_efuse_driver);
+
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("UniPhier eFuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
new file mode 100644
index 0000000000..ee9c61ae72
--- /dev/null
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Toradex AG.
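+ *
+ * Fuses are read indirectly: the driver programs an OTP timing word,
+ * selects a fuse address through the CTRL register and latches the
+ * result from READ_FUSE_DATA (see vf610_ocotp_read() below).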
+ *
+ * Author: Sanchayan Maity <sanchayan.maity@toradex.com>
+ *
+ * Based on the barebox ocotp driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>
+ * Orex Computed Radiography
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* OCOTP Register Offsets */
+#define OCOTP_CTRL_REG			0x00
+#define OCOTP_CTRL_SET			0x04
+#define OCOTP_CTRL_CLR			0x08
+#define OCOTP_TIMING			0x10
+#define OCOTP_DATA			0x20
+#define OCOTP_READ_CTRL_REG		0x30
+#define OCOTP_READ_FUSE_DATA		0x40
+
+/* OCOTP Register bits and masks */
+#define OCOTP_CTRL_WR_UNLOCK		16
+#define OCOTP_CTRL_WR_UNLOCK_KEY	0x3E77
+#define OCOTP_CTRL_WR_UNLOCK_MASK	GENMASK(31, 16)
+#define OCOTP_CTRL_ADDR			0
+#define OCOTP_CTRL_ADDR_MASK		GENMASK(6, 0)
+#define OCOTP_CTRL_RELOAD_SHADOWS	BIT(10)
+#define OCOTP_CTRL_ERR			BIT(9)
+#define OCOTP_CTRL_BUSY			BIT(8)
+
+#define OCOTP_TIMING_STROBE_READ	16
+#define OCOTP_TIMING_STROBE_READ_MASK	GENMASK(21, 16)
+#define OCOTP_TIMING_RELAX		12
+#define OCOTP_TIMING_RELAX_MASK		GENMASK(15, 12)
+#define OCOTP_TIMING_STROBE_PROG	0
+#define OCOTP_TIMING_STROBE_PROG_MASK	GENMASK(11, 0)
+
+#define OCOTP_READ_CTRL_READ_FUSE	0x1
+
+#define VF610_OCOTP_TIMEOUT		100000
+
+#define BF(value, field)	(((value) << field) & field##_MASK)
+
+#define DEF_RELAX			20
+
+/* Mapping of nvmem offsets to raw fuse addresses */
+static const int base_to_fuse_addr_mappings[][2] = {
+	{0x400, 0x00},
+	{0x410, 0x01},
+	{0x420, 0x02},
+	{0x450, 0x05},
+	{0x4F0, 0x0F},
+	{0x600, 0x20},
+	{0x610, 0x21},
+	{0x620, 0x22},
+	{0x630, 0x23},
+	{0x640, 0x24},
+	{0x650, 0x25},
+	{0x660, 0x26},
+	{0x670, 0x27},
+	{0x6F0, 0x2F},
+	{0x880, 0x38},
+	{0x890, 0x39},
+	{0x8A0, 0x3A},
+	{0x8B0, 0x3B},
+	{0x8C0, 0x3C},
+	{0x8D0, 0x3D},
+	{0x8E0, 0x3E},
+	{0x8F0, 0x3F},
+	{0xC80, 0x78},
+	{0xC90, 0x79},
+	{0xCA0, 0x7A},
+	{0xCB0, 0x7B},
+	{0xCC0, 0x7C},
+	{0xCD0, 0x7D},
+	{0xCE0, 0x7E},
+	{0xCF0, 0x7F},
+};
+
+struct vf610_ocotp {
+	void __iomem *base;
+	struct clk *clk;
+	struct device *dev;
+	struct nvmem_device *nvmem;
+	int timing;
+};
+
+static int vf610_ocotp_wait_busy(void __iomem *base)
+{
+	int timeout = VF610_OCOTP_TIMEOUT;
+
+	while ((readl(base) & OCOTP_CTRL_BUSY) && --timeout)
+		udelay(10);
+
+	if (!timeout) {
+		writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
+		return -ETIMEDOUT;
+	}
+
+	udelay(10);
+
+	return 0;
+}
+
+static int vf610_ocotp_calculate_timing(struct vf610_ocotp *ocotp_dev)
+{
+	u32 clk_rate;
+	u32 relax, strobe_read, strobe_prog;
+	u32 timing;
+
+	clk_rate = clk_get_rate(ocotp_dev->clk);
+
+	/* Refer to the OTP read/write timing parameters section in the TRM */
+	relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+	strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+	strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+
+	timing = BF(relax, OCOTP_TIMING_RELAX);
+	timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
+	timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
+
+	return timing;
+}
+
+static int vf610_get_fuse_address(int base_addr_offset)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(base_to_fuse_addr_mappings); i++) {
+		if (base_to_fuse_addr_mappings[i][0] == base_addr_offset)
+			return base_to_fuse_addr_mappings[i][1];
+	}
+
+	return -EINVAL;
+}
+
+static int vf610_ocotp_read(void *context, unsigned int offset,
+			    void *val, size_t bytes)
+{
+	struct vf610_ocotp *ocotp = context;
+	void __iomem *base = ocotp->base;
+	u32 reg, *buf = val;
+	int fuse_addr;
+	int ret;
+
+	while (bytes > 0) {
+		fuse_addr = vf610_get_fuse_address(offset);
+		if (fuse_addr > 0) {
+			writel(ocotp->timing, base + OCOTP_TIMING);
+			ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
+			if (ret)
+				return ret;
+
+			reg = readl(base + OCOTP_CTRL_REG);
+			reg &= ~OCOTP_CTRL_ADDR_MASK;
+			reg &= ~OCOTP_CTRL_WR_UNLOCK_MASK;
+			reg |= BF(fuse_addr, OCOTP_CTRL_ADDR);
+			writel(reg, base + OCOTP_CTRL_REG);
+
+			writel(OCOTP_READ_CTRL_READ_FUSE,
+			       base + OCOTP_READ_CTRL_REG);
+			ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
+			if (ret)
+				return ret;
+
+			if (readl(base) & OCOTP_CTRL_ERR) {
+				dev_dbg(ocotp->dev, "Error reading from fuse address %x\n",
+					fuse_addr);
+				writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
+			}
+
+			/*
+			 * On error we do not abort: per the TRM the read
+			 * returns 0xBADABADA in that case, so simply hand
+			 * back whatever was latched.
+			 */
+			*buf = readl(base + OCOTP_READ_FUSE_DATA);
+		} else {
+			*buf = 0;
+		}
+
+		buf++;
+		bytes -= 4;
+		offset += 4;
+	}
+
+	return 0;
+}
+
+static struct nvmem_config ocotp_config = {
+	.name = "ocotp",
+	.stride = 4,
+	.word_size = 4,
+	.reg_read = vf610_ocotp_read,
+};
+
+static const struct of_device_id ocotp_of_match[] = {
+	{ .compatible = "fsl,vf610-ocotp", },
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, ocotp_of_match);
+
+static int vf610_ocotp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct vf610_ocotp *ocotp_dev;
+
+	ocotp_dev = devm_kzalloc(dev, sizeof(struct vf610_ocotp), GFP_KERNEL);
+	if (!ocotp_dev)
+		return -ENOMEM;
+
+	ocotp_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(ocotp_dev->base))
+		return PTR_ERR(ocotp_dev->base);
+
+	ocotp_dev->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ocotp_dev->clk)) {
+		dev_err(dev, "failed getting clock, err = %ld\n",
+			PTR_ERR(ocotp_dev->clk));
+		return PTR_ERR(ocotp_dev->clk);
+	}
+	ocotp_dev->dev = dev;
+	ocotp_dev->timing = vf610_ocotp_calculate_timing(ocotp_dev);
+
+	ocotp_config.size = resource_size(res);
+	ocotp_config.priv = ocotp_dev;
+	ocotp_config.dev = dev;
+
+	ocotp_dev->nvmem = devm_nvmem_register(dev, &ocotp_config);
+
+	return PTR_ERR_OR_ZERO(ocotp_dev->nvmem);
+}
+
+static struct platform_driver vf610_ocotp_driver = {
+	.probe = vf610_ocotp_probe,
+	.driver = {
+		.name = "vf610-ocotp",
+		.of_match_table = ocotp_of_match,
+	},
+};
+module_platform_driver(vf610_ocotp_driver);
+MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>");
+MODULE_DESCRIPTION("Vybrid OCOTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644
index 0000000000..f49bb9a26d
--- /dev/null
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
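+ *
+ * Exposes the silicon revision (the low nibble of the chip version
+ * reported by the platform firmware via zynqmp_pm_get_chipid()) as a
+ * single read-only nvmem byte.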
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+	struct device *dev;
+	struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+			     void *val, size_t bytes)
+{
+	int ret;
+	u32 idcode, version;
+	struct zynqmp_nvmem_data *priv = context;
+
+	ret = zynqmp_pm_get_chipid(&idcode, &version);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+	/* The device is one byte; don't write past the caller's buffer */
+	*(u8 *)val = version & SILICON_REVISION_MASK;
+
+	return 0;
+}
+
+static struct nvmem_config econfig = {
+	.name = "zynqmp-nvmem",
+	.owner = THIS_MODULE,
+	.word_size = 1,
+	.size = 1,
+	.read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+	{ .compatible = "xlnx,zynqmp-nvmem-fw", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zynqmp_nvmem_data *priv;
+
+	priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	econfig.dev = dev;
+	econfig.reg_read = zynqmp_nvmem_read;
+	econfig.priv = priv;
+
+	priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+	return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+	.probe = zynqmp_nvmem_probe,
+	.driver = {
+		.name = "zynqmp-nvmem",
+		.of_match_table = zynqmp_nvmem_match,
+	},
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@amd.com>, Nava kishore Manne <nava.kishore.manne@amd.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");