Diffstat (limited to 'drivers/nvmem')
27 files changed, 7867 insertions, 0 deletions
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig new file mode 100644 index 000000000..954d3b4a5 --- /dev/null +++ b/drivers/nvmem/Kconfig @@ -0,0 +1,273 @@ +# SPDX-License-Identifier: GPL-2.0-only +menuconfig NVMEM + bool "NVMEM Support" + help + Support for NVMEM (Non-Volatile Memory) devices like EEPROM, EFUSES... + + This framework is designed to provide a generic interface to NVMEM + from both the Linux kernel and userspace. + + If unsure, say no. + +if NVMEM + +config NVMEM_SYSFS + bool "/sys/bus/nvmem/devices/*/nvmem (sysfs interface)" + depends on SYSFS + default y + help + Say Y here to add a sysfs interface for NVMEM. + + This interface is mostly used by userspace applications to + read/write directly into nvmem. + +config NVMEM_IMX_IIM + tristate "i.MX IC Identification Module support" + depends on ARCH_MXC || COMPILE_TEST + help + This is a driver for the IC Identification Module (IIM) available on + i.MX SoCs, providing access to 4 Kbits of programmable + eFuses. + + This driver can also be built as a module. If so, the module + will be called nvmem-imx-iim. + +config NVMEM_IMX_OCOTP + tristate "i.MX 6/7/8 On-Chip OTP Controller support" + depends on ARCH_MXC || COMPILE_TEST + depends on HAS_IOMEM + help + This is a driver for the On-Chip OTP Controller (OCOTP) available on + i.MX6 SoCs, providing access to 4 Kbits of one-time programmable + eFuses. + + This driver can also be built as a module. If so, the module + will be called nvmem-imx-ocotp. + +config NVMEM_IMX_OCOTP_SCU + tristate "i.MX8 SCU On-Chip OTP Controller support" + depends on IMX_SCU + depends on HAVE_ARM_SMCCC + help + This is a driver for the SCU On-Chip OTP Controller (OCOTP) + available on i.MX8 SoCs. + +config JZ4780_EFUSE + tristate "JZ4780 EFUSE Memory Support" + depends on MACH_INGENIC || COMPILE_TEST + depends on HAS_IOMEM + depends on OF + select REGMAP_MMIO + help + Say Y here to include support for JZ4780 efuse memory found on + all JZ4780 SoC based devices. + To compile this driver as a module, choose M here: the module + will be called nvmem_jz4780_efuse. + +config NVMEM_LPC18XX_EEPROM + tristate "NXP LPC18XX EEPROM Memory Support" + depends on ARCH_LPC18XX || COMPILE_TEST + depends on HAS_IOMEM + help + Say Y here to include support for NXP LPC18xx EEPROM memory found in + NXP LPC185x/3x and LPC435x/3x/2x/1x devices. + To compile this driver as a module, choose M here: the module + will be called nvmem_lpc18xx_eeprom. + +config NVMEM_LPC18XX_OTP + tristate "NXP LPC18XX OTP Memory Support" + depends on ARCH_LPC18XX || COMPILE_TEST + depends on HAS_IOMEM + help + Say Y here to include support for NXP LPC18xx OTP memory found on + all LPC18xx and LPC43xx devices. + To compile this driver as a module, choose M here: the module + will be called nvmem_lpc18xx_otp. + +config NVMEM_MXS_OCOTP + tristate "Freescale MXS On-Chip OTP Memory Support" + depends on ARCH_MXS || COMPILE_TEST + depends on HAS_IOMEM + help + If you say Y here, you will get read-only access to the + One Time Programmable memory pages that are stored + on the Freescale i.MX23/i.MX28 processor. + + This driver can also be built as a module. If so, the module + will be called nvmem-mxs-ocotp. + +config MTK_EFUSE + tristate "Mediatek SoCs EFUSE support" + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on HAS_IOMEM + help + This is a driver to access hardware-related data like sensor + calibration, HDMI impedance, etc. + + This driver can also be built as a module. If so, the module + will be called nvmem_mtk-efuse.
+ +config QCOM_QFPROM + tristate "QCOM QFPROM Support" + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM + help + Say y here to enable QFPROM support. The QFPROM provides access + functions for QFPROM data to the rest of the drivers via the + nvmem interface. + + This driver can also be built as a module. If so, the module + will be called nvmem_qfprom. + +config NVMEM_SPMI_SDAM + tristate "SPMI SDAM Support" + depends on SPMI + help + This driver supports the Shared Direct Access Memory Module on + Qualcomm Technologies, Inc. PMICs. It provides clients with + an interface to read/write to the SDAM module's shared memory. + +config ROCKCHIP_EFUSE + tristate "Rockchip eFuse Support" + depends on ARCH_ROCKCHIP || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of Rockchip SoCs + from eFuse, such as cpu-leakage. + + This driver can also be built as a module. If so, the module + will be called nvmem_rockchip_efuse. + +config ROCKCHIP_OTP + tristate "Rockchip OTP controller support" + depends on ARCH_ROCKCHIP || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of Rockchip SoCs + from OTP, such as cpu-leakage. + + This driver can also be built as a module. If so, the module + will be called nvmem_rockchip_otp. + +config NVMEM_BCM_OCOTP + tristate "Broadcom On-Chip OTP Controller support" + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on HAS_IOMEM + default ARCH_BCM_IPROC + help + Say y here to enable read/write access to the Broadcom OTP + controller. + + This driver can also be built as a module. If so, the module + will be called nvmem-bcm-ocotp. + +config NVMEM_STM32_ROMEM + tristate "STMicroelectronics STM32 factory-programmed memory support" + depends on ARCH_STM32 || COMPILE_TEST + help + Say y here to enable read-only access to the STMicroelectronics + STM32 factory-programmed memory area. + + This driver can also be built as a module. If so, the module + will be called nvmem-stm32-romem. + +config NVMEM_SUNXI_SID + tristate "Allwinner SoCs SID support" + depends on ARCH_SUNXI + help + This is a driver for the 'security ID' available on various Allwinner + devices. + + This driver can also be built as a module. If so, the module + will be called nvmem_sunxi_sid. + +config UNIPHIER_EFUSE + tristate "UniPhier SoCs eFuse support" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of UniPhier SoCs + from eFuse. + + This driver can also be built as a module. If so, the module + will be called nvmem-uniphier-efuse. + +config NVMEM_VF610_OCOTP + tristate "VF610 SoC OCOTP support" + depends on SOC_VF610 || COMPILE_TEST + depends on HAS_IOMEM + help + This is a driver for the 'OCOTP' peripheral available on Vybrid + devices like VF5xx and VF6xx. + + This driver can also be built as a module. If so, the module will + be called nvmem-vf610-ocotp. + +config MESON_EFUSE + tristate "Amlogic Meson GX eFuse Support" + depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM + help + This is a driver to retrieve specific values from the eFuse found on + the Amlogic Meson GX SoCs. + + This driver can also be built as a module. If so, the module + will be called nvmem_meson_efuse. + +config MESON_MX_EFUSE + tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support" + depends on ARCH_MESON || COMPILE_TEST + help + This is a driver to retrieve specific values from the eFuse found on + the Amlogic Meson6, Meson8 and Meson8b SoCs.
+ + This driver can also be built as a module. If so, the module + will be called nvmem_meson_mx_efuse. + +config NVMEM_SNVS_LPGPR + tristate "Support for Low Power General Purpose Register" + depends on ARCH_MXC || COMPILE_TEST + help + This is a driver for the Low Power General Purpose Register (LPGPR) + found in the Secure Non-Volatile Storage (SNVS) block of i.MX6 and + i.MX7 SoCs. + + This driver can also be built as a module. If so, the module + will be called nvmem-snvs-lpgpr. + +config RAVE_SP_EEPROM + tristate "Rave SP EEPROM Support" + depends on RAVE_SP_CORE + help + Say y here to enable Rave SP EEPROM support. + +config SC27XX_EFUSE + tristate "Spreadtrum SC27XX eFuse Support" + depends on MFD_SC27XX_PMIC || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of Spreadtrum + SC27XX PMICs from eFuse. + + This driver can also be built as a module. If so, the module + will be called nvmem-sc27xx-efuse. + +config NVMEM_ZYNQMP + bool "Xilinx ZYNQMP SoC nvmem firmware support" + depends on ARCH_ZYNQMP + help + This is a driver to access hardware-related data like + SoC revision, IDCODE, etc. by using the firmware + interface. + + If unsure, say no. + +config SPRD_EFUSE + tristate "Spreadtrum SoC eFuse Support" + depends on ARCH_SPRD || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of Spreadtrum + SoCs from eFuse. + + This driver can also be built as a module. If so, the module + will be called nvmem-sprd-efuse. + +endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile new file mode 100644 index 000000000..a7c377218 --- /dev/null +++ b/drivers/nvmem/Makefile @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for nvmem drivers.
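+# Each entry below pairs an obj-$(CONFIG_FOO) += <module>.o line with a +# <module>-y := <source>.o line, so a module's name can differ from the +# name of its source file.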
+# + +obj-$(CONFIG_NVMEM) += nvmem_core.o +nvmem_core-y := core.o + +# Devices +obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o +nvmem-bcm-ocotp-y := bcm-ocotp.o +obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o +nvmem-imx-iim-y := imx-iim.o +obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o +nvmem-imx-ocotp-y := imx-ocotp.o +obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o +nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o +obj-$(CONFIG_JZ4780_EFUSE) += nvmem_jz4780_efuse.o +nvmem_jz4780_efuse-y := jz4780-efuse.o +obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o +nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o +obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o +nvmem_lpc18xx_otp-y := lpc18xx_otp.o +obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o +nvmem-mxs-ocotp-y := mxs-ocotp.o +obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o +nvmem_mtk-efuse-y := mtk-efuse.o +obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o +nvmem_qfprom-y := qfprom.o +obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o +nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o +obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o +nvmem_rockchip_efuse-y := rockchip-efuse.o +obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o +nvmem-rockchip-otp-y := rockchip-otp.o +obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o +nvmem_stm32_romem-y := stm32-romem.o +obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o +nvmem_sunxi_sid-y := sunxi_sid.o +obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o +nvmem-uniphier-efuse-y := uniphier-efuse.o +obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o +nvmem-vf610-ocotp-y := vf610-ocotp.o +obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o +nvmem_meson_efuse-y := meson-efuse.o +obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o +nvmem_meson_mx_efuse-y := meson-mx-efuse.o +obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o +nvmem_snvs_lpgpr-y := snvs_lpgpr.o +obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o +nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o +obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o +nvmem-sc27xx-efuse-y := sc27xx-efuse.o +obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o +nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o +obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o +nvmem_sprd_efuse-y := sprd-efuse.o diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c new file mode 100644 index 000000000..a80975115 --- /dev/null +++ b/drivers/nvmem/bcm-ocotp.c @@ -0,0 +1,326 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +/* + * # of tries for OTP Status. The time to execute a command varies. The slowest + * commands are writes which also vary based on the # of bits turned on. Writing + * 0xffffffff takes ~3800 us. 
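+ * With the 1 us delay per poll in poll_cpu_status(), OTPC_RETRIES polls + * give a timeout budget of roughly 5 ms, comfortably above that worst case.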
+ */ +#define OTPC_RETRIES 5000 + +/* Sequence to enable OTP program */ +#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd } + +/* OTPC Commands */ +#define OTPC_CMD_READ 0x0 +#define OTPC_CMD_OTP_PROG_ENABLE 0x2 +#define OTPC_CMD_OTP_PROG_DISABLE 0x3 +#define OTPC_CMD_PROGRAM 0x8 + +/* OTPC Status Bits */ +#define OTPC_STAT_CMD_DONE BIT(1) +#define OTPC_STAT_PROG_OK BIT(2) + +/* OTPC register definition */ +#define OTPC_MODE_REG_OFFSET 0x0 +#define OTPC_MODE_REG_OTPC_MODE 0 +#define OTPC_COMMAND_OFFSET 0x4 +#define OTPC_COMMAND_COMMAND_WIDTH 6 +#define OTPC_CMD_START_OFFSET 0x8 +#define OTPC_CMD_START_START 0 +#define OTPC_CPU_STATUS_OFFSET 0xc +#define OTPC_CPUADDR_REG_OFFSET 0x28 +#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16 +#define OTPC_CPU_WRITE_REG_OFFSET 0x2c + +#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1) +#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1) + + +struct otpc_map { + /* in words. */ + u32 otpc_row_size; + /* 128 bit row / 4 words support. */ + u16 data_r_offset[4]; + /* 128 bit row / 4 words support. */ + u16 data_w_offset[4]; +}; + +static struct otpc_map otp_map = { + .otpc_row_size = 1, + .data_r_offset = {0x10}, + .data_w_offset = {0x2c}, +}; + +static struct otpc_map otp_map_v2 = { + .otpc_row_size = 2, + .data_r_offset = {0x10, 0x5c}, + .data_w_offset = {0x2c, 0x64}, +}; + +struct otpc_priv { + struct device *dev; + void __iomem *base; + const struct otpc_map *map; + struct nvmem_config *config; +}; + +static inline void set_command(void __iomem *base, u32 command) +{ + writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET); +} + +static inline void set_cpu_address(void __iomem *base, u32 addr) +{ + writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET); +} + +static inline void set_start_bit(void __iomem *base) +{ + writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET); +} + +static inline void reset_start_bit(void __iomem *base) +{ + writel(0, base + OTPC_CMD_START_OFFSET); +} + +static inline void write_cpu_data(void __iomem *base, u32 value) +{ + writel(value, base + OTPC_CPU_WRITE_REG_OFFSET); +} + +static int poll_cpu_status(void __iomem *base, u32 value) +{ + u32 status; + u32 retries; + + for (retries = 0; retries < OTPC_RETRIES; retries++) { + status = readl(base + OTPC_CPU_STATUS_OFFSET); + if (status & value) + break; + udelay(1); + } + if (retries == OTPC_RETRIES) + return -EAGAIN; + + return 0; +} + +static int enable_ocotp_program(void __iomem *base) +{ + static const u32 vals[] = OTPC_PROG_EN_SEQ; + int i; + int ret; + + /* Write the magic sequence to enable programming */ + set_command(base, OTPC_CMD_OTP_PROG_ENABLE); + for (i = 0; i < ARRAY_SIZE(vals); i++) { + write_cpu_data(base, vals[i]); + set_start_bit(base); + ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE); + reset_start_bit(base); + if (ret) + return ret; + } + + return poll_cpu_status(base, OTPC_STAT_PROG_OK); +} + +static int disable_ocotp_program(void __iomem *base) +{ + int ret; + + set_command(base, OTPC_CMD_OTP_PROG_DISABLE); + set_start_bit(base); + ret = poll_cpu_status(base, OTPC_STAT_PROG_OK); + reset_start_bit(base); + + return ret; +} + +static int bcm_otpc_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct otpc_priv *priv = context; + u32 *buf = val; + u32 bytes_read; + u32 address = offset / priv->config->word_size; + int i, ret; + + for (bytes_read = 0; bytes_read < bytes;) { + set_command(priv->base, OTPC_CMD_READ); + set_cpu_address(priv->base, address++); + 
set_start_bit(priv->base); + ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); + if (ret) { + dev_err(priv->dev, "otp read error: 0x%x", ret); + return -EIO; + } + + for (i = 0; i < priv->map->otpc_row_size; i++) { + *buf++ = readl(priv->base + + priv->map->data_r_offset[i]); + bytes_read += sizeof(*buf); + } + + reset_start_bit(priv->base); + } + + return 0; +} + +static int bcm_otpc_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct otpc_priv *priv = context; + u32 *buf = val; + u32 bytes_written; + u32 address = offset / priv->config->word_size; + int i, ret; + + if (offset % priv->config->word_size) + return -EINVAL; + + ret = enable_ocotp_program(priv->base); + if (ret) + return -EIO; + + for (bytes_written = 0; bytes_written < bytes;) { + set_command(priv->base, OTPC_CMD_PROGRAM); + set_cpu_address(priv->base, address++); + for (i = 0; i < priv->map->otpc_row_size; i++) { + writel(*buf, priv->base + priv->map->data_w_offset[i]); + buf++; + bytes_written += sizeof(*buf); + } + set_start_bit(priv->base); + ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); + reset_start_bit(priv->base); + if (ret) { + dev_err(priv->dev, "otp write error: 0x%x", ret); + return -EIO; + } + } + + disable_ocotp_program(priv->base); + + return 0; +} + +static struct nvmem_config bcm_otpc_nvmem_config = { + .name = "bcm-ocotp", + .read_only = false, + .word_size = 4, + .stride = 4, + .reg_read = bcm_otpc_read, + .reg_write = bcm_otpc_write, +}; + +static const struct of_device_id bcm_otpc_dt_ids[] = { + { .compatible = "brcm,ocotp", .data = &otp_map }, + { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); + +static const struct acpi_device_id bcm_otpc_acpi_ids[] = { + { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map }, + { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids); + +static int bcm_otpc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct otpc_priv *priv; + struct nvmem_device *nvmem; + int err; + u32 num_words; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->map = device_get_match_data(dev); + if (!priv->map) + return -ENODEV; + + /* Get OTP base address register. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) { + dev_err(dev, "unable to map I/O memory\n"); + return PTR_ERR(priv->base); + } + + /* Enable CPU access to OTPC. */ + writel(readl(priv->base + OTPC_MODE_REG_OFFSET) | + BIT(OTPC_MODE_REG_OTPC_MODE), + priv->base + OTPC_MODE_REG_OFFSET); + reset_start_bit(priv->base); + + /* Read size of memory in words. 
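The count is taken from the "brcm,ocotp-size" device property.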
*/ + err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words); + if (err) { + dev_err(dev, "size parameter not specified\n"); + return -EINVAL; + } else if (num_words == 0) { + dev_err(dev, "size must be > 0\n"); + return -EINVAL; + } + + bcm_otpc_nvmem_config.size = 4 * num_words; + bcm_otpc_nvmem_config.dev = dev; + bcm_otpc_nvmem_config.priv = priv; + + if (priv->map == &otp_map_v2) { + bcm_otpc_nvmem_config.word_size = 8; + bcm_otpc_nvmem_config.stride = 8; + } + + priv->config = &bcm_otpc_nvmem_config; + + nvmem = devm_nvmem_register(dev, &bcm_otpc_nvmem_config); + if (IS_ERR(nvmem)) { + dev_err(dev, "error registering nvmem config\n"); + return PTR_ERR(nvmem); + } + + return 0; +} + +static struct platform_driver bcm_otpc_driver = { + .probe = bcm_otpc_probe, + .driver = { + .name = "brcm-otpc", + .of_match_table = bcm_otpc_dt_ids, + .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids), + }, +}; +module_platform_driver(bcm_otpc_driver); + +MODULE_DESCRIPTION("Broadcom OTPC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c new file mode 100644 index 000000000..1505c7451 --- /dev/null +++ b/drivers/nvmem/core.c @@ -0,0 +1,1677 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * nvmem framework core. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/kref.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/slab.h> + +struct nvmem_device { + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + void *priv; +}; + +#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) + +#define FLAG_COMPAT BIT(0) + +struct nvmem_cell { + const char *name; + int offset; + int bytes; + int bit_offset; + int nbits; + struct device_node *np; + struct nvmem_device *nvmem; + struct list_head node; +}; + +static DEFINE_MUTEX(nvmem_mutex); +static DEFINE_IDA(nvmem_ida); + +static DEFINE_MUTEX(nvmem_cell_mutex); +static LIST_HEAD(nvmem_cell_tables); + +static DEFINE_MUTEX(nvmem_lookup_mutex); +static LIST_HEAD(nvmem_lookup_list); + +static BLOCKING_NOTIFIER_HEAD(nvmem_notifier); + +static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + if (nvmem->reg_read) + return nvmem->reg_read(nvmem->priv, offset, val, bytes); + + return -EINVAL; +} + +static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + int ret; + + if (nvmem->reg_write) { + gpiod_set_value_cansleep(nvmem->wp_gpio, 0); + ret = nvmem->reg_write(nvmem->priv, offset, val, bytes); + gpiod_set_value_cansleep(nvmem->wp_gpio, 1); + return ret; + } + + return -EINVAL; +} + +#ifdef CONFIG_NVMEM_SYSFS +static const char * const nvmem_type_str[] = { + [NVMEM_TYPE_UNKNOWN] = "Unknown", + [NVMEM_TYPE_EEPROM] = "EEPROM", + [NVMEM_TYPE_OTP] = "OTP", + [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed", +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC 
+static struct lock_class_key eeprom_lock_key; +#endif + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvmem_device *nvmem = to_nvmem_device(dev); + + return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]); +} + +static DEVICE_ATTR_RO(type); + +static struct attribute *nvmem_attrs[] = { + &dev_attr_type.attr, + NULL, +}; + +static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t count) +{ + struct device *dev; + struct nvmem_device *nvmem; + int rc; + + if (attr->private) + dev = attr->private; + else + dev = kobj_to_dev(kobj); + nvmem = to_nvmem_device(dev); + + /* Stop the user from reading */ + if (pos >= nvmem->size) + return 0; + + if (!IS_ALIGNED(pos, nvmem->stride)) + return -EINVAL; + + if (count < nvmem->word_size) + return -EINVAL; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + if (!nvmem->reg_read) + return -EPERM; + + rc = nvmem_reg_read(nvmem, pos, buf, count); + + if (rc) + return rc; + + return count; +} + +static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t count) +{ + struct device *dev; + struct nvmem_device *nvmem; + int rc; + + if (attr->private) + dev = attr->private; + else + dev = kobj_to_dev(kobj); + nvmem = to_nvmem_device(dev); + + /* Stop the user from writing */ + if (pos >= nvmem->size) + return -EFBIG; + + if (!IS_ALIGNED(pos, nvmem->stride)) + return -EINVAL; + + if (count < nvmem->word_size) + return -EINVAL; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + if (!nvmem->reg_write) + return -EPERM; + + rc = nvmem_reg_write(nvmem, pos, buf, count); + + if (rc) + return rc; + + return count; +} + +static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem) +{ + umode_t mode = 0400; + + if (!nvmem->root_only) + mode |= 0044; + + if (!nvmem->read_only) + mode |= 0200; + + if (!nvmem->reg_write) + mode &= ~0200; + + if (!nvmem->reg_read) + mode &= ~0444; + + return mode; +} + +static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, + struct bin_attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct nvmem_device *nvmem = to_nvmem_device(dev); + + attr->size = nvmem->size; + + return nvmem_bin_attr_get_umode(nvmem); +} + +/* default read/write permissions */ +static struct bin_attribute bin_attr_rw_nvmem = { + .attr = { + .name = "nvmem", + .mode = 0644, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_attributes[] = { + &bin_attr_rw_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_group = { + .bin_attrs = nvmem_bin_attributes, + .attrs = nvmem_attrs, + .is_bin_visible = nvmem_bin_attr_is_visible, +}; + +static const struct attribute_group *nvmem_dev_groups[] = { + &nvmem_bin_group, + NULL, +}; + +static struct bin_attribute bin_attr_nvmem_eeprom_compat = { + .attr = { + .name = "eeprom", + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +/* + * nvmem_setup_compat() - Create an additional binary entry in + * drivers sys directory, to be backwards compatible with the older + * drivers/misc/eeprom drivers. 
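+ * The compat entry is created as an "eeprom" bin_attribute on the + * provider's config->base_dev.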
+ */ +static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + int rval; + + if (!config->compat) + return 0; + + if (!config->base_dev) + return -EINVAL; + + nvmem->eeprom = bin_attr_nvmem_eeprom_compat; + nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem); + nvmem->eeprom.size = nvmem->size; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + nvmem->eeprom.attr.key = &eeprom_lock_key; +#endif + nvmem->eeprom.private = &nvmem->dev; + nvmem->base_dev = config->base_dev; + + rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); + if (rval) { + dev_err(&nvmem->dev, + "Failed to create eeprom binary file %d\n", rval); + return rval; + } + + nvmem->flags |= FLAG_COMPAT; + + return 0; +} + +static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + if (config->compat) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); +} + +#else /* CONFIG_NVMEM_SYSFS */ + +static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + return -ENOSYS; +} +static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ +} + +#endif /* CONFIG_NVMEM_SYSFS */ + +static void nvmem_release(struct device *dev) +{ + struct nvmem_device *nvmem = to_nvmem_device(dev); + + ida_free(&nvmem_ida, nvmem->id); + gpiod_put(nvmem->wp_gpio); + kfree(nvmem); +} + +static const struct device_type nvmem_provider_type = { + .release = nvmem_release, +}; + +static struct bus_type nvmem_bus_type = { + .name = "nvmem", +}; + +static void nvmem_cell_drop(struct nvmem_cell *cell) +{ + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell); + mutex_lock(&nvmem_mutex); + list_del(&cell->node); + mutex_unlock(&nvmem_mutex); + of_node_put(cell->np); + kfree_const(cell->name); + kfree(cell); +} + +static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) +{ + struct nvmem_cell *cell, *p; + + list_for_each_entry_safe(cell, p, &nvmem->cells, node) + nvmem_cell_drop(cell); +} + +static void nvmem_cell_add(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_mutex); + list_add_tail(&cell->node, &cell->nvmem->cells); + mutex_unlock(&nvmem_mutex); + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell); +} + +static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) +{ + cell->nvmem = nvmem; + cell->offset = info->offset; + cell->bytes = info->bytes; + cell->name = info->name; + + cell->bit_offset = info->bit_offset; + cell->nbits = info->nbits; + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name ?: "<unknown>", nvmem->stride); + return -EINVAL; + } + + return 0; +} + +static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) +{ + int err; + + err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell); + if (err) + return err; + + cell->name = kstrdup_const(info->name, GFP_KERNEL); + if (!cell->name) + return -ENOMEM; + + return 0; +} + +/** + * nvmem_add_cells() - Add cell information to an nvmem device + * + * @nvmem: nvmem device to add cells to. + * @info: nvmem cell info to add to the device + * @ncells: number of cells in info + * + * Return: 0 or negative error code on failure. 
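+ * + * Example (an illustrative sketch; the cell name, offset and size are + * placeholders): + * + *	static const struct nvmem_cell_info demo_cells[] = { + *		{ .name = "mac-address", .offset = 0x40, .bytes = 6 }, + *	}; + * + * Providers normally pass such an array through nvmem_config.cells and + * nvmem_config.ncells, which reaches this helper via nvmem_register().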
+ */ +static int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + int ncells) +{ + struct nvmem_cell **cells; + int i, rval; + + cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL); + if (!cells) + return -ENOMEM; + + for (i = 0; i < ncells; i++) { + cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); + if (!cells[i]) { + rval = -ENOMEM; + goto err; + } + + rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); + if (rval) { + kfree(cells[i]); + goto err; + } + + nvmem_cell_add(cells[i]); + } + + /* remove tmp array */ + kfree(cells); + + return 0; +err: + while (i--) + nvmem_cell_drop(cells[i]); + + kfree(cells); + + return rval; +} + +/** + * nvmem_register_notifier() - Register a notifier block for nvmem events. + * + * @nb: notifier block to be called on nvmem events. + * + * Return: 0 on success, negative error number on failure. + */ +int nvmem_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&nvmem_notifier, nb); +} +EXPORT_SYMBOL_GPL(nvmem_register_notifier); + +/** + * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events. + * + * @nb: notifier block to be unregistered. + * + * Return: 0 on success, negative error number on failure. + */ +int nvmem_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&nvmem_notifier, nb); +} +EXPORT_SYMBOL_GPL(nvmem_unregister_notifier); + +static int nvmem_add_cells_from_table(struct nvmem_device *nvmem) +{ + const struct nvmem_cell_info *info; + struct nvmem_cell_table *table; + struct nvmem_cell *cell; + int rval = 0, i; + + mutex_lock(&nvmem_cell_mutex); + list_for_each_entry(table, &nvmem_cell_tables, node) { + if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) { + for (i = 0; i < table->ncells; i++) { + info = &table->cells[i]; + + cell = kzalloc(sizeof(*cell), GFP_KERNEL); + if (!cell) { + rval = -ENOMEM; + goto out; + } + + rval = nvmem_cell_info_to_nvmem_cell(nvmem, + info, + cell); + if (rval) { + kfree(cell); + goto out; + } + + nvmem_cell_add(cell); + } + } + } + +out: + mutex_unlock(&nvmem_cell_mutex); + return rval; +} + +static struct nvmem_cell * +nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id) +{ + struct nvmem_cell *iter, *cell = NULL; + + mutex_lock(&nvmem_mutex); + list_for_each_entry(iter, &nvmem->cells, node) { + if (strcmp(cell_id, iter->name) == 0) { + cell = iter; + break; + } + } + mutex_unlock(&nvmem_mutex); + + return cell; +} + +static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) +{ + struct device_node *parent, *child; + struct device *dev = &nvmem->dev; + struct nvmem_cell *cell; + const __be32 *addr; + int len; + + parent = dev->of_node; + + for_each_child_of_node(parent, child) { + addr = of_get_property(child, "reg", &len); + if (!addr) + continue; + if (len < 2 * sizeof(u32)) { + dev_err(dev, "nvmem: invalid reg on %pOF\n", child); + of_node_put(child); + return -EINVAL; + } + + cell = kzalloc(sizeof(*cell), GFP_KERNEL); + if (!cell) { + of_node_put(child); + return -ENOMEM; + } + + cell->nvmem = nvmem; + cell->offset = be32_to_cpup(addr++); + cell->bytes = be32_to_cpup(addr); + cell->name = kasprintf(GFP_KERNEL, "%pOFn", child); + + addr = of_get_property(child, "bits", &len); + if (addr && len == (2 * sizeof(u32))) { + cell->bit_offset = be32_to_cpup(addr++); + cell->nbits = be32_to_cpup(addr); + } + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP( + cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if 
(!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(dev, "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + /* Cells already added will be freed later. */ + kfree_const(cell->name); + kfree(cell); + of_node_put(child); + return -EINVAL; + } + + cell->np = of_node_get(child); + nvmem_cell_add(cell); + } + + return 0; +} + +/** + * nvmem_register() - Register a nvmem device for given nvmem_config. + * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. + */ + +struct nvmem_device *nvmem_register(const struct nvmem_config *config) +{ + struct nvmem_device *nvmem; + int rval; + + if (!config->dev) + return ERR_PTR(-EINVAL); + + if (!config->reg_read && !config->reg_write) + return ERR_PTR(-EINVAL); + + nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); + if (!nvmem) + return ERR_PTR(-ENOMEM); + + rval = ida_alloc(&nvmem_ida, GFP_KERNEL); + if (rval < 0) { + kfree(nvmem); + return ERR_PTR(rval); + } + + nvmem->id = rval; + + nvmem->dev.type = &nvmem_provider_type; + nvmem->dev.bus = &nvmem_bus_type; + nvmem->dev.parent = config->dev; + + device_initialize(&nvmem->dev); + + if (!config->ignore_wp) + nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", + GPIOD_OUT_HIGH); + if (IS_ERR(nvmem->wp_gpio)) { + rval = PTR_ERR(nvmem->wp_gpio); + nvmem->wp_gpio = NULL; + goto err_put_device; + } + + kref_init(&nvmem->refcnt); + INIT_LIST_HEAD(&nvmem->cells); + + nvmem->owner = config->owner; + if (!nvmem->owner && config->dev->driver) + nvmem->owner = config->dev->driver->owner; + nvmem->stride = config->stride ?: 1; + nvmem->word_size = config->word_size ?: 1; + nvmem->size = config->size; + nvmem->root_only = config->root_only; + nvmem->priv = config->priv; + nvmem->type = config->type; + nvmem->reg_read = config->reg_read; + nvmem->reg_write = config->reg_write; + if (!config->no_of_node) + nvmem->dev.of_node = config->dev->of_node; + + switch (config->id) { + case NVMEM_DEVID_NONE: + rval = dev_set_name(&nvmem->dev, "%s", config->name); + break; + case NVMEM_DEVID_AUTO: + rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); + break; + default: + rval = dev_set_name(&nvmem->dev, "%s%d", + config->name ? : "nvmem", + config->name ? 
config->id : nvmem->id); + break; + } + + if (rval) + goto err_put_device; + + nvmem->read_only = device_property_present(config->dev, "read-only") || + config->read_only || !nvmem->reg_write; + +#ifdef CONFIG_NVMEM_SYSFS + nvmem->dev.groups = nvmem_dev_groups; +#endif + + if (config->compat) { + rval = nvmem_sysfs_setup_compat(nvmem, config); + if (rval) + goto err_put_device; + } + + if (config->cells) { + rval = nvmem_add_cells(nvmem, config->cells, config->ncells); + if (rval) + goto err_remove_cells; + } + + rval = nvmem_add_cells_from_table(nvmem); + if (rval) + goto err_remove_cells; + + rval = nvmem_add_cells_from_of(nvmem); + if (rval) + goto err_remove_cells; + + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) + goto err_remove_cells; + + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); + + return nvmem; + +err_remove_cells: + nvmem_device_remove_all_cells(nvmem); + if (config->compat) + nvmem_sysfs_remove_compat(nvmem, config); +err_put_device: + put_device(&nvmem->dev); + + return ERR_PTR(rval); +} +EXPORT_SYMBOL_GPL(nvmem_register); + +static void nvmem_device_release(struct kref *kref) +{ + struct nvmem_device *nvmem; + + nvmem = container_of(kref, struct nvmem_device, refcnt); + + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem); + + if (nvmem->flags & FLAG_COMPAT) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); + + nvmem_device_remove_all_cells(nvmem); + device_unregister(&nvmem->dev); +} + +/** + * nvmem_unregister() - Unregister previously registered nvmem device + * + * @nvmem: Pointer to previously registered nvmem device. + */ +void nvmem_unregister(struct nvmem_device *nvmem) +{ + kref_put(&nvmem->refcnt, nvmem_device_release); +} +EXPORT_SYMBOL_GPL(nvmem_unregister); + +static void devm_nvmem_release(struct device *dev, void *res) +{ + nvmem_unregister(*(struct nvmem_device **)res); +} + +/** + * devm_nvmem_register() - Register a managed nvmem device for given + * nvmem_config. + * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @dev: Device that uses the nvmem device. + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. + */ +struct nvmem_device *devm_nvmem_register(struct device *dev, + const struct nvmem_config *config) +{ + struct nvmem_device **ptr, *nvmem; + + ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + nvmem = nvmem_register(config); + + if (!IS_ERR(nvmem)) { + *ptr = nvmem; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_register); + +static int devm_nvmem_match(struct device *dev, void *res, void *data) +{ + struct nvmem_device **r = res; + + return *r == data; +} + +/** + * devm_nvmem_unregister() - Unregister previously registered managed nvmem + * device. + * + * @dev: Device that uses the nvmem device. + * @nvmem: Pointer to previously registered nvmem device. + * + * Return: Will be negative on error or zero on success. 
+ */ +int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) +{ + return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem); +} +EXPORT_SYMBOL(devm_nvmem_unregister); + +static struct nvmem_device *__nvmem_device_get(void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct nvmem_device *nvmem = NULL; + struct device *dev; + + mutex_lock(&nvmem_mutex); + dev = bus_find_device(&nvmem_bus_type, NULL, data, match); + if (dev) + nvmem = to_nvmem_device(dev); + mutex_unlock(&nvmem_mutex); + if (!nvmem) + return ERR_PTR(-EPROBE_DEFER); + + if (!try_module_get(nvmem->owner)) { + dev_err(&nvmem->dev, + "could not increase module refcount for cell %s\n", + nvmem_dev_name(nvmem)); + + put_device(&nvmem->dev); + return ERR_PTR(-EINVAL); + } + + kref_get(&nvmem->refcnt); + + return nvmem; +} + +static void __nvmem_device_put(struct nvmem_device *nvmem) +{ + put_device(&nvmem->dev); + module_put(nvmem->owner); + kref_put(&nvmem->refcnt, nvmem_device_release); +} + +#if IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_device_get() - Get nvmem device from a given id + * + * @np: Device tree node that uses the nvmem device. + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) +{ + struct device_node *nvmem_np; + struct nvmem_device *nvmem; + int index = 0; + + if (id) + index = of_property_match_string(np, "nvmem-names", id); + + nvmem_np = of_parse_phandle(np, "nvmem", index); + if (!nvmem_np) + return ERR_PTR(-ENOENT); + + nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); + of_node_put(nvmem_np); + return nvmem; +} +EXPORT_SYMBOL_GPL(of_nvmem_device_get); +#endif + +/** + * nvmem_device_get() - Get nvmem device from a given id + * + * @dev: Device that uses the nvmem device. + * @dev_name: name of the requested nvmem device. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) +{ + if (dev->of_node) { /* try dt first */ + struct nvmem_device *nvmem; + + nvmem = of_nvmem_device_get(dev->of_node, dev_name); + + if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) + return nvmem; + + } + + return __nvmem_device_get((void *)dev_name, device_match_name); +} +EXPORT_SYMBOL_GPL(nvmem_device_get); + +/** + * nvmem_device_find() - Find nvmem device with matching function + * + * @data: Data to pass to match function + * @match: Callback function to check device + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *nvmem_device_find(void *data, + int (*match)(struct device *dev, const void *data)) +{ + return __nvmem_device_get(data, match); +} +EXPORT_SYMBOL_GPL(nvmem_device_find); + +static int devm_nvmem_device_match(struct device *dev, void *res, void *data) +{ + struct nvmem_device **nvmem = res; + + if (WARN_ON(!nvmem || !*nvmem)) + return 0; + + return *nvmem == data; +} + +static void devm_nvmem_device_release(struct device *dev, void *res) +{ + nvmem_device_put(*(struct nvmem_device **)res); +} + +/** + * devm_nvmem_device_put() - put an already-obtained nvmem device + * + * @dev: Device that uses the nvmem device. + * @nvmem: pointer to nvmem device obtained from devm_nvmem_device_get(), + * that needs to be released.
+ */ +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_device_release, + devm_nvmem_device_match, nvmem); + + WARN_ON(ret); +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_put); + +/** + * nvmem_device_put() - put an already-obtained nvmem device + * + * @nvmem: pointer to nvmem device that needs to be released. + */ +void nvmem_device_put(struct nvmem_device *nvmem) +{ + __nvmem_device_put(nvmem); +} +EXPORT_SYMBOL_GPL(nvmem_device_put); + +/** + * devm_nvmem_device_get() - Get nvmem device for a given id + * + * @dev: Device that requests the nvmem device. + * @id: name id for the requested nvmem device. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. The nvmem_device will be released automatically once the + * device is freed. + */ +struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) +{ + struct nvmem_device **ptr, *nvmem; + + ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + nvmem = nvmem_device_get(dev, id); + if (!IS_ERR(nvmem)) { + *ptr = nvmem; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_get); + +static struct nvmem_cell * +nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) +{ + struct nvmem_cell *cell = ERR_PTR(-ENOENT); + struct nvmem_cell_lookup *lookup; + struct nvmem_device *nvmem; + const char *dev_id; + + if (!dev) + return ERR_PTR(-EINVAL); + + dev_id = dev_name(dev); + + mutex_lock(&nvmem_lookup_mutex); + + list_for_each_entry(lookup, &nvmem_lookup_list, node) { + if ((strcmp(lookup->dev_id, dev_id) == 0) && + (strcmp(lookup->con_id, con_id) == 0)) { + /* This is the right entry. */ + nvmem = __nvmem_device_get((void *)lookup->nvmem_name, + device_match_name); + if (IS_ERR(nvmem)) { + /* Provider may not be registered yet. */ + cell = ERR_CAST(nvmem); + break; + } + + cell = nvmem_find_cell_by_name(nvmem, + lookup->cell_name); + if (!cell) { + __nvmem_device_put(nvmem); + cell = ERR_PTR(-ENOENT); + } + break; + } + } + + mutex_unlock(&nvmem_lookup_mutex); + return cell; +} + +#if IS_ENABLED(CONFIG_OF) +static struct nvmem_cell * +nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) +{ + struct nvmem_cell *iter, *cell = NULL; + + mutex_lock(&nvmem_mutex); + list_for_each_entry(iter, &nvmem->cells, node) { + if (np == iter->np) { + cell = iter; + break; + } + } + mutex_unlock(&nvmem_mutex); + + return cell; +} + +/** + * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id + * + * @np: Device tree node that uses the nvmem cell. + * @id: nvmem cell name from nvmem-cell-names property, or NULL + * for the cell at index 0 (the lone cell with no accompanying + * nvmem-cell-names property). + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell should be released with + * nvmem_cell_put().
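+ * + * Example (an illustrative consumer sketch; "calibration" is a + * placeholder cell name): + * + *	cell = of_nvmem_cell_get(dev->of_node, "calibration"); + *	if (IS_ERR(cell)) + *		return PTR_ERR(cell);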
+ */ +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) +{ + struct device_node *cell_np, *nvmem_np; + struct nvmem_device *nvmem; + struct nvmem_cell *cell; + int index = 0; + + /* if cell name exists, find index to the name */ + if (id) + index = of_property_match_string(np, "nvmem-cell-names", id); + + cell_np = of_parse_phandle(np, "nvmem-cells", index); + if (!cell_np) + return ERR_PTR(-ENOENT); + + nvmem_np = of_get_next_parent(cell_np); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); + of_node_put(nvmem_np); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + cell = nvmem_find_cell_by_node(nvmem, cell_np); + if (!cell) { + __nvmem_device_put(nvmem); + return ERR_PTR(-ENOENT); + } + + return cell; +} +EXPORT_SYMBOL_GPL(of_nvmem_cell_get); +#endif + +/** + * nvmem_cell_get() - Get nvmem cell of device from a given cell name + * + * @dev: Device that requests the nvmem cell. + * @id: nvmem cell name to get (this corresponds with the name from the + * nvmem-cell-names property for DT systems and with the con_id from + * the lookup entry for non-DT systems). + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell should be released with + * nvmem_cell_put(). + */ +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id) +{ + struct nvmem_cell *cell; + + if (dev->of_node) { /* try dt first */ + cell = of_nvmem_cell_get(dev->of_node, id); + if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) + return cell; + } + + /* NULL cell id only allowed for device tree; invalid otherwise */ + if (!id) + return ERR_PTR(-EINVAL); + + return nvmem_cell_get_from_lookup(dev, id); +} +EXPORT_SYMBOL_GPL(nvmem_cell_get); + +static void devm_nvmem_cell_release(struct device *dev, void *res) +{ + nvmem_cell_put(*(struct nvmem_cell **)res); +} + +/** + * devm_nvmem_cell_get() - Get nvmem cell of device from a given id + * + * @dev: Device that requests the nvmem cell. + * @id: nvmem cell name id to get. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed + * automatically once the device is freed. + */ +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) +{ + struct nvmem_cell **ptr, *cell; + + ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + cell = nvmem_cell_get(dev, id); + if (!IS_ERR(cell)) { + *ptr = cell; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return cell; +} +EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); + +static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) +{ + struct nvmem_cell **c = res; + + if (WARN_ON(!c || !*c)) + return 0; + + return *c == data; +} + +/** + * devm_nvmem_cell_put() - Release previously allocated nvmem cell + * from devm_nvmem_cell_get. + * + * @dev: Device that requests the nvmem cell. + * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get(). + */ +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_cell_release, + devm_nvmem_cell_match, cell); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_nvmem_cell_put); + +/** + * nvmem_cell_put() - Release previously allocated nvmem cell. + * + * @cell: Previously allocated nvmem cell by nvmem_cell_get().
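+ * + * Example (an illustrative get/read/put pattern; the cell id is a + * placeholder): + * + *	cell = nvmem_cell_get(dev, "calibration"); + *	if (IS_ERR(cell)) + *		return PTR_ERR(cell); + *	buf = nvmem_cell_read(cell, &len); + *	nvmem_cell_put(cell); + *	if (IS_ERR(buf)) + *		return PTR_ERR(buf); + *	... use buf, then kfree(buf);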
+ */ +void nvmem_cell_put(struct nvmem_cell *cell) +{ + struct nvmem_device *nvmem = cell->nvmem; + + __nvmem_device_put(nvmem); +} +EXPORT_SYMBOL_GPL(nvmem_cell_put); + +static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) +{ + u8 *p, *b; + int i, extra, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { + /* First shift */ + *b++ >>= bit_offset; + + /* setup rest of the bytes if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get bits from next byte and shift them towards msb */ + *p |= *b << (BITS_PER_BYTE - bit_offset); + + p = b; + *b++ >>= bit_offset; + } + } else { + /* point to the msb */ + p += cell->bytes - 1; + } + + /* result fits in less bytes */ + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); + while (--extra >= 0) + *p-- = 0; + + /* clear msb bits if any leftover in the last byte */ + if (cell->nbits % BITS_PER_BYTE) + *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); +} + +static int __nvmem_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell *cell, + void *buf, size_t *len) +{ + int rc; + + rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); + + if (rc) + return rc; + + /* shift bits in-place */ + if (cell->bit_offset || cell->nbits) + nvmem_shift_read_buffer_in_place(cell, buf); + + if (len) + *len = cell->bytes; + + return 0; +} + +/** + * nvmem_cell_read() - Read a given nvmem cell + * + * @cell: nvmem cell to be read. + * @len: pointer to length of cell which will be populated on successful read; + * can be NULL. + * + * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The + * buffer should be freed by the consumer with a kfree(). + */ +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + struct nvmem_device *nvmem = cell->nvmem; + u8 *buf; + int rc; + + if (!nvmem) + return ERR_PTR(-EINVAL); + + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + rc = __nvmem_cell_read(nvmem, cell, buf, len); + if (rc) { + kfree(buf); + return ERR_PTR(rc); + } + + return buf; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read); + +static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, + u8 *_buf, int len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int i, rc, nbits, bit_offset = cell->bit_offset; + u8 v, *p, *buf, *b, pbyte, pbits; + + nbits = cell->nbits; + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + memcpy(buf, _buf, len); + p = b = buf; + + if (bit_offset) { + pbyte = *b; + *b <<= bit_offset; + + /* setup the first byte with lsb bits from nvmem */ + rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); + if (rc) + goto err; + *b++ |= GENMASK(bit_offset - 1, 0) & v; + + /* setup rest of the byte if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get last byte bits and shift them towards lsb */ + pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); + pbyte = *b; + p = b; + *b <<= bit_offset; + *b++ |= pbits; + } + } + + /* if it's not end on byte boundary */ + if ((nbits + bit_offset) % BITS_PER_BYTE) { + /* setup the last byte with msb bits from nvmem */ + rc = nvmem_reg_read(nvmem, + cell->offset + cell->bytes - 1, &v, 1); + if (rc) + goto err; + *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; + + } + + return buf; +err: + kfree(buf); + return ERR_PTR(rc); +} + +/** + * nvmem_cell_write() - Write to a given nvmem cell + * + * @cell: nvmem cell to be written. + * @buf: Buffer to be written. + * @len: length of buffer to be written to nvmem cell. 
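+ * + * Example (illustrative; assumes a 4-byte cell with bit_offset 0, in + * which case @len must equal the cell size): + * + *	u32 val = 0x12345678;	(a placeholder value) + * + *	rc = nvmem_cell_write(cell, &val, sizeof(val));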
+ * + * Return: length of bytes written or negative on failure. + */ +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int rc; + + if (!nvmem || nvmem->read_only || + (cell->bit_offset == 0 && len != cell->bytes)) + return -EINVAL; + + if (cell->bit_offset || cell->nbits) { + buf = nvmem_cell_prepare_write_buffer(cell, buf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + } + + rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes); + + /* free the tmp buffer */ + if (cell->bit_offset || cell->nbits) + kfree(buf); + + if (rc) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_cell_write); + +static int nvmem_cell_read_common(struct device *dev, const char *cell_id, + void *val, size_t count) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + + cell = nvmem_cell_get(dev, cell_id); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) { + nvmem_cell_put(cell); + return PTR_ERR(buf); + } + if (len != count) { + kfree(buf); + nvmem_cell_put(cell); + return -EINVAL; + } + memcpy(val, buf, count); + kfree(buf); + nvmem_cell_put(cell); + + return 0; +} + +/** + * nvmem_cell_read_u8() - Read a cell value as a u8 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u8); + +/** + * nvmem_cell_read_u16() - Read a cell value as a u16 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u16); + +/** + * nvmem_cell_read_u32() - Read a cell value as a u32 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); + +/** + * nvmem_cell_read_u64() - Read a cell value as a u64 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u64); + +/** + * nvmem_device_cell_read() - Read a given nvmem device and cell + * + * @nvmem: nvmem device to read from. + * @info: nvmem cell info to be read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. 
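+ * + * Example (an illustrative sketch; the offset and size are placeholders): + * + *	struct nvmem_cell_info info = { .offset = 0, .bytes = 4 }; + *	u8 buf[4]; + * + *	len = nvmem_device_cell_read(nvmem, &info, buf);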
+ */ +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + ssize_t len; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell); + if (rc) + return rc; + + rc = __nvmem_cell_read(nvmem, &cell, buf, &len); + if (rc) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_read); + +/** + * nvmem_device_cell_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @info: nvmem cell info to be written. + * @buf: buffer to be written to cell. + * + * Return: length of bytes written or negative error code on failure. + */ +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell); + if (rc) + return rc; + + return nvmem_cell_write(&cell, buf, cell.bytes); +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_write); + +/** + * nvmem_device_read() - Read from a given nvmem device + * + * @nvmem: nvmem device to read from. + * @offset: offset in nvmem device. + * @bytes: number of bytes to read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_reg_read(nvmem, offset, buf, bytes); + + if (rc) + return rc; + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_read); + +/** + * nvmem_device_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @offset: offset in nvmem device. + * @bytes: number of bytes to write. + * @buf: buffer to be written. + * + * Return: length of bytes written or negative error code on failure. 
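+ * + * Example (illustrative; the offset and payload are placeholders): + * + *	u8 val[4] = { 0x00, 0x11, 0x22, 0x33 }; + * + *	rc = nvmem_device_write(nvmem, 0x10, sizeof(val), val);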
+ */ +int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_reg_write(nvmem, offset, buf, bytes); + + if (rc) + return rc; + + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_write); + +/** + * nvmem_add_cell_table() - register a table of cell info entries + * + * @table: table of cell info entries + */ +void nvmem_add_cell_table(struct nvmem_cell_table *table) +{ + mutex_lock(&nvmem_cell_mutex); + list_add_tail(&table->node, &nvmem_cell_tables); + mutex_unlock(&nvmem_cell_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_add_cell_table); + +/** + * nvmem_del_cell_table() - remove a previously registered cell info table + * + * @table: table of cell info entries + */ +void nvmem_del_cell_table(struct nvmem_cell_table *table) +{ + mutex_lock(&nvmem_cell_mutex); + list_del(&table->node); + mutex_unlock(&nvmem_cell_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_del_cell_table); + +/** + * nvmem_add_cell_lookups() - register a list of cell lookup entries + * + * @entries: array of cell lookup entries + * @nentries: number of cell lookup entries in the array + */ +void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) +{ + int i; + + mutex_lock(&nvmem_lookup_mutex); + for (i = 0; i < nentries; i++) + list_add_tail(&entries[i].node, &nvmem_lookup_list); + mutex_unlock(&nvmem_lookup_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups); + +/** + * nvmem_del_cell_lookups() - remove a list of previously added cell lookup + * entries + * + * @entries: array of cell lookup entries + * @nentries: number of cell lookup entries in the array + */ +void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) +{ + int i; + + mutex_lock(&nvmem_lookup_mutex); + for (i = 0; i < nentries; i++) + list_del(&entries[i].node); + mutex_unlock(&nvmem_lookup_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups); + +/** + * nvmem_dev_name() - Get the name of a given nvmem device. + * + * @nvmem: nvmem device. + * + * Return: name of the nvmem device. 
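+ *
+ * Example (editor's sketch):
+ *
+ *	dev_info(dev, "cells provided by %s\n", nvmem_dev_name(nvmem));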
+ */
+const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+	return dev_name(&nvmem->dev);
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
+static int __init nvmem_init(void)
+{
+	return bus_register(&nvmem_bus_type);
+}
+
+static void __exit nvmem_exit(void)
+{
+	bus_unregister(&nvmem_bus_type);
+}
+
+subsys_initcall(nvmem_init);
+module_exit(nvmem_exit);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("nvmem Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
new file mode 100644
index 000000000..701704b87
--- /dev/null
+++ b/drivers/nvmem/imx-iim.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i.MX IIM driver
+ *
+ * Copyright (c) 2017 Pengutronix, Michael Grzeschik <m.grzeschik@pengutronix.de>
+ *
+ * Based on the barebox iim driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
+ *	Orex Computed Radiography
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define IIM_BANK_BASE(n)	(0x800 + 0x400 * (n))
+
+struct imx_iim_drvdata {
+	unsigned int nregs;
+};
+
+struct iim_priv {
+	void __iomem *base;
+	struct clk *clk;
+};
+
+static int imx_iim_read(void *context, unsigned int offset,
+			void *buf, size_t bytes)
+{
+	struct iim_priv *iim = context;
+	int i, ret;
+	u8 *buf8 = buf;
+
+	ret = clk_prepare_enable(iim->clk);
+	if (ret)
+		return ret;
+
+	for (i = offset; i < offset + bytes; i++) {
+		int bank = i >> 5;
+		int reg = i & 0x1f;
+
+		*buf8++ = readl(iim->base + IIM_BANK_BASE(bank) + reg * 4);
+	}
+
+	clk_disable_unprepare(iim->clk);
+
+	return 0;
+}
+
+static struct imx_iim_drvdata imx27_drvdata = {
+	.nregs = 2 * 32,
+};
+
+static struct imx_iim_drvdata imx25_imx31_imx35_drvdata = {
+	.nregs = 3 * 32,
+};
+
+static struct imx_iim_drvdata imx51_drvdata = {
+	.nregs = 4 * 32,
+};
+
+static struct imx_iim_drvdata imx53_drvdata = {
+	.nregs = 4 * 32 + 16,
+};
+
+static const struct of_device_id imx_iim_dt_ids[] = {
+	{
+		.compatible = "fsl,imx25-iim",
+		.data = &imx25_imx31_imx35_drvdata,
+	}, {
+		.compatible = "fsl,imx27-iim",
+		.data = &imx27_drvdata,
+	}, {
+		.compatible = "fsl,imx31-iim",
+		.data = &imx25_imx31_imx35_drvdata,
+	}, {
+		.compatible = "fsl,imx35-iim",
+		.data = &imx25_imx31_imx35_drvdata,
+	}, {
+		.compatible = "fsl,imx51-iim",
+		.data = &imx51_drvdata,
+	}, {
+		.compatible = "fsl,imx53-iim",
+		.data = &imx53_drvdata,
+	}, {
+		/* sentinel */
+	},
+};
+MODULE_DEVICE_TABLE(of, imx_iim_dt_ids);
+
+static int imx_iim_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	struct device *dev = &pdev->dev;
+	struct iim_priv *iim;
+	struct nvmem_device *nvmem;
+	struct nvmem_config cfg = {};
+	const struct imx_iim_drvdata *drvdata = NULL;
+
+	iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
+	if (!iim)
+		return -ENOMEM;
+
+	iim->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(iim->base))
+		return PTR_ERR(iim->base);
+
+	of_id = of_match_device(imx_iim_dt_ids, dev);
+	if (!of_id)
+		return -ENODEV;
+
+	drvdata = of_id->data;
+
+	iim->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(iim->clk))
+		return PTR_ERR(iim->clk);
+
+	cfg.name = "imx-iim";
+	cfg.read_only = true;
+	cfg.word_size = 1;
+	cfg.stride = 1;
+	cfg.reg_read = imx_iim_read;
+	cfg.dev = 
dev; + cfg.size = drvdata->nregs; + cfg.priv = iim; + + nvmem = devm_nvmem_register(dev, &cfg); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver imx_iim_driver = { + .probe = imx_iim_probe, + .driver = { + .name = "imx-iim", + .of_match_table = imx_iim_dt_ids, + }, +}; +module_platform_driver(imx_iim_driver); + +MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>"); +MODULE_DESCRIPTION("i.MX IIM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c new file mode 100644 index 000000000..399e1eb8b --- /dev/null +++ b/drivers/nvmem/imx-ocotp-scu.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * i.MX8 OCOTP fusebox driver + * + * Copyright 2019 NXP + * + * Peng Fan <peng.fan@nxp.com> + */ + +#include <linux/arm-smccc.h> +#include <linux/firmware/imx/sci.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define IMX_SIP_OTP_WRITE 0xc200000B + +enum ocotp_devtype { + IMX8QXP, + IMX8QM, +}; + +#define ECC_REGION BIT(0) +#define HOLE_REGION BIT(1) + +struct ocotp_region { + u32 start; + u32 end; + u32 flag; +}; + +struct ocotp_devtype_data { + int devtype; + int nregs; + u32 num_region; + struct ocotp_region region[]; +}; + +struct ocotp_priv { + struct device *dev; + const struct ocotp_devtype_data *data; + struct imx_sc_ipc *nvmem_ipc; +}; + +struct imx_sc_msg_misc_fuse_read { + struct imx_sc_rpc_msg hdr; + u32 word; +} __packed; + +static DEFINE_MUTEX(scu_ocotp_mutex); + +static struct ocotp_devtype_data imx8qxp_data = { + .devtype = IMX8QXP, + .nregs = 800, + .num_region = 3, + .region = { + {0x10, 0x10f, ECC_REGION}, + {0x110, 0x21F, HOLE_REGION}, + {0x220, 0x31F, ECC_REGION}, + }, +}; + +static struct ocotp_devtype_data imx8qm_data = { + .devtype = IMX8QM, + .nregs = 800, + .num_region = 2, + .region = { + {0x10, 0x10f, ECC_REGION}, + {0x1a0, 0x1ff, ECC_REGION}, + }, +}; + +static bool in_hole(void *context, u32 index) +{ + struct ocotp_priv *priv = context; + const struct ocotp_devtype_data *data = priv->data; + int i; + + for (i = 0; i < data->num_region; i++) { + if (data->region[i].flag & HOLE_REGION) { + if ((index >= data->region[i].start) && + (index <= data->region[i].end)) + return true; + } + } + + return false; +} + +static bool in_ecc(void *context, u32 index) +{ + struct ocotp_priv *priv = context; + const struct ocotp_devtype_data *data = priv->data; + int i; + + for (i = 0; i < data->num_region; i++) { + if (data->region[i].flag & ECC_REGION) { + if ((index >= data->region[i].start) && + (index <= data->region[i].end)) + return true; + } + } + + return false; +} + +static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word, + u32 *val) +{ + struct imx_sc_msg_misc_fuse_read msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_MISC; + hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ; + hdr->size = 2; + + msg.word = word; + + ret = imx_scu_call_rpc(ipc, &msg, true); + if (ret) + return ret; + + *val = msg.word; + + return 0; +} + +static int imx_scu_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct ocotp_priv *priv = context; + u32 count, index, num_bytes; + u32 *buf; + void *p; + int i, ret; + + index = offset; + num_bytes = round_up(bytes, 4); + count = num_bytes >> 2; + + if (count > (priv->data->nregs - index)) + count = priv->data->nregs - index; + + p = 
kzalloc(num_bytes, GFP_KERNEL); + if (!p) + return -ENOMEM; + + mutex_lock(&scu_ocotp_mutex); + + buf = p; + + for (i = index; i < (index + count); i++) { + if (in_hole(context, i)) { + *buf++ = 0; + continue; + } + + ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf); + if (ret) { + mutex_unlock(&scu_ocotp_mutex); + kfree(p); + return ret; + } + buf++; + } + + memcpy(val, (u8 *)p, bytes); + + mutex_unlock(&scu_ocotp_mutex); + + kfree(p); + + return 0; +} + +static int imx_scu_ocotp_write(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct ocotp_priv *priv = context; + struct arm_smccc_res res; + u32 *buf = val; + u32 tmp; + u32 index; + int ret; + + /* allow only writing one complete OTP word at a time */ + if (bytes != 4) + return -EINVAL; + + index = offset; + + if (in_hole(context, index)) + return -EINVAL; + + if (in_ecc(context, index)) { + pr_warn("ECC region, only program once\n"); + mutex_lock(&scu_ocotp_mutex); + ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp); + mutex_unlock(&scu_ocotp_mutex); + if (ret) + return ret; + if (tmp) { + pr_warn("ECC region, already has value: %x\n", tmp); + return -EIO; + } + } + + mutex_lock(&scu_ocotp_mutex); + + arm_smccc_smc(IMX_SIP_OTP_WRITE, index, *buf, 0, 0, 0, 0, 0, &res); + + mutex_unlock(&scu_ocotp_mutex); + + return res.a0; +} + +static struct nvmem_config imx_scu_ocotp_nvmem_config = { + .name = "imx-scu-ocotp", + .read_only = false, + .word_size = 4, + .stride = 1, + .owner = THIS_MODULE, + .reg_read = imx_scu_ocotp_read, + .reg_write = imx_scu_ocotp_write, +}; + +static const struct of_device_id imx_scu_ocotp_dt_ids[] = { + { .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data }, + { .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids); + +static int imx_scu_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ocotp_priv *priv; + struct nvmem_device *nvmem; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = imx_scu_get_handle(&priv->nvmem_ipc); + if (ret) + return ret; + + priv->data = of_device_get_match_data(dev); + priv->dev = dev; + imx_scu_ocotp_nvmem_config.size = 4 * priv->data->nregs; + imx_scu_ocotp_nvmem_config.dev = dev; + imx_scu_ocotp_nvmem_config.priv = priv; + nvmem = devm_nvmem_register(dev, &imx_scu_ocotp_nvmem_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver imx_scu_ocotp_driver = { + .probe = imx_scu_ocotp_probe, + .driver = { + .name = "imx_scu_ocotp", + .of_match_table = imx_scu_ocotp_dt_ids, + }, +}; +module_platform_driver(imx_scu_ocotp_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("i.MX8 SCU OCOTP fuse box driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c new file mode 100644 index 000000000..fddb5459e --- /dev/null +++ b/drivers/nvmem/imx-ocotp.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * i.MX6 OCOTP fusebox driver + * + * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de> + * + * Based on the barebox ocotp driver, + * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>, + * Orex Computed Radiography + * + * Write support based on the fsl_otp driver, + * Copyright (C) 2010-2013 Freescale Semiconductor, Inc + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include 
<linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#define IMX_OCOTP_OFFSET_B0W0		0x400 /* Offset from base address of the
+					       * OTP Bank0 Word0
+					       */
+#define IMX_OCOTP_OFFSET_PER_WORD	0x10  /* Offset between the start addr
+					       * of two consecutive OTP words.
+					       */
+
+#define IMX_OCOTP_ADDR_CTRL		0x0000
+#define IMX_OCOTP_ADDR_CTRL_SET		0x0004
+#define IMX_OCOTP_ADDR_CTRL_CLR		0x0008
+#define IMX_OCOTP_ADDR_TIMING		0x0010
+#define IMX_OCOTP_ADDR_DATA0		0x0020
+#define IMX_OCOTP_ADDR_DATA1		0x0030
+#define IMX_OCOTP_ADDR_DATA2		0x0040
+#define IMX_OCOTP_ADDR_DATA3		0x0050
+
+#define IMX_OCOTP_BM_CTRL_ADDR		0x000000FF
+#define IMX_OCOTP_BM_CTRL_BUSY		0x00000100
+#define IMX_OCOTP_BM_CTRL_ERROR		0x00000200
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS	0x00000400
+
+#define IMX_OCOTP_BM_CTRL_ADDR_8MP		0x000001FF
+#define IMX_OCOTP_BM_CTRL_BUSY_8MP		0x00000200
+#define IMX_OCOTP_BM_CTRL_ERROR_8MP		0x00000400
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP	0x00000800
+
+#define IMX_OCOTP_BM_CTRL_DEFAULT			\
+	{						\
+		.bm_addr = IMX_OCOTP_BM_CTRL_ADDR,	\
+		.bm_busy = IMX_OCOTP_BM_CTRL_BUSY,	\
+		.bm_error = IMX_OCOTP_BM_CTRL_ERROR,	\
+		.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
+	}
+
+#define IMX_OCOTP_BM_CTRL_8MP				\
+	{						\
+		.bm_addr = IMX_OCOTP_BM_CTRL_ADDR_8MP,	\
+		.bm_busy = IMX_OCOTP_BM_CTRL_BUSY_8MP,	\
+		.bm_error = IMX_OCOTP_BM_CTRL_ERROR_8MP,	\
+		.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP,\
+	}
+
+#define TIMING_STROBE_PROG_US		10	/* Min time to blow a fuse */
+#define TIMING_STROBE_READ_NS		37	/* Min time before read */
+#define TIMING_RELAX_NS			17
+#define DEF_FSOURCE			1001	/* > 1000 ns */
+#define DEF_STROBE_PROG			10000	/* IPG clocks */
+#define IMX_OCOTP_WR_UNLOCK		0x3E770000
+#define IMX_OCOTP_READ_LOCKED_VAL	0xBADABADA
+
+static DEFINE_MUTEX(ocotp_mutex);
+
+struct ocotp_priv {
+	struct device *dev;
+	struct clk *clk;
+	void __iomem *base;
+	const struct ocotp_params *params;
+	struct nvmem_config *config;
+};
+
+struct ocotp_ctrl_reg {
+	u32 bm_addr;
+	u32 bm_busy;
+	u32 bm_error;
+	u32 bm_rel_shadows;
+};
+
+struct ocotp_params {
+	unsigned int nregs;
+	unsigned int bank_address_words;
+	void (*set_timing)(struct ocotp_priv *priv);
+	struct ocotp_ctrl_reg ctrl;
+};
+
+static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags)
+{
+	int count;
+	u32 c, mask;
+	u32 bm_ctrl_busy, bm_ctrl_error;
+	void __iomem *base = priv->base;
+
+	bm_ctrl_busy = priv->params->ctrl.bm_busy;
+	bm_ctrl_error = priv->params->ctrl.bm_error;
+
+	mask = bm_ctrl_busy | bm_ctrl_error | flags;
+
+	for (count = 10000; count >= 0; count--) {
+		c = readl(base + IMX_OCOTP_ADDR_CTRL);
+		if (!(c & mask))
+			break;
+		cpu_relax();
+	}
+
+	if (count < 0) {
+		/* HW_OCOTP_CTRL[ERROR] will be set under the following
+		 * conditions:
+		 * - A write is performed to a shadow register during a shadow
+		 *   reload (essentially, while HW_OCOTP_CTRL[RELOAD_SHADOWS]
+		 *   is set). In addition, the contents of the shadow register
+		 *   shall not be updated.
+		 * - A write is performed to a shadow register which has been
+		 *   locked.
+		 * - A read is performed from a shadow register which has been
+		 *   read locked.
+		 * - A program is performed to a fuse word which has been
+		 *   locked.
+		 * - A read is performed from a fuse word which has been read
+		 *   locked.
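+		 *
+		 * (Editor's note: when any of these occur, the helper below
+		 * returns -EPERM and the caller is expected to clear the
+		 * sticky ERROR bit, e.g. via imx_ocotp_clr_err_if_set(),
+		 * before issuing another access; see imx_ocotp_read() and
+		 * imx_ocotp_write() below.)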
+ */ + if (c & bm_ctrl_error) + return -EPERM; + return -ETIMEDOUT; + } + + return 0; +} + +static void imx_ocotp_clr_err_if_set(struct ocotp_priv *priv) +{ + u32 c, bm_ctrl_error; + void __iomem *base = priv->base; + + bm_ctrl_error = priv->params->ctrl.bm_error; + + c = readl(base + IMX_OCOTP_ADDR_CTRL); + if (!(c & bm_ctrl_error)) + return; + + writel(bm_ctrl_error, base + IMX_OCOTP_ADDR_CTRL_CLR); +} + +static int imx_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct ocotp_priv *priv = context; + unsigned int count; + u32 *buf = val; + int i, ret; + u32 index; + + index = offset >> 2; + count = bytes >> 2; + + if (count > (priv->params->nregs - index)) + count = priv->params->nregs - index; + + mutex_lock(&ocotp_mutex); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + mutex_unlock(&ocotp_mutex); + dev_err(priv->dev, "failed to prepare/enable ocotp clk\n"); + return ret; + } + + ret = imx_ocotp_wait_for_busy(priv, 0); + if (ret < 0) { + dev_err(priv->dev, "timeout during read setup\n"); + goto read_end; + } + + for (i = index; i < (index + count); i++) { + *buf++ = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 + + i * IMX_OCOTP_OFFSET_PER_WORD); + + /* 47.3.1.2 + * For "read locked" registers 0xBADABADA will be returned and + * HW_OCOTP_CTRL[ERROR] will be set. It must be cleared by + * software before any new write, read or reload access can be + * issued + */ + if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL) + imx_ocotp_clr_err_if_set(priv); + } + +read_end: + clk_disable_unprepare(priv->clk); + mutex_unlock(&ocotp_mutex); + return ret; +} + +static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv) +{ + unsigned long clk_rate; + unsigned long strobe_read, relax, strobe_prog; + u32 timing; + + /* 47.3.1.3.1 + * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX] + * fields with timing values to match the current frequency of the + * ipg_clk. OTP writes will work at maximum bus frequencies as long + * as the HW_OCOTP_TIMING parameters are set correctly. + * + * Note: there are minimum timings required to ensure an OTP fuse burns + * correctly that are independent of the ipg_clk. Those values are not + * formally documented anywhere however, working from the minimum + * timings given in u-boot we can say: + * + * - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10 + * microseconds feels about right as representative of a minimum time + * to physically burn out a fuse. + * + * - Minimum STROBE_READ i.e. the time to wait post OTP fuse burn before + * performing another read is 37 nanoseconds + * + * - Minimum RELAX timing is 17 nanoseconds. This final RELAX minimum + * timing is not entirely clear the documentation says "This + * count value specifies the time to add to all default timing + * parameters other than the Tpgm and Trd. It is given in number + * of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG + * and STROBE_READ respectively. What the other timing parameters + * are though, is not specified. Experience shows a zero RELAX + * value will mess up a re-load of the shadow registers post OTP + * burn. 
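+	 *
+	 * Worked example (editor's illustration, not from the original
+	 * source, assuming a typical 66 MHz ipg_clk):
+	 *   relax       = DIV_ROUND_UP(66000000 * 17, 1000000000) - 1 = 1
+	 *   strobe_read = DIV_ROUND_UP(66000000 * 37, 1000000000)
+	 *                 + 2 * (relax + 1) - 1                      = 6
+	 *   strobe_prog = DIV_ROUND_CLOSEST(66000000 * 10, 1000000)
+	 *                 + 2 * (relax + 1) - 1                      = 663
+	 * all of which fit their bit fields in HW_OCOTP_TIMING below.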
+ */ + clk_rate = clk_get_rate(priv->clk); + + relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1; + strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS, + 1000000000); + strobe_read += 2 * (relax + 1) - 1; + strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US, + 1000000); + strobe_prog += 2 * (relax + 1) - 1; + + timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000; + timing |= strobe_prog & 0x00000FFF; + timing |= (relax << 12) & 0x0000F000; + timing |= (strobe_read << 16) & 0x003F0000; + + writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING); +} + +static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv) +{ + unsigned long clk_rate; + u64 fsource, strobe_prog; + u32 timing; + + /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1 + * 6.4.3.3 + */ + clk_rate = clk_get_rate(priv->clk); + fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE, + NSEC_PER_SEC) + 1; + strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG, + NSEC_PER_SEC) + 1; + + timing = strobe_prog & 0x00000FFF; + timing |= (fsource << 12) & 0x000FF000; + + writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING); +} + +static int imx_ocotp_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct ocotp_priv *priv = context; + u32 *buf = val; + int ret; + + u32 ctrl; + u8 waddr; + u8 word = 0; + + /* allow only writing one complete OTP word at a time */ + if ((bytes != priv->config->word_size) || + (offset % priv->config->word_size)) + return -EINVAL; + + mutex_lock(&ocotp_mutex); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + mutex_unlock(&ocotp_mutex); + dev_err(priv->dev, "failed to prepare/enable ocotp clk\n"); + return ret; + } + + /* Setup the write timing values */ + priv->params->set_timing(priv); + + /* 47.3.1.3.2 + * Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear. + * Overlapped accesses are not supported by the controller. Any pending + * write or reload must be completed before a write access can be + * requested. + */ + ret = imx_ocotp_wait_for_busy(priv, 0); + if (ret < 0) { + dev_err(priv->dev, "timeout during timing setup\n"); + goto write_end; + } + + /* 47.3.1.3.3 + * Write the requested address to HW_OCOTP_CTRL[ADDR] and program the + * unlock code into HW_OCOTP_CTRL[WR_UNLOCK]. This must be programmed + * for each write access. The lock code is documented in the register + * description. Both the unlock code and address can be written in the + * same operation. + */ + if (priv->params->bank_address_words != 0) { + /* + * In banked/i.MX7 mode the OTP register bank goes into waddr + * see i.MX 7Solo Applications Processor Reference Manual, Rev. + * 0.1 section 6.4.3.1 + */ + offset = offset / priv->config->word_size; + waddr = offset / priv->params->bank_address_words; + word = offset & (priv->params->bank_address_words - 1); + } else { + /* + * Non-banked i.MX6 mode. + * OTP write/read address specifies one of 128 word address + * locations + */ + waddr = offset / 4; + } + + ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL); + ctrl &= ~priv->params->ctrl.bm_addr; + ctrl |= waddr & priv->params->ctrl.bm_addr; + ctrl |= IMX_OCOTP_WR_UNLOCK; + + writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL); + + /* 47.3.1.3.4 + * Write the data to the HW_OCOTP_DATA register. This will automatically + * set HW_OCOTP_CTRL[BUSY] and clear HW_OCOTP_CTRL[WR_UNLOCK]. 
To + * protect programming same OTP bit twice, before program OCOTP will + * automatically read fuse value in OTP and use read value to mask + * program data. The controller will use masked program data to program + * a 32-bit word in the OTP per the address in HW_OCOTP_CTRL[ADDR]. Bit + * fields with 1's will result in that OTP bit being programmed. Bit + * fields with 0's will be ignored. At the same time that the write is + * accepted, the controller makes an internal copy of + * HW_OCOTP_CTRL[ADDR] which cannot be updated until the next write + * sequence is initiated. This copy guarantees that erroneous writes to + * HW_OCOTP_CTRL[ADDR] will not affect an active write operation. It + * should also be noted that during the programming HW_OCOTP_DATA will + * shift right (with zero fill). This shifting is required to program + * the OTP serially. During the write operation, HW_OCOTP_DATA cannot be + * modified. + * Note: on i.MX7 there are four data fields to write for banked write + * with the fuse blowing operation only taking place after data0 + * has been written. This is why data0 must always be the last + * register written. + */ + if (priv->params->bank_address_words != 0) { + /* Banked/i.MX7 mode */ + switch (word) { + case 0: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 1: + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 2: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 3: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + } + } else { + /* Non-banked i.MX6 mode */ + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0); + } + + /* 47.4.1.4.5 + * Once complete, the controller will clear BUSY. A write request to a + * protected or locked region will result in no OTP access and no + * setting of HW_OCOTP_CTRL[BUSY]. In addition HW_OCOTP_CTRL[ERROR] will + * be set. It must be cleared by software before any new write access + * can be issued. + */ + ret = imx_ocotp_wait_for_busy(priv, 0); + if (ret < 0) { + if (ret == -EPERM) { + dev_err(priv->dev, "failed write to locked region"); + imx_ocotp_clr_err_if_set(priv); + } else { + dev_err(priv->dev, "timeout during data write\n"); + } + goto write_end; + } + + /* 47.3.1.4 + * Write Postamble: Due to internal electrical characteristics of the + * OTP during writes, all OTP operations following a write must be + * separated by 2 us after the clearing of HW_OCOTP_CTRL_BUSY following + * the write. + */ + udelay(2); + + /* reload all shadow registers */ + writel(priv->params->ctrl.bm_rel_shadows, + priv->base + IMX_OCOTP_ADDR_CTRL_SET); + ret = imx_ocotp_wait_for_busy(priv, + priv->params->ctrl.bm_rel_shadows); + if (ret < 0) + dev_err(priv->dev, "timeout during shadow register reload\n"); + +write_end: + clk_disable_unprepare(priv->clk); + mutex_unlock(&ocotp_mutex); + return ret < 0 ? 
ret : bytes; +} + +static struct nvmem_config imx_ocotp_nvmem_config = { + .name = "imx-ocotp", + .read_only = false, + .word_size = 4, + .stride = 4, + .reg_read = imx_ocotp_read, + .reg_write = imx_ocotp_write, +}; + +static const struct ocotp_params imx6q_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6sl_params = { + .nregs = 64, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6sll_params = { + .nregs = 80, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6sx_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6ul_params = { + .nregs = 144, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx6ull_params = { + .nregs = 80, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx7d_params = { + .nregs = 64, + .bank_address_words = 4, + .set_timing = imx_ocotp_set_imx7_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx7ulp_params = { + .nregs = 256, + .bank_address_words = 0, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mq_params = { + .nregs = 256, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mm_params = { + .nregs = 256, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mn_params = { + .nregs = 256, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, +}; + +static const struct ocotp_params imx8mp_params = { + .nregs = 384, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, + .ctrl = IMX_OCOTP_BM_CTRL_8MP, +}; + +static const struct of_device_id imx_ocotp_dt_ids[] = { + { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params }, + { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params }, + { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params }, + { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params }, + { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params }, + { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params }, + { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params }, + { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params }, + { .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params }, + { .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params }, + { .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params }, + { .compatible = "fsl,imx8mp-ocotp", .data = &imx8mp_params }, + { }, +}; +MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); + +static int imx_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ocotp_priv *priv; + struct nvmem_device *nvmem; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if 
(IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->params = of_device_get_match_data(&pdev->dev); + imx_ocotp_nvmem_config.size = 4 * priv->params->nregs; + imx_ocotp_nvmem_config.dev = dev; + imx_ocotp_nvmem_config.priv = priv; + priv->config = &imx_ocotp_nvmem_config; + + clk_prepare_enable(priv->clk); + imx_ocotp_clr_err_if_set(priv); + clk_disable_unprepare(priv->clk); + + nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver imx_ocotp_driver = { + .probe = imx_ocotp_probe, + .driver = { + .name = "imx_ocotp", + .of_match_table = imx_ocotp_dt_ids, + }, +}; +module_platform_driver(imx_ocotp_driver); + +MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>"); +MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c new file mode 100644 index 000000000..0b01b840e --- /dev/null +++ b/drivers/nvmem/jz4780-efuse.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * JZ4780 EFUSE Memory Support driver + * + * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com> + * Copyright (c) 2020 H. Nikolaus Schaller <hns@goldelico.com> + */ + +/* + * Currently supports JZ4780 efuse which has 8K programmable bit. + * Efuse is separated into seven segments as below: + * + * ----------------------------------------------------------------------- + * | 64 bit | 128 bit | 128 bit | 3520 bit | 8 bit | 2296 bit | 2048 bit | + * ----------------------------------------------------------------------- + * + * The rom itself is accessed using a 9 bit address line and an 8 word wide bus + * which reads/writes based on strobes. The strobe is configured in the config + * register and is based on number of cycles of the bus clock. + * + * Driver supports read only as the writes are done in the Factory. + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/timer.h> + +#define JZ_EFUCTRL (0x0) /* Control Register */ +#define JZ_EFUCFG (0x4) /* Configure Register*/ +#define JZ_EFUSTATE (0x8) /* Status Register */ +#define JZ_EFUDATA(n) (0xC + (n) * 4) + +/* We read 32 byte chunks to avoid complexity in the driver. 
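+ *
+ * For example (editor's note): a 6-byte read at offset 30 is served by
+ * two aligned window reads, taking bytes 30..31 of the window starting
+ * at 0 and bytes 0..3 of the window starting at 32.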
*/ +#define JZ_EFU_READ_SIZE 32 + +#define EFUCTRL_ADDR_MASK 0x3FF +#define EFUCTRL_ADDR_SHIFT 21 +#define EFUCTRL_LEN_MASK 0x1F +#define EFUCTRL_LEN_SHIFT 16 +#define EFUCTRL_PG_EN BIT(15) +#define EFUCTRL_WR_EN BIT(1) +#define EFUCTRL_RD_EN BIT(0) + +#define EFUCFG_INT_EN BIT(31) +#define EFUCFG_RD_ADJ_MASK 0xF +#define EFUCFG_RD_ADJ_SHIFT 20 +#define EFUCFG_RD_STR_MASK 0xF +#define EFUCFG_RD_STR_SHIFT 16 +#define EFUCFG_WR_ADJ_MASK 0xF +#define EFUCFG_WR_ADJ_SHIFT 12 +#define EFUCFG_WR_STR_MASK 0xFFF +#define EFUCFG_WR_STR_SHIFT 0 + +#define EFUSTATE_WR_DONE BIT(1) +#define EFUSTATE_RD_DONE BIT(0) + +struct jz4780_efuse { + struct device *dev; + struct regmap *map; + struct clk *clk; +}; + +/* main entry point */ +static int jz4780_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct jz4780_efuse *efuse = context; + + while (bytes > 0) { + size_t start = offset & ~(JZ_EFU_READ_SIZE - 1); + size_t chunk = min(bytes, (start + JZ_EFU_READ_SIZE) + - offset); + char buf[JZ_EFU_READ_SIZE]; + unsigned int tmp; + u32 ctrl; + int ret; + + ctrl = (start << EFUCTRL_ADDR_SHIFT) + | ((JZ_EFU_READ_SIZE - 1) << EFUCTRL_LEN_SHIFT) + | EFUCTRL_RD_EN; + + regmap_update_bits(efuse->map, JZ_EFUCTRL, + (EFUCTRL_ADDR_MASK << EFUCTRL_ADDR_SHIFT) | + (EFUCTRL_LEN_MASK << EFUCTRL_LEN_SHIFT) | + EFUCTRL_PG_EN | EFUCTRL_WR_EN | + EFUCTRL_RD_EN, + ctrl); + + ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE, + tmp, tmp & EFUSTATE_RD_DONE, + 1 * MSEC_PER_SEC, + 50 * MSEC_PER_SEC); + if (ret < 0) { + dev_err(efuse->dev, "Time out while reading efuse data"); + return ret; + } + + ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0), + buf, JZ_EFU_READ_SIZE / sizeof(u32)); + if (ret < 0) + return ret; + + memcpy(val, &buf[offset - start], chunk); + + val += chunk; + offset += chunk; + bytes -= chunk; + } + + return 0; +} + +static struct nvmem_config jz4780_efuse_nvmem_config = { + .name = "jz4780-efuse", + .size = 1024, + .word_size = 1, + .stride = 1, + .owner = THIS_MODULE, + .reg_read = jz4780_efuse_read, +}; + +static const struct regmap_config jz4780_efuse_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = JZ_EFUDATA(7), +}; + +static void clk_disable_unprepare_helper(void *clock) +{ + clk_disable_unprepare(clock); +} + +static int jz4780_efuse_probe(struct platform_device *pdev) +{ + struct nvmem_device *nvmem; + struct jz4780_efuse *efuse; + struct nvmem_config cfg; + unsigned long clk_rate; + unsigned long rd_adj; + unsigned long rd_strobe; + struct device *dev = &pdev->dev; + void __iomem *regs; + int ret; + + efuse = devm_kzalloc(dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + efuse->map = devm_regmap_init_mmio(dev, regs, + &jz4780_efuse_regmap_config); + if (IS_ERR(efuse->map)) + return PTR_ERR(efuse->map); + + efuse->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(efuse->clk)) + return PTR_ERR(efuse->clk); + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, + clk_disable_unprepare_helper, + efuse->clk); + if (ret < 0) + return ret; + + clk_rate = clk_get_rate(efuse->clk); + + efuse->dev = dev; + + /* + * rd_adj and rd_strobe are 4 bit values + * conditions: + * bus clk_period * (rd_adj + 1) > 6.5ns + * bus clk_period * (rd_adj + 5 + rd_strobe) > 35ns + * i.e. rd_adj >= 6.5ns / clk_period + * i.e. 
rd_strobe >= 35 ns / clk_period - 5 - rd_adj + 1 + * constants: + * 1 / 6.5ns == 153846154 Hz + * 1 / 35ns == 28571429 Hz + */ + + rd_adj = clk_rate / 153846154; + rd_strobe = clk_rate / 28571429 - 5 - rd_adj + 1; + + if (rd_adj > EFUCFG_RD_ADJ_MASK || + rd_strobe > EFUCFG_RD_STR_MASK) { + dev_err(&pdev->dev, "Cannot set clock configuration\n"); + return -EINVAL; + } + + regmap_update_bits(efuse->map, JZ_EFUCFG, + (EFUCFG_RD_ADJ_MASK << EFUCFG_RD_ADJ_SHIFT) | + (EFUCFG_RD_STR_MASK << EFUCFG_RD_STR_SHIFT), + (rd_adj << EFUCFG_RD_ADJ_SHIFT) | + (rd_strobe << EFUCFG_RD_STR_SHIFT)); + + cfg = jz4780_efuse_nvmem_config; + cfg.dev = &pdev->dev; + cfg.priv = efuse; + + nvmem = devm_nvmem_register(dev, &cfg); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id jz4780_efuse_match[] = { + { .compatible = "ingenic,jz4780-efuse" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, jz4780_efuse_match); + +static struct platform_driver jz4780_efuse_driver = { + .probe = jz4780_efuse_probe, + .driver = { + .name = "jz4780-efuse", + .of_match_table = jz4780_efuse_match, + }, +}; +module_platform_driver(jz4780_efuse_driver); + +MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>"); +MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>"); +MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); +MODULE_DESCRIPTION("Ingenic JZ4780 efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c new file mode 100644 index 000000000..a0275b29a --- /dev/null +++ b/drivers/nvmem/lpc18xx_eeprom.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver + * + * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* Registers */ +#define LPC18XX_EEPROM_AUTOPROG 0x00c +#define LPC18XX_EEPROM_AUTOPROG_WORD 0x1 + +#define LPC18XX_EEPROM_CLKDIV 0x014 + +#define LPC18XX_EEPROM_PWRDWN 0x018 +#define LPC18XX_EEPROM_PWRDWN_NO 0x0 +#define LPC18XX_EEPROM_PWRDWN_YES 0x1 + +#define LPC18XX_EEPROM_INTSTAT 0xfe0 +#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2) + +#define LPC18XX_EEPROM_INTSTATCLR 0xfe8 +#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2) + +/* Fixed page size (bytes) */ +#define LPC18XX_EEPROM_PAGE_SIZE 0x80 + +/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */ +#define LPC18XX_EEPROM_CLOCK_HZ 1500000 + +/* EEPROM requires 3 ms of erase/program time between each writing */ +#define LPC18XX_EEPROM_PROGRAM_TIME 3 + +struct lpc18xx_eeprom_dev { + struct clk *clk; + void __iomem *reg_base; + void __iomem *mem_base; + struct nvmem_device *nvmem; + unsigned reg_bytes; + unsigned val_bytes; + int size; +}; + +static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, + u32 reg, u32 val) +{ + writel(val, eeprom->reg_base + reg); +} + +static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom, + u32 reg) +{ + return readl(eeprom->reg_base + reg); +} + +static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom) +{ + unsigned long end; + u32 val; + + /* Wait until EEPROM program operation has finished */ + end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10); + + while 
(time_is_after_jiffies(end)) { + val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT); + + if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) { + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR, + LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST); + return 0; + } + + usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC, + (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC); + } + + return -ETIMEDOUT; +} + +static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg, + void *val, size_t bytes) +{ + struct lpc18xx_eeprom_dev *eeprom = context; + unsigned int offset = reg; + int ret; + + /* + * The last page contains the EEPROM initialization data and is not + * writable. + */ + if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) || + (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE)) + return -EINVAL; + + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_NO); + + /* Wait 100 us while the EEPROM wakes up */ + usleep_range(100, 200); + + while (bytes) { + writel(*(u32 *)val, eeprom->mem_base + offset); + ret = lpc18xx_eeprom_busywait_until_prog(eeprom); + if (ret < 0) + return ret; + + bytes -= eeprom->val_bytes; + val += eeprom->val_bytes; + offset += eeprom->val_bytes; + } + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + return 0; +} + +static int lpc18xx_eeprom_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct lpc18xx_eeprom_dev *eeprom = context; + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_NO); + + /* Wait 100 us while the EEPROM wakes up */ + usleep_range(100, 200); + + while (bytes) { + *(u32 *)val = readl(eeprom->mem_base + offset); + bytes -= eeprom->val_bytes; + val += eeprom->val_bytes; + offset += eeprom->val_bytes; + } + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + return 0; +} + + +static struct nvmem_config lpc18xx_nvmem_config = { + .name = "lpc18xx-eeprom", + .stride = 4, + .word_size = 4, + .reg_read = lpc18xx_eeprom_read, + .reg_write = lpc18xx_eeprom_gather_write, +}; + +static int lpc18xx_eeprom_probe(struct platform_device *pdev) +{ + struct lpc18xx_eeprom_dev *eeprom; + struct device *dev = &pdev->dev; + struct reset_control *rst; + unsigned long clk_rate; + struct resource *res; + int ret; + + eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + eeprom->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(eeprom->reg_base)) + return PTR_ERR(eeprom->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + eeprom->mem_base = devm_ioremap_resource(dev, res); + if (IS_ERR(eeprom->mem_base)) + return PTR_ERR(eeprom->mem_base); + + eeprom->clk = devm_clk_get(&pdev->dev, "eeprom"); + if (IS_ERR(eeprom->clk)) { + dev_err(&pdev->dev, "failed to get eeprom clock\n"); + return PTR_ERR(eeprom->clk); + } + + ret = clk_prepare_enable(eeprom->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret); + return ret; + } + + rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(rst)) { + dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst)); + ret = PTR_ERR(rst); + goto err_clk; + } + + ret = reset_control_assert(rst); + if (ret < 0) { + dev_err(dev, "failed to assert reset: %d\n", ret); + goto err_clk; + } + + eeprom->val_bytes = 4; + eeprom->reg_bytes = 4; + + /* + * Clock rate is generated by dividing the system 
bus clock by the
+	 * division factor held in the divider register, which is stored
+	 * minus 1.
+	 */
+	clk_rate = clk_get_rate(eeprom->clk);
+	clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
+
+	/*
+	 * Writing a single word to the page will start the erase/program cycle
+	 * automatically
+	 */
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
+			      LPC18XX_EEPROM_AUTOPROG_WORD);
+
+	lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+			      LPC18XX_EEPROM_PWRDWN_YES);
+
+	eeprom->size = resource_size(res);
+	lpc18xx_nvmem_config.size = resource_size(res);
+	lpc18xx_nvmem_config.dev = dev;
+	lpc18xx_nvmem_config.priv = eeprom;
+
+	eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
+	if (IS_ERR(eeprom->nvmem)) {
+		ret = PTR_ERR(eeprom->nvmem);
+		goto err_clk;
+	}
+
+	platform_set_drvdata(pdev, eeprom);
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(eeprom->clk);
+
+	return ret;
+}
+
+static int lpc18xx_eeprom_remove(struct platform_device *pdev)
+{
+	struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(eeprom->clk);
+
+	return 0;
+}
+
+static const struct of_device_id lpc18xx_eeprom_of_match[] = {
+	{ .compatible = "nxp,lpc1857-eeprom" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
+
+static struct platform_driver lpc18xx_eeprom_driver = {
+	.probe = lpc18xx_eeprom_probe,
+	.remove = lpc18xx_eeprom_remove,
+	.driver = {
+		.name = "lpc18xx-eeprom",
+		.of_match_table = lpc18xx_eeprom_of_match,
+	},
+};
+
+module_platform_driver(lpc18xx_eeprom_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
new file mode 100644
index 000000000..16c92ea85
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NXP LPC18xx/43xx OTP memory NVMEM driver
+ *
+ * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on the imx ocotp driver,
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * TODO: add support for writing OTP register via API in boot ROM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts
+ * at offset 0 from the base.
+ *
+ * Bank 0 contains the part ID for Flashless devices and is reserved for
+ * devices with Flash.
+ * Banks 1 and 2 are general purpose or AES key storage for secure devices.
+ * Bank 3 contains control data, USB ID and general purpose words.
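+ *
+ * (Editor's note: the banks are mapped linearly, so word w of bank b is
+ * read at byte offset (b * 4 + w) * 4 from the base, e.g. bank 3 word 0
+ * at offset 0x30.)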
+ */
+#define LPC18XX_OTP_NUM_BANKS		4
+#define LPC18XX_OTP_WORDS_PER_BANK	4
+#define LPC18XX_OTP_WORD_SIZE		sizeof(u32)
+#define LPC18XX_OTP_SIZE		(LPC18XX_OTP_NUM_BANKS * \
+					 LPC18XX_OTP_WORDS_PER_BANK * \
+					 LPC18XX_OTP_WORD_SIZE)
+
+struct lpc18xx_otp {
+	void __iomem *base;
+};
+
+static int lpc18xx_otp_read(void *context, unsigned int offset,
+			    void *val, size_t bytes)
+{
+	struct lpc18xx_otp *otp = context;
+	unsigned int count = bytes >> 2;
+	u32 index = offset >> 2;
+	u32 *buf = val;
+	int i;
+
+	/* index and count are in 32-bit words, so clamp to the word count */
+	if (count > (LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index))
+		count = LPC18XX_OTP_SIZE / LPC18XX_OTP_WORD_SIZE - index;
+
+	for (i = index; i < (index + count); i++)
+		*buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
+
+	return 0;
+}
+
+static struct nvmem_config lpc18xx_otp_nvmem_config = {
+	.name = "lpc18xx-otp",
+	.read_only = true,
+	.word_size = LPC18XX_OTP_WORD_SIZE,
+	.stride = LPC18XX_OTP_WORD_SIZE,
+	.reg_read = lpc18xx_otp_read,
+};
+
+static int lpc18xx_otp_probe(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem;
+	struct lpc18xx_otp *otp;
+	struct resource *res;
+
+	otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+	if (!otp)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	otp->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(otp->base))
+		return PTR_ERR(otp->base);
+
+	lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
+	lpc18xx_otp_nvmem_config.dev = &pdev->dev;
+	lpc18xx_otp_nvmem_config.priv = otp;
+
+	nvmem = devm_nvmem_register(&pdev->dev, &lpc18xx_otp_nvmem_config);
+
+	return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id lpc18xx_otp_dt_ids[] = {
+	{ .compatible = "nxp,lpc1850-otp" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
+
+static struct platform_driver lpc18xx_otp_driver = {
+	.probe = lpc18xx_otp_probe,
+	.driver = {
+		.name = "lpc18xx_otp",
+		.of_match_table = lpc18xx_otp_dt_ids,
+	},
+};
+module_platform_driver(lpc18xx_otp_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
new file mode 100644
index 000000000..d6b533497
--- /dev/null
+++ b/drivers/nvmem/meson-efuse.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Meson GX eFuse Driver
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com> + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <linux/firmware/meson/meson_sm.h> + +static int meson_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct meson_sm_firmware *fw = context; + + return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); +} + +static int meson_efuse_write(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct meson_sm_firmware *fw = context; + + return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, + bytes, 0, 0, 0); +} + +static const struct of_device_id meson_efuse_match[] = { + { .compatible = "amlogic,meson-gxbb-efuse", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_efuse_match); + +static int meson_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct meson_sm_firmware *fw; + struct device_node *sm_np; + struct nvmem_device *nvmem; + struct nvmem_config *econfig; + struct clk *clk; + unsigned int size; + int ret; + + sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0); + if (!sm_np) { + dev_err(&pdev->dev, "no secure-monitor node\n"); + return -ENODEV; + } + + fw = meson_sm_get(sm_np); + of_node_put(sm_np); + if (!fw) + return -EPROBE_DEFER; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get efuse gate"); + return ret; + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "failed to enable gate"); + return ret; + } + + ret = devm_add_action_or_reset(dev, + (void(*)(void *))clk_disable_unprepare, + clk); + if (ret) { + dev_err(dev, "failed to add disable callback"); + return ret; + } + + if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) { + dev_err(dev, "failed to get max user"); + return -EINVAL; + } + + econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); + if (!econfig) + return -ENOMEM; + + econfig->dev = dev; + econfig->name = dev_name(dev); + econfig->stride = 1; + econfig->word_size = 1; + econfig->reg_read = meson_efuse_read; + econfig->reg_write = meson_efuse_write; + econfig->size = size; + econfig->priv = fw; + + nvmem = devm_nvmem_register(&pdev->dev, econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver meson_efuse_driver = { + .probe = meson_efuse_probe, + .driver = { + .name = "meson-efuse", + .of_match_table = meson_efuse_match, + }, +}; + +module_platform_driver(meson_efuse_driver); + +MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); +MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c new file mode 100644 index 000000000..07c9f38c1 --- /dev/null +++ b/drivers/nvmem/meson-mx-efuse.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Amlogic Meson6, Meson8 and Meson8b eFuse Driver + * + * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#define MESON_MX_EFUSE_CNTL1 
0x04 +#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24) +#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12) +#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11) +#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0) + +#define MESON_MX_EFUSE_CNTL2 0x08 + +#define MESON_MX_EFUSE_CNTL4 0x10 +#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10) + +struct meson_mx_efuse_platform_data { + const char *name; + unsigned int word_size; +}; + +struct meson_mx_efuse { + void __iomem *base; + struct clk *core_clk; + struct nvmem_device *nvmem; + struct nvmem_config config; +}; + +static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg, + u32 mask, u32 set) +{ + u32 data; + + data = readl(efuse->base + reg); + data &= ~mask; + data |= (set & mask); + + writel(data, efuse->base + reg); +} + +static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse) +{ + int err; + + err = clk_prepare_enable(efuse->core_clk); + if (err) + return err; + + /* power up the efuse */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0); + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4, + MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0); + + return 0; +} + +static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse) +{ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_PD_ENABLE, + MESON_MX_EFUSE_CNTL1_PD_ENABLE); + + clk_disable_unprepare(efuse->core_clk); +} + +static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse, + unsigned int addr, u32 *value) +{ + int err; + u32 regval; + + /* write the address to read */ + regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval); + + /* inform the hardware that we changed the address */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0); + + /* start the read process */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0); + + /* + * perform a dummy read to ensure that the HW has the RD_BUSY bit set + * when polling for the status below. 
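+	 *
+	 * (Editor's note: the read back also forces the preceding posted
+	 * write to complete before the poll below samples the status
+	 * register.)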
+ */ + readl(efuse->base + MESON_MX_EFUSE_CNTL1); + + err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1, + regval, + (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)), + 1, 1000); + if (err) { + dev_err(efuse->config.dev, + "Timeout while reading efuse address %u\n", addr); + return err; + } + + *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2); + + return 0; +} + +static int meson_mx_efuse_read(void *context, unsigned int offset, + void *buf, size_t bytes) +{ + struct meson_mx_efuse *efuse = context; + u32 tmp; + int err, i, addr; + + err = meson_mx_efuse_hw_enable(efuse); + if (err) + return err; + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE); + + for (i = 0; i < bytes; i += efuse->config.word_size) { + addr = (offset + i) / efuse->config.word_size; + + err = meson_mx_efuse_read_addr(efuse, addr, &tmp); + if (err) + break; + + memcpy(buf + i, &tmp, + min_t(size_t, bytes - i, efuse->config.word_size)); + } + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0); + + meson_mx_efuse_hw_disable(efuse); + + return err; +} + +static const struct meson_mx_efuse_platform_data meson6_efuse_data = { + .name = "meson6-efuse", + .word_size = 1, +}; + +static const struct meson_mx_efuse_platform_data meson8_efuse_data = { + .name = "meson8-efuse", + .word_size = 4, +}; + +static const struct meson_mx_efuse_platform_data meson8b_efuse_data = { + .name = "meson8b-efuse", + .word_size = 4, +}; + +static const struct of_device_id meson_mx_efuse_match[] = { + { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data }, + { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data }, + { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_mx_efuse_match); + +static int meson_mx_efuse_probe(struct platform_device *pdev) +{ + const struct meson_mx_efuse_platform_data *drvdata; + struct meson_mx_efuse *efuse; + struct resource *res; + + drvdata = of_device_get_match_data(&pdev->dev); + if (!drvdata) + return -EINVAL; + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + efuse->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name, + GFP_KERNEL); + efuse->config.owner = THIS_MODULE; + efuse->config.dev = &pdev->dev; + efuse->config.priv = efuse; + efuse->config.stride = drvdata->word_size; + efuse->config.word_size = drvdata->word_size; + efuse->config.size = SZ_512; + efuse->config.read_only = true; + efuse->config.reg_read = meson_mx_efuse_read; + + efuse->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(efuse->core_clk)) { + dev_err(&pdev->dev, "Failed to get core clock\n"); + return PTR_ERR(efuse->core_clk); + } + + efuse->nvmem = devm_nvmem_register(&pdev->dev, &efuse->config); + + return PTR_ERR_OR_ZERO(efuse->nvmem); +} + +static struct platform_driver meson_mx_efuse_driver = { + .probe = meson_mx_efuse_probe, + .driver = { + .name = "meson-mx-efuse", + .of_match_table = meson_mx_efuse_match, + }, +}; + +module_platform_driver(meson_mx_efuse_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git 
a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c new file mode 100644 index 000000000..6a537d959 --- /dev/null +++ b/drivers/nvmem/mtk-efuse.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com> + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/io.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct mtk_efuse_priv { + void __iomem *base; +}; + +static int mtk_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct mtk_efuse_priv *priv = context; + u32 *val = _val; + int i = 0, words = bytes / 4; + + while (words--) + *val++ = readl(priv->base + reg + (i++ * 4)); + + return 0; +} + +static int mtk_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct nvmem_config econfig = {}; + struct mtk_efuse_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + econfig.stride = 4; + econfig.word_size = 4; + econfig.reg_read = mtk_reg_read; + econfig.size = resource_size(res); + econfig.priv = priv; + econfig.dev = dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id mtk_efuse_of_match[] = { + { .compatible = "mediatek,mt8173-efuse",}, + { .compatible = "mediatek,efuse",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, mtk_efuse_of_match); + +static struct platform_driver mtk_efuse_driver = { + .probe = mtk_efuse_probe, + .driver = { + .name = "mediatek,efuse", + .of_match_table = mtk_efuse_of_match, + }, +}; + +static int __init mtk_efuse_init(void) +{ + int ret; + + ret = platform_driver_register(&mtk_efuse_driver); + if (ret) { + pr_err("Failed to register efuse driver\n"); + return ret; + } + + return 0; +} + +static void __exit mtk_efuse_exit(void) +{ + return platform_driver_unregister(&mtk_efuse_driver); +} + +subsys_initcall(mtk_efuse_init); +module_exit(mtk_efuse_exit); + +MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>"); +MODULE_DESCRIPTION("Mediatek EFUSE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c new file mode 100644 index 000000000..588ab56d7 --- /dev/null +++ b/drivers/nvmem/mxs-ocotp.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale MXS On-Chip OTP driver + * + * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com> + * + * Based on the driver from Huang Shijie and Christoph G. 
Baumann + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/stmp_device.h> + +/* OCOTP registers and bits */ + +#define BM_OCOTP_CTRL_RD_BANK_OPEN BIT(12) +#define BM_OCOTP_CTRL_ERROR BIT(9) +#define BM_OCOTP_CTRL_BUSY BIT(8) + +#define OCOTP_TIMEOUT 10000 +#define OCOTP_DATA_OFFSET 0x20 + +struct mxs_ocotp { + struct clk *clk; + void __iomem *base; + struct nvmem_device *nvmem; +}; + +static int mxs_ocotp_wait(struct mxs_ocotp *otp) +{ + int timeout = OCOTP_TIMEOUT; + unsigned int status = 0; + + while (timeout--) { + status = readl(otp->base); + + if (!(status & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR))) + break; + + cpu_relax(); + } + + if (status & BM_OCOTP_CTRL_BUSY) + return -EBUSY; + else if (status & BM_OCOTP_CTRL_ERROR) + return -EIO; + + return 0; +} + +static int mxs_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct mxs_ocotp *otp = context; + u32 *buf = val; + int ret; + + ret = clk_enable(otp->clk); + if (ret) + return ret; + + writel(BM_OCOTP_CTRL_ERROR, otp->base + STMP_OFFSET_REG_CLR); + + ret = mxs_ocotp_wait(otp); + if (ret) + goto disable_clk; + + /* open OCOTP banks for read */ + writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_SET); + + /* approximately wait 33 hclk cycles */ + udelay(1); + + ret = mxs_ocotp_wait(otp); + if (ret) + goto close_banks; + + while (bytes) { + if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { + /* fill up non-data register */ + *buf++ = 0; + } else { + *buf++ = readl(otp->base + offset); + } + + bytes -= 4; + offset += 4; + } + +close_banks: + /* close banks for power saving */ + writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_CLR); + +disable_clk: + clk_disable(otp->clk); + + return ret; +} + +static struct nvmem_config ocotp_config = { + .name = "mxs-ocotp", + .stride = 16, + .word_size = 4, + .reg_read = mxs_ocotp_read, +}; + +struct mxs_data { + int size; +}; + +static const struct mxs_data imx23_data = { + .size = 0x220, +}; + +static const struct mxs_data imx28_data = { + .size = 0x2a0, +}; + +static const struct of_device_id mxs_ocotp_match[] = { + { .compatible = "fsl,imx23-ocotp", .data = &imx23_data }, + { .compatible = "fsl,imx28-ocotp", .data = &imx28_data }, + { /* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, mxs_ocotp_match); + +static void mxs_ocotp_action(void *data) +{ + clk_unprepare(data); +} + +static int mxs_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct mxs_data *data; + struct mxs_ocotp *otp; + const struct of_device_id *match; + int ret; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match || !match->data) + return -EINVAL; + + otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + otp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + otp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(otp->clk)) + return PTR_ERR(otp->clk); + + ret = clk_prepare(otp->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare clk: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, mxs_ocotp_action, otp->clk); + if (ret) + return ret; + + data = match->data; + + ocotp_config.size = data->size; + ocotp_config.priv = otp; + ocotp_config.dev = 
dev; + otp->nvmem = devm_nvmem_register(dev, &ocotp_config); + if (IS_ERR(otp->nvmem)) + return PTR_ERR(otp->nvmem); + + platform_set_drvdata(pdev, otp); + + return 0; +} + +static struct platform_driver mxs_ocotp_driver = { + .probe = mxs_ocotp_probe, + .driver = { + .name = "mxs-ocotp", + .of_match_table = mxs_ocotp_match, + }, +}; + +module_platform_driver(mxs_ocotp_driver); +MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>"); +MODULE_DESCRIPTION("Driver for OCOTP in i.MX23/i.MX28"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c new file mode 100644 index 000000000..1549bfcc4 --- /dev/null +++ b/drivers/nvmem/qcom-spmi-sdam.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved. + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/nvmem-provider.h> +#include <linux/regmap.h> + +#define SDAM_MEM_START 0x40 +#define REGISTER_MAP_ID 0x40 +#define REGISTER_MAP_VERSION 0x41 +#define SDAM_SIZE 0x44 +#define SDAM_PBS_TRIG_SET 0xE5 +#define SDAM_PBS_TRIG_CLR 0xE6 + +struct sdam_chip { + struct regmap *regmap; + struct nvmem_config sdam_config; + unsigned int base; + unsigned int size; +}; + +/* read-only register offsets */ +static const u8 sdam_ro_map[] = { + REGISTER_MAP_ID, + REGISTER_MAP_VERSION, + SDAM_SIZE +}; + +static bool sdam_is_valid(struct sdam_chip *sdam, unsigned int offset, + size_t len) +{ + unsigned int sdam_mem_end = SDAM_MEM_START + sdam->size - 1; + + if (!len) + return false; + + if (offset >= SDAM_MEM_START && offset <= sdam_mem_end + && (offset + len - 1) <= sdam_mem_end) + return true; + else if ((offset == SDAM_PBS_TRIG_SET || offset == SDAM_PBS_TRIG_CLR) + && (len == 1)) + return true; + + return false; +} + +static bool sdam_is_ro(unsigned int offset, size_t len) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sdam_ro_map); i++) + if (offset <= sdam_ro_map[i] && (offset + len) > sdam_ro_map[i]) + return true; + + return false; +} + +static int sdam_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct sdam_chip *sdam = priv; + struct device *dev = sdam->sdam_config.dev; + int rc; + + if (!sdam_is_valid(sdam, offset, bytes)) { + dev_err(dev, "Invalid SDAM offset %#x len=%zd\n", + offset, bytes); + return -EINVAL; + } + + rc = regmap_bulk_read(sdam->regmap, sdam->base + offset, val, bytes); + if (rc < 0) + dev_err(dev, "Failed to read SDAM offset %#x len=%zd, rc=%d\n", + offset, bytes, rc); + + return rc; +} + +static int sdam_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct sdam_chip *sdam = priv; + struct device *dev = sdam->sdam_config.dev; + int rc; + + if (!sdam_is_valid(sdam, offset, bytes)) { + dev_err(dev, "Invalid SDAM offset %#x len=%zd\n", + offset, bytes); + return -EINVAL; + } + + if (sdam_is_ro(offset, bytes)) { + dev_err(dev, "Invalid write offset %#x len=%zd\n", + offset, bytes); + return -EINVAL; + } + + rc = regmap_bulk_write(sdam->regmap, sdam->base + offset, val, bytes); + if (rc < 0) + dev_err(dev, "Failed to write SDAM offset %#x len=%zd, rc=%d\n", + offset, bytes, rc); + + return rc; +} + +static int sdam_probe(struct platform_device *pdev) +{ + struct sdam_chip *sdam; + struct nvmem_device *nvmem; + unsigned int val; + int rc; + + sdam = devm_kzalloc(&pdev->dev, sizeof(*sdam), GFP_KERNEL); + if (!sdam) + return -ENOMEM; + + sdam->regmap = dev_get_regmap(pdev->dev.parent, NULL); + 
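+ /* + * The SDAM has no MMIO region of its own: every access goes through + * the parent SPMI PMIC's regmap, offset by the base address taken + * from the "reg" property below. + */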
if (!sdam->regmap) { + dev_err(&pdev->dev, "Failed to get regmap handle\n"); + return -ENXIO; + } + + rc = of_property_read_u32(pdev->dev.of_node, "reg", &sdam->base); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to get SDAM base, rc=%d\n", rc); + return -EINVAL; + } + + rc = regmap_read(sdam->regmap, sdam->base + SDAM_SIZE, &val); + if (rc < 0) { + dev_err(&pdev->dev, "Failed to read SDAM_SIZE rc=%d\n", rc); + return -EINVAL; + } + sdam->size = val * 32; + + sdam->sdam_config.dev = &pdev->dev; + sdam->sdam_config.name = "spmi_sdam"; + sdam->sdam_config.id = NVMEM_DEVID_AUTO; + sdam->sdam_config.owner = THIS_MODULE; + sdam->sdam_config.stride = 1; + sdam->sdam_config.word_size = 1; + sdam->sdam_config.reg_read = sdam_read; + sdam->sdam_config.reg_write = sdam_write; + sdam->sdam_config.priv = sdam; + + nvmem = devm_nvmem_register(&pdev->dev, &sdam->sdam_config); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, + "Failed to register SDAM nvmem device rc=%ld\n", + PTR_ERR(nvmem)); + return -ENXIO; + } + dev_dbg(&pdev->dev, + "SDAM base=%#x size=%u registered successfully\n", + sdam->base, sdam->size); + + return 0; +} + +static const struct of_device_id sdam_match_table[] = { + { .compatible = "qcom,spmi-sdam" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdam_match_table); + +static struct platform_driver sdam_driver = { + .driver = { + .name = "qcom,spmi-sdam", + .of_match_table = sdam_match_table, + }, + .probe = sdam_probe, +}; + +static int __init sdam_init(void) +{ + return platform_driver_register(&sdam_driver); +} +subsys_initcall(sdam_init); + +static void __exit sdam_exit(void) +{ + platform_driver_unregister(&sdam_driver); +} +module_exit(sdam_exit); + +MODULE_DESCRIPTION("QCOM SPMI SDAM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c new file mode 100644 index 000000000..8ef772ccf --- /dev/null +++ b/drivers/nvmem/qfprom.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> + +/* Blow timer clock frequency in MHz */ +#define QFPROM_BLOW_TIMER_OFFSET 0x03c + +/* Amount of time required to hold charge to blow fuse in micro-seconds */ +#define QFPROM_FUSE_BLOW_POLL_US 100 +#define QFPROM_FUSE_BLOW_TIMEOUT_US 1000 + +#define QFPROM_BLOW_STATUS_OFFSET 0x048 +#define QFPROM_BLOW_STATUS_BUSY 0x1 +#define QFPROM_BLOW_STATUS_READY 0x0 + +#define QFPROM_ACCEL_OFFSET 0x044 + +#define QFPROM_VERSION_OFFSET 0x0 +#define QFPROM_MAJOR_VERSION_SHIFT 28 +#define QFPROM_MAJOR_VERSION_MASK GENMASK(31, QFPROM_MAJOR_VERSION_SHIFT) +#define QFPROM_MINOR_VERSION_SHIFT 16 +#define QFPROM_MINOR_VERSION_MASK GENMASK(27, QFPROM_MINOR_VERSION_SHIFT) + +static bool read_raw_data; +module_param(read_raw_data, bool, 0644); +MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data"); + +/** + * struct qfprom_soc_data - config that varies from SoC to SoC. + * + * @accel_value: Should contain qfprom accel value. + * @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow. + * @qfprom_blow_set_freq: The frequency required to set when we start the + * fuse blowing. 
+ */ +struct qfprom_soc_data { + u32 accel_value; + u32 qfprom_blow_timer_value; + u32 qfprom_blow_set_freq; +}; + +/** + * struct qfprom_priv - structure holding qfprom attributes + * + * @qfpraw: iomapped memory space for qfprom-efuse raw address space. + * @qfpconf: iomapped memory space for qfprom-efuse configuration address + * space. + * @qfpcorrected: iomapped memory space for qfprom corrected address space. + * @qfpsecurity: iomapped memory space for qfprom security control space. + * @dev: qfprom device structure. + * @secclk: Clock supply. + * @vcc: Regulator supply. + * @soc_data: Data for things that vary from SoC to SoC. + */ +struct qfprom_priv { + void __iomem *qfpraw; + void __iomem *qfpconf; + void __iomem *qfpcorrected; + void __iomem *qfpsecurity; + struct device *dev; + struct clk *secclk; + struct regulator *vcc; + const struct qfprom_soc_data *soc_data; +}; + +/** + * struct qfprom_touched_values - saved values to restore after blowing + * + * @clk_rate: The rate the clock was at before blowing. + * @accel_val: The value of the accel reg before blowing. + * @timer_val: The value of the timer before blowing. + */ +struct qfprom_touched_values { + unsigned long clk_rate; + u32 accel_val; + u32 timer_val; +}; + +/** + * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing. + * @priv: Our driver data. + * @old: The data that was stashed from before fuse blowing. + * + * Resets the value of the blow timer, accel register and the clock + * and voltage settings. + * + * Prints messages if there are errors but doesn't return an error code + * since there's not much we can do upon failure. + */ +static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv, + const struct qfprom_touched_values *old) +{ + int ret; + + writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); + writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET); + + /* + * This may be a shared rail and may be able to run at a lower rate + * when we're not blowing fuses. At the moment, the regulator framework + * applies voltage constraints even on disabled rails, so remove our + * constraints and allow the rail to be adjusted by other users. + */ + ret = regulator_set_voltage(priv->vcc, 0, INT_MAX); + if (ret) + dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n"); + + ret = regulator_disable(priv->vcc); + if (ret) + dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n"); + + ret = clk_set_rate(priv->secclk, old->clk_rate); + if (ret) + dev_warn(priv->dev, + "Failed to set clock rate for disable (ignoring)\n"); + + clk_disable_unprepare(priv->secclk); +} + +/** + * qfprom_enable_fuse_blowing() - Enable fuse blowing. + * @priv: Our driver data. + * @old: We'll stash stuff here to use when disabling. + * + * Sets the value of the blow timer, accel register and the clock + * and voltage settings. + * + * Prints messages if there are errors so caller doesn't need to. + * + * Return: 0 or -err. 
+ */ +static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv, + struct qfprom_touched_values *old) +{ + int ret; + + ret = clk_prepare_enable(priv->secclk); + if (ret) { + dev_err(priv->dev, "Failed to enable clock\n"); + return ret; + } + + old->clk_rate = clk_get_rate(priv->secclk); + ret = clk_set_rate(priv->secclk, priv->soc_data->qfprom_blow_set_freq); + if (ret) { + dev_err(priv->dev, "Failed to set clock rate for enable\n"); + goto err_clk_prepared; + } + + /* + * Hardware requires a minimum of 1.8V to blow fuses; this may be a + * shared rail, so don't specify a max. The regulator constraints + * will handle it. + */ + ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX); + if (ret) { + dev_err(priv->dev, "Failed to set 1.8 voltage\n"); + goto err_clk_rate_set; + } + + ret = regulator_enable(priv->vcc); + if (ret) { + dev_err(priv->dev, "Failed to enable regulator\n"); + goto err_clk_rate_set; + } + + old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); + old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET); + writel(priv->soc_data->qfprom_blow_timer_value, + priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET); + writel(priv->soc_data->accel_value, + priv->qfpconf + QFPROM_ACCEL_OFFSET); + + return 0; + +err_clk_rate_set: + clk_set_rate(priv->secclk, old->clk_rate); +err_clk_prepared: + clk_disable_unprepare(priv->secclk); + return ret; +} + +/** + * qfprom_reg_write() - Write to fuses. + * @context: Our driver data. + * @reg: The offset to write at. + * @_val: Pointer to data to write. + * @bytes: The number of bytes to write. + * + * Writes to fuses. WARNING: THIS IS PERMANENT. + * + * Return: 0 or -err. + */ +static int qfprom_reg_write(void *context, unsigned int reg, void *_val, + size_t bytes) +{ + struct qfprom_priv *priv = context; + struct qfprom_touched_values old; + int words = bytes / 4; + u32 *value = _val; + u32 blow_status; + int ret; + int i; + + dev_dbg(priv->dev, + "Writing to raw qfprom region: %#010x of size: %zu\n", + reg, bytes); + + /* + * The hardware only allows us to write a word at a time, but we can + * read a byte at a time. Until the nvmem framework allows a separate + * word_size and stride for reading vs. writing, we'll enforce that + * here. + */ + if (bytes % 4) { + dev_err(priv->dev, + "%zu is not an integral number of words\n", bytes); + return -EINVAL; + } + if (reg % 4) { + dev_err(priv->dev, + "Invalid offset: %#x. 
Must be word aligned\n", reg); + return -EINVAL; + } + + ret = qfprom_enable_fuse_blowing(priv, &old); + if (ret) + return ret; + + ret = readl_relaxed_poll_timeout( + priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET, + blow_status, blow_status == QFPROM_BLOW_STATUS_READY, + QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US); + + if (ret) { + dev_err(priv->dev, + "Timeout waiting for initial ready; aborting.\n"); + goto exit_enabled_fuse_blowing; + } + + for (i = 0; i < words; i++) + writel(value[i], priv->qfpraw + reg + (i * 4)); + + ret = readl_relaxed_poll_timeout( + priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET, + blow_status, blow_status == QFPROM_BLOW_STATUS_READY, + QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US); + + /* Give an error, but not much we can do in this case */ + if (ret) + dev_err(priv->dev, "Timeout waiting for finish.\n"); + +exit_enabled_fuse_blowing: + qfprom_disable_fuse_blowing(priv, &old); + + return ret; +} + +static int qfprom_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct qfprom_priv *priv = context; + u8 *val = _val; + int i = 0, words = bytes; + void __iomem *base = priv->qfpcorrected; + + if (read_raw_data && priv->qfpraw) + base = priv->qfpraw; + + while (words--) + *val++ = readb(base + reg + i++); + + return 0; +} + +static const struct qfprom_soc_data qfprom_7_8_data = { + .accel_value = 0xD10, + .qfprom_blow_timer_value = 25, + .qfprom_blow_set_freq = 4800000, +}; + +static int qfprom_probe(struct platform_device *pdev) +{ + struct nvmem_config econfig = { + .name = "qfprom", + .stride = 1, + .word_size = 1, + .id = NVMEM_DEVID_AUTO, + .reg_read = qfprom_reg_read, + }; + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct qfprom_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* The corrected section is always provided */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->qfpcorrected = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->qfpcorrected)) + return PTR_ERR(priv->qfpcorrected); + + econfig.size = resource_size(res); + econfig.dev = dev; + econfig.priv = priv; + + priv->dev = dev; + + /* + * If more than one region is provided then the OS has the ability + * to write. 
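For context, nothing calls this write path directly: a consumer reaches qfprom_reg_write() through the NVMEM consumer API once reg_write is hooked up at the end of probe. A minimal sketch of such a consumer follows; the lookup name and offset are assumptions for illustration, not taken from this driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

/* Illustrative only: the lookup name and offset are assumptions. */
static int example_blow_fuse(struct device *dev)
{
	struct nvmem_device *nvmem;
	u32 val = 0x1;
	int ret;

	nvmem = devm_nvmem_device_get(dev, "qfprom");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* Offset and length must be word aligned; fuse writes are permanent. */
	ret = nvmem_device_write(nvmem, 0x120, sizeof(val), &val);

	return ret < 0 ? ret : 0;
}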
+ */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + u32 version; + int major_version, minor_version; + + priv->qfpraw = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->qfpraw)) + return PTR_ERR(priv->qfpraw); + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + priv->qfpconf = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->qfpconf)) + return PTR_ERR(priv->qfpconf); + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + priv->qfpsecurity = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->qfpsecurity)) + return PTR_ERR(priv->qfpsecurity); + + version = readl(priv->qfpsecurity + QFPROM_VERSION_OFFSET); + major_version = (version & QFPROM_MAJOR_VERSION_MASK) >> + QFPROM_MAJOR_VERSION_SHIFT; + minor_version = (version & QFPROM_MINOR_VERSION_MASK) >> + QFPROM_MINOR_VERSION_SHIFT; + + if (major_version == 7 && minor_version == 8) + priv->soc_data = &qfprom_7_8_data; + + priv->vcc = devm_regulator_get(&pdev->dev, "vcc"); + if (IS_ERR(priv->vcc)) + return PTR_ERR(priv->vcc); + + priv->secclk = devm_clk_get(dev, "core"); + if (IS_ERR(priv->secclk)) { + ret = PTR_ERR(priv->secclk); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Error getting clock: %d\n", ret); + return ret; + } + + /* Only enable writing if we have SoC data. */ + if (priv->soc_data) + econfig.reg_write = qfprom_reg_write; + } + + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id qfprom_of_match[] = { + { .compatible = "qcom,qfprom",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, qfprom_of_match); + +static struct platform_driver qfprom_driver = { + .probe = qfprom_probe, + .driver = { + .name = "qcom,qfprom", + .of_match_table = qfprom_of_match, + }, +}; +module_platform_driver(qfprom_driver); +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm QFPROM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c new file mode 100644 index 000000000..66699d44f --- /dev/null +++ b/drivers/nvmem/rave-sp-eeprom.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * EEPROM driver for RAVE SP + * + * Copyright (C) 2018 Zodiac Inflight Innovations + * + */ +#include <linux/kernel.h> +#include <linux/mfd/rave-sp.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> + +/** + * enum rave_sp_eeprom_access_type - Supported types of EEPROM access + * + * @RAVE_SP_EEPROM_WRITE: EEPROM write + * @RAVE_SP_EEPROM_READ: EEPROM read + */ +enum rave_sp_eeprom_access_type { + RAVE_SP_EEPROM_WRITE = 0, + RAVE_SP_EEPROM_READ = 1, +}; + +/** + * enum rave_sp_eeprom_header_size - EEPROM command header sizes + * + * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (< 8K) + * @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K) + */ +enum rave_sp_eeprom_header_size { + RAVE_SP_EEPROM_HEADER_SMALL = 4U, + RAVE_SP_EEPROM_HEADER_BIG = 5U, +}; +#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG + +#define RAVE_SP_EEPROM_PAGE_SIZE 32U + +/** + * struct rave_sp_eeprom_page - RAVE SP EEPROM page + * + * @type: Access type (see enum rave_sp_eeprom_access_type) + * @success: Success flag (Success = 1, Failure = 0) + * @data: Read data + * + * Note this structure corresponds to RSP_*_EEPROM payload from RAVE + * SP ICD. + */ +struct rave_sp_eeprom_page { + u8 type; + u8 success;
+ u8 data[RAVE_SP_EEPROM_PAGE_SIZE]; +} __packed; + +/** + * struct rave_sp_eeprom - RAVE SP EEPROM device + * + * @sp: Pointer to parent RAVE SP device + * @mutex: Lock protecting access to EEPROM + * @address: EEPROM device address + * @header_size: Size of EEPROM command header for this device + * @dev: Pointer to corresponding struct device used for logging + */ +struct rave_sp_eeprom { + struct rave_sp *sp; + struct mutex mutex; + u8 address; + unsigned int header_size; + struct device *dev; +}; + +/** + * rave_sp_eeprom_io - Low-level part of EEPROM page access + * + * @eeprom: EEPROM device to write to + * @type: EEPROM access type (read or write) + * @idx: number of the EEPROM page + * @page: Data to write or buffer to store result (via page->data) + * + * This function does all of the low-level work required to perform an + * EEPROM access. This includes formatting the command payload, + * sending it and checking the received results. + * + * Returns zero in case of success or negative error code in + * case of failure. + */ +static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + u16 idx, + struct rave_sp_eeprom_page *page) +{ + const bool is_write = type == RAVE_SP_EEPROM_WRITE; + const unsigned int data_size = is_write ? sizeof(page->data) : 0; + const unsigned int cmd_size = eeprom->header_size + data_size; + const unsigned int rsp_size = + is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page); + unsigned int offset = 0; + u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)]; + int ret; + + if (WARN_ON(cmd_size > sizeof(cmd))) + return -EINVAL; + + cmd[offset++] = eeprom->address; + cmd[offset++] = 0; + cmd[offset++] = type; + cmd[offset++] = idx; + + /* + * If there's still room in this command's header it means we + * are talking to an EEPROM that uses 16-bit page numbers, so + * we have to specify the index's MSB in the payload as well. + */ + if (offset < eeprom->header_size) + cmd[offset++] = idx >> 8; + /* + * Copy the data to write into the command buffer first. In + * case of a read, data_size is zero and the memcpy becomes a + * no-op. + */ + memcpy(&cmd[offset], page->data, data_size); + + ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size); + if (ret) + return ret; + + if (page->type != type) + return -EPROTO; + + if (!page->success) + return -EIO; + + return 0; +} + +/** + * rave_sp_eeprom_page_access - Access single EEPROM page + * + * @eeprom: EEPROM device to access + * @type: Access type to perform (read or write) + * @offset: Offset within EEPROM to access + * @data: Data buffer + * @data_len: Size of the data buffer + * + * This function performs a generic access to a single page or a + * portion thereof. Requested access MUST NOT cross the EEPROM page + * boundary. + * + * Returns zero in case of success or negative error code in + * case of failure. + */ +static int +rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + unsigned int offset, u8 *data, + size_t data_len) +{ + const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE; + const unsigned int page_nr = offset / RAVE_SP_EEPROM_PAGE_SIZE; + struct rave_sp_eeprom_page page; + int ret; + + /* + * This function will not work if the access we've been asked + * to do crosses an EEPROM page boundary. Normally this + * should never happen and getting here would indicate a bug + * in the code. 
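+ * For example, with 32-byte pages an access at offset 45 maps to + * page_nr = 45 / 32 = 1 and page_offset = 45 % 32 = 13, so a single + * page access starting there may cover at most 32 - 13 = 19 bytes.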
+ */ + if (WARN_ON(data_len > sizeof(page.data) - page_offset)) + return -EINVAL; + + if (type == RAVE_SP_EEPROM_WRITE) { + /* + * If doing a partial write we need to do a read first + * to fill the rest of the page with correct data. + */ + if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) { + ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ, + page_nr, &page); + if (ret) + return ret; + } + + memcpy(&page.data[page_offset], data, data_len); + } + + ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page); + if (ret) + return ret; + + /* + * Since we receive the result of the read via the 'page.data' + * buffer, we need to copy it to 'data'. + */ + if (type == RAVE_SP_EEPROM_READ) + memcpy(data, &page.data[page_offset], data_len); + + return 0; +} + +/** + * rave_sp_eeprom_access - Access EEPROM data + * + * @eeprom: EEPROM device to access + * @type: Access type to perform (read or write) + * @offset: Offset within EEPROM to access + * @data: Data buffer + * @data_len: Size of the data buffer + * + * This function performs a generic access (either read or write) at + * an arbitrary offset (not necessarily page aligned) of an arbitrary + * length (not constrained by the EEPROM page size). + * + * Returns zero in case of success or negative error code in case of + * failure. + */ +static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + unsigned int offset, u8 *data, + unsigned int data_len) +{ + unsigned int residue; + unsigned int chunk; + unsigned int head; + int ret; + + mutex_lock(&eeprom->mutex); + + head = offset % RAVE_SP_EEPROM_PAGE_SIZE; + residue = data_len; + + do { + /* + * On the first iteration, if we are doing an access + * that is not 32-byte aligned, we need to access only + * data up to a page boundary to avoid crossing it in + * rave_sp_eeprom_page_access() + */ + if (unlikely(head)) { + chunk = RAVE_SP_EEPROM_PAGE_SIZE - head; + /* + * This can only happen once per + * rave_sp_eeprom_access() call, so we set + * head to zero to process all the other + * iterations normally. + */ + head = 0; + } else { + chunk = RAVE_SP_EEPROM_PAGE_SIZE; + } + + /* + * We should never read more than 'residue' bytes + */ + chunk = min(chunk, residue); + ret = rave_sp_eeprom_page_access(eeprom, type, offset, + data, chunk); + if (ret) + goto out; + + residue -= chunk; + offset += chunk; + data += chunk; + } while (residue); +out: + mutex_unlock(&eeprom->mutex); + return ret; +} + +static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset, + void *val, size_t bytes) +{ + return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ, + offset, val, bytes); +} + +static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset, + void *val, size_t bytes) +{ + return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE, + offset, val, bytes); +} + +static int rave_sp_eeprom_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rave_sp *sp = dev_get_drvdata(dev->parent); + struct device_node *np = dev->of_node; + struct nvmem_config config = { 0 }; + struct rave_sp_eeprom *eeprom; + struct nvmem_device *nvmem; + u32 reg[2], size; + + if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) { + dev_err(dev, "Failed to parse \"reg\" property\n"); + return -EINVAL; + } + + size = reg[1]; + /* + * Per ICD, we have no more than 2 bytes to specify the EEPROM + * page. 
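+ * A 16-bit page index can address at most 64 Ki pages of 32 bytes + * each, i.e. roughly 2 MiB, which is what the check below enforces.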
+ */ + if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) { + dev_err(dev, "Specified size is too big\n"); + return -EINVAL; + } + + eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + eeprom->address = reg[0]; + eeprom->sp = sp; + eeprom->dev = dev; + + if (size > SZ_8K) + eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG; + else + eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL; + + mutex_init(&eeprom->mutex); + + config.id = -1; + of_property_read_string(np, "zii,eeprom-name", &config.name); + config.priv = eeprom; + config.dev = dev; + config.size = size; + config.reg_read = rave_sp_eeprom_reg_read; + config.reg_write = rave_sp_eeprom_reg_write; + config.word_size = 1; + config.stride = 1; + + nvmem = devm_nvmem_register(dev, &config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id rave_sp_eeprom_of_match[] = { + { .compatible = "zii,rave-sp-eeprom" }, + {} +}; +MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match); + +static struct platform_driver rave_sp_eeprom_driver = { + .probe = rave_sp_eeprom_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = rave_sp_eeprom_of_match, + }, +}; +module_platform_driver(rave_sp_eeprom_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>"); +MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>"); +MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>"); +MODULE_DESCRIPTION("RAVE SP EEPROM driver"); diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c new file mode 100644 index 000000000..e4579de5d --- /dev/null +++ b/drivers/nvmem/rockchip-efuse.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Rockchip eFuse Driver + * + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. 
+ * Author: Caesar Wang <wxt@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#define RK3288_A_SHIFT 6 +#define RK3288_A_MASK 0x3ff +#define RK3288_PGENB BIT(3) +#define RK3288_LOAD BIT(2) +#define RK3288_STROBE BIT(1) +#define RK3288_CSB BIT(0) + +#define RK3328_SECURE_SIZES 96 +#define RK3328_INT_STATUS 0x0018 +#define RK3328_DOUT 0x0020 +#define RK3328_AUTO_CTRL 0x0024 +#define RK3328_INT_FINISH BIT(0) +#define RK3328_AUTO_ENB BIT(0) +#define RK3328_AUTO_RD BIT(1) + +#define RK3399_A_SHIFT 16 +#define RK3399_A_MASK 0x3ff +#define RK3399_NBYTES 4 +#define RK3399_STROBSFTSEL BIT(9) +#define RK3399_RSB BIT(7) +#define RK3399_PD BIT(5) +#define RK3399_PGENB BIT(3) +#define RK3399_LOAD BIT(2) +#define RK3399_STROBE BIT(1) +#define RK3399_CSB BIT(0) + +#define REG_EFUSE_CTRL 0x0000 +#define REG_EFUSE_DOUT 0x0004 + +struct rockchip_efuse_chip { + struct device *dev; + void __iomem *base; + struct clk *clk; +}; + +static int rockchip_rk3288_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + u8 *buf = val; + int ret; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL); + udelay(1); + while (bytes--) { + writel(readl(efuse->base + REG_EFUSE_CTRL) & + (~(RK3288_A_MASK << RK3288_A_SHIFT)), + efuse->base + REG_EFUSE_CTRL); + writel(readl(efuse->base + REG_EFUSE_CTRL) | + ((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + writel(readl(efuse->base + REG_EFUSE_CTRL) | + RK3288_STROBE, efuse->base + REG_EFUSE_CTRL); + udelay(1); + *buf++ = readb(efuse->base + REG_EFUSE_DOUT); + writel(readl(efuse->base + REG_EFUSE_CTRL) & + (~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL); + udelay(1); + } + + /* Switch to standby mode */ + writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL); + + clk_disable_unprepare(efuse->clk); + + return 0; +} + +static int rockchip_rk3328_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + unsigned int addr_start, addr_end, addr_offset, addr_len; + u32 out_value, status; + u8 *buf; + int ret, i = 0; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + /* 128 Byte efuse, 96 Byte for secure, 32 Byte for non-secure */ + offset += RK3328_SECURE_SIZES; + addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES; + addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES; + addr_offset = offset % RK3399_NBYTES; + addr_len = addr_end - addr_start; + + buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)), + GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto nomem; + } + + while (addr_len--) { + writel(RK3328_AUTO_RD | RK3328_AUTO_ENB | + ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT), + efuse->base + RK3328_AUTO_CTRL); + udelay(4); + status = readl(efuse->base + RK3328_INT_STATUS); + if (!(status & RK3328_INT_FINISH)) { + ret = -EIO; + goto err; + } + out_value = readl(efuse->base + RK3328_DOUT); + writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS); + + 
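+ /* + * Collect the aligned 32-bit words into the bounce buffer; the + * caller's (possibly unaligned) window is copied out after the loop. + */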
memcpy(&buf[i], &out_value, RK3399_NBYTES); + i += RK3399_NBYTES; + } + + memcpy(val, buf + addr_offset, bytes); +err: + kfree(buf); +nomem: + clk_disable_unprepare(efuse->clk); + + return ret; +} + +static int rockchip_rk3399_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + unsigned int addr_start, addr_end, addr_offset, addr_len; + u32 out_value; + u8 *buf; + int ret, i = 0; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES; + addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES; + addr_offset = offset % RK3399_NBYTES; + addr_len = addr_end - addr_start; + + buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)), + GFP_KERNEL); + if (!buf) { + clk_disable_unprepare(efuse->clk); + return -ENOMEM; + } + + writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB, + efuse->base + REG_EFUSE_CTRL); + udelay(1); + while (addr_len--) { + writel(readl(efuse->base + REG_EFUSE_CTRL) | RK3399_STROBE | + ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + out_value = readl(efuse->base + REG_EFUSE_DOUT); + writel(readl(efuse->base + REG_EFUSE_CTRL) & (~RK3399_STROBE), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + + memcpy(&buf[i], &out_value, RK3399_NBYTES); + i += RK3399_NBYTES; + } + + /* Switch to standby mode */ + writel(RK3399_PD | RK3399_CSB, efuse->base + REG_EFUSE_CTRL); + + memcpy(val, buf + addr_offset, bytes); + + kfree(buf); + + clk_disable_unprepare(efuse->clk); + + return 0; +} + +static struct nvmem_config econfig = { + .name = "rockchip-efuse", + .stride = 1, + .word_size = 1, + .read_only = true, +}; + +static const struct of_device_id rockchip_efuse_match[] = { + /* deprecated but kept around for dts binding compatibility */ + { + .compatible = "rockchip,rockchip-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3066a-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3188-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3228-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3288-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3368-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3328-efuse", + .data = (void *)&rockchip_rk3328_efuse_read, + }, + { + .compatible = "rockchip,rk3399-efuse", + .data = (void *)&rockchip_rk3399_efuse_read, + }, + { /* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, rockchip_efuse_match); + +static int rockchip_efuse_probe(struct platform_device *pdev) +{ + struct resource *res; + struct nvmem_device *nvmem; + struct rockchip_efuse_chip *efuse; + const void *data; + struct device *dev = &pdev->dev; + + data = of_device_get_match_data(dev); + if (!data) { + dev_err(dev, "failed to get match data\n"); + return -EINVAL; + } + + efuse = devm_kzalloc(dev, sizeof(struct rockchip_efuse_chip), + GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + efuse->base = devm_ioremap_resource(dev, res); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + efuse->clk = devm_clk_get(dev, "pclk_efuse"); + if (IS_ERR(efuse->clk)) + return PTR_ERR(efuse->clk); + + 
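+ /* + * The usable eFuse size may be overridden by the optional + * "rockchip,efuse-size" DT property below; it defaults to the size of + * the MMIO resource. + */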
efuse->dev = dev; + if (of_property_read_u32(dev->of_node, "rockchip,efuse-size", + &econfig.size)) + econfig.size = resource_size(res); + econfig.reg_read = data; + econfig.priv = efuse; + econfig.dev = efuse->dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver rockchip_efuse_driver = { + .probe = rockchip_efuse_probe, + .driver = { + .name = "rockchip-efuse", + .of_match_table = rockchip_efuse_match, + }, +}; + +module_platform_driver(rockchip_efuse_driver); +MODULE_DESCRIPTION("rockchip_efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c new file mode 100644 index 000000000..9f53bcce2 --- /dev/null +++ b/drivers/nvmem/rockchip-otp.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Rockchip OTP Driver + * + * Copyright (c) 2018 Rockchip Electronics Co. Ltd. + * Author: Finley Xiao <finley.xiao@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +/* OTP Register Offsets */ +#define OTPC_SBPI_CTRL 0x0020 +#define OTPC_SBPI_CMD_VALID_PRE 0x0024 +#define OTPC_SBPI_CS_VALID_PRE 0x0028 +#define OTPC_SBPI_STATUS 0x002C +#define OTPC_USER_CTRL 0x0100 +#define OTPC_USER_ADDR 0x0104 +#define OTPC_USER_ENABLE 0x0108 +#define OTPC_USER_Q 0x0124 +#define OTPC_INT_STATUS 0x0304 +#define OTPC_SBPI_CMD0_OFFSET 0x1000 +#define OTPC_SBPI_CMD1_OFFSET 0x1004 + +/* OTP Register bits and masks */ +#define OTPC_USER_ADDR_MASK GENMASK(31, 16) +#define OTPC_USE_USER BIT(0) +#define OTPC_USE_USER_MASK GENMASK(16, 16) +#define OTPC_USER_FSM_ENABLE BIT(0) +#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16) +#define OTPC_SBPI_DONE BIT(1) +#define OTPC_USER_DONE BIT(2) + +#define SBPI_DAP_ADDR 0x02 +#define SBPI_DAP_ADDR_SHIFT 8 +#define SBPI_DAP_ADDR_MASK GENMASK(31, 24) +#define SBPI_CMD_VALID_MASK GENMASK(31, 16) +#define SBPI_DAP_CMD_WRF 0xC0 +#define SBPI_DAP_REG_ECC 0x3A +#define SBPI_ECC_ENABLE 0x00 +#define SBPI_ECC_DISABLE 0x09 +#define SBPI_ENABLE BIT(0) +#define SBPI_ENABLE_MASK GENMASK(16, 16) + +#define OTPC_TIMEOUT 10000 + +struct rockchip_otp { + struct device *dev; + void __iomem *base; + struct clk_bulk_data *clks; + int num_clks; + struct reset_control *rst; +}; + +/* list of required clocks */ +static const char * const rockchip_otp_clocks[] = { + "otp", "apb_pclk", "phy", +}; + +struct rockchip_data { + int size; +}; + +static int rockchip_otp_reset(struct rockchip_otp *otp) +{ + int ret; + + ret = reset_control_assert(otp->rst); + if (ret) { + dev_err(otp->dev, "failed to assert otp phy %d\n", ret); + return ret; + } + + udelay(2); + + ret = reset_control_deassert(otp->rst); + if (ret) { + dev_err(otp->dev, "failed to deassert otp phy %d\n", ret); + return ret; + } + + return 0; +} + +static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag) +{ + u32 status = 0; + int ret; + + ret = readl_poll_timeout_atomic(otp->base + OTPC_INT_STATUS, status, + (status & flag), 1, OTPC_TIMEOUT); + if (ret) + return ret; + + /* clean int status */ + writel(flag, otp->base + OTPC_INT_STATUS); + + return 0; +} + +static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable) +{ + int ret = 0; + + writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << 
SBPI_DAP_ADDR_SHIFT), + otp->base + OTPC_SBPI_CTRL); + + writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE); + writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC, + otp->base + OTPC_SBPI_CMD0_OFFSET); + if (enable) + writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET); + else + writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET); + + writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL); + + ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE); + if (ret < 0) + dev_err(otp->dev, "timeout during ecc_enable\n"); + + return ret; +} + +static int rockchip_otp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_otp *otp = context; + u8 *buf = val; + int ret = 0; + + ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks); + if (ret < 0) { + dev_err(otp->dev, "failed to prepare/enable clks\n"); + return ret; + } + + ret = rockchip_otp_reset(otp); + if (ret) { + dev_err(otp->dev, "failed to reset otp phy\n"); + goto disable_clks; + } + + ret = rockchip_otp_ecc_enable(otp, false); + if (ret < 0) { + dev_err(otp->dev, "rockchip_otp_ecc_enable err\n"); + goto disable_clks; + } + + writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL); + udelay(5); + while (bytes--) { + writel(offset++ | OTPC_USER_ADDR_MASK, + otp->base + OTPC_USER_ADDR); + writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK, + otp->base + OTPC_USER_ENABLE); + ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE); + if (ret < 0) { + dev_err(otp->dev, "timeout during read setup\n"); + goto read_end; + } + *buf++ = readb(otp->base + OTPC_USER_Q); + } + +read_end: + writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL); +disable_clks: + clk_bulk_disable_unprepare(otp->num_clks, otp->clks); + + return ret; +} + +static struct nvmem_config otp_config = { + .name = "rockchip-otp", + .owner = THIS_MODULE, + .read_only = true, + .stride = 1, + .word_size = 1, + .reg_read = rockchip_otp_read, +}; + +static const struct rockchip_data px30_data = { + .size = 0x40, +}; + +static const struct of_device_id rockchip_otp_match[] = { + { + .compatible = "rockchip,px30-otp", + .data = (void *)&px30_data, + }, + { + .compatible = "rockchip,rk3308-otp", + .data = (void *)&px30_data, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rockchip_otp_match); + +static int rockchip_otp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_otp *otp; + const struct rockchip_data *data; + struct nvmem_device *nvmem; + int ret, i; + + data = of_device_get_match_data(dev); + if (!data) { + dev_err(dev, "failed to get match data\n"); + return -EINVAL; + } + + otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp), + GFP_KERNEL); + if (!otp) + return -ENOMEM; + + otp->dev = dev; + otp->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks); + otp->clks = devm_kcalloc(dev, otp->num_clks, + sizeof(*otp->clks), GFP_KERNEL); + if (!otp->clks) + return -ENOMEM; + + for (i = 0; i < otp->num_clks; ++i) + otp->clks[i].id = rockchip_otp_clocks[i]; + + ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks); + if (ret) + return ret; + + otp->rst = devm_reset_control_get(dev, "phy"); + if (IS_ERR(otp->rst)) + return PTR_ERR(otp->rst); + + otp_config.size = data->size; + otp_config.priv = otp; + otp_config.dev = dev; + nvmem = devm_nvmem_register(dev, &otp_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver 
rockchip_otp_driver = { + .probe = rockchip_otp_probe, + .driver = { + .name = "rockchip-otp", + .of_match_table = rockchip_otp_match, + }, +}; + +module_platform_driver(rockchip_otp_driver); +MODULE_DESCRIPTION("Rockchip OTP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c new file mode 100644 index 000000000..c825fc902 --- /dev/null +++ b/drivers/nvmem/sc27xx-efuse.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Spreadtrum Communications Inc. + +#include <linux/hwspinlock.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/nvmem-provider.h> + +/* PMIC global registers definition */ +#define SC27XX_MODULE_EN 0xc08 +#define SC2730_MODULE_EN 0x1808 +#define SC27XX_EFUSE_EN BIT(6) + +/* Efuse controller registers definition */ +#define SC27XX_EFUSE_GLB_CTRL 0x0 +#define SC27XX_EFUSE_DATA_RD 0x4 +#define SC27XX_EFUSE_DATA_WR 0x8 +#define SC27XX_EFUSE_BLOCK_INDEX 0xc +#define SC27XX_EFUSE_MODE_CTRL 0x10 +#define SC27XX_EFUSE_STATUS 0x14 +#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20 +#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24 +#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28 + +/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */ +#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0) + +/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */ +#define SC27XX_EFUSE_PG_START BIT(0) +#define SC27XX_EFUSE_RD_START BIT(1) +#define SC27XX_EFUSE_CLR_RDDONE BIT(2) + +/* Bits definitions for SC27XX_EFUSE_STATUS register */ +#define SC27XX_EFUSE_PGM_BUSY BIT(0) +#define SC27XX_EFUSE_READ_BUSY BIT(1) +#define SC27XX_EFUSE_STANDBY BIT(2) +#define SC27XX_EFUSE_GLOBAL_PROT BIT(3) +#define SC27XX_EFUSE_RD_DONE BIT(4) + +/* Block number and block width (bytes) definitions */ +#define SC27XX_EFUSE_BLOCK_MAX 32 +#define SC27XX_EFUSE_BLOCK_WIDTH 2 + +/* Timeout (ms) for the trylock of hardware spinlocks */ +#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000 + +/* Timeout (us) of polling the status */ +#define SC27XX_EFUSE_POLL_TIMEOUT 3000000 +#define SC27XX_EFUSE_POLL_DELAY_US 10000 + +/* + * Since different PMICs of the SC27xx series can have different module-enable + * register addresses, we save that address in the per-variant device data. + */ +struct sc27xx_efuse_variant_data { + u32 module_en; +}; + +struct sc27xx_efuse { + struct device *dev; + struct regmap *regmap; + struct hwspinlock *hwlock; + struct mutex mutex; + u32 base; + const struct sc27xx_efuse_variant_data *var_data; +}; + +static const struct sc27xx_efuse_variant_data sc2731_edata = { + .module_en = SC27XX_MODULE_EN, +}; + +static const struct sc27xx_efuse_variant_data sc2730_edata = { + .module_en = SC2730_MODULE_EN, +}; + +/* + * On the Spreadtrum platform, multiple subsystems may access the single + * efuse controller, so we need one hardware spinlock to synchronize + * access between them. 
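+ * The mutex taken in sc27xx_efuse_lock() serializes tasks within this + * kernel, while the hardware spinlock arbitrates against firmware + * running on the other subsystems.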
+ */ +static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse) +{ + int ret; + + mutex_lock(&efuse->mutex); + + ret = hwspin_lock_timeout_raw(efuse->hwlock, + SC27XX_EFUSE_HWLOCK_TIMEOUT); + if (ret) { + dev_err(efuse->dev, "timeout to get the hwspinlock\n"); + mutex_unlock(&efuse->mutex); + return ret; + } + + return 0; +} + +static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse) +{ + hwspin_unlock_raw(efuse->hwlock); + mutex_unlock(&efuse->mutex); +} + +static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits) +{ + int ret; + u32 val; + + ret = regmap_read_poll_timeout(efuse->regmap, + efuse->base + SC27XX_EFUSE_STATUS, + val, (val & bits), + SC27XX_EFUSE_POLL_DELAY_US, + SC27XX_EFUSE_POLL_TIMEOUT); + if (ret) { + dev_err(efuse->dev, "timeout to update the efuse status\n"); + return ret; + } + + return 0; +} + +static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes) +{ + struct sc27xx_efuse *efuse = context; + u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH; + u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE; + int ret; + + if (blk_index > SC27XX_EFUSE_BLOCK_MAX || + bytes > SC27XX_EFUSE_BLOCK_WIDTH) + return -EINVAL; + + ret = sc27xx_efuse_lock(efuse); + if (ret) + return ret; + + /* Enable the efuse controller. */ + ret = regmap_update_bits(efuse->regmap, efuse->var_data->module_en, + SC27XX_EFUSE_EN, SC27XX_EFUSE_EN); + if (ret) + goto unlock_efuse; + + /* + * Before reading, we should ensure the efuse controller is in + * standby state. + */ + ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY); + if (ret) + goto disable_efuse; + + /* Set the block address to be read. */ + ret = regmap_write(efuse->regmap, + efuse->base + SC27XX_EFUSE_BLOCK_INDEX, + blk_index & SC27XX_EFUSE_BLOCK_MASK); + if (ret) + goto disable_efuse; + + /* Start reading process from efuse memory. */ + ret = regmap_update_bits(efuse->regmap, + efuse->base + SC27XX_EFUSE_MODE_CTRL, + SC27XX_EFUSE_RD_START, + SC27XX_EFUSE_RD_START); + if (ret) + goto disable_efuse; + + /* + * Polling the read done status to make sure the reading process + * is completed, that means the data can be read out now. + */ + ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE); + if (ret) + goto disable_efuse; + + /* Read data from efuse memory. */ + ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD, + &buf); + if (ret) + goto disable_efuse; + + /* Clear the read done flag. */ + ret = regmap_update_bits(efuse->regmap, + efuse->base + SC27XX_EFUSE_MODE_CTRL, + SC27XX_EFUSE_CLR_RDDONE, + SC27XX_EFUSE_CLR_RDDONE); + +disable_efuse: + /* Disable the efuse controller after reading. 
*/ + regmap_update_bits(efuse->regmap, efuse->var_data->module_en, SC27XX_EFUSE_EN, 0); +unlock_efuse: + sc27xx_efuse_unlock(efuse); + + if (!ret) { + buf >>= blk_offset; + memcpy(val, &buf, bytes); + } + + return ret; +} + +static int sc27xx_efuse_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct nvmem_config econfig = { }; + struct nvmem_device *nvmem; + struct sc27xx_efuse *efuse; + int ret; + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!efuse->regmap) { + dev_err(&pdev->dev, "failed to get efuse regmap\n"); + return -ENODEV; + } + + ret = of_property_read_u32(np, "reg", &efuse->base); + if (ret) { + dev_err(&pdev->dev, "failed to get efuse base address\n"); + return ret; + } + + ret = of_hwspin_lock_get_id(np, 0); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get hwspinlock id\n"); + return ret; + } + + efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret); + if (!efuse->hwlock) { + dev_err(&pdev->dev, "failed to request hwspinlock\n"); + return -ENXIO; + } + + mutex_init(&efuse->mutex); + efuse->dev = &pdev->dev; + efuse->var_data = of_device_get_match_data(&pdev->dev); + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = true; + econfig.name = "sc27xx-efuse"; + econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH; + econfig.reg_read = sc27xx_efuse_read; + econfig.priv = efuse; + econfig.dev = &pdev->dev; + nvmem = devm_nvmem_register(&pdev->dev, &econfig); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, "failed to register nvmem config\n"); + return PTR_ERR(nvmem); + } + + return 0; +} + +static const struct of_device_id sc27xx_efuse_of_match[] = { + { .compatible = "sprd,sc2731-efuse", .data = &sc2731_edata}, + { .compatible = "sprd,sc2730-efuse", .data = &sc2730_edata}, + { } +}; + +static struct platform_driver sc27xx_efuse_driver = { + .probe = sc27xx_efuse_probe, + .driver = { + .name = "sc27xx-efuse", + .of_match_table = sc27xx_efuse_of_match, + }, +}; + +module_platform_driver(sc27xx_efuse_driver); + +MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>"); +MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c new file mode 100644 index 000000000..c527d26ca --- /dev/null +++ b/drivers/nvmem/snvs_lpgpr.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de> + * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> + */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/regmap.h> + +#define IMX6Q_SNVS_HPLR 0x00 +#define IMX6Q_SNVS_LPLR 0x34 +#define IMX6Q_SNVS_LPGPR 0x68 + +#define IMX7D_SNVS_HPLR 0x00 +#define IMX7D_SNVS_LPLR 0x34 +#define IMX7D_SNVS_LPGPR 0x90 + +#define IMX_GPR_SL BIT(5) +#define IMX_GPR_HL BIT(5) + +struct snvs_lpgpr_cfg { + int offset; + int offset_hplr; + int offset_lplr; + int size; +}; + +struct snvs_lpgpr_priv { + struct device *dev; + struct regmap *regmap; + struct nvmem_config cfg; + const struct snvs_lpgpr_cfg *dcfg; +}; + +static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = { + .offset = IMX6Q_SNVS_LPGPR, + .offset_hplr = IMX6Q_SNVS_HPLR, + .offset_lplr = IMX6Q_SNVS_LPLR, + .size = 4, +}; + +static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx7d = { 
+ .offset = IMX7D_SNVS_LPGPR, + .offset_hplr = IMX7D_SNVS_HPLR, + .offset_lplr = IMX7D_SNVS_LPLR, + .size = 16, +}; + +static int snvs_lpgpr_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct snvs_lpgpr_priv *priv = context; + const struct snvs_lpgpr_cfg *dcfg = priv->dcfg; + unsigned int lock_reg; + int ret; + + ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg); + if (ret < 0) + return ret; + + if (lock_reg & IMX_GPR_SL) + return -EPERM; + + ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg); + if (ret < 0) + return ret; + + if (lock_reg & IMX_GPR_HL) + return -EPERM; + + return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val, + bytes / 4); +} + +static int snvs_lpgpr_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct snvs_lpgpr_priv *priv = context; + const struct snvs_lpgpr_cfg *dcfg = priv->dcfg; + + return regmap_bulk_read(priv->regmap, dcfg->offset + offset, + val, bytes / 4); +} + +static int snvs_lpgpr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *syscon_node; + struct snvs_lpgpr_priv *priv; + struct nvmem_config *cfg; + struct nvmem_device *nvmem; + const struct snvs_lpgpr_cfg *dcfg; + + if (!node) + return -ENOENT; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dcfg = of_device_get_match_data(dev); + if (!dcfg) + return -EINVAL; + + syscon_node = of_get_parent(node); + if (!syscon_node) + return -ENODEV; + + priv->regmap = syscon_node_to_regmap(syscon_node); + of_node_put(syscon_node); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + priv->dcfg = dcfg; + + cfg = &priv->cfg; + cfg->priv = priv; + cfg->name = dev_name(dev); + cfg->dev = dev; + cfg->stride = 4; + cfg->word_size = 4; + cfg->size = dcfg->size; + cfg->owner = THIS_MODULE; + cfg->reg_read = snvs_lpgpr_read; + cfg->reg_write = snvs_lpgpr_write; + + nvmem = devm_nvmem_register(dev, cfg); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id snvs_lpgpr_dt_ids[] = { + { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q }, + { .compatible = "fsl,imx6ul-snvs-lpgpr", + .data = &snvs_lpgpr_cfg_imx6q }, + { .compatible = "fsl,imx7d-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx7d }, + { }, +}; +MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids); + +static struct platform_driver snvs_lpgpr_driver = { + .probe = snvs_lpgpr_probe, + .driver = { + .name = "snvs_lpgpr", + .of_match_table = snvs_lpgpr_dt_ids, + }, +}; +module_platform_driver(snvs_lpgpr_driver); + +MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>"); +MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 and i.MX7 Secure Non-Volatile Storage"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c new file mode 100644 index 000000000..59523245d --- /dev/null +++ b/drivers/nvmem/sprd-efuse.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2019 Spreadtrum Communications Inc. 
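Like every provider in this series, the driver below is consumed through the NVMEM cell API rather than read directly. A minimal reader sketch; the cell name "calib-data" is a placeholder for illustration, not a binding defined by this driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_calib(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *data;

	cell = devm_nvmem_cell_get(dev, "calib-data");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* nvmem_cell_read() returns a kmalloc'ed buffer; the caller frees it. */
	data = nvmem_cell_read(cell, &len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... consume len bytes of calibration data ... */

	kfree(data);
	return 0;
}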
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE	0x20
+#define SPRD_EFUSE_ERR_FLAG	0x24
+#define SPRD_EFUSE_ERR_CLR	0x28
+#define SPRD_EFUSE_MAGIC_NUM	0x2c
+#define SPRD_EFUSE_FW_CFG	0x50
+#define SPRD_EFUSE_PW_SWT	0x54
+#define SPRD_EFUSE_MEM(val)	(0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN		BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN	BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN		BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN		BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN		BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK		GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON		BIT(0)
+#define SPRD_EFUSE_ENK2_ON		BIT(1)
+#define SPRD_EFUSE_PROG_EN		BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER		0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH		4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: a normal efuse and a secure
+ * efuse, and only the normal efuse is accessible from the kernel. So define
+ * the normal block offset index and the number of normal blocks.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS	24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET	72
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT	5000
+
+/*
+ * Different Spreadtrum SoCs can have different normal block numbers and
+ * offsets, and some SoCs support a block double feature: when reading or
+ * writing efuse memory, the controller saves the data twice, in case one
+ * copy becomes corrupted over time.
+ *
+ * Thus both are kept in the per-variant device data structure.
+ */
+struct sprd_efuse_variant_data {
+	u32 blk_nums;
+	u32 blk_offset;
+	bool blk_double;
+};
+
+struct sprd_efuse {
+	struct device *dev;
+	struct clk *clk;
+	struct hwspinlock *hwlock;
+	struct mutex mutex;
+	void __iomem *base;
+	const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+	.blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+	.blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+	.blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the single efuse
+ * controller, so one hardware spinlock is needed to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+	int ret;
+
+	mutex_lock(&efuse->mutex);
+
+	ret = hwspin_lock_timeout_raw(efuse->hwlock,
+				      SPRD_EFUSE_HWLOCK_TIMEOUT);
+	if (ret) {
+		dev_err(efuse->dev, "timed out getting the hwspinlock\n");
+		mutex_unlock(&efuse->mutex);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+	hwspin_unlock_raw(efuse->hwlock);
+	mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+	if (en)
+		val &= ~SPRD_EFUSE_ENK2_ON;
+	else
+		val &= ~SPRD_EFUSE_ENK1_ON;
+
+	writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after switching the efuse power so it stabilizes. */
+	usleep_range(1000, 1200);
+
+	if (en)
+		val |= SPRD_EFUSE_ENK1_ON;
+	else
+		val |= SPRD_EFUSE_ENK2_ON;
+
+	writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/*
+	 * Wait 1000us after switching the efuse power so it stabilizes.
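+	 *
+	 * ENK1 and ENK2 are toggled one at a time, with a stabilization
+	 * delay after each step, so that both power keys never switch in
+	 * the same register write.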
+	 */
+	usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_VDD_EN;
+	else
+		val &= ~SPRD_EFUSE_VDD_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+	/* Wait 1000us after switching the efuse power so it stabilizes. */
+	usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_LOCK_WR_EN;
+	else
+		val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_AUTO_CHECK_EN;
+	else
+		val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+	if (en)
+		val |= SPRD_EFUSE_DOUBLE_EN;
+	else
+		val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+	u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+	if (en)
+		val |= SPRD_EFUSE_PROG_EN;
+	else
+		val &= ~SPRD_EFUSE_PROG_EN;
+
+	writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+			       bool lock, u32 *data)
+{
+	u32 status;
+	int ret = 0;
+
+	/*
+	 * The correct magic number must be set before writing the efuse to
+	 * allow programming; it is cleared again afterwards to block any
+	 * further programming.
+	 */
+	writel(SPRD_EFUSE_MAGIC_NUMBER,
+	       efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+	/*
+	 * Power on the efuse, enable programming and enable double data
+	 * if requested.
+	 */
+	sprd_efuse_set_prog_power(efuse, true);
+	sprd_efuse_set_prog_en(efuse, true);
+	sprd_efuse_set_data_double(efuse, doub);
+
+	/*
+	 * Enable the auto-check function to validate whether the programming
+	 * was successful.
+	 */
+	if (lock)
+		sprd_efuse_set_auto_check(efuse, true);
+
+	writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+	/* Disable auto-check and data double after programming */
+	if (lock)
+		sprd_efuse_set_auto_check(efuse, false);
+	sprd_efuse_set_data_double(efuse, false);
+
+	/*
+	 * Check the efuse error status; if the programming succeeded, lock
+	 * this efuse block to prevent it from being programmed again.
+	 */
+	status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+	if (status) {
+		dev_err(efuse->dev,
+			"write error status %d of block %d\n", status, blk);
+
+		writel(SPRD_EFUSE_ERR_CLR_MASK,
+		       efuse->base + SPRD_EFUSE_ERR_CLR);
+		ret = -EBUSY;
+	} else if (lock) {
+		sprd_efuse_set_prog_lock(efuse, lock);
+		writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
+		sprd_efuse_set_prog_lock(efuse, false);
+	}
+
+	sprd_efuse_set_prog_power(efuse, false);
+	writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+	return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+			       bool doub)
+{
+	u32 status;
+
+	/*
+	 * The efuse must be powered on before reading data from it, and is
+	 * powered off again once the read completes.
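+	 *
+	 * A read always returns one whole 32-bit block; sprd_efuse_read()
+	 * then shifts out the requested bytes. For example, a read at byte
+	 * offset 10 maps to block 10 / 4 + data->blk_offset, and the
+	 * returned word is shifted right by (10 % 4) * 8 = 16 bits before
+	 * copying.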
+	 */
+	sprd_efuse_set_read_power(efuse, true);
+
+	/* Enable double data if requested */
+	sprd_efuse_set_data_double(efuse, doub);
+
+	/* Start to read data from the efuse block */
+	*val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+	/* Disable double data */
+	sprd_efuse_set_data_double(efuse, false);
+
+	/* Power off the efuse */
+	sprd_efuse_set_read_power(efuse, false);
+
+	/*
+	 * Check the efuse error status and clear any errors that have
+	 * occurred.
+	 */
+	status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+	if (status) {
+		dev_err(efuse->dev,
+			"read error status %d of block %d\n", status, blk);
+
+		writel(SPRD_EFUSE_ERR_CLR_MASK,
+		       efuse->base + SPRD_EFUSE_ERR_CLR);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+	struct sprd_efuse *efuse = context;
+	bool blk_double = efuse->data->blk_double;
+	u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+	u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+	u32 data;
+	int ret;
+
+	ret = sprd_efuse_lock(efuse);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(efuse->clk);
+	if (ret)
+		goto unlock;
+
+	ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+	if (!ret) {
+		data >>= blk_offset;
+		memcpy(val, &data, bytes);
+	}
+
+	clk_disable_unprepare(efuse->clk);
+
+unlock:
+	sprd_efuse_unlock(efuse);
+	return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+	struct sprd_efuse *efuse = context;
+	bool blk_double = efuse->data->blk_double;
+	bool lock;
+	int ret;
+
+	ret = sprd_efuse_lock(efuse);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(efuse->clk);
+	if (ret)
+		goto unlock;
+
+	/*
+	 * If the number of bytes written equals the block width, the whole
+	 * block will be programmed; in that case the block is locked so it
+	 * cannot be programmed again.
+	 *
+	 * If the block is only partially programmed, it is left unlocked so
+	 * it can be programmed again later.
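+	 *
+	 * For example, a 4-byte write fully programs and locks a block,
+	 * while a 2-byte write leaves the remaining bits programmable.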
+ */ + if (bytes < SPRD_EFUSE_BLOCK_WIDTH) + lock = false; + else + lock = true; + + ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val); + + clk_disable_unprepare(efuse->clk); + +unlock: + sprd_efuse_unlock(efuse); + return ret; +} + +static int sprd_efuse_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct nvmem_device *nvmem; + struct nvmem_config econfig = { }; + struct sprd_efuse *efuse; + const struct sprd_efuse_variant_data *pdata; + int ret; + + pdata = of_device_get_match_data(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "No matching driver data found\n"); + return -EINVAL; + } + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + ret = of_hwspin_lock_get_id(np, 0); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get hwlock id\n"); + return ret; + } + + efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret); + if (!efuse->hwlock) { + dev_err(&pdev->dev, "failed to request hwlock\n"); + return -ENXIO; + } + + efuse->clk = devm_clk_get(&pdev->dev, "enable"); + if (IS_ERR(efuse->clk)) { + dev_err(&pdev->dev, "failed to get enable clock\n"); + return PTR_ERR(efuse->clk); + } + + mutex_init(&efuse->mutex); + efuse->dev = &pdev->dev; + efuse->data = pdata; + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = false; + econfig.name = "sprd-efuse"; + econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH; + econfig.reg_read = sprd_efuse_read; + econfig.reg_write = sprd_efuse_write; + econfig.priv = efuse; + econfig.dev = &pdev->dev; + nvmem = devm_nvmem_register(&pdev->dev, &econfig); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, "failed to register nvmem\n"); + return PTR_ERR(nvmem); + } + + return 0; +} + +static const struct of_device_id sprd_efuse_of_match[] = { + { .compatible = "sprd,ums312-efuse", .data = &ums312_data }, + { } +}; + +static struct platform_driver sprd_efuse_driver = { + .probe = sprd_efuse_probe, + .driver = { + .name = "sprd-efuse", + .of_match_table = sprd_efuse_of_match, + }, +}; + +module_platform_driver(sprd_efuse_driver); + +MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>"); +MODULE_DESCRIPTION("Spreadtrum AP efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c new file mode 100644 index 000000000..354be5268 --- /dev/null +++ b/drivers/nvmem/stm32-romem.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * STM32 Factory-programmed memory read access driver + * + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author: Fabrice Gasnier <fabrice.gasnier@st.com> for STMicroelectronics. 
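 *
 * On STM32MP15 (BSEC), the 32 lower OTP words are mirrored in shadow
 * registers at STM32MP15_BSEC_DATA0 and read directly; upper words go
 * through the STM32_SMC_BSEC secure monitor service. Reads may be
 * byte-aligned: stm32_bsec_read() rounds them to 32-bit words, e.g. a
 * 3-byte read at offset 5 becomes a single word read at offset 4 with
 * the first byte skipped.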
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+
+/* BSEC secure service access from non-secure */
+#define STM32_SMC_BSEC			0x82001003
+#define STM32_SMC_READ_SHADOW		0x01
+#define STM32_SMC_PROG_OTP		0x02
+#define STM32_SMC_WRITE_SHADOW		0x03
+#define STM32_SMC_READ_OTP		0x04
+
+/* shadow registers offset */
+#define STM32MP15_BSEC_DATA0		0x200
+
+/* 32 (x 32-bits) lower shadow registers */
+#define STM32MP15_BSEC_NUM_LOWER	32
+
+struct stm32_romem_cfg {
+	int size;
+};
+
+struct stm32_romem_priv {
+	void __iomem *base;
+	struct nvmem_config cfg;
+};
+
+static int stm32_romem_read(void *context, unsigned int offset, void *buf,
+			    size_t bytes)
+{
+	struct stm32_romem_priv *priv = context;
+	u8 *buf8 = buf;
+	int i;
+
+	for (i = offset; i < offset + bytes; i++)
+		*buf8++ = readb_relaxed(priv->base + i);
+
+	return 0;
+}
+
+static int stm32_bsec_smc(u8 op, u32 otp, u32 data, u32 *result)
+{
+#if IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(STM32_SMC_BSEC, op, otp, data, 0, 0, 0, 0, &res);
+	if (res.a0)
+		return -EIO;
+
+	if (result)
+		*result = (u32)res.a1;
+
+	return 0;
+#else
+	return -ENXIO;
+#endif
+}
+
+static int stm32_bsec_read(void *context, unsigned int offset, void *buf,
+			   size_t bytes)
+{
+	struct stm32_romem_priv *priv = context;
+	struct device *dev = priv->cfg.dev;
+	u32 roffset, rbytes, val;
+	u8 *buf8 = buf, *val8 = (u8 *)&val;
+	int i, j = 0, ret, skip_bytes, size;
+
+	/* Round unaligned access to 32-bits */
+	roffset = rounddown(offset, 4);
+	skip_bytes = offset & 0x3;
+	rbytes = roundup(bytes + skip_bytes, 4);
+
+	if (roffset + rbytes > priv->cfg.size)
+		return -EINVAL;
+
+	for (i = roffset; (i < roffset + rbytes); i += 4) {
+		u32 otp = i >> 2;
+
+		if (otp < STM32MP15_BSEC_NUM_LOWER) {
+			/* read lower data from shadow registers */
+			val = readl_relaxed(
+				priv->base + STM32MP15_BSEC_DATA0 + i);
+		} else {
+			ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, otp, 0,
+					     &val);
+			if (ret) {
+				dev_err(dev, "Can't read data%d (%d)\n", otp,
+					ret);
+				return ret;
+			}
+		}
+		/* skip first bytes in case of unaligned read */
+		if (skip_bytes)
+			size = min(bytes, (size_t)(4 - skip_bytes));
+		else
+			size = min(bytes, (size_t)4);
+		memcpy(&buf8[j], &val8[skip_bytes], size);
+		bytes -= size;
+		j += size;
+		skip_bytes = 0;
+	}
+
+	return 0;
+}
+
+static int stm32_bsec_write(void *context, unsigned int offset, void *buf,
+			    size_t bytes)
+{
+	struct stm32_romem_priv *priv = context;
+	struct device *dev = priv->cfg.dev;
+	u32 *buf32 = buf;
+	int ret, i;
+
+	/* Allow only writing complete 32-bit aligned words */
+	if ((bytes % 4) || (offset % 4))
+		return -EINVAL;
+
+	for (i = offset; i < offset + bytes; i += 4) {
+		ret = stm32_bsec_smc(STM32_SMC_PROG_OTP, i >> 2, *buf32++,
+				     NULL);
+		if (ret) {
+			dev_err(dev, "Can't write data%d (%d)\n", i >> 2, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int stm32_romem_probe(struct platform_device *pdev)
+{
+	const struct stm32_romem_cfg *cfg;
+	struct device *dev = &pdev->dev;
+	struct stm32_romem_priv *priv;
+	struct resource *res;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->cfg.name = "stm32-romem";
+	priv->cfg.word_size = 1;
+	priv->cfg.stride = 1;
+	priv->cfg.dev = dev;
+	priv->cfg.priv = priv;
+	priv->cfg.owner = THIS_MODULE;
+
+	cfg = (const struct stm32_romem_cfg *)
+		of_match_device(dev->driver->of_match_table, dev)->data;
+	if (!cfg) {
+		priv->cfg.read_only = true;
+		priv->cfg.size = resource_size(res);
+		priv->cfg.reg_read = stm32_romem_read;
+	} else {
+		priv->cfg.size = cfg->size;
+		priv->cfg.reg_read = stm32_bsec_read;
+		priv->cfg.reg_write = stm32_bsec_write;
+	}
+
+	return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg));
+}
+
+static const struct stm32_romem_cfg stm32mp15_bsec_cfg = {
+	.size = 384, /* 96 x 32-bits data words */
+};
+
+static const struct of_device_id stm32_romem_of_match[] = {
+	{ .compatible = "st,stm32f4-otp", }, {
+		.compatible = "st,stm32mp15-bsec",
+		.data = (void *)&stm32mp15_bsec_cfg,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, stm32_romem_of_match);
+
+static struct platform_driver stm32_romem_driver = {
+	.probe = stm32_romem_probe,
+	.driver = {
+		.name = "stm32-romem",
+		.of_match_table = of_match_ptr(stm32_romem_of_match),
+	},
+};
+module_platform_driver(stm32_romem_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 RO-MEM");
+MODULE_ALIAS("platform:nvmem-stm32-romem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
new file mode 100644
index 000000000..e26ef1bbf
--- /dev/null
+++ b/drivers/nvmem/sunxi_sid.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sunXi SoCs Security ID support.
+ *
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+/* Registers and special values for doing register-based SID readout on H3 */
+#define SUN8I_SID_PRCTL		0x40
+#define SUN8I_SID_RDKEY		0x60
+
+#define SUN8I_SID_OFFSET_MASK	0x1FF
+#define SUN8I_SID_OFFSET_SHIFT	16
+#define SUN8I_SID_OP_LOCK	(0xAC << 8)
+#define SUN8I_SID_READ		BIT(1)
+
+struct sunxi_sid_cfg {
+	u32	value_offset;
+	u32	size;
+	bool	need_register_readout;
+};
+
+struct sunxi_sid {
+	void __iomem	*base;
+	u32		value_offset;
+};
+
+static int sunxi_sid_read(void *context, unsigned int offset,
+			  void *val, size_t bytes)
+{
+	struct sunxi_sid *sid = context;
+
+	memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
+
+	return 0;
+}
+
+static int sun8i_sid_register_readout(const struct sunxi_sid *sid,
+				      const unsigned int offset,
+				      u32 *out)
+{
+	u32 reg_val;
+	int ret;
+
+	/* Set word, lock access, and set read command */
+	reg_val = (offset & SUN8I_SID_OFFSET_MASK)
+		  << SUN8I_SID_OFFSET_SHIFT;
+	reg_val |= SUN8I_SID_OP_LOCK | SUN8I_SID_READ;
+	writel(reg_val, sid->base + SUN8I_SID_PRCTL);
+
+	ret = readl_poll_timeout(sid->base + SUN8I_SID_PRCTL, reg_val,
+				 !(reg_val & SUN8I_SID_READ), 100, 250000);
+	if (ret)
+		return ret;
+
+	if (out)
+		*out = readl(sid->base + SUN8I_SID_RDKEY);
+
+	writel(0, sid->base + SUN8I_SID_PRCTL);
+
+	return 0;
+}
+
+/*
+ * On the Allwinner H3, the value at offset 0x200 of the SID controller does
+ * not seem to be reliable at all; read it via the registers instead.
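+ *
+ * Each 32-bit word is fetched by writing its byte offset together with the
+ * SUN8I_SID_OP_LOCK | SUN8I_SID_READ command into PRCTL, polling until the
+ * READ bit clears, and then reading the result from RDKEY.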
+ */ +static int sun8i_sid_read_by_reg(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct sunxi_sid *sid = context; + u32 word; + int ret; + + /* .stride = 4 so offset is guaranteed to be aligned */ + while (bytes >= 4) { + ret = sun8i_sid_register_readout(sid, offset, val); + if (ret) + return ret; + + val += 4; + offset += 4; + bytes -= 4; + } + + if (!bytes) + return 0; + + /* Handle any trailing bytes */ + ret = sun8i_sid_register_readout(sid, offset, &word); + if (ret) + return ret; + + memcpy(val, &word, bytes); + + return 0; +} + +static int sunxi_sid_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_config *nvmem_cfg; + struct nvmem_device *nvmem; + struct sunxi_sid *sid; + int size; + char *randomness; + const struct sunxi_sid_cfg *cfg; + + sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); + if (!sid) + return -ENOMEM; + + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + sid->value_offset = cfg->value_offset; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sid->base = devm_ioremap_resource(dev, res); + if (IS_ERR(sid->base)) + return PTR_ERR(sid->base); + + size = cfg->size; + + nvmem_cfg = devm_kzalloc(dev, sizeof(*nvmem_cfg), GFP_KERNEL); + if (!nvmem_cfg) + return -ENOMEM; + + nvmem_cfg->dev = dev; + nvmem_cfg->name = "sunxi-sid"; + nvmem_cfg->read_only = true; + nvmem_cfg->size = cfg->size; + nvmem_cfg->word_size = 1; + nvmem_cfg->stride = 4; + nvmem_cfg->priv = sid; + if (cfg->need_register_readout) + nvmem_cfg->reg_read = sun8i_sid_read_by_reg; + else + nvmem_cfg->reg_read = sunxi_sid_read; + + nvmem = devm_nvmem_register(dev, nvmem_cfg); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + randomness = kzalloc(size, GFP_KERNEL); + if (!randomness) + return -ENOMEM; + + nvmem_cfg->reg_read(sid, 0, randomness, size); + add_device_randomness(randomness, size); + kfree(randomness); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static const struct sunxi_sid_cfg sun4i_a10_cfg = { + .size = 0x10, +}; + +static const struct sunxi_sid_cfg sun7i_a20_cfg = { + .size = 0x200, +}; + +static const struct sunxi_sid_cfg sun8i_h3_cfg = { + .value_offset = 0x200, + .size = 0x100, + .need_register_readout = true, +}; + +static const struct sunxi_sid_cfg sun50i_a64_cfg = { + .value_offset = 0x200, + .size = 0x100, + .need_register_readout = true, +}; + +static const struct sunxi_sid_cfg sun50i_h6_cfg = { + .value_offset = 0x200, + .size = 0x200, +}; + +static const struct of_device_id sunxi_sid_of_match[] = { + { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, + { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, + { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static struct platform_driver sunxi_sid_driver = { + .probe = sunxi_sid_probe, + .driver = { + .name = "eeprom-sunxi-sid", + .of_match_table = sunxi_sid_of_match, + }, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/uniphier-efuse.c 
b/drivers/nvmem/uniphier-efuse.c new file mode 100644 index 000000000..aca910b3b --- /dev/null +++ b/drivers/nvmem/uniphier-efuse.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UniPhier eFuse driver + * + * Copyright (C) 2017 Socionext Inc. + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct uniphier_efuse_priv { + void __iomem *base; +}; + +static int uniphier_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct uniphier_efuse_priv *priv = context; + u8 *val = _val; + int offs; + + for (offs = 0; offs < bytes; offs += sizeof(u8)) + *val++ = readb(priv->base + reg + offs); + + return 0; +} + +static int uniphier_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct nvmem_config econfig = {}; + struct uniphier_efuse_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = true; + econfig.reg_read = uniphier_reg_read; + econfig.size = resource_size(res); + econfig.priv = priv; + econfig.dev = dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id uniphier_efuse_of_match[] = { + { .compatible = "socionext,uniphier-efuse",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match); + +static struct platform_driver uniphier_efuse_driver = { + .probe = uniphier_efuse_probe, + .driver = { + .name = "uniphier-efuse", + .of_match_table = uniphier_efuse_of_match, + }, +}; +module_platform_driver(uniphier_efuse_driver); + +MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>"); +MODULE_DESCRIPTION("UniPhier eFuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c new file mode 100644 index 000000000..5b6cad168 --- /dev/null +++ b/drivers/nvmem/vf610-ocotp.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015 Toradex AG. 
+ *
+ * Author: Sanchayan Maity <sanchayan.maity@toradex.com>
+ *
+ * Based on the barebox ocotp driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>
+ *	Orex Computed Radiography
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* OCOTP Register Offsets */
+#define OCOTP_CTRL_REG			0x00
+#define OCOTP_CTRL_SET			0x04
+#define OCOTP_CTRL_CLR			0x08
+#define OCOTP_TIMING			0x10
+#define OCOTP_DATA			0x20
+#define OCOTP_READ_CTRL_REG		0x30
+#define OCOTP_READ_FUSE_DATA		0x40
+
+/* OCOTP Register bits and masks */
+#define OCOTP_CTRL_WR_UNLOCK		16
+#define OCOTP_CTRL_WR_UNLOCK_KEY	0x3E77
+#define OCOTP_CTRL_WR_UNLOCK_MASK	GENMASK(31, 16)
+#define OCOTP_CTRL_ADDR			0
+#define OCOTP_CTRL_ADDR_MASK		GENMASK(6, 0)
+#define OCOTP_CTRL_RELOAD_SHADOWS	BIT(10)
+#define OCOTP_CTRL_ERR			BIT(9)
+#define OCOTP_CTRL_BUSY			BIT(8)
+
+#define OCOTP_TIMING_STROBE_READ	16
+#define OCOTP_TIMING_STROBE_READ_MASK	GENMASK(21, 16)
+#define OCOTP_TIMING_RELAX		12
+#define OCOTP_TIMING_RELAX_MASK		GENMASK(15, 12)
+#define OCOTP_TIMING_STROBE_PROG	0
+#define OCOTP_TIMING_STROBE_PROG_MASK	GENMASK(11, 0)
+
+#define OCOTP_READ_CTRL_READ_FUSE	0x1
+
+#define VF610_OCOTP_TIMEOUT		100000
+
+#define BF(value, field)		(((value) << field) & field##_MASK)
+
+#define DEF_RELAX			20
+
+static const int base_to_fuse_addr_mappings[][2] = {
+	{0x400, 0x00},
+	{0x410, 0x01},
+	{0x420, 0x02},
+	{0x450, 0x05},
+	{0x4F0, 0x0F},
+	{0x600, 0x20},
+	{0x610, 0x21},
+	{0x620, 0x22},
+	{0x630, 0x23},
+	{0x640, 0x24},
+	{0x650, 0x25},
+	{0x660, 0x26},
+	{0x670, 0x27},
+	{0x6F0, 0x2F},
+	{0x880, 0x38},
+	{0x890, 0x39},
+	{0x8A0, 0x3A},
+	{0x8B0, 0x3B},
+	{0x8C0, 0x3C},
+	{0x8D0, 0x3D},
+	{0x8E0, 0x3E},
+	{0x8F0, 0x3F},
+	{0xC80, 0x78},
+	{0xC90, 0x79},
+	{0xCA0, 0x7A},
+	{0xCB0, 0x7B},
+	{0xCC0, 0x7C},
+	{0xCD0, 0x7D},
+	{0xCE0, 0x7E},
+	{0xCF0, 0x7F},
+};
+
+struct vf610_ocotp {
+	void __iomem *base;
+	struct clk *clk;
+	struct device *dev;
+	struct nvmem_device *nvmem;
+	int timing;
+};
+
+static int vf610_ocotp_wait_busy(void __iomem *base)
+{
+	int timeout = VF610_OCOTP_TIMEOUT;
+
+	while ((readl(base) & OCOTP_CTRL_BUSY) && --timeout)
+		udelay(10);
+
+	if (!timeout) {
+		writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
+		return -ETIMEDOUT;
+	}
+
+	udelay(10);
+
+	return 0;
+}
+
+static int vf610_ocotp_calculate_timing(struct vf610_ocotp *ocotp_dev)
+{
+	u32 clk_rate;
+	u32 relax, strobe_read, strobe_prog;
+	u32 timing;
+
+	clk_rate = clk_get_rate(ocotp_dev->clk);
+
+	/* Refer to the OTP read/write timing parameters section in the TRM */
+	relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+	strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+	strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+
+	timing = BF(relax, OCOTP_TIMING_RELAX);
+	timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
+	timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
+
+	return timing;
+}
+
+static int vf610_get_fuse_address(int base_addr_offset)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(base_to_fuse_addr_mappings); i++) {
+		if (base_to_fuse_addr_mappings[i][0] == base_addr_offset)
+			return base_to_fuse_addr_mappings[i][1];
+	}
+
+	return -EINVAL;
+}
+
+static int vf610_ocotp_read(void *context, unsigned int offset,
+			    void *val, size_t bytes)
+{
+	struct vf610_ocotp *ocotp = context;
+	void __iomem *base = ocotp->base;
+	u32 reg, *buf = val;
+	int fuse_addr;
+	int ret;
+
+	while (bytes > 0) {
+		fuse_addr = vf610_get_fuse_address(offset);
+		/* fuse address 0 (offset 0x400) is valid; negative means unmapped */
+		if (fuse_addr >= 0) {
+			writel(ocotp->timing, base + OCOTP_TIMING);
+			ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
+			if (ret)
+				return ret;
+
+			reg = readl(base + OCOTP_CTRL_REG);
+			reg &= ~OCOTP_CTRL_ADDR_MASK;
+			reg &= ~OCOTP_CTRL_WR_UNLOCK_MASK;
+			reg |= BF(fuse_addr, OCOTP_CTRL_ADDR);
+			writel(reg, base + OCOTP_CTRL_REG);
+
+			writel(OCOTP_READ_CTRL_READ_FUSE,
+			       base + OCOTP_READ_CTRL_REG);
+			ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
+			if (ret)
+				return ret;
+
+			if (readl(base) & OCOTP_CTRL_ERR) {
+				dev_dbg(ocotp->dev, "Error reading from fuse address %x\n",
+					fuse_addr);
+				writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
+			}
+
+			/*
+			 * In case of error, we do not abort and expect to read
+			 * 0xBADABADA as mentioned in the TRM. We just read this
+			 * value and return.
+			 */
+			*buf = readl(base + OCOTP_READ_FUSE_DATA);
+		} else {
+			*buf = 0;
+		}
+
+		buf++;
+		bytes -= 4;
+		offset += 4;
+	}
+
+	return 0;
+}
+
+static struct nvmem_config ocotp_config = {
+	.name = "ocotp",
+	.stride = 4,
+	.word_size = 4,
+	.reg_read = vf610_ocotp_read,
+};
+
+static const struct of_device_id ocotp_of_match[] = {
+	{ .compatible = "fsl,vf610-ocotp", },
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, ocotp_of_match);
+
+static int vf610_ocotp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct vf610_ocotp *ocotp_dev;
+
+	ocotp_dev = devm_kzalloc(dev, sizeof(struct vf610_ocotp), GFP_KERNEL);
+	if (!ocotp_dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ocotp_dev->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ocotp_dev->base))
+		return PTR_ERR(ocotp_dev->base);
+
+	ocotp_dev->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ocotp_dev->clk)) {
+		dev_err(dev, "failed getting clock, err = %ld\n",
+			PTR_ERR(ocotp_dev->clk));
+		return PTR_ERR(ocotp_dev->clk);
+	}
+	ocotp_dev->dev = dev;
+	ocotp_dev->timing = vf610_ocotp_calculate_timing(ocotp_dev);
+
+	ocotp_config.size = resource_size(res);
+	ocotp_config.priv = ocotp_dev;
+	ocotp_config.dev = dev;
+
+	ocotp_dev->nvmem = devm_nvmem_register(dev, &ocotp_config);
+
+	return PTR_ERR_OR_ZERO(ocotp_dev->nvmem);
+}
+
+static struct platform_driver vf610_ocotp_driver = {
+	.probe = vf610_ocotp_probe,
+	.driver = {
+		.name = "vf610-ocotp",
+		.of_match_table = ocotp_of_match,
+	},
+};
+module_platform_driver(vf610_ocotp_driver);
+MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>");
+MODULE_DESCRIPTION("Vybrid OCOTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644
index 000000000..e28d7b133
--- /dev/null
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
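+ *
+ * Exposes the chip's silicon revision (the low nibble of the PM chipid
+ * version word) as a single read-only nvmem byte.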
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+	struct device *dev;
+	struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+			     void *val, size_t bytes)
+{
+	int ret;
+	u32 idcode, version;
+	struct zynqmp_nvmem_data *priv = context;
+
+	ret = zynqmp_pm_get_chipid(&idcode, &version);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+	/* econfig.size is 1, so only a single byte may be stored here */
+	*(u8 *)val = version & SILICON_REVISION_MASK;
+
+	return 0;
+}
+
+static struct nvmem_config econfig = {
+	.name = "zynqmp-nvmem",
+	.owner = THIS_MODULE,
+	.word_size = 1,
+	.size = 1,
+	.read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+	{ .compatible = "xlnx,zynqmp-nvmem-fw", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zynqmp_nvmem_data *priv;
+
+	priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	econfig.dev = dev;
+	econfig.reg_read = zynqmp_nvmem_read;
+	econfig.priv = priv;
+
+	priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+	return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+	.probe = zynqmp_nvmem_probe,
+	.driver = {
+		.name = "zynqmp-nvmem",
+		.of_match_table = zynqmp_nvmem_match,
+	},
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>, Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");
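For context, a minimal consumer-side sketch (not part of this diff) of how a kernel driver could read bytes from one of the nvmem providers registered above via the generic consumer API; the "efuse" lookup name and the offset are illustrative assumptions only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_read_efuse(struct device *dev)
{
	struct nvmem_device *nvmem;
	u8 buf[4];
	int ret;

	/*
	 * "efuse" is a hypothetical nvmem-names entry in the consumer's
	 * device tree node; any of the providers above could back it.
	 */
	nvmem = devm_nvmem_device_get(dev, "efuse");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/*
	 * Read 4 bytes at offset 0; the nvmem core forwards this to the
	 * provider's reg_read() callback.
	 */
	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
	if (ret < 0)
		return ret;

	return 0;
}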