| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000 |
| commit | 76cb841cb886eef6b3bee341a2266c76578724ad (patch) | |
| tree | f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 | /drivers/nvmem |
| parent | Initial commit. (diff) | |
| download | linux-76cb841cb886eef6b3bee341a2266c76578724ad.tar.xz, linux-76cb841cb886eef6b3bee341a2266c76578724ad.zip | |
Adding upstream version 4.19.249. (upstream/4.19.249)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/nvmem')
| mode | path | lines added |
|---|---|---|
| -rw-r--r-- | drivers/nvmem/Kconfig | 195 |
| -rw-r--r-- | drivers/nvmem/Makefile | 43 |
| -rw-r--r-- | drivers/nvmem/bcm-ocotp.c | 323 |
| -rw-r--r-- | drivers/nvmem/core.c | 1381 |
| -rw-r--r-- | drivers/nvmem/imx-iim.c | 157 |
| -rw-r--r-- | drivers/nvmem/imx-ocotp.c | 524 |
| -rw-r--r-- | drivers/nvmem/lpc18xx_eeprom.c | 288 |
| -rw-r--r-- | drivers/nvmem/lpc18xx_otp.c | 111 |
| -rw-r--r-- | drivers/nvmem/meson-efuse.c | 83 |
| -rw-r--r-- | drivers/nvmem/meson-mx-efuse.c | 253 |
| -rw-r--r-- | drivers/nvmem/mtk-efuse.c | 119 |
| -rw-r--r-- | drivers/nvmem/mxs-ocotp.c | 217 |
| -rw-r--r-- | drivers/nvmem/qfprom.c | 86 |
| -rw-r--r-- | drivers/nvmem/rave-sp-eeprom.c | 361 |
| -rw-r--r-- | drivers/nvmem/rockchip-efuse.c | 309 |
| -rw-r--r-- | drivers/nvmem/sc27xx-efuse.c | 264 |
| -rw-r--r-- | drivers/nvmem/snvs_lpgpr.c | 159 |
| -rw-r--r-- | drivers/nvmem/sunxi_sid.c | 258 |
| -rw-r--r-- | drivers/nvmem/uniphier-efuse.c | 86 |
| -rw-r--r-- | drivers/nvmem/vf610-ocotp.c | 263 |
20 files changed, 5480 insertions, 0 deletions
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig new file mode 100644 index 000000000..0a7a470ee --- /dev/null +++ b/drivers/nvmem/Kconfig @@ -0,0 +1,195 @@ +menuconfig NVMEM + bool "NVMEM Support" + help + Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... + + This framework is designed to provide a generic interface to NVMEM + from both the Linux Kernel and the userspace. + + This driver can also be built as a module. If so, the module + will be called nvmem_core. + + If unsure, say no. + +if NVMEM + +config NVMEM_IMX_IIM + tristate "i.MX IC Identification Module support" + depends on ARCH_MXC || COMPILE_TEST + help + This is a driver for the IC Identification Module (IIM) available on + i.MX SoCs, providing access to 4 Kbits of programmable + eFuses. + + This driver can also be built as a module. If so, the module + will be called nvmem-imx-iim. + +config NVMEM_IMX_OCOTP + tristate "i.MX6 On-Chip OTP Controller support" + depends on SOC_IMX6 || COMPILE_TEST + depends on HAS_IOMEM + help + This is a driver for the On-Chip OTP Controller (OCOTP) available on + i.MX6 SoCs, providing access to 4 Kbits of one-time programmable + eFuses. + + This driver can also be built as a module. If so, the module + will be called nvmem-imx-ocotp. + +config NVMEM_LPC18XX_EEPROM + tristate "NXP LPC18XX EEPROM Memory Support" + depends on ARCH_LPC18XX || COMPILE_TEST + depends on HAS_IOMEM + help + Say Y here to include support for NXP LPC18xx EEPROM memory found in + NXP LPC185x/3x and LPC435x/3x/2x/1x devices. + To compile this driver as a module, choose M here: the module + will be called nvmem_lpc18xx_eeprom. + +config NVMEM_LPC18XX_OTP + tristate "NXP LPC18XX OTP Memory Support" + depends on ARCH_LPC18XX || COMPILE_TEST + depends on HAS_IOMEM + help + Say Y here to include support for NXP LPC18xx OTP memory found on + all LPC18xx and LPC43xx devices. + To compile this driver as a module, choose M here: the module + will be called nvmem_lpc18xx_otp. + +config NVMEM_MXS_OCOTP + tristate "Freescale MXS On-Chip OTP Memory Support" + depends on ARCH_MXS || COMPILE_TEST + depends on HAS_IOMEM + help + If you say Y here, you will get readonly access to the + One Time Programmable memory pages that are stored + on the Freescale i.MX23/i.MX28 processor. + + This driver can also be built as a module. If so, the module + will be called nvmem-mxs-ocotp. + +config MTK_EFUSE + tristate "Mediatek SoCs EFUSE support" + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on HAS_IOMEM + help + This is a driver to access hardware related data like sensor + calibration, HDMI impedance etc. + + This driver can also be built as a module. If so, the module + will be called efuse-mtk. + +config QCOM_QFPROM + tristate "QCOM QFPROM Support" + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM + help + Say y here to enable QFPROM support. The QFPROM provides access + functions for QFPROM data to rest of the drivers via nvmem interface. + + This driver can also be built as a module. If so, the module + will be called nvmem_qfprom. + +config ROCKCHIP_EFUSE + tristate "Rockchip eFuse Support" + depends on ARCH_ROCKCHIP || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple drive to dump specified values of Rockchip SoC + from eFuse, such as cpu-leakage. + + This driver can also be built as a module. If so, the module + will be called nvmem_rockchip_efuse. 
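The NVMEM help text above describes a generic interface exposed to the rest of the kernel; core.c, added later in this patch, implements the consumer side of that interface (nvmem_cell_get(), nvmem_cell_read(), nvmem_cell_put()). As a rough illustration only — the cell name "calibration" and the surrounding function are assumptions, not part of this commit — a consumer driver would read a cell roughly like this:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Illustrative only (not part of this commit): read a 32-bit value from a
 * hypothetical nvmem cell called "calibration" referenced by the caller's
 * device tree node via nvmem-cells / nvmem-cell-names.
 */
static int example_read_calibration(struct device *dev, u32 *out)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* nvmem_cell_read() returns a kzalloc'd buffer the caller must free */
	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len < sizeof(*out)) {
		kfree(buf);
		return -EINVAL;
	}

	memcpy(out, buf, sizeof(*out));
	kfree(buf);
	return 0;
}
```

core.c also provides nvmem_cell_read_u32() and the devm_* variants, which collapse this pattern into a single call for the common fixed-size case.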
+ +config NVMEM_BCM_OCOTP + tristate "Broadcom On-Chip OTP Controller support" + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on HAS_IOMEM + default ARCH_BCM_IPROC + help + Say y here to enable read/write access to the Broadcom OTP + controller. + + This driver can also be built as a module. If so, the module + will be called nvmem-bcm-ocotp. + +config NVMEM_SUNXI_SID + tristate "Allwinner SoCs SID support" + depends on ARCH_SUNXI + help + This is a driver for the 'security ID' available on various Allwinner + devices. + + This driver can also be built as a module. If so, the module + will be called nvmem_sunxi_sid. + +config UNIPHIER_EFUSE + tristate "UniPhier SoCs eFuse support" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of UniPhier SoC + from eFuse. + + This driver can also be built as a module. If so, the module + will be called nvmem-uniphier-efuse. + +config NVMEM_VF610_OCOTP + tristate "VF610 SoC OCOTP support" + depends on SOC_VF610 || COMPILE_TEST + depends on HAS_IOMEM + help + This is a driver for the 'OCOTP' peripheral available on Vybrid + devices like VF5xx and VF6xx. + + This driver can also be build as a module. If so, the module will + be called nvmem-vf610-ocotp. + +config MESON_EFUSE + tristate "Amlogic Meson GX eFuse Support" + depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM + help + This is a driver to retrieve specific values from the eFuse found on + the Amlogic Meson GX SoCs. + + This driver can also be built as a module. If so, the module + will be called nvmem_meson_efuse. + +config MESON_MX_EFUSE + tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support" + depends on ARCH_MESON || COMPILE_TEST + help + This is a driver to retrieve specific values from the eFuse found on + the Amlogic Meson6, Meson8 and Meson8b SoCs. + + This driver can also be built as a module. If so, the module + will be called nvmem_meson_mx_efuse. + +config NVMEM_SNVS_LPGPR + tristate "Support for Low Power General Purpose Register" + depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST + help + This is a driver for Low Power General Purpose Register (LPGPR) available on + i.MX6 and i.MX7 SoCs in Secure Non-Volatile Storage (SNVS) of this chip. + + This driver can also be built as a module. If so, the module + will be called nvmem-snvs-lpgpr. + +config RAVE_SP_EEPROM + tristate "Rave SP EEPROM Support" + depends on RAVE_SP_CORE + help + Say y here to enable Rave SP EEPROM support. + +config SC27XX_EFUSE + tristate "Spreadtrum SC27XX eFuse Support" + depends on MFD_SC27XX_PMIC || COMPILE_TEST + depends on HAS_IOMEM + help + This is a simple driver to dump specified values of Spreadtrum + SC27XX PMICs from eFuse. + + This driver can also be built as a module. If so, the module + will be called nvmem-sc27xx-efuse. + +endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile new file mode 100644 index 000000000..4e8c61628 --- /dev/null +++ b/drivers/nvmem/Makefile @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for nvmem drivers. 
+# + +obj-$(CONFIG_NVMEM) += nvmem_core.o +nvmem_core-y := core.o + +# Devices +obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o +nvmem-bcm-ocotp-y := bcm-ocotp.o +obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o +nvmem-imx-iim-y := imx-iim.o +obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o +nvmem-imx-ocotp-y := imx-ocotp.o +obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o +nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o +obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o +nvmem_lpc18xx_otp-y := lpc18xx_otp.o +obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o +nvmem-mxs-ocotp-y := mxs-ocotp.o +obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o +nvmem_mtk-efuse-y := mtk-efuse.o +obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o +nvmem_qfprom-y := qfprom.o +obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o +nvmem_rockchip_efuse-y := rockchip-efuse.o +obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o +nvmem_sunxi_sid-y := sunxi_sid.o +obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o +nvmem-uniphier-efuse-y := uniphier-efuse.o +obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o +nvmem-vf610-ocotp-y := vf610-ocotp.o +obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o +nvmem_meson_efuse-y := meson-efuse.o +obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o +nvmem_meson_mx_efuse-y := meson-mx-efuse.o +obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o +nvmem_snvs_lpgpr-y := snvs_lpgpr.o +obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o +nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o +obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o +nvmem-sc27xx-efuse-y := sc27xx-efuse.o diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c new file mode 100644 index 000000000..4159b3f41 --- /dev/null +++ b/drivers/nvmem/bcm-ocotp.c @@ -0,0 +1,323 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> + +/* + * # of tries for OTP Status. The time to execute a command varies. The slowest + * commands are writes which also vary based on the # of bits turned on. Writing + * 0xffffffff takes ~3800 us. 
+ */ +#define OTPC_RETRIES 5000 + +/* Sequence to enable OTP program */ +#define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd } + +/* OTPC Commands */ +#define OTPC_CMD_READ 0x0 +#define OTPC_CMD_OTP_PROG_ENABLE 0x2 +#define OTPC_CMD_OTP_PROG_DISABLE 0x3 +#define OTPC_CMD_PROGRAM 0x8 + +/* OTPC Status Bits */ +#define OTPC_STAT_CMD_DONE BIT(1) +#define OTPC_STAT_PROG_OK BIT(2) + +/* OTPC register definition */ +#define OTPC_MODE_REG_OFFSET 0x0 +#define OTPC_MODE_REG_OTPC_MODE 0 +#define OTPC_COMMAND_OFFSET 0x4 +#define OTPC_COMMAND_COMMAND_WIDTH 6 +#define OTPC_CMD_START_OFFSET 0x8 +#define OTPC_CMD_START_START 0 +#define OTPC_CPU_STATUS_OFFSET 0xc +#define OTPC_CPUADDR_REG_OFFSET 0x28 +#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16 +#define OTPC_CPU_WRITE_REG_OFFSET 0x2c + +#define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1) +#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1) + + +struct otpc_map { + /* in words. */ + u32 otpc_row_size; + /* 128 bit row / 4 words support. */ + u16 data_r_offset[4]; + /* 128 bit row / 4 words support. */ + u16 data_w_offset[4]; +}; + +static struct otpc_map otp_map = { + .otpc_row_size = 1, + .data_r_offset = {0x10}, + .data_w_offset = {0x2c}, +}; + +static struct otpc_map otp_map_v2 = { + .otpc_row_size = 2, + .data_r_offset = {0x10, 0x5c}, + .data_w_offset = {0x2c, 0x64}, +}; + +struct otpc_priv { + struct device *dev; + void __iomem *base; + struct otpc_map *map; + struct nvmem_config *config; +}; + +static inline void set_command(void __iomem *base, u32 command) +{ + writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET); +} + +static inline void set_cpu_address(void __iomem *base, u32 addr) +{ + writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET); +} + +static inline void set_start_bit(void __iomem *base) +{ + writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET); +} + +static inline void reset_start_bit(void __iomem *base) +{ + writel(0, base + OTPC_CMD_START_OFFSET); +} + +static inline void write_cpu_data(void __iomem *base, u32 value) +{ + writel(value, base + OTPC_CPU_WRITE_REG_OFFSET); +} + +static int poll_cpu_status(void __iomem *base, u32 value) +{ + u32 status; + u32 retries; + + for (retries = 0; retries < OTPC_RETRIES; retries++) { + status = readl(base + OTPC_CPU_STATUS_OFFSET); + if (status & value) + break; + udelay(1); + } + if (retries == OTPC_RETRIES) + return -EAGAIN; + + return 0; +} + +static int enable_ocotp_program(void __iomem *base) +{ + static const u32 vals[] = OTPC_PROG_EN_SEQ; + int i; + int ret; + + /* Write the magic sequence to enable programming */ + set_command(base, OTPC_CMD_OTP_PROG_ENABLE); + for (i = 0; i < ARRAY_SIZE(vals); i++) { + write_cpu_data(base, vals[i]); + set_start_bit(base); + ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE); + reset_start_bit(base); + if (ret) + return ret; + } + + return poll_cpu_status(base, OTPC_STAT_PROG_OK); +} + +static int disable_ocotp_program(void __iomem *base) +{ + int ret; + + set_command(base, OTPC_CMD_OTP_PROG_DISABLE); + set_start_bit(base); + ret = poll_cpu_status(base, OTPC_STAT_PROG_OK); + reset_start_bit(base); + + return ret; +} + +static int bcm_otpc_read(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct otpc_priv *priv = context; + u32 *buf = val; + u32 bytes_read; + u32 address = offset / priv->config->word_size; + int i, ret; + + for (bytes_read = 0; bytes_read < bytes;) { + set_command(priv->base, OTPC_CMD_READ); + set_cpu_address(priv->base, address++); + set_start_bit(priv->base); + 
ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); + if (ret) { + dev_err(priv->dev, "otp read error: 0x%x", ret); + return -EIO; + } + + for (i = 0; i < priv->map->otpc_row_size; i++) { + *buf++ = readl(priv->base + + priv->map->data_r_offset[i]); + bytes_read += sizeof(*buf); + } + + reset_start_bit(priv->base); + } + + return 0; +} + +static int bcm_otpc_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct otpc_priv *priv = context; + u32 *buf = val; + u32 bytes_written; + u32 address = offset / priv->config->word_size; + int i, ret; + + if (offset % priv->config->word_size) + return -EINVAL; + + ret = enable_ocotp_program(priv->base); + if (ret) + return -EIO; + + for (bytes_written = 0; bytes_written < bytes;) { + set_command(priv->base, OTPC_CMD_PROGRAM); + set_cpu_address(priv->base, address++); + for (i = 0; i < priv->map->otpc_row_size; i++) { + writel(*buf, priv->base + priv->map->data_w_offset[i]); + buf++; + bytes_written += sizeof(*buf); + } + set_start_bit(priv->base); + ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); + reset_start_bit(priv->base); + if (ret) { + dev_err(priv->dev, "otp write error: 0x%x", ret); + return -EIO; + } + } + + disable_ocotp_program(priv->base); + + return 0; +} + +static struct nvmem_config bcm_otpc_nvmem_config = { + .name = "bcm-ocotp", + .read_only = false, + .word_size = 4, + .stride = 4, + .reg_read = bcm_otpc_read, + .reg_write = bcm_otpc_write, +}; + +static const struct of_device_id bcm_otpc_dt_ids[] = { + { .compatible = "brcm,ocotp" }, + { .compatible = "brcm,ocotp-v2" }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); + +static int bcm_otpc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; + struct resource *res; + struct otpc_priv *priv; + struct nvmem_device *nvmem; + int err; + u32 num_words; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + if (of_device_is_compatible(dev->of_node, "brcm,ocotp")) + priv->map = &otp_map; + else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) + priv->map = &otp_map_v2; + else { + dev_err(dev, "%s otpc config map not defined\n", __func__); + return -EINVAL; + } + + /* Get OTP base address register. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) { + dev_err(dev, "unable to map I/O memory\n"); + return PTR_ERR(priv->base); + } + + /* Enable CPU access to OTPC. */ + writel(readl(priv->base + OTPC_MODE_REG_OFFSET) | + BIT(OTPC_MODE_REG_OTPC_MODE), + priv->base + OTPC_MODE_REG_OFFSET); + reset_start_bit(priv->base); + + /* Read size of memory in words. 
*/ + err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words); + if (err) { + dev_err(dev, "size parameter not specified\n"); + return -EINVAL; + } else if (num_words == 0) { + dev_err(dev, "size must be > 0\n"); + return -EINVAL; + } + + bcm_otpc_nvmem_config.size = 4 * num_words; + bcm_otpc_nvmem_config.dev = dev; + bcm_otpc_nvmem_config.priv = priv; + + if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) { + bcm_otpc_nvmem_config.word_size = 8; + bcm_otpc_nvmem_config.stride = 8; + } + + priv->config = &bcm_otpc_nvmem_config; + + nvmem = devm_nvmem_register(dev, &bcm_otpc_nvmem_config); + if (IS_ERR(nvmem)) { + dev_err(dev, "error registering nvmem config\n"); + return PTR_ERR(nvmem); + } + + return 0; +} + +static struct platform_driver bcm_otpc_driver = { + .probe = bcm_otpc_probe, + .driver = { + .name = "brcm-otpc", + .of_match_table = bcm_otpc_dt_ids, + }, +}; +module_platform_driver(bcm_otpc_driver); + +MODULE_DESCRIPTION("Broadcom OTPC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c new file mode 100644 index 000000000..2e77d49c2 --- /dev/null +++ b/drivers/nvmem/core.c @@ -0,0 +1,1381 @@ +/* + * nvmem framework core. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/slab.h> + +struct nvmem_device { + const char *name; + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + int users; + size_t size; + bool read_only; + int flags; + struct bin_attribute eeprom; + struct device *base_dev; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + void *priv; +}; + +#define FLAG_COMPAT BIT(0) + +struct nvmem_cell { + const char *name; + int offset; + int bytes; + int bit_offset; + int nbits; + struct nvmem_device *nvmem; + struct list_head node; +}; + +static DEFINE_MUTEX(nvmem_mutex); +static DEFINE_IDA(nvmem_ida); + +static LIST_HEAD(nvmem_cells); +static DEFINE_MUTEX(nvmem_cells_mutex); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key eeprom_lock_key; +#endif + +#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) +static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + if (nvmem->reg_read) + return nvmem->reg_read(nvmem->priv, offset, val, bytes); + + return -EINVAL; +} + +static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + if (nvmem->reg_write) + return nvmem->reg_write(nvmem->priv, offset, val, bytes); + + return -EINVAL; +} + +static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev; + struct nvmem_device *nvmem; + int rc; + + if (attr->private) + dev = attr->private; + else + dev = container_of(kobj, struct device, kobj); + nvmem = to_nvmem_device(dev); + + /* Stop the user from reading */ + if (pos >= nvmem->size) + return 0; + + if (count < nvmem->word_size) + return -EINVAL; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = nvmem_reg_read(nvmem, pos, buf, count); + + if (rc) + return rc; + + return count; +} + +static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev; + struct nvmem_device *nvmem; + int rc; + + if (attr->private) + dev = attr->private; + else + dev = container_of(kobj, struct device, kobj); + nvmem = to_nvmem_device(dev); + + /* Stop the user from writing */ + if (pos >= nvmem->size) + return -EFBIG; + + if (count < nvmem->word_size) + return -EINVAL; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = nvmem_reg_write(nvmem, pos, buf, count); + + if (rc) + return rc; + + return count; +} + +/* default read/write permissions */ +static struct bin_attribute bin_attr_rw_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IWUSR | S_IRUGO, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_rw_attributes[] = { + &bin_attr_rw_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_rw_group = { + .bin_attrs = nvmem_bin_rw_attributes, +}; + +static const struct attribute_group *nvmem_rw_dev_groups[] = { + &nvmem_bin_rw_group, + NULL, +}; + +/* read only permission */ +static struct bin_attribute bin_attr_ro_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IRUGO, 
+ }, + .read = bin_attr_nvmem_read, +}; + +static struct bin_attribute *nvmem_bin_ro_attributes[] = { + &bin_attr_ro_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_ro_group = { + .bin_attrs = nvmem_bin_ro_attributes, +}; + +static const struct attribute_group *nvmem_ro_dev_groups[] = { + &nvmem_bin_ro_group, + NULL, +}; + +/* default read/write permissions, root only */ +static struct bin_attribute bin_attr_rw_root_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IWUSR | S_IRUSR, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_rw_root_attributes[] = { + &bin_attr_rw_root_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_rw_root_group = { + .bin_attrs = nvmem_bin_rw_root_attributes, +}; + +static const struct attribute_group *nvmem_rw_root_dev_groups[] = { + &nvmem_bin_rw_root_group, + NULL, +}; + +/* read only permission, root only */ +static struct bin_attribute bin_attr_ro_root_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IRUSR, + }, + .read = bin_attr_nvmem_read, +}; + +static struct bin_attribute *nvmem_bin_ro_root_attributes[] = { + &bin_attr_ro_root_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_ro_root_group = { + .bin_attrs = nvmem_bin_ro_root_attributes, +}; + +static const struct attribute_group *nvmem_ro_root_dev_groups[] = { + &nvmem_bin_ro_root_group, + NULL, +}; + +static void nvmem_release(struct device *dev) +{ + struct nvmem_device *nvmem = to_nvmem_device(dev); + + ida_simple_remove(&nvmem_ida, nvmem->id); + kfree(nvmem); +} + +static const struct device_type nvmem_provider_type = { + .release = nvmem_release, +}; + +static struct bus_type nvmem_bus_type = { + .name = "nvmem", +}; + +static int of_nvmem_match(struct device *dev, void *nvmem_np) +{ + return dev->of_node == nvmem_np; +} + +static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) +{ + struct device *d; + + if (!nvmem_np) + return NULL; + + d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match); + + if (!d) + return NULL; + + return to_nvmem_device(d); +} + +static struct nvmem_cell *nvmem_find_cell(const char *cell_id) +{ + struct nvmem_cell *p; + + mutex_lock(&nvmem_cells_mutex); + + list_for_each_entry(p, &nvmem_cells, node) + if (!strcmp(p->name, cell_id)) { + mutex_unlock(&nvmem_cells_mutex); + return p; + } + + mutex_unlock(&nvmem_cells_mutex); + + return NULL; +} + +static void nvmem_cell_drop(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_cells_mutex); + list_del(&cell->node); + mutex_unlock(&nvmem_cells_mutex); + kfree(cell); +} + +static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) +{ + struct nvmem_cell *cell; + struct list_head *p, *n; + + list_for_each_safe(p, n, &nvmem_cells) { + cell = list_entry(p, struct nvmem_cell, node); + if (cell->nvmem == nvmem) + nvmem_cell_drop(cell); + } +} + +static void nvmem_cell_add(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_cells_mutex); + list_add_tail(&cell->node, &nvmem_cells); + mutex_unlock(&nvmem_cells_mutex); +} + +static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) +{ + cell->nvmem = nvmem; + cell->offset = info->offset; + cell->bytes = info->bytes; + cell->name = info->name; + + cell->bit_offset = info->bit_offset; + cell->nbits = info->nbits; + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, 
nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + return -EINVAL; + } + + return 0; +} + +/** + * nvmem_add_cells() - Add cell information to an nvmem device + * + * @nvmem: nvmem device to add cells to. + * @info: nvmem cell info to add to the device + * @ncells: number of cells in info + * + * Return: 0 or negative error code on failure. + */ +int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + int ncells) +{ + struct nvmem_cell **cells; + int i, rval; + + cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL); + if (!cells) + return -ENOMEM; + + for (i = 0; i < ncells; i++) { + cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); + if (!cells[i]) { + rval = -ENOMEM; + goto err; + } + + rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); + if (rval) { + kfree(cells[i]); + goto err; + } + + nvmem_cell_add(cells[i]); + } + + /* remove tmp array */ + kfree(cells); + + return 0; +err: + while (i--) + nvmem_cell_drop(cells[i]); + + kfree(cells); + + return rval; +} +EXPORT_SYMBOL_GPL(nvmem_add_cells); + +/* + * nvmem_setup_compat() - Create an additional binary entry in + * drivers sys directory, to be backwards compatible with the older + * drivers/misc/eeprom drivers. + */ +static int nvmem_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + int rval; + + if (!config->base_dev) + return -EINVAL; + + if (nvmem->read_only) { + if (config->root_only) + nvmem->eeprom = bin_attr_ro_root_nvmem; + else + nvmem->eeprom = bin_attr_ro_nvmem; + } else { + if (config->root_only) + nvmem->eeprom = bin_attr_rw_root_nvmem; + else + nvmem->eeprom = bin_attr_rw_nvmem; + } + nvmem->eeprom.attr.name = "eeprom"; + nvmem->eeprom.size = nvmem->size; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + nvmem->eeprom.attr.key = &eeprom_lock_key; +#endif + nvmem->eeprom.private = &nvmem->dev; + nvmem->base_dev = config->base_dev; + + rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); + if (rval) { + dev_err(&nvmem->dev, + "Failed to create eeprom binary file %d\n", rval); + return rval; + } + + nvmem->flags |= FLAG_COMPAT; + + return 0; +} + +/** + * nvmem_register() - Register a nvmem device for given nvmem_config. + * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. 
+ */ + +struct nvmem_device *nvmem_register(const struct nvmem_config *config) +{ + struct nvmem_device *nvmem; + int rval; + + if (!config->dev) + return ERR_PTR(-EINVAL); + + nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); + if (!nvmem) + return ERR_PTR(-ENOMEM); + + rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL); + if (rval < 0) { + kfree(nvmem); + return ERR_PTR(rval); + } + + nvmem->id = rval; + nvmem->owner = config->owner; + if (!nvmem->owner && config->dev->driver) + nvmem->owner = config->dev->driver->owner; + nvmem->stride = config->stride ?: 1; + nvmem->word_size = config->word_size ?: 1; + nvmem->size = config->size; + nvmem->dev.type = &nvmem_provider_type; + nvmem->dev.bus = &nvmem_bus_type; + nvmem->dev.parent = config->dev; + nvmem->priv = config->priv; + nvmem->reg_read = config->reg_read; + nvmem->reg_write = config->reg_write; + nvmem->dev.of_node = config->dev->of_node; + + if (config->id == -1 && config->name) { + dev_set_name(&nvmem->dev, "%s", config->name); + } else { + dev_set_name(&nvmem->dev, "%s%d", + config->name ? : "nvmem", + config->name ? config->id : nvmem->id); + } + + nvmem->read_only = device_property_present(config->dev, "read-only") | + config->read_only; + + if (config->root_only) + nvmem->dev.groups = nvmem->read_only ? + nvmem_ro_root_dev_groups : + nvmem_rw_root_dev_groups; + else + nvmem->dev.groups = nvmem->read_only ? + nvmem_ro_dev_groups : + nvmem_rw_dev_groups; + + device_initialize(&nvmem->dev); + + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) + goto err_put_device; + + if (config->compat) { + rval = nvmem_setup_compat(nvmem, config); + if (rval) + goto err_device_del; + } + + if (config->cells) { + rval = nvmem_add_cells(nvmem, config->cells, config->ncells); + if (rval) + goto err_teardown_compat; + } + + return nvmem; + +err_teardown_compat: + if (config->compat) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); +err_device_del: + device_del(&nvmem->dev); +err_put_device: + put_device(&nvmem->dev); + + return ERR_PTR(rval); +} +EXPORT_SYMBOL_GPL(nvmem_register); + +/** + * nvmem_unregister() - Unregister previously registered nvmem device + * + * @nvmem: Pointer to previously registered nvmem device. + * + * Return: Will be an negative on error or a zero on success. + */ +int nvmem_unregister(struct nvmem_device *nvmem) +{ + mutex_lock(&nvmem_mutex); + if (nvmem->users) { + mutex_unlock(&nvmem_mutex); + return -EBUSY; + } + mutex_unlock(&nvmem_mutex); + + if (nvmem->flags & FLAG_COMPAT) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); + + nvmem_device_remove_all_cells(nvmem); + device_del(&nvmem->dev); + put_device(&nvmem->dev); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmem_unregister); + +static void devm_nvmem_release(struct device *dev, void *res) +{ + WARN_ON(nvmem_unregister(*(struct nvmem_device **)res)); +} + +/** + * devm_nvmem_register() - Register a managed nvmem device for given + * nvmem_config. + * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @dev: Device that uses the nvmem device. + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. 
+ */ +struct nvmem_device *devm_nvmem_register(struct device *dev, + const struct nvmem_config *config) +{ + struct nvmem_device **ptr, *nvmem; + + ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + nvmem = nvmem_register(config); + + if (!IS_ERR(nvmem)) { + *ptr = nvmem; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_register); + +static int devm_nvmem_match(struct device *dev, void *res, void *data) +{ + struct nvmem_device **r = res; + + return *r == data; +} + +/** + * devm_nvmem_unregister() - Unregister previously registered managed nvmem + * device. + * + * @dev: Device that uses the nvmem device. + * @nvmem: Pointer to previously registered nvmem device. + * + * Return: Will be an negative on error or a zero on success. + */ +int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) +{ + return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem); +} +EXPORT_SYMBOL(devm_nvmem_unregister); + + +static struct nvmem_device *__nvmem_device_get(struct device_node *np, + struct nvmem_cell **cellp, + const char *cell_id) +{ + struct nvmem_device *nvmem = NULL; + + mutex_lock(&nvmem_mutex); + + if (np) { + nvmem = of_nvmem_find(np); + if (!nvmem) { + mutex_unlock(&nvmem_mutex); + return ERR_PTR(-EPROBE_DEFER); + } + } else { + struct nvmem_cell *cell = nvmem_find_cell(cell_id); + + if (cell) { + nvmem = cell->nvmem; + *cellp = cell; + } + + if (!nvmem) { + mutex_unlock(&nvmem_mutex); + return ERR_PTR(-ENOENT); + } + } + + nvmem->users++; + mutex_unlock(&nvmem_mutex); + + if (!try_module_get(nvmem->owner)) { + dev_err(&nvmem->dev, + "could not increase module refcount for cell %s\n", + nvmem->name); + + mutex_lock(&nvmem_mutex); + nvmem->users--; + mutex_unlock(&nvmem_mutex); + + return ERR_PTR(-EINVAL); + } + + return nvmem; +} + +static void __nvmem_device_put(struct nvmem_device *nvmem) +{ + module_put(nvmem->owner); + mutex_lock(&nvmem_mutex); + nvmem->users--; + mutex_unlock(&nvmem_mutex); +} + +static struct nvmem_device *nvmem_find(const char *name) +{ + struct device *d; + + d = bus_find_device_by_name(&nvmem_bus_type, NULL, name); + + if (!d) + return ERR_PTR(-ENOENT); + + return to_nvmem_device(d); +} + +#if IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_device_get() - Get nvmem device from a given id + * + * @np: Device tree node that uses the nvmem device. + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) +{ + + struct device_node *nvmem_np; + int index; + + index = of_property_match_string(np, "nvmem-names", id); + + nvmem_np = of_parse_phandle(np, "nvmem", index); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + return __nvmem_device_get(nvmem_np, NULL, NULL); +} +EXPORT_SYMBOL_GPL(of_nvmem_device_get); +#endif + +/** + * nvmem_device_get() - Get nvmem device from a given id + * + * @dev: Device that uses the nvmem device. + * @dev_name: name of the requested nvmem device. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. 
+ */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) +{ + if (dev->of_node) { /* try dt first */ + struct nvmem_device *nvmem; + + nvmem = of_nvmem_device_get(dev->of_node, dev_name); + + if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) + return nvmem; + + } + + return nvmem_find(dev_name); +} +EXPORT_SYMBOL_GPL(nvmem_device_get); + +static int devm_nvmem_device_match(struct device *dev, void *res, void *data) +{ + struct nvmem_device **nvmem = res; + + if (WARN_ON(!nvmem || !*nvmem)) + return 0; + + return *nvmem == data; +} + +static void devm_nvmem_device_release(struct device *dev, void *res) +{ + nvmem_device_put(*(struct nvmem_device **)res); +} + +/** + * devm_nvmem_device_put() - put alredy got nvmem device + * + * @dev: Device that uses the nvmem device. + * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), + * that needs to be released. + */ +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_device_release, + devm_nvmem_device_match, nvmem); + + WARN_ON(ret); +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_put); + +/** + * nvmem_device_put() - put alredy got nvmem device + * + * @nvmem: pointer to nvmem device that needs to be released. + */ +void nvmem_device_put(struct nvmem_device *nvmem) +{ + __nvmem_device_put(nvmem); +} +EXPORT_SYMBOL_GPL(nvmem_device_put); + +/** + * devm_nvmem_device_get() - Get nvmem cell of device form a given id + * + * @dev: Device that requests the nvmem device. + * @id: name id for the requested nvmem device. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell + * on success. The nvmem_cell will be freed by the automatically once the + * device is freed. + */ +struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) +{ + struct nvmem_device **ptr, *nvmem; + + ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + nvmem = nvmem_device_get(dev, id); + if (!IS_ERR(nvmem)) { + *ptr = nvmem; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_get); + +static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id) +{ + struct nvmem_cell *cell = NULL; + struct nvmem_device *nvmem; + + nvmem = __nvmem_device_get(NULL, &cell, cell_id); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + return cell; +} + +#if IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id + * + * @np: Device tree node that uses the nvmem cell. + * @name: nvmem cell name from nvmem-cell-names property, or NULL + * for the cell at index 0 (the lone cell with no accompanying + * nvmem-cell-names property). + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). 
+ */ +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name) +{ + struct device_node *cell_np, *nvmem_np; + struct nvmem_cell *cell; + struct nvmem_device *nvmem; + const __be32 *addr; + int rval, len; + int index = 0; + + /* if cell name exists, find index to the name */ + if (name) + index = of_property_match_string(np, "nvmem-cell-names", name); + + cell_np = of_parse_phandle(np, "nvmem-cells", index); + if (!cell_np) + return ERR_PTR(-EINVAL); + + nvmem_np = of_get_next_parent(cell_np); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); + of_node_put(nvmem_np); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + addr = of_get_property(cell_np, "reg", &len); + if (!addr || (len < 2 * sizeof(u32))) { + dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n", + cell_np); + rval = -EINVAL; + goto err_mem; + } + + cell = kzalloc(sizeof(*cell), GFP_KERNEL); + if (!cell) { + rval = -ENOMEM; + goto err_mem; + } + + cell->nvmem = nvmem; + cell->offset = be32_to_cpup(addr++); + cell->bytes = be32_to_cpup(addr); + cell->name = cell_np->name; + + addr = of_get_property(cell_np, "bits", &len); + if (addr && len == (2 * sizeof(u32))) { + cell->bit_offset = be32_to_cpup(addr++); + cell->nbits = be32_to_cpup(addr); + } + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + rval = -EINVAL; + goto err_sanity; + } + + nvmem_cell_add(cell); + + return cell; + +err_sanity: + kfree(cell); + +err_mem: + __nvmem_device_put(nvmem); + + return ERR_PTR(rval); +} +EXPORT_SYMBOL_GPL(of_nvmem_cell_get); +#endif + +/** + * nvmem_cell_get() - Get nvmem cell of device form a given cell name + * + * @dev: Device that requests the nvmem cell. + * @cell_id: nvmem cell name to get. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). + */ +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) +{ + struct nvmem_cell *cell; + + if (dev->of_node) { /* try dt first */ + cell = of_nvmem_cell_get(dev->of_node, cell_id); + if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) + return cell; + } + + /* NULL cell_id only allowed for device tree; invalid otherwise */ + if (!cell_id) + return ERR_PTR(-EINVAL); + + return nvmem_cell_get_from_list(cell_id); +} +EXPORT_SYMBOL_GPL(nvmem_cell_get); + +static void devm_nvmem_cell_release(struct device *dev, void *res) +{ + nvmem_cell_put(*(struct nvmem_cell **)res); +} + +/** + * devm_nvmem_cell_get() - Get nvmem cell of device form a given id + * + * @dev: Device that requests the nvmem cell. + * @id: nvmem cell name id to get. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * automatically once the device is freed. 
+ */ +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) +{ + struct nvmem_cell **ptr, *cell; + + ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + cell = nvmem_cell_get(dev, id); + if (!IS_ERR(cell)) { + *ptr = cell; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return cell; +} +EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); + +static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) +{ + struct nvmem_cell **c = res; + + if (WARN_ON(!c || !*c)) + return 0; + + return *c == data; +} + +/** + * devm_nvmem_cell_put() - Release previously allocated nvmem cell + * from devm_nvmem_cell_get. + * + * @dev: Device that requests the nvmem cell. + * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get(). + */ +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_cell_release, + devm_nvmem_cell_match, cell); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_nvmem_cell_put); + +/** + * nvmem_cell_put() - Release previously allocated nvmem cell. + * + * @cell: Previously allocated nvmem cell by nvmem_cell_get(). + */ +void nvmem_cell_put(struct nvmem_cell *cell) +{ + struct nvmem_device *nvmem = cell->nvmem; + + __nvmem_device_put(nvmem); + nvmem_cell_drop(cell); +} +EXPORT_SYMBOL_GPL(nvmem_cell_put); + +static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) +{ + u8 *p, *b; + int i, extra, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { + /* First shift */ + *b++ >>= bit_offset; + + /* setup rest of the bytes if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get bits from next byte and shift them towards msb */ + *p |= *b << (BITS_PER_BYTE - bit_offset); + + p = b; + *b++ >>= bit_offset; + } + } else { + /* point to the msb */ + p += cell->bytes - 1; + } + + /* result fits in less bytes */ + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); + while (--extra >= 0) + *p-- = 0; + + /* clear msb bits if any leftover in the last byte */ + if (cell->nbits % BITS_PER_BYTE) + *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); +} + +static int __nvmem_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell *cell, + void *buf, size_t *len) +{ + int rc; + + rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); + + if (rc) + return rc; + + /* shift bits in-place */ + if (cell->bit_offset || cell->nbits) + nvmem_shift_read_buffer_in_place(cell, buf); + + if (len) + *len = cell->bytes; + + return 0; +} + +/** + * nvmem_cell_read() - Read a given nvmem cell + * + * @cell: nvmem cell to be read. + * @len: pointer to length of cell which will be populated on successful read; + * can be NULL. + * + * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The + * buffer should be freed by the consumer with a kfree(). 
+ */ +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + struct nvmem_device *nvmem = cell->nvmem; + u8 *buf; + int rc; + + if (!nvmem) + return ERR_PTR(-EINVAL); + + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + rc = __nvmem_cell_read(nvmem, cell, buf, len); + if (rc) { + kfree(buf); + return ERR_PTR(rc); + } + + return buf; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read); + +static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, + u8 *_buf, int len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int i, rc, nbits, bit_offset = cell->bit_offset; + u8 v, *p, *buf, *b, pbyte, pbits; + + nbits = cell->nbits; + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + memcpy(buf, _buf, len); + p = b = buf; + + if (bit_offset) { + pbyte = *b; + *b <<= bit_offset; + + /* setup the first byte with lsb bits from nvmem */ + rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); + if (rc) + goto err; + *b++ |= GENMASK(bit_offset - 1, 0) & v; + + /* setup rest of the byte if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get last byte bits and shift them towards lsb */ + pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); + pbyte = *b; + p = b; + *b <<= bit_offset; + *b++ |= pbits; + } + } + + /* if it's not end on byte boundary */ + if ((nbits + bit_offset) % BITS_PER_BYTE) { + /* setup the last byte with msb bits from nvmem */ + rc = nvmem_reg_read(nvmem, + cell->offset + cell->bytes - 1, &v, 1); + if (rc) + goto err; + *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; + + } + + return buf; +err: + kfree(buf); + return ERR_PTR(rc); +} + +/** + * nvmem_cell_write() - Write to a given nvmem cell + * + * @cell: nvmem cell to be written. + * @buf: Buffer to be written. + * @len: length of buffer to be written to nvmem cell. + * + * Return: length of bytes written or negative on failure. + */ +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int rc; + + if (!nvmem || nvmem->read_only || + (cell->bit_offset == 0 && len != cell->bytes)) + return -EINVAL; + + if (cell->bit_offset || cell->nbits) { + buf = nvmem_cell_prepare_write_buffer(cell, buf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + } + + rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes); + + /* free the tmp buffer */ + if (cell->bit_offset || cell->nbits) + kfree(buf); + + if (rc) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_cell_write); + +/** + * nvmem_cell_read_u32() - Read a cell value as an u32 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + + cell = nvmem_cell_get(dev, cell_id); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) { + nvmem_cell_put(cell); + return PTR_ERR(buf); + } + if (len != sizeof(*val)) { + kfree(buf); + nvmem_cell_put(cell); + return -EINVAL; + } + memcpy(val, buf, sizeof(*val)); + + kfree(buf); + nvmem_cell_put(cell); + return 0; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); + +/** + * nvmem_device_cell_read() - Read a given nvmem device and cell + * + * @nvmem: nvmem device to read from. + * @info: nvmem cell info to be read. + * @buf: buffer pointer which will be populated on successful read. 
+ * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + ssize_t len; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + if (rc) + return rc; + + rc = __nvmem_cell_read(nvmem, &cell, buf, &len); + if (rc) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_read); + +/** + * nvmem_device_cell_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @info: nvmem cell info to be written. + * @buf: buffer to be written to cell. + * + * Return: length of bytes written or negative error code on failure. + * */ +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + if (rc) + return rc; + + return nvmem_cell_write(&cell, buf, cell.bytes); +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_write); + +/** + * nvmem_device_read() - Read from a given nvmem device + * + * @nvmem: nvmem device to read from. + * @offset: offset in nvmem device. + * @bytes: number of bytes to read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_reg_read(nvmem, offset, buf, bytes); + + if (rc) + return rc; + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_read); + +/** + * nvmem_device_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @offset: offset in nvmem device. + * @bytes: number of bytes to write. + * @buf: buffer to be written. + * + * Return: length of bytes written or negative error code on failure. + * */ +int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem) + return -EINVAL; + + rc = nvmem_reg_write(nvmem, offset, buf, bytes); + + if (rc) + return rc; + + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_write); + +static int __init nvmem_init(void) +{ + return bus_register(&nvmem_bus_type); +} + +static void __exit nvmem_exit(void) +{ + bus_unregister(&nvmem_bus_type); +} + +subsys_initcall(nvmem_init); +module_exit(nvmem_exit); + +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); +MODULE_DESCRIPTION("nvmem Driver Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c new file mode 100644 index 000000000..6651e4cdc --- /dev/null +++ b/drivers/nvmem/imx-iim.c @@ -0,0 +1,157 @@ +/* + * i.MX IIM driver + * + * Copyright (c) 2017 Pengutronix, Michael Grzeschik <m.grzeschik@pengutronix.de> + * + * Based on the barebox iim driver, + * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>, + * Orex Computed Radiography + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/clk.h> + +#define IIM_BANK_BASE(n) (0x800 + 0x400 * (n)) + +struct imx_iim_drvdata { + unsigned int nregs; +}; + +struct iim_priv { + void __iomem *base; + struct clk *clk; +}; + +static int imx_iim_read(void *context, unsigned int offset, + void *buf, size_t bytes) +{ + struct iim_priv *iim = context; + int i, ret; + u8 *buf8 = buf; + + ret = clk_prepare_enable(iim->clk); + if (ret) + return ret; + + for (i = offset; i < offset + bytes; i++) { + int bank = i >> 5; + int reg = i & 0x1f; + + *buf8++ = readl(iim->base + IIM_BANK_BASE(bank) + reg * 4); + } + + clk_disable_unprepare(iim->clk); + + return 0; +} + +static struct imx_iim_drvdata imx27_drvdata = { + .nregs = 2 * 32, +}; + +static struct imx_iim_drvdata imx25_imx31_imx35_drvdata = { + .nregs = 3 * 32, +}; + +static struct imx_iim_drvdata imx51_drvdata = { + .nregs = 4 * 32, +}; + +static struct imx_iim_drvdata imx53_drvdata = { + .nregs = 4 * 32 + 16, +}; + +static const struct of_device_id imx_iim_dt_ids[] = { + { + .compatible = "fsl,imx25-iim", + .data = &imx25_imx31_imx35_drvdata, + }, { + .compatible = "fsl,imx27-iim", + .data = &imx27_drvdata, + }, { + .compatible = "fsl,imx31-iim", + .data = &imx25_imx31_imx35_drvdata, + }, { + .compatible = "fsl,imx35-iim", + .data = &imx25_imx31_imx35_drvdata, + }, { + .compatible = "fsl,imx51-iim", + .data = &imx51_drvdata, + }, { + .compatible = "fsl,imx53-iim", + .data = &imx53_drvdata, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, imx_iim_dt_ids); + +static int imx_iim_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + struct device *dev = &pdev->dev; + struct resource *res; + struct iim_priv *iim; + struct nvmem_device *nvmem; + struct nvmem_config cfg = {}; + const struct imx_iim_drvdata *drvdata = NULL; + + iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL); + if (!iim) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iim->base = devm_ioremap_resource(dev, res); + if (IS_ERR(iim->base)) + return PTR_ERR(iim->base); + + of_id = of_match_device(imx_iim_dt_ids, dev); + if (!of_id) + return -ENODEV; + + drvdata = of_id->data; + + iim->clk = devm_clk_get(dev, NULL); + if (IS_ERR(iim->clk)) + return PTR_ERR(iim->clk); + + cfg.name = "imx-iim", + cfg.read_only = true, + cfg.word_size = 1, + cfg.stride = 1, + cfg.reg_read = imx_iim_read, + cfg.dev = dev; + cfg.size = drvdata->nregs; + cfg.priv = iim; + + nvmem = devm_nvmem_register(dev, &cfg); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver imx_iim_driver = { + .probe = imx_iim_probe, + .driver = { + .name = "imx-iim", + .of_match_table = imx_iim_dt_ids, + }, +}; +module_platform_driver(imx_iim_driver); + +MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>"); +MODULE_DESCRIPTION("i.MX IIM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c new file mode 100644 index 000000000..09281aca8 --- /dev/null +++ b/drivers/nvmem/imx-ocotp.c @@ -0,0 +1,524 @@ +/* + * i.MX6 OCOTP fusebox driver + * + * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de> + * + * Based on the barebox ocotp driver, + * Copyright (c) 2010 Baruch Siach 
<baruch@tkos.co.il>, + * Orex Computed Radiography + * + * Write support based on the fsl_otp driver, + * Copyright (C) 2010-2013 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/delay.h> + +#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the + * OTP Bank0 Word0 + */ +#define IMX_OCOTP_OFFSET_PER_WORD 0x10 /* Offset between the start addr + * of two consecutive OTP words. + */ + +#define IMX_OCOTP_ADDR_CTRL 0x0000 +#define IMX_OCOTP_ADDR_CTRL_SET 0x0004 +#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008 +#define IMX_OCOTP_ADDR_TIMING 0x0010 +#define IMX_OCOTP_ADDR_DATA0 0x0020 +#define IMX_OCOTP_ADDR_DATA1 0x0030 +#define IMX_OCOTP_ADDR_DATA2 0x0040 +#define IMX_OCOTP_ADDR_DATA3 0x0050 + +#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F +#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100 +#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200 +#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400 + +#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */ +#define TIMING_STROBE_READ_NS 37 /* Min time before read */ +#define TIMING_RELAX_NS 17 +#define DEF_FSOURCE 1001 /* > 1000 ns */ +#define DEF_STROBE_PROG 10000 /* IPG clocks */ +#define IMX_OCOTP_WR_UNLOCK 0x3E770000 +#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA + +static DEFINE_MUTEX(ocotp_mutex); + +struct ocotp_priv { + struct device *dev; + struct clk *clk; + void __iomem *base; + const struct ocotp_params *params; + struct nvmem_config *config; +}; + +struct ocotp_params { + unsigned int nregs; + unsigned int bank_address_words; + void (*set_timing)(struct ocotp_priv *priv); +}; + +static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags) +{ + int count; + u32 c, mask; + + mask = IMX_OCOTP_BM_CTRL_BUSY | IMX_OCOTP_BM_CTRL_ERROR | flags; + + for (count = 10000; count >= 0; count--) { + c = readl(base + IMX_OCOTP_ADDR_CTRL); + if (!(c & mask)) + break; + cpu_relax(); + } + + if (count < 0) { + /* HW_OCOTP_CTRL[ERROR] will be set under the following + * conditions: + * - A write is performed to a shadow register during a shadow + * reload (essentially, while HW_OCOTP_CTRL[RELOAD_SHADOWS] is + * set. In addition, the contents of the shadow register shall + * not be updated. + * - A write is performed to a shadow register which has been + * locked. + * - A read is performed to from a shadow register which has + * been read locked. + * - A program is performed to a fuse word which has been locked + * - A read is performed to from a fuse word which has been read + * locked. 
+ */ + if (c & IMX_OCOTP_BM_CTRL_ERROR) + return -EPERM; + return -ETIMEDOUT; + } + + return 0; +} + +static void imx_ocotp_clr_err_if_set(void __iomem *base) +{ + u32 c; + + c = readl(base + IMX_OCOTP_ADDR_CTRL); + if (!(c & IMX_OCOTP_BM_CTRL_ERROR)) + return; + + writel(IMX_OCOTP_BM_CTRL_ERROR, base + IMX_OCOTP_ADDR_CTRL_CLR); +} + +static int imx_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct ocotp_priv *priv = context; + unsigned int count; + u32 *buf = val; + int i, ret; + u32 index; + + index = offset >> 2; + count = bytes >> 2; + + if (count > (priv->params->nregs - index)) + count = priv->params->nregs - index; + + mutex_lock(&ocotp_mutex); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + mutex_unlock(&ocotp_mutex); + dev_err(priv->dev, "failed to prepare/enable ocotp clk\n"); + return ret; + } + + ret = imx_ocotp_wait_for_busy(priv->base, 0); + if (ret < 0) { + dev_err(priv->dev, "timeout during read setup\n"); + goto read_end; + } + + for (i = index; i < (index + count); i++) { + *buf++ = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 + + i * IMX_OCOTP_OFFSET_PER_WORD); + + /* 47.3.1.2 + * For "read locked" registers 0xBADABADA will be returned and + * HW_OCOTP_CTRL[ERROR] will be set. It must be cleared by + * software before any new write, read or reload access can be + * issued + */ + if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL) + imx_ocotp_clr_err_if_set(priv->base); + } + ret = 0; + +read_end: + clk_disable_unprepare(priv->clk); + mutex_unlock(&ocotp_mutex); + return ret; +} + +static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv) +{ + unsigned long clk_rate = 0; + unsigned long strobe_read, relax, strobe_prog; + u32 timing = 0; + + /* 47.3.1.3.1 + * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX] + * fields with timing values to match the current frequency of the + * ipg_clk. OTP writes will work at maximum bus frequencies as long + * as the HW_OCOTP_TIMING parameters are set correctly. + * + * Note: there are minimum timings required to ensure an OTP fuse burns + * correctly that are independent of the ipg_clk. Those values are not + * formally documented anywhere however, working from the minimum + * timings given in u-boot we can say: + * + * - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10 + * microseconds feels about right as representative of a minimum time + * to physically burn out a fuse. + * + * - Minimum STROBE_READ i.e. the time to wait post OTP fuse burn before + * performing another read is 37 nanoseconds + * + * - Minimum RELAX timing is 17 nanoseconds. This final RELAX minimum + * timing is not entirely clear the documentation says "This + * count value specifies the time to add to all default timing + * parameters other than the Tpgm and Trd. It is given in number + * of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG + * and STROBE_READ respectively. What the other timing parameters + * are though, is not specified. Experience shows a zero RELAX + * value will mess up a re-load of the shadow registers post OTP + * burn. 
+ */ + clk_rate = clk_get_rate(priv->clk); + + relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1; + strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS, + 1000000000); + strobe_read += 2 * (relax + 1) - 1; + strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US, + 1000000); + strobe_prog += 2 * (relax + 1) - 1; + + timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000; + timing |= strobe_prog & 0x00000FFF; + timing |= (relax << 12) & 0x0000F000; + timing |= (strobe_read << 16) & 0x003F0000; + + writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING); +} + +static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv) +{ + unsigned long clk_rate = 0; + u64 fsource, strobe_prog; + u32 timing = 0; + + /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1 + * 6.4.3.3 + */ + clk_rate = clk_get_rate(priv->clk); + fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE, + NSEC_PER_SEC) + 1; + strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG, + NSEC_PER_SEC) + 1; + + timing = strobe_prog & 0x00000FFF; + timing |= (fsource << 12) & 0x000FF000; + + writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING); +} + +static int imx_ocotp_write(void *context, unsigned int offset, void *val, + size_t bytes) +{ + struct ocotp_priv *priv = context; + u32 *buf = val; + int ret; + + u32 ctrl; + u8 waddr; + u8 word = 0; + + /* allow only writing one complete OTP word at a time */ + if ((bytes != priv->config->word_size) || + (offset % priv->config->word_size)) + return -EINVAL; + + mutex_lock(&ocotp_mutex); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + mutex_unlock(&ocotp_mutex); + dev_err(priv->dev, "failed to prepare/enable ocotp clk\n"); + return ret; + } + + /* Setup the write timing values */ + priv->params->set_timing(priv); + + /* 47.3.1.3.2 + * Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear. + * Overlapped accesses are not supported by the controller. Any pending + * write or reload must be completed before a write access can be + * requested. + */ + ret = imx_ocotp_wait_for_busy(priv->base, 0); + if (ret < 0) { + dev_err(priv->dev, "timeout during timing setup\n"); + goto write_end; + } + + /* 47.3.1.3.3 + * Write the requested address to HW_OCOTP_CTRL[ADDR] and program the + * unlock code into HW_OCOTP_CTRL[WR_UNLOCK]. This must be programmed + * for each write access. The lock code is documented in the register + * description. Both the unlock code and address can be written in the + * same operation. + */ + if (priv->params->bank_address_words != 0) { + /* + * In banked/i.MX7 mode the OTP register bank goes into waddr + * see i.MX 7Solo Applications Processor Reference Manual, Rev. + * 0.1 section 6.4.3.1 + */ + offset = offset / priv->config->word_size; + waddr = offset / priv->params->bank_address_words; + word = offset & (priv->params->bank_address_words - 1); + } else { + /* + * Non-banked i.MX6 mode. + * OTP write/read address specifies one of 128 word address + * locations + */ + waddr = offset / 4; + } + + ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL); + ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR; + ctrl |= waddr & IMX_OCOTP_BM_CTRL_ADDR; + ctrl |= IMX_OCOTP_WR_UNLOCK; + + writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL); + + /* 47.3.1.3.4 + * Write the data to the HW_OCOTP_DATA register. This will automatically + * set HW_OCOTP_CTRL[BUSY] and clear HW_OCOTP_CTRL[WR_UNLOCK]. 
To + * protect programming same OTP bit twice, before program OCOTP will + * automatically read fuse value in OTP and use read value to mask + * program data. The controller will use masked program data to program + * a 32-bit word in the OTP per the address in HW_OCOTP_CTRL[ADDR]. Bit + * fields with 1's will result in that OTP bit being programmed. Bit + * fields with 0's will be ignored. At the same time that the write is + * accepted, the controller makes an internal copy of + * HW_OCOTP_CTRL[ADDR] which cannot be updated until the next write + * sequence is initiated. This copy guarantees that erroneous writes to + * HW_OCOTP_CTRL[ADDR] will not affect an active write operation. It + * should also be noted that during the programming HW_OCOTP_DATA will + * shift right (with zero fill). This shifting is required to program + * the OTP serially. During the write operation, HW_OCOTP_DATA cannot be + * modified. + * Note: on i.MX7 there are four data fields to write for banked write + * with the fuse blowing operation only taking place after data0 + * has been written. This is why data0 must always be the last + * register written. + */ + if (priv->params->bank_address_words != 0) { + /* Banked/i.MX7 mode */ + switch (word) { + case 0: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 1: + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 2: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + case 3: + writel(0, priv->base + IMX_OCOTP_ADDR_DATA1); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA2); + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3); + writel(0, priv->base + IMX_OCOTP_ADDR_DATA0); + break; + } + } else { + /* Non-banked i.MX6 mode */ + writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0); + } + + /* 47.4.1.4.5 + * Once complete, the controller will clear BUSY. A write request to a + * protected or locked region will result in no OTP access and no + * setting of HW_OCOTP_CTRL[BUSY]. In addition HW_OCOTP_CTRL[ERROR] will + * be set. It must be cleared by software before any new write access + * can be issued. + */ + ret = imx_ocotp_wait_for_busy(priv->base, 0); + if (ret < 0) { + if (ret == -EPERM) { + dev_err(priv->dev, "failed write to locked region"); + imx_ocotp_clr_err_if_set(priv->base); + } else { + dev_err(priv->dev, "timeout during data write\n"); + } + goto write_end; + } + + /* 47.3.1.4 + * Write Postamble: Due to internal electrical characteristics of the + * OTP during writes, all OTP operations following a write must be + * separated by 2 us after the clearing of HW_OCOTP_CTRL_BUSY following + * the write. 
+ */ + udelay(2); + + /* reload all shadow registers */ + writel(IMX_OCOTP_BM_CTRL_REL_SHADOWS, + priv->base + IMX_OCOTP_ADDR_CTRL_SET); + ret = imx_ocotp_wait_for_busy(priv->base, + IMX_OCOTP_BM_CTRL_REL_SHADOWS); + if (ret < 0) { + dev_err(priv->dev, "timeout during shadow register reload\n"); + goto write_end; + } + +write_end: + clk_disable_unprepare(priv->clk); + mutex_unlock(&ocotp_mutex); + if (ret < 0) + return ret; + return bytes; +} + +static struct nvmem_config imx_ocotp_nvmem_config = { + .name = "imx-ocotp", + .read_only = false, + .word_size = 4, + .stride = 4, + .reg_read = imx_ocotp_read, + .reg_write = imx_ocotp_write, +}; + +static const struct ocotp_params imx6q_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, +}; + +static const struct ocotp_params imx6sl_params = { + .nregs = 64, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, +}; + +static const struct ocotp_params imx6sll_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, +}; + +static const struct ocotp_params imx6sx_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, +}; + +static const struct ocotp_params imx6ul_params = { + .nregs = 128, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, +}; + +static const struct ocotp_params imx7d_params = { + .nregs = 64, + .bank_address_words = 4, + .set_timing = imx_ocotp_set_imx7_timing, +}; + +static const struct of_device_id imx_ocotp_dt_ids[] = { + { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params }, + { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params }, + { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params }, + { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params }, + { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params }, + { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params }, + { }, +}; +MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); + +static int imx_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct ocotp_priv *priv; + struct nvmem_device *nvmem; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + clk_prepare_enable(priv->clk); + imx_ocotp_clr_err_if_set(priv->base); + clk_disable_unprepare(priv->clk); + + priv->params = of_device_get_match_data(&pdev->dev); + imx_ocotp_nvmem_config.size = 4 * priv->params->nregs; + imx_ocotp_nvmem_config.dev = dev; + imx_ocotp_nvmem_config.priv = priv; + priv->config = &imx_ocotp_nvmem_config; + nvmem = devm_nvmem_register(dev, &imx_ocotp_nvmem_config); + + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver imx_ocotp_driver = { + .probe = imx_ocotp_probe, + .driver = { + .name = "imx_ocotp", + .of_match_table = imx_ocotp_dt_ids, + }, +}; +module_platform_driver(imx_ocotp_driver); + +MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>"); +MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c new file mode 100644 index 000000000..a9534a6e8 --- /dev/null +++ b/drivers/nvmem/lpc18xx_eeprom.c @@ -0,0 +1,288 @@ +/* + * NXP 
LPC18xx/LPC43xx EEPROM memory NVMEM driver + * + * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* Registers */ +#define LPC18XX_EEPROM_AUTOPROG 0x00c +#define LPC18XX_EEPROM_AUTOPROG_WORD 0x1 + +#define LPC18XX_EEPROM_CLKDIV 0x014 + +#define LPC18XX_EEPROM_PWRDWN 0x018 +#define LPC18XX_EEPROM_PWRDWN_NO 0x0 +#define LPC18XX_EEPROM_PWRDWN_YES 0x1 + +#define LPC18XX_EEPROM_INTSTAT 0xfe0 +#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2) + +#define LPC18XX_EEPROM_INTSTATCLR 0xfe8 +#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2) + +/* Fixed page size (bytes) */ +#define LPC18XX_EEPROM_PAGE_SIZE 0x80 + +/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */ +#define LPC18XX_EEPROM_CLOCK_HZ 1500000 + +/* EEPROM requires 3 ms of erase/program time between each writing */ +#define LPC18XX_EEPROM_PROGRAM_TIME 3 + +struct lpc18xx_eeprom_dev { + struct clk *clk; + void __iomem *reg_base; + void __iomem *mem_base; + struct nvmem_device *nvmem; + unsigned reg_bytes; + unsigned val_bytes; + int size; +}; + +static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, + u32 reg, u32 val) +{ + writel(val, eeprom->reg_base + reg); +} + +static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom, + u32 reg) +{ + return readl(eeprom->reg_base + reg); +} + +static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom) +{ + unsigned long end; + u32 val; + + /* Wait until EEPROM program operation has finished */ + end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10); + + while (time_is_after_jiffies(end)) { + val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT); + + if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) { + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR, + LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST); + return 0; + } + + usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC, + (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC); + } + + return -ETIMEDOUT; +} + +static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg, + void *val, size_t bytes) +{ + struct lpc18xx_eeprom_dev *eeprom = context; + unsigned int offset = reg; + int ret; + + /* + * The last page contains the EEPROM initialization data and is not + * writable. 
+ */ + if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) || + (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE)) + return -EINVAL; + + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_NO); + + /* Wait 100 us while the EEPROM wakes up */ + usleep_range(100, 200); + + while (bytes) { + writel(*(u32 *)val, eeprom->mem_base + offset); + ret = lpc18xx_eeprom_busywait_until_prog(eeprom); + if (ret < 0) + return ret; + + bytes -= eeprom->val_bytes; + val += eeprom->val_bytes; + offset += eeprom->val_bytes; + } + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + return 0; +} + +static int lpc18xx_eeprom_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct lpc18xx_eeprom_dev *eeprom = context; + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_NO); + + /* Wait 100 us while the EEPROM wakes up */ + usleep_range(100, 200); + + while (bytes) { + *(u32 *)val = readl(eeprom->mem_base + offset); + bytes -= eeprom->val_bytes; + val += eeprom->val_bytes; + offset += eeprom->val_bytes; + } + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + return 0; +} + + +static struct nvmem_config lpc18xx_nvmem_config = { + .name = "lpc18xx-eeprom", + .stride = 4, + .word_size = 4, + .reg_read = lpc18xx_eeprom_read, + .reg_write = lpc18xx_eeprom_gather_write, +}; + +static int lpc18xx_eeprom_probe(struct platform_device *pdev) +{ + struct lpc18xx_eeprom_dev *eeprom; + struct device *dev = &pdev->dev; + struct reset_control *rst; + unsigned long clk_rate; + struct resource *res; + int ret; + + eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); + eeprom->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(eeprom->reg_base)) + return PTR_ERR(eeprom->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); + eeprom->mem_base = devm_ioremap_resource(dev, res); + if (IS_ERR(eeprom->mem_base)) + return PTR_ERR(eeprom->mem_base); + + eeprom->clk = devm_clk_get(&pdev->dev, "eeprom"); + if (IS_ERR(eeprom->clk)) { + dev_err(&pdev->dev, "failed to get eeprom clock\n"); + return PTR_ERR(eeprom->clk); + } + + ret = clk_prepare_enable(eeprom->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret); + return ret; + } + + rst = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(rst)) { + dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst)); + ret = PTR_ERR(rst); + goto err_clk; + } + + ret = reset_control_assert(rst); + if (ret < 0) { + dev_err(dev, "failed to assert reset: %d\n", ret); + goto err_clk; + } + + eeprom->val_bytes = 4; + eeprom->reg_bytes = 4; + + /* + * Clock rate is generated by dividing the system bus clock by the + * division factor, contained in the divider register (minus 1 encoded). 
+ */ + clk_rate = clk_get_rate(eeprom->clk); + clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1; + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate); + + /* + * Writing a single word to the page will start the erase/program cycle + * automatically + */ + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG, + LPC18XX_EEPROM_AUTOPROG_WORD); + + lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, + LPC18XX_EEPROM_PWRDWN_YES); + + eeprom->size = resource_size(res); + lpc18xx_nvmem_config.size = resource_size(res); + lpc18xx_nvmem_config.dev = dev; + lpc18xx_nvmem_config.priv = eeprom; + + eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config); + if (IS_ERR(eeprom->nvmem)) { + ret = PTR_ERR(eeprom->nvmem); + goto err_clk; + } + + platform_set_drvdata(pdev, eeprom); + + return 0; + +err_clk: + clk_disable_unprepare(eeprom->clk); + + return ret; +} + +static int lpc18xx_eeprom_remove(struct platform_device *pdev) +{ + struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev); + int ret; + + ret = nvmem_unregister(eeprom->nvmem); + if (ret < 0) + return ret; + + clk_disable_unprepare(eeprom->clk); + + return 0; +} + +static const struct of_device_id lpc18xx_eeprom_of_match[] = { + { .compatible = "nxp,lpc1857-eeprom" }, + { }, +}; +MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match); + +static struct platform_driver lpc18xx_eeprom_driver = { + .probe = lpc18xx_eeprom_probe, + .remove = lpc18xx_eeprom_remove, + .driver = { + .name = "lpc18xx-eeprom", + .of_match_table = lpc18xx_eeprom_of_match, + }, +}; + +module_platform_driver(lpc18xx_eeprom_driver); + +MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>"); +MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c new file mode 100644 index 000000000..549b5298a --- /dev/null +++ b/drivers/nvmem/lpc18xx_otp.c @@ -0,0 +1,111 @@ +/* + * NXP LPC18xx/43xx OTP memory NVMEM driver + * + * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com> + * + * Based on the imx ocotp driver, + * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * TODO: add support for writing OTP register via API in boot ROM. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* + * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts + * at offset 0 from the base. + * + * Bank 0 contains the part ID for Flashless devices and is reseverd for + * devices with Flash. + * Bank 1/2 is generale purpose or AES key storage for secure devices. + * Bank 3 contains control data, USB ID and generale purpose words. 
+ */ +#define LPC18XX_OTP_NUM_BANKS 4 +#define LPC18XX_OTP_WORDS_PER_BANK 4 +#define LPC18XX_OTP_WORD_SIZE sizeof(u32) +#define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \ + LPC18XX_OTP_WORDS_PER_BANK * \ + LPC18XX_OTP_WORD_SIZE) + +struct lpc18xx_otp { + void __iomem *base; +}; + +static int lpc18xx_otp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct lpc18xx_otp *otp = context; + unsigned int count = bytes >> 2; + u32 index = offset >> 2; + u32 *buf = val; + int i; + + if (count > (LPC18XX_OTP_SIZE - index)) + count = LPC18XX_OTP_SIZE - index; + + for (i = index; i < (index + count); i++) + *buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE); + + return 0; +} + +static struct nvmem_config lpc18xx_otp_nvmem_config = { + .name = "lpc18xx-otp", + .read_only = true, + .word_size = LPC18XX_OTP_WORD_SIZE, + .stride = LPC18XX_OTP_WORD_SIZE, + .reg_read = lpc18xx_otp_read, +}; + +static int lpc18xx_otp_probe(struct platform_device *pdev) +{ + struct nvmem_device *nvmem; + struct lpc18xx_otp *otp; + struct resource *res; + + otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + otp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE; + lpc18xx_otp_nvmem_config.dev = &pdev->dev; + lpc18xx_otp_nvmem_config.priv = otp; + + nvmem = devm_nvmem_register(&pdev->dev, &lpc18xx_otp_nvmem_config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id lpc18xx_otp_dt_ids[] = { + { .compatible = "nxp,lpc1850-otp" }, + { }, +}; +MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids); + +static struct platform_driver lpc18xx_otp_driver = { + .probe = lpc18xx_otp_probe, + .driver = { + .name = "lpc18xx_otp", + .of_match_table = lpc18xx_otp_dt_ids, + }, +}; +module_platform_driver(lpc18xx_otp_driver); + +MODULE_AUTHOR("Joachim Eastwoood <manabian@gmail.com>"); +MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c new file mode 100644 index 000000000..d769840d1 --- /dev/null +++ b/drivers/nvmem/meson-efuse.c @@ -0,0 +1,83 @@ +/* + * Amlogic Meson GX eFuse Driver + * + * Copyright (c) 2016 Endless Computers, Inc. + * Author: Carlo Caione <carlo@endlessm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <linux/firmware/meson/meson_sm.h> + +static int meson_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + return meson_sm_call_read((u8 *)val, bytes, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); +} + +static int meson_efuse_write(void *context, unsigned int offset, + void *val, size_t bytes) +{ + return meson_sm_call_write((u8 *)val, bytes, SM_EFUSE_WRITE, offset, + bytes, 0, 0, 0); +} + +static const struct of_device_id meson_efuse_match[] = { + { .compatible = "amlogic,meson-gxbb-efuse", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_efuse_match); + +static int meson_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nvmem_device *nvmem; + struct nvmem_config *econfig; + unsigned int size; + + if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) + return -EINVAL; + + econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); + if (!econfig) + return -ENOMEM; + + econfig->dev = dev; + econfig->name = dev_name(dev); + econfig->stride = 1; + econfig->word_size = 1; + econfig->reg_read = meson_efuse_read; + econfig->reg_write = meson_efuse_write; + econfig->size = size; + + nvmem = devm_nvmem_register(&pdev->dev, econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver meson_efuse_driver = { + .probe = meson_efuse_probe, + .driver = { + .name = "meson-efuse", + .of_match_table = meson_efuse_match, + }, +}; + +module_platform_driver(meson_efuse_driver); + +MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); +MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c new file mode 100644 index 000000000..a085563e3 --- /dev/null +++ b/drivers/nvmem/meson-mx-efuse.c @@ -0,0 +1,253 @@ +/* + * Amlogic Meson6, Meson8 and Meson8b eFuse Driver + * + * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#define MESON_MX_EFUSE_CNTL1 0x04 +#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25) +#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24) +#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13) +#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12) +#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11) +#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0) + +#define MESON_MX_EFUSE_CNTL2 0x08 + +#define MESON_MX_EFUSE_CNTL4 0x10 +#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10) + +struct meson_mx_efuse_platform_data { + const char *name; + unsigned int word_size; +}; + +struct meson_mx_efuse { + void __iomem *base; + struct clk *core_clk; + struct nvmem_device *nvmem; + struct nvmem_config config; +}; + +static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg, + u32 mask, u32 set) +{ + u32 data; + + data = readl(efuse->base + reg); + data &= ~mask; + data |= (set & mask); + + writel(data, efuse->base + reg); +} + +static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse) +{ + int err; + + err = clk_prepare_enable(efuse->core_clk); + if (err) + return err; + + /* power up the efuse */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0); + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4, + MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0); + + return 0; +} + +static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse) +{ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_PD_ENABLE, + MESON_MX_EFUSE_CNTL1_PD_ENABLE); + + clk_disable_unprepare(efuse->core_clk); +} + +static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse, + unsigned int addr, u32 *value) +{ + int err; + u32 regval; + + /* write the address to read */ + regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval); + + /* inform the hardware that we changed the address */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0); + + /* start the read process */ + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START); + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0); + + /* + * perform a dummy read to ensure that the HW has the RD_BUSY bit set + * when polling for the status below. 
+ */ + readl(efuse->base + MESON_MX_EFUSE_CNTL1); + + err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1, + regval, + (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)), + 1, 1000); + if (err) { + dev_err(efuse->config.dev, + "Timeout while reading efuse address %u\n", addr); + return err; + } + + *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2); + + return 0; +} + +static int meson_mx_efuse_read(void *context, unsigned int offset, + void *buf, size_t bytes) +{ + struct meson_mx_efuse *efuse = context; + u32 tmp; + int err, i, addr; + + err = meson_mx_efuse_hw_enable(efuse); + if (err) + return err; + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE); + + for (i = 0; i < bytes; i += efuse->config.word_size) { + addr = (offset + i) / efuse->config.word_size; + + err = meson_mx_efuse_read_addr(efuse, addr, &tmp); + if (err) + break; + + memcpy(buf + i, &tmp, efuse->config.word_size); + } + + meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, + MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0); + + meson_mx_efuse_hw_disable(efuse); + + return err; +} + +static const struct meson_mx_efuse_platform_data meson6_efuse_data = { + .name = "meson6-efuse", + .word_size = 1, +}; + +static const struct meson_mx_efuse_platform_data meson8_efuse_data = { + .name = "meson8-efuse", + .word_size = 4, +}; + +static const struct meson_mx_efuse_platform_data meson8b_efuse_data = { + .name = "meson8b-efuse", + .word_size = 4, +}; + +static const struct of_device_id meson_mx_efuse_match[] = { + { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data }, + { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data }, + { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_mx_efuse_match); + +static int meson_mx_efuse_probe(struct platform_device *pdev) +{ + const struct meson_mx_efuse_platform_data *drvdata; + struct meson_mx_efuse *efuse; + struct resource *res; + + drvdata = of_device_get_match_data(&pdev->dev); + if (!drvdata) + return -EINVAL; + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + efuse->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name, + GFP_KERNEL); + efuse->config.owner = THIS_MODULE; + efuse->config.dev = &pdev->dev; + efuse->config.priv = efuse; + efuse->config.stride = drvdata->word_size; + efuse->config.word_size = drvdata->word_size; + efuse->config.size = SZ_512; + efuse->config.read_only = true; + efuse->config.reg_read = meson_mx_efuse_read; + + efuse->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(efuse->core_clk)) { + dev_err(&pdev->dev, "Failed to get core clock\n"); + return PTR_ERR(efuse->core_clk); + } + + efuse->nvmem = devm_nvmem_register(&pdev->dev, &efuse->config); + + return PTR_ERR_OR_ZERO(efuse->nvmem); +} + +static struct platform_driver meson_mx_efuse_driver = { + .probe = meson_mx_efuse_probe, + .driver = { + .name = "meson-mx-efuse", + .of_match_table = meson_mx_efuse_match, + }, +}; + +module_platform_driver(meson_mx_efuse_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/mtk-efuse.c 
b/drivers/nvmem/mtk-efuse.c new file mode 100644 index 000000000..58c998b2e --- /dev/null +++ b/drivers/nvmem/mtk-efuse.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2015 MediaTek Inc. + * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/io.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct mtk_efuse_priv { + void __iomem *base; +}; + +static int mtk_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct mtk_efuse_priv *priv = context; + u32 *val = _val; + int i = 0, words = bytes / 4; + + while (words--) + *val++ = readl(priv->base + reg + (i++ * 4)); + + return 0; +} + +static int mtk_reg_write(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct mtk_efuse_priv *priv = context; + u32 *val = _val; + int i = 0, words = bytes / 4; + + while (words--) + writel(*val++, priv->base + reg + (i++ * 4)); + + return 0; +} + +static int mtk_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct nvmem_config econfig = {}; + struct mtk_efuse_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + econfig.stride = 4; + econfig.word_size = 4; + econfig.reg_read = mtk_reg_read; + econfig.reg_write = mtk_reg_write; + econfig.size = resource_size(res); + econfig.priv = priv; + econfig.dev = dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id mtk_efuse_of_match[] = { + { .compatible = "mediatek,mt8173-efuse",}, + { .compatible = "mediatek,efuse",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, mtk_efuse_of_match); + +static struct platform_driver mtk_efuse_driver = { + .probe = mtk_efuse_probe, + .driver = { + .name = "mediatek,efuse", + .of_match_table = mtk_efuse_of_match, + }, +}; + +static int __init mtk_efuse_init(void) +{ + int ret; + + ret = platform_driver_register(&mtk_efuse_driver); + if (ret) { + pr_err("Failed to register efuse driver\n"); + return ret; + } + + return 0; +} + +static void __exit mtk_efuse_exit(void) +{ + return platform_driver_unregister(&mtk_efuse_driver); +} + +subsys_initcall(mtk_efuse_init); +module_exit(mtk_efuse_exit); + +MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>"); +MODULE_DESCRIPTION("Mediatek EFUSE driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c new file mode 100644 index 000000000..7018e2ef5 --- /dev/null +++ b/drivers/nvmem/mxs-ocotp.c @@ -0,0 +1,217 @@ +/* + * Freescale MXS On-Chip OTP driver + * + * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com> + * + * Based on the driver from Huang Shijie and Christoph G. 
Baumann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/stmp_device.h> + +/* OCOTP registers and bits */ + +#define BM_OCOTP_CTRL_RD_BANK_OPEN BIT(12) +#define BM_OCOTP_CTRL_ERROR BIT(9) +#define BM_OCOTP_CTRL_BUSY BIT(8) + +#define OCOTP_TIMEOUT 10000 +#define OCOTP_DATA_OFFSET 0x20 + +struct mxs_ocotp { + struct clk *clk; + void __iomem *base; + struct nvmem_device *nvmem; +}; + +static int mxs_ocotp_wait(struct mxs_ocotp *otp) +{ + int timeout = OCOTP_TIMEOUT; + unsigned int status = 0; + + while (timeout--) { + status = readl(otp->base); + + if (!(status & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR))) + break; + + cpu_relax(); + } + + if (status & BM_OCOTP_CTRL_BUSY) + return -EBUSY; + else if (status & BM_OCOTP_CTRL_ERROR) + return -EIO; + + return 0; +} + +static int mxs_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct mxs_ocotp *otp = context; + u32 *buf = val; + int ret; + + ret = clk_enable(otp->clk); + if (ret) + return ret; + + writel(BM_OCOTP_CTRL_ERROR, otp->base + STMP_OFFSET_REG_CLR); + + ret = mxs_ocotp_wait(otp); + if (ret) + goto disable_clk; + + /* open OCOTP banks for read */ + writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_SET); + + /* approximately wait 33 hclk cycles */ + udelay(1); + + ret = mxs_ocotp_wait(otp); + if (ret) + goto close_banks; + + while (bytes) { + if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { + /* fill up non-data register */ + *buf++ = 0; + } else { + *buf++ = readl(otp->base + offset); + } + + bytes -= 4; + offset += 4; + } + +close_banks: + /* close banks for power saving */ + writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_CLR); + +disable_clk: + clk_disable(otp->clk); + + return ret; +} + +static struct nvmem_config ocotp_config = { + .name = "mxs-ocotp", + .stride = 16, + .word_size = 4, + .reg_read = mxs_ocotp_read, +}; + +struct mxs_data { + int size; +}; + +static const struct mxs_data imx23_data = { + .size = 0x220, +}; + +static const struct mxs_data imx28_data = { + .size = 0x2a0, +}; + +static const struct of_device_id mxs_ocotp_match[] = { + { .compatible = "fsl,imx23-ocotp", .data = &imx23_data }, + { .compatible = "fsl,imx28-ocotp", .data = &imx28_data }, + { /* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, mxs_ocotp_match); + +static int mxs_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct mxs_data *data; + struct mxs_ocotp *otp; + struct resource *res; + const struct of_device_id *match; + int ret; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match || !match->data) + return -EINVAL; + + otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + otp->base = 
devm_ioremap_resource(dev, res); + if (IS_ERR(otp->base)) + return PTR_ERR(otp->base); + + otp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(otp->clk)) + return PTR_ERR(otp->clk); + + ret = clk_prepare(otp->clk); + if (ret < 0) { + dev_err(dev, "failed to prepare clk: %d\n", ret); + return ret; + } + + data = match->data; + + ocotp_config.size = data->size; + ocotp_config.priv = otp; + ocotp_config.dev = dev; + otp->nvmem = nvmem_register(&ocotp_config); + if (IS_ERR(otp->nvmem)) { + ret = PTR_ERR(otp->nvmem); + goto err_clk; + } + + platform_set_drvdata(pdev, otp); + + return 0; + +err_clk: + clk_unprepare(otp->clk); + + return ret; +} + +static int mxs_ocotp_remove(struct platform_device *pdev) +{ + struct mxs_ocotp *otp = platform_get_drvdata(pdev); + + clk_unprepare(otp->clk); + + return nvmem_unregister(otp->nvmem); +} + +static struct platform_driver mxs_ocotp_driver = { + .probe = mxs_ocotp_probe, + .remove = mxs_ocotp_remove, + .driver = { + .name = "mxs-ocotp", + .of_match_table = mxs_ocotp_match, + }, +}; + +module_platform_driver(mxs_ocotp_driver); +MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); +MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c new file mode 100644 index 000000000..e62926b29 --- /dev/null +++ b/drivers/nvmem/qfprom.c @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/io.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct qfprom_priv { + void __iomem *base; +}; + +static int qfprom_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct qfprom_priv *priv = context; + u8 *val = _val; + int i = 0, words = bytes; + + while (words--) + *val++ = readb(priv->base + reg + i++); + + return 0; +} + +static struct nvmem_config econfig = { + .name = "qfprom", + .stride = 1, + .word_size = 1, + .reg_read = qfprom_reg_read, +}; + +static int qfprom_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct qfprom_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + econfig.size = resource_size(res); + econfig.dev = dev; + econfig.priv = priv; + + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id qfprom_of_match[] = { + { .compatible = "qcom,qfprom",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, qfprom_of_match); + +static struct platform_driver qfprom_driver = { + .probe = qfprom_probe, + .driver = { + .name = "qcom,qfprom", + .of_match_table = qfprom_of_match, + }, +}; +module_platform_driver(qfprom_driver); +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm QFPROM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c new file mode 100644 index 000000000..66699d44f --- /dev/null +++ b/drivers/nvmem/rave-sp-eeprom.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * EEPROM driver for RAVE SP + * + * Copyright (C) 2018 Zodiac Inflight Innovations + * + */ +#include <linux/kernel.h> +#include <linux/mfd/rave-sp.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> + +/** + * enum rave_sp_eeprom_access_type - Supported types of EEPROM access + * + * @RAVE_SP_EEPROM_WRITE: EEPROM write + * @RAVE_SP_EEPROM_READ: EEPROM read + */ +enum rave_sp_eeprom_access_type { + RAVE_SP_EEPROM_WRITE = 0, + RAVE_SP_EEPROM_READ = 1, +}; + +/** + * enum rave_sp_eeprom_header_size - EEPROM command header sizes + * + * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (< 8K) + * @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K) + */ +enum rave_sp_eeprom_header_size { + RAVE_SP_EEPROM_HEADER_SMALL = 4U, + RAVE_SP_EEPROM_HEADER_BIG = 5U, +}; +#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG + +#define RAVE_SP_EEPROM_PAGE_SIZE 32U + +/** + * struct rave_sp_eeprom_page - RAVE SP EEPROM page + * + * @type: Access type (see enum rave_sp_eeprom_access_type) + * @success: Success flag (Success = 1, Failure = 0) + * @data: Read data + + * Note this structure corresponds to RSP_*_EEPROM payload from RAVE + * SP ICD + */ +struct rave_sp_eeprom_page { + u8 type; + u8 success; + u8 data[RAVE_SP_EEPROM_PAGE_SIZE]; +} __packed; + +/** + * struct rave_sp_eeprom - RAVE SP EEPROM device + * + * @sp: Pointer to parent RAVE SP device + * @mutex: Lock protecting access to EEPROM + * @address: EEPROM device 
address + * @header_size: Size of EEPROM command header for this device + * @dev: Pointer to corresponding struct device used for logging + */ +struct rave_sp_eeprom { + struct rave_sp *sp; + struct mutex mutex; + u8 address; + unsigned int header_size; + struct device *dev; +}; + +/** + * rave_sp_eeprom_io - Low-level part of EEPROM page access + * + * @eeprom: EEPROM device to write to + * @type: EEPROM access type (read or write) + * @idx: number of the EEPROM page + * @page: Data to write or buffer to store result (via page->data) + * + * This function does all of the low-level work required to perform a + * EEPROM access. This includes formatting correct command payload, + * sending it and checking received results. + * + * Returns zero in case of success or negative error code in + * case of failure. + */ +static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + u16 idx, + struct rave_sp_eeprom_page *page) +{ + const bool is_write = type == RAVE_SP_EEPROM_WRITE; + const unsigned int data_size = is_write ? sizeof(page->data) : 0; + const unsigned int cmd_size = eeprom->header_size + data_size; + const unsigned int rsp_size = + is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page); + unsigned int offset = 0; + u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)]; + int ret; + + if (WARN_ON(cmd_size > sizeof(cmd))) + return -EINVAL; + + cmd[offset++] = eeprom->address; + cmd[offset++] = 0; + cmd[offset++] = type; + cmd[offset++] = idx; + + /* + * If there's still room in this command's header it means we + * are talkin to EEPROM that uses 16-bit page numbers and we + * have to specify index's MSB in payload as well. + */ + if (offset < eeprom->header_size) + cmd[offset++] = idx >> 8; + /* + * Copy our data to write to command buffer first. In case of + * a read data_size should be zero and memcpy would become a + * no-op + */ + memcpy(&cmd[offset], page->data, data_size); + + ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size); + if (ret) + return ret; + + if (page->type != type) + return -EPROTO; + + if (!page->success) + return -EIO; + + return 0; +} + +/** + * rave_sp_eeprom_page_access - Access single EEPROM page + * + * @eeprom: EEPROM device to access + * @type: Access type to perform (read or write) + * @offset: Offset within EEPROM to access + * @data: Data buffer + * @data_len: Size of the data buffer + * + * This function performs a generic access to a single page or a + * portion thereof. Requested access MUST NOT cross the EEPROM page + * boundary. + * + * Returns zero in case of success or negative error code in + * case of failure. + */ +static int +rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + unsigned int offset, u8 *data, + size_t data_len) +{ + const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE; + const unsigned int page_nr = offset / RAVE_SP_EEPROM_PAGE_SIZE; + struct rave_sp_eeprom_page page; + int ret; + + /* + * This function will not work if data access we've been asked + * to do is crossing EEPROM page boundary. Normally this + * should never happen and getting here would indicate a bug + * in the code. + */ + if (WARN_ON(data_len > sizeof(page.data) - page_offset)) + return -EINVAL; + + if (type == RAVE_SP_EEPROM_WRITE) { + /* + * If doing a partial write we need to do a read first + * to fill the rest of the page with correct data. 
+ */ + if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) { + ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ, + page_nr, &page); + if (ret) + return ret; + } + + memcpy(&page.data[page_offset], data, data_len); + } + + ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page); + if (ret) + return ret; + + /* + * Since we receive the result of the read via 'page.data' + * buffer we need to copy that to 'data' + */ + if (type == RAVE_SP_EEPROM_READ) + memcpy(data, &page.data[page_offset], data_len); + + return 0; +} + +/** + * rave_sp_eeprom_access - Access EEPROM data + * + * @eeprom: EEPROM device to access + * @type: Access type to perform (read or write) + * @offset: Offset within EEPROM to access + * @data: Data buffer + * @data_len: Size of the data buffer + * + * This function performs a generic access (either read or write) at + * arbitrary offset (not necessary page aligned) of arbitrary length + * (is not constrained by EEPROM page size). + * + * Returns zero in case of success or negative error code in case of + * failure. + */ +static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + unsigned int offset, u8 *data, + unsigned int data_len) +{ + unsigned int residue; + unsigned int chunk; + unsigned int head; + int ret; + + mutex_lock(&eeprom->mutex); + + head = offset % RAVE_SP_EEPROM_PAGE_SIZE; + residue = data_len; + + do { + /* + * First iteration, if we are doing an access that is + * not 32-byte aligned, we need to access only data up + * to a page boundary to avoid corssing it in + * rave_sp_eeprom_page_access() + */ + if (unlikely(head)) { + chunk = RAVE_SP_EEPROM_PAGE_SIZE - head; + /* + * This can only happen once per + * rave_sp_eeprom_access() call, so we set + * head to zero to process all the other + * iterations normally. + */ + head = 0; + } else { + chunk = RAVE_SP_EEPROM_PAGE_SIZE; + } + + /* + * We should never read more that 'residue' bytes + */ + chunk = min(chunk, residue); + ret = rave_sp_eeprom_page_access(eeprom, type, offset, + data, chunk); + if (ret) + goto out; + + residue -= chunk; + offset += chunk; + data += chunk; + } while (residue); +out: + mutex_unlock(&eeprom->mutex); + return ret; +} + +static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset, + void *val, size_t bytes) +{ + return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ, + offset, val, bytes); +} + +static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset, + void *val, size_t bytes) +{ + return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE, + offset, val, bytes); +} + +static int rave_sp_eeprom_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rave_sp *sp = dev_get_drvdata(dev->parent); + struct device_node *np = dev->of_node; + struct nvmem_config config = { 0 }; + struct rave_sp_eeprom *eeprom; + struct nvmem_device *nvmem; + u32 reg[2], size; + + if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) { + dev_err(dev, "Failed to parse \"reg\" property\n"); + return -EINVAL; + } + + size = reg[1]; + /* + * Per ICD, we have no more than 2 bytes to specify EEPROM + * page. 
+ */ + if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) { + dev_err(dev, "Specified size is too big\n"); + return -EINVAL; + } + + eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + eeprom->address = reg[0]; + eeprom->sp = sp; + eeprom->dev = dev; + + if (size > SZ_8K) + eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG; + else + eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL; + + mutex_init(&eeprom->mutex); + + config.id = -1; + of_property_read_string(np, "zii,eeprom-name", &config.name); + config.priv = eeprom; + config.dev = dev; + config.size = size; + config.reg_read = rave_sp_eeprom_reg_read; + config.reg_write = rave_sp_eeprom_reg_write; + config.word_size = 1; + config.stride = 1; + + nvmem = devm_nvmem_register(dev, &config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id rave_sp_eeprom_of_match[] = { + { .compatible = "zii,rave-sp-eeprom" }, + {} +}; +MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match); + +static struct platform_driver rave_sp_eeprom_driver = { + .probe = rave_sp_eeprom_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = rave_sp_eeprom_of_match, + }, +}; +module_platform_driver(rave_sp_eeprom_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>"); +MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>"); +MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>"); +MODULE_DESCRIPTION("RAVE SP EEPROM driver"); diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c new file mode 100644 index 000000000..146de9489 --- /dev/null +++ b/drivers/nvmem/rockchip-efuse.c @@ -0,0 +1,309 @@ +/* + * Rockchip eFuse Driver + * + * Copyright (c) 2015 Rockchip Electronics Co. Ltd. + * Author: Caesar Wang <wxt@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#define RK3288_A_SHIFT 6 +#define RK3288_A_MASK 0x3ff +#define RK3288_PGENB BIT(3) +#define RK3288_LOAD BIT(2) +#define RK3288_STROBE BIT(1) +#define RK3288_CSB BIT(0) + +#define RK3328_SECURE_SIZES 96 +#define RK3328_INT_STATUS 0x0018 +#define RK3328_DOUT 0x0020 +#define RK3328_AUTO_CTRL 0x0024 +#define RK3328_INT_FINISH BIT(0) +#define RK3328_AUTO_ENB BIT(0) +#define RK3328_AUTO_RD BIT(1) + +#define RK3399_A_SHIFT 16 +#define RK3399_A_MASK 0x3ff +#define RK3399_NBYTES 4 +#define RK3399_STROBSFTSEL BIT(9) +#define RK3399_RSB BIT(7) +#define RK3399_PD BIT(5) +#define RK3399_PGENB BIT(3) +#define RK3399_LOAD BIT(2) +#define RK3399_STROBE BIT(1) +#define RK3399_CSB BIT(0) + +#define REG_EFUSE_CTRL 0x0000 +#define REG_EFUSE_DOUT 0x0004 + +struct rockchip_efuse_chip { + struct device *dev; + void __iomem *base; + struct clk *clk; +}; + +static int rockchip_rk3288_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + u8 *buf = val; + int ret; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + writel(RK3288_LOAD | RK3288_PGENB, efuse->base + REG_EFUSE_CTRL); + udelay(1); + while (bytes--) { + writel(readl(efuse->base + REG_EFUSE_CTRL) & + (~(RK3288_A_MASK << RK3288_A_SHIFT)), + efuse->base + REG_EFUSE_CTRL); + writel(readl(efuse->base + REG_EFUSE_CTRL) | + ((offset++ & RK3288_A_MASK) << RK3288_A_SHIFT), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + writel(readl(efuse->base + REG_EFUSE_CTRL) | + RK3288_STROBE, efuse->base + REG_EFUSE_CTRL); + udelay(1); + *buf++ = readb(efuse->base + REG_EFUSE_DOUT); + writel(readl(efuse->base + REG_EFUSE_CTRL) & + (~RK3288_STROBE), efuse->base + REG_EFUSE_CTRL); + udelay(1); + } + + /* Switch to standby mode */ + writel(RK3288_PGENB | RK3288_CSB, efuse->base + REG_EFUSE_CTRL); + + clk_disable_unprepare(efuse->clk); + + return 0; +} + +static int rockchip_rk3328_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + unsigned int addr_start, addr_end, addr_offset, addr_len; + u32 out_value, status; + u8 *buf; + int ret, i = 0; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + /* 128 Byte efuse, 96 Byte for secure, 32 Byte for non-secure */ + offset += RK3328_SECURE_SIZES; + addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES; + addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES; + addr_offset = offset % RK3399_NBYTES; + addr_len = addr_end - addr_start; + + buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)), + GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto nomem; + } + + while (addr_len--) { + writel(RK3328_AUTO_RD | RK3328_AUTO_ENB | + ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT), + efuse->base + RK3328_AUTO_CTRL); + udelay(4); + status = readl(efuse->base + RK3328_INT_STATUS); + if (!(status & RK3328_INT_FINISH)) { + ret = -EIO; + goto err; + } + out_value = readl(efuse->base + RK3328_DOUT); + writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS); + + memcpy(&buf[i], &out_value, RK3399_NBYTES); + i += 
RK3399_NBYTES; + } + + memcpy(val, buf + addr_offset, bytes); +err: + kfree(buf); +nomem: + clk_disable_unprepare(efuse->clk); + + return ret; +} + +static int rockchip_rk3399_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct rockchip_efuse_chip *efuse = context; + unsigned int addr_start, addr_end, addr_offset, addr_len; + u32 out_value; + u8 *buf; + int ret, i = 0; + + ret = clk_prepare_enable(efuse->clk); + if (ret < 0) { + dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); + return ret; + } + + addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES; + addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES; + addr_offset = offset % RK3399_NBYTES; + addr_len = addr_end - addr_start; + + buf = kzalloc(array3_size(addr_len, RK3399_NBYTES, sizeof(*buf)), + GFP_KERNEL); + if (!buf) { + clk_disable_unprepare(efuse->clk); + return -ENOMEM; + } + + writel(RK3399_LOAD | RK3399_PGENB | RK3399_STROBSFTSEL | RK3399_RSB, + efuse->base + REG_EFUSE_CTRL); + udelay(1); + while (addr_len--) { + writel(readl(efuse->base + REG_EFUSE_CTRL) | RK3399_STROBE | + ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + out_value = readl(efuse->base + REG_EFUSE_DOUT); + writel(readl(efuse->base + REG_EFUSE_CTRL) & (~RK3399_STROBE), + efuse->base + REG_EFUSE_CTRL); + udelay(1); + + memcpy(&buf[i], &out_value, RK3399_NBYTES); + i += RK3399_NBYTES; + } + + /* Switch to standby mode */ + writel(RK3399_PD | RK3399_CSB, efuse->base + REG_EFUSE_CTRL); + + memcpy(val, buf + addr_offset, bytes); + + kfree(buf); + + clk_disable_unprepare(efuse->clk); + + return 0; +} + +static struct nvmem_config econfig = { + .name = "rockchip-efuse", + .stride = 1, + .word_size = 1, + .read_only = true, +}; + +static const struct of_device_id rockchip_efuse_match[] = { + /* deprecated but kept around for dts binding compatibility */ + { + .compatible = "rockchip,rockchip-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3066a-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3188-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3228-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3288-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3368-efuse", + .data = (void *)&rockchip_rk3288_efuse_read, + }, + { + .compatible = "rockchip,rk3328-efuse", + .data = (void *)&rockchip_rk3328_efuse_read, + }, + { + .compatible = "rockchip,rk3399-efuse", + .data = (void *)&rockchip_rk3399_efuse_read, + }, + { /* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, rockchip_efuse_match); + +static int rockchip_efuse_probe(struct platform_device *pdev) +{ + struct resource *res; + struct nvmem_device *nvmem; + struct rockchip_efuse_chip *efuse; + const void *data; + struct device *dev = &pdev->dev; + + data = of_device_get_match_data(dev); + if (!data) { + dev_err(dev, "failed to get match data\n"); + return -EINVAL; + } + + efuse = devm_kzalloc(dev, sizeof(struct rockchip_efuse_chip), + GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + efuse->base = devm_ioremap_resource(dev, res); + if (IS_ERR(efuse->base)) + return PTR_ERR(efuse->base); + + efuse->clk = devm_clk_get(dev, "pclk_efuse"); + if (IS_ERR(efuse->clk)) + return PTR_ERR(efuse->clk); + + efuse->dev = dev; + if 
(of_property_read_u32(dev->of_node, "rockchip,efuse-size", + &econfig.size)) + econfig.size = resource_size(res); + econfig.reg_read = data; + econfig.priv = efuse; + econfig.dev = efuse->dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static struct platform_driver rockchip_efuse_driver = { + .probe = rockchip_efuse_probe, + .driver = { + .name = "rockchip-efuse", + .of_match_table = rockchip_efuse_match, + }, +}; + +module_platform_driver(rockchip_efuse_driver); +MODULE_DESCRIPTION("rockchip_efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c new file mode 100644 index 000000000..33185d8d8 --- /dev/null +++ b/drivers/nvmem/sc27xx-efuse.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Spreadtrum Communications Inc. + +#include <linux/hwspinlock.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/nvmem-provider.h> + +/* PMIC global registers definition */ +#define SC27XX_MODULE_EN 0xc08 +#define SC27XX_EFUSE_EN BIT(6) + +/* Efuse controller registers definition */ +#define SC27XX_EFUSE_GLB_CTRL 0x0 +#define SC27XX_EFUSE_DATA_RD 0x4 +#define SC27XX_EFUSE_DATA_WR 0x8 +#define SC27XX_EFUSE_BLOCK_INDEX 0xc +#define SC27XX_EFUSE_MODE_CTRL 0x10 +#define SC27XX_EFUSE_STATUS 0x14 +#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20 +#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24 +#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28 + +/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */ +#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0) + +/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */ +#define SC27XX_EFUSE_PG_START BIT(0) +#define SC27XX_EFUSE_RD_START BIT(1) +#define SC27XX_EFUSE_CLR_RDDONE BIT(2) + +/* Bits definitions for SC27XX_EFUSE_STATUS register */ +#define SC27XX_EFUSE_PGM_BUSY BIT(0) +#define SC27XX_EFUSE_READ_BUSY BIT(1) +#define SC27XX_EFUSE_STANDBY BIT(2) +#define SC27XX_EFUSE_GLOBAL_PROT BIT(3) +#define SC27XX_EFUSE_RD_DONE BIT(4) + +/* Block number and block width (bytes) definitions */ +#define SC27XX_EFUSE_BLOCK_MAX 32 +#define SC27XX_EFUSE_BLOCK_WIDTH 2 + +/* Timeout (ms) for the trylock of hardware spinlocks */ +#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000 + +/* Timeout (us) of polling the status */ +#define SC27XX_EFUSE_POLL_TIMEOUT 3000000 +#define SC27XX_EFUSE_POLL_DELAY_US 10000 + +struct sc27xx_efuse { + struct device *dev; + struct regmap *regmap; + struct hwspinlock *hwlock; + struct mutex mutex; + u32 base; +}; + +/* + * On Spreadtrum platform, we have multi-subsystems will access the unique + * efuse controller, so we need one hardware spinlock to synchronize between + * the multiple subsystems. 
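+ *
+ * The lock/unlock helpers below therefore take the driver-local mutex
+ * first (serializing threads within this driver) and then the hardware
+ * spinlock (serializing against the other subsystems), and release them
+ * in the reverse order.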
+ */ +static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse) +{ + int ret; + + mutex_lock(&efuse->mutex); + + ret = hwspin_lock_timeout_raw(efuse->hwlock, + SC27XX_EFUSE_HWLOCK_TIMEOUT); + if (ret) { + dev_err(efuse->dev, "timeout to get the hwspinlock\n"); + mutex_unlock(&efuse->mutex); + return ret; + } + + return 0; +} + +static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse) +{ + hwspin_unlock_raw(efuse->hwlock); + mutex_unlock(&efuse->mutex); +} + +static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits) +{ + int ret; + u32 val; + + ret = regmap_read_poll_timeout(efuse->regmap, + efuse->base + SC27XX_EFUSE_STATUS, + val, (val & bits), + SC27XX_EFUSE_POLL_DELAY_US, + SC27XX_EFUSE_POLL_TIMEOUT); + if (ret) { + dev_err(efuse->dev, "timeout to update the efuse status\n"); + return ret; + } + + return 0; +} + +static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes) +{ + struct sc27xx_efuse *efuse = context; + u32 buf; + int ret; + + if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH) + return -EINVAL; + + ret = sc27xx_efuse_lock(efuse); + if (ret) + return ret; + + /* Enable the efuse controller. */ + ret = regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN, + SC27XX_EFUSE_EN, SC27XX_EFUSE_EN); + if (ret) + goto unlock_efuse; + + /* + * Before reading, we should ensure the efuse controller is in + * standby state. + */ + ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY); + if (ret) + goto disable_efuse; + + /* Set the block address to be read. */ + ret = regmap_write(efuse->regmap, + efuse->base + SC27XX_EFUSE_BLOCK_INDEX, + offset & SC27XX_EFUSE_BLOCK_MASK); + if (ret) + goto disable_efuse; + + /* Start reading process from efuse memory. */ + ret = regmap_update_bits(efuse->regmap, + efuse->base + SC27XX_EFUSE_MODE_CTRL, + SC27XX_EFUSE_RD_START, + SC27XX_EFUSE_RD_START); + if (ret) + goto disable_efuse; + + /* + * Polling the read done status to make sure the reading process + * is completed, that means the data can be read out now. + */ + ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE); + if (ret) + goto disable_efuse; + + /* Read data from efuse memory. */ + ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD, + &buf); + if (ret) + goto disable_efuse; + + /* Clear the read done flag. */ + ret = regmap_update_bits(efuse->regmap, + efuse->base + SC27XX_EFUSE_MODE_CTRL, + SC27XX_EFUSE_CLR_RDDONE, + SC27XX_EFUSE_CLR_RDDONE); + +disable_efuse: + /* Disable the efuse controller after reading. 
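+ * (This label runs on the error paths as well; 'ret' from the failed
+ * step is preserved, and the data is only copied out below when the
+ * whole read sequence succeeded.)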
*/ + regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN, SC27XX_EFUSE_EN, 0); +unlock_efuse: + sc27xx_efuse_unlock(efuse); + + if (!ret) + memcpy(val, &buf, bytes); + + return ret; +} + +static int sc27xx_efuse_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct nvmem_config econfig = { }; + struct nvmem_device *nvmem; + struct sc27xx_efuse *efuse; + int ret; + + efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL); + if (!efuse) + return -ENOMEM; + + efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!efuse->regmap) { + dev_err(&pdev->dev, "failed to get efuse regmap\n"); + return -ENODEV; + } + + ret = of_property_read_u32(np, "reg", &efuse->base); + if (ret) { + dev_err(&pdev->dev, "failed to get efuse base address\n"); + return ret; + } + + ret = of_hwspin_lock_get_id(np, 0); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get hwspinlock id\n"); + return ret; + } + + efuse->hwlock = hwspin_lock_request_specific(ret); + if (!efuse->hwlock) { + dev_err(&pdev->dev, "failed to request hwspinlock\n"); + return -ENXIO; + } + + mutex_init(&efuse->mutex); + efuse->dev = &pdev->dev; + platform_set_drvdata(pdev, efuse); + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = true; + econfig.name = "sc27xx-efuse"; + econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH; + econfig.reg_read = sc27xx_efuse_read; + econfig.priv = efuse; + econfig.dev = &pdev->dev; + nvmem = devm_nvmem_register(&pdev->dev, &econfig); + if (IS_ERR(nvmem)) { + dev_err(&pdev->dev, "failed to register nvmem config\n"); + hwspin_lock_free(efuse->hwlock); + return PTR_ERR(nvmem); + } + + return 0; +} + +static int sc27xx_efuse_remove(struct platform_device *pdev) +{ + struct sc27xx_efuse *efuse = platform_get_drvdata(pdev); + + hwspin_lock_free(efuse->hwlock); + return 0; +} + +static const struct of_device_id sc27xx_efuse_of_match[] = { + { .compatible = "sprd,sc2731-efuse" }, + { } +}; + +static struct platform_driver sc27xx_efuse_driver = { + .probe = sc27xx_efuse_probe, + .remove = sc27xx_efuse_remove, + .driver = { + .name = "sc27xx-efuse", + .of_match_table = sc27xx_efuse_of_match, + }, +}; + +module_platform_driver(sc27xx_efuse_driver); + +MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>"); +MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c new file mode 100644 index 000000000..c050a23a9 --- /dev/null +++ b/drivers/nvmem/snvs_lpgpr.c @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de> + * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
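+ *
+ * The driver exposes the SNVS LPGPR scratch register(s) as an nvmem
+ * device; writes are refused with -EPERM while the lock bits in the
+ * HPLR/LPLR registers are set.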
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define IMX6Q_SNVS_HPLR 0x00
+#define IMX6Q_SNVS_LPLR 0x34
+#define IMX6Q_SNVS_LPGPR 0x68
+
+#define IMX7D_SNVS_HPLR 0x00
+#define IMX7D_SNVS_LPLR 0x34
+#define IMX7D_SNVS_LPGPR 0x90
+
+#define IMX_GPR_SL BIT(5)
+#define IMX_GPR_HL BIT(5)
+
+struct snvs_lpgpr_cfg {
+ int offset;
+ int offset_hplr;
+ int offset_lplr;
+ int size;
+};
+
+struct snvs_lpgpr_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct nvmem_config cfg;
+ const struct snvs_lpgpr_cfg *dcfg;
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
+ .offset = IMX6Q_SNVS_LPGPR,
+ .offset_hplr = IMX6Q_SNVS_HPLR,
+ .offset_lplr = IMX6Q_SNVS_LPLR,
+ .size = 4,
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx7d = {
+ .offset = IMX7D_SNVS_LPGPR,
+ .offset_hplr = IMX7D_SNVS_HPLR,
+ .offset_lplr = IMX7D_SNVS_LPLR,
+ .size = 16,
+};
+
+static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+ unsigned int lock_reg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX_GPR_SL)
+ return -EPERM;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX_GPR_HL)
+ return -EPERM;
+
+ return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
+ bytes / 4);
+}
+
+static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+
+ return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
+ val, bytes / 4);
+}
+
+static int snvs_lpgpr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *syscon_node;
+ struct snvs_lpgpr_priv *priv;
+ struct nvmem_config *cfg;
+ struct nvmem_device *nvmem;
+ const struct snvs_lpgpr_cfg *dcfg;
+
+ if (!node)
+ return -ENOENT;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ syscon_node = of_get_parent(node);
+ if (!syscon_node)
+ return -ENODEV;
+
+ priv->regmap = syscon_node_to_regmap(syscon_node);
+ of_node_put(syscon_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->dcfg = dcfg;
+
+ cfg = &priv->cfg;
+ cfg->priv = priv;
+ cfg->name = dev_name(dev);
+ cfg->dev = dev;
+ cfg->stride = 4;
+ cfg->word_size = 4;
+ cfg->size = dcfg->size;
+ cfg->owner = THIS_MODULE;
+ cfg->reg_read = snvs_lpgpr_read;
+ cfg->reg_write = snvs_lpgpr_write;
+
+ nvmem = devm_nvmem_register(dev, cfg);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id snvs_lpgpr_dt_ids[] = {
+ { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx6ul-snvs-lpgpr",
+ .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx7d-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx7d },
+ { },
+};
+MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
+
+static struct platform_driver snvs_lpgpr_driver = {
+ .probe = snvs_lpgpr_probe,
+ .driver = {
+ .name = "snvs_lpgpr",
+ .of_match_table = snvs_lpgpr_dt_ids,
+ },
+};
+module_platform_driver(snvs_lpgpr_driver);
+
+MODULE_AUTHOR("Oleksij Rempel 
<o.rempel@pengutronix.de>"); +MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 and i.MX7 Secure Non-Volatile Storage"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c new file mode 100644 index 000000000..69f8e972e --- /dev/null +++ b/drivers/nvmem/sunxi_sid.c @@ -0,0 +1,258 @@ +/* + * Allwinner sunXi SoCs Security ID support. + * + * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> + * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/random.h> + +/* Registers and special values for doing register-based SID readout on H3 */ +#define SUN8I_SID_PRCTL 0x40 +#define SUN8I_SID_RDKEY 0x60 + +#define SUN8I_SID_OFFSET_MASK 0x1FF +#define SUN8I_SID_OFFSET_SHIFT 16 +#define SUN8I_SID_OP_LOCK (0xAC << 8) +#define SUN8I_SID_READ BIT(1) + +static struct nvmem_config econfig = { + .name = "sunxi-sid", + .read_only = true, + .stride = 4, + .word_size = 1, +}; + +struct sunxi_sid_cfg { + u32 value_offset; + u32 size; + bool need_register_readout; +}; + +struct sunxi_sid { + void __iomem *base; + u32 value_offset; +}; + +/* We read the entire key, due to a 32 bit read alignment requirement. Since we + * want to return the requested byte, this results in somewhat slower code and + * uses 4 times more reads as needed but keeps code simpler. Since the SID is + * only very rarely probed, this is not really an issue. + */ +static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid, + const unsigned int offset) +{ + u32 sid_key; + + sid_key = ioread32be(sid->base + round_down(offset, 4)); + sid_key >>= (offset % 4) * 8; + + return sid_key; /* Only return the last byte */ +} + +static int sunxi_sid_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct sunxi_sid *sid = context; + u8 *buf = val; + + /* Offset the read operation to the real position of SID */ + offset += sid->value_offset; + + while (bytes--) + *buf++ = sunxi_sid_read_byte(sid, offset++); + + return 0; +} + +static int sun8i_sid_register_readout(const struct sunxi_sid *sid, + const unsigned int offset, + u32 *out) +{ + u32 reg_val; + int ret; + + /* Set word, lock access, and set read command */ + reg_val = (offset & SUN8I_SID_OFFSET_MASK) + << SUN8I_SID_OFFSET_SHIFT; + reg_val |= SUN8I_SID_OP_LOCK | SUN8I_SID_READ; + writel(reg_val, sid->base + SUN8I_SID_PRCTL); + + ret = readl_poll_timeout(sid->base + SUN8I_SID_PRCTL, reg_val, + !(reg_val & SUN8I_SID_READ), 100, 250000); + if (ret) + return ret; + + if (out) + *out = readl(sid->base + SUN8I_SID_RDKEY); + + writel(0, sid->base + SUN8I_SID_PRCTL); + + return 0; +} + +/* + * On Allwinner H3, the value on the 0x200 offset of the SID controller seems + * to be not reliable at all. + * Read by the registers instead. 
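+ *
+ * The register readout helper above implements that: it writes the word
+ * index, the 0xAC lock key and the READ bit into PRCTL, polls until the
+ * hardware clears READ, then latches the 32-bit word from RDKEY and
+ * clears PRCTL again.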
+ */ +static int sun8i_sid_read_byte_by_reg(const struct sunxi_sid *sid, + const unsigned int offset, + u8 *out) +{ + u32 word; + int ret; + + ret = sun8i_sid_register_readout(sid, offset & ~0x03, &word); + + if (ret) + return ret; + + *out = (word >> ((offset & 0x3) * 8)) & 0xff; + + return 0; +} + +static int sun8i_sid_read_by_reg(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct sunxi_sid *sid = context; + u8 *buf = val; + int ret; + + while (bytes--) { + ret = sun8i_sid_read_byte_by_reg(sid, offset++, buf++); + if (ret) + return ret; + } + + return 0; +} + +static int sunxi_sid_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct sunxi_sid *sid; + int ret, i, size; + char *randomness; + const struct sunxi_sid_cfg *cfg; + + sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); + if (!sid) + return -ENOMEM; + + cfg = of_device_get_match_data(dev); + if (!cfg) + return -EINVAL; + sid->value_offset = cfg->value_offset; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sid->base = devm_ioremap_resource(dev, res); + if (IS_ERR(sid->base)) + return PTR_ERR(sid->base); + + size = cfg->size; + + econfig.size = size; + econfig.dev = dev; + if (cfg->need_register_readout) + econfig.reg_read = sun8i_sid_read_by_reg; + else + econfig.reg_read = sunxi_sid_read; + econfig.priv = sid; + nvmem = nvmem_register(&econfig); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + randomness = kzalloc(size, GFP_KERNEL); + if (!randomness) { + ret = -EINVAL; + goto err_unreg_nvmem; + } + + for (i = 0; i < size; i++) + econfig.reg_read(sid, i, &randomness[i], 1); + + add_device_randomness(randomness, size); + kfree(randomness); + + platform_set_drvdata(pdev, nvmem); + + return 0; + +err_unreg_nvmem: + nvmem_unregister(nvmem); + return ret; +} + +static int sunxi_sid_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static const struct sunxi_sid_cfg sun4i_a10_cfg = { + .size = 0x10, +}; + +static const struct sunxi_sid_cfg sun7i_a20_cfg = { + .size = 0x200, +}; + +static const struct sunxi_sid_cfg sun8i_h3_cfg = { + .value_offset = 0x200, + .size = 0x100, + .need_register_readout = true, +}; + +static const struct sunxi_sid_cfg sun50i_a64_cfg = { + .value_offset = 0x200, + .size = 0x100, +}; + +static const struct of_device_id sunxi_sid_of_match[] = { + { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, + { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, + { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static struct platform_driver sunxi_sid_driver = { + .probe = sunxi_sid_probe, + .remove = sunxi_sid_remove, + .driver = { + .name = "eeprom-sunxi-sid", + .of_match_table = sunxi_sid_of_match, + }, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c new file mode 100644 index 000000000..286910336 --- /dev/null +++ b/drivers/nvmem/uniphier-efuse.c @@ -0,0 +1,86 @@ 
+/* + * UniPhier eFuse driver + * + * Copyright (C) 2017 Socionext Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> + +struct uniphier_efuse_priv { + void __iomem *base; +}; + +static int uniphier_reg_read(void *context, + unsigned int reg, void *_val, size_t bytes) +{ + struct uniphier_efuse_priv *priv = context; + u8 *val = _val; + int offs; + + for (offs = 0; offs < bytes; offs += sizeof(u8)) + *val++ = readb(priv->base + reg + offs); + + return 0; +} + +static int uniphier_efuse_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct nvmem_config econfig = {}; + struct uniphier_efuse_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + econfig.stride = 1; + econfig.word_size = 1; + econfig.read_only = true; + econfig.reg_read = uniphier_reg_read; + econfig.size = resource_size(res); + econfig.priv = priv; + econfig.dev = dev; + nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id uniphier_efuse_of_match[] = { + { .compatible = "socionext,uniphier-efuse",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match); + +static struct platform_driver uniphier_efuse_driver = { + .probe = uniphier_efuse_probe, + .driver = { + .name = "uniphier-efuse", + .of_match_table = uniphier_efuse_of_match, + }, +}; +module_platform_driver(uniphier_efuse_driver); + +MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>"); +MODULE_DESCRIPTION("UniPhier eFuse driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c new file mode 100644 index 000000000..466230948 --- /dev/null +++ b/drivers/nvmem/vf610-ocotp.c @@ -0,0 +1,263 @@ +/* + * Copyright (C) 2015 Toradex AG. + * + * Author: Sanchayan Maity <sanchayan.maity@toradex.com> + * + * Based on the barebox ocotp driver, + * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il> + * Orex Computed Radiography + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
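+ *
+ * Fuse words are not laid out contiguously: reads are translated through
+ * the base_to_fuse_addr_mappings[] table below, and offsets without a
+ * mapping simply read back as zero.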
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* OCOTP Register Offsets */ +#define OCOTP_CTRL_REG 0x00 +#define OCOTP_CTRL_SET 0x04 +#define OCOTP_CTRL_CLR 0x08 +#define OCOTP_TIMING 0x10 +#define OCOTP_DATA 0x20 +#define OCOTP_READ_CTRL_REG 0x30 +#define OCOTP_READ_FUSE_DATA 0x40 + +/* OCOTP Register bits and masks */ +#define OCOTP_CTRL_WR_UNLOCK 16 +#define OCOTP_CTRL_WR_UNLOCK_KEY 0x3E77 +#define OCOTP_CTRL_WR_UNLOCK_MASK GENMASK(31, 16) +#define OCOTP_CTRL_ADDR 0 +#define OCOTP_CTRL_ADDR_MASK GENMASK(6, 0) +#define OCOTP_CTRL_RELOAD_SHADOWS BIT(10) +#define OCOTP_CTRL_ERR BIT(9) +#define OCOTP_CTRL_BUSY BIT(8) + +#define OCOTP_TIMING_STROBE_READ 16 +#define OCOTP_TIMING_STROBE_READ_MASK GENMASK(21, 16) +#define OCOTP_TIMING_RELAX 12 +#define OCOTP_TIMING_RELAX_MASK GENMASK(15, 12) +#define OCOTP_TIMING_STROBE_PROG 0 +#define OCOTP_TIMING_STROBE_PROG_MASK GENMASK(11, 0) + +#define OCOTP_READ_CTRL_READ_FUSE 0x1 + +#define VF610_OCOTP_TIMEOUT 100000 + +#define BF(value, field) (((value) << field) & field##_MASK) + +#define DEF_RELAX 20 + +static const int base_to_fuse_addr_mappings[][2] = { + {0x400, 0x00}, + {0x410, 0x01}, + {0x420, 0x02}, + {0x450, 0x05}, + {0x4F0, 0x0F}, + {0x600, 0x20}, + {0x610, 0x21}, + {0x620, 0x22}, + {0x630, 0x23}, + {0x640, 0x24}, + {0x650, 0x25}, + {0x660, 0x26}, + {0x670, 0x27}, + {0x6F0, 0x2F}, + {0x880, 0x38}, + {0x890, 0x39}, + {0x8A0, 0x3A}, + {0x8B0, 0x3B}, + {0x8C0, 0x3C}, + {0x8D0, 0x3D}, + {0x8E0, 0x3E}, + {0x8F0, 0x3F}, + {0xC80, 0x78}, + {0xC90, 0x79}, + {0xCA0, 0x7A}, + {0xCB0, 0x7B}, + {0xCC0, 0x7C}, + {0xCD0, 0x7D}, + {0xCE0, 0x7E}, + {0xCF0, 0x7F}, +}; + +struct vf610_ocotp { + void __iomem *base; + struct clk *clk; + struct device *dev; + struct nvmem_device *nvmem; + int timing; +}; + +static int vf610_ocotp_wait_busy(void __iomem *base) +{ + int timeout = VF610_OCOTP_TIMEOUT; + + while ((readl(base) & OCOTP_CTRL_BUSY) && --timeout) + udelay(10); + + if (!timeout) { + writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR); + return -ETIMEDOUT; + } + + udelay(10); + + return 0; +} + +static int vf610_ocotp_calculate_timing(struct vf610_ocotp *ocotp_dev) +{ + u32 clk_rate; + u32 relax, strobe_read, strobe_prog; + u32 timing; + + clk_rate = clk_get_rate(ocotp_dev->clk); + + /* Refer section OTP read/write timing parameters in TRM */ + relax = clk_rate / (1000000000 / DEF_RELAX) - 1; + strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1; + strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1; + + timing = BF(relax, OCOTP_TIMING_RELAX); + timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ); + timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG); + + return timing; +} + +static int vf610_get_fuse_address(int base_addr_offset) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(base_to_fuse_addr_mappings); i++) { + if (base_to_fuse_addr_mappings[i][0] == base_addr_offset) + return base_to_fuse_addr_mappings[i][1]; + } + + return -EINVAL; +} + +static int vf610_ocotp_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + struct vf610_ocotp *ocotp = context; + void __iomem *base = ocotp->base; + u32 reg, *buf = val; + int fuse_addr; + int ret; + + while (bytes > 0) { + fuse_addr = vf610_get_fuse_address(offset); + if (fuse_addr > 0) { + writel(ocotp->timing, base + OCOTP_TIMING); + ret = 
vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG); + if (ret) + return ret; + + reg = readl(base + OCOTP_CTRL_REG); + reg &= ~OCOTP_CTRL_ADDR_MASK; + reg &= ~OCOTP_CTRL_WR_UNLOCK_MASK; + reg |= BF(fuse_addr, OCOTP_CTRL_ADDR); + writel(reg, base + OCOTP_CTRL_REG); + + writel(OCOTP_READ_CTRL_READ_FUSE, + base + OCOTP_READ_CTRL_REG); + ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG); + if (ret) + return ret; + + if (readl(base) & OCOTP_CTRL_ERR) { + dev_dbg(ocotp->dev, "Error reading from fuse address %x\n", + fuse_addr); + writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR); + } + + /* + * In case of error, we do not abort and expect to read + * 0xBADABADA as mentioned by the TRM. We just read this + * value and return. + */ + *buf = readl(base + OCOTP_READ_FUSE_DATA); + } else { + *buf = 0; + } + + buf++; + bytes -= 4; + offset += 4; + } + + return 0; +} + +static struct nvmem_config ocotp_config = { + .name = "ocotp", + .stride = 4, + .word_size = 4, + .reg_read = vf610_ocotp_read, +}; + +static const struct of_device_id ocotp_of_match[] = { + { .compatible = "fsl,vf610-ocotp", }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, ocotp_of_match); + +static int vf610_ocotp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct vf610_ocotp *ocotp_dev; + + ocotp_dev = devm_kzalloc(dev, sizeof(struct vf610_ocotp), GFP_KERNEL); + if (!ocotp_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ocotp_dev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(ocotp_dev->base)) + return PTR_ERR(ocotp_dev->base); + + ocotp_dev->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ocotp_dev->clk)) { + dev_err(dev, "failed getting clock, err = %ld\n", + PTR_ERR(ocotp_dev->clk)); + return PTR_ERR(ocotp_dev->clk); + } + ocotp_dev->dev = dev; + ocotp_dev->timing = vf610_ocotp_calculate_timing(ocotp_dev); + + ocotp_config.size = resource_size(res); + ocotp_config.priv = ocotp_dev; + ocotp_config.dev = dev; + + ocotp_dev->nvmem = devm_nvmem_register(dev, &ocotp_config); + + return PTR_ERR_OR_ZERO(ocotp_dev->nvmem); +} + +static struct platform_driver vf610_ocotp_driver = { + .probe = vf610_ocotp_probe, + .driver = { + .name = "vf610-ocotp", + .of_match_table = ocotp_of_match, + }, +}; +module_platform_driver(vf610_ocotp_driver); +MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>"); +MODULE_DESCRIPTION("Vybrid OCOTP driver"); +MODULE_LICENSE("GPL v2"); |
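All of the providers above register through the same nvmem core, so consumers read them through one common API rather than driver-specific calls. As a rough sketch of the consumer side (the function name, the "soc-id" cell id and the surrounding driver are illustrative assumptions, not part of this commit), a driver whose device-tree node references a cell on one of these devices could do:

#include <linux/device.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Sketch: read one nvmem cell that the consumer's DT node points at. */
static int example_read_soc_id(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *value;

	cell = nvmem_cell_get(dev, "soc-id");	/* hypothetical cell name */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* The core ends up in the provider's reg_read(), e.g. sunxi_sid_read(). */
	value = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(value))
		return PTR_ERR(value);

	dev_info(dev, "read %zu byte(s) from nvmem\n", len);
	kfree(value);	/* nvmem_cell_read() returns a kmalloc'ed buffer */
	return 0;
}

Raw, cell-less access is also available through nvmem_device_get()/nvmem_device_read(), which maps directly onto the reg_read() callbacks registered by the drivers in this series.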