author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/i3c
parent     Initial commit. (diff)
Adding upstream version 5.10.209. (upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/i3c')
-rw-r--r--  drivers/i3c/Kconfig                    |   24
-rw-r--r--  drivers/i3c/Makefile                   |    4
-rw-r--r--  drivers/i3c/device.c                   |  280
-rw-r--r--  drivers/i3c/internals.h                |   26
-rw-r--r--  drivers/i3c/master.c                   | 2718
-rw-r--r--  drivers/i3c/master/Kconfig             |   23
-rw-r--r--  drivers/i3c/master/Makefile            |    3
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c     | 1219
-rw-r--r--  drivers/i3c/master/i3c-master-cdns.c   | 1691
9 files changed, 5988 insertions, 0 deletions
diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
new file mode 100644
index 000000000..30a441506
--- /dev/null
+++ b/drivers/i3c/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig I3C
+ tristate "I3C support"
+ select I2C
+ help
+ I3C is a serial protocol standardized by the MIPI alliance.
+
+ It's supposed to be backward compatible with I2C while providing
+ support for high speed transfers and native interrupt support
+ without the need for extra pins.
+
+ The I3C protocol also standardizes the slave device types and is
+ mainly designed to communicate with sensors.
+
+ If you want I3C support, you should say Y here and also to the
+ specific driver for your bus adapter(s) below.
+
+ This I3C support can also be built as a module. If so, the module
+ will be called i3c.
+
+if I3C
+source "drivers/i3c/master/Kconfig"
+endif # I3C
diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile
new file mode 100644
index 000000000..11982efbc
--- /dev/null
+++ b/drivers/i3c/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+i3c-y := device.o master.o
+obj-$(CONFIG_I3C) += i3c.o
+obj-$(CONFIG_I3C) += master/
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
new file mode 100644
index 000000000..bb8e60dff
--- /dev/null
+++ b/drivers/i3c/device.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/**
+ * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
+ * specific device
+ *
+ * @dev: device with which the transfers should be done
+ * @xfers: array of transfers
+ * @nxfers: number of transfers
+ *
+ * Initiate one or several private SDR transfers with @dev.
+ *
+ * This function can sleep and thus cannot be called in atomic context.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ int ret, i;
+
+ if (nxfers < 1)
+ return 0;
+
+ for (i = 0; i < nxfers; i++) {
+ if (!xfers[i].len || !xfers[i].data.in)
+ return -EINVAL;
+ }
+
+ i3c_bus_normaluse_lock(dev->bus);
+ ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
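For illustration only (this snippet is not part of the patch, and every name in it is made up), a client driver built on top of this helper might chain a register-address write and a read into one private SDR transaction, relying on the struct i3c_priv_xfer layout from <linux/i3c/device.h>:

static int example_i3c_read_reg(struct i3c_device *i3cdev, u8 reg,
				u8 *val, u16 len)
{
	/*
	 * One write transfer carrying the register address, one read
	 * transfer bringing the data back, executed back to back.
	 */
	struct i3c_priv_xfer xfers[2] = {
		{ .rnw = false, .len = 1, .data.out = &reg },
		{ .rnw = true, .len = len, .data.in = val },
	};

	return i3c_device_do_priv_xfers(i3cdev, xfers, ARRAY_SIZE(xfers));
}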
+
+/**
+ * i3c_device_get_info() - get I3C device information
+ *
+ * @dev: device we want information on
+ * @info: the information object to fill in
+ *
+ * Retrieve I3C dev info.
+ */
+void i3c_device_get_info(struct i3c_device *dev,
+ struct i3c_device_info *info)
+{
+ if (!info)
+ return;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc)
+ *info = dev->desc->info;
+ i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_get_info);
+
+/**
+ * i3c_device_disable_ibi() - Disable IBIs coming from a specific device
+ * @dev: device on which IBIs should be disabled
+ *
+ * This function disables IBIs coming from a specific device and waits for
+ * all pending IBIs to be processed.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_disable_ibi(struct i3c_device *dev)
+{
+ int ret = -ENOENT;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_disable_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_disable_ibi);
+
+/**
+ * i3c_device_enable_ibi() - Enable IBIs coming from a specific device
+ * @dev: device on which IBIs should be enabled
+ *
+ * This function enables IBIs coming from a specific device and waits for
+ * all pending IBIs to be processed. This should be called on a device
+ * where i3c_device_request_ibi() has succeeded.
+ *
+ * Note that IBIs from this device might be received before this function
+ * returns to its caller.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_enable_ibi(struct i3c_device *dev)
+{
+ int ret = -ENOENT;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_enable_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_enable_ibi);
+
+/**
+ * i3c_device_request_ibi() - Request an IBI
+ * @dev: device for which we should enable IBIs
+ * @req: setup requested for this IBI
+ *
+ * This function is responsible for pre-allocating all resources needed to
+ * process IBIs coming from @dev. When this function returns, the IBI is not
+ * enabled yet; it only becomes active once i3c_device_enable_ibi() is called.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *req)
+{
+ int ret = -ENOENT;
+
+ if (!req->handler || !req->num_slots)
+ return -EINVAL;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(dev->desc, req);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_request_ibi);
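A minimal sketch of the intended request/enable sequence as a consumer might write it (the handler, payload size and slot count below are illustrative, not part of this patch):

static void example_ibi_handler(struct i3c_device *dev,
				const struct i3c_ibi_payload *payload)
{
	/* Called for every IBI slot handed back to the consumer. */
	dev_info(i3cdev_to_dev(dev), "IBI received: %u byte(s)\n",
		 payload->len);
}

static int example_setup_ibi(struct i3c_device *dev)
{
	struct i3c_ibi_setup req = {
		.max_payload_len = 2,	/* expected IBI payload size */
		.num_slots = 10,	/* pre-allocated IBI slots */
		.handler = example_ibi_handler,
	};
	int ret;

	ret = i3c_device_request_ibi(dev, &req);
	if (ret)
		return ret;

	/* IBIs only start flowing once explicitly enabled. */
	return i3c_device_enable_ibi(dev);
}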
+
+/**
+ * i3c_device_free_ibi() - Free all resources needed for IBI handling
+ * @dev: device on which you want to release IBI resources
+ *
+ * This function is responsible for de-allocating resources previously
+ * allocated by i3c_device_request_ibi(). It should be called after disabling
+ * IBIs with i3c_device_disable_ibi().
+ */
+void i3c_device_free_ibi(struct i3c_device *dev)
+{
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ i3c_dev_free_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_free_ibi);
+
+/**
+ * i3cdev_to_dev() - Returns the device embedded in @i3cdev
+ * @i3cdev: I3C device
+ *
+ * Return: a pointer to a device object.
+ */
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
+{
+ return &i3cdev->dev;
+}
+EXPORT_SYMBOL_GPL(i3cdev_to_dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+struct i3c_device *dev_to_i3cdev(struct device *dev)
+{
+ return container_of(dev, struct i3c_device, dev);
+}
+EXPORT_SYMBOL_GPL(dev_to_i3cdev);
+
+/**
+ * i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
+ * @i3cdev: I3C device
+ * @id_table: I3C device match table
+ *
+ * Return: a pointer to an i3c_device_id object or NULL if there's no match.
+ */
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table)
+{
+ struct i3c_device_info devinfo;
+ const struct i3c_device_id *id;
+ u16 manuf, part, ext_info;
+ bool rndpid;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+ rndpid = I3C_PID_RND_LOWER_32BITS(devinfo.pid);
+
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_DCR) &&
+ id->dcr != devinfo.dcr)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_MANUF) &&
+ id->manuf_id != manuf)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_PART) &&
+ (rndpid || id->part_id != part))
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+ (rndpid || id->extra_info != ext_info))
+ continue;
+
+ return id;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_device_match_id);
+
+/**
+ * i3c_driver_register_with_owner() - register an I3C device driver
+ *
+ * @drv: driver to register
+ * @owner: module that owns this driver
+ *
+ * Register @drv to the core.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
+{
+ drv->driver.owner = owner;
+ drv->driver.bus = &i3c_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
+
+/**
+ * i3c_driver_unregister() - unregister an I3C device driver
+ *
+ * @drv: driver to unregister
+ *
+ * Unregister @drv.
+ */
+void i3c_driver_unregister(struct i3c_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_unregister);
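Putting the registration helpers above together, a client driver skeleton might look like the following; every identifier and the manufacturer/part IDs are hypothetical, and I3C_DEVICE()/module_i3c_driver() are assumed to be the usual convenience macros from <linux/i3c/device.h>:

static const struct i3c_device_id example_i3c_ids[] = {
	I3C_DEVICE(0x011b, 0x1234, NULL),	/* manuf/part IDs made up */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(i3c, example_i3c_ids);

static int example_i3c_probe(struct i3c_device *i3cdev)
{
	/* Query the device and set it up here. */
	return 0;
}

static int example_i3c_remove(struct i3c_device *i3cdev)
{
	return 0;
}

static struct i3c_driver example_i3c_driver = {
	.driver = {
		.name = "example-i3c",
	},
	.probe = example_i3c_probe,
	.remove = example_i3c_remove,
	.id_table = example_i3c_ids,
};
module_i3c_driver(example_i3c_driver);

MODULE_LICENSE("GPL");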
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
new file mode 100644
index 000000000..86b7b44cf
--- /dev/null
+++ b/drivers/i3c/internals.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_INTERNALS_H
+#define I3C_INTERNALS_H
+
+#include <linux/i3c/master.h>
+
+extern struct bus_type i3c_bus_type;
+
+void i3c_bus_normaluse_lock(struct i3c_bus *bus);
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+#endif /* I3C_INTERNALS_H */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
new file mode 100644
index 000000000..828fb236a
--- /dev/null
+++ b/drivers/i3c/master.c
@@ -0,0 +1,2718 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "internals.h"
+
+static DEFINE_IDR(i3c_bus_idr);
+static DEFINE_MUTEX(i3c_core_lock);
+
+/**
+ * i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock so that no other operations can occur on
+ * the bus. This is needed for all kinds of bus maintenance operations, like
+ * - enabling/disabling slave events
+ * - re-triggering DAA
+ * - changing the dynamic address of a device
+ * - relinquishing mastership
+ * - ...
+ *
+ * The reason for this kind of locking is that we don't want drivers and core
+ * logic to rely on I3C device information that could be changed behind their
+ * back.
+ */
+static void i3c_bus_maintenance_lock(struct i3c_bus *bus)
+{
+ down_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_maintenance_unlock - Release the bus lock after a maintenance
+ * operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when the bus maintenance operation is done. See
+ * i3c_bus_maintenance_lock() for more details on what these maintenance
+ * operations are.
+ */
+static void i3c_bus_maintenance_unlock(struct i3c_bus *bus)
+{
+ up_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_lock - Lock the bus for a normal operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock for any operation that is not a maintenance
+ * operation (see i3c_bus_maintenance_lock() for a non-exhaustive list of
+ * maintenance operations). Basically all communications with I3C devices are
+ * normal operations (HDR, SDR transfers or CCC commands that do not change bus
+ * state or I3C dynamic address).
+ *
+ * Note that this lock does not guarantee serialization of normal operations.
+ * In other words, transfer requests passed to the I3C master can be submitted
+ * in parallel, and I3C master drivers have to use their own locking to make
+ * sure two different communications are not inter-mixed, or that the
+ * output/input queue is not accessed while the engine is busy.
+ */
+void i3c_bus_normaluse_lock(struct i3c_bus *bus)
+{
+ down_read(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_unlock - Release the bus lock after a normal operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when a normal operation is done. See
+ * i3c_bus_normaluse_lock() for more details on what these normal operations
+ * are.
+ */
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
+{
+ up_read(&bus->lock);
+}
+
+static struct i3c_master_controller *
+i3c_bus_to_i3c_master(struct i3c_bus *i3cbus)
+{
+ return container_of(i3cbus, struct i3c_master_controller, bus);
+}
+
+static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+{
+ return container_of(dev, struct i3c_master_controller, dev);
+}
+
+static const struct device_type i3c_device_type;
+
+static struct i3c_bus *dev_to_i3cbus(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->bus;
+
+ master = dev_to_i3cmaster(dev);
+
+ return &master->bus;
+}
+
+static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->desc;
+
+ master = dev_to_i3cmaster(dev);
+
+ return master->this;
+}
+
+static ssize_t bcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.bcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(bcr);
+
+static ssize_t dcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.dcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dcr);
+
+static ssize_t pid_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%llx\n", desc->info.pid);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(pid);
+
+static ssize_t dynamic_address_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dynamic_address);
+
+static const char * const hdrcap_strings[] = {
+ "hdr-ddr", "hdr-tsp", "hdr-tsl",
+};
+
+static ssize_t hdrcap_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t offset = 0, ret;
+ unsigned long caps;
+ int mode;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ caps = desc->info.hdr_cap;
+ for_each_set_bit(mode, &caps, 8) {
+ if (mode >= ARRAY_SIZE(hdrcap_strings))
+ break;
+
+ if (!hdrcap_strings[mode])
+ continue;
+
+ ret = sprintf(buf + offset, offset ? " %s" : "%s",
+ hdrcap_strings[mode]);
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ }
+
+ ret = sprintf(buf + offset, "\n");
+ if (ret < 0)
+ goto out;
+
+ ret = offset + ret;
+
+out:
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(hdrcap);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3c, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr,
+ manuf);
+
+ return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+ devinfo.dcr, manuf, part, ext);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *i3c_device_attrs[] = {
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_device);
+
+static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return add_uevent_var(env, "MODALIAS=i3c:dcr%02Xmanuf%04X",
+ devinfo.dcr, manuf);
+
+ return add_uevent_var(env,
+ "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+ devinfo.dcr, manuf, part, ext);
+}
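As a worked example of the format built above (values made up): a device with DCR 0x08, manufacturer ID 0x011B, part ID 0x1234 and extra info 0x0000, whose PID lower part is not random, produces MODALIAS=i3c:dcr08manuf011Bpart1234ext0000; a device with a random PID lower part only exposes MODALIAS=i3c:dcr08manuf011B, and i3c_device_match_id() below follows the same logic when matching driver id tables.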
+
+static const struct device_type i3c_device_type = {
+ .groups = i3c_device_groups,
+ .uevent = i3c_device_uevent,
+};
+
+static int i3c_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct i3c_device *i3cdev;
+ struct i3c_driver *i3cdrv;
+
+ if (dev->type != &i3c_device_type)
+ return 0;
+
+ i3cdev = dev_to_i3cdev(dev);
+ i3cdrv = drv_to_i3cdrv(drv);
+ if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+ return 1;
+
+ return 0;
+}
+
+static int i3c_device_probe(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+
+ return driver->probe(i3cdev);
+}
+
+static int i3c_device_remove(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+ int ret;
+
+ ret = driver->remove(i3cdev);
+ if (ret)
+ return ret;
+
+ i3c_device_free_ibi(i3cdev);
+
+ return ret;
+}
+
+struct bus_type i3c_bus_type = {
+ .name = "i3c",
+ .match = i3c_device_match,
+ .probe = i3c_device_probe,
+ .remove = i3c_device_remove,
+};
+
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+ int status, bitpos = addr * 2;
+
+ if (addr > I2C_MAX_ADDR)
+ return I3C_ADDR_SLOT_RSVD;
+
+ status = bus->addrslots[bitpos / BITS_PER_LONG];
+ status >>= bitpos % BITS_PER_LONG;
+
+ return status & I3C_ADDR_SLOT_STATUS_MASK;
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status)
+{
+ int bitpos = addr * 2;
+ unsigned long *ptr;
+
+ if (addr > I2C_MAX_ADDR)
+ return;
+
+ ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
+ *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+ (bitpos % BITS_PER_LONG));
+ *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
+}
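A worked example of the 2-bits-per-address packing handled by these two helpers (assuming BITS_PER_LONG == 64): address 0x50 gives bitpos = 0xA0 = 160, so its status lives in addrslots[160 / 64] = addrslots[2] at bit offset 160 % 64 = 32, and I3C_ADDR_SLOT_STATUS_MASK selects those two bits.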
+
+static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+{
+ enum i3c_addr_slot_status status;
+
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+
+ return status == I3C_ADDR_SLOT_FREE;
+}
+
+static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
+{
+ enum i3c_addr_slot_status status;
+ u8 addr;
+
+ for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+ if (status == I3C_ADDR_SLOT_FREE)
+ return addr;
+ }
+
+ return -ENOMEM;
+}
+
+static void i3c_bus_init_addrslots(struct i3c_bus *bus)
+{
+ int i;
+
+ /* Addresses 0 to 7 are reserved. */
+ for (i = 0; i < 8; i++)
+ i3c_bus_set_addr_slot_status(bus, i, I3C_ADDR_SLOT_RSVD);
+
+ /*
+ * Reserve broadcast address and all addresses that might collide
+ * with the broadcast address when facing a single bit error.
+ */
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR,
+ I3C_ADDR_SLOT_RSVD);
+ for (i = 0; i < 7; i++)
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR ^ BIT(i),
+ I3C_ADDR_SLOT_RSVD);
+}
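Spelled out, the loop above reserves the broadcast address 0x7E together with every address at Hamming distance one from it: 0x7F, 0x7C, 0x7A, 0x76, 0x6E, 0x5E and 0x3E.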
+
+static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
+{
+ mutex_lock(&i3c_core_lock);
+ idr_remove(&i3c_bus_idr, i3cbus->id);
+ mutex_unlock(&i3c_core_lock);
+}
+
+static int i3c_bus_init(struct i3c_bus *i3cbus)
+{
+ int ret;
+
+ init_rwsem(&i3cbus->lock);
+ INIT_LIST_HEAD(&i3cbus->devs.i2c);
+ INIT_LIST_HEAD(&i3cbus->devs.i3c);
+ i3c_bus_init_addrslots(i3cbus);
+ i3cbus->mode = I3C_BUS_MODE_PURE;
+
+ mutex_lock(&i3c_core_lock);
+ ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+ mutex_unlock(&i3c_core_lock);
+
+ if (ret < 0)
+ return ret;
+
+ i3cbus->id = ret;
+
+ return 0;
+}
+
+static const char * const i3c_bus_mode_strings[] = {
+ [I3C_BUS_MODE_PURE] = "pure",
+ [I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+ [I3C_BUS_MODE_MIXED_LIMITED] = "mixed-limited",
+ [I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
+};
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ if (i3cbus->mode < 0 ||
+ i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
+ !i3c_bus_mode_strings[i3cbus->mode])
+ ret = sprintf(buf, "unknown\n");
+ else
+ ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(mode);
+
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
+ i3cbus->cur_master->info.pid);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(current_master);
+
+static ssize_t i3c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i3c_scl_frequency);
+
+static ssize_t i2c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i2c_scl_frequency);
+
+static struct attribute *i3c_masterdev_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_current_master.attr,
+ &dev_attr_i3c_scl_frequency.attr,
+ &dev_attr_i2c_scl_frequency.attr,
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_masterdev);
+
+static void i3c_masterdev_release(struct device *dev)
+{
+ struct i3c_master_controller *master = dev_to_i3cmaster(dev);
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+
+ if (master->wq)
+ destroy_workqueue(master->wq);
+
+ WARN_ON(!list_empty(&bus->devs.i2c) || !list_empty(&bus->devs.i3c));
+ i3c_bus_cleanup(bus);
+
+ of_node_put(dev->of_node);
+}
+
+static const struct device_type i3c_masterdev_type = {
+ .groups = i3c_masterdev_groups,
+};
+
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
+{
+ struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
+
+ i3cbus->mode = mode;
+
+ switch (i3cbus->mode) {
+ case I3C_BUS_MODE_PURE:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ if (!i3cbus->scl_rate.i3c ||
+ i3cbus->scl_rate.i3c > i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i3c = i3cbus->scl_rate.i2c;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&master->dev, "i2c-scl = %ld Hz i3c-scl = %ld Hz\n",
+ i3cbus->scl_rate.i2c, i3cbus->scl_rate.i3c);
+
+ /*
+ * I3C/I2C frequency may have been overridden, check that user-provided
+ * values are not exceeding max possible frequency.
+ */
+ if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct i3c_master_controller *
+i2c_adapter_to_i3c_master(struct i2c_adapter *adap)
+{
+ return container_of(adap, struct i3c_master_controller, i2c);
+}
+
+static struct i2c_adapter *
+i3c_master_to_i2c_adapter(struct i3c_master_controller *master)
+{
+ return &master->i2c;
+}
+
+static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i2c_dev_desc *
+i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
+ const struct i2c_dev_boardinfo *boardinfo)
+{
+ struct i2c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->boardinfo = boardinfo;
+ dev->addr = boardinfo->base.addr;
+ dev->lvr = boardinfo->lvr;
+
+ return dev;
+}
+
+static void *i3c_ccc_cmd_dest_init(struct i3c_ccc_cmd_dest *dest, u8 addr,
+ u16 payloadlen)
+{
+ dest->addr = addr;
+ dest->payload.len = payloadlen;
+ if (payloadlen)
+ dest->payload.data = kzalloc(payloadlen, GFP_KERNEL);
+ else
+ dest->payload.data = NULL;
+
+ return dest->payload.data;
+}
+
+static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
+{
+ kfree(dest->payload.data);
+}
+
+static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
+ struct i3c_ccc_cmd_dest *dests,
+ unsigned int ndests)
+{
+ cmd->rnw = rnw ? 1 : 0;
+ cmd->id = id;
+ cmd->dests = dests;
+ cmd->ndests = ndests;
+ cmd->err = I3C_ERROR_UNKNOWN;
+}
+
+static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd)
+{
+ int ret;
+
+ if (!cmd || !master)
+ return -EINVAL;
+
+ if (WARN_ON(master->init_done &&
+ !rwsem_is_locked(&master->bus.lock)))
+ return -EINVAL;
+
+ if (!master->ops->send_ccc_cmd)
+ return -ENOTSUPP;
+
+ if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
+ return -EINVAL;
+
+ if (master->ops->supports_ccc_cmd &&
+ !master->ops->supports_ccc_cmd(master, cmd))
+ return -ENOTSUPP;
+
+ ret = master->ops->send_ccc_cmd(master, cmd);
+ if (ret) {
+ if (cmd->err != I3C_ERROR_UNKNOWN)
+ return cmd->err;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct i2c_dev_desc *
+i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
+ u16 addr)
+{
+ struct i2c_dev_desc *dev;
+
+ i3c_bus_for_each_i2cdev(&master->bus, dev) {
+ if (dev->boardinfo->base.addr == addr)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_get_free_addr() - get a free address on the bus
+ * @master: I3C master object
+ * @start_addr: where to start searching
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: the first free address starting at @start_addr (included) or -ENOMEM
+ * if there's no more address available.
+ */
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr)
+{
+ return i3c_bus_get_free_addr(&master->bus, start_addr);
+}
+EXPORT_SYMBOL_GPL(i3c_master_get_free_addr);
+
+static void i3c_device_release(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+
+ WARN_ON(i3cdev->desc);
+
+ of_node_put(i3cdev->dev.of_node);
+ kfree(i3cdev);
+}
+
+static void i3c_master_free_i3c_dev(struct i3c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i3c_dev_desc *
+i3c_master_alloc_i3c_dev(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->info = *info;
+ mutex_init(&dev->ibi_lock);
+
+ return dev;
+}
+
+static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ enum i3c_addr_slot_status addrstat;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ addrstat = i3c_bus_get_addr_slot_status(&master->bus, addr);
+ if (addr != I3C_BROADCAST_ADDR && addrstat != I3C_ADDR_SLOT_I3C_DEV)
+ return -EINVAL;
+
+ i3c_ccc_cmd_dest_init(&dest, addr, 0);
+ i3c_ccc_cmd_init(&cmd, false,
+ I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_entdaa_locked() - start a DAA (Dynamic Address Assignment)
+ * procedure
+ * @master: master used to send frames on the bus
+ *
+ * Send a ENTDAA CCC command to start a DAA procedure.
+ *
+ * Note that this function only sends the ENTDAA CCC command; all the logic
+ * behind dynamic address assignment has to be handled in the I3C master
+ * driver.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_entdaa_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_entdaa_locked);
+
+static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
+ u8 addr, bool enable, u8 evts)
+{
+ struct i3c_ccc_events *events;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ events = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*events));
+ if (!events)
+ return -ENOMEM;
+
+ events->events = evts;
+ i3c_ccc_cmd_init(&cmd, false,
+ enable ?
+ I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
+ I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_disec_locked() - send a DISEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to disable
+ *
+ * Send a DISEC CCC command to disable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, false, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disec_locked);
+
+/**
+ * i3c_master_enec_locked() - send an ENEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to enable
+ *
+ * Sends an ENEC CCC command to enable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, true, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enec_locked);
+
+/**
+ * i3c_master_defslvs_locked() - send a DEFSLVS CCC command
+ * @master: master used to send frames on the bus
+ *
+ * Send a DEFSLVS CCC command containing all the devices known to the @master.
+ * This is useful when you have secondary masters on the bus to propagate
+ * device information.
+ *
+ * This should be called after all I3C devices have been discovered (in other
+ * words, after the DAA procedure has finished) and instantiated in
+ * &i3c_master_controller_ops->bus_init().
+ * It should also be called if a master ACKed a Hot-Join request and assigned
+ * a dynamic address to the device joining the bus.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_defslvs *defslvs;
+ struct i3c_ccc_dev_desc *desc;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_dev_desc *i3cdev;
+ struct i2c_dev_desc *i2cdev;
+ struct i3c_ccc_cmd cmd;
+ struct i3c_bus *bus;
+ bool send = false;
+ int ndevs = 0, ret;
+
+ if (!master)
+ return -EINVAL;
+
+ bus = i3c_master_get_bus(master);
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ ndevs++;
+
+ if (i3cdev == master->this)
+ continue;
+
+ if (I3C_BCR_DEVICE_ROLE(i3cdev->info.bcr) ==
+ I3C_BCR_I3C_MASTER)
+ send = true;
+ }
+
+ /* No other master on the bus, skip DEFSLVS. */
+ if (!send)
+ return 0;
+
+ i3c_bus_for_each_i2cdev(bus, i2cdev)
+ ndevs++;
+
+ defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
+ struct_size(defslvs, slaves,
+ ndevs - 1));
+ if (!defslvs)
+ return -ENOMEM;
+
+ defslvs->count = ndevs;
+ defslvs->master.bcr = master->this->info.bcr;
+ defslvs->master.dcr = master->this->info.dcr;
+ defslvs->master.dyn_addr = master->this->info.dyn_addr << 1;
+ defslvs->master.static_addr = I3C_BROADCAST_ADDR << 1;
+
+ desc = defslvs->slaves;
+ i3c_bus_for_each_i2cdev(bus, i2cdev) {
+ desc->lvr = i2cdev->lvr;
+ desc->static_addr = i2cdev->addr << 1;
+ desc++;
+ }
+
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ /* Skip the I3C dev representing this master. */
+ if (i3cdev == master->this)
+ continue;
+
+ desc->bcr = i3cdev->info.bcr;
+ desc->dcr = i3cdev->info.dcr;
+ desc->dyn_addr = i3cdev->info.dyn_addr << 1;
+ desc->static_addr = i3cdev->info.static_addr << 1;
+ desc++;
+ }
+
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
+
+static int i3c_master_setda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr, bool setdasa)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_setda *setda;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!oldaddr || !newaddr)
+ return -EINVAL;
+
+ setda = i3c_ccc_cmd_dest_init(&dest, oldaddr, sizeof(*setda));
+ if (!setda)
+ return -ENOMEM;
+
+ setda->addr = newaddr << 1;
+ i3c_ccc_cmd_init(&cmd, false,
+ setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
+ u8 static_addr, u8 dyn_addr)
+{
+ return i3c_master_setda_locked(master, static_addr, dyn_addr, true);
+}
+
+static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr)
+{
+ return i3c_master_setda_locked(master, oldaddr, newaddr, false);
+}
+
+static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mrl *mrl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
+ if (!mrl)
+ return -ENOMEM;
+
+ /*
+ * When the device does not have an IBI payload, GETMRL only returns 2
+ * bytes of data.
+ */
+ if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
+ dest.payload.len -= 1;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ switch (dest.payload.len) {
+ case 3:
+ info->max_ibi_len = mrl->ibi_len;
+ fallthrough;
+ case 2:
+ info->max_read_len = be16_to_cpu(mrl->read_len);
+ break;
+ default:
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mwl *mwl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
+ if (!mwl)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != sizeof(*mwl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_write_len = be16_to_cpu(mwl->len);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getmxds *getmaxds;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getmaxds = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*getmaxds));
+ if (!getmaxds)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 2 && dest.payload.len != 5) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_read_ds = getmaxds->maxrd;
+ info->max_write_ds = getmaxds->maxwr;
+ if (dest.payload.len == 5)
+ info->max_read_turnaround = getmaxds->maxrdturn[0] |
+ ((u32)getmaxds->maxrdturn[1] << 8) |
+ ((u32)getmaxds->maxrdturn[2] << 16);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_gethdrcap *gethdrcap;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ gethdrcap = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*gethdrcap));
+ if (!gethdrcap)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 1) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->hdr_cap = gethdrcap->modes;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getpid_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getpid *getpid;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret, i;
+
+ getpid = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getpid));
+ if (!getpid)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->pid = 0;
+ for (i = 0; i < sizeof(getpid->pid); i++) {
+ int sft = (sizeof(getpid->pid) - i - 1) * 8;
+
+ info->pid |= (u64)getpid->pid[i] << sft;
+ }
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
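For example (values made up), GETPID bytes {0x01, 0x1B, 0x12, 0x34, 0xAB, 0xCD} are assembled most-significant byte first by the loop above, yielding info->pid = 0x011B1234ABCD, from which macros such as I3C_PID_MANUF_ID() and I3C_PID_PART_ID() later extract the individual fields.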
+
+static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getbcr *getbcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getbcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getbcr));
+ if (!getbcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->bcr = getbcr->bcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getdcr *getdcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getdcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getdcr));
+ if (!getdcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->dcr = getdcr->dcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status slot_status;
+ int ret;
+
+ if (!dev->info.dyn_addr)
+ return -EINVAL;
+
+ slot_status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (slot_status == I3C_ADDR_SLOT_RSVD ||
+ slot_status == I3C_ADDR_SLOT_I2C_DEV)
+ return -EINVAL;
+
+ ret = i3c_master_getpid_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getbcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getdcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
+ ret = i3c_master_getmxds_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ dev->info.max_ibi_len = 1;
+
+ i3c_master_getmrl_locked(master, &dev->info);
+ i3c_master_getmwl_locked(master, &dev->info);
+
+ if (dev->info.bcr & I3C_BCR_HDR_CAP) {
+ ret = i3c_master_gethdrcap_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ if (dev->info.dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+}
+
+static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ if (dev->info.static_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.static_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ /*
+ * ->init_dyn_addr should have been reserved before that, so, if we're
+ * trying to apply a pre-reserved dynamic address, we should not try
+ * to reserve the address slot a second time.
+ */
+ if (dev->info.dyn_addr &&
+ (!dev->boardinfo ||
+ dev->boardinfo->init_dyn_addr != dev->info.dyn_addr)) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ goto err_release_static_addr;
+
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ return 0;
+
+err_release_static_addr:
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ return -EBUSY;
+}
+
+static int i3c_master_attach_i3c_dev(struct i3c_master_controller *master,
+ struct i3c_dev_desc *dev)
+{
+ int ret;
+
+ /*
+ * We don't attach devices to the controller until they are
+ * addressable on the bus.
+ */
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ ret = i3c_master_get_i3c_addrs(dev);
+ if (ret)
+ return ret;
+
+ /* Do not attach the master device itself. */
+ if (master->this != dev && master->ops->attach_i3c_dev) {
+ ret = master->ops->attach_i3c_dev(dev);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i3c);
+
+ return 0;
+}
+
+static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+ int ret;
+
+ if (dev->info.dyn_addr != old_dyn_addr &&
+ (!dev->boardinfo ||
+ dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ if (master->ops->reattach_i3c_dev) {
+ ret = master->ops->reattach_i3c_dev(dev, old_dyn_addr);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ /* Do not detach the master device itself. */
+ if (master->this != dev && master->ops->detach_i3c_dev)
+ master->ops->detach_i3c_dev(dev);
+
+ i3c_master_put_i3c_addrs(dev);
+ list_del(&dev->common.node);
+}
+
+static int i3c_master_attach_i2c_dev(struct i3c_master_controller *master,
+ struct i2c_dev_desc *dev)
+{
+ int ret;
+
+ if (master->ops->attach_i2c_dev) {
+ ret = master->ops->attach_i2c_dev(dev);
+ if (ret)
+ return ret;
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i2c);
+
+ return 0;
+}
+
+static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i2c_dev_get_master(dev);
+
+ list_del(&dev->common.node);
+
+ if (master->ops->detach_i2c_dev)
+ master->ops->detach_i2c_dev(dev);
+}
+
+static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
+ struct i3c_dev_boardinfo *boardinfo)
+{
+ struct i3c_device_info info = {
+ .static_addr = boardinfo->static_addr,
+ };
+ struct i3c_dev_desc *i3cdev;
+ int ret;
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(i3cdev))
+ return -ENOMEM;
+
+ i3cdev->boardinfo = boardinfo;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = i3c_master_setdasa_locked(master, i3cdev->info.static_addr,
+ i3cdev->boardinfo->init_dyn_addr);
+ if (ret)
+ goto err_detach_dev;
+
+ i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(i3cdev, 0);
+ if (ret)
+ goto err_rstdaa;
+
+ ret = i3c_master_retrieve_dev_info(i3cdev);
+ if (ret)
+ goto err_rstdaa;
+
+ return 0;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, i3cdev->boardinfo->init_dyn_addr);
+err_detach_dev:
+ i3c_master_detach_i3c_dev(i3cdev);
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
+}
+
+static void
+i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *desc;
+ int ret;
+
+ if (!master->init_done)
+ return;
+
+ i3c_bus_for_each_i3cdev(&master->bus, desc) {
+ if (desc->dev || !desc->info.dyn_addr || desc == master->this)
+ continue;
+
+ desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
+ if (!desc->dev)
+ continue;
+
+ desc->dev->bus = &master->bus;
+ desc->dev->desc = desc;
+ desc->dev->dev.parent = &master->dev;
+ desc->dev->dev.type = &i3c_device_type;
+ desc->dev->dev.bus = &i3c_bus_type;
+ desc->dev->dev.release = i3c_device_release;
+ dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id,
+ desc->info.pid);
+
+ if (desc->boardinfo)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+ if (ret) {
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
+ put_device(&desc->dev->dev);
+ }
+ }
+}
+
+/**
+ * i3c_master_do_daa() - do a DAA (Dynamic Address Assignment)
+ * @master: master doing the DAA
+ *
+ * This function triggers a DAA (through the
+ * &i3c_master_controller_ops->do_daa() hook) and then registers the I3C
+ * devices discovered during the procedure. All device information is
+ * automatically retrieved using standard CCC commands.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_do_daa(struct i3c_master_controller *master)
+{
+ int ret;
+
+ i3c_bus_maintenance_lock(&master->bus);
+ ret = master->ops->do_daa(master);
+ i3c_bus_maintenance_unlock(&master->bus);
+
+ if (ret)
+ return ret;
+
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_do_daa);
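As a rough, hardware-agnostic sketch of how a controller driver's ->do_daa() hook ties back into the core helpers (everything below is illustrative; a real driver programs its command queue where the placeholder comment sits and typically loops over more than one device):

static int example_master_do_daa(struct i3c_master_controller *m)
{
	int addr;

	/* Pick a free dynamic address for the next device to be assigned. */
	addr = i3c_master_get_free_addr(m, 0);
	if (addr < 0)
		return addr;

	/*
	 * ... program the controller to emit ENTDAA and hand 'addr' to the
	 * device winning arbitration (hardware specific) ...
	 */

	/* Let the core retrieve BCR/DCR/PID and register the new device. */
	return i3c_master_add_i3c_dev_locked(m, addr);
}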
+
+/**
+ * i3c_master_set_info() - set master device information
+ * @master: master used to send frames on the bus
+ * @info: I3C device information
+ *
+ * Set master device info. This should be called from
+ * &i3c_master_controller_ops->bus_init().
+ *
+ * Not all &i3c_device_info fields are meaningful for a master device.
+ * Here is a list of fields that should be properly filled:
+ *
+ * - &i3c_device_info->dyn_addr
+ * - &i3c_device_info->bcr
+ * - &i3c_device_info->dcr
+ * - &i3c_device_info->pid
+ * - &i3c_device_info->hdr_cap if %I3C_BCR_HDR_CAP bit is set in
+ * &i3c_device_info->bcr
+ *
+ * This function must be called with the bus lock held in maintenance mode.
+ *
+ * Return: 0 if @info contains valid information (not every piece of
+ * information can be checked, but we can at least make sure @info->dyn_addr
+ * and @info->bcr are correct), -EINVAL otherwise.
+ */
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *i3cdev;
+ int ret;
+
+ if (!i3c_bus_dev_addr_is_avail(&master->bus, info->dyn_addr))
+ return -EINVAL;
+
+ if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER &&
+ master->secondary)
+ return -EINVAL;
+
+ if (master->this)
+ return -EINVAL;
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, info);
+ if (IS_ERR(i3cdev))
+ return PTR_ERR(i3cdev);
+
+ master->this = i3cdev;
+ master->bus.cur_master = master->this;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_set_info);
+
+static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev, *i3ctmp;
+ struct i2c_dev_desc *i2cdev, *i2ctmp;
+
+ list_for_each_entry_safe(i3cdev, i3ctmp, &master->bus.devs.i3c,
+ common.node) {
+ i3c_master_detach_i3c_dev(i3cdev);
+
+ if (i3cdev->boardinfo && i3cdev->boardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cdev->boardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ i3c_master_free_i3c_dev(i3cdev);
+ }
+
+ list_for_each_entry_safe(i2cdev, i2ctmp, &master->bus.devs.i2c,
+ common.node) {
+ i3c_master_detach_i2c_dev(i2cdev);
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cdev->addr,
+ I3C_ADDR_SLOT_FREE);
+ i3c_master_free_i2c_dev(i2cdev);
+ }
+}
+
+/**
+ * i3c_master_bus_init() - initialize an I3C bus
+ * @master: main master initializing the bus
+ *
+ * This function follows all the initialisation steps described in the I3C
+ * specification:
+ *
+ * 1. Attach I2C devs to the master so that the master can fill its internal
+ * device table appropriately
+ *
+ * 2. Call &i3c_master_controller_ops->bus_init() method to initialize
+ * the master controller. That's usually where the bus mode is selected
+ * (pure bus or mixed fast/slow bus)
+ *
+ * 3. Instruct all devices on the bus to drop their dynamic address. This is
+ * particularly important when the bus was previously configured by someone
+ * else (for example the bootloader)
+ *
+ * 4. Disable all slave events.
+ *
+ * 5. Reserve address slots for I3C devices with init_dyn_addr. If those
+ * devices also have a static_addr, try to pre-assign the dynamic addresses
+ * requested by the FW with SETDASA and attach the corresponding statically
+ * defined I3C devices to the master.
+ *
+ * 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
+ * remaining I3C devices
+ *
+ * Once this is done, all I3C and I2C devices should be usable.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int i3c_master_bus_init(struct i3c_master_controller *master)
+{
+ enum i3c_addr_slot_status status;
+ struct i2c_dev_boardinfo *i2cboardinfo;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ /*
+ * First attach all devices with static definitions provided by the
+ * FW.
+ */
+ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr);
+ if (status != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_detach_devs;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr,
+ I3C_ADDR_SLOT_I2C_DEV);
+
+ i2cdev = i3c_master_alloc_i2c_dev(master, i2cboardinfo);
+ if (IS_ERR(i2cdev)) {
+ ret = PTR_ERR(i2cdev);
+ goto err_detach_devs;
+ }
+
+ ret = i3c_master_attach_i2c_dev(master, i2cdev);
+ if (ret) {
+ i3c_master_free_i2c_dev(i2cdev);
+ goto err_detach_devs;
+ }
+ }
+
+ /*
+ * Now execute the controller specific ->bus_init() routine, which
+ * might configure its internal logic to match the bus limitations.
+ */
+ ret = master->ops->bus_init(master);
+ if (ret)
+ goto err_detach_devs;
+
+ /*
+ * The master device should have been instantiated in ->bus_init();
+ * complain if this was not the case.
+ */
+ if (!master->this) {
+ dev_err(&master->dev,
+ "master_set_info() was not called in ->bus_init()\n");
+ ret = -EINVAL;
+ goto err_bus_cleanup;
+ }
+
+ /*
+ * Reset all dynamic addresses that may have been assigned before
+ * (assigned by the bootloader for example).
+ */
+ ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /* Disable all slave events before starting DAA. */
+ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
+ I3C_CCC_EVENT_HJ);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /*
+ * Reserve init_dyn_addr first, and then try to pre-assign the dynamic
+ * address and retrieve device information if needed.
+ * In case pre-assigning the dynamic address fails, setting the dynamic
+ * address to the requested init_dyn_addr is retried after DAA is done, in
+ * i3c_master_add_i3c_dev_locked().
+ */
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+
+ /*
+ * We don't reserve a dynamic address for devices that
+ * don't explicitly request one.
+ */
+ if (!i3cboardinfo->init_dyn_addr)
+ continue;
+
+ ret = i3c_bus_get_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr);
+ if (ret != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_rstdaa;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+
+ /*
+ * Only try to create/attach devices that have a static
+ * address. Other devices will be created/attached when
+ * DAA happens, and the requested dynamic address will
+ * be set using SETNEWDA once those devices become
+ * addressable.
+ */
+
+ if (i3cboardinfo->static_addr)
+ i3c_master_early_i3c_dev_add(master, i3cboardinfo);
+ }
+
+ ret = i3c_master_do_daa(master);
+ if (ret)
+ goto err_rstdaa;
+
+ return 0;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+
+err_bus_cleanup:
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+err_detach_devs:
+ i3c_master_detach_free_devs(master);
+
+ return ret;
+}
+
+static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+{
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+ i3c_master_detach_free_devs(master);
+}
+
+static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
+{
+ struct i3c_master_controller *master = i3cdev->common.master;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+ if (i3cdev->info.pid != i3cboardinfo->pid)
+ continue;
+
+ i3cdev->boardinfo = i3cboardinfo;
+ i3cdev->info.static_addr = i3cboardinfo->static_addr;
+ return;
+ }
+}
+
+static struct i3c_dev_desc *
+i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(refdev);
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
+ return i3cdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_add_i3c_dev_locked() - add an I3C slave to the bus
+ * @master: master used to send frames on the bus
+ * @addr: I3C slave dynamic address assigned to the device
+ *
+ * This function instantiates an I3C device object and adds it to the
+ * I3C device list. All device information is automatically retrieved
+ * using standard CCC commands.
+ *
+ * The I3C device object is returned in case the master wants to attach
+ * private data to it using i3c_dev_set_master_data().
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ struct i3c_device_info info = { .dyn_addr = addr };
+ struct i3c_dev_desc *newdev, *olddev;
+ u8 old_dyn_addr = addr, expected_dyn_addr;
+ struct i3c_ibi_setup ibireq = { };
+ bool enable_ibi = false;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ newdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(newdev))
+ return PTR_ERR(newdev);
+
+ ret = i3c_master_attach_i3c_dev(master, newdev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = i3c_master_retrieve_dev_info(newdev);
+ if (ret)
+ goto err_detach_dev;
+
+ i3c_master_attach_boardinfo(newdev);
+
+ olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ if (olddev) {
+ newdev->dev = olddev->dev;
+ if (newdev->dev)
+ newdev->dev->desc = newdev;
+
+ /*
+ * We need to restore the IBI state too, so let's save the
+ * IBI information and try to restore it after olddev has
+ * been detached and released, its IBI has been stopped, and
+ * the associated resources have been freed.
+ */
+ mutex_lock(&olddev->ibi_lock);
+ if (olddev->ibi) {
+ ibireq.handler = olddev->ibi->handler;
+ ibireq.max_payload_len = olddev->ibi->max_payload_len;
+ ibireq.num_slots = olddev->ibi->num_slots;
+
+ if (olddev->ibi->enabled) {
+ enable_ibi = true;
+ i3c_dev_disable_ibi_locked(olddev);
+ }
+
+ i3c_dev_free_ibi_locked(olddev);
+ }
+ mutex_unlock(&olddev->ibi_lock);
+
+ old_dyn_addr = olddev->info.dyn_addr;
+
+ i3c_master_detach_i3c_dev(olddev);
+ i3c_master_free_i3c_dev(olddev);
+ }
+
+ ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ if (ret)
+ goto err_detach_dev;
+
+ /*
+ * Depending on our previous state, the expected dynamic address might
+ * differ:
+ * - if the device already had a dynamic address assigned, let's try to
+ * re-apply this one
+ * - if the device did not have a dynamic address and the firmware
+ * requested a specific address, pick this one
+ * - in any other case, keep the address automatically assigned by the
+ * master
+ */
+ if (old_dyn_addr && old_dyn_addr != newdev->info.dyn_addr)
+ expected_dyn_addr = old_dyn_addr;
+ else if (newdev->boardinfo && newdev->boardinfo->init_dyn_addr)
+ expected_dyn_addr = newdev->boardinfo->init_dyn_addr;
+ else
+ expected_dyn_addr = newdev->info.dyn_addr;
+
+ if (newdev->info.dyn_addr != expected_dyn_addr) {
+ /*
+ * Try to apply the expected dynamic address. If it fails, keep
+ * the address assigned by the master.
+ */
+ ret = i3c_master_setnewda_locked(master,
+ newdev->info.dyn_addr,
+ expected_dyn_addr);
+ if (!ret) {
+ old_dyn_addr = newdev->info.dyn_addr;
+ newdev->info.dyn_addr = expected_dyn_addr;
+ i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ } else {
+ dev_err(&master->dev,
+ "Failed to assign reserved/old address to device %d%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ }
+
+ /*
+ * Now it's time to try to restore the IBI setup. If we're lucky,
+ * everything works as before; otherwise, all we can do is complain.
+ * FIXME: maybe we should add a callback to inform the driver that it
+ * should request the IBI again instead of trying to hide that from
+ * it.
+ */
+ if (ibireq.handler) {
+ mutex_lock(&newdev->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(newdev, &ibireq);
+ if (ret) {
+ dev_err(&master->dev,
+ "Failed to request IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ } else if (enable_ibi) {
+ ret = i3c_dev_enable_ibi_locked(newdev);
+ if (ret)
+ dev_err(&master->dev,
+ "Failed to re-enable IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ mutex_unlock(&newdev->ibi_lock);
+ }
+
+ return 0;
+
+err_detach_dev:
+ if (newdev->dev && newdev->dev->desc)
+ newdev->dev->desc = NULL;
+
+ i3c_master_detach_i3c_dev(newdev);
+
+err_free_dev:
+ i3c_master_free_i3c_dev(newdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_add_i3c_dev_locked);
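
A minimal sketch of the caller side: a controller driver's ->do_daa() hook
typically runs ENTDAA in hardware and then calls this helper for every
address it assigned (the DesignWare driver further down follows the same
pattern). The "foo_*" names and the ndevs/addrs[] fields are placeholders,
not a real driver:

	static int foo_master_do_daa(struct i3c_master_controller *m)
	{
		struct foo_i3c_master *master = to_foo_i3c_master(m);
		int i, ret;

		/* ENTDAA was run by the hardware, filling master->addrs[]. */
		for (i = 0; i < master->ndevs; i++) {
			ret = i3c_master_add_i3c_dev_locked(m, master->addrs[i]);
			if (ret)
				return ret;
		}

		return 0;
	}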
+
+#define OF_I3C_REG1_IS_I2C_DEV BIT(31)
+
+static int
+of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i2c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ int ret;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
+ if (ret)
+ return ret;
+
+ /*
+ * The I3C Specification does not clearly say whether I2C devices with
+ * 10-bit addresses are supported. These devices can't be passed properly
+ * through the DEFSLVS command.
+ */
+ if (boardinfo->base.flags & I2C_CLIENT_TEN) {
+ dev_err(dev, "I2C device with 10 bit address not supported.");
+ return -ENOTSUPP;
+ }
+
+ /* LVR is encoded in reg[2]. */
+ boardinfo->lvr = reg[2];
+
+ list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
+ of_node_get(node);
+
+ return 0;
+}
+
+static int
+of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i3c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ enum i3c_addr_slot_status addrstatus;
+ u32 init_dyn_addr = 0;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ if (reg[0]) {
+ if (reg[0] > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ reg[0]);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->static_addr = reg[0];
+
+ if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
+ if (init_dyn_addr > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ init_dyn_addr);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
+
+ if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+ I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
+ return -EINVAL;
+
+ boardinfo->init_dyn_addr = init_dyn_addr;
+ boardinfo->of_node = of_node_get(node);
+ list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
+
+ return 0;
+}
+
+static int of_i3c_master_add_dev(struct i3c_master_controller *master,
+ struct device_node *node)
+{
+ u32 reg[3];
+ int ret;
+
+ if (!master || !node)
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
+ if (ret)
+ return ret;
+
+ /*
+ * The manufacturer ID can't be 0. If reg[1] == 0, that means we're
+ * dealing with an I2C device.
+ */
+ if (!reg[1])
+ ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
+ else
+ ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
+
+ return ret;
+}
+
+static int of_populate_i3c_bus(struct i3c_master_controller *master)
+{
+ struct device *dev = &master->dev;
+ struct device_node *i3cbus_np = dev->of_node;
+ struct device_node *node;
+ int ret;
+ u32 val;
+
+ if (!i3cbus_np)
+ return 0;
+
+ for_each_available_child_of_node(i3cbus_np, node) {
+ ret = of_i3c_master_add_dev(master, node);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ /*
+ * The user might want to limit the I2C and I3C speeds in case some
+ * devices on the bus do not support typical rates, or if the bus topology
+ * prevents it from using the max possible rate.
+ */
+ if (!of_property_read_u32(i3cbus_np, "i2c-scl-hz", &val))
+ master->bus.scl_rate.i2c = val;
+
+ if (!of_property_read_u32(i3cbus_np, "i3c-scl-hz", &val))
+ master->bus.scl_rate.i3c = val;
+
+ return 0;
+}
+
+static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+ struct i2c_dev_desc *dev;
+ int i, ret;
+ u16 addr;
+
+ if (!xfers || !master || nxfers <= 0)
+ return -EINVAL;
+
+ if (!master->ops->i2c_xfers)
+ return -ENOTSUPP;
+
+ /* Doing transfers to different devices is not supported. */
+ addr = xfers[0].addr;
+ for (i = 1; i < nxfers; i++) {
+ if (addr != xfers[i].addr)
+ return -ENOTSUPP;
+ }
+
+ i3c_bus_normaluse_lock(&master->bus);
+ dev = i3c_master_find_i2c_dev_by_addr(master, addr);
+ if (!dev)
+ ret = -ENOENT;
+ else
+ ret = master->ops->i2c_xfers(dev, xfers, nxfers);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return ret ? ret : nxfers;
+}
+
+static u32 i3c_master_i2c_funcs(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm i3c_master_i2c_algo = {
+ .master_xfer = i3c_master_i2c_adapter_xfer,
+ .functionality = i3c_master_i2c_funcs,
+};
+
+static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+{
+ struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ adap->dev.parent = master->dev.parent;
+ adap->owner = master->dev.parent->driver->owner;
+ adap->algo = &i3c_master_i2c_algo;
+ strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+
+ /* FIXME: Should we allow i3c masters to override these values? */
+ adap->timeout = 1000;
+ adap->retries = 3;
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ /*
+ * We silently ignore failures here. The bus should keep working
+ * correctly even if one or more i2c devices are not registered.
+ */
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = i2c_new_client_device(adap, &i2cdev->boardinfo->base);
+
+ return 0;
+}
+
+static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
+{
+ struct i2c_dev_desc *i2cdev;
+
+ i2c_del_adapter(&master->i2c);
+
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = NULL;
+}
+
+static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (!i3cdev->dev)
+ continue;
+
+ i3cdev->dev->desc = NULL;
+ if (device_is_registered(&i3cdev->dev->dev))
+ device_unregister(&i3cdev->dev->dev);
+ else
+ put_device(&i3cdev->dev->dev);
+ i3cdev->dev = NULL;
+ }
+}
+
+/**
+ * i3c_master_queue_ibi() - Queue an IBI
+ * @dev: the device this IBI is coming from
+ * @slot: the IBI slot used to store the payload
+ *
+ * Queue an IBI to the controller workqueue. The IBI handler attached to
+ * the dev will be called from a workqueue context.
+ */
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+{
+ atomic_inc(&dev->ibi->pending_ibis);
+ queue_work(dev->common.master->wq, &slot->work);
+}
+EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
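
A hedged sketch of how a controller driver's IRQ path typically feeds this
helper: grab a free slot from the device's generic IBI pool, copy the
payload, then queue it. The "foo_*" names, the ibi_pool field, and the
payload-read helper are placeholders:

	static void foo_master_handle_ibi_irq(struct foo_i3c_master *master,
					      struct i3c_dev_desc *dev)
	{
		struct foo_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
		struct i3c_ibi_slot *slot;

		slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
		if (!slot)
			return; /* No free slot: the payload is dropped. */

		slot->len = foo_master_read_ibi_data(master, slot->data,
						     dev->ibi->max_payload_len);
		i3c_master_queue_ibi(dev, slot);
	}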
+
+static void i3c_master_handle_ibi(struct work_struct *work)
+{
+ struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
+ work);
+ struct i3c_dev_desc *dev = slot->dev;
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_ibi_payload payload;
+
+ payload.data = slot->data;
+ payload.len = slot->len;
+
+ if (dev->dev)
+ dev->ibi->handler(dev->dev, &payload);
+
+ master->ops->recycle_ibi_slot(dev, slot);
+ if (atomic_dec_and_test(&dev->ibi->pending_ibis))
+ complete(&dev->ibi->all_ibis_handled);
+}
+
+static void i3c_master_init_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ slot->dev = dev;
+ INIT_WORK(&slot->work, i3c_master_handle_ibi);
+}
+
+struct i3c_generic_ibi_slot {
+ struct list_head node;
+ struct i3c_ibi_slot base;
+};
+
+struct i3c_generic_ibi_pool {
+ spinlock_t lock;
+ unsigned int num_slots;
+ struct i3c_generic_ibi_slot *slots;
+ void *payload_buf;
+ struct list_head free_slots;
+ struct list_head pending;
+};
+
+/**
+ * i3c_generic_ibi_free_pool() - Free a generic IBI pool
+ * @pool: the IBI pool to free
+ *
+ * Free all IBI slots allocated by a generic IBI pool.
+ */
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int nslots = 0;
+
+ while (!list_empty(&pool->free_slots)) {
+ slot = list_first_entry(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ list_del(&slot->node);
+ nslots++;
+ }
+
+ /*
+ * If the number of freed slots is not equal to the number of allocated
+ * slots, we have a leak somewhere.
+ */
+ WARN_ON(nslots != pool->num_slots);
+
+ kfree(pool->payload_buf);
+ kfree(pool->slots);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_free_pool);
+
+/**
+ * i3c_generic_ibi_alloc_pool() - Create a generic IBI pool
+ * @dev: the device this pool will be used for
+ * @req: IBI setup request describing what the device driver expects
+ *
+ * Create a generic IBI pool based on the information provided in @req.
+ *
+ * Return: a valid IBI pool in case of success, an ERR_PTR() otherwise.
+ */
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_generic_ibi_pool *pool;
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int i;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free_slots);
+ INIT_LIST_HEAD(&pool->pending);
+
+ pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
+ if (!pool->slots) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+
+ if (req->max_payload_len) {
+ pool->payload_buf = kcalloc(req->num_slots,
+ req->max_payload_len, GFP_KERNEL);
+ if (!pool->payload_buf) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+ }
+
+ for (i = 0; i < req->num_slots; i++) {
+ slot = &pool->slots[i];
+ i3c_master_init_ibi_slot(dev, &slot->base);
+
+ if (req->max_payload_len)
+ slot->base.data = pool->payload_buf +
+ (i * req->max_payload_len);
+
+ list_add_tail(&slot->node, &pool->free_slots);
+ pool->num_slots++;
+ }
+
+ return pool;
+
+err_free_pool:
+ i3c_generic_ibi_free_pool(pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_alloc_pool);
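
A hedged sketch of a ->request_ibi() implementation backed by this pool
allocator ("foo_*" names and the ibi_pool field are placeholders); the
matching ->free_ibi() would simply call i3c_generic_ibi_free_pool():

	static int foo_master_request_ibi(struct i3c_dev_desc *dev,
					  const struct i3c_ibi_setup *req)
	{
		struct foo_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

		data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
		if (IS_ERR(data->ibi_pool))
			return PTR_ERR(data->ibi_pool);

		/* Hardware-specific IBI setup would go here. */
		return 0;
	}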
+
+/**
+ * i3c_generic_ibi_get_free_slot() - Get a free slot from a generic IBI pool
+ * @pool: the pool to query an IBI slot on
+ *
+ * Search for a free slot in a generic IBI pool.
+ * The slot should be returned to the pool using i3c_generic_ibi_recycle_slot()
+ * when it's no longer needed.
+ *
+ * Return: a pointer to a free slot, or NULL if there's no free slot available.
+ */
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ slot = list_first_entry_or_null(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ if (slot)
+ list_del(&slot->node);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return slot ? &slot->base : NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_get_free_slot);
+
+/**
+ * i3c_generic_ibi_recycle_slot() - Return a slot to a generic IBI pool
+ * @pool: the pool to return the IBI slot to
+ * @s: IBI slot to recycle
+ *
+ * Add an IBI slot back to its generic IBI pool. Should be called from the
+ * master driver's &struct i3c_master_controller_ops->recycle_ibi_slot()
+ * method.
+ */
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *s)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ if (!s)
+ return;
+
+ slot = container_of(s, struct i3c_generic_ibi_slot, base);
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_tail(&slot->node, &pool->free_slots);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
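
The ->recycle_ibi_slot() hook mentioned above usually reduces to a
one-liner handing the slot back to the pool (hypothetical "foo_*" names
and ibi_pool field):

	static void foo_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
						struct i3c_ibi_slot *slot)
	{
		struct foo_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

		i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
	}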
+
+static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
+{
+ if (!ops || !ops->bus_init || !ops->priv_xfers ||
+ !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers)
+ return -EINVAL;
+
+ if (ops->request_ibi &&
+ (!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
+ !ops->recycle_ibi_slot))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * i3c_master_register() - register an I3C master
+ * @master: master used to send frames on the bus
+ * @parent: the parent device (the one that provides this I3C master
+ * controller)
+ * @ops: the master controller operations
+ * @secondary: true if you are registering a secondary master. Will return
+ * -ENOTSUPP if set to true since secondary masters are not yet
+ * supported
+ *
+ * This function takes care of everything for you:
+ *
+ * - creates and initializes the I3C bus
+ * - populates the bus with static I2C devs if @parent->of_node is not
+ * NULL
+ * - registers all I3C devices added by the controller during bus
+ * initialization
+ * - registers the I2C adapter and all I2C devices
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary)
+{
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+ enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
+ struct i2c_dev_boardinfo *i2cbi;
+ int ret;
+
+ /* We do not support secondary masters yet. */
+ if (secondary)
+ return -ENOTSUPP;
+
+ ret = i3c_master_check_ops(ops);
+ if (ret)
+ return ret;
+
+ master->dev.parent = parent;
+ master->dev.of_node = of_node_get(parent->of_node);
+ master->dev.bus = &i3c_bus_type;
+ master->dev.type = &i3c_masterdev_type;
+ master->dev.release = i3c_masterdev_release;
+ master->ops = ops;
+ master->secondary = secondary;
+ INIT_LIST_HEAD(&master->boardinfo.i2c);
+ INIT_LIST_HEAD(&master->boardinfo.i3c);
+
+ ret = i3c_bus_init(i3cbus);
+ if (ret)
+ return ret;
+
+ device_initialize(&master->dev);
+ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
+ ret = of_populate_i3c_bus(master);
+ if (ret)
+ goto err_put_dev;
+
+ list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
+ switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) {
+ case I3C_LVR_I2C_INDEX(0):
+ if (mode < I3C_BUS_MODE_MIXED_FAST)
+ mode = I3C_BUS_MODE_MIXED_FAST;
+ break;
+ case I3C_LVR_I2C_INDEX(1):
+ if (mode < I3C_BUS_MODE_MIXED_LIMITED)
+ mode = I3C_BUS_MODE_MIXED_LIMITED;
+ break;
+ case I3C_LVR_I2C_INDEX(2):
+ if (mode < I3C_BUS_MODE_MIXED_SLOW)
+ mode = I3C_BUS_MODE_MIXED_SLOW;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_put_dev;
+ }
+
+ if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
+ }
+
+ ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
+ if (ret)
+ goto err_put_dev;
+
+ master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+ if (!master->wq) {
+ ret = -ENOMEM;
+ goto err_put_dev;
+ }
+
+ ret = i3c_master_bus_init(master);
+ if (ret)
+ goto err_put_dev;
+
+ ret = device_add(&master->dev);
+ if (ret)
+ goto err_cleanup_bus;
+
+ /*
+ * Expose our I3C bus as an I2C adapter so that I2C devices are exposed
+ * through the I2C subsystem.
+ */
+ ret = i3c_master_i2c_adapter_init(master);
+ if (ret)
+ goto err_del_dev;
+
+ /*
+ * We're done initializing the bus and the controller, we can now
+ * register I3C devices discovered during the initial DAA.
+ */
+ master->init_done = true;
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+
+err_del_dev:
+ device_del(&master->dev);
+
+err_cleanup_bus:
+ i3c_master_bus_cleanup(master);
+
+err_put_dev:
+ put_device(&master->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_register);
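
For reference, a minimal probe sketch showing the registration call
described above. The "foo_*" names are placeholders (a driver struct
embedding an i3c_master_controller as .base, like the DesignWare driver
below), and foo_i3c_ops must at least provide the ->bus_init(),
->priv_xfers(), ->send_ccc_cmd(), ->do_daa() and ->i2c_xfers() hooks
checked by i3c_master_check_ops(); dw_i3c_probe() further down is the
real-world equivalent:

	static int foo_i3c_probe(struct platform_device *pdev)
	{
		struct foo_i3c_master *master;

		master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		/* ... map registers, get clocks, request the IRQ ... */

		return i3c_master_register(&master->base, &pdev->dev,
					   &foo_i3c_ops, false);
	}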
+
+/**
+ * i3c_master_unregister() - unregister an I3C master
+ * @master: master used to send frames on the bus
+ *
+ * Basically undo everything done in i3c_master_register().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_unregister(struct i3c_master_controller *master)
+{
+ i3c_master_i2c_adapter_cleanup(master);
+ i3c_master_unregister_i3c_devs(master);
+ i3c_master_bus_cleanup(master);
+ device_unregister(&master->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_unregister);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *master;
+
+ if (!dev)
+ return -ENOENT;
+
+ master = i3c_dev_get_master(dev);
+ if (!master || !xfers)
+ return -EINVAL;
+
+ if (!master->ops->priv_xfers)
+ return -ENOTSUPP;
+
+ return master->ops->priv_xfers(dev, xfers, nxfers);
+}
+
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master;
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ master = i3c_dev_get_master(dev);
+ ret = master->ops->disable_ibi(dev);
+ if (ret)
+ return ret;
+
+ reinit_completion(&dev->ibi->all_ibis_handled);
+ if (atomic_read(&dev->ibi->pending_ibis))
+ wait_for_completion(&dev->ibi->all_ibis_handled);
+
+ dev->ibi->enabled = false;
+
+ return 0;
+}
+
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ ret = master->ops->enable_ibi(dev);
+ if (!ret)
+ dev->ibi->enabled = true;
+
+ return ret;
+}
+
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_device_ibi_info *ibi;
+ int ret;
+
+ if (!master->ops->request_ibi)
+ return -ENOTSUPP;
+
+ if (dev->ibi)
+ return -EBUSY;
+
+ ibi = kzalloc(sizeof(*ibi), GFP_KERNEL);
+ if (!ibi)
+ return -ENOMEM;
+
+ atomic_set(&ibi->pending_ibis, 0);
+ init_completion(&ibi->all_ibis_handled);
+ ibi->handler = req->handler;
+ ibi->max_payload_len = req->max_payload_len;
+ ibi->num_slots = req->num_slots;
+
+ dev->ibi = ibi;
+ ret = master->ops->request_ibi(dev, req);
+ if (ret) {
+ kfree(ibi);
+ dev->ibi = NULL;
+ }
+
+ return ret;
+}
+
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (!dev->ibi)
+ return;
+
+ if (WARN_ON(dev->ibi->enabled))
+ WARN_ON(i3c_dev_disable_ibi_locked(dev));
+
+ master->ops->free_ibi(dev);
+ kfree(dev->ibi);
+ dev->ibi = NULL;
+}
+
+static int __init i3c_init(void)
+{
+ return bus_register(&i3c_bus_type);
+}
+subsys_initcall(i3c_init);
+
+static void __exit i3c_exit(void)
+{
+ idr_destroy(&i3c_bus_idr);
+ bus_unregister(&i3c_bus_type);
+}
+module_exit(i3c_exit);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("I3C core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
new file mode 100644
index 000000000..4e80a1fcb
--- /dev/null
+++ b/drivers/i3c/master/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CDNS_I3C_MASTER
+ tristate "Cadence I3C master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ help
+ Enable this driver if you want to support the Cadence I3C master block.
+
+config DW_I3C_MASTER
+ tristate "Synospsys DesignWare I3C master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ # ALPHA and PARISC need {read,write}sl()
+ help
+ Support for Synopsys DesignWare MIPI I3C Controller.
+
+ For details please see
+ https://www.synopsys.com/dw/ipdir.php?ds=mipi_i3c
+
+ This driver can also be built as a module. If so, the module
+ will be called dw-i3c-master.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
new file mode 100644
index 000000000..7eea9e086
--- /dev/null
+++ b/drivers/i3c/master/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
+obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
new file mode 100644
index 000000000..8513bd353
--- /dev/null
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -0,0 +1,1219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ *
+ * Author: Vitor Soares <vitor.soares@synopsys.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define DEVICE_CTRL 0x0
+#define DEV_CTRL_ENABLE BIT(31)
+#define DEV_CTRL_RESUME BIT(30)
+#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
+#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)
+
+#define DEVICE_ADDR 0x4
+#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
+#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))
+
+#define HW_CAPABILITY 0x8
+#define COMMAND_QUEUE_PORT 0xc
+#define COMMAND_PORT_TOC BIT(30)
+#define COMMAND_PORT_READ_TRANSFER BIT(28)
+#define COMMAND_PORT_SDAP BIT(27)
+#define COMMAND_PORT_ROC BIT(26)
+#define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
+#define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
+#define COMMAND_PORT_CP BIT(15)
+#define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
+#define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))
+
+#define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
+#define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
+#define COMMAND_PORT_TRANSFER_ARG 0x01
+
+#define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24))
+#define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16))
+#define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8))
+#define COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5)
+#define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4)
+#define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3)
+#define COMMAND_PORT_SHORT_DATA_ARG 0x02
+
+#define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21))
+#define COMMAND_PORT_ADDR_ASSGN_CMD 0x03
+
+#define RESPONSE_QUEUE_PORT 0x10
+#define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28)
+#define RESPONSE_NO_ERROR 0
+#define RESPONSE_ERROR_CRC 1
+#define RESPONSE_ERROR_PARITY 2
+#define RESPONSE_ERROR_FRAME 3
+#define RESPONSE_ERROR_IBA_NACK 4
+#define RESPONSE_ERROR_ADDRESS_NACK 5
+#define RESPONSE_ERROR_OVER_UNDER_FLOW 6
+#define RESPONSE_ERROR_TRANSF_ABORT 8
+#define RESPONSE_ERROR_I2C_W_NACK_ERR 9
+#define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
+#define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))
+
+#define RX_TX_DATA_PORT 0x14
+#define IBI_QUEUE_STATUS 0x18
+#define QUEUE_THLD_CTRL 0x1c
+#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
+#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
+
+#define DATA_BUFFER_THLD_CTRL 0x20
+#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)
+
+#define IBI_QUEUE_CTRL 0x24
+#define IBI_MR_REQ_REJECT 0x2C
+#define IBI_SIR_REQ_REJECT 0x30
+#define IBI_REQ_REJECT_ALL GENMASK(31, 0)
+
+#define RESET_CTRL 0x34
+#define RESET_CTRL_IBI_QUEUE BIT(5)
+#define RESET_CTRL_RX_FIFO BIT(4)
+#define RESET_CTRL_TX_FIFO BIT(3)
+#define RESET_CTRL_RESP_QUEUE BIT(2)
+#define RESET_CTRL_CMD_QUEUE BIT(1)
+#define RESET_CTRL_SOFT BIT(0)
+
+#define SLV_EVENT_CTRL 0x38
+#define INTR_STATUS 0x3c
+#define INTR_STATUS_EN 0x40
+#define INTR_SIGNAL_EN 0x44
+#define INTR_FORCE 0x48
+#define INTR_BUSOWNER_UPDATE_STAT BIT(13)
+#define INTR_IBI_UPDATED_STAT BIT(12)
+#define INTR_READ_REQ_RECV_STAT BIT(11)
+#define INTR_DEFSLV_STAT BIT(10)
+#define INTR_TRANSFER_ERR_STAT BIT(9)
+#define INTR_DYN_ADDR_ASSGN_STAT BIT(8)
+#define INTR_CCC_UPDATED_STAT BIT(6)
+#define INTR_TRANSFER_ABORT_STAT BIT(5)
+#define INTR_RESP_READY_STAT BIT(4)
+#define INTR_CMD_QUEUE_READY_STAT BIT(3)
+#define INTR_IBI_THLD_STAT BIT(2)
+#define INTR_RX_THLD_STAT BIT(1)
+#define INTR_TX_THLD_STAT BIT(0)
+#define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT | \
+ INTR_IBI_UPDATED_STAT | \
+ INTR_READ_REQ_RECV_STAT | \
+ INTR_DEFSLV_STAT | \
+ INTR_TRANSFER_ERR_STAT | \
+ INTR_DYN_ADDR_ASSGN_STAT | \
+ INTR_CCC_UPDATED_STAT | \
+ INTR_TRANSFER_ABORT_STAT | \
+ INTR_RESP_READY_STAT | \
+ INTR_CMD_QUEUE_READY_STAT | \
+ INTR_IBI_THLD_STAT | \
+ INTR_TX_THLD_STAT | \
+ INTR_RX_THLD_STAT)
+
+#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
+ INTR_RESP_READY_STAT)
+
+#define QUEUE_STATUS_LEVEL 0x4c
+#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
+#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0))
+
+#define DATA_BUFFER_STATUS_LEVEL 0x50
+#define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))
+
+#define PRESENT_STATE 0x54
+#define CCC_DEVICE_STATUS 0x58
+#define DEVICE_ADDR_TABLE_POINTER 0x5c
+#define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0))
+
+#define DEV_CHAR_TABLE_POINTER 0x60
+#define VENDOR_SPECIFIC_REG_POINTER 0x6c
+#define SLV_PID_VALUE 0x74
+#define SLV_CHAR_CTRL 0x78
+#define SLV_MAX_LEN 0x7c
+#define MAX_READ_TURNAROUND 0x80
+#define MAX_DATA_SPEED 0x84
+#define SLV_DEBUG_STATUS 0x88
+#define SLV_INTR_REQ 0x8c
+#define DEVICE_CTRL_EXTENDED 0xb0
+#define SCL_I3C_OD_TIMING 0xb4
+#define SCL_I3C_PP_TIMING 0xb8
+#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
+#define SCL_I3C_TIMING_CNT_MIN 5
+
+#define SCL_I2C_FM_TIMING 0xbc
+#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
+#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
+
+#define SCL_I2C_FMP_TIMING 0xc0
+#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
+
+#define SCL_EXT_LCNT_TIMING 0xc8
+#define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
+#define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8))
+#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
+
+#define SCL_EXT_TERMN_LCNT_TIMING 0xcc
+#define BUS_FREE_TIMING 0xd4
+#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
+
+#define BUS_IDLE_TIMING 0xd8
+#define I3C_VER_ID 0xe0
+#define I3C_VER_TYPE 0xe4
+#define EXTENDED_CAPABILITY 0xe8
+#define SLAVE_CONFIG 0xec
+
+#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
+#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
+#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
+
+#define MAX_DEVS 32
+
+#define I3C_BUS_SDR1_SCL_RATE 8000000
+#define I3C_BUS_SDR2_SCL_RATE 6000000
+#define I3C_BUS_SDR3_SCL_RATE 4000000
+#define I3C_BUS_SDR4_SCL_RATE 2000000
+#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
+#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
+#define I3C_BUS_THIGH_MAX_NS 41
+
+#define XFER_TIMEOUT (msecs_to_jiffies(1000))
+
+struct dw_i3c_master_caps {
+ u8 cmdfifodepth;
+ u8 datafifodepth;
+};
+
+struct dw_i3c_cmd {
+ u32 cmd_lo;
+ u32 cmd_hi;
+ u16 tx_len;
+ const void *tx_buf;
+ u16 rx_len;
+ void *rx_buf;
+ u8 error;
+};
+
+struct dw_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ struct dw_i3c_cmd cmds[];
+};
+
+struct dw_i3c_master {
+ struct i3c_master_controller base;
+ u16 maxdevs;
+ u16 datstartaddr;
+ u32 free_pos;
+ struct {
+ struct list_head list;
+ struct dw_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ struct dw_i3c_master_caps caps;
+ void __iomem *regs;
+ struct reset_control *core_rst;
+ struct clk *core_clk;
+ char version[5];
+ char type[5];
+ u8 addrs[MAX_DEVS];
+};
+
+struct dw_i3c_i2c_dev_data {
+ u8 index;
+};
+
+/*
+ * Returns 1 when @p contains an even number of set bits. dw_i3c_master_daa()
+ * uses this to compute the parity bit it stores in bit 7 of the dynamic
+ * address programmed into the device address table (DAT).
+ */
+static u8 even_parity(u8 p)
+{
+ p ^= p >> 4;
+ p &= 0xf;
+
+ return (0x9669 >> p) & 1;
+}
+
+static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_ENTHDR(0):
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline struct dw_i3c_master *
+to_dw_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct dw_i3c_master, base);
+}
+
+static void dw_i3c_master_disable(struct dw_i3c_master *master)
+{
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
+}
+
+static void dw_i3c_master_enable(struct dw_i3c_master *master)
+{
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
+}
+
+static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
+{
+ int pos;
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (addr == master->addrs[pos])
+ return pos;
+ }
+
+ return -EINVAL;
+}
+
+static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
+{
+ if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
+ return -ENOSPC;
+
+ return ffs(master->free_pos) - 1;
+}
+
+static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
+ const u8 *bytes, int nbytes)
+{
+ writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
+ writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ }
+}
+
+static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ readsl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ readsl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
+static struct dw_i3c_xfer *
+dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
+{
+ struct dw_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
+{
+ struct dw_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i;
+ u32 thld_ctrl;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
+ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
+ }
+}
+
+static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ dw_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ if (master->xferqueue.cur == xfer) {
+ u32 status;
+
+ master->xferqueue.cur = NULL;
+
+ writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
+ RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
+ master->regs + RESET_CTRL);
+
+ readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
+ !status, 10, 1000000);
+ } else {
+ list_del_init(&xfer->node);
+ }
+}
+
+static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
+{
+ struct dw_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+ u32 nresp;
+
+ if (!xfer)
+ return;
+
+ nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
+ nresp = QUEUE_STATUS_LEVEL_RESP(nresp);
+
+ for (i = 0; i < nresp; i++) {
+ struct dw_i3c_cmd *cmd;
+ u32 resp;
+
+ resp = readl(master->regs + RESPONSE_QUEUE_PORT);
+
+ cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
+ cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
+ cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
+ if (cmd->rx_len && !cmd->error)
+ dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
+ cmd->rx_len);
+ }
+
+ for (i = 0; i < nresp; i++) {
+ switch (xfer->cmds[i].error) {
+ case RESPONSE_NO_ERROR:
+ break;
+ case RESPONSE_ERROR_PARITY:
+ case RESPONSE_ERROR_IBA_NACK:
+ case RESPONSE_ERROR_TRANSF_ABORT:
+ case RESPONSE_ERROR_CRC:
+ case RESPONSE_ERROR_FRAME:
+ ret = -EIO;
+ break;
+ case RESPONSE_ERROR_OVER_UNDER_FLOW:
+ ret = -ENOSPC;
+ break;
+ case RESPONSE_ERROR_I2C_W_NACK_ERR:
+ case RESPONSE_ERROR_ADDRESS_NACK:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ if (ret < 0) {
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
+ master->regs + DEVICE_CTRL);
+ }
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct dw_i3c_xfer,
+ node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ dw_i3c_master_start_xfer_locked(master);
+}
+
+static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u32 scl_timing;
+ u8 hcnt, lcnt;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+
+ hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
+ if (hcnt < SCL_I3C_TIMING_CNT_MIN)
+ hcnt = SCL_I3C_TIMING_CNT_MIN;
+
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
+ if (lcnt < SCL_I3C_TIMING_CNT_MIN)
+ lcnt = SCL_I3C_TIMING_CNT_MIN;
+
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
+
+ if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
+ writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
+ scl_timing = SCL_EXT_LCNT_1(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_2(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_3(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_4(lcnt);
+ writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
+
+ return 0;
+}
+
+static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u16 hcnt, lcnt;
+ u32 scl_timing;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+ scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
+ SCL_I2C_FMP_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+ scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
+ SCL_I2C_FM_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
+
+ writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
+ master->regs + DEVICE_CTRL);
+
+ return 0;
+}
+
+static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = { };
+ u32 thld_ctrl;
+ int ret;
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ ret = dw_i2c_clk_cfg(master);
+ if (ret)
+ return ret;
+ fallthrough;
+ case I3C_BUS_MODE_PURE:
+ ret = dw_i3c_clk_cfg(master);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
+ thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
+ writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
+ writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
+ master->regs + DEVICE_ADDR);
+
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
+
+ /* For now don't support Hot-Join */
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
+ master->regs + DEVICE_CTRL);
+
+ dw_i3c_master_enable(master);
+
+ return 0;
+}
+
+static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ dw_i3c_master_disable(master);
+}
+
+static int dw_i3c_ccc_set(struct dw_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+ pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ cmd = xfer->cmds;
+ cmd->tx_buf = ccc->dests[0].payload.data;
+ cmd->tx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+ ccc->err = I3C_ERROR_M2;
+
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ int ret, pos;
+
+ pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ cmd = xfer->cmds;
+ cmd->rx_buf = ccc->dests[0].payload.data;
+ cmd->rx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+ ccc->err = I3C_ERROR_M2;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int ret = 0;
+
+ if (ccc->id == I3C_CCC_ENTDAA)
+ return -EINVAL;
+
+ if (ccc->rnw)
+ ret = dw_i3c_ccc_get(master, ccc);
+ else
+ ret = dw_i3c_ccc_set(master, ccc);
+
+ return ret;
+}
+
+static int dw_i3c_master_daa(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ u32 olddevs, newdevs;
+ u8 p, last_addr = 0;
+ int ret, pos;
+
+ olddevs = ~(master->free_pos);
+
+ /* Prepare DAT before launching DAA. */
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (olddevs & BIT(pos))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ master->addrs[pos] = ret;
+ p = even_parity(ret);
+ last_addr = ret;
+ ret |= (p << 7);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ cmd = &xfer->cmds[0];
+ cmd->cmd_hi = 0x1;
+ cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
+ COMMAND_PORT_ADDR_ASSGN_CMD |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
+ newdevs &= ~olddevs;
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, master->addrs[pos]);
+ }
+
+ dw_i3c_master_free_xfer(xfer);
+
+ i3c_master_disec_locked(m, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_HJ |
+ I3C_CCC_EVENT_MR |
+ I3C_CCC_EVENT_SIR);
+
+ return 0;
+}
+
+static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (!i3c_nxfers)
+ return 0;
+
+ if (i3c_nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ if (i3c_xfers[i].rnw)
+ nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.datafifodepth ||
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->rx_len = i3c_xfers[i].len;
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_SPEED(dev->info.max_read_ds);
+
+ } else {
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->tx_len = i3c_xfers[i].len;
+ cmd->cmd_lo =
+ COMMAND_PORT_SPEED(dev->info.max_write_ds);
+ }
+
+ cmd->cmd_lo |= COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i == (i3c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+
+ if (data->index > pos && pos > 0) {
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ }
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = dev->info.dyn_addr;
+
+ return 0;
+}
+
+static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
+ master->free_pos &= ~BIT(pos);
+ i3c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ return 0;
+}
+
+static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (!i2c_nxfers)
+ return 0;
+
+ if (i2c_nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ if (i2c_xfers[i].flags & I2C_M_RD)
+ nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.datafifodepth ||
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i2c_xfers[i].flags & I2C_M_RD) {
+ cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
+ cmd->rx_buf = i2c_xfers[i].buf;
+ cmd->rx_len = i2c_xfers[i].len;
+ } else {
+ cmd->tx_buf = i2c_xfers[i].buf;
+ cmd->tx_len = i2c_xfers[i].len;
+ }
+
+ if (i == (i2c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ master->addrs[pos] = dev->addr;
+ master->free_pos &= ~BIT(pos);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
+ DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ return 0;
+}
+
+static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
+{
+ struct dw_i3c_master *master = dev_id;
+ u32 status;
+
+ status = readl(master->regs + INTR_STATUS);
+
+ if (!(status & readl(master->regs + INTR_STATUS_EN))) {
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&master->xferqueue.lock);
+ dw_i3c_master_end_xfer_locked(master, status);
+ if (status & INTR_TRANSFER_ERR_STAT)
+ writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
+ spin_unlock(&master->xferqueue.lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
+ .bus_init = dw_i3c_master_bus_init,
+ .bus_cleanup = dw_i3c_master_bus_cleanup,
+ .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
+ .do_daa = dw_i3c_master_daa,
+ .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
+ .priv_xfers = dw_i3c_master_priv_xfers,
+ .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
+ .i2c_xfers = dw_i3c_master_i2c_xfers,
+};
+
+static int dw_i3c_probe(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(master->core_clk))
+ return PTR_ERR(master->core_clk);
+
+ master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "core_rst");
+ if (IS_ERR(master->core_rst))
+ return PTR_ERR(master->core_rst);
+
+ ret = clk_prepare_enable(master->core_clk);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(master->core_rst);
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq,
+ dw_i3c_master_irq_handler, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_assert_rst;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Information regarding the FIFOs/QUEUEs depth */
+ ret = readl(master->regs + QUEUE_STATUS_LEVEL);
+ master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
+
+ ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
+ master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);
+
+ ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
+ master->datstartaddr = ret;
+ master->maxdevs = ret >> 16;
+ master->free_pos = GENMASK(master->maxdevs - 1, 0);
+
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &dw_mipi_i3c_ops, false);
+ if (ret)
+ goto err_assert_rst;
+
+ return 0;
+
+err_assert_rst:
+ reset_control_assert(master->core_rst);
+
+err_disable_core_clk:
+ clk_disable_unprepare(master->core_clk);
+
+ return ret;
+}
+
+static int dw_i3c_remove(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ reset_control_assert(master->core_rst);
+
+ clk_disable_unprepare(master->core_clk);
+
+ return 0;
+}
+
+static const struct of_device_id dw_i3c_master_of_match[] = {
+ { .compatible = "snps,dw-i3c-master-1.00a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
+
+static struct platform_driver dw_i3c_driver = {
+ .probe = dw_i3c_probe,
+ .remove = dw_i3c_remove,
+ .driver = {
+ .name = "dw-i3c-master",
+ .of_match_table = of_match_ptr(dw_i3c_master_of_match),
+ },
+};
+module_platform_driver(dw_i3c_driver);
+
+MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
+MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
new file mode 100644
index 000000000..6b9df33ac
--- /dev/null
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -0,0 +1,1691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+
+#define DEV_ID 0x0
+#define DEV_ID_I3C_MASTER 0x5034
+
+#define CONF_STATUS0 0x4
+#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
+#define CONF_STATUS0_ECC_CHK BIT(28)
+#define CONF_STATUS0_INTEG_CHK BIT(27)
+#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
+#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
+#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
+#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
+#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
+#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7))
+#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
+#define CONF_STATUS0_SEC_MASTER BIT(4)
+#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
+
+#define CONF_STATUS1 0x8
+#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
+#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
+#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
+#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
+#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
+#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
+#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
+
+#define REV_ID 0xc
+#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
+#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
+#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
+#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
+
+#define CTRL 0x10
+#define CTRL_DEV_EN BIT(31)
+#define CTRL_HALT_EN BIT(30)
+#define CTRL_MCS BIT(29)
+#define CTRL_MCS_EN BIT(28)
+#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
+#define CTRL_HJ_DISEC BIT(8)
+#define CTRL_MST_ACK BIT(7)
+#define CTRL_HJ_ACK BIT(6)
+#define CTRL_HJ_INIT BIT(5)
+#define CTRL_MST_INIT BIT(4)
+#define CTRL_AHDR_OPT BIT(3)
+#define CTRL_PURE_BUS_MODE 0
+#define CTRL_MIXED_FAST_BUS_MODE 2
+#define CTRL_MIXED_SLOW_BUS_MODE 3
+#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+#define THD_DELAY_MAX 3
+
+#define PRESCL_CTRL0 0x14
+#define PRESCL_CTRL0_I2C(x) ((x) << 16)
+#define PRESCL_CTRL0_I3C(x) (x)
+#define PRESCL_CTRL0_MAX GENMASK(9, 0)
+
+#define PRESCL_CTRL1 0x18
+#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
+#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
+#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
+#define PRESCL_CTRL1_OD_LOW(x) (x)
+
+#define MST_IER 0x20
+#define MST_IDR 0x24
+#define MST_IMR 0x28
+#define MST_ICR 0x2c
+#define MST_ISR 0x30
+#define MST_INT_HALTED BIT(18)
+#define MST_INT_MR_DONE BIT(17)
+#define MST_INT_IMM_COMP BIT(16)
+#define MST_INT_TX_THR BIT(15)
+#define MST_INT_TX_OVF BIT(14)
+#define MST_INT_IBID_THR BIT(12)
+#define MST_INT_IBID_UNF BIT(11)
+#define MST_INT_IBIR_THR BIT(10)
+#define MST_INT_IBIR_UNF BIT(9)
+#define MST_INT_IBIR_OVF BIT(8)
+#define MST_INT_RX_THR BIT(7)
+#define MST_INT_RX_UNF BIT(6)
+#define MST_INT_CMDD_EMP BIT(5)
+#define MST_INT_CMDD_THR BIT(4)
+#define MST_INT_CMDD_OVF BIT(3)
+#define MST_INT_CMDR_THR BIT(2)
+#define MST_INT_CMDR_UNF BIT(1)
+#define MST_INT_CMDR_OVF BIT(0)
+
+#define MST_STATUS0 0x34
+#define MST_STATUS0_IDLE BIT(18)
+#define MST_STATUS0_HALTED BIT(17)
+#define MST_STATUS0_MASTER_MODE BIT(16)
+#define MST_STATUS0_TX_FULL BIT(13)
+#define MST_STATUS0_IBID_FULL BIT(12)
+#define MST_STATUS0_IBIR_FULL BIT(11)
+#define MST_STATUS0_RX_FULL BIT(10)
+#define MST_STATUS0_CMDD_FULL BIT(9)
+#define MST_STATUS0_CMDR_FULL BIT(8)
+#define MST_STATUS0_TX_EMP BIT(5)
+#define MST_STATUS0_IBID_EMP BIT(4)
+#define MST_STATUS0_IBIR_EMP BIT(3)
+#define MST_STATUS0_RX_EMP BIT(2)
+#define MST_STATUS0_CMDD_EMP BIT(1)
+#define MST_STATUS0_CMDR_EMP BIT(0)
+
+#define CMDR 0x38
+#define CMDR_NO_ERROR 0
+#define CMDR_DDR_PREAMBLE_ERROR 1
+#define CMDR_DDR_PARITY_ERROR 2
+#define CMDR_DDR_RX_FIFO_OVF 3
+#define CMDR_DDR_TX_FIFO_UNF 4
+#define CMDR_M0_ERROR 5
+#define CMDR_M1_ERROR 6
+#define CMDR_M2_ERROR 7
+#define CMDR_MST_ABORT 8
+#define CMDR_NACK_RESP 9
+#define CMDR_INVALID_DA 10
+#define CMDR_DDR_DROPPED 11
+#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
+#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
+#define CMDR_CMDID_HJACK_DISEC 0xfe
+#define CMDR_CMDID_HJACK_ENTDAA 0xff
+#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
+
+#define IBIR 0x3c
+#define IBIR_ACKED BIT(12)
+#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
+#define IBIR_ERROR BIT(7)
+#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
+#define IBIR_TYPE_IBI 0
+#define IBIR_TYPE_HJ 1
+#define IBIR_TYPE_MR 2
+#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
+
+#define SLV_IER 0x40
+#define SLV_IDR 0x44
+#define SLV_IMR 0x48
+#define SLV_ICR 0x4c
+#define SLV_ISR 0x50
+#define SLV_INT_TM BIT(20)
+#define SLV_INT_ERROR BIT(19)
+#define SLV_INT_EVENT_UP BIT(18)
+#define SLV_INT_HJ_DONE BIT(17)
+#define SLV_INT_MR_DONE BIT(16)
+#define SLV_INT_DA_UPD BIT(15)
+#define SLV_INT_SDR_FAIL BIT(14)
+#define SLV_INT_DDR_FAIL BIT(13)
+#define SLV_INT_M_RD_ABORT BIT(12)
+#define SLV_INT_DDR_RX_THR BIT(11)
+#define SLV_INT_DDR_TX_THR BIT(10)
+#define SLV_INT_SDR_RX_THR BIT(9)
+#define SLV_INT_SDR_TX_THR BIT(8)
+#define SLV_INT_DDR_RX_UNF BIT(7)
+#define SLV_INT_DDR_TX_OVF BIT(6)
+#define SLV_INT_SDR_RX_UNF BIT(5)
+#define SLV_INT_SDR_TX_OVF BIT(4)
+#define SLV_INT_DDR_RD_COMP BIT(3)
+#define SLV_INT_DDR_WR_COMP BIT(2)
+#define SLV_INT_SDR_RD_COMP BIT(1)
+#define SLV_INT_SDR_WR_COMP BIT(0)
+
+#define SLV_STATUS0 0x54
+#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
+#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
+
+#define SLV_STATUS1 0x58
+#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
+#define SLV_STATUS1_VEN_TM BIT(19)
+#define SLV_STATUS1_HJ_DIS BIT(18)
+#define SLV_STATUS1_MR_DIS BIT(17)
+#define SLV_STATUS1_PROT_ERR BIT(16)
+#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_HAS_DA BIT(8)
+#define SLV_STATUS1_DDR_RX_FULL BIT(7)
+#define SLV_STATUS1_DDR_TX_FULL BIT(6)
+#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
+#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
+#define SLV_STATUS1_SDR_RX_FULL BIT(3)
+#define SLV_STATUS1_SDR_TX_FULL BIT(2)
+#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
+#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
+
+#define CMD0_FIFO 0x60
+#define CMD0_FIFO_IS_DDR BIT(31)
+#define CMD0_FIFO_IS_CCC BIT(30)
+#define CMD0_FIFO_BCH BIT(29)
+#define XMIT_BURST_STATIC_SUBADDR 0
+#define XMIT_SINGLE_INC_SUBADDR 1
+#define XMIT_SINGLE_STATIC_SUBADDR 2
+#define XMIT_BURST_WITHOUT_SUBADDR 3
+#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
+#define CMD0_FIFO_SBCA BIT(26)
+#define CMD0_FIFO_RSBC BIT(25)
+#define CMD0_FIFO_IS_10B BIT(24)
+#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
+#define CMD0_FIFO_PL_LEN_MAX 4095
+#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
+#define CMD0_FIFO_RNW BIT(0)
+
+#define CMD1_FIFO 0x64
+#define CMD1_FIFO_CMDID(id) ((id) << 24)
+#define CMD1_FIFO_CSRADDR(a) (a)
+#define CMD1_FIFO_CCC(id) (id)
+
+#define TX_FIFO 0x68
+
+#define IMD_CMD0 0x70
+#define IMD_CMD0_PL_LEN(l) ((l) << 12)
+#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
+#define IMD_CMD0_RNW BIT(0)
+
+#define IMD_CMD1 0x74
+#define IMD_CMD1_CCC(id) (id)
+
+#define IMD_DATA 0x78
+#define RX_FIFO 0x80
+#define IBI_DATA_FIFO 0x84
+#define SLV_DDR_TX_FIFO 0x88
+#define SLV_DDR_RX_FIFO 0x8c
+
+#define CMD_IBI_THR_CTRL 0x90
+#define IBIR_THR(t) ((t) << 24)
+#define CMDR_THR(t) ((t) << 16)
+#define IBI_THR(t) ((t) << 8)
+#define CMD_THR(t) (t)
+
+#define TX_RX_THR_CTRL 0x94
+#define RX_THR(t) ((t) << 16)
+#define TX_THR(t) (t)
+
+#define SLV_DDR_TX_RX_THR_CTRL 0x98
+#define SLV_DDR_RX_THR(t) ((t) << 16)
+#define SLV_DDR_TX_THR(t) (t)
+
+#define FLUSH_CTRL 0x9c
+#define FLUSH_IBI_RESP BIT(23)
+#define FLUSH_CMD_RESP BIT(22)
+#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
+#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
+#define FLUSH_IMM_FIFO BIT(20)
+#define FLUSH_IBI_FIFO BIT(19)
+#define FLUSH_RX_FIFO BIT(18)
+#define FLUSH_TX_FIFO BIT(17)
+#define FLUSH_CMD_FIFO BIT(16)
+
+#define TTO_PRESCL_CTRL0 0xb0
+#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
+#define TTO_PRESCL_CTRL0_DIVA(x) (x)
+
+#define TTO_PRESCL_CTRL1 0xb4
+#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
+#define TTO_PRESCL_CTRL1_DIVA(x) (x)
+
+#define DEVS_CTRL 0xb8
+#define DEVS_CTRL_DEV_CLR_SHIFT 16
+#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
+#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
+#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
+#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
+#define MAX_DEVS 16
+
+#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
+#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
+#define DEV_ID_RR0_HDR_CAP BIT(10)
+#define DEV_ID_RR0_IS_I3C BIT(9)
+#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
+#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
+ (((a) & GENMASK(9, 7)) << 6))
+#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
+ (((x) >> 6) & GENMASK(9, 7)))
+
+#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
+#define DEV_ID_RR1_PID_MSB(pid) (pid)
+
+#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
+#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
+#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
+#define DEV_ID_RR2_DCR(dcr) (dcr)
+#define DEV_ID_RR2_LVR(lvr) (lvr)
+
+#define SIR_MAP(x) (0x180 + ((x) * 4))
+#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
+#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
+#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
+#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
+#define DEV_ROLE_SLAVE 0
+#define DEV_ROLE_MASTER 1
+#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
+#define SIR_MAP_DEV_SLOW BIT(13)
+#define SIR_MAP_DEV_PL(l) ((l) << 8)
+#define SIR_MAP_PL_MAX GENMASK(4, 0)
+#define SIR_MAP_DEV_DA(a) ((a) << 1)
+#define SIR_MAP_DEV_ACK BIT(0)
+
+#define GPIR_WORD(x) (0x200 + ((x) * 4))
+#define GPI_REG(val, id) \
+ (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
+
+#define GPOR_WORD(x) (0x220 + ((x) * 4))
+#define GPO_REG(val, id) \
+ (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
+
+#define ASF_INT_STATUS 0x300
+#define ASF_INT_RAW_STATUS 0x304
+#define ASF_INT_MASK 0x308
+#define ASF_INT_TEST 0x30c
+#define ASF_INT_FATAL_SELECT 0x310
+#define ASF_INTEGRITY_ERR BIT(6)
+#define ASF_PROTOCOL_ERR BIT(5)
+#define ASF_TRANS_TIMEOUT_ERR BIT(4)
+#define ASF_CSR_ERR BIT(3)
+#define ASF_DAP_ERR BIT(2)
+#define ASF_SRAM_UNCORR_ERR BIT(1)
+#define ASF_SRAM_CORR_ERR BIT(0)
+
+#define ASF_SRAM_CORR_FAULT_STATUS 0x320
+#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
+#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
+#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
+
+#define ASF_SRAM_FAULT_STATS 0x328
+#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
+#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
+
+#define ASF_TRANS_TOUT_CTRL 0x330
+#define ASF_TRANS_TOUT_EN BIT(31)
+#define ASF_TRANS_TOUT_VAL(x) (x)
+
+#define ASF_TRANS_TOUT_FAULT_MASK 0x334
+#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
+#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
+#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
+#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
+#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
+
+#define ASF_PROTO_FAULT_MASK 0x340
+#define ASF_PROTO_FAULT_STATUS 0x344
+#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
+#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
+#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
+#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
+#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
+#define ASF_PROTO_FAULT_M(x) BIT(x)
+
+struct cdns_i3c_master_caps {
+ u32 cmdfifodepth;
+ u32 cmdrfifodepth;
+ u32 txfifodepth;
+ u32 rxfifodepth;
+ u32 ibirfifodepth;
+};
+
+struct cdns_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct cdns_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ struct cdns_i3c_cmd cmds[];
+};
+
+struct cdns_i3c_data {
+ u8 thd_delay_ns;
+};
+
+struct cdns_i3c_master {
+ struct work_struct hj_work;
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ unsigned int maxdevs;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock;
+ } ibi;
+ struct {
+ struct list_head list;
+ struct cdns_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *sysclk;
+ struct clk *pclk;
+ struct cdns_i3c_master_caps caps;
+ unsigned long i3c_scl_lim;
+ const struct cdns_i3c_data *devdata;
+};
+
+static inline struct cdns_i3c_master *
+to_cdns_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct cdns_i3c_master, base);
+}
+
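+/*
+ * The TX FIFO is word based: push all full 32-bit words, then pack any
+ * trailing 1-3 bytes into one last word.
+ */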
+static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
+ const u8 *bytes, int nbytes)
+{
+ writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
+ writesl(master->regs + TX_FIFO, &tmp, 1);
+ }
+}
+
+static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ readsl(master->regs + RX_FIFO, &tmp, 1);
+ memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
+static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_DEFSLVS:
+ case I3C_CCC_ENTHDR(0):
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETACCMST:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
+{
+ u32 status;
+
+ writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
+
+ return readl_poll_timeout(master->regs + MST_STATUS0, status,
+ status & MST_STATUS0_IDLE, 10, 1000000);
+}
+
+static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
+{
+ writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
+}
+
+static struct cdns_i3c_xfer *
+cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
+{
+ struct cdns_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
+{
+ struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i;
+
+ if (!xfer)
+ return;
+
+ writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
+ cmd->tx_len);
+ }
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
+
+ writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
+ master->regs + CMD1_FIFO);
+ writel(cmd->cmd0, master->regs + CMD0_FIFO);
+ }
+
+ writel(readl(master->regs + CTRL) | CTRL_MCS,
+ master->regs + CTRL);
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
+}
+
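+/*
+ * Called with xferqueue.lock held. Once the command FIFO is empty, drain the
+ * command response FIFO, match each response to its command through the
+ * CMDID tag, record per-command errors, then complete the current transfer
+ * and start the next queued one.
+ */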
+static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
+ u32 isr)
+{
+ struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+ u32 status0;
+
+ if (!xfer)
+ return;
+
+ if (!(isr & MST_INT_CMDD_EMP))
+ return;
+
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
+
+ for (status0 = readl(master->regs + MST_STATUS0);
+ !(status0 & MST_STATUS0_CMDR_EMP);
+ status0 = readl(master->regs + MST_STATUS0)) {
+ struct cdns_i3c_cmd *cmd;
+ u32 cmdr, rx_len, id;
+
+ cmdr = readl(master->regs + CMDR);
+ id = CMDR_CMDID(cmdr);
+ if (id == CMDR_CMDID_HJACK_DISEC ||
+ id == CMDR_CMDID_HJACK_ENTDAA ||
+ WARN_ON(id >= xfer->ncmds))
+ continue;
+
+ cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
+ rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
+ cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ cmd->error = CMDR_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ switch (xfer->cmds[i].error) {
+ case CMDR_NO_ERROR:
+ break;
+
+ case CMDR_DDR_PREAMBLE_ERROR:
+ case CMDR_DDR_PARITY_ERROR:
+ case CMDR_M0_ERROR:
+ case CMDR_M1_ERROR:
+ case CMDR_M2_ERROR:
+ case CMDR_MST_ABORT:
+ case CMDR_NACK_RESP:
+ case CMDR_DDR_DROPPED:
+ ret = -EIO;
+ break;
+
+ case CMDR_DDR_RX_FIFO_OVF:
+ case CMDR_DDR_TX_FIFO_UNF:
+ ret = -ENOSPC;
+ break;
+
+ case CMDR_INVALID_DA:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct cdns_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ cdns_i3c_master_start_xfer_locked(master);
+}
+
+static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
+ struct cdns_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ cdns_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
+ struct cdns_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur == xfer) {
+ u32 status;
+
+ writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
+ master->regs + CTRL);
+ readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
+ status & MST_STATUS0_IDLE, 10,
+ 1000000);
+ master->xferqueue.cur = NULL;
+ writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
+ FLUSH_CMD_RESP,
+ master->regs + FLUSH_CTRL);
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
+ writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
+ master->regs + CTRL);
+ } else {
+ list_del_init(&xfer->node);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case CMDR_M0_ERROR:
+ return I3C_ERROR_M0;
+
+ case CMDR_M1_ERROR:
+ return I3C_ERROR_M1;
+
+ case CMDR_M2_ERROR:
+ case CMDR_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_xfer *xfer;
+ struct cdns_i3c_cmd *ccmd;
+ int ret;
+
+ xfer = cdns_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
+ ccmd->cmd0 = CMD0_FIFO_IS_CCC |
+ CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
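+ /*
+ * Queue the transfer; if it does not complete within 1s, unqueue it so
+ * xfer->ret keeps its -ETIMEDOUT default.
+ */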
+ cdns_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
+ cdns_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ int txslots = 0, rxslots = 0, i, ret;
+ struct cdns_i3c_xfer *cdns_xfer;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
+ return -ENOTSUPP;
+ }
+
+ if (!nxfers)
+ return 0;
+
+ if (nxfers > master->caps.cmdfifodepth ||
+ nxfers > master->caps.cmdrfifodepth)
+ return -ENOTSUPP;
+
+ /*
+ * First make sure that all transactions (block of transfers separated
+ * by a STOP marker) fit in the FIFOs.
+ */
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].rnw)
+ rxslots += DIV_ROUND_UP(xfers[i].len, 4);
+ else
+ txslots += DIV_ROUND_UP(xfers[i].len, 4);
+ }
+
+ if (rxslots > master->caps.rxfifodepth ||
+ txslots > master->caps.txfifodepth)
+ return -ENOTSUPP;
+
+ cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
+ if (!cdns_xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
+ u32 pl_len = xfers[i].len;
+
+ ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
+ CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ pl_len++;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
+
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= CMD0_FIFO_RSBC;
+
+ if (!i)
+ ccmd->cmd0 |= CMD0_FIFO_BCH;
+ }
+
+ cdns_i3c_master_queue_xfer(master, cdns_xfer);
+ if (!wait_for_completion_timeout(&cdns_xfer->comp,
+ msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
+
+ ret = cdns_xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
+
+ cdns_i3c_master_free_xfer(cdns_xfer);
+
+ return ret;
+}
+
+static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct cdns_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
+ return -ENOTSUPP;
+
+ if (xfers[i].flags & I2C_M_RD)
+ nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.txfifodepth ||
+ nrxwords > master->caps.rxfifodepth)
+ return -ENOTSUPP;
+
+ xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
+ CMD0_FIFO_PL_LEN(xfers[i].len) |
+ CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
+
+ if (xfers[i].flags & I2C_M_TEN)
+ ccmd->cmd0 |= CMD0_FIFO_IS_10B;
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+ }
+
+ cdns_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ cdns_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+struct cdns_i3c_i2c_dev_data {
+ u16 id;
+ s16 ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
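+/*
+ * Encode a device address for a DEV_ID_RR0 register: addr[6:0] goes to bits
+ * [7:1], addr[9:7] to bits [15:13], and bit 0 carries the inverted XOR
+ * (parity) of addr[6:0].
+ */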
+static u32 prepare_rr0_dev_address(u32 addr)
+{
+ u32 ret = (addr << 1) & 0xff;
+
+ /* RR0[7:1] = addr[6:0] */
+ ret |= (addr & GENMASK(6, 0)) << 1;
+
+ /* RR0[15:13] = addr[9:7] */
+ ret |= (addr & GENMASK(9, 7)) << 6;
+
+ /* RR0[0] = ~XOR(addr[6:0]) */
+ if (!(hweight8(addr & 0x7f) & 1))
+ ret |= 1;
+
+ return ret;
+}
+
+static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u32 rr;
+
+ rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
+ dev->info.dyn_addr :
+ dev->info.static_addr);
+ writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
+}
+
+static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
+ u8 dyn_addr)
+{
+ unsigned long activedevs;
+ u32 rr;
+ int i;
+
+ if (!dyn_addr) {
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+ }
+
+ activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ activedevs &= ~BIT(0);
+
+ for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
+ rr = readl(master->regs + DEV_ID_RR0(i));
+ if (!(rr & DEV_ID_RR0_IS_I3C) ||
+ DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
+ continue;
+
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ cdns_i3c_master_upd_i3c_addr(dev);
+
+ return 0;
+}
+
+static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data;
+ int slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->ibi = -1;
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
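+ /*
+ * No dynamic address yet: program the static address and activate the
+ * slot right away.
+ */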
+ if (!dev->info.dyn_addr) {
+ cdns_i3c_master_upd_i3c_addr(dev);
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_ACTIVE(data->id),
+ master->regs + DEVS_CTRL);
+ }
+
+ return 0;
+}
+
+static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_CLR(data->id),
+ master->regs + DEVS_CTRL);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = cdns_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(prepare_rr0_dev_address(dev->addr),
+ master->regs + DEV_ID_RR0(data->id));
+ writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_ACTIVE(data->id),
+ master->regs + DEVS_CTRL);
+
+ return 0;
+}
+
+static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_CLR(data->id),
+ master->regs + DEVS_CTRL);
+ master->free_rr_slots |= BIT(data->id);
+
+ i2c_dev_set_master_data(dev, NULL);
+ kfree(data);
+}
+
+static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+
+ cdns_i3c_master_disable(master);
+}
+
+static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 rr;
+
+ memset(info, 0, sizeof(*info));
+ rr = readl(master->regs + DEV_ID_RR0(slot));
+ info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
+ rr = readl(master->regs + DEV_ID_RR2(slot));
+ info->dcr = rr;
+ info->bcr = rr >> 8;
+ info->pid = rr >> 16;
+ info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
+}
+
+static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ unsigned long i3c_lim_period, pres_step, ncycles;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ unsigned long new_i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u32 prescl1, ctrl;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ unsigned long max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = 8000000;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = 6000000;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = 4000000;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = 2000000;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = 0;
+ break;
+ }
+
+ if (max_fscl &&
+ (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
+ new_i3c_scl_lim = max_fscl;
+ }
+
+ /* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
+ if (new_i3c_scl_lim == master->i3c_scl_lim)
+ return;
+ master->i3c_scl_lim = new_i3c_scl_lim;
+ if (!new_i3c_scl_lim)
+ return;
+ pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
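+ /* One prescaler step in ns: an I3C SCL period spans four prescaler cycles. */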
+
+ /* Configure PP_LOW to meet I3C slave limitations. */
+ prescl1 = readl(master->regs + PRESCL_CTRL1) &
+ ~PRESCL_CTRL1_PP_LOW_MASK;
+ ctrl = readl(master->regs + CTRL);
+
+ i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
+ ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
+ if (ncycles < 4)
+ ncycles = 0;
+ else
+ ncycles -= 4;
+
+ prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
+
+ /* Disable I3C master before updating PRESCL_CTRL1. */
+ if (ctrl & CTRL_DEV_EN)
+ cdns_i3c_master_disable(master);
+
+ writel(prescl1, master->regs + PRESCL_CTRL1);
+
+ if (ctrl & CTRL_DEV_EN)
+ cdns_i3c_master_enable(master);
+}
+
+static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned long olddevs, newdevs;
+ int ret, slot;
+ u8 addrs[MAX_DEVS] = { };
+ u8 last_addr = 0;
+
+ olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ olddevs |= BIT(0);
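+ /* RR slot 0 describes this master itself and is never handed out. */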
+
+ /* Prepare RR slots before launching DAA. */
+ for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ last_addr = ret;
+ addrs[slot] = last_addr;
+ writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
+ master->regs + DEV_ID_RR0(slot));
+ writel(0, master->regs + DEV_ID_RR1(slot));
+ writel(0, master->regs + DEV_ID_RR2(slot));
+ }
+
+ ret = i3c_master_entdaa_locked(&master->base);
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ newdevs &= ~olddevs;
+
+ /*
+ * Add the devices discovered during DAA to the I3C core. We already
+ * have the addresses assigned to them in the addrs array.
+ */
+ for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
+ i3c_master_add_i3c_dev_locked(m, addrs[slot]);
+
+ /*
+ * Clear slots that ended up not being used. Can be caused by I3C
+ * device creation failure or when the I3C device was already known
+ * by the system but with a different address (in this case the device
+ * already has a slot and does not need a new one).
+ */
+ writel(readl(master->regs + DEVS_CTRL) |
+ master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
+ master->regs + DEVS_CTRL);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ cdns_i3c_master_upd_i3c_scl_lim(master);
+
+ /* Re-enable Hot-Join and Mastership Request events with a broadcast ENEC. */
+ i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
+
+ return 0;
+}
+
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+ unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+ u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+ (NSEC_PER_SEC / sysclk_rate));
+
+ /* Values greater than THD_DELAY_MAX (3) are not valid. */
+ if (thd_delay > THD_DELAY_MAX)
+ thd_delay = THD_DELAY_MAX;
+
+ /* The CTRL_THD_DELAY field is encoded as (THD_DELAY_MAX - delay). */
+ return (THD_DELAY_MAX - thd_delay);
+}
+
+static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned long pres_step, sysclk_rate, max_i2cfreq;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u32 ctrl, prescl0, prescl1, pres, low;
+ struct i3c_device_info info = { };
+ int ret, ncycles;
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_PURE:
+ ctrl = CTRL_PURE_BUS_MODE;
+ break;
+
+ case I3C_BUS_MODE_MIXED_FAST:
+ ctrl = CTRL_MIXED_FAST_BUS_MODE;
+ break;
+
+ case I3C_BUS_MODE_MIXED_SLOW:
+ ctrl = CTRL_MIXED_SLOW_BUS_MODE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ sysclk_rate = clk_get_rate(master->sysclk);
+ if (!sysclk_rate)
+ return -EINVAL;
+
+ pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
+ if (pres > PRESCL_CTRL0_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
+
+ prescl0 = PRESCL_CTRL0_I3C(pres);
+
+ low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
+ prescl1 = PRESCL_CTRL1_OD_LOW(low);
+
+ max_i2cfreq = bus->scl_rate.i2c;
+
+ pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
+ if (pres > PRESCL_CTRL0_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
+
+ prescl0 |= PRESCL_CTRL0_I2C(pres);
+ writel(prescl0, master->regs + PRESCL_CTRL0);
+
+ /* Calculate OD and PP low. */
+ pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
+ ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
+ if (ncycles < 0)
+ ncycles = 0;
+ prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
+ writel(prescl1, master->regs + PRESCL_CTRL1);
+
+ /* Get an address for the master. */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
+ master->regs + DEV_ID_RR0(0));
+
+ cdns_i3c_master_dev_rr_to_info(master, 0, &info);
+ if (info.bcr & I3C_BCR_HDR_CAP)
+ info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ /*
+ * Acknowledge Hot-Join requests and, when one happens, disable all
+ * events coming from the hot-joining device.
+ *
+ * ENTDAA is issued afterwards from the Hot-Join workqueue handler.
+ */
+ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+ /*
+ * Configure data hold delay based on device-specific data.
+ *
+ * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
+ * master output. This setting allows to meet this timing on master's
+ * SoC outputs, regardless of PCB balancing.
+ */
+ ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
+ writel(ctrl, master->regs + CTRL);
+
+ cdns_i3c_master_enable(master);
+
+ return 0;
+}
+
+static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
+ u32 ibir)
+{
+ struct cdns_i3c_i2c_dev_data *data;
+ bool data_consumed = false;
+ struct i3c_ibi_slot *slot;
+ u32 id = IBIR_SLVID(ibir);
+ struct i3c_dev_desc *dev;
+ size_t nbytes;
+ u8 *buf;
+
+ /*
+ * FIXME: maybe we should report the FIFO OVF errors to the upper
+ * layer.
+ */
+ if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
+ goto out;
+
+ dev = master->ibi.slots[id];
+ spin_lock(&master->ibi.lock);
+
+ data = i3c_dev_get_master_data(dev);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ goto out_unlock;
+
+ buf = slot->data;
+
+ nbytes = IBIR_XFER_BYTES(ibir);
+ readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
+
+ memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+
+ slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
+ dev->ibi->max_payload_len);
+ i3c_master_queue_ibi(dev, slot);
+ data_consumed = true;
+
+out_unlock:
+ spin_unlock(&master->ibi.lock);
+
+out:
+ /* Drain the FIFO data if it hasn't been consumed already. */
+ if (!data_consumed) {
+ int i;
+
+ for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
+ readl(master->regs + IBI_DATA_FIFO);
+ }
+}
+
+static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
+{
+ u32 status0;
+
+ writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
+
+ for (status0 = readl(master->regs + MST_STATUS0);
+ !(status0 & MST_STATUS0_IBIR_EMP);
+ status0 = readl(master->regs + MST_STATUS0)) {
+ u32 ibir = readl(master->regs + IBIR);
+
+ switch (IBIR_TYPE(ibir)) {
+ case IBIR_TYPE_IBI:
+ cdns_i3c_master_handle_ibi(master, ibir);
+ break;
+
+ case IBIR_TYPE_HJ:
+ WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ queue_work(master->base.wq, &master->hj_work);
+ break;
+
+ case IBIR_TYPE_MR:
+ WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ default:
+ break;
+ }
+ }
+}
+
+static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
+{
+ struct cdns_i3c_master *master = data;
+ u32 status;
+
+ status = readl(master->regs + MST_ISR);
+ if (!(status & readl(master->regs + MST_IMR)))
+ return IRQ_NONE;
+
+ spin_lock(&master->xferqueue.lock);
+ cdns_i3c_master_end_xfer_locked(master, status);
+ spin_unlock(&master->xferqueue.lock);
+
+ if (status & MST_INT_IBIR_THR)
+ cnds_i3c_master_demux_ibis(master);
+
+ return IRQ_HANDLED;
+}
+
+static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ u32 sirmap;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi,
+ SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ return ret;
+}
+
+static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ u32 sircfg, sirmap;
+ int ret;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
+ SIR_MAP_DEV_DA(dev->info.dyn_addr) |
+ SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
+ SIR_MAP_DEV_ACK;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
+ sircfg |= SIR_MAP_DEV_SLOW;
+
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+ if (ret) {
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi,
+ SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+ }
+
+ return ret;
+}
+
+static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ master->ibi.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
+ .bus_init = cdns_i3c_master_bus_init,
+ .bus_cleanup = cdns_i3c_master_bus_cleanup,
+ .do_daa = cdns_i3c_master_do_daa,
+ .attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
+ .supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
+ .priv_xfers = cdns_i3c_master_priv_xfers,
+ .i2c_xfers = cdns_i3c_master_i2c_xfers,
+ .enable_ibi = cdns_i3c_master_enable_ibi,
+ .disable_ibi = cdns_i3c_master_disable_ibi,
+ .request_ibi = cdns_i3c_master_request_ibi,
+ .free_ibi = cdns_i3c_master_free_ibi,
+ .recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
+};
+
+static void cdns_i3c_master_hj(struct work_struct *work)
+{
+ struct cdns_i3c_master *master = container_of(work,
+ struct cdns_i3c_master,
+ hj_work);
+
+ i3c_master_do_daa(&master->base);
+}
+
+static struct cdns_i3c_data cdns_i3c_devdata = {
+ .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+ { /* sentinel */ },
+};
+
+static int cdns_i3c_master_probe(struct platform_device *pdev)
+{
+ struct cdns_i3c_master *master;
+ int ret, irq;
+ u32 val;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->devdata = of_device_get_match_data(&pdev->dev);
+ if (!master->devdata)
+ return -EINVAL;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
+ master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
+ if (IS_ERR(master->sysclk))
+ return PTR_ERR(master->sysclk);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->sysclk);
+ if (ret)
+ goto err_disable_pclk;
+
+ if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
+ ret = -EINVAL;
+ goto err_disable_sysclk;
+ }
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
+ writel(0xffffffff, master->regs + MST_IDR);
+ writel(0xffffffff, master->regs + SLV_IDR);
+ ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_disable_sysclk;
+
+ platform_set_drvdata(pdev, master);
+
+ val = readl(master->regs + CONF_STATUS0);
+
+ /* Device ID0 is reserved to describe this master. */
+ master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ master->free_rr_slots = GENMASK(master->maxdevs, 1);
+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ val = readl(master->regs + CONF_STATUS1);
+ master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
+ goto err_disable_sysclk;
+ }
+
+ writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
+ writel(MST_INT_IBIR_THR, master->regs + MST_IER);
+ writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
+
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &cdns_i3c_master_ops, false);
+ if (ret)
+ goto err_disable_sysclk;
+
+ return 0;
+
+err_disable_sysclk:
+ clk_disable_unprepare(master->sysclk);
+
+err_disable_pclk:
+ clk_disable_unprepare(master->pclk);
+
+ return ret;
+}
+
+static int cdns_i3c_master_remove(struct platform_device *pdev)
+{
+ struct cdns_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(master->sysclk);
+ clk_disable_unprepare(master->pclk);
+
+ return 0;
+}
+
+static struct platform_driver cdns_i3c_master = {
+ .probe = cdns_i3c_master_probe,
+ .remove = cdns_i3c_master_remove,
+ .driver = {
+ .name = "cdns-i3c-master",
+ .of_match_table = cdns_i3c_master_of_ids,
+ },
+};
+module_platform_driver(cdns_i3c_master);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("Cadence I3C master driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cdns-i3c-master");