path: root/drivers/i3c/master
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:49:45 +0000
commit2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/i3c/master
parentInitial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  drivers/i3c/master.c                              | 2847
-rw-r--r--  drivers/i3c/master/Kconfig                        |   45
-rw-r--r--  drivers/i3c/master/Makefile                       |    5
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c                | 1218
-rw-r--r--  drivers/i3c/master/i3c-master-cdns.c              | 1693
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/Makefile          |    6
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd.h             |   67
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd_v1.c          |  378
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/cmd_v2.c          |  316
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/core.c            |  793
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dat.h             |   32
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dat_v1.c          |  191
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dct.h             |   16
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dct_v1.c          |   36
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/dma.c             |  784
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ext_caps.c        |  308
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ext_caps.h        |   19
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/hci.h             |  144
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/ibi.h             |   42
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/pio.c             | 1041
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h  |   79
-rw-r--r--  drivers/i3c/master/svc-i3c-master.c               | 1703
22 files changed, 11763 insertions(+), 0 deletions(-)
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
new file mode 100644
index 000000000..ab0b5691b
--- /dev/null
+++ b/drivers/i3c/master.c
@@ -0,0 +1,2847 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "internals.h"
+
+static DEFINE_IDR(i3c_bus_idr);
+static DEFINE_MUTEX(i3c_core_lock);
+
+/**
+ * i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock so that no other operations can occur on
+ * the bus. This is needed for all kinds of bus maintenance operations, like
+ * - enabling/disabling slave events
+ * - re-triggering DAA
+ * - changing the dynamic address of a device
+ * - relinquishing mastership
+ * - ...
+ *
+ * The reason for this kind of locking is that we don't want drivers and core
+ * logic to rely on I3C device information that could be changed behind their
+ * backs.
+ */
+static void i3c_bus_maintenance_lock(struct i3c_bus *bus)
+{
+ down_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_maintenance_unlock - Release the bus lock after a maintenance
+ * operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when the bus maintenance operation is done. See
+ * i3c_bus_maintenance_lock() for more details on what these maintenance
+ * operations are.
+ */
+static void i3c_bus_maintenance_unlock(struct i3c_bus *bus)
+{
+ up_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_lock - Lock the bus for a normal operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock for any operation that is not a maintenance
+ * operation (see i3c_bus_maintenance_lock() for a non-exhaustive list of
+ * maintenance operations). Basically all communications with I3C devices are
+ * normal operations (HDR, SDR transfers or CCC commands that do not change bus
+ * state or I3C dynamic address).
+ *
+ * Note that this lock does not guarantee serialization of normal operations:
+ * transfer requests can be submitted to the I3C master in parallel, and I3C
+ * master drivers have to use their own locking to make sure two different
+ * communications are not inter-mixed, and that the output/input queues are
+ * not accessed while the engine is busy.
+ */
+void i3c_bus_normaluse_lock(struct i3c_bus *bus)
+{
+ down_read(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_unlock - Release the bus lock after a normal operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when a normal operation is done. See
+ * i3c_bus_normaluse_lock() for more details on what these normal operations
+ * are.
+ */
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
+{
+ up_read(&bus->lock);
+}
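+
+/*
+ * A minimal usage sketch (illustrative only, not part of the core code):
+ * regular transfers take the read side of the lock, while bus-state changes
+ * take the write side:
+ *
+ *    i3c_bus_normaluse_lock(bus);
+ *    ret = i3c_dev_do_priv_xfers_locked(dev, xfers, nxfers);
+ *    i3c_bus_normaluse_unlock(bus);
+ *
+ *    i3c_bus_maintenance_lock(bus);
+ *    ret = i3c_master_entdaa_locked(master);
+ *    i3c_bus_maintenance_unlock(bus);
+ */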
+
+static struct i3c_master_controller *
+i3c_bus_to_i3c_master(struct i3c_bus *i3cbus)
+{
+ return container_of(i3cbus, struct i3c_master_controller, bus);
+}
+
+static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+{
+ return container_of(dev, struct i3c_master_controller, dev);
+}
+
+static const struct device_type i3c_device_type;
+
+static struct i3c_bus *dev_to_i3cbus(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->bus;
+
+ master = dev_to_i3cmaster(dev);
+
+ return &master->bus;
+}
+
+static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->desc;
+
+ master = dev_to_i3cmaster(dev);
+
+ return master->this;
+}
+
+static ssize_t bcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.bcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(bcr);
+
+static ssize_t dcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.dcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dcr);
+
+static ssize_t pid_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%llx\n", desc->info.pid);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(pid);
+
+static ssize_t dynamic_address_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dynamic_address);
+
+static const char * const hdrcap_strings[] = {
+ "hdr-ddr", "hdr-tsp", "hdr-tsl",
+};
+
+static ssize_t hdrcap_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t offset = 0, ret;
+ unsigned long caps;
+ int mode;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ caps = desc->info.hdr_cap;
+ for_each_set_bit(mode, &caps, 8) {
+ if (mode >= ARRAY_SIZE(hdrcap_strings))
+ break;
+
+ if (!hdrcap_strings[mode])
+ continue;
+
+ ret = sprintf(buf + offset, offset ? " %s" : "%s",
+ hdrcap_strings[mode]);
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ }
+
+ ret = sprintf(buf + offset, "\n");
+ if (ret < 0)
+ goto out;
+
+ ret = offset + ret;
+
+out:
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(hdrcap);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3c, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr,
+ manuf);
+
+ return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+ devinfo.dcr, manuf, part, ext);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *i3c_device_attrs[] = {
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_device);
+
+static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return add_uevent_var(env, "MODALIAS=i3c:dcr%02Xmanuf%04X",
+ devinfo.dcr, manuf);
+
+ return add_uevent_var(env,
+ "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+ devinfo.dcr, manuf, part, ext);
+}
+
+static const struct device_type i3c_device_type = {
+ .groups = i3c_device_groups,
+ .uevent = i3c_device_uevent,
+};
+
+static int i3c_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct i3c_device *i3cdev;
+ struct i3c_driver *i3cdrv;
+
+ if (dev->type != &i3c_device_type)
+ return 0;
+
+ i3cdev = dev_to_i3cdev(dev);
+ i3cdrv = drv_to_i3cdrv(drv);
+ if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+ return 1;
+
+ return 0;
+}
+
+static int i3c_device_probe(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+
+ return driver->probe(i3cdev);
+}
+
+static void i3c_device_remove(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+
+ if (driver->remove)
+ driver->remove(i3cdev);
+
+ i3c_device_free_ibi(i3cdev);
+}
+
+struct bus_type i3c_bus_type = {
+ .name = "i3c",
+ .match = i3c_device_match,
+ .probe = i3c_device_probe,
+ .remove = i3c_device_remove,
+};
+
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+ unsigned long status;
+ int bitpos = addr * 2;
+
+ if (addr > I2C_MAX_ADDR)
+ return I3C_ADDR_SLOT_RSVD;
+
+ status = bus->addrslots[bitpos / BITS_PER_LONG];
+ status >>= bitpos % BITS_PER_LONG;
+
+ return status & I3C_ADDR_SLOT_STATUS_MASK;
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status)
+{
+ int bitpos = addr * 2;
+ unsigned long *ptr;
+
+ if (addr > I2C_MAX_ADDR)
+ return;
+
+ ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
+ *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+ (bitpos % BITS_PER_LONG));
+ *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
+}
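+
+/*
+ * Worked example of the 2-bit slot encoding used above (assuming 64-bit
+ * longs): address 0x50 occupies bits 160-161, i.e. bits 32-33 of
+ * addrslots[2]:
+ *
+ *    bitpos = 0x50 * 2;                  // 160
+ *    word   = bitpos / BITS_PER_LONG;    // 2
+ *    shift  = bitpos % BITS_PER_LONG;    // 32
+ *    status = (bus->addrslots[word] >> shift) & I3C_ADDR_SLOT_STATUS_MASK;
+ */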
+
+static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+{
+ enum i3c_addr_slot_status status;
+
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+
+ return status == I3C_ADDR_SLOT_FREE;
+}
+
+static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
+{
+ enum i3c_addr_slot_status status;
+ u8 addr;
+
+ for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+ if (status == I3C_ADDR_SLOT_FREE)
+ return addr;
+ }
+
+ return -ENOMEM;
+}
+
+static void i3c_bus_init_addrslots(struct i3c_bus *bus)
+{
+ int i;
+
+ /* Addresses 0 to 7 are reserved. */
+ for (i = 0; i < 8; i++)
+ i3c_bus_set_addr_slot_status(bus, i, I3C_ADDR_SLOT_RSVD);
+
+ /*
+ * Reserve broadcast address and all addresses that might collide
+ * with the broadcast address when facing a single bit error.
+ */
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR,
+ I3C_ADDR_SLOT_RSVD);
+ for (i = 0; i < 7; i++)
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR ^ BIT(i),
+ I3C_ADDR_SLOT_RSVD);
+}
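+
+/*
+ * For reference, the loop above reserves the broadcast address (0x7e) plus
+ * every address at Hamming distance 1 from it: 0x7f, 0x7c, 0x7a, 0x76,
+ * 0x6e, 0x5e and 0x3e.
+ */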
+
+static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
+{
+ mutex_lock(&i3c_core_lock);
+ idr_remove(&i3c_bus_idr, i3cbus->id);
+ mutex_unlock(&i3c_core_lock);
+}
+
+static int i3c_bus_init(struct i3c_bus *i3cbus)
+{
+ int ret;
+
+ init_rwsem(&i3cbus->lock);
+ INIT_LIST_HEAD(&i3cbus->devs.i2c);
+ INIT_LIST_HEAD(&i3cbus->devs.i3c);
+ i3c_bus_init_addrslots(i3cbus);
+ i3cbus->mode = I3C_BUS_MODE_PURE;
+
+ mutex_lock(&i3c_core_lock);
+ ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+ mutex_unlock(&i3c_core_lock);
+
+ if (ret < 0)
+ return ret;
+
+ i3cbus->id = ret;
+
+ return 0;
+}
+
+static const char * const i3c_bus_mode_strings[] = {
+ [I3C_BUS_MODE_PURE] = "pure",
+ [I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+ [I3C_BUS_MODE_MIXED_LIMITED] = "mixed-limited",
+ [I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
+};
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ if (i3cbus->mode < 0 ||
+ i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
+ !i3c_bus_mode_strings[i3cbus->mode])
+ ret = sprintf(buf, "unknown\n");
+ else
+ ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(mode);
+
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
+ i3cbus->cur_master->info.pid);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(current_master);
+
+static ssize_t i3c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i3c_scl_frequency);
+
+static ssize_t i2c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i2c_scl_frequency);
+
+static struct attribute *i3c_masterdev_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_current_master.attr,
+ &dev_attr_i3c_scl_frequency.attr,
+ &dev_attr_i2c_scl_frequency.attr,
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_masterdev);
+
+static void i3c_masterdev_release(struct device *dev)
+{
+ struct i3c_master_controller *master = dev_to_i3cmaster(dev);
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+
+ if (master->wq)
+ destroy_workqueue(master->wq);
+
+ WARN_ON(!list_empty(&bus->devs.i2c) || !list_empty(&bus->devs.i3c));
+ i3c_bus_cleanup(bus);
+
+ of_node_put(dev->of_node);
+}
+
+static const struct device_type i3c_masterdev_type = {
+ .groups = i3c_masterdev_groups,
+};
+
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
+{
+ struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
+
+ i3cbus->mode = mode;
+
+ switch (i3cbus->mode) {
+ case I3C_BUS_MODE_PURE:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ if (!i3cbus->scl_rate.i3c ||
+ i3cbus->scl_rate.i3c > i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i3c = i3cbus->scl_rate.i2c;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&master->dev, "i2c-scl = %ld Hz i3c-scl = %ld Hz\n",
+ i3cbus->scl_rate.i2c, i3cbus->scl_rate.i3c);
+
+ /*
+ * The I3C/I2C frequencies may have been overridden; check that the
+ * user-provided values do not exceed the maximum possible frequencies.
+ */
+ if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ return -EINVAL;
+
+ return 0;
+}
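+
+/*
+ * Example of the fallback logic above (rates are illustrative): on a
+ * mixed-slow bus with no DT overrides and a slowest I2C device running at
+ * 100 kHz, i3c_bus_set_mode() resolves to:
+ *
+ *    i3cbus->scl_rate.i2c = 100000;  // max_i2c_scl_rate derived from LVRs
+ *    i3cbus->scl_rate.i3c = 100000;  // capped to the I2C rate (mixed-slow)
+ */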
+
+static struct i3c_master_controller *
+i2c_adapter_to_i3c_master(struct i2c_adapter *adap)
+{
+ return container_of(adap, struct i3c_master_controller, i2c);
+}
+
+static struct i2c_adapter *
+i3c_master_to_i2c_adapter(struct i3c_master_controller *master)
+{
+ return &master->i2c;
+}
+
+static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i2c_dev_desc *
+i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
+ u16 addr, u8 lvr)
+{
+ struct i2c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->addr = addr;
+ dev->lvr = lvr;
+
+ return dev;
+}
+
+static void *i3c_ccc_cmd_dest_init(struct i3c_ccc_cmd_dest *dest, u8 addr,
+ u16 payloadlen)
+{
+ dest->addr = addr;
+ dest->payload.len = payloadlen;
+ if (payloadlen)
+ dest->payload.data = kzalloc(payloadlen, GFP_KERNEL);
+ else
+ dest->payload.data = NULL;
+
+ return dest->payload.data;
+}
+
+static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
+{
+ kfree(dest->payload.data);
+}
+
+static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
+ struct i3c_ccc_cmd_dest *dests,
+ unsigned int ndests)
+{
+ cmd->rnw = rnw ? 1 : 0;
+ cmd->id = id;
+ cmd->dests = dests;
+ cmd->ndests = ndests;
+ cmd->err = I3C_ERROR_UNKNOWN;
+}
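+
+/*
+ * The three helpers above are typically combined as follows (sketch based
+ * on the GET* helpers later in this file):
+ *
+ *    struct i3c_ccc_cmd_dest dest;
+ *    struct i3c_ccc_getbcr *getbcr;
+ *    struct i3c_ccc_cmd cmd;
+ *
+ *    getbcr = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*getbcr));
+ *    if (!getbcr)
+ *            return -ENOMEM;
+ *
+ *    i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
+ *    ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ *    i3c_ccc_cmd_dest_cleanup(&dest);
+ */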
+
+static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd)
+{
+ int ret;
+
+ if (!cmd || !master)
+ return -EINVAL;
+
+ if (WARN_ON(master->init_done &&
+ !rwsem_is_locked(&master->bus.lock)))
+ return -EINVAL;
+
+ if (!master->ops->send_ccc_cmd)
+ return -ENOTSUPP;
+
+ if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
+ return -EINVAL;
+
+ if (master->ops->supports_ccc_cmd &&
+ !master->ops->supports_ccc_cmd(master, cmd))
+ return -ENOTSUPP;
+
+ ret = master->ops->send_ccc_cmd(master, cmd);
+ if (ret) {
+ if (cmd->err != I3C_ERROR_UNKNOWN)
+ return cmd->err;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct i2c_dev_desc *
+i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
+ u16 addr)
+{
+ struct i2c_dev_desc *dev;
+
+ i3c_bus_for_each_i2cdev(&master->bus, dev) {
+ if (dev->addr == addr)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_get_free_addr() - get a free address on the bus
+ * @master: I3C master object
+ * @start_addr: where to start searching
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: the first free address starting at @start_addr (inclusive) or
+ * -ENOMEM if no more addresses are available.
+ */
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr)
+{
+ return i3c_bus_get_free_addr(&master->bus, start_addr);
+}
+EXPORT_SYMBOL_GPL(i3c_master_get_free_addr);
+
+static void i3c_device_release(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+
+ WARN_ON(i3cdev->desc);
+
+ of_node_put(i3cdev->dev.of_node);
+ kfree(i3cdev);
+}
+
+static void i3c_master_free_i3c_dev(struct i3c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i3c_dev_desc *
+i3c_master_alloc_i3c_dev(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->info = *info;
+ mutex_init(&dev->ibi_lock);
+
+ return dev;
+}
+
+static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ enum i3c_addr_slot_status addrstat;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ addrstat = i3c_bus_get_addr_slot_status(&master->bus, addr);
+ if (addr != I3C_BROADCAST_ADDR && addrstat != I3C_ADDR_SLOT_I3C_DEV)
+ return -EINVAL;
+
+ i3c_ccc_cmd_dest_init(&dest, addr, 0);
+ i3c_ccc_cmd_init(&cmd, false,
+ I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_entdaa_locked() - start a DAA (Dynamic Address Assignment)
+ * procedure
+ * @master: master used to send frames on the bus
+ *
+ * Send an ENTDAA CCC command to start a DAA procedure.
+ *
+ * Note that this function only sends the ENTDAA CCC command; all the logic
+ * behind dynamic address assignment has to be handled in the I3C master
+ * driver.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_entdaa_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_entdaa_locked);
+
+static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
+ u8 addr, bool enable, u8 evts)
+{
+ struct i3c_ccc_events *events;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ events = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*events));
+ if (!events)
+ return -ENOMEM;
+
+ events->events = evts;
+ i3c_ccc_cmd_init(&cmd, false,
+ enable ?
+ I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
+ I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_disec_locked() - send a DISEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to disable
+ *
+ * Send a DISEC CCC command to disable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, false, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disec_locked);
+
+/**
+ * i3c_master_enec_locked() - send an ENEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to enable
+ *
+ * Sends an ENEC CCC command to enable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, true, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enec_locked);
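+
+/*
+ * Usage sketch: the core disables all slave events before DAA (see
+ * i3c_master_bus_init() below), and drivers can then re-enable events
+ * selectively, e.g.:
+ *
+ *    i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
+ *                            I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
+ *                            I3C_CCC_EVENT_HJ);
+ *    i3c_master_enec_locked(master, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+ */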
+
+/**
+ * i3c_master_defslvs_locked() - send a DEFSLVS CCC command
+ * @master: master used to send frames on the bus
+ *
+ * Send a DEFSLVS CCC command containing all the devices known to the @master.
+ * This is useful when you have secondary masters on the bus to propagate
+ * device information.
+ *
+ * This should be called after all I3C devices have been discovered (in other
+ * words, after the DAA procedure has finished) and instantiated in
+ * &i3c_master_controller_ops->bus_init().
+ * It should also be called if a master ACKed a Hot-Join request and assigned
+ * a dynamic address to the device joining the bus.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_defslvs *defslvs;
+ struct i3c_ccc_dev_desc *desc;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_dev_desc *i3cdev;
+ struct i2c_dev_desc *i2cdev;
+ struct i3c_ccc_cmd cmd;
+ struct i3c_bus *bus;
+ bool send = false;
+ int ndevs = 0, ret;
+
+ if (!master)
+ return -EINVAL;
+
+ bus = i3c_master_get_bus(master);
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ ndevs++;
+
+ if (i3cdev == master->this)
+ continue;
+
+ if (I3C_BCR_DEVICE_ROLE(i3cdev->info.bcr) ==
+ I3C_BCR_I3C_MASTER)
+ send = true;
+ }
+
+ /* No other master on the bus, skip DEFSLVS. */
+ if (!send)
+ return 0;
+
+ i3c_bus_for_each_i2cdev(bus, i2cdev)
+ ndevs++;
+
+ defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
+ struct_size(defslvs, slaves,
+ ndevs - 1));
+ if (!defslvs)
+ return -ENOMEM;
+
+ defslvs->count = ndevs;
+ defslvs->master.bcr = master->this->info.bcr;
+ defslvs->master.dcr = master->this->info.dcr;
+ defslvs->master.dyn_addr = master->this->info.dyn_addr << 1;
+ defslvs->master.static_addr = I3C_BROADCAST_ADDR << 1;
+
+ desc = defslvs->slaves;
+ i3c_bus_for_each_i2cdev(bus, i2cdev) {
+ desc->lvr = i2cdev->lvr;
+ desc->static_addr = i2cdev->addr << 1;
+ desc++;
+ }
+
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ /* Skip the I3C dev representing this master. */
+ if (i3cdev == master->this)
+ continue;
+
+ desc->bcr = i3cdev->info.bcr;
+ desc->dcr = i3cdev->info.dcr;
+ desc->dyn_addr = i3cdev->info.dyn_addr << 1;
+ desc->static_addr = i3cdev->info.static_addr << 1;
+ desc++;
+ }
+
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
+
+static int i3c_master_setda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr, bool setdasa)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_setda *setda;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!oldaddr || !newaddr)
+ return -EINVAL;
+
+ setda = i3c_ccc_cmd_dest_init(&dest, oldaddr, sizeof(*setda));
+ if (!setda)
+ return -ENOMEM;
+
+ setda->addr = newaddr << 1;
+ i3c_ccc_cmd_init(&cmd, false,
+ setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
+ u8 static_addr, u8 dyn_addr)
+{
+ return i3c_master_setda_locked(master, static_addr, dyn_addr, true);
+}
+
+static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr)
+{
+ return i3c_master_setda_locked(master, oldaddr, newaddr, false);
+}
+
+static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mrl *mrl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
+ if (!mrl)
+ return -ENOMEM;
+
+ /*
+ * When the device does not have an IBI payload, GETMRL only returns 2
+ * bytes of data.
+ */
+ if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
+ dest.payload.len -= 1;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ switch (dest.payload.len) {
+ case 3:
+ info->max_ibi_len = mrl->ibi_len;
+ fallthrough;
+ case 2:
+ info->max_read_len = be16_to_cpu(mrl->read_len);
+ break;
+ default:
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mwl *mwl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
+ if (!mwl)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != sizeof(*mwl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_write_len = be16_to_cpu(mwl->len);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getmxds *getmaxds;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getmaxds = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*getmaxds));
+ if (!getmaxds)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 2 && dest.payload.len != 5) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_read_ds = getmaxds->maxrd;
+ info->max_write_ds = getmaxds->maxwr;
+ if (dest.payload.len == 5)
+ info->max_read_turnaround = getmaxds->maxrdturn[0] |
+ ((u32)getmaxds->maxrdturn[1] << 8) |
+ ((u32)getmaxds->maxrdturn[2] << 16);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_gethdrcap *gethdrcap;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ gethdrcap = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*gethdrcap));
+ if (!gethdrcap)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 1) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->hdr_cap = gethdrcap->modes;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getpid_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getpid *getpid;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret, i;
+
+ getpid = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getpid));
+ if (!getpid)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->pid = 0;
+ for (i = 0; i < sizeof(getpid->pid); i++) {
+ int sft = (sizeof(getpid->pid) - i - 1) * 8;
+
+ info->pid |= (u64)getpid->pid[i] << sft;
+ }
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
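+
+/*
+ * The loop above assembles the 48-bit PID MSB first. For example (bytes
+ * made up for illustration), pid[] = { 0x02, 0x3b, 0x10, 0x00, 0x9d, 0x00 }
+ * yields info->pid = 0x023b10009d00.
+ */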
+
+static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getbcr *getbcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getbcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getbcr));
+ if (!getbcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->bcr = getbcr->bcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getdcr *getdcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getdcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getdcr));
+ if (!getdcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->dcr = getdcr->dcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status slot_status;
+ int ret;
+
+ if (!dev->info.dyn_addr)
+ return -EINVAL;
+
+ slot_status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (slot_status == I3C_ADDR_SLOT_RSVD ||
+ slot_status == I3C_ADDR_SLOT_I2C_DEV)
+ return -EINVAL;
+
+ ret = i3c_master_getpid_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getbcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getdcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
+ ret = i3c_master_getmxds_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ dev->info.max_ibi_len = 1;
+
+ i3c_master_getmrl_locked(master, &dev->info);
+ i3c_master_getmwl_locked(master, &dev->info);
+
+ if (dev->info.bcr & I3C_BCR_HDR_CAP) {
+ ret = i3c_master_gethdrcap_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ if (dev->info.dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+}
+
+static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ if (dev->info.static_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.static_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ /*
+ * The ->init_dyn_addr slot should already have been reserved by this
+ * point, so if we're applying a pre-reserved dynamic address we must not
+ * try to reserve the address slot a second time.
+ */
+ if (dev->info.dyn_addr &&
+ (!dev->boardinfo ||
+ dev->boardinfo->init_dyn_addr != dev->info.dyn_addr)) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ goto err_release_static_addr;
+
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ return 0;
+
+err_release_static_addr:
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ return -EBUSY;
+}
+
+static int i3c_master_attach_i3c_dev(struct i3c_master_controller *master,
+ struct i3c_dev_desc *dev)
+{
+ int ret;
+
+ /*
+ * We don't attach devices to the controller until they are
+ * addressable on the bus.
+ */
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ ret = i3c_master_get_i3c_addrs(dev);
+ if (ret)
+ return ret;
+
+ /* Do not attach the master device itself. */
+ if (master->this != dev && master->ops->attach_i3c_dev) {
+ ret = master->ops->attach_i3c_dev(dev);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i3c);
+
+ return 0;
+}
+
+static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+ int ret;
+
+ if (dev->info.dyn_addr != old_dyn_addr &&
+ (!dev->boardinfo ||
+ dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ if (old_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, old_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+ }
+
+ if (master->ops->reattach_i3c_dev) {
+ ret = master->ops->reattach_i3c_dev(dev, old_dyn_addr);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ /* Do not detach the master device itself. */
+ if (master->this != dev && master->ops->detach_i3c_dev)
+ master->ops->detach_i3c_dev(dev);
+
+ i3c_master_put_i3c_addrs(dev);
+ list_del(&dev->common.node);
+}
+
+static int i3c_master_attach_i2c_dev(struct i3c_master_controller *master,
+ struct i2c_dev_desc *dev)
+{
+ int ret;
+
+ if (master->ops->attach_i2c_dev) {
+ ret = master->ops->attach_i2c_dev(dev);
+ if (ret)
+ return ret;
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i2c);
+
+ return 0;
+}
+
+static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i2c_dev_get_master(dev);
+
+ list_del(&dev->common.node);
+
+ if (master->ops->detach_i2c_dev)
+ master->ops->detach_i2c_dev(dev);
+}
+
+static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
+ struct i3c_dev_boardinfo *boardinfo)
+{
+ struct i3c_device_info info = {
+ .static_addr = boardinfo->static_addr,
+ };
+ struct i3c_dev_desc *i3cdev;
+ int ret;
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(i3cdev))
+ return -ENOMEM;
+
+ i3cdev->boardinfo = boardinfo;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = i3c_master_setdasa_locked(master, i3cdev->info.static_addr,
+ i3cdev->boardinfo->init_dyn_addr);
+ if (ret)
+ goto err_detach_dev;
+
+ i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(i3cdev, 0);
+ if (ret)
+ goto err_rstdaa;
+
+ ret = i3c_master_retrieve_dev_info(i3cdev);
+ if (ret)
+ goto err_rstdaa;
+
+ return 0;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, i3cdev->boardinfo->init_dyn_addr);
+err_detach_dev:
+ i3c_master_detach_i3c_dev(i3cdev);
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
+}
+
+static void
+i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *desc;
+ int ret;
+
+ if (!master->init_done)
+ return;
+
+ i3c_bus_for_each_i3cdev(&master->bus, desc) {
+ if (desc->dev || !desc->info.dyn_addr || desc == master->this)
+ continue;
+
+ desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
+ if (!desc->dev)
+ continue;
+
+ desc->dev->bus = &master->bus;
+ desc->dev->desc = desc;
+ desc->dev->dev.parent = &master->dev;
+ desc->dev->dev.type = &i3c_device_type;
+ desc->dev->dev.bus = &i3c_bus_type;
+ desc->dev->dev.release = i3c_device_release;
+ dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id,
+ desc->info.pid);
+
+ if (desc->boardinfo)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+ if (ret) {
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
+ put_device(&desc->dev->dev);
+ }
+ }
+}
+
+/**
+ * i3c_master_do_daa() - do a DAA (Dynamic Address Assignment)
+ * @master: master doing the DAA
+ *
+ * This function triggers the controller's DAA procedure
+ * (&i3c_master_controller_ops->do_daa()) with the bus lock held in
+ * maintenance mode, then registers any newly discovered I3C device. All
+ * device information is automatically retrieved using standard CCC
+ * commands.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_do_daa(struct i3c_master_controller *master)
+{
+ int ret;
+
+ i3c_bus_maintenance_lock(&master->bus);
+ ret = master->ops->do_daa(master);
+ i3c_bus_maintenance_unlock(&master->bus);
+
+ if (ret)
+ return ret;
+
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_do_daa);
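+
+/*
+ * A driver's &i3c_master_controller_ops->do_daa() hook is expected to run
+ * ENTDAA and report every newly assigned address back to the core. Minimal
+ * sketch (hypothetical foo_* hardware helper, error handling elided):
+ *
+ *    static int foo_do_daa(struct i3c_master_controller *m)
+ *    {
+ *            u8 addr;
+ *
+ *            while (!foo_hw_assign_one_addr(m, &addr))
+ *                    i3c_master_add_i3c_dev_locked(m, addr);
+ *
+ *            return 0;
+ *    }
+ */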
+
+/**
+ * i3c_master_set_info() - set master device information
+ * @master: master used to send frames on the bus
+ * @info: I3C device information
+ *
+ * Set master device info. This should be called from
+ * &i3c_master_controller_ops->bus_init().
+ *
+ * Not all &i3c_device_info fields are meaningful for a master device.
+ * Here is a list of fields that should be properly filled:
+ *
+ * - &i3c_device_info->dyn_addr
+ * - &i3c_device_info->bcr
+ * - &i3c_device_info->dcr
+ * - &i3c_device_info->pid
+ * - &i3c_device_info->hdr_cap if %I3C_BCR_HDR_CAP bit is set in
+ * &i3c_device_info->bcr
+ *
+ * This function must be called with the bus lock held in maintenance mode.
+ *
+ * Return: 0 if @info contains valid information (not every piece of
+ * information can be checked, but we can at least make sure @info->dyn_addr
+ * and @info->bcr are correct), -EINVAL otherwise.
+ */
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *i3cdev;
+ int ret;
+
+ if (!i3c_bus_dev_addr_is_avail(&master->bus, info->dyn_addr))
+ return -EINVAL;
+
+ if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER &&
+ master->secondary)
+ return -EINVAL;
+
+ if (master->this)
+ return -EINVAL;
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, info);
+ if (IS_ERR(i3cdev))
+ return PTR_ERR(i3cdev);
+
+ master->this = i3cdev;
+ master->bus.cur_master = master->this;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_set_info);
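+
+/*
+ * Sketch of the expected ->bus_init() usage (hypothetical driver; the
+ * BCR/DCR/PID values below are placeholders):
+ *
+ *    static int foo_bus_init(struct i3c_master_controller *m)
+ *    {
+ *            struct i3c_device_info info = { };
+ *            int addr = i3c_master_get_free_addr(m, 0);
+ *
+ *            if (addr < 0)
+ *                    return addr;
+ *
+ *            info.dyn_addr = addr;
+ *            info.bcr = I3C_BCR_I3C_MASTER | I3C_BCR_HDR_CAP;
+ *            info.dcr = 0;                     // placeholder DCR
+ *            info.pid = 0x123456789abcULL;     // placeholder PID
+ *            info.hdr_cap = BIT(I3C_HDR_DDR);
+ *
+ *            return i3c_master_set_info(m, &info);
+ *    }
+ */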
+
+static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev, *i3ctmp;
+ struct i2c_dev_desc *i2cdev, *i2ctmp;
+
+ list_for_each_entry_safe(i3cdev, i3ctmp, &master->bus.devs.i3c,
+ common.node) {
+ i3c_master_detach_i3c_dev(i3cdev);
+
+ if (i3cdev->boardinfo && i3cdev->boardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cdev->boardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ i3c_master_free_i3c_dev(i3cdev);
+ }
+
+ list_for_each_entry_safe(i2cdev, i2ctmp, &master->bus.devs.i2c,
+ common.node) {
+ i3c_master_detach_i2c_dev(i2cdev);
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cdev->addr,
+ I3C_ADDR_SLOT_FREE);
+ i3c_master_free_i2c_dev(i2cdev);
+ }
+}
+
+/**
+ * i3c_master_bus_init() - initialize an I3C bus
+ * @master: main master initializing the bus
+ *
+ * This function follows all the initialisation steps described in the I3C
+ * specification:
+ *
+ * 1. Attach I2C devs to the master so that the master can fill its internal
+ * device table appropriately
+ *
+ * 2. Call &i3c_master_controller_ops->bus_init() method to initialize
+ * the master controller. That's usually where the bus mode is selected
+ * (pure bus or mixed fast/slow bus)
+ *
+ * 3. Instruct all devices on the bus to drop their dynamic address. This is
+ * particularly important when the bus was previously configured by someone
+ * else (for example the bootloader)
+ *
+ * 4. Disable all slave events.
+ *
+ * 5. Reserve address slots for I3C devices with init_dyn_addr. And if devices
+ * also have static_addr, try to pre-assign dynamic addresses requested by
+ * the FW with SETDASA and attach corresponding statically defined I3C
+ * devices to the master.
+ *
+ * 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
+ * remaining I3C devices
+ *
+ * Once this is done, all I3C and I2C devices should be usable.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int i3c_master_bus_init(struct i3c_master_controller *master)
+{
+ enum i3c_addr_slot_status status;
+ struct i2c_dev_boardinfo *i2cboardinfo;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ /*
+ * First attach all devices with static definitions provided by the
+ * FW.
+ */
+ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr);
+ if (status != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_detach_devs;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr,
+ I3C_ADDR_SLOT_I2C_DEV);
+
+ i2cdev = i3c_master_alloc_i2c_dev(master,
+ i2cboardinfo->base.addr,
+ i2cboardinfo->lvr);
+ if (IS_ERR(i2cdev)) {
+ ret = PTR_ERR(i2cdev);
+ goto err_detach_devs;
+ }
+
+ ret = i3c_master_attach_i2c_dev(master, i2cdev);
+ if (ret) {
+ i3c_master_free_i2c_dev(i2cdev);
+ goto err_detach_devs;
+ }
+ }
+
+ /*
+ * Now execute the controller-specific ->bus_init() routine, which
+ * might configure its internal logic to match the bus limitations.
+ */
+ ret = master->ops->bus_init(master);
+ if (ret)
+ goto err_detach_devs;
+
+ /*
+ * The master device should have been instantiated in ->bus_init(),
+ * complain if this was not the case.
+ */
+ if (!master->this) {
+ dev_err(&master->dev,
+ "master_set_info() was not called in ->bus_init()\n");
+ ret = -EINVAL;
+ goto err_bus_cleanup;
+ }
+
+ /*
+ * Reset all dynamic addresses that may have been assigned before (by
+ * the bootloader, for example).
+ */
+ ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /* Disable all slave events before starting DAA. */
+ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
+ I3C_CCC_EVENT_HJ);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /*
+ * Reserve init_dyn_addr first, and then try to pre-assign the dynamic
+ * address and retrieve device information if needed.
+ * If pre-assigning the dynamic address fails, setting the dynamic
+ * address to the requested init_dyn_addr is retried after DAA completes
+ * in i3c_master_add_i3c_dev_locked().
+ */
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+
+ /*
+ * We don't reserve a dynamic address for devices that
+ * don't explicitly request one.
+ */
+ if (!i3cboardinfo->init_dyn_addr)
+ continue;
+
+ ret = i3c_bus_get_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr);
+ if (ret != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_rstdaa;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+
+ /*
+ * Only try to create/attach devices that have a static
+ * address. Other devices will be created/attached when
+ * DAA happens, and the requested dynamic address will
+ * be set using SETNEWDA once those devices become
+ * addressable.
+ */
+
+ if (i3cboardinfo->static_addr)
+ i3c_master_early_i3c_dev_add(master, i3cboardinfo);
+ }
+
+ ret = i3c_master_do_daa(master);
+ if (ret)
+ goto err_rstdaa;
+
+ return 0;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+
+err_bus_cleanup:
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+err_detach_devs:
+ i3c_master_detach_free_devs(master);
+
+ return ret;
+}
+
+static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+{
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+ i3c_master_detach_free_devs(master);
+}
+
+static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
+{
+ struct i3c_master_controller *master = i3cdev->common.master;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+ if (i3cdev->info.pid != i3cboardinfo->pid)
+ continue;
+
+ i3cdev->boardinfo = i3cboardinfo;
+ i3cdev->info.static_addr = i3cboardinfo->static_addr;
+ return;
+ }
+}
+
+static struct i3c_dev_desc *
+i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(refdev);
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
+ return i3cdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_add_i3c_dev_locked() - add an I3C slave to the bus
+ * @master: master used to send frames on the bus
+ * @addr: I3C slave dynamic address assigned to the device
+ *
+ * This function instantiates an I3C device object and adds it to the I3C
+ * device list. All device information is automatically retrieved using
+ * standard CCC commands.
+ *
+ * The resulting device descriptor is kept in the master's device list; the
+ * master driver can attach private data to it with i3c_dev_set_master_data().
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ struct i3c_device_info info = { .dyn_addr = addr };
+ struct i3c_dev_desc *newdev, *olddev;
+ u8 old_dyn_addr = addr, expected_dyn_addr;
+ struct i3c_ibi_setup ibireq = { };
+ bool enable_ibi = false;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ newdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(newdev))
+ return PTR_ERR(newdev);
+
+ ret = i3c_master_attach_i3c_dev(master, newdev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = i3c_master_retrieve_dev_info(newdev);
+ if (ret)
+ goto err_detach_dev;
+
+ i3c_master_attach_boardinfo(newdev);
+
+ olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ if (olddev) {
+ newdev->dev = olddev->dev;
+ if (newdev->dev)
+ newdev->dev->desc = newdev;
+
+ /*
+ * We need to restore the IBI state too, so let's save the
+ * IBI information and try to restore it after olddev has been
+ * detached and released, its IBI stopped and the associated
+ * resources freed.
+ */
+ mutex_lock(&olddev->ibi_lock);
+ if (olddev->ibi) {
+ ibireq.handler = olddev->ibi->handler;
+ ibireq.max_payload_len = olddev->ibi->max_payload_len;
+ ibireq.num_slots = olddev->ibi->num_slots;
+
+ if (olddev->ibi->enabled) {
+ enable_ibi = true;
+ i3c_dev_disable_ibi_locked(olddev);
+ }
+
+ i3c_dev_free_ibi_locked(olddev);
+ }
+ mutex_unlock(&olddev->ibi_lock);
+
+ old_dyn_addr = olddev->info.dyn_addr;
+
+ i3c_master_detach_i3c_dev(olddev);
+ i3c_master_free_i3c_dev(olddev);
+ }
+
+ /*
+ * Depending on our previous state, the expected dynamic address might
+ * differ:
+ * - if the device already had a dynamic address assigned, let's try to
+ * re-apply this one
+ * - if the device did not have a dynamic address and the firmware
+ * requested a specific address, pick this one
+ * - in any other case, keep the address automatically assigned by the
+ * master
+ */
+ if (old_dyn_addr && old_dyn_addr != newdev->info.dyn_addr)
+ expected_dyn_addr = old_dyn_addr;
+ else if (newdev->boardinfo && newdev->boardinfo->init_dyn_addr)
+ expected_dyn_addr = newdev->boardinfo->init_dyn_addr;
+ else
+ expected_dyn_addr = newdev->info.dyn_addr;
+
+ if (newdev->info.dyn_addr != expected_dyn_addr) {
+ /*
+ * Try to apply the expected dynamic address. If it fails, keep
+ * the address assigned by the master.
+ */
+ ret = i3c_master_setnewda_locked(master,
+ newdev->info.dyn_addr,
+ expected_dyn_addr);
+ if (!ret) {
+ old_dyn_addr = newdev->info.dyn_addr;
+ newdev->info.dyn_addr = expected_dyn_addr;
+ i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ } else {
+ dev_err(&master->dev,
+ "Failed to assign reserved/old address to device %d%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ }
+
+ /*
+ * Now is the time to try to restore the IBI setup. If we're lucky,
+ * everything works as before; otherwise, all we can do is complain.
+ * FIXME: maybe we should add a callback to inform the driver that it
+ * should request the IBI again instead of trying to hide that from
+ * it.
+ */
+ if (ibireq.handler) {
+ mutex_lock(&newdev->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(newdev, &ibireq);
+ if (ret) {
+ dev_err(&master->dev,
+ "Failed to request IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ } else if (enable_ibi) {
+ ret = i3c_dev_enable_ibi_locked(newdev);
+ if (ret)
+ dev_err(&master->dev,
+ "Failed to re-enable IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ mutex_unlock(&newdev->ibi_lock);
+ }
+
+ return 0;
+
+err_detach_dev:
+ if (newdev->dev && newdev->dev->desc)
+ newdev->dev->desc = NULL;
+
+ i3c_master_detach_i3c_dev(newdev);
+
+err_free_dev:
+ i3c_master_free_i3c_dev(newdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_add_i3c_dev_locked);
+
+#define OF_I3C_REG1_IS_I2C_DEV BIT(31)
+
+static int
+of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i2c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ int ret;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
+ if (ret)
+ return ret;
+
+ /*
+ * The I3C Specification does not clearly say whether I2C devices with a
+ * 10-bit address are supported. Such devices can't be properly passed
+ * through the DEFSLVS command.
+ */
+ if (boardinfo->base.flags & I2C_CLIENT_TEN) {
+ dev_err(dev, "I2C device with 10 bit address not supported.");
+ return -ENOTSUPP;
+ }
+
+ /* LVR is encoded in reg[2]. */
+ boardinfo->lvr = reg[2];
+
+ list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
+ of_node_get(node);
+
+ return 0;
+}
+
+static int
+of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i3c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ enum i3c_addr_slot_status addrstatus;
+ u32 init_dyn_addr = 0;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ if (reg[0]) {
+ if (reg[0] > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ reg[0]);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->static_addr = reg[0];
+
+ if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
+ if (init_dyn_addr > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ init_dyn_addr);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
+
+ if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+ I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
+ return -EINVAL;
+
+ boardinfo->init_dyn_addr = init_dyn_addr;
+ boardinfo->of_node = of_node_get(node);
+ list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
+
+ return 0;
+}
+
+static int of_i3c_master_add_dev(struct i3c_master_controller *master,
+ struct device_node *node)
+{
+ u32 reg[3];
+ int ret;
+
+ if (!master || !node)
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
+ if (ret)
+ return ret;
+
+ /*
+ * The manufacturer ID can't be 0. If reg[1] == 0 that means we're
+ * dealing with an I2C device.
+ */
+ if (!reg[1])
+ ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
+ else
+ ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
+
+ return ret;
+}
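+
+/*
+ * For reference, the "reg" encoding parsed above looks like this in DT
+ * (addresses, PID and node names are illustrative):
+ *
+ *    eeprom@50 {                             // I2C device: reg[1] == 0
+ *            reg = <0x50 0x0 0x10>;          // address, 0, LVR
+ *    };
+ *
+ *    sensor@6e {                             // I3C device: reg[1]/reg[2]
+ *            reg = <0x6e 0x208 0x12000>;     // static addr, PID halves
+ *            assigned-address = <0x31>;      // optional init_dyn_addr
+ *    };
+ */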
+
+static int of_populate_i3c_bus(struct i3c_master_controller *master)
+{
+ struct device *dev = &master->dev;
+ struct device_node *i3cbus_np = dev->of_node;
+ struct device_node *node;
+ int ret;
+ u32 val;
+
+ if (!i3cbus_np)
+ return 0;
+
+ for_each_available_child_of_node(i3cbus_np, node) {
+ ret = of_i3c_master_add_dev(master, node);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ /*
+ * The user might want to limit I2C and I3C speeds in case some devices
+ * on the bus do not support typical rates, or if the bus topology
+ * prevents it from using the max possible rate.
+ */
+ if (!of_property_read_u32(i3cbus_np, "i2c-scl-hz", &val))
+ master->bus.scl_rate.i2c = val;
+
+ if (!of_property_read_u32(i3cbus_np, "i3c-scl-hz", &val))
+ master->bus.scl_rate.i3c = val;
+
+ return 0;
+}
+
+static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+ struct i2c_dev_desc *dev;
+ int i, ret;
+ u16 addr;
+
+ if (!xfers || !master || nxfers <= 0)
+ return -EINVAL;
+
+ if (!master->ops->i2c_xfers)
+ return -ENOTSUPP;
+
+ /* Doing transfers to different devices is not supported. */
+ addr = xfers[0].addr;
+ for (i = 1; i < nxfers; i++) {
+ if (addr != xfers[i].addr)
+ return -ENOTSUPP;
+ }
+
+ i3c_bus_normaluse_lock(&master->bus);
+ dev = i3c_master_find_i2c_dev_by_addr(master, addr);
+ if (!dev)
+ ret = -ENOENT;
+ else
+ ret = master->ops->i2c_xfers(dev, xfers, nxfers);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return ret ? ret : nxfers;
+}
+
+static u32 i3c_master_i2c_funcs(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
+}
+
+static u8 i3c_master_i2c_get_lvr(struct i2c_client *client)
+{
+ /* Fall back to no spike filters and FM bus mode. */
+ u8 lvr = I3C_LVR_I2C_INDEX(2) | I3C_LVR_I2C_FM_MODE;
+
+ if (client->dev.of_node) {
+ u32 reg[3];
+
+ if (!of_property_read_u32_array(client->dev.of_node, "reg",
+ reg, ARRAY_SIZE(reg)))
+ lvr = reg[2];
+ }
+
+ return lvr;
+}
+
+static int i3c_master_i2c_attach(struct i2c_adapter *adap, struct i2c_client *client)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+ enum i3c_addr_slot_status status;
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ /* Already added by board info? */
+ if (i3c_master_find_i2c_dev_by_addr(master, client->addr))
+ return 0;
+
+ status = i3c_bus_get_addr_slot_status(&master->bus, client->addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+
+ i3c_bus_set_addr_slot_status(&master->bus, client->addr,
+ I3C_ADDR_SLOT_I2C_DEV);
+
+ i2cdev = i3c_master_alloc_i2c_dev(master, client->addr,
+ i3c_master_i2c_get_lvr(client));
+ if (IS_ERR(i2cdev)) {
+ ret = PTR_ERR(i2cdev);
+ goto out_clear_status;
+ }
+
+ ret = i3c_master_attach_i2c_dev(master, i2cdev);
+ if (ret)
+ goto out_free_dev;
+
+ return 0;
+
+out_free_dev:
+ i3c_master_free_i2c_dev(i2cdev);
+out_clear_status:
+ i3c_bus_set_addr_slot_status(&master->bus, client->addr,
+ I3C_ADDR_SLOT_FREE);
+
+ return ret;
+}
+
+static int i3c_master_i2c_detach(struct i2c_adapter *adap, struct i2c_client *client)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+ struct i2c_dev_desc *dev;
+
+ dev = i3c_master_find_i2c_dev_by_addr(master, client->addr);
+ if (!dev)
+ return -ENODEV;
+
+ i3c_master_detach_i2c_dev(dev);
+ i3c_bus_set_addr_slot_status(&master->bus, dev->addr,
+ I3C_ADDR_SLOT_FREE);
+ i3c_master_free_i2c_dev(dev);
+
+ return 0;
+}
+
+static const struct i2c_algorithm i3c_master_i2c_algo = {
+ .master_xfer = i3c_master_i2c_adapter_xfer,
+ .functionality = i3c_master_i2c_funcs,
+};
+
+static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct i2c_adapter *adap;
+ struct i2c_client *client;
+ struct device *dev = data;
+ struct i3c_master_controller *master;
+ int ret = 0;
+
+ if (dev->type != &i2c_client_type)
+ return 0;
+
+ client = to_i2c_client(dev);
+ adap = client->adapter;
+
+ if (adap->algo != &i3c_master_i2c_algo)
+ return 0;
+
+ master = i2c_adapter_to_i3c_master(adap);
+
+ i3c_bus_maintenance_lock(&master->bus);
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ ret = i3c_master_i2c_attach(adap, client);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ ret = i3c_master_i2c_detach(adap, client);
+ break;
+ }
+ i3c_bus_maintenance_unlock(&master->bus);
+
+ return ret;
+}
+
+static struct notifier_block i2cdev_notifier = {
+ .notifier_call = i3c_i2c_notifier_call,
+};
+
+static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+{
+ struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
+ struct i2c_dev_desc *i2cdev;
+ struct i2c_dev_boardinfo *i2cboardinfo;
+ int ret;
+
+ adap->dev.parent = master->dev.parent;
+ adap->owner = master->dev.parent->driver->owner;
+ adap->algo = &i3c_master_i2c_algo;
+ strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+
+ /* FIXME: Should we allow i3c masters to override these values? */
+ adap->timeout = 1000;
+ adap->retries = 3;
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ /*
+ * We silently ignore failures here. The bus should keep working
+ * correctly even if one or more I2C devices are not registered.
+ */
+ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+ i2cdev = i3c_master_find_i2c_dev_by_addr(master,
+ i2cboardinfo->base.addr);
+ if (WARN_ON(!i2cdev))
+ continue;
+ i2cdev->dev = i2c_new_client_device(adap, &i2cboardinfo->base);
+ }
+
+ return 0;
+}
+
+static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
+{
+ struct i2c_dev_desc *i2cdev;
+
+ i2c_del_adapter(&master->i2c);
+
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = NULL;
+}
+
+static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (!i3cdev->dev)
+ continue;
+
+ i3cdev->dev->desc = NULL;
+ if (device_is_registered(&i3cdev->dev->dev))
+ device_unregister(&i3cdev->dev->dev);
+ else
+ put_device(&i3cdev->dev->dev);
+ i3cdev->dev = NULL;
+ }
+}
+
+/**
+ * i3c_master_queue_ibi() - Queue an IBI
+ * @dev: the device this IBI is coming from
+ * @slot: the IBI slot used to store the payload
+ *
+ * Queue an IBI to the controller workqueue. The IBI handler attached to
+ * the dev will be called from a workqueue context.
+ */
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+{
+ atomic_inc(&dev->ibi->pending_ibis);
+ queue_work(dev->common.master->wq, &slot->work);
+}
+EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
+
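+/*
+ * Usage sketch (assumed driver-side pattern; names like "data->ibi_pool"
+ * and "payload" are hypothetical): a controller driver typically calls
+ * this from its IBI interrupt path once the payload has been copied into
+ * a slot:
+ *
+ *    slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ *    if (slot) {
+ *        slot->len = payload_len;
+ *        memcpy(slot->data, payload, payload_len);
+ *        i3c_master_queue_ibi(dev, slot);
+ *    }
+ */
+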
+static void i3c_master_handle_ibi(struct work_struct *work)
+{
+ struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
+ work);
+ struct i3c_dev_desc *dev = slot->dev;
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_ibi_payload payload;
+
+ payload.data = slot->data;
+ payload.len = slot->len;
+
+ if (dev->dev)
+ dev->ibi->handler(dev->dev, &payload);
+
+ master->ops->recycle_ibi_slot(dev, slot);
+ if (atomic_dec_and_test(&dev->ibi->pending_ibis))
+ complete(&dev->ibi->all_ibis_handled);
+}
+
+static void i3c_master_init_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ slot->dev = dev;
+ INIT_WORK(&slot->work, i3c_master_handle_ibi);
+}
+
+struct i3c_generic_ibi_slot {
+ struct list_head node;
+ struct i3c_ibi_slot base;
+};
+
+struct i3c_generic_ibi_pool {
+ spinlock_t lock;
+ unsigned int num_slots;
+ struct i3c_generic_ibi_slot *slots;
+ void *payload_buf;
+ struct list_head free_slots;
+ struct list_head pending;
+};
+
+/**
+ * i3c_generic_ibi_free_pool() - Free a generic IBI pool
+ * @pool: the IBI pool to free
+ *
+ * Free all IBI slots allocated by a generic IBI pool.
+ */
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int nslots = 0;
+
+ while (!list_empty(&pool->free_slots)) {
+ slot = list_first_entry(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ list_del(&slot->node);
+ nslots++;
+ }
+
+ /*
+ * If the number of freed slots is not equal to the number of allocated
+ * slots, we have a leak somewhere.
+ */
+ WARN_ON(nslots != pool->num_slots);
+
+ kfree(pool->payload_buf);
+ kfree(pool->slots);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_free_pool);
+
+/**
+ * i3c_generic_ibi_alloc_pool() - Create a generic IBI pool
+ * @dev: the device this pool will be used for
+ * @req: IBI setup request describing what the device driver expects
+ *
+ * Create a generic IBI pool based on the information provided in @req.
+ *
+ * Return: a valid IBI pool in case of success, an ERR_PTR() otherwise.
+ */
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_generic_ibi_pool *pool;
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int i;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free_slots);
+ INIT_LIST_HEAD(&pool->pending);
+
+ pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
+ if (!pool->slots) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+
+ if (req->max_payload_len) {
+ pool->payload_buf = kcalloc(req->num_slots,
+ req->max_payload_len, GFP_KERNEL);
+ if (!pool->payload_buf) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+ }
+
+ for (i = 0; i < req->num_slots; i++) {
+ slot = &pool->slots[i];
+ i3c_master_init_ibi_slot(dev, &slot->base);
+
+ if (req->max_payload_len)
+ slot->base.data = pool->payload_buf +
+ (i * req->max_payload_len);
+
+ list_add_tail(&slot->node, &pool->free_slots);
+ pool->num_slots++;
+ }
+
+ return pool;
+
+err_free_pool:
+ i3c_generic_ibi_free_pool(pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_alloc_pool);
+
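+/*
+ * Minimal sketch showing how a controller driver could back its
+ * &i3c_master_controller_ops->request_ibi() hook with this generic pool
+ * (the "my_dev_data" structure is hypothetical):
+ *
+ *    static int my_request_ibi(struct i3c_dev_desc *dev,
+ *                              const struct i3c_ibi_setup *req)
+ *    {
+ *        struct my_dev_data *data = i3c_dev_get_master_data(dev);
+ *
+ *        data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ *        return PTR_ERR_OR_ZERO(data->ibi_pool);
+ *    }
+ */
+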
+/**
+ * i3c_generic_ibi_get_free_slot() - Get a free slot from a generic IBI pool
+ * @pool: the pool to query an IBI slot on
+ *
+ * Search for a free slot in a generic IBI pool.
+ * The slot should be returned to the pool using i3c_generic_ibi_recycle_slot()
+ * when it's no longer needed.
+ *
+ * Return: a pointer to a free slot, or NULL if there's no free slot available.
+ */
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ slot = list_first_entry_or_null(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ if (slot)
+ list_del(&slot->node);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return slot ? &slot->base : NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_get_free_slot);
+
+/**
+ * i3c_generic_ibi_recycle_slot() - Return a slot to a generic IBI pool
+ * @pool: the pool to return the IBI slot to
+ * @s: IBI slot to recycle
+ *
+ * Add an IBI slot back to its generic IBI pool. Should be called from the
+ * master driver's &struct i3c_master_controller_ops->recycle_ibi_slot()
+ * method.
+ */
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *s)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ if (!s)
+ return;
+
+ slot = container_of(s, struct i3c_generic_ibi_slot, base);
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_tail(&slot->node, &pool->free_slots);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
+
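+/*
+ * Sketch of the matching recycle hook (same hypothetical "my_dev_data"
+ * as above):
+ *
+ *    static void my_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ *                                    struct i3c_ibi_slot *slot)
+ *    {
+ *        struct my_dev_data *data = i3c_dev_get_master_data(dev);
+ *
+ *        i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+ *    }
+ */
+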
+static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
+{
+ if (!ops || !ops->bus_init || !ops->priv_xfers ||
+ !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers)
+ return -EINVAL;
+
+ if (ops->request_ibi &&
+ (!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
+ !ops->recycle_ibi_slot))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * i3c_master_register() - register an I3C master
+ * @master: master used to send frames on the bus
+ * @parent: the parent device (the one that provides this I3C master
+ * controller)
+ * @ops: the master controller operations
+ * @secondary: true if you are registering a secondary master. Will return
+ * -ENOTSUPP if set to true since secondary masters are not yet
+ * supported
+ *
+ * This function takes care of everything for you:
+ *
+ * - creates and initializes the I3C bus
+ * - populates the bus with static I2C devs if @parent->of_node is not
+ * NULL
+ * - registers all I3C devices added by the controller during bus
+ * initialization
+ * - registers the I2C adapter and all I2C devices
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary)
+{
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+ enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
+ struct i2c_dev_boardinfo *i2cbi;
+ int ret;
+
+ /* We do not support secondary masters yet. */
+ if (secondary)
+ return -ENOTSUPP;
+
+ ret = i3c_master_check_ops(ops);
+ if (ret)
+ return ret;
+
+ master->dev.parent = parent;
+ master->dev.of_node = of_node_get(parent->of_node);
+ master->dev.bus = &i3c_bus_type;
+ master->dev.type = &i3c_masterdev_type;
+ master->dev.release = i3c_masterdev_release;
+ master->ops = ops;
+ master->secondary = secondary;
+ INIT_LIST_HEAD(&master->boardinfo.i2c);
+ INIT_LIST_HEAD(&master->boardinfo.i3c);
+
+ ret = i3c_bus_init(i3cbus);
+ if (ret)
+ return ret;
+
+ device_initialize(&master->dev);
+ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
+ ret = of_populate_i3c_bus(master);
+ if (ret)
+ goto err_put_dev;
+
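+ /*
+ * Downgrade the bus mode according to the LVR of each legacy I2C
+ * device: the higher the I2C index encoded in the LVR, the less
+ * capable the device and the more restricted the resulting bus mode.
+ */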
+ list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
+ switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) {
+ case I3C_LVR_I2C_INDEX(0):
+ if (mode < I3C_BUS_MODE_MIXED_FAST)
+ mode = I3C_BUS_MODE_MIXED_FAST;
+ break;
+ case I3C_LVR_I2C_INDEX(1):
+ if (mode < I3C_BUS_MODE_MIXED_LIMITED)
+ mode = I3C_BUS_MODE_MIXED_LIMITED;
+ break;
+ case I3C_LVR_I2C_INDEX(2):
+ if (mode < I3C_BUS_MODE_MIXED_SLOW)
+ mode = I3C_BUS_MODE_MIXED_SLOW;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_put_dev;
+ }
+
+ if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
+ }
+
+ ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
+ if (ret)
+ goto err_put_dev;
+
+ master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+ if (!master->wq) {
+ ret = -ENOMEM;
+ goto err_put_dev;
+ }
+
+ ret = i3c_master_bus_init(master);
+ if (ret)
+ goto err_put_dev;
+
+ ret = device_add(&master->dev);
+ if (ret)
+ goto err_cleanup_bus;
+
+ /*
+ * Expose our I3C bus as an I2C adapter so that I2C devices are exposed
+ * through the I2C subsystem.
+ */
+ ret = i3c_master_i2c_adapter_init(master);
+ if (ret)
+ goto err_del_dev;
+
+ /*
+ * We're done initializing the bus and the controller, we can now
+ * register I3C devices discovered during the initial DAA.
+ */
+ master->init_done = true;
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+
+err_del_dev:
+ device_del(&master->dev);
+
+err_cleanup_bus:
+ i3c_master_bus_cleanup(master);
+
+err_put_dev:
+ put_device(&master->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_register);
+
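+/*
+ * Registration sketch (hypothetical "my_*" driver, for illustration
+ * only). The ops listed here are the mandatory ones checked by
+ * i3c_master_check_ops():
+ *
+ *    static const struct i3c_master_controller_ops my_ops = {
+ *        .bus_init = my_bus_init,
+ *        .do_daa = my_do_daa,
+ *        .send_ccc_cmd = my_send_ccc_cmd,
+ *        .priv_xfers = my_priv_xfers,
+ *        .i2c_xfers = my_i2c_xfers,
+ *    };
+ *
+ *    ret = i3c_master_register(&my_master->base, &pdev->dev, &my_ops, false);
+ */
+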
+/**
+ * i3c_master_unregister() - unregister an I3C master
+ * @master: master used to send frames on the bus
+ *
+ * Basically undo everything done in i3c_master_register().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_unregister(struct i3c_master_controller *master)
+{
+ i3c_master_i2c_adapter_cleanup(master);
+ i3c_master_unregister_i3c_devs(master);
+ i3c_master_bus_cleanup(master);
+ device_unregister(&master->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_unregister);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *master;
+
+ if (!dev)
+ return -ENOENT;
+
+ master = i3c_dev_get_master(dev);
+ if (!master || !xfers)
+ return -EINVAL;
+
+ if (!master->ops->priv_xfers)
+ return -ENOTSUPP;
+
+ return master->ops->priv_xfers(dev, xfers, nxfers);
+}
+
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master;
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ master = i3c_dev_get_master(dev);
+ ret = master->ops->disable_ibi(dev);
+ if (ret)
+ return ret;
+
+ reinit_completion(&dev->ibi->all_ibis_handled);
+ if (atomic_read(&dev->ibi->pending_ibis))
+ wait_for_completion(&dev->ibi->all_ibis_handled);
+
+ dev->ibi->enabled = false;
+
+ return 0;
+}
+
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ ret = master->ops->enable_ibi(dev);
+ if (!ret)
+ dev->ibi->enabled = true;
+
+ return ret;
+}
+
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_device_ibi_info *ibi;
+ int ret;
+
+ if (!master->ops->request_ibi)
+ return -ENOTSUPP;
+
+ if (dev->ibi)
+ return -EBUSY;
+
+ ibi = kzalloc(sizeof(*ibi), GFP_KERNEL);
+ if (!ibi)
+ return -ENOMEM;
+
+ atomic_set(&ibi->pending_ibis, 0);
+ init_completion(&ibi->all_ibis_handled);
+ ibi->handler = req->handler;
+ ibi->max_payload_len = req->max_payload_len;
+ ibi->num_slots = req->num_slots;
+
+ dev->ibi = ibi;
+ ret = master->ops->request_ibi(dev, req);
+ if (ret) {
+ kfree(ibi);
+ dev->ibi = NULL;
+ }
+
+ return ret;
+}
+
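+/*
+ * Device drivers do not call the *_locked variants directly; they go
+ * through the i3c_device_*() API. A typical flow (sketch, with a
+ * hypothetical "my_ibi_handler") is:
+ *
+ *    struct i3c_ibi_setup req = {
+ *        .max_payload_len = 2,
+ *        .num_slots = 10,
+ *        .handler = my_ibi_handler,
+ *    };
+ *
+ *    i3c_device_request_ibi(i3cdev, &req);
+ *    i3c_device_enable_ibi(i3cdev);
+ */
+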
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (!dev->ibi)
+ return;
+
+ if (WARN_ON(dev->ibi->enabled))
+ WARN_ON(i3c_dev_disable_ibi_locked(dev));
+
+ master->ops->free_ibi(dev);
+ kfree(dev->ibi);
+ dev->ibi = NULL;
+}
+
+static int __init i3c_init(void)
+{
+ int res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
+
+ if (res)
+ return res;
+
+ res = bus_register(&i3c_bus_type);
+ if (res)
+ goto out_unreg_notifier;
+
+ return 0;
+
+out_unreg_notifier:
+ bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
+
+ return res;
+}
+subsys_initcall(i3c_init);
+
+static void __exit i3c_exit(void)
+{
+ bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
+ idr_destroy(&i3c_bus_idr);
+ bus_unregister(&i3c_bus_type);
+}
+module_exit(i3c_exit);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("I3C core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
new file mode 100644
index 000000000..3b8f95916
--- /dev/null
+++ b/drivers/i3c/master/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CDNS_I3C_MASTER
+ tristate "Cadence I3C master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ help
+ Enable this driver if you want to support the Cadence I3C master block.
+
+config DW_I3C_MASTER
+ tristate "Synospsys DesignWare I3C master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ # ALPHA and PARISC need {read,write}sl()
+ help
+ Support for Synopsys DesignWare MIPI I3C Controller.
+
+ For details please see
+ https://www.synopsys.com/dw/ipdir.php?ds=mipi_i3c
+
+ This driver can also be built as a module. If so, the module
+ will be called dw-i3c-master.
+
+config SVC_I3C_MASTER
+ tristate "Silvaco I3C Dual-Role Master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ help
+ Support for Silvaco I3C Dual-Role Master Controller.
+
+config MIPI_I3C_HCI
+ tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
+ depends on I3C
+ depends on HAS_IOMEM
+ help
+ Support for hardware following the MIPI Alliance's I3C Host Controller
+ Interface specification.
+
+ For details please see:
+ https://www.mipi.org/specifications/i3c-hci
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
new file mode 100644
index 000000000..b3fee0f69
--- /dev/null
+++ b/drivers/i3c/master/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
+obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
+obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o
+obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
new file mode 100644
index 000000000..51a860820
--- /dev/null
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ *
+ * Author: Vitor Soares <vitor.soares@synopsys.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define DEVICE_CTRL 0x0
+#define DEV_CTRL_ENABLE BIT(31)
+#define DEV_CTRL_RESUME BIT(30)
+#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
+#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)
+
+#define DEVICE_ADDR 0x4
+#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
+#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))
+
+#define HW_CAPABILITY 0x8
+#define COMMAND_QUEUE_PORT 0xc
+#define COMMAND_PORT_TOC BIT(30)
+#define COMMAND_PORT_READ_TRANSFER BIT(28)
+#define COMMAND_PORT_SDAP BIT(27)
+#define COMMAND_PORT_ROC BIT(26)
+#define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
+#define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
+#define COMMAND_PORT_CP BIT(15)
+#define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
+#define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))
+
+#define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
+#define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
+#define COMMAND_PORT_TRANSFER_ARG 0x01
+
+#define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24))
+#define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16))
+#define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8))
+#define COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5)
+#define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4)
+#define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3)
+#define COMMAND_PORT_SHORT_DATA_ARG 0x02
+
+#define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21))
+#define COMMAND_PORT_ADDR_ASSGN_CMD 0x03
+
+#define RESPONSE_QUEUE_PORT 0x10
+#define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28)
+#define RESPONSE_NO_ERROR 0
+#define RESPONSE_ERROR_CRC 1
+#define RESPONSE_ERROR_PARITY 2
+#define RESPONSE_ERROR_FRAME 3
+#define RESPONSE_ERROR_IBA_NACK 4
+#define RESPONSE_ERROR_ADDRESS_NACK 5
+#define RESPONSE_ERROR_OVER_UNDER_FLOW 6
+#define RESPONSE_ERROR_TRANSF_ABORT 8
+#define RESPONSE_ERROR_I2C_W_NACK_ERR 9
+#define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
+#define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))
+
+#define RX_TX_DATA_PORT 0x14
+#define IBI_QUEUE_STATUS 0x18
+#define QUEUE_THLD_CTRL 0x1c
+#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
+#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
+
+#define DATA_BUFFER_THLD_CTRL 0x20
+#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)
+
+#define IBI_QUEUE_CTRL 0x24
+#define IBI_MR_REQ_REJECT 0x2C
+#define IBI_SIR_REQ_REJECT 0x30
+#define IBI_REQ_REJECT_ALL GENMASK(31, 0)
+
+#define RESET_CTRL 0x34
+#define RESET_CTRL_IBI_QUEUE BIT(5)
+#define RESET_CTRL_RX_FIFO BIT(4)
+#define RESET_CTRL_TX_FIFO BIT(3)
+#define RESET_CTRL_RESP_QUEUE BIT(2)
+#define RESET_CTRL_CMD_QUEUE BIT(1)
+#define RESET_CTRL_SOFT BIT(0)
+
+#define SLV_EVENT_CTRL 0x38
+#define INTR_STATUS 0x3c
+#define INTR_STATUS_EN 0x40
+#define INTR_SIGNAL_EN 0x44
+#define INTR_FORCE 0x48
+#define INTR_BUSOWNER_UPDATE_STAT BIT(13)
+#define INTR_IBI_UPDATED_STAT BIT(12)
+#define INTR_READ_REQ_RECV_STAT BIT(11)
+#define INTR_DEFSLV_STAT BIT(10)
+#define INTR_TRANSFER_ERR_STAT BIT(9)
+#define INTR_DYN_ADDR_ASSGN_STAT BIT(8)
+#define INTR_CCC_UPDATED_STAT BIT(6)
+#define INTR_TRANSFER_ABORT_STAT BIT(5)
+#define INTR_RESP_READY_STAT BIT(4)
+#define INTR_CMD_QUEUE_READY_STAT BIT(3)
+#define INTR_IBI_THLD_STAT BIT(2)
+#define INTR_RX_THLD_STAT BIT(1)
+#define INTR_TX_THLD_STAT BIT(0)
+#define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT | \
+ INTR_IBI_UPDATED_STAT | \
+ INTR_READ_REQ_RECV_STAT | \
+ INTR_DEFSLV_STAT | \
+ INTR_TRANSFER_ERR_STAT | \
+ INTR_DYN_ADDR_ASSGN_STAT | \
+ INTR_CCC_UPDATED_STAT | \
+ INTR_TRANSFER_ABORT_STAT | \
+ INTR_RESP_READY_STAT | \
+ INTR_CMD_QUEUE_READY_STAT | \
+ INTR_IBI_THLD_STAT | \
+ INTR_TX_THLD_STAT | \
+ INTR_RX_THLD_STAT)
+
+#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
+ INTR_RESP_READY_STAT)
+
+#define QUEUE_STATUS_LEVEL 0x4c
+#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
+#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0))
+
+#define DATA_BUFFER_STATUS_LEVEL 0x50
+#define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))
+
+#define PRESENT_STATE 0x54
+#define CCC_DEVICE_STATUS 0x58
+#define DEVICE_ADDR_TABLE_POINTER 0x5c
+#define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0))
+
+#define DEV_CHAR_TABLE_POINTER 0x60
+#define VENDOR_SPECIFIC_REG_POINTER 0x6c
+#define SLV_PID_VALUE 0x74
+#define SLV_CHAR_CTRL 0x78
+#define SLV_MAX_LEN 0x7c
+#define MAX_READ_TURNAROUND 0x80
+#define MAX_DATA_SPEED 0x84
+#define SLV_DEBUG_STATUS 0x88
+#define SLV_INTR_REQ 0x8c
+#define DEVICE_CTRL_EXTENDED 0xb0
+#define SCL_I3C_OD_TIMING 0xb4
+#define SCL_I3C_PP_TIMING 0xb8
+#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
+#define SCL_I3C_TIMING_CNT_MIN 5
+
+#define SCL_I2C_FM_TIMING 0xbc
+#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
+#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
+
+#define SCL_I2C_FMP_TIMING 0xc0
+#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
+
+#define SCL_EXT_LCNT_TIMING 0xc8
+#define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
+#define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8))
+#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
+
+#define SCL_EXT_TERMN_LCNT_TIMING 0xcc
+#define BUS_FREE_TIMING 0xd4
+#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
+
+#define BUS_IDLE_TIMING 0xd8
+#define I3C_VER_ID 0xe0
+#define I3C_VER_TYPE 0xe4
+#define EXTENDED_CAPABILITY 0xe8
+#define SLAVE_CONFIG 0xec
+
+#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
+#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
+#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
+
+#define MAX_DEVS 32
+
+#define I3C_BUS_SDR1_SCL_RATE 8000000
+#define I3C_BUS_SDR2_SCL_RATE 6000000
+#define I3C_BUS_SDR3_SCL_RATE 4000000
+#define I3C_BUS_SDR4_SCL_RATE 2000000
+#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
+#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
+#define I3C_BUS_THIGH_MAX_NS 41
+
+#define XFER_TIMEOUT (msecs_to_jiffies(1000))
+
+struct dw_i3c_master_caps {
+ u8 cmdfifodepth;
+ u8 datafifodepth;
+};
+
+struct dw_i3c_cmd {
+ u32 cmd_lo;
+ u32 cmd_hi;
+ u16 tx_len;
+ const void *tx_buf;
+ u16 rx_len;
+ void *rx_buf;
+ u8 error;
+};
+
+struct dw_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ struct dw_i3c_cmd cmds[];
+};
+
+struct dw_i3c_master {
+ struct i3c_master_controller base;
+ u16 maxdevs;
+ u16 datstartaddr;
+ u32 free_pos;
+ struct {
+ struct list_head list;
+ struct dw_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ struct dw_i3c_master_caps caps;
+ void __iomem *regs;
+ struct reset_control *core_rst;
+ struct clk *core_clk;
+ char version[5];
+ char type[5];
+ u8 addrs[MAX_DEVS];
+};
+
+struct dw_i3c_i2c_dev_data {
+ u8 index;
+};
+
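+/*
+ * Return 1 when @p has an even number of set bits. The byte is folded
+ * into a nibble and 0x9669 is used as a 16-entry lookup table: bit N of
+ * 0x9669 is set exactly when N has an even population count. The result
+ * becomes the parity bit (bit 7) of the dynamic address programmed for
+ * ENTDAA, giving the address byte odd parity overall.
+ */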
+static u8 even_parity(u8 p)
+{
+ p ^= p >> 4;
+ p &= 0xf;
+
+ return (0x9669 >> p) & 1;
+}
+
+static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_ENTHDR(0):
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline struct dw_i3c_master *
+to_dw_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct dw_i3c_master, base);
+}
+
+static void dw_i3c_master_disable(struct dw_i3c_master *master)
+{
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
+}
+
+static void dw_i3c_master_enable(struct dw_i3c_master *master)
+{
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
+}
+
+static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
+{
+ int pos;
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (addr == master->addrs[pos])
+ return pos;
+ }
+
+ return -EINVAL;
+}
+
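+/*
+ * master->free_pos is a bitmap with one bit per device address table
+ * entry; a set bit means the slot is free. ffs() is 1-based, hence the
+ * "- 1" below.
+ */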
+static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
+{
+ if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
+ return -ENOSPC;
+
+ return ffs(master->free_pos) - 1;
+}
+
+static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
+ const u8 *bytes, int nbytes)
+{
+ writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
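+ /*
+ * The data port is a 32-bit FIFO: any trailing 1-3 bytes are
+ * zero-padded into a full word and pushed separately.
+ */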
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
+ writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ }
+}
+
+static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ readsl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ readsl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
+static struct dw_i3c_xfer *
+dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
+{
+ struct dw_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
+{
+ struct dw_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i;
+ u32 thld_ctrl;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
+ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
+ }
+}
+
+static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ dw_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ if (master->xferqueue.cur == xfer) {
+ u32 status;
+
+ master->xferqueue.cur = NULL;
+
+ writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
+ RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
+ master->regs + RESET_CTRL);
+
+ readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
+ !status, 10, 1000000);
+ } else {
+ list_del_init(&xfer->node);
+ }
+}
+
+static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
+{
+ struct dw_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+ u32 nresp;
+
+ if (!xfer)
+ return;
+
+ nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
+ nresp = QUEUE_STATUS_LEVEL_RESP(nresp);
+
+ for (i = 0; i < nresp; i++) {
+ struct dw_i3c_cmd *cmd;
+ u32 resp;
+
+ resp = readl(master->regs + RESPONSE_QUEUE_PORT);
+
+ cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
+ cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
+ cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
+ if (cmd->rx_len && !cmd->error)
+ dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
+ cmd->rx_len);
+ }
+
+ for (i = 0; i < nresp; i++) {
+ switch (xfer->cmds[i].error) {
+ case RESPONSE_NO_ERROR:
+ break;
+ case RESPONSE_ERROR_PARITY:
+ case RESPONSE_ERROR_IBA_NACK:
+ case RESPONSE_ERROR_TRANSF_ABORT:
+ case RESPONSE_ERROR_CRC:
+ case RESPONSE_ERROR_FRAME:
+ ret = -EIO;
+ break;
+ case RESPONSE_ERROR_OVER_UNDER_FLOW:
+ ret = -ENOSPC;
+ break;
+ case RESPONSE_ERROR_I2C_W_NACK_ERR:
+ case RESPONSE_ERROR_ADDRESS_NACK:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ if (ret < 0) {
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
+ master->regs + DEVICE_CTRL);
+ }
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct dw_i3c_xfer,
+ node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ dw_i3c_master_start_xfer_locked(master);
+}
+
+static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u32 scl_timing;
+ u8 hcnt, lcnt;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+
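+ /*
+ * Example with a hypothetical 250 MHz core clock: core_period is
+ * DIV_ROUND_UP(1000000000, 250000000) = 4 ns, so the 41 ns
+ * I3C_BUS_THIGH_MAX_NS limit gives hcnt = DIV_ROUND_UP(41, 4) - 1 = 10.
+ */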
+ hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
+ if (hcnt < SCL_I3C_TIMING_CNT_MIN)
+ hcnt = SCL_I3C_TIMING_CNT_MIN;
+
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
+ if (lcnt < SCL_I3C_TIMING_CNT_MIN)
+ lcnt = SCL_I3C_TIMING_CNT_MIN;
+
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
+
+ if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
+ writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
+ scl_timing = SCL_EXT_LCNT_1(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_2(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_3(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_4(lcnt);
+ writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
+
+ return 0;
+}
+
+static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u16 hcnt, lcnt;
+ u32 scl_timing;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+ scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
+ SCL_I2C_FMP_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+ scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
+ SCL_I2C_FM_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
+
+ writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
+ master->regs + DEVICE_CTRL);
+
+ return 0;
+}
+
+static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = { };
+ u32 thld_ctrl;
+ int ret;
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ ret = dw_i2c_clk_cfg(master);
+ if (ret)
+ return ret;
+ fallthrough;
+ case I3C_BUS_MODE_PURE:
+ ret = dw_i3c_clk_cfg(master);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
+ thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
+ writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
+ writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
+ master->regs + DEVICE_ADDR);
+
+ info.dyn_addr = ret;
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
+
+ /* For now don't support Hot-Join */
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
+ master->regs + DEVICE_CTRL);
+
+ dw_i3c_master_enable(master);
+
+ return 0;
+}
+
+static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ dw_i3c_master_disable(master);
+}
+
+static int dw_i3c_ccc_set(struct dw_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+ pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ cmd = xfer->cmds;
+ cmd->tx_buf = ccc->dests[0].payload.data;
+ cmd->tx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+ ccc->err = I3C_ERROR_M2;
+
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ int ret, pos;
+
+ pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ cmd = xfer->cmds;
+ cmd->rx_buf = ccc->dests[0].payload.data;
+ cmd->rx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+ ccc->err = I3C_ERROR_M2;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int ret = 0;
+
+ if (ccc->id == I3C_CCC_ENTDAA)
+ return -EINVAL;
+
+ if (ccc->rnw)
+ ret = dw_i3c_ccc_get(master, ccc);
+ else
+ ret = dw_i3c_ccc_set(master, ccc);
+
+ return ret;
+}
+
+static int dw_i3c_master_daa(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ u32 olddevs, newdevs;
+ u8 p, last_addr = 0;
+ int ret, pos;
+
+ olddevs = ~(master->free_pos);
+
+ /* Prepare DAT before launching DAA. */
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (olddevs & BIT(pos))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ master->addrs[pos] = ret;
+ p = even_parity(ret);
+ last_addr = ret;
+ ret |= (p << 7);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0) {
+ dw_i3c_master_free_xfer(xfer);
+ return pos;
+ }
+ cmd = &xfer->cmds[0];
+ cmd->cmd_hi = 0x1;
+ cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
+ COMMAND_PORT_ADDR_ASSGN_CMD |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
+ newdevs &= ~olddevs;
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, master->addrs[pos]);
+ }
+
+ dw_i3c_master_free_xfer(xfer);
+
+ return 0;
+}
+
+static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (!i3c_nxfers)
+ return 0;
+
+ if (i3c_nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ if (i3c_xfers[i].rnw)
+ nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.datafifodepth ||
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->rx_len = i3c_xfers[i].len;
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_SPEED(dev->info.max_read_ds);
+
+ } else {
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->tx_len = i3c_xfers[i].len;
+ cmd->cmd_lo =
+ COMMAND_PORT_SPEED(dev->info.max_write_ds);
+ }
+
+ cmd->cmd_lo |= COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i == (i3c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+
+ if (data->index > pos && pos > 0) {
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ }
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = dev->info.dyn_addr;
+
+ return 0;
+}
+
+static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
+ master->free_pos &= ~BIT(pos);
+ i3c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ return 0;
+}
+
+static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (!i2c_nxfers)
+ return 0;
+
+ if (i2c_nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ if (i2c_xfers[i].flags & I2C_M_RD)
+ nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.datafifodepth ||
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i2c_xfers[i].flags & I2C_M_RD) {
+ cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
+ cmd->rx_buf = i2c_xfers[i].buf;
+ cmd->rx_len = i2c_xfers[i].len;
+ } else {
+ cmd->tx_buf = i2c_xfers[i].buf;
+ cmd->tx_len = i2c_xfers[i].len;
+ }
+
+ if (i == (i2c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ master->addrs[pos] = dev->addr;
+ master->free_pos &= ~BIT(pos);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
+ DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ return 0;
+}
+
+static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
+{
+ struct dw_i3c_master *master = dev_id;
+ u32 status;
+
+ status = readl(master->regs + INTR_STATUS);
+
+ if (!(status & readl(master->regs + INTR_STATUS_EN))) {
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&master->xferqueue.lock);
+ dw_i3c_master_end_xfer_locked(master, status);
+ if (status & INTR_TRANSFER_ERR_STAT)
+ writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
+ spin_unlock(&master->xferqueue.lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
+ .bus_init = dw_i3c_master_bus_init,
+ .bus_cleanup = dw_i3c_master_bus_cleanup,
+ .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
+ .do_daa = dw_i3c_master_daa,
+ .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
+ .priv_xfers = dw_i3c_master_priv_xfers,
+ .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
+ .i2c_xfers = dw_i3c_master_i2c_xfers,
+};
+
+static int dw_i3c_probe(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(master->core_clk))
+ return PTR_ERR(master->core_clk);
+
+ master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "core_rst");
+ if (IS_ERR(master->core_rst))
+ return PTR_ERR(master->core_rst);
+
+ ret = clk_prepare_enable(master->core_clk);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(master->core_rst);
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_assert_rst;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq,
+ dw_i3c_master_irq_handler, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_assert_rst;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Information regarding the FIFO/queue depths */
+ ret = readl(master->regs + QUEUE_STATUS_LEVEL);
+ master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
+
+ ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
+ master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);
+
+ ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
+ master->datstartaddr = ret;
+ master->maxdevs = ret >> 16;
+ master->free_pos = GENMASK(master->maxdevs - 1, 0);
+
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &dw_mipi_i3c_ops, false);
+ if (ret)
+ goto err_assert_rst;
+
+ return 0;
+
+err_assert_rst:
+ reset_control_assert(master->core_rst);
+ clk_disable_unprepare(master->core_clk);
+
+ return ret;
+}
+
+static int dw_i3c_remove(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ reset_control_assert(master->core_rst);
+
+ clk_disable_unprepare(master->core_clk);
+
+ return 0;
+}
+
+static const struct of_device_id dw_i3c_master_of_match[] = {
+ { .compatible = "snps,dw-i3c-master-1.00a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
+
+static struct platform_driver dw_i3c_driver = {
+ .probe = dw_i3c_probe,
+ .remove = dw_i3c_remove,
+ .driver = {
+ .name = "dw-i3c-master",
+ .of_match_table = of_match_ptr(dw_i3c_master_of_match),
+ },
+};
+module_platform_driver(dw_i3c_driver);
+
+MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
+MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
new file mode 100644
index 000000000..4a49c75a9
--- /dev/null
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -0,0 +1,1693 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+
+#define DEV_ID 0x0
+#define DEV_ID_I3C_MASTER 0x5034
+
+#define CONF_STATUS0 0x4
+#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
+#define CONF_STATUS0_ECC_CHK BIT(28)
+#define CONF_STATUS0_INTEG_CHK BIT(27)
+#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
+#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
+#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
+#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
+#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
+#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7))
+#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
+#define CONF_STATUS0_SEC_MASTER BIT(4)
+#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
+
+#define CONF_STATUS1 0x8
+#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
+#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
+#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
+#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
+#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
+#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
+#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
+
+#define REV_ID 0xc
+#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
+#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
+#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
+#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
+
+#define CTRL 0x10
+#define CTRL_DEV_EN BIT(31)
+#define CTRL_HALT_EN BIT(30)
+#define CTRL_MCS BIT(29)
+#define CTRL_MCS_EN BIT(28)
+#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
+#define CTRL_HJ_DISEC BIT(8)
+#define CTRL_MST_ACK BIT(7)
+#define CTRL_HJ_ACK BIT(6)
+#define CTRL_HJ_INIT BIT(5)
+#define CTRL_MST_INIT BIT(4)
+#define CTRL_AHDR_OPT BIT(3)
+#define CTRL_PURE_BUS_MODE 0
+#define CTRL_MIXED_FAST_BUS_MODE 2
+#define CTRL_MIXED_SLOW_BUS_MODE 3
+#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+#define THD_DELAY_MAX 3
+
+#define PRESCL_CTRL0 0x14
+#define PRESCL_CTRL0_I2C(x) ((x) << 16)
+#define PRESCL_CTRL0_I3C(x) (x)
+#define PRESCL_CTRL0_MAX GENMASK(9, 0)
+
+#define PRESCL_CTRL1 0x18
+#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
+#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
+#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
+#define PRESCL_CTRL1_OD_LOW(x) (x)
+
+#define MST_IER 0x20
+#define MST_IDR 0x24
+#define MST_IMR 0x28
+#define MST_ICR 0x2c
+#define MST_ISR 0x30
+#define MST_INT_HALTED BIT(18)
+#define MST_INT_MR_DONE BIT(17)
+#define MST_INT_IMM_COMP BIT(16)
+#define MST_INT_TX_THR BIT(15)
+#define MST_INT_TX_OVF BIT(14)
+#define MST_INT_IBID_THR BIT(12)
+#define MST_INT_IBID_UNF BIT(11)
+#define MST_INT_IBIR_THR BIT(10)
+#define MST_INT_IBIR_UNF BIT(9)
+#define MST_INT_IBIR_OVF BIT(8)
+#define MST_INT_RX_THR BIT(7)
+#define MST_INT_RX_UNF BIT(6)
+#define MST_INT_CMDD_EMP BIT(5)
+#define MST_INT_CMDD_THR BIT(4)
+#define MST_INT_CMDD_OVF BIT(3)
+#define MST_INT_CMDR_THR BIT(2)
+#define MST_INT_CMDR_UNF BIT(1)
+#define MST_INT_CMDR_OVF BIT(0)
+
+#define MST_STATUS0 0x34
+#define MST_STATUS0_IDLE BIT(18)
+#define MST_STATUS0_HALTED BIT(17)
+#define MST_STATUS0_MASTER_MODE BIT(16)
+#define MST_STATUS0_TX_FULL BIT(13)
+#define MST_STATUS0_IBID_FULL BIT(12)
+#define MST_STATUS0_IBIR_FULL BIT(11)
+#define MST_STATUS0_RX_FULL BIT(10)
+#define MST_STATUS0_CMDD_FULL BIT(9)
+#define MST_STATUS0_CMDR_FULL BIT(8)
+#define MST_STATUS0_TX_EMP BIT(5)
+#define MST_STATUS0_IBID_EMP BIT(4)
+#define MST_STATUS0_IBIR_EMP BIT(3)
+#define MST_STATUS0_RX_EMP BIT(2)
+#define MST_STATUS0_CMDD_EMP BIT(1)
+#define MST_STATUS0_CMDR_EMP BIT(0)
+
+#define CMDR 0x38
+#define CMDR_NO_ERROR 0
+#define CMDR_DDR_PREAMBLE_ERROR 1
+#define CMDR_DDR_PARITY_ERROR 2
+#define CMDR_DDR_RX_FIFO_OVF 3
+#define CMDR_DDR_TX_FIFO_UNF 4
+#define CMDR_M0_ERROR 5
+#define CMDR_M1_ERROR 6
+#define CMDR_M2_ERROR 7
+#define CMDR_MST_ABORT 8
+#define CMDR_NACK_RESP 9
+#define CMDR_INVALID_DA 10
+#define CMDR_DDR_DROPPED 11
+#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
+#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
+#define CMDR_CMDID_HJACK_DISEC 0xfe
+#define CMDR_CMDID_HJACK_ENTDAA 0xff
+#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
+
+#define IBIR 0x3c
+#define IBIR_ACKED BIT(12)
+#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
+#define IBIR_ERROR BIT(7)
+#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
+#define IBIR_TYPE_IBI 0
+#define IBIR_TYPE_HJ 1
+#define IBIR_TYPE_MR 2
+#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
+
+#define SLV_IER 0x40
+#define SLV_IDR 0x44
+#define SLV_IMR 0x48
+#define SLV_ICR 0x4c
+#define SLV_ISR 0x50
+#define SLV_INT_TM BIT(20)
+#define SLV_INT_ERROR BIT(19)
+#define SLV_INT_EVENT_UP BIT(18)
+#define SLV_INT_HJ_DONE BIT(17)
+#define SLV_INT_MR_DONE BIT(16)
+#define SLV_INT_DA_UPD BIT(15)
+#define SLV_INT_SDR_FAIL BIT(14)
+#define SLV_INT_DDR_FAIL BIT(13)
+#define SLV_INT_M_RD_ABORT BIT(12)
+#define SLV_INT_DDR_RX_THR BIT(11)
+#define SLV_INT_DDR_TX_THR BIT(10)
+#define SLV_INT_SDR_RX_THR BIT(9)
+#define SLV_INT_SDR_TX_THR BIT(8)
+#define SLV_INT_DDR_RX_UNF BIT(7)
+#define SLV_INT_DDR_TX_OVF BIT(6)
+#define SLV_INT_SDR_RX_UNF BIT(5)
+#define SLV_INT_SDR_TX_OVF BIT(4)
+#define SLV_INT_DDR_RD_COMP BIT(3)
+#define SLV_INT_DDR_WR_COMP BIT(2)
+#define SLV_INT_SDR_RD_COMP BIT(1)
+#define SLV_INT_SDR_WR_COMP BIT(0)
+
+#define SLV_STATUS0 0x54
+#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
+#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
+
+#define SLV_STATUS1 0x58
+#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
+#define SLV_STATUS1_VEN_TM BIT(19)
+#define SLV_STATUS1_HJ_DIS BIT(18)
+#define SLV_STATUS1_MR_DIS BIT(17)
+#define SLV_STATUS1_PROT_ERR BIT(16)
+#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_HAS_DA BIT(8)
+#define SLV_STATUS1_DDR_RX_FULL BIT(7)
+#define SLV_STATUS1_DDR_TX_FULL BIT(6)
+#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
+#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
+#define SLV_STATUS1_SDR_RX_FULL BIT(3)
+#define SLV_STATUS1_SDR_TX_FULL BIT(2)
+#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
+#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
+
+#define CMD0_FIFO 0x60
+#define CMD0_FIFO_IS_DDR BIT(31)
+#define CMD0_FIFO_IS_CCC BIT(30)
+#define CMD0_FIFO_BCH BIT(29)
+#define XMIT_BURST_STATIC_SUBADDR 0
+#define XMIT_SINGLE_INC_SUBADDR 1
+#define XMIT_SINGLE_STATIC_SUBADDR 2
+#define XMIT_BURST_WITHOUT_SUBADDR 3
+#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
+#define CMD0_FIFO_SBCA BIT(26)
+#define CMD0_FIFO_RSBC BIT(25)
+#define CMD0_FIFO_IS_10B BIT(24)
+#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
+#define CMD0_FIFO_PL_LEN_MAX 4095
+#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
+#define CMD0_FIFO_RNW BIT(0)
+
+#define CMD1_FIFO 0x64
+#define CMD1_FIFO_CMDID(id) ((id) << 24)
+#define CMD1_FIFO_CSRADDR(a) (a)
+#define CMD1_FIFO_CCC(id) (id)
+
+#define TX_FIFO 0x68
+
+#define IMD_CMD0 0x70
+#define IMD_CMD0_PL_LEN(l) ((l) << 12)
+#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
+#define IMD_CMD0_RNW BIT(0)
+
+#define IMD_CMD1 0x74
+#define IMD_CMD1_CCC(id) (id)
+
+#define IMD_DATA 0x78
+#define RX_FIFO 0x80
+#define IBI_DATA_FIFO 0x84
+#define SLV_DDR_TX_FIFO 0x88
+#define SLV_DDR_RX_FIFO 0x8c
+
+#define CMD_IBI_THR_CTRL 0x90
+#define IBIR_THR(t) ((t) << 24)
+#define CMDR_THR(t) ((t) << 16)
+#define IBI_THR(t) ((t) << 8)
+#define CMD_THR(t) (t)
+
+#define TX_RX_THR_CTRL 0x94
+#define RX_THR(t) ((t) << 16)
+#define TX_THR(t) (t)
+
+#define SLV_DDR_TX_RX_THR_CTRL 0x98
+#define SLV_DDR_RX_THR(t) ((t) << 16)
+#define SLV_DDR_TX_THR(t) (t)
+
+#define FLUSH_CTRL 0x9c
+#define FLUSH_IBI_RESP BIT(23)
+#define FLUSH_CMD_RESP BIT(22)
+#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
+#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
+#define FLUSH_IMM_FIFO BIT(20)
+#define FLUSH_IBI_FIFO BIT(19)
+#define FLUSH_RX_FIFO BIT(18)
+#define FLUSH_TX_FIFO BIT(17)
+#define FLUSH_CMD_FIFO BIT(16)
+
+#define TTO_PRESCL_CTRL0 0xb0
+#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
+#define TTO_PRESCL_CTRL0_DIVA(x) (x)
+
+#define TTO_PRESCL_CTRL1 0xb4
+#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
+#define TTO_PRESCL_CTRL1_DIVA(x) (x)
+
+#define DEVS_CTRL 0xb8
+#define DEVS_CTRL_DEV_CLR_SHIFT 16
+#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
+#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
+#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
+#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
+#define MAX_DEVS 16
+
+#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
+#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
+#define DEV_ID_RR0_HDR_CAP BIT(10)
+#define DEV_ID_RR0_IS_I3C BIT(9)
+#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
+#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
+ (((a) & GENMASK(9, 7)) << 6))
+#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
+ (((x) >> 6) & GENMASK(9, 7)))
+
+#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
+#define DEV_ID_RR1_PID_MSB(pid) (pid)
+
+#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
+#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
+#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
+#define DEV_ID_RR2_DCR(dcr) (dcr)
+#define DEV_ID_RR2_LVR(lvr) (lvr)
+
+#define SIR_MAP(x) (0x180 + ((x) * 4))
+#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
+#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
+#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
+#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
+#define DEV_ROLE_SLAVE 0
+#define DEV_ROLE_MASTER 1
+#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
+#define SIR_MAP_DEV_SLOW BIT(13)
+#define SIR_MAP_DEV_PL(l) ((l) << 8)
+#define SIR_MAP_PL_MAX GENMASK(4, 0)
+#define SIR_MAP_DEV_DA(a) ((a) << 1)
+#define SIR_MAP_DEV_ACK BIT(0)
+
+#define GPIR_WORD(x) (0x200 + ((x) * 4))
+#define GPI_REG(val, id) \
+ (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
+
+#define GPOR_WORD(x) (0x220 + ((x) * 4))
+#define GPO_REG(val, id) \
+ (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
+
+#define ASF_INT_STATUS 0x300
+#define ASF_INT_RAW_STATUS 0x304
+#define ASF_INT_MASK 0x308
+#define ASF_INT_TEST 0x30c
+#define ASF_INT_FATAL_SELECT 0x310
+#define ASF_INTEGRITY_ERR BIT(6)
+#define ASF_PROTOCOL_ERR BIT(5)
+#define ASF_TRANS_TIMEOUT_ERR BIT(4)
+#define ASF_CSR_ERR BIT(3)
+#define ASF_DAP_ERR BIT(2)
+#define ASF_SRAM_UNCORR_ERR BIT(1)
+#define ASF_SRAM_CORR_ERR BIT(0)
+
+#define ASF_SRAM_CORR_FAULT_STATUS 0x320
+#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
+#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
+#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
+
+#define ASF_SRAM_FAULT_STATS 0x328
+#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
+#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
+
+#define ASF_TRANS_TOUT_CTRL 0x330
+#define ASF_TRANS_TOUT_EN BIT(31)
+#define ASF_TRANS_TOUT_VAL(x) (x)
+
+#define ASF_TRANS_TOUT_FAULT_MASK 0x334
+#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
+#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
+#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
+#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
+#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
+
+#define ASF_PROTO_FAULT_MASK 0x340
+#define ASF_PROTO_FAULT_STATUS 0x344
+#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
+#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
+#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
+#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
+#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
+#define ASF_PROTO_FAULT_M(x) BIT(x)
+
+struct cdns_i3c_master_caps {
+ u32 cmdfifodepth;
+ u32 cmdrfifodepth;
+ u32 txfifodepth;
+ u32 rxfifodepth;
+ u32 ibirfifodepth;
+};
+
+struct cdns_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct cdns_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ struct cdns_i3c_cmd cmds[];
+};
+
+struct cdns_i3c_data {
+ u8 thd_delay_ns;
+};
+
+struct cdns_i3c_master {
+ struct work_struct hj_work;
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ unsigned int maxdevs;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock;
+ } ibi;
+ struct {
+ struct list_head list;
+ struct cdns_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *sysclk;
+ struct clk *pclk;
+ struct cdns_i3c_master_caps caps;
+ unsigned long i3c_scl_lim;
+ const struct cdns_i3c_data *devdata;
+};
+
+static inline struct cdns_i3c_master *
+to_cdns_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct cdns_i3c_master, base);
+}
+
+static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
+ const u8 *bytes, int nbytes)
+{
+ writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
+ writesl(master->regs + TX_FIFO, &tmp, 1);
+ }
+}
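+
+/*
+ * Example: for nbytes = 7, the helper above issues one full 32-bit word
+ * write (bytes 0-3), then packs the trailing bytes 4-6 into a zero-padded
+ * word, so the TX FIFO only ever receives whole words.
+ */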
+
+static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ readsl(master->regs + RX_FIFO, &tmp, 1);
+ memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
+static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_DEFSLVS:
+ case I3C_CCC_ENTHDR(0):
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETACCMST:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
+{
+ u32 status;
+
+ writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
+
+ return readl_poll_timeout(master->regs + MST_STATUS0, status,
+ status & MST_STATUS0_IDLE, 10, 1000000);
+}
+
+static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
+{
+ writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
+}
+
+static struct cdns_i3c_xfer *
+cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
+{
+ struct cdns_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
+{
+ struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i;
+
+ if (!xfer)
+ return;
+
+ writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
+ cmd->tx_len);
+ }
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
+
+ writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
+ master->regs + CMD1_FIFO);
+ writel(cmd->cmd0, master->regs + CMD0_FIFO);
+ }
+
+ writel(readl(master->regs + CTRL) | CTRL_MCS,
+ master->regs + CTRL);
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
+}
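+
+/*
+ * Each descriptor pushed above is tagged with its index in xfer->cmds via
+ * CMD1_FIFO_CMDID(i); the completion path relies on CMDR_CMDID() to match a
+ * response in the CMDR FIFO back to the command that produced it.
+ */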
+
+static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
+ u32 isr)
+{
+ struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+ u32 status0;
+
+ if (!xfer)
+ return;
+
+ if (!(isr & MST_INT_CMDD_EMP))
+ return;
+
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
+
+ for (status0 = readl(master->regs + MST_STATUS0);
+ !(status0 & MST_STATUS0_CMDR_EMP);
+ status0 = readl(master->regs + MST_STATUS0)) {
+ struct cdns_i3c_cmd *cmd;
+ u32 cmdr, rx_len, id;
+
+ cmdr = readl(master->regs + CMDR);
+ id = CMDR_CMDID(cmdr);
+ if (id == CMDR_CMDID_HJACK_DISEC ||
+ id == CMDR_CMDID_HJACK_ENTDAA ||
+ WARN_ON(id >= xfer->ncmds))
+ continue;
+
+ cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
+ rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
+ cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ cmd->error = CMDR_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ switch (xfer->cmds[i].error) {
+ case CMDR_NO_ERROR:
+ break;
+
+ case CMDR_DDR_PREAMBLE_ERROR:
+ case CMDR_DDR_PARITY_ERROR:
+ case CMDR_M0_ERROR:
+ case CMDR_M1_ERROR:
+ case CMDR_M2_ERROR:
+ case CMDR_MST_ABORT:
+ case CMDR_NACK_RESP:
+ case CMDR_DDR_DROPPED:
+ ret = -EIO;
+ break;
+
+ case CMDR_DDR_RX_FIFO_OVF:
+ case CMDR_DDR_TX_FIFO_UNF:
+ ret = -ENOSPC;
+ break;
+
+ case CMDR_INVALID_DA:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct cdns_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ cdns_i3c_master_start_xfer_locked(master);
+}
+
+static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
+ struct cdns_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ cdns_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
+ struct cdns_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur == xfer) {
+ u32 status;
+
+ writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
+ master->regs + CTRL);
+ readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
+ status & MST_STATUS0_IDLE, 10,
+ 1000000);
+ master->xferqueue.cur = NULL;
+ writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
+ FLUSH_CMD_RESP,
+ master->regs + FLUSH_CTRL);
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
+ writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
+ master->regs + CTRL);
+ } else {
+ list_del_init(&xfer->node);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
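+
+/*
+ * Unqueueing the in-flight transfer (the timeout path) disables the
+ * controller, waits for it to go idle, flushes the command/response and
+ * data FIFOs and re-enables it, so a stale response cannot be matched
+ * later against a transfer the caller is about to free.
+ */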
+
+static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case CMDR_M0_ERROR:
+ return I3C_ERROR_M0;
+
+ case CMDR_M1_ERROR:
+ return I3C_ERROR_M1;
+
+ case CMDR_M2_ERROR:
+ case CMDR_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_xfer *xfer;
+ struct cdns_i3c_cmd *ccmd;
+ int ret;
+
+ xfer = cdns_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
+ ccmd->cmd0 = CMD0_FIFO_IS_CCC |
+ CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
+ cdns_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
+ cdns_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ int txslots = 0, rxslots = 0, i, ret;
+ struct cdns_i3c_xfer *cdns_xfer;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
+ return -ENOTSUPP;
+ }
+
+ if (!nxfers)
+ return 0;
+
+ if (nxfers > master->caps.cmdfifodepth ||
+ nxfers > master->caps.cmdrfifodepth)
+ return -ENOTSUPP;
+
+ /*
+ * First make sure that all transactions (block of transfers separated
+ * by a STOP marker) fit in the FIFOs.
+ */
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].rnw)
+ rxslots += DIV_ROUND_UP(xfers[i].len, 4);
+ else
+ txslots += DIV_ROUND_UP(xfers[i].len, 4);
+ }
+
+ if (rxslots > master->caps.rxfifodepth ||
+ txslots > master->caps.txfifodepth)
+ return -ENOTSUPP;
+
+ cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
+ if (!cdns_xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
+ u32 pl_len = xfers[i].len;
+
+ ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
+ CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ pl_len++;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
+
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= CMD0_FIFO_RSBC;
+
+ if (!i)
+ ccmd->cmd0 |= CMD0_FIFO_BCH;
+ }
+
+ cdns_i3c_master_queue_xfer(master, cdns_xfer);
+ if (!wait_for_completion_timeout(&cdns_xfer->comp,
+ msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
+
+ ret = cdns_xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
+
+ cdns_i3c_master_free_xfer(cdns_xfer);
+
+ return ret;
+}
+
+static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct cdns_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
+ return -ENOTSUPP;
+
+ if (xfers[i].flags & I2C_M_RD)
+ nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.txfifodepth ||
+ nrxwords > master->caps.rxfifodepth)
+ return -ENOTSUPP;
+
+ xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
+ CMD0_FIFO_PL_LEN(xfers[i].len) |
+ CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
+
+ if (xfers[i].flags & I2C_M_TEN)
+ ccmd->cmd0 |= CMD0_FIFO_IS_10B;
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+ }
+
+ cdns_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ cdns_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+struct cdns_i3c_i2c_dev_data {
+ u16 id;
+ s16 ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
+static u32 prepare_rr0_dev_address(u32 addr)
+{
+ u32 ret = (addr << 1) & 0xff;
+
+ /* RR0[7:1] = addr[6:0] */
+ ret |= (addr & GENMASK(6, 0)) << 1;
+
+ /* RR0[15:13] = addr[9:7] */
+ ret |= (addr & GENMASK(9, 7)) << 6;
+
+ /* RR0[0] = ~XOR(addr[6:0]) */
+ if (!(hweight8(addr & 0x7f) & 1))
+ ret |= 1;
+
+ return ret;
+}
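+
+/*
+ * Worked example: for addr = 0x3a5, addr[6:0] = 0x25 lands in RR0[7:1] and
+ * addr[9:7] = 0x7 in RR0[15:13]; 0x25 has an odd number of set bits, so the
+ * odd-parity bit RR0[0] stays clear, giving RR0 = 0xe04a.
+ */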
+
+static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u32 rr;
+
+ rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
+ dev->info.dyn_addr :
+ dev->info.static_addr);
+ writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
+}
+
+static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
+ u8 dyn_addr)
+{
+ unsigned long activedevs;
+ u32 rr;
+ int i;
+
+ if (!dyn_addr) {
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+ }
+
+ activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ activedevs &= ~BIT(0);
+
+ for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
+ rr = readl(master->regs + DEV_ID_RR0(i));
+ if (!(rr & DEV_ID_RR0_IS_I3C) ||
+ DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
+ continue;
+
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ cdns_i3c_master_upd_i3c_addr(dev);
+
+ return 0;
+}
+
+static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data;
+ int slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->ibi = -1;
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
+ if (!dev->info.dyn_addr) {
+ cdns_i3c_master_upd_i3c_addr(dev);
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_ACTIVE(data->id),
+ master->regs + DEVS_CTRL);
+ }
+
+ return 0;
+}
+
+static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_CLR(data->id),
+ master->regs + DEVS_CTRL);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = cdns_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(prepare_rr0_dev_address(dev->addr),
+ master->regs + DEV_ID_RR0(data->id));
+ writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_ACTIVE(data->id),
+ master->regs + DEVS_CTRL);
+
+ return 0;
+}
+
+static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_CLR(data->id),
+ master->regs + DEVS_CTRL);
+ master->free_rr_slots |= BIT(data->id);
+
+ i2c_dev_set_master_data(dev, NULL);
+ kfree(data);
+}
+
+static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+
+ cdns_i3c_master_disable(master);
+}
+
+static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 rr;
+
+ memset(info, 0, sizeof(*info));
+ rr = readl(master->regs + DEV_ID_RR0(slot));
+ info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
+ rr = readl(master->regs + DEV_ID_RR2(slot));
+ info->dcr = rr;
+ info->bcr = rr >> 8;
+ info->pid = rr >> 16;
+ info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
+}
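+
+/*
+ * Layout recovered above: DEV_ID_RR2 packs DCR in bits [7:0], BCR in [15:8]
+ * and the two least significant PID bytes in [31:16]; DEV_ID_RR1 supplies
+ * the upper 32 PID bits, which together form the 48-bit provisioned ID.
+ */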
+
+static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ unsigned long i3c_lim_period, pres_step, ncycles;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ unsigned long new_i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u32 prescl1, ctrl;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ unsigned long max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = 8000000;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = 6000000;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = 4000000;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = 2000000;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = 0;
+ break;
+ }
+
+ if (max_fscl &&
+ (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
+ new_i3c_scl_lim = max_fscl;
+ }
+
+ /* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
+ if (new_i3c_scl_lim == master->i3c_scl_lim)
+ return;
+ master->i3c_scl_lim = new_i3c_scl_lim;
+ if (!new_i3c_scl_lim)
+ return;
+ pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
+
+ /* Configure PP_LOW to meet I3C slave limitations. */
+ prescl1 = readl(master->regs + PRESCL_CTRL1) &
+ ~PRESCL_CTRL1_PP_LOW_MASK;
+ ctrl = readl(master->regs + CTRL);
+
+ i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
+ ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
+ if (ncycles < 4)
+ ncycles = 0;
+ else
+ ncycles -= 4;
+
+ prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
+
+ /* Disable I3C master before updating PRESCL_CTRL1. */
+ if (ctrl & CTRL_DEV_EN)
+ cdns_i3c_master_disable(master);
+
+ writel(prescl1, master->regs + PRESCL_CTRL1);
+
+ if (ctrl & CTRL_DEV_EN)
+ cdns_i3c_master_enable(master);
+}
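+
+/*
+ * Numerical example for the PP_LOW computation above: with the bus at
+ * 12.5 MHz, pres_step = 1e9 / (12500000 * 4) = 20 ns. A slave limited to
+ * 6 MHz gives i3c_lim_period = DIV_ROUND_UP(1e9, 6000000) = 167 ns, hence
+ * ncycles = DIV_ROUND_UP(167, 20) - 4 = 5 extra SCL low cycles.
+ */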
+
+static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned long olddevs, newdevs;
+ int ret, slot;
+ u8 addrs[MAX_DEVS] = { };
+ u8 last_addr = 0;
+
+ olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ olddevs |= BIT(0);
+
+ /* Prepare RR slots before launching DAA. */
+ for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ last_addr = ret;
+ addrs[slot] = last_addr;
+ writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
+ master->regs + DEV_ID_RR0(slot));
+ writel(0, master->regs + DEV_ID_RR1(slot));
+ writel(0, master->regs + DEV_ID_RR2(slot));
+ }
+
+ ret = i3c_master_entdaa_locked(&master->base);
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ newdevs &= ~olddevs;
+
+	/*
+	 * Register all devices that got a dynamic address during DAA. We
+	 * already have the addresses assigned to them in the addrs array.
+	 */
+ for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
+ i3c_master_add_i3c_dev_locked(m, addrs[slot]);
+
+ /*
+ * Clear slots that ended up not being used. Can be caused by I3C
+ * device creation failure or when the I3C device was already known
+ * by the system but with a different address (in this case the device
+ * already has a slot and does not need a new one).
+ */
+ writel(readl(master->regs + DEVS_CTRL) |
+ master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
+ master->regs + DEVS_CTRL);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ cdns_i3c_master_upd_i3c_scl_lim(master);
+
+ /* Unmask Hot-Join and Mastership request interrupts. */
+ i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
+
+ return 0;
+}
+
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+ unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+ u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+ (NSEC_PER_SEC / sysclk_rate));
+
+ /* Every value greater than 3 is not valid. */
+ if (thd_delay > THD_DELAY_MAX)
+ thd_delay = THD_DELAY_MAX;
+
+ /* CTLR_THD_DEL value is encoded. */
+ return (THD_DELAY_MAX - thd_delay);
+}
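+
+/*
+ * Example: with a 166 MHz sysclk, NSEC_PER_SEC / sysclk_rate = 6 ns per
+ * cycle, so a 10 ns tHD_PP target needs DIV_ROUND_UP(10, 6) = 2 cycles,
+ * programmed in its encoded form as THD_DELAY_MAX - 2 = 1.
+ */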
+
+static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned long pres_step, sysclk_rate, max_i2cfreq;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+	u32 ctrl, prescl0, prescl1, pres;
+ struct i3c_device_info info = { };
+ int ret, ncycles;
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_PURE:
+ ctrl = CTRL_PURE_BUS_MODE;
+ break;
+
+ case I3C_BUS_MODE_MIXED_FAST:
+ ctrl = CTRL_MIXED_FAST_BUS_MODE;
+ break;
+
+ case I3C_BUS_MODE_MIXED_SLOW:
+ ctrl = CTRL_MIXED_SLOW_BUS_MODE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ sysclk_rate = clk_get_rate(master->sysclk);
+ if (!sysclk_rate)
+ return -EINVAL;
+
+ pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
+ if (pres > PRESCL_CTRL0_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
+
+ prescl0 = PRESCL_CTRL0_I3C(pres);
+
+
+ max_i2cfreq = bus->scl_rate.i2c;
+
+ pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
+ if (pres > PRESCL_CTRL0_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
+
+ prescl0 |= PRESCL_CTRL0_I2C(pres);
+ writel(prescl0, master->regs + PRESCL_CTRL0);
+
+ /* Calculate OD and PP low. */
+ pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
+ ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
+ if (ncycles < 0)
+ ncycles = 0;
+ prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
+ writel(prescl1, master->regs + PRESCL_CTRL1);
+
+ /* Get an address for the master. */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
+ master->regs + DEV_ID_RR0(0));
+
+ cdns_i3c_master_dev_rr_to_info(master, 0, &info);
+ if (info.bcr & I3C_BCR_HDR_CAP)
+ info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable Hot-Join, and, when a Hot-Join request happens, disable all
+ * events coming from this device.
+ *
+ * We will issue ENTDAA afterwards from the threaded IRQ handler.
+ */
+ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+ /*
+ * Configure data hold delay based on device-specific data.
+ *
+	 * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing
+	 * on the master output. This setting allows that timing to be met on
+	 * the master's SoC outputs, regardless of PCB balancing.
+ */
+ ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
+ writel(ctrl, master->regs + CTRL);
+
+ cdns_i3c_master_enable(master);
+
+ return 0;
+}
+
+static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
+ u32 ibir)
+{
+ struct cdns_i3c_i2c_dev_data *data;
+ bool data_consumed = false;
+ struct i3c_ibi_slot *slot;
+ u32 id = IBIR_SLVID(ibir);
+ struct i3c_dev_desc *dev;
+ size_t nbytes;
+ u8 *buf;
+
+ /*
+ * FIXME: maybe we should report the FIFO OVF errors to the upper
+ * layer.
+ */
+ if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
+ goto out;
+
+ dev = master->ibi.slots[id];
+ spin_lock(&master->ibi.lock);
+
+ data = i3c_dev_get_master_data(dev);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ goto out_unlock;
+
+ buf = slot->data;
+
+ nbytes = IBIR_XFER_BYTES(ibir);
+ readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
+	if (nbytes & 3) {
+ u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
+
+ memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+
+ slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
+ dev->ibi->max_payload_len);
+ i3c_master_queue_ibi(dev, slot);
+ data_consumed = true;
+
+out_unlock:
+ spin_unlock(&master->ibi.lock);
+
+out:
+ /* Consume data from the FIFO if it's not been done already. */
+ if (!data_consumed) {
+ int i;
+
+ for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
+ readl(master->regs + IBI_DATA_FIFO);
+ }
+}
+
+static void cdns_i3c_master_demux_ibis(struct cdns_i3c_master *master)
+{
+ u32 status0;
+
+ writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
+
+ for (status0 = readl(master->regs + MST_STATUS0);
+ !(status0 & MST_STATUS0_IBIR_EMP);
+ status0 = readl(master->regs + MST_STATUS0)) {
+ u32 ibir = readl(master->regs + IBIR);
+
+ switch (IBIR_TYPE(ibir)) {
+ case IBIR_TYPE_IBI:
+ cdns_i3c_master_handle_ibi(master, ibir);
+ break;
+
+ case IBIR_TYPE_HJ:
+ WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ queue_work(master->base.wq, &master->hj_work);
+ break;
+
+ case IBIR_TYPE_MR:
+ WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
+{
+ struct cdns_i3c_master *master = data;
+ u32 status;
+
+ status = readl(master->regs + MST_ISR);
+ if (!(status & readl(master->regs + MST_IMR)))
+ return IRQ_NONE;
+
+ spin_lock(&master->xferqueue.lock);
+ cdns_i3c_master_end_xfer_locked(master, status);
+ spin_unlock(&master->xferqueue.lock);
+
+ if (status & MST_INT_IBIR_THR)
+		cdns_i3c_master_demux_ibis(master);
+
+ return IRQ_HANDLED;
+}
+
+static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ u32 sirmap;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi,
+ SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ return ret;
+}
+
+static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ u32 sircfg, sirmap;
+ int ret;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
+ SIR_MAP_DEV_DA(dev->info.dyn_addr) |
+ SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
+ SIR_MAP_DEV_ACK;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
+ sircfg |= SIR_MAP_DEV_SLOW;
+
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+ if (ret) {
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi,
+ SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+ }
+
+ return ret;
+}
+
+static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ master->ibi.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
+ .bus_init = cdns_i3c_master_bus_init,
+ .bus_cleanup = cdns_i3c_master_bus_cleanup,
+ .do_daa = cdns_i3c_master_do_daa,
+ .attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
+ .supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
+ .priv_xfers = cdns_i3c_master_priv_xfers,
+ .i2c_xfers = cdns_i3c_master_i2c_xfers,
+ .enable_ibi = cdns_i3c_master_enable_ibi,
+ .disable_ibi = cdns_i3c_master_disable_ibi,
+ .request_ibi = cdns_i3c_master_request_ibi,
+ .free_ibi = cdns_i3c_master_free_ibi,
+ .recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
+};
+
+static void cdns_i3c_master_hj(struct work_struct *work)
+{
+ struct cdns_i3c_master *master = container_of(work,
+ struct cdns_i3c_master,
+ hj_work);
+
+ i3c_master_do_daa(&master->base);
+}
+
+static const struct cdns_i3c_data cdns_i3c_devdata = {
+ .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+ { /* sentinel */ },
+};
+
+static int cdns_i3c_master_probe(struct platform_device *pdev)
+{
+ struct cdns_i3c_master *master;
+ int ret, irq;
+ u32 val;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->devdata = of_device_get_match_data(&pdev->dev);
+ if (!master->devdata)
+ return -EINVAL;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
+ master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
+ if (IS_ERR(master->sysclk))
+ return PTR_ERR(master->sysclk);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->sysclk);
+ if (ret)
+ goto err_disable_pclk;
+
+ if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
+ ret = -EINVAL;
+ goto err_disable_sysclk;
+ }
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
+ writel(0xffffffff, master->regs + MST_IDR);
+ writel(0xffffffff, master->regs + SLV_IDR);
+ ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_disable_sysclk;
+
+ platform_set_drvdata(pdev, master);
+
+ val = readl(master->regs + CONF_STATUS0);
+
+ /* Device ID0 is reserved to describe this master. */
+ master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ master->free_rr_slots = GENMASK(master->maxdevs, 1);
+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ val = readl(master->regs + CONF_STATUS1);
+ master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
+ goto err_disable_sysclk;
+ }
+
+ writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
+ writel(MST_INT_IBIR_THR, master->regs + MST_IER);
+ writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
+
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &cdns_i3c_master_ops, false);
+ if (ret)
+ goto err_disable_sysclk;
+
+ return 0;
+
+err_disable_sysclk:
+ clk_disable_unprepare(master->sysclk);
+
+err_disable_pclk:
+ clk_disable_unprepare(master->pclk);
+
+ return ret;
+}
+
+static int cdns_i3c_master_remove(struct platform_device *pdev)
+{
+ struct cdns_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(master->sysclk);
+ clk_disable_unprepare(master->pclk);
+
+ return 0;
+}
+
+static struct platform_driver cdns_i3c_master = {
+ .probe = cdns_i3c_master_probe,
+ .remove = cdns_i3c_master_remove,
+ .driver = {
+ .name = "cdns-i3c-master",
+ .of_match_table = cdns_i3c_master_of_ids,
+ },
+};
+module_platform_driver(cdns_i3c_master);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("Cadence I3C master driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cdns-i3c-master");
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
new file mode 100644
index 000000000..a658e7b82
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o
+mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
+ cmd_v1.o cmd_v2.o \
+ dat_v1.o dct_v1.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd.h b/drivers/i3c/master/mipi-i3c-hci/cmd.h
new file mode 100644
index 000000000..1d6dd2c5d
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common command/response related stuff
+ */
+
+#ifndef CMD_H
+#define CMD_H
+
+/*
+ * Those bits are common to all descriptor formats and
+ * may be manipulated by the core code.
+ */
+#define CMD_0_TOC W0_BIT_(31)
+#define CMD_0_ROC W0_BIT_(30)
+#define CMD_0_ATTR W0_MASK(2, 0)
+
+/*
+ * Response Descriptor Structure
+ */
+#define RESP_STATUS(resp) FIELD_GET(GENMASK(31, 28), resp)
+#define RESP_TID(resp) FIELD_GET(GENMASK(27, 24), resp)
+#define RESP_DATA_LENGTH(resp) FIELD_GET(GENMASK(21, 0), resp)
+
+#define RESP_ERR_FIELD GENMASK(31, 28)
+
+enum hci_resp_err {
+ RESP_SUCCESS = 0x0,
+ RESP_ERR_CRC = 0x1,
+ RESP_ERR_PARITY = 0x2,
+ RESP_ERR_FRAME = 0x3,
+ RESP_ERR_ADDR_HEADER = 0x4,
+ RESP_ERR_BCAST_NACK_7E = 0x4,
+ RESP_ERR_NACK = 0x5,
+ RESP_ERR_OVL = 0x6,
+ RESP_ERR_I3C_SHORT_READ = 0x7,
+ RESP_ERR_HC_TERMINATED = 0x8,
+ RESP_ERR_I2C_WR_DATA_NACK = 0x9,
+ RESP_ERR_BUS_XFER_ABORTED = 0x9,
+ RESP_ERR_NOT_SUPPORTED = 0xa,
+ RESP_ERR_ABORTED_WITH_CRC = 0xb,
+ /* 0xc to 0xf are reserved for transfer specific errors */
+};
+
+/* TID generation (4 bits wide in all cases) */
+#define hci_get_tid(bits) \
+ (atomic_inc_return_relaxed(&hci->next_cmd_tid) % (1U << 4))
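+/*
+ * Note: the bits argument above is unused; the returned TID simply wraps
+ * modulo 16, cycling through 0..15 as commands are issued.
+ */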
+
+/* This abstracts operations with our command descriptor formats */
+struct hci_cmd_ops {
+ int (*prep_ccc)(struct i3c_hci *hci, struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw);
+ void (*prep_i3c_xfer)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ void (*prep_i2c_xfer)(struct i3c_hci *hci, struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ int (*perform_daa)(struct i3c_hci *hci);
+};
+
+/* Our various instances */
+extern const struct hci_cmd_ops mipi_i3c_hci_cmd_v1;
+extern const struct hci_cmd_ops mipi_i3c_hci_cmd_v2;
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
new file mode 100644
index 000000000..d97c3175e
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * I3C HCI v1.0/v1.1 Command Descriptor Handling
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "dat.h"
+#include "dct.h"
+
+
+/*
+ * Address Assignment Command
+ */
+
+#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
+
+#define CMD_A0_TOC W0_BIT_(31)
+#define CMD_A0_ROC W0_BIT_(30)
+#define CMD_A0_DEV_COUNT(v) FIELD_PREP(W0_MASK(29, 26), v)
+#define CMD_A0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_A0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Immediate Data Transfer Command
+ */
+
+#define CMD_0_ATTR_I FIELD_PREP(CMD_0_ATTR, 0x1)
+
+#define CMD_I1_DATA_BYTE_4(v) FIELD_PREP(W1_MASK(63, 56), v)
+#define CMD_I1_DATA_BYTE_3(v) FIELD_PREP(W1_MASK(55, 48), v)
+#define CMD_I1_DATA_BYTE_2(v) FIELD_PREP(W1_MASK(47, 40), v)
+#define CMD_I1_DATA_BYTE_1(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_I1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_I0_TOC W0_BIT_(31)
+#define CMD_I0_ROC W0_BIT_(30)
+#define CMD_I0_RNW W0_BIT_(29)
+#define CMD_I0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_I0_DTT(v) FIELD_PREP(W0_MASK(25, 23), v)
+#define CMD_I0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_I0_CP W0_BIT_(15)
+#define CMD_I0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_I0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Regular Data Transfer Command
+ */
+
+#define CMD_0_ATTR_R FIELD_PREP(CMD_0_ATTR, 0x0)
+
+#define CMD_R1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
+#define CMD_R1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_R0_TOC W0_BIT_(31)
+#define CMD_R0_ROC W0_BIT_(30)
+#define CMD_R0_RNW W0_BIT_(29)
+#define CMD_R0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_R0_DBP W0_BIT_(25)
+#define CMD_R0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_R0_CP W0_BIT_(15)
+#define CMD_R0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_R0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Combo Transfer (Write + Write/Read) Command
+ */
+
+#define CMD_0_ATTR_C FIELD_PREP(CMD_0_ATTR, 0x3)
+
+#define CMD_C1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
+#define CMD_C1_OFFSET(v) FIELD_PREP(W1_MASK(47, 32), v)
+#define CMD_C0_TOC W0_BIT_(31)
+#define CMD_C0_ROC W0_BIT_(30)
+#define CMD_C0_RNW W0_BIT_(29)
+#define CMD_C0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_C0_16_BIT_SUBOFFSET W0_BIT_(25)
+#define CMD_C0_FIRST_PHASE_MODE W0_BIT_(24)
+#define CMD_C0_DATA_LENGTH_POSITION(v) FIELD_PREP(W0_MASK(23, 22), v)
+#define CMD_C0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_C0_CP W0_BIT_(15)
+#define CMD_C0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_C0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Internal Control Command
+ */
+
+#define CMD_0_ATTR_M FIELD_PREP(CMD_0_ATTR, 0x7)
+
+#define CMD_M1_VENDOR_SPECIFIC W1_MASK(63, 32)
+#define CMD_M0_MIPI_RESERVED W0_MASK(31, 12)
+#define CMD_M0_MIPI_CMD W0_MASK(11, 8)
+#define CMD_M0_VENDOR_INFO_PRESENT W0_BIT_( 7)
+#define CMD_M0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+
+/* Data Transfer Speed and Mode */
+enum hci_cmd_mode {
+ MODE_I3C_SDR0 = 0x0,
+ MODE_I3C_SDR1 = 0x1,
+ MODE_I3C_SDR2 = 0x2,
+ MODE_I3C_SDR3 = 0x3,
+ MODE_I3C_SDR4 = 0x4,
+ MODE_I3C_HDR_TSx = 0x5,
+ MODE_I3C_HDR_DDR = 0x6,
+ MODE_I3C_HDR_BT = 0x7,
+ MODE_I3C_Fm_FmP = 0x8,
+ MODE_I2C_Fm = 0x0,
+ MODE_I2C_FmP = 0x1,
+ MODE_I2C_UD1 = 0x2,
+ MODE_I2C_UD2 = 0x3,
+ MODE_I2C_UD3 = 0x4,
+};
+
+static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i3c >= 12500000)
+ return MODE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+ return MODE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 6000000)
+ return MODE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 4000000)
+ return MODE_I3C_SDR3;
+ if (bus->scl_rate.i3c > 2000000)
+ return MODE_I3C_SDR4;
+ return MODE_I3C_Fm_FmP;
+}
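+
+/*
+ * Example: a 9 MHz bus rate falls through the first test (it is below
+ * 12.5 MHz) and matches the second (above 8 MHz), so transfers are issued
+ * in MODE_I3C_SDR1.
+ */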
+
+static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i2c >= 1000000)
+ return MODE_I2C_FmP;
+ return MODE_I2C_Fm;
+}
+
+static void fill_data_bytes(struct hci_xfer *xfer, u8 *data,
+ unsigned int data_len)
+{
+ xfer->cmd_desc[1] = 0;
+ switch (data_len) {
+ case 4:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_4(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_3(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_2(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_1(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+}
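+
+/*
+ * The switch above packs up to four payload bytes directly into the second
+ * command descriptor word and clears xfer->data, signalling that no
+ * separate data phase needs to be set up for such transfers.
+ */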
+
+static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
+ struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw)
+{
+ unsigned int dat_idx = 0;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+ int ret;
+
+ /* this should never happen */
+ if (WARN_ON(raw))
+ return -EINVAL;
+
+ if (ccc_addr != I3C_BROADCAST_ADDR) {
+ ret = mipi_i3c_hci_dat_v1.get_index(hci, ccc_addr);
+ if (ret < 0)
+ return ret;
+ dat_idx = ret;
+ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_CMD(ccc_cmd) | CMD_I0_CP |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_CMD(ccc_cmd) | CMD_R0_CP |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+
+ return 0;
+}
+
+static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+}
+
+static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci *hci,
+ struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+ enum hci_cmd_mode mode = get_i2c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+}
+
+static int hci_cmd_v1_daa(struct i3c_hci *hci)
+{
+ struct hci_xfer *xfer;
+ int ret, dat_idx = -1;
+ u8 next_addr = 0;
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ xfer = hci_alloc_xfer(2);
+ if (!xfer)
+ return -ENOMEM;
+
+ /*
+ * Simple for now: we allocate a temporary DAT entry, do a single
+ * DAA, register the device which will allocate its own DAT entry
+ * via the core callback, then free the temporary DAT entry.
+	 * Loop until there are no more devices to assign an address to.
+ * Yes, there is room for improvements.
+ */
+ for (;;) {
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0)
+ break;
+ dat_idx = ret;
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
+
+ DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
+ mipi_i3c_hci_dct_index_reset(hci);
+
+ xfer->cmd_tid = hci_get_tid();
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer->cmd_tid) |
+ CMD_A0_CMD(I3C_CCC_ENTDAA) |
+ CMD_A0_DEV_INDEX(dat_idx) |
+ CMD_A0_DEV_COUNT(1) |
+ CMD_A0_ROC | CMD_A0_TOC;
+		xfer->cmd_desc[1] = 0;
+		/* hook the on-stack completion up so the IRQ path can wake us */
+		xfer->completion = &done;
+		hci->io->queue_xfer(hci, xfer, 1);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 1)) {
+ ret = -ETIME;
+ break;
+ }
+		if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
+		    RESP_DATA_LENGTH(xfer[0].response) == 1) {
+ ret = 0; /* no more devices to be assigned */
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ break;
+ }
+
+ i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
+ DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
+
+ mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
+ dat_idx = -1;
+
+ /*
+ * TODO: Extend the subsystem layer to allow for registering
+ * new device and provide BCR/DCR/PID at the same time.
+ */
+ ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
+ if (ret)
+ break;
+ }
+
+ if (dat_idx >= 0)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
+ hci_free_xfer(xfer, 1);
+ return ret;
+}
+
+const struct hci_cmd_ops mipi_i3c_hci_cmd_v1 = {
+ .prep_ccc = hci_cmd_v1_prep_ccc,
+ .prep_i3c_xfer = hci_cmd_v1_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v1_prep_i2c_xfer,
+ .perform_daa = hci_cmd_v1_daa,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
new file mode 100644
index 000000000..4493b2b06
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * I3C HCI v2.0 Command Descriptor Handling
+ *
+ * Note: The I3C HCI v2.0 spec is still in flux. The code here will change.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "xfer_mode_rate.h"
+
+
+/*
+ * Unified Data Transfer Command
+ */
+
+#define CMD_0_ATTR_U FIELD_PREP(CMD_0_ATTR, 0x4)
+
+#define CMD_U3_HDR_TSP_ML_CTRL(v) FIELD_PREP(W3_MASK(107, 104), v)
+#define CMD_U3_IDB4(v) FIELD_PREP(W3_MASK(103, 96), v)
+#define CMD_U3_HDR_CMD(v) FIELD_PREP(W3_MASK(103, 96), v)
+#define CMD_U2_IDB3(v) FIELD_PREP(W2_MASK( 95, 88), v)
+#define CMD_U2_HDR_BT(v) FIELD_PREP(W2_MASK( 95, 88), v)
+#define CMD_U2_IDB2(v) FIELD_PREP(W2_MASK( 87, 80), v)
+#define CMD_U2_BT_CMD2(v) FIELD_PREP(W2_MASK( 87, 80), v)
+#define CMD_U2_IDB1(v) FIELD_PREP(W2_MASK( 79, 72), v)
+#define CMD_U2_BT_CMD1(v) FIELD_PREP(W2_MASK( 79, 72), v)
+#define CMD_U2_IDB0(v) FIELD_PREP(W2_MASK( 71, 64), v)
+#define CMD_U2_BT_CMD0(v) FIELD_PREP(W2_MASK( 71, 64), v)
+#define CMD_U1_ERR_HANDLING(v) FIELD_PREP(W1_MASK( 63, 62), v)
+#define CMD_U1_ADD_FUNC(v) FIELD_PREP(W1_MASK( 61, 56), v)
+#define CMD_U1_COMBO_XFER W1_BIT_( 55)
+#define CMD_U1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
+#define CMD_U0_TOC W0_BIT_( 31)
+#define CMD_U0_ROC W0_BIT_( 30)
+#define CMD_U0_MAY_YIELD W0_BIT_( 29)
+#define CMD_U0_NACK_RCNT(v) FIELD_PREP(W0_MASK( 28, 27), v)
+#define CMD_U0_IDB_COUNT(v) FIELD_PREP(W0_MASK( 26, 24), v)
+#define CMD_U0_MODE_INDEX(v) FIELD_PREP(W0_MASK( 22, 18), v)
+#define CMD_U0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
+#define CMD_U0_DEV_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
+#define CMD_U0_RnW W0_BIT_( 7)
+#define CMD_U0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Address Assignment Command
+ */
+
+#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
+
+#define CMD_A1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
+#define CMD_A0_TOC W0_BIT_( 31)
+#define CMD_A0_ROC W0_BIT_( 30)
+#define CMD_A0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
+#define CMD_A0_ASSIGN_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
+#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+
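+/*
+ * Map the I3C SCL rate configured on the bus to the closest standard
+ * transfer rate index defined in xfer_mode_rate.h, from full-speed
+ * SDR0 down to the FM/FM+ compatible rate.
+ */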
+static unsigned int get_i3c_rate_idx(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i3c >= 12000000)
+ return XFERRATE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+ return XFERRATE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 6000000)
+ return XFERRATE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 4000000)
+ return XFERRATE_I3C_SDR3;
+ if (bus->scl_rate.i3c > 2000000)
+ return XFERRATE_I3C_SDR4;
+ return XFERRATE_I3C_SDR_FM_FMP;
+}
+
+static unsigned int get_i2c_rate_idx(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i2c >= 1000000)
+ return XFERRATE_I2C_FMP;
+ return XFERRATE_I2C_FM;
+}
+
+static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer,
+ u8 addr, unsigned int mode,
+ unsigned int rate)
+{
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
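+	/*
+	 * Short writes of up to 5 bytes are carried entirely within the
+	 * command descriptor as Immediate Data Bytes (IDB0..IDB4), so no
+	 * separate data buffer is needed.
+	 */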
+ if (!rnw && data_len <= 5) {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ CMD_U0_DEV_ADDRESS(addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(data_len);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(0);
+ xfer->cmd_desc[2] = 0;
+ xfer->cmd_desc[3] = 0;
+ switch (data_len) {
+ case 5:
+ xfer->cmd_desc[3] |= CMD_U3_IDB4(data[4]);
+ fallthrough;
+ case 4:
+ xfer->cmd_desc[2] |= CMD_U2_IDB3(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[2] |= CMD_U2_IDB2(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[2] |= CMD_U2_IDB1(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[2] |= CMD_U2_IDB0(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+ } else {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ (rnw ? CMD_U0_RnW : 0) |
+ CMD_U0_DEV_ADDRESS(addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(data_len);
+ xfer->cmd_desc[2] = 0;
+ xfer->cmd_desc[3] = 0;
+ }
+}
+
+static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw)
+{
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ if (raw && ccc_addr != I3C_BROADCAST_ADDR) {
+ hci_cmd_v2_prep_private_xfer(hci, xfer, ccc_addr, mode, rate);
+ return 0;
+ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ CMD_U0_DEV_ADDRESS(ccc_addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(data_len + (!raw ? 0 : 1));
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(0);
+ xfer->cmd_desc[2] =
+ CMD_U2_IDB0(ccc_cmd);
+ xfer->cmd_desc[3] = 0;
+ switch (data_len) {
+ case 4:
+ xfer->cmd_desc[3] |= CMD_U3_IDB4(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[2] |= CMD_U2_IDB3(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[2] |= CMD_U2_IDB2(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[2] |= CMD_U2_IDB1(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+ } else {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ (rnw ? CMD_U0_RnW : 0) |
+ CMD_U0_DEV_ADDRESS(ccc_addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(!raw ? 0 : 1);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(data_len);
+ xfer->cmd_desc[2] =
+ CMD_U2_IDB0(ccc_cmd);
+ xfer->cmd_desc[3] = 0;
+ }
+
+ return 0;
+}
+
+static void hci_cmd_v2_prep_i3c_xfer(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+ u8 addr = dev->info.dyn_addr;
+
+ hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
+}
+
+static void hci_cmd_v2_prep_i2c_xfer(struct i3c_hci *hci,
+ struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ unsigned int mode = XFERMODE_IDX_I2C;
+ unsigned int rate = get_i2c_rate_idx(hci);
+ u8 addr = dev->addr;
+
+ hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
+}
+
+static int hci_cmd_v2_daa(struct i3c_hci *hci)
+{
+ struct hci_xfer *xfer;
+ int ret;
+ u8 next_addr = 0;
+ u32 device_id[2];
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ xfer = hci_alloc_xfer(2);
+ if (!xfer)
+ return -ENOMEM;
+
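+	/*
+	 * Each iteration uses a pair of Address Assignment descriptors:
+	 * xfer[0] reads back the 8 bytes of target ID (PID/BCR/DCR),
+	 * then xfer[1] assigns the dynamic address.
+	 */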
+ xfer[0].data = &device_id;
+ xfer[0].data_len = 8;
+ xfer[0].rnw = true;
+ xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
+ xfer[1].completion = &done;
+
+ for (;;) {
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
+ DBG("next_addr = 0x%02x", next_addr);
+ xfer[0].cmd_tid = hci_get_tid();
+ xfer[0].cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer[0].cmd_tid) |
+ CMD_A0_ROC;
+ xfer[1].cmd_tid = hci_get_tid();
+ xfer[1].cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer[1].cmd_tid) |
+ CMD_A0_ASSIGN_ADDRESS(next_addr) |
+ CMD_A0_ROC |
+ CMD_A0_TOC;
+ hci->io->queue_xfer(hci, xfer, 2);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 2)) {
+ ret = -ETIME;
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ ret = 0; /* no more devices to be assigned */
+ break;
+ }
+ if (RESP_STATUS(xfer[1].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ break;
+ }
+
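+		/*
+		 * device_id[0] holds PID[31:0]; device_id[1] packs
+		 * PID[47:32], BCR and DCR.
+		 */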
+ pid = FIELD_GET(W1_MASK(47, 32), device_id[1]);
+ pid = (pid << 32) | device_id[0];
+ bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
+ dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
+ DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
+ /*
+ * TODO: Extend the subsystem layer to allow for registering
+ * new device and provide BCR/DCR/PID at the same time.
+ */
+ ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
+ if (ret)
+ break;
+ }
+
+ hci_free_xfer(xfer, 2);
+ return ret;
+}
+
+const struct hci_cmd_ops mipi_i3c_hci_cmd_v2 = {
+ .prep_ccc = hci_cmd_v2_prep_ccc,
+ .prep_i3c_xfer = hci_cmd_v2_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v2_prep_i2c_xfer,
+ .perform_daa = hci_cmd_v2_daa,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
new file mode 100644
index 000000000..6aef5ce43
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Core driver code with main interface to the I3C subsystem.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hci.h"
+#include "ext_caps.h"
+#include "cmd.h"
+#include "dat.h"
+
+
+/*
+ * Host Controller Capabilities and Operation Registers
+ */
+
+#define reg_read(r) readl(hci->base_regs + (r))
+#define reg_write(r, v) writel(v, hci->base_regs + (r))
+#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
+#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
+
+#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
+
+#define HC_CONTROL 0x04
+#define HC_CONTROL_BUS_ENABLE BIT(31)
+#define HC_CONTROL_RESUME BIT(30)
+#define HC_CONTROL_ABORT BIT(29)
+#define HC_CONTROL_HALT_ON_CMD_TIMEOUT BIT(12)
+#define HC_CONTROL_HOT_JOIN_CTRL BIT(8) /* Hot-Join ACK/NACK Control */
+#define HC_CONTROL_I2C_TARGET_PRESENT BIT(7)
+#define HC_CONTROL_PIO_MODE BIT(6) /* DMA/PIO Mode Selector */
+#define HC_CONTROL_DATA_BIG_ENDIAN BIT(4)
+#define HC_CONTROL_IBA_INCLUDE BIT(0) /* Include I3C Broadcast Address */
+
+#define MASTER_DEVICE_ADDR 0x08 /* Master Device Address */
+#define MASTER_DYNAMIC_ADDR_VALID BIT(31) /* Dynamic Address is Valid */
+#define MASTER_DYNAMIC_ADDR(v) FIELD_PREP(GENMASK(22, 16), v)
+
+#define HC_CAPABILITIES 0x0c
+#define HC_CAP_SG_DC_EN BIT(30)
+#define HC_CAP_SG_IBI_EN BIT(29)
+#define HC_CAP_SG_CR_EN BIT(28)
+#define HC_CAP_MAX_DATA_LENGTH GENMASK(24, 22)
+#define HC_CAP_CMD_SIZE GENMASK(21, 20)
+#define HC_CAP_DIRECT_COMMANDS_EN BIT(18)
+#define HC_CAP_MULTI_LANE_EN BIT(15)
+#define HC_CAP_CMD_CCC_DEFBYTE BIT(10)
+#define HC_CAP_HDR_BT_EN BIT(8)
+#define HC_CAP_HDR_TS_EN BIT(7)
+#define HC_CAP_HDR_DDR_EN BIT(6)
+#define HC_CAP_NON_CURRENT_MASTER_CAP BIT(5) /* master handoff capable */
+#define HC_CAP_DATA_BYTE_CFG_EN BIT(4) /* endian selection possible */
+#define HC_CAP_AUTO_COMMAND BIT(3)
+#define HC_CAP_COMBO_COMMAND BIT(2)
+
+#define RESET_CONTROL 0x10
+#define BUS_RESET BIT(31)
+#define BUS_RESET_TYPE GENMASK(30, 29)
+#define IBI_QUEUE_RST BIT(5)
+#define RX_FIFO_RST BIT(4)
+#define TX_FIFO_RST BIT(3)
+#define RESP_QUEUE_RST BIT(2)
+#define CMD_QUEUE_RST BIT(1)
+#define SOFT_RST BIT(0) /* Core Reset */
+
+#define PRESENT_STATE 0x14
+#define STATE_CURRENT_MASTER BIT(2)
+
+#define INTR_STATUS 0x20
+#define INTR_STATUS_ENABLE 0x24
+#define INTR_SIGNAL_ENABLE 0x28
+#define INTR_FORCE 0x2c
+#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
+#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
+#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */
+#define INTR_HC_RINGS GENMASK(7, 0)
+
+#define DAT_SECTION 0x30 /* Device Address Table */
+#define DAT_ENTRY_SIZE GENMASK(31, 28)
+#define DAT_TABLE_SIZE GENMASK(18, 12)
+#define DAT_TABLE_OFFSET GENMASK(11, 0)
+
+#define DCT_SECTION 0x34 /* Device Characteristics Table */
+#define DCT_ENTRY_SIZE GENMASK(31, 28)
+#define DCT_TABLE_INDEX GENMASK(23, 19)
+#define DCT_TABLE_SIZE GENMASK(18, 12)
+#define DCT_TABLE_OFFSET GENMASK(11, 0)
+
+#define RING_HEADERS_SECTION 0x38
+#define RING_HEADERS_OFFSET GENMASK(15, 0)
+
+#define PIO_SECTION 0x3c
+#define PIO_REGS_OFFSET GENMASK(15, 0) /* PIO Offset */
+
+#define EXT_CAPS_SECTION 0x40
+#define EXT_CAPS_OFFSET GENMASK(15, 0)
+
+#define IBI_NOTIFY_CTRL 0x58 /* IBI Notify Control */
+#define IBI_NOTIFY_SIR_REJECTED BIT(3) /* Rejected Target Interrupt Request */
+#define IBI_NOTIFY_MR_REJECTED BIT(1) /* Rejected Master Request Control */
+#define IBI_NOTIFY_HJ_REJECTED BIT(0) /* Rejected Hot-Join Control */
+
+#define DEV_CTX_BASE_LO 0x60
+#define DEV_CTX_BASE_HI 0x64
+
+
+static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
+{
+ return container_of(m, struct i3c_hci, master);
+}
+
+static int i3c_hci_bus_init(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_device_info info;
+ int ret;
+
+ DBG("");
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.init(hci);
+ if (ret)
+ return ret;
+ }
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+ reg_write(MASTER_DEVICE_ADDR,
+ MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+ ret = i3c_master_set_info(m, &info);
+ if (ret)
+ return ret;
+
+ ret = hci->io->init(hci);
+ if (ret)
+ return ret;
+
+ reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+
+ return 0;
+}
+
+static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ DBG("");
+
+ reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ hci->io->cleanup(hci);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.cleanup(hci);
+}
+
+void mipi_i3c_hci_resume(struct i3c_hci *hci)
+{
+ /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
+ reg_write(HC_CONTROL, reg_read(HC_CONTROL));
+}
+
+/* located here rather than pio.c because needed bits are in core reg space */
+void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
+{
+ reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
+}
+
+/* located here rather than dct.c because needed bits are in core reg space */
+void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
+{
+ reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
+}
+
+static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
+ bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
+ unsigned int nxfers = ccc->ndests + prefixed;
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+ DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
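+	/*
+	 * With the RAW_CCC quirk, a directed CCC must be preceded by a
+	 * separate transfer broadcasting the CCC code itself, hence the
+	 * extra descriptor slot allocated at the front.
+	 */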
+ if (prefixed) {
+ xfer->data = NULL;
+ xfer->data_len = 0;
+ xfer->rnw = false;
+ hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
+ ccc->id, true);
+ xfer++;
+ }
+
+ for (i = 0; i < nxfers - prefixed; i++) {
+ xfer[i].data = ccc->dests[i].payload.data;
+ xfer[i].data_len = ccc->dests[i].payload.len;
+ xfer[i].rnw = ccc->rnw;
+ ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
+ ccc->id, raw);
+ if (ret)
+ goto out;
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ if (prefixed)
+ xfer--;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = prefixed; i < nxfers; i++) {
+ if (ccc->rnw)
+ ccc->dests[i - prefixed].payload.len =
+ RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (ccc->rnw)
+ DBG("got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_daa(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ DBG("");
+
+ return hci->cmd->perform_daa(hci);
+}
+
+static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ DECLARE_COMPLETION_ONSTACK(done);
+ unsigned int size_limit;
+ int i, last, ret = 0;
+
+ DBG("nxfers = %d", nxfers);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
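+	/* the maximum data length is encoded as a power of two from 64 KiB up */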
+ size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));
+
+ for (i = 0; i < nxfers; i++) {
+ xfer[i].data_len = i3c_xfers[i].len;
+ ret = -EFBIG;
+ if (xfer[i].data_len >= size_limit)
+ goto out;
+ xfer[i].rnw = i3c_xfers[i].rnw;
+ if (i3c_xfers[i].rnw) {
+ xfer[i].data = i3c_xfers[i].data.in;
+ } else {
+ /* silence the const qualifier warning with a cast */
+ xfer[i].data = (void *) i3c_xfers[i].data.out;
+ }
+ hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (i3c_xfers[i].rnw)
+ i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers, int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+ DBG("nxfers = %d", nxfers);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ xfer[i].data = i2c_xfers[i].buf;
+ xfer[i].data_len = i2c_xfers[i].len;
+ xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
+ hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data;
+ int ret;
+
+ DBG("");
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
+ dev_data->dat_idx = ret;
+ }
+ i3c_dev_set_master_data(dev, dev_data);
+ return 0;
+}
+
+static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ DBG("");
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
+ dev->info.dyn_addr);
+ return 0;
+}
+
+static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ DBG("");
+
+ i3c_dev_set_master_data(dev, NULL);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
+ kfree(dev_data);
+}
+
+static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data;
+ int ret;
+
+ DBG("");
+
+ if (hci->cmd != &mipi_i3c_hci_cmd_v1)
+ return 0;
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+ mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
+ mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
+ dev_data->dat_idx = ret;
+ i2c_dev_set_master_data(dev, dev_data);
+ return 0;
+}
+
+static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
+
+ DBG("");
+
+ if (dev_data) {
+ i2c_dev_set_master_data(dev, NULL);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
+ kfree(dev_data);
+ }
+}
+
+static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+
+ if (req->max_payload_len != 0)
+ mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
+ else
+ mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
+ return hci->io->request_ibi(hci, dev, req);
+}
+
+static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ hci->io->free_ibi(hci, dev);
+}
+
+static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
+ return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
+ return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ hci->io->recycle_ibi_slot(hci, dev, slot);
+}
+
+static const struct i3c_master_controller_ops i3c_hci_ops = {
+ .bus_init = i3c_hci_bus_init,
+ .bus_cleanup = i3c_hci_bus_cleanup,
+ .do_daa = i3c_hci_daa,
+ .send_ccc_cmd = i3c_hci_send_ccc_cmd,
+ .priv_xfers = i3c_hci_priv_xfers,
+ .i2c_xfers = i3c_hci_i2c_xfers,
+ .attach_i3c_dev = i3c_hci_attach_i3c_dev,
+ .reattach_i3c_dev = i3c_hci_reattach_i3c_dev,
+ .detach_i3c_dev = i3c_hci_detach_i3c_dev,
+ .attach_i2c_dev = i3c_hci_attach_i2c_dev,
+ .detach_i2c_dev = i3c_hci_detach_i2c_dev,
+ .request_ibi = i3c_hci_request_ibi,
+ .free_ibi = i3c_hci_free_ibi,
+ .enable_ibi = i3c_hci_enable_ibi,
+ .disable_ibi = i3c_hci_disable_ibi,
+ .recycle_ibi_slot = i3c_hci_recycle_ibi_slot,
+};
+
+static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
+{
+ struct i3c_hci *hci = dev_id;
+ irqreturn_t result = IRQ_NONE;
+ u32 val;
+
+ val = reg_read(INTR_STATUS);
+ DBG("INTR_STATUS = %#x", val);
+
+ if (val) {
+ reg_write(INTR_STATUS, val);
+ } else {
+ /* v1.0 does not have PIO cascaded notification bits */
+ val |= INTR_HC_PIO;
+ }
+
+ if (val & INTR_HC_RESET_CANCEL) {
+ DBG("cancelled reset");
+ val &= ~INTR_HC_RESET_CANCEL;
+ }
+ if (val & INTR_HC_INTERNAL_ERR) {
+ dev_err(&hci->master.dev, "Host Controller Internal Error\n");
+ val &= ~INTR_HC_INTERNAL_ERR;
+ }
+ if (val & INTR_HC_PIO) {
+ hci->io->irq_handler(hci, 0);
+ val &= ~INTR_HC_PIO;
+ }
+ if (val & INTR_HC_RINGS) {
+ hci->io->irq_handler(hci, val & INTR_HC_RINGS);
+ val &= ~INTR_HC_RINGS;
+ }
+ if (val)
+ dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
+ else
+ result = IRQ_HANDLED;
+
+ return result;
+}
+
+static int i3c_hci_init(struct i3c_hci *hci)
+{
+ u32 regval, offset;
+ int ret;
+
+ /* Validate HCI hardware version */
+ regval = reg_read(HCI_VERSION);
+ hci->version_major = (regval >> 8) & 0xf;
+ hci->version_minor = (regval >> 4) & 0xf;
+ hci->revision = regval & 0xf;
+ dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
+ hci->version_major, hci->version_minor, hci->revision);
+ /* known versions */
+ switch (regval & ~0xf) {
+ case 0x100: /* version 1.0 */
+ case 0x110: /* version 1.1 */
+ case 0x200: /* version 2.0 */
+ break;
+ default:
+ dev_err(&hci->master.dev, "unsupported HCI version\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ hci->caps = reg_read(HC_CAPABILITIES);
+ DBG("caps = %#x", hci->caps);
+
+ regval = reg_read(DAT_SECTION);
+ offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
+ hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
+ hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
+ dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
+ hci->DAT_entries, hci->DAT_entry_size * 4, offset);
+
+ regval = reg_read(DCT_SECTION);
+ offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
+ hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
+ hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
+ dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
+ hci->DCT_entries, hci->DCT_entry_size * 4, offset);
+
+ regval = reg_read(RING_HEADERS_SECTION);
+ offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
+ hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);
+
+ regval = reg_read(PIO_SECTION);
+ offset = FIELD_GET(PIO_REGS_OFFSET, regval);
+ hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);
+
+ regval = reg_read(EXT_CAPS_SECTION);
+ offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
+ hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);
+
+ ret = i3c_hci_parse_ext_caps(hci);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's reset the hardware.
+ * SOFT_RST must be clear before we write to it.
+ * Then we must wait until it clears again.
+ */
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 1, 10000);
+ if (ret)
+ return -ENXIO;
+ reg_write(RESET_CONTROL, SOFT_RST);
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 1, 10000);
+ if (ret)
+ return -ENXIO;
+
+ /* Disable all interrupts and allow all signal updates */
+ reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+
+ /* Make sure our data ordering fits the host's */
+ regval = reg_read(HC_CONTROL);
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
+ regval |= HC_CONTROL_DATA_BIG_ENDIAN;
+ reg_write(HC_CONTROL, regval);
+ regval = reg_read(HC_CONTROL);
+ if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
+ dev_err(&hci->master.dev, "cannot set BE mode\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
+ regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
+ reg_write(HC_CONTROL, regval);
+ regval = reg_read(HC_CONTROL);
+ if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
+ dev_err(&hci->master.dev, "cannot clear BE mode\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* Select our command descriptor model */
+ switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
+ case 0:
+ hci->cmd = &mipi_i3c_hci_cmd_v1;
+ break;
+ case 1:
+ hci->cmd = &mipi_i3c_hci_cmd_v2;
+ break;
+ default:
+ dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
+ return -EINVAL;
+ }
+
+ /* Try activating DMA operations first */
+ if (hci->RHS_regs) {
+ reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
+ if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ dev_err(&hci->master.dev, "PIO mode is stuck\n");
+ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_dma;
+ dev_info(&hci->master.dev, "Using DMA\n");
+ }
+ }
+
+ /* If no DMA, try PIO */
+ if (!hci->io && hci->PIO_regs) {
+ reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
+ if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
+ dev_err(&hci->master.dev, "DMA mode is stuck\n");
+ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_pio;
+ dev_info(&hci->master.dev, "Using PIO\n");
+ }
+ }
+
+ if (!hci->io) {
+ dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i3c_hci_probe(struct platform_device *pdev)
+{
+ struct i3c_hci *hci;
+ int irq, ret;
+
+ hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+ hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hci->base_regs))
+ return PTR_ERR(hci->base_regs);
+
+ platform_set_drvdata(pdev, hci);
+ /* temporary for dev_printk's, to be replaced in i3c_master_register */
+ hci->master.dev.init_name = dev_name(&pdev->dev);
+
+ ret = i3c_hci_init(hci);
+ if (ret)
+ return ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
+			       0, NULL, hci);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_register(&hci->master, &pdev->dev,
+ &i3c_hci_ops, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int i3c_hci_remove(struct platform_device *pdev)
+{
+ struct i3c_hci *hci = platform_get_drvdata(pdev);
+
+ return i3c_master_unregister(&hci->master);
+}
+
+static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
+ { .compatible = "mipi-i3c-hci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+
+static struct platform_driver i3c_hci_driver = {
+ .probe = i3c_hci_probe,
+ .remove = i3c_hci_remove,
+ .driver = {
+ .name = "mipi-i3c-hci",
+ .of_match_table = of_match_ptr(i3c_hci_of_match),
+ },
+};
+module_platform_driver(i3c_hci_driver);
+
+MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
+MODULE_DESCRIPTION("MIPI I3C HCI driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat.h b/drivers/i3c/master/mipi-i3c-hci/dat.h
new file mode 100644
index 000000000..1f0f345c3
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dat.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common DAT related stuff
+ */
+
+#ifndef DAT_H
+#define DAT_H
+
+/* Global DAT flags */
+#define DAT_0_I2C_DEVICE W0_BIT_(31)
+#define DAT_0_SIR_REJECT W0_BIT_(13)
+#define DAT_0_IBI_PAYLOAD W0_BIT_(12)
+
+struct hci_dat_ops {
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
+ int (*alloc_entry)(struct i3c_hci *hci);
+ void (*free_entry)(struct i3c_hci *hci, unsigned int dat_idx);
+ void (*set_dynamic_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_static_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_flags)(struct i3c_hci *hci, unsigned int dat_idx, u32 w0, u32 w1);
+ void (*clear_flags)(struct i3c_hci *hci, unsigned int dat_idx, u32 w0, u32 w1);
+ int (*get_index)(struct i3c_hci *hci, u8 address);
+};
+
+extern const struct hci_dat_ops mipi_i3c_hci_dat_v1;
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
new file mode 100644
index 000000000..47b9b4d4e
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "dat.h"
+
+
+/*
+ * Device Address Table Structure
+ */
+
+#define DAT_1_AUTOCMD_HDR_CODE W1_MASK(58, 51)
+#define DAT_1_AUTOCMD_MODE W1_MASK(50, 48)
+#define DAT_1_AUTOCMD_VALUE W1_MASK(47, 40)
+#define DAT_1_AUTOCMD_MASK W1_MASK(39, 32)
+/* DAT_0_I2C_DEVICE W0_BIT_(31) */
+#define DAT_0_DEV_NACK_RETRY_CNT W0_MASK(30, 29)
+#define DAT_0_RING_ID W0_MASK(28, 26)
+#define DAT_0_DYNADDR_PARITY W0_BIT_(23)
+#define DAT_0_DYNAMIC_ADDRESS W0_MASK(22, 16)
+#define DAT_0_TS W0_BIT_(15)
+#define DAT_0_MR_REJECT W0_BIT_(14)
+/* DAT_0_SIR_REJECT W0_BIT_(13) */
+/* DAT_0_IBI_PAYLOAD W0_BIT_(12) */
+#define DAT_0_STATIC_ADDRESS W0_MASK(6, 0)
+
+#define dat_w0_read(i) readl(hci->DAT_regs + (i) * 8)
+#define dat_w1_read(i) readl(hci->DAT_regs + (i) * 8 + 4)
+#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
+#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
+
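+/*
+ * Compute the parity adjustment bit for a 7-bit dynamic address.
+ * Bit 7 is forced on before summing the bits together so that the
+ * resulting LSB is the odd-parity bit expected in DAT_0_DYNADDR_PARITY.
+ * The additions act like an XOR reduction on the LSB since carries
+ * never propagate down into bit 0.
+ */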
+static inline bool dynaddr_parity(unsigned int addr)
+{
+ addr |= 1 << 7;
+ addr += addr >> 4;
+ addr += addr >> 2;
+ addr += addr >> 1;
+ return (addr & 1);
+}
+
+static int hci_dat_v1_init(struct i3c_hci *hci)
+{
+ unsigned int dat_idx;
+
+ if (!hci->DAT_regs) {
+ dev_err(&hci->master.dev,
+ "only DAT in register space is supported at the moment\n");
+ return -EOPNOTSUPP;
+ }
+ if (hci->DAT_entry_size != 8) {
+ dev_err(&hci->master.dev,
+ "only 8-bytes DAT entries are supported at the moment\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hci->DAT_data) {
+ /* use a bitmap for faster free slot search */
+ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+ if (!hci->DAT_data)
+ return -ENOMEM;
+
+ /* clear them */
+ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+{
+ bitmap_free(hci->DAT_data);
+ hci->DAT_data = NULL;
+}
+
+static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+{
+ unsigned int dat_idx;
+ int ret;
+
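+	/* set up the DAT lazily on first use */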
+ if (!hci->DAT_data) {
+ ret = hci_dat_v1_init(hci);
+ if (ret)
+ return ret;
+ }
+ dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ if (dat_idx >= hci->DAT_entries)
+ return -ENOENT;
+ __set_bit(dat_idx, hci->DAT_data);
+
+ /* default flags */
+ dat_w0_write(dat_idx, DAT_0_SIR_REJECT | DAT_0_MR_REJECT);
+
+ return dat_idx;
+}
+
+static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+{
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ if (hci->DAT_data)
+ __clear_bit(dat_idx, hci->DAT_data);
+}
+
+static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+{
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
+ dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
+ (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ dat_w0_write(dat_idx, dat_w0);
+}
+
+static void hci_dat_v1_set_static_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+{
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w0 &= ~DAT_0_STATIC_ADDRESS;
+ dat_w0 |= FIELD_PREP(DAT_0_STATIC_ADDRESS, address);
+ dat_w0_write(dat_idx, dat_w0);
+}
+
+static void hci_dat_v1_set_flags(struct i3c_hci *hci, unsigned int dat_idx,
+ u32 w0_flags, u32 w1_flags)
+{
+ u32 dat_w0, dat_w1;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w1 = dat_w1_read(dat_idx);
+ dat_w0 |= w0_flags;
+ dat_w1 |= w1_flags;
+ dat_w0_write(dat_idx, dat_w0);
+ dat_w1_write(dat_idx, dat_w1);
+}
+
+static void hci_dat_v1_clear_flags(struct i3c_hci *hci, unsigned int dat_idx,
+ u32 w0_flags, u32 w1_flags)
+{
+ u32 dat_w0, dat_w1;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w1 = dat_w1_read(dat_idx);
+ dat_w0 &= ~w0_flags;
+ dat_w1 &= ~w1_flags;
+ dat_w0_write(dat_idx, dat_w0);
+ dat_w1_write(dat_idx, dat_w1);
+}
+
+static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
+{
+ unsigned int dat_idx;
+ u32 dat_w0;
+
+ for_each_set_bit(dat_idx, hci->DAT_data, hci->DAT_entries) {
+ dat_w0 = dat_w0_read(dat_idx);
+ if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
+ return dat_idx;
+ }
+
+ return -ENODEV;
+}
+
+const struct hci_dat_ops mipi_i3c_hci_dat_v1 = {
+ .init = hci_dat_v1_init,
+ .cleanup = hci_dat_v1_cleanup,
+ .alloc_entry = hci_dat_v1_alloc_entry,
+ .free_entry = hci_dat_v1_free_entry,
+ .set_dynamic_addr = hci_dat_v1_set_dynamic_addr,
+ .set_static_addr = hci_dat_v1_set_static_addr,
+ .set_flags = hci_dat_v1_set_flags,
+ .clear_flags = hci_dat_v1_clear_flags,
+ .get_index = hci_dat_v1_get_index,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/dct.h b/drivers/i3c/master/mipi-i3c-hci/dct.h
new file mode 100644
index 000000000..1028e0b40
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dct.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common DCT related stuff
+ */
+
+#ifndef DCT_H
+#define DCT_H
+
+void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
+ u64 *pid, unsigned int *dcr, unsigned int *bcr);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/dct_v1.c b/drivers/i3c/master/mipi-i3c-hci/dct_v1.c
new file mode 100644
index 000000000..acfd4d60f
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dct_v1.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/device.h>
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "dct.h"
+
+/*
+ * Device Characteristic Table
+ */
+
+void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
+ u64 *pid, unsigned int *dcr, unsigned int *bcr)
+{
+ void __iomem *reg = hci->DCT_regs + dct_idx * 4 * 4;
+ u32 dct_entry_data[4];
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ dct_entry_data[i] = readl(reg);
+ reg += 4;
+ }
+
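+	/* PID[47:16] is in word 0, PID[15:0] in the low half of word 1 */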
+ *pid = ((u64)dct_entry_data[0]) << (47 - 32 + 1) |
+ FIELD_GET(W1_MASK(47, 32), dct_entry_data[1]);
+ *dcr = FIELD_GET(W2_MASK(71, 64), dct_entry_data[2]);
+ *bcr = FIELD_GET(W2_MASK(79, 72), dct_entry_data[2]);
+}
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
new file mode 100644
index 000000000..71b5dbe45
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
+ * v1.x of the spec and v2.0 will likely be split out.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "ibi.h"
+
+
+/*
+ * Software Parameter Values (somewhat arbitrary for now).
+ * Some of them could be determined at run time eventually.
+ */
+
+#define XFER_RINGS 1 /* max: 8 */
+#define XFER_RING_ENTRIES 16 /* max: 255 */
+
+#define IBI_RINGS 1 /* max: 8 */
+#define IBI_STATUS_RING_ENTRIES 32 /* max: 255 */
+#define IBI_CHUNK_CACHELINES 1 /* max: 256 bytes equivalent */
+#define IBI_CHUNK_POOL_SIZE 128 /* max: 1023 */
+
+/*
+ * Ring Header Preamble
+ */
+
+#define rhs_reg_read(r) readl(hci->RHS_regs + (RHS_##r))
+#define rhs_reg_write(r, v) writel(v, hci->RHS_regs + (RHS_##r))
+
+#define RHS_CONTROL 0x00
+#define PREAMBLE_SIZE GENMASK(31, 24) /* Preamble Section Size */
+#define HEADER_SIZE GENMASK(23, 16) /* Ring Header Size */
+#define MAX_HEADER_COUNT_CAP GENMASK(7, 4) /* HC Max Header Count */
+#define MAX_HEADER_COUNT GENMASK(3, 0) /* Driver Max Header Count */
+
+#define RHS_RHn_OFFSET(n) (0x04 + (n)*4)
+
+/*
+ * Ring Header (Per-Ring Bundle)
+ */
+
+#define rh_reg_read(r) readl(rh->regs + (RH_##r))
+#define rh_reg_write(r, v) writel(v, rh->regs + (RH_##r))
+
+#define RH_CR_SETUP 0x00 /* Command/Response Ring */
+#define CR_XFER_STRUCT_SIZE GENMASK(31, 24)
+#define CR_RESP_STRUCT_SIZE GENMASK(23, 16)
+#define CR_RING_SIZE GENMASK(8, 0)
+
+#define RH_IBI_SETUP 0x04
+#define IBI_STATUS_STRUCT_SIZE GENMASK(31, 24)
+#define IBI_STATUS_RING_SIZE GENMASK(23, 16)
+#define IBI_DATA_CHUNK_SIZE GENMASK(12, 10)
+#define IBI_DATA_CHUNK_COUNT GENMASK(9, 0)
+
+#define RH_CHUNK_CONTROL 0x08
+
+#define RH_INTR_STATUS 0x10
+#define RH_INTR_STATUS_ENABLE 0x14
+#define RH_INTR_SIGNAL_ENABLE 0x18
+#define RH_INTR_FORCE 0x1c
+#define INTR_IBI_READY BIT(12)
+#define INTR_TRANSFER_COMPLETION BIT(11)
+#define INTR_RING_OP BIT(10)
+#define INTR_TRANSFER_ERR BIT(9)
+#define INTR_WARN_INS_STOP_MODE BIT(7)
+#define INTR_IBI_RING_FULL BIT(6)
+#define INTR_TRANSFER_ABORT BIT(5)
+
+#define RH_RING_STATUS 0x20
+#define RING_STATUS_LOCKED BIT(3)
+#define RING_STATUS_ABORTED BIT(2)
+#define RING_STATUS_RUNNING BIT(1)
+#define RING_STATUS_ENABLED BIT(0)
+
+#define RH_RING_CONTROL 0x24
+#define RING_CTRL_ABORT BIT(2)
+#define RING_CTRL_RUN_STOP BIT(1)
+#define RING_CTRL_ENABLE BIT(0)
+
+#define RH_RING_OPERATION1 0x28
+#define RING_OP1_IBI_DEQ_PTR GENMASK(23, 16)
+#define RING_OP1_CR_SW_DEQ_PTR GENMASK(15, 8)
+#define RING_OP1_CR_ENQ_PTR GENMASK(7, 0)
+
+#define RH_RING_OPERATION2 0x2c
+#define RING_OP2_IBI_ENQ_PTR GENMASK(23, 16)
+#define RING_OP2_CR_DEQ_PTR GENMASK(7, 0)
+
+#define RH_CMD_RING_BASE_LO 0x30
+#define RH_CMD_RING_BASE_HI 0x34
+#define RH_RESP_RING_BASE_LO 0x38
+#define RH_RESP_RING_BASE_HI 0x3c
+#define RH_IBI_STATUS_RING_BASE_LO 0x40
+#define RH_IBI_STATUS_RING_BASE_HI 0x44
+#define RH_IBI_DATA_RING_BASE_LO 0x48
+#define RH_IBI_DATA_RING_BASE_HI 0x4c
+
+#define RH_CMD_RING_SG 0x50 /* Ring Scatter Gather Support */
+#define RH_RESP_RING_SG 0x54
+#define RH_IBI_STATUS_RING_SG 0x58
+#define RH_IBI_DATA_RING_SG 0x5c
+#define RING_SG_BLP BIT(31) /* Buffer Vs. List Pointer */
+#define RING_SG_LIST_SIZE GENMASK(15, 0)
+
+/*
+ * Data Buffer Descriptor (in memory)
+ */
+
+#define DATA_BUF_BLP BIT(31) /* Buffer Vs. List Pointer */
+#define DATA_BUF_IOC BIT(30) /* Interrupt on Completion */
+#define DATA_BUF_BLOCK_SIZE GENMASK(15, 0)
+
+
+struct hci_rh_data {
+ void __iomem *regs;
+ void *xfer, *resp, *ibi_status, *ibi_data;
+ dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
+ unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
+ unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
+ unsigned int done_ptr, ibi_chunk_ptr;
+ struct hci_xfer **src_xfers;
+ spinlock_t lock;
+ struct completion op_done;
+};
+
+struct hci_rings_data {
+ unsigned int total;
+ struct hci_rh_data headers[];
+};
+
+struct hci_dma_dev_ibi_data {
+ struct i3c_generic_ibi_pool *pool;
+ unsigned int max_len;
+};
+
+static inline u32 lo32(dma_addr_t physaddr)
+{
+ return physaddr;
+}
+
+static inline u32 hi32(dma_addr_t physaddr)
+{
+ /* trickery to avoid compiler warnings on 32-bit build targets */
+ if (sizeof(dma_addr_t) > 4) {
+ u64 hi = physaddr;
+ return hi >> 32;
+ }
+ return 0;
+}
+
+static void hci_dma_cleanup(struct i3c_hci *hci)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh;
+ unsigned int i;
+
+ if (!rings)
+ return;
+
+ for (i = 0; i < rings->total; i++) {
+ rh = &rings->headers[i];
+
+ rh_reg_write(RING_CONTROL, 0);
+ rh_reg_write(CR_SETUP, 0);
+ rh_reg_write(IBI_SETUP, 0);
+ rh_reg_write(INTR_SIGNAL_ENABLE, 0);
+
+ if (rh->xfer)
+ dma_free_coherent(&hci->master.dev,
+ rh->xfer_struct_sz * rh->xfer_entries,
+ rh->xfer, rh->xfer_dma);
+ if (rh->resp)
+ dma_free_coherent(&hci->master.dev,
+ rh->resp_struct_sz * rh->xfer_entries,
+ rh->resp, rh->resp_dma);
+ kfree(rh->src_xfers);
+ if (rh->ibi_status)
+ dma_free_coherent(&hci->master.dev,
+ rh->ibi_status_sz * rh->ibi_status_entries,
+ rh->ibi_status, rh->ibi_status_dma);
+ if (rh->ibi_data_dma)
+ dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ rh->ibi_chunk_sz * rh->ibi_chunks_total,
+ DMA_FROM_DEVICE);
+ kfree(rh->ibi_data);
+ }
+
+ rhs_reg_write(CONTROL, 0);
+
+ kfree(rings);
+ hci->io_data = NULL;
+}
+
+static int hci_dma_init(struct i3c_hci *hci)
+{
+ struct hci_rings_data *rings;
+ struct hci_rh_data *rh;
+ u32 regval;
+ unsigned int i, nr_rings, xfers_sz, resps_sz;
+ unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
+ int ret;
+
+ regval = rhs_reg_read(CONTROL);
+ nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
+ dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
+ if (unlikely(nr_rings > 8)) {
+ dev_err(&hci->master.dev, "number of rings should be <= 8\n");
+ nr_rings = 8;
+ }
+ if (nr_rings > XFER_RINGS)
+ nr_rings = XFER_RINGS;
+ rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
+ if (!rings)
+ return -ENOMEM;
+ hci->io_data = rings;
+ rings->total = nr_rings;
+
+ for (i = 0; i < rings->total; i++) {
+ u32 offset = rhs_reg_read(RHn_OFFSET(i));
+
+ dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
+ ret = -EINVAL;
+ if (!offset)
+ goto err_out;
+ rh = &rings->headers[i];
+ rh->regs = hci->base_regs + offset;
+ spin_lock_init(&rh->lock);
+ init_completion(&rh->op_done);
+
+ rh->xfer_entries = XFER_RING_ENTRIES;
+
+ regval = rh_reg_read(CR_SETUP);
+ rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
+ rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
+ DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
+ xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
+ resps_sz = rh->resp_struct_sz * rh->xfer_entries;
+
+ rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ &rh->xfer_dma, GFP_KERNEL);
+ rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ &rh->resp_dma, GFP_KERNEL);
+ rh->src_xfers =
+ kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
+ GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!rh->xfer || !rh->resp || !rh->src_xfers)
+ goto err_out;
+
+ rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
+ rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
+ rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
+ rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));
+
+ regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
+ rh_reg_write(CR_SETUP, regval);
+
+ rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
+ INTR_TRANSFER_COMPLETION |
+ INTR_RING_OP |
+ INTR_TRANSFER_ERR |
+ INTR_WARN_INS_STOP_MODE |
+ INTR_IBI_RING_FULL |
+ INTR_TRANSFER_ABORT);
+
+ /* IBIs */
+
+ if (i >= IBI_RINGS)
+ goto ring_ready;
+
+ regval = rh_reg_read(IBI_SETUP);
+ rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
+ rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
+ rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;
+
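+		/*
+		 * Chunks are sized in whole cachelines (see the
+		 * IBI_CHUNK_CACHELINES parameter above) and may not
+		 * exceed 256 bytes, hence the BUG_ON below.
+		 */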
+ rh->ibi_chunk_sz = dma_get_cache_alignment();
+ rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
+ BUG_ON(rh->ibi_chunk_sz > 256);
+
+ ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
+ ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
+
+ rh->ibi_status =
+ dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ &rh->ibi_status_dma, GFP_KERNEL);
+ rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!rh->ibi_status || !rh->ibi_data)
+ goto err_out;
+ rh->ibi_data_dma =
+ dma_map_single(&hci->master.dev, rh->ibi_data,
+ ibi_data_ring_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ rh->ibi_data_dma = 0;
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
+ rh->ibi_status_entries) |
+ FIELD_PREP(IBI_DATA_CHUNK_SIZE,
+ ilog2(rh->ibi_chunk_sz) - 2) |
+ FIELD_PREP(IBI_DATA_CHUNK_COUNT,
+ rh->ibi_chunks_total);
+ rh_reg_write(IBI_SETUP, regval);
+
+ regval = rh_reg_read(INTR_SIGNAL_ENABLE);
+ regval |= INTR_IBI_READY;
+ rh_reg_write(INTR_SIGNAL_ENABLE, regval);
+
+ring_ready:
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ }
+
+ regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+ rhs_reg_write(CONTROL, regval);
+ return 0;
+
+err_out:
+ hci_dma_cleanup(hci);
+ return ret;
+}
+
+static void hci_dma_unmap_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, unsigned int n)
+{
+ struct hci_xfer *xfer;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ xfer = xfer_list + i;
+ dma_unmap_single(&hci->master.dev,
+ xfer->data_dma, xfer->data_len,
+ xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, int n)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh;
+ unsigned int i, ring, enqueue_ptr;
+ u32 op1_val, op2_val;
+
+ /* For now we only use ring 0 */
+ ring = 0;
+ rh = &rings->headers[ring];
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+
+ /* store cmd descriptor */
+ *ring_data++ = xfer->cmd_desc[0];
+ *ring_data++ = xfer->cmd_desc[1];
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = xfer->cmd_desc[2];
+ *ring_data++ = xfer->cmd_desc[3];
+ }
+
+ /* first word of Data Buffer Descriptor Structure */
+ if (!xfer->data)
+ xfer->data_len = 0;
+ *ring_data++ =
+ FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
+ ((i == n - 1) ? DATA_BUF_IOC : 0);
+
+ /* 2nd and 3rd words of Data Buffer Descriptor Structure */
+ if (xfer->data) {
+ xfer->data_dma =
+ dma_map_single(&hci->master.dev,
+ xfer->data,
+ xfer->data_len,
+ xfer->rnw ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hci->master.dev,
+ xfer->data_dma)) {
+ hci_dma_unmap_xfer(hci, xfer_list, i);
+ return -ENOMEM;
+ }
+ *ring_data++ = lo32(xfer->data_dma);
+ *ring_data++ = hi32(xfer->data_dma);
+ } else {
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
+
+ /* remember corresponding xfer struct */
+ rh->src_xfers[enqueue_ptr] = xfer;
+ /* remember corresponding ring/entry for this xfer structure */
+ xfer->ring_number = ring;
+ xfer->ring_entry = enqueue_ptr;
+
+ enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
+
+ /*
+ * We may update the hardware view of the enqueue pointer
+ * only if we didn't reach its dequeue pointer.
+ */
+ op2_val = rh_reg_read(RING_OPERATION2);
+ if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
+ /* the ring is full */
+ hci_dma_unmap_xfer(hci, xfer_list, i + 1);
+ return -EBUSY;
+ }
+ }
+
+ /* take care to update the hardware enqueue pointer atomically */
+ spin_lock_irq(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_ENQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock_irq(&rh->lock);
+
+ return 0;
+}
+
+static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, int n)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
+ unsigned int i;
+ bool did_unqueue = false;
+
+ /* stop the ring */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
+ if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
+ /*
+		 * We're deep in it if this condition is ever met.
+		 * Hardware might still be writing to memory, etc.
+		 * Better to suspend the world than risk silent corruption.
+ */
+ dev_crit(&hci->master.dev, "unable to abort the ring\n");
+ BUG();
+ }
+
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ int idx = xfer->ring_entry;
+
+ /*
+ * At the time the abort happened, the xfer might have
+ * completed already. If not then replace corresponding
+ * descriptor entries with a no-op.
+ */
+ if (idx >= 0) {
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
+
+ /* store no-op cmd descriptor */
+ *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
+ *ring_data++ = 0;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
+
+ /* disassociate this xfer struct */
+ rh->src_xfers[idx] = NULL;
+
+ /* and unmap it */
+ hci_dma_unmap_xfer(hci, xfer, 1);
+
+ did_unqueue = true;
+ }
+ }
+
+ /* restart the ring */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+
+ return did_unqueue;
+}
+
+static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+{
+ u32 op1_val, op2_val, resp, *ring_resp;
+ unsigned int tid, done_ptr = rh->done_ptr;
+ struct hci_xfer *xfer;
+
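+	/* process response entries until we catch up with the HW dequeue pointer */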
+ for (;;) {
+ op2_val = rh_reg_read(RING_OPERATION2);
+ if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
+ break;
+
+ ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
+ resp = *ring_resp;
+ tid = RESP_TID(resp);
+ DBG("resp = 0x%08x", resp);
+
+ xfer = rh->src_xfers[done_ptr];
+ if (!xfer) {
+ DBG("orphaned ring entry");
+ } else {
+ hci_dma_unmap_xfer(hci, xfer, 1);
+ xfer->ring_entry = -1;
+ xfer->response = resp;
+ if (tid != xfer->cmd_tid) {
+ dev_err(&hci->master.dev,
+ "response tid=%d when expecting %d\n",
+ tid, xfer->cmd_tid);
+ /* TODO: do something about it? */
+ }
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+
+ done_ptr = (done_ptr + 1) % rh->xfer_entries;
+ rh->done_ptr = done_ptr;
+ }
+
+ /* take care to update the software dequeue pointer atomically */
+ spin_lock(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+}
+
+static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_dma_dev_ibi_data *dev_ibi;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+ return -ENOMEM;
+ pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(pool)) {
+ kfree(dev_ibi);
+ return PTR_ERR(pool);
+ }
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
+ return 0;
+}
+
+static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ dev_data->ibi_data = NULL;
+ i3c_generic_ibi_free_pool(dev_ibi->pool);
+ kfree(dev_ibi);
+}
+
+static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
+}
+
+static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+{
+ struct i3c_dev_desc *dev;
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_dma_dev_ibi_data *dev_ibi;
+ struct i3c_ibi_slot *slot;
+ u32 op1_val, op2_val, ibi_status_error;
+ unsigned int ptr, enq_ptr, deq_ptr;
+ unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
+ int ibi_addr, last_ptr;
+ void *ring_ibi_data;
+ dma_addr_t ring_ibi_data_dma;
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);
+
+ op2_val = rh_reg_read(RING_OPERATION2);
+ enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);
+
+ ibi_status_error = 0;
+ ibi_addr = -1;
+ ibi_chunks = 0;
+ ibi_size = 0;
+ last_ptr = -1;
+
+ /* let's find all we can about this IBI */
+ for (ptr = deq_ptr; ptr != enq_ptr;
+ ptr = (ptr + 1) % rh->ibi_status_entries) {
+ u32 ibi_status, *ring_ibi_status;
+ unsigned int chunks;
+
+ ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
+ ibi_status = *ring_ibi_status;
+ DBG("status = %#x", ibi_status);
+
+ if (ibi_status_error) {
+ /* we no longer care */
+ } else if (ibi_status & IBI_ERROR) {
+ ibi_status_error = ibi_status;
+ } else if (ibi_addr == -1) {
+ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ } else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
+ /* the address changed unexpectedly */
+ ibi_status_error = ibi_status;
+ }
+
+ chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
+ ibi_chunks += chunks;
+ if (!(ibi_status & IBI_LAST_STATUS)) {
+ ibi_size += chunks * rh->ibi_chunk_sz;
+ } else {
+ ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ last_ptr = ptr;
+ break;
+ }
+ }
+
+ /* validate what we've got */
+
+ if (last_ptr == -1) {
+ /* this IBI sequence is not yet complete */
+ DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ return;
+ }
+ deq_ptr = last_ptr + 1;
+ deq_ptr %= rh->ibi_status_entries;
+
+ if (ibi_status_error) {
+ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
+ goto done;
+ }
+
+ /* determine who this is for */
+ dev = i3c_hci_addr_to_dev(hci, ibi_addr);
+ if (!dev) {
+ dev_err(&hci->master.dev,
+ "IBI for unknown device %#x\n", ibi_addr);
+ goto done;
+ }
+
+ dev_data = i3c_dev_get_master_data(dev);
+ dev_ibi = dev_data->ibi_data;
+ if (ibi_size > dev_ibi->max_len) {
+ dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+ ibi_size, dev_ibi->max_len);
+ goto done;
+ }
+
+ /*
+ * This ring model is not suitable for zero-copy processing of IBIs.
+ * We have the data chunk ring wrap-around to deal with, meaning
+ * that the payload might span multiple chunks, beginning at the
+ * end of the ring and wrapping to the start of the ring. Furthermore,
+ * there is no guarantee that those chunks will be released in order
+ * and in a timely manner by the upper driver. So let's just copy
+ * them to a discrete buffer. In practice they're supposed to be
+ * small anyway.
+ */
+ slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+ if (!slot) {
+ dev_err(&hci->master.dev, "no free slot for IBI\n");
+ goto done;
+ }
+
+ /* copy first part of the payload */
+ ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
+ ring_ibi_data = rh->ibi_data + ibi_data_offset;
+ ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
+ first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
+ * rh->ibi_chunk_sz;
+ if (first_part > ibi_size)
+ first_part = ibi_size;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ first_part, DMA_FROM_DEVICE);
+ memcpy(slot->data, ring_ibi_data, first_part);
+
+ /* copy second part if any */
+ if (ibi_size > first_part) {
+ /* we wrap back to the start and copy remaining data */
+ ring_ibi_data = rh->ibi_data;
+ ring_ibi_data_dma = rh->ibi_data_dma;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ ibi_size - first_part, DMA_FROM_DEVICE);
+ memcpy(slot->data + first_part, ring_ibi_data,
+ ibi_size - first_part);
+ }
+
+ /* submit it */
+ slot->dev = dev;
+ slot->len = ibi_size;
+ i3c_master_queue_ibi(dev, slot);
+
+done:
+ /* take care to update the ibi dequeue pointer atomically */
+ spin_lock(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_IBI_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+
+ /* update the chunk pointer */
+ rh->ibi_chunk_ptr += ibi_chunks;
+ rh->ibi_chunk_ptr %= rh->ibi_chunks_total;
+
+ /* and tell the hardware about freed chunks */
+ rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
+}
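+
+/*
+ * Illustrative sketch, not used by the driver: the two-part copy above
+ * is the usual pattern for draining a contiguous payload out of a
+ * circular buffer, wrapping around at most once.
+ */
+static void __maybe_unused ring_copy_out_example(u8 *dst, const u8 *ring,
+						 unsigned int ring_size,
+						 unsigned int head,
+						 unsigned int len)
+{
+	/* bytes available before hitting the end of the ring */
+	unsigned int first = ring_size - head;
+
+	if (first > len)
+		first = len;
+	memcpy(dst, ring + head, first);
+	if (len > first)	/* wrapped: the remainder sits at the start */
+		memcpy(dst + first, ring, len - first);
+}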
+
+static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ unsigned int i;
+ bool handled = false;
+
+ for (i = 0; mask && i < rings->total; i++) {
+ struct hci_rh_data *rh;
+ u32 status;
+
+ if (!(mask & BIT(i)))
+ continue;
+ mask &= ~BIT(i);
+
+ rh = &rings->headers[i];
+ status = rh_reg_read(INTR_STATUS);
+ DBG("rh%d status: %#x", i, status);
+ if (!status)
+ continue;
+ rh_reg_write(INTR_STATUS, status);
+
+ if (status & INTR_IBI_READY)
+ hci_dma_process_ibi(hci, rh);
+ if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
+ hci_dma_xfer_done(hci, rh);
+ if (status & INTR_RING_OP)
+ complete(&rh->op_done);
+
+ if (status & INTR_TRANSFER_ABORT)
+ dev_notice_ratelimited(&hci->master.dev,
+ "ring %d: Transfer Aborted\n", i);
+ if (status & INTR_WARN_INS_STOP_MODE)
+ dev_warn_ratelimited(&hci->master.dev,
+ "ring %d: Inserted Stop on Mode Change\n", i);
+ if (status & INTR_IBI_RING_FULL)
+ dev_err_ratelimited(&hci->master.dev,
+ "ring %d: IBI Ring Full Condition\n", i);
+
+ handled = true;
+ }
+
+ return handled;
+}
+
+const struct hci_io_ops mipi_i3c_hci_dma = {
+ .init = hci_dma_init,
+ .cleanup = hci_dma_cleanup,
+ .queue_xfer = hci_dma_queue_xfer,
+ .dequeue_xfer = hci_dma_dequeue_xfer,
+ .irq_handler = hci_dma_irq_handler,
+ .request_ibi = hci_dma_request_ibi,
+ .free_ibi = hci_dma_free_ibi,
+ .recycle_ibi_slot = hci_dma_recycle_ibi_slot,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
new file mode 100644
index 000000000..2e9b23efd
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "ext_caps.h"
+#include "xfer_mode_rate.h"
+
+
+/* Extended Capability Header */
+#define CAP_HEADER_LENGTH GENMASK(23, 8)
+#define CAP_HEADER_ID GENMASK(7, 0)
+
+static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
+{
+ hci->vendor_mipi_id = readl(base + 0x04);
+ hci->vendor_version_id = readl(base + 0x08);
+ hci->vendor_product_id = readl(base + 0x0c);
+
+ dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
+ dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
+ dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);
+
+ /* ought to go in a table if this grows too much */
+ switch (hci->vendor_mipi_id) {
+ case MIPI_VENDOR_NXP:
+ hci->quirks |= HCI_QUIRK_RAW_CCC;
+ DBG("raw CCC quirks set");
+ break;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 master_config = readl(base + 0x04);
+ unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
+ static const char * const functionality[] = {
+ "(unknown)", "master only", "target only",
+ "primary/secondary master" };
+ dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
+ if (operation_mode & 0x1)
+ return 0;
+ dev_err(&hci->master.dev, "only master mode is currently supported\n");
+ return -EOPNOTSUPP;
+}
+
+static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 bus_instance = readl(base + 0x04);
+ unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);
+
+ dev_info(&hci->master.dev, "%d bus instances\n", count);
+ return 0;
+}
+
+static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 header = readl(base);
+ u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
+ unsigned int index;
+
+ dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
+ entries);
+ base += 4; /* skip header */
+ for (index = 0; index < entries; index++) {
+ u32 mode_entry = readl(base);
+
+ DBG("mode %d: 0x%08x", index, mode_entry);
+ /* TODO: will be needed when I3C core does more than SDR */
+ base += 4;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 header = readl(base);
+ u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
+ u32 rate_entry;
+ unsigned int index, rate, rate_id, mode_id;
+
+ base += 4; /* skip header */
+
+ dev_info(&hci->master.dev, "available data rates:\n");
+ for (index = 0; index < entries; index++) {
+ rate_entry = readl(base);
+ DBG("entry %d: 0x%08x", index, rate_entry);
+ rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
+ rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
+ mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
+ dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
+ rate_id,
+ mode_id == XFERRATE_MODE_I3C ? "I3C" :
+ mode_id == XFERRATE_MODE_I2C ? "I2C" :
+ "unknown mode",
+ rate);
+ base += 4;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 autocmd_ext_caps = readl(base + 0x04);
+ unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
+ u32 autocmd_ext_config = readl(base + 0x08);
+ unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);
+
+ dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
+ count, max_count);
+ /* remember auto-command register location for later use */
+ hci->AUTOCMD_regs = base;
+ return 0;
+}
+
+static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "debug registers present\n");
+ hci->DEBUG_regs = base;
+ return 0;
+}
+
+static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "scheduled commands available\n");
+ /* hci->schedcmd_regs = base; */
+ return 0;
+}
+
+static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Non-Current Master support available\n");
+ /* hci->NCM_regs = base; */
+ return 0;
+}
+
+static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "CCC Response Configuration available\n");
+ return 0;
+}
+
+static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Global DAT available\n");
+ return 0;
+}
+
+static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
+ return 0;
+}
+
+static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
+ return 0;
+}
+
+struct hci_ext_caps {
+ u8 id;
+ u16 min_length;
+ int (*parser)(struct i3c_hci *hci, void __iomem *base);
+};
+
+#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
+ { .id = (_id), .parser = (_parser), \
+ .min_length = (_highest_mandatory_reg_offset)/4 + 1 }
+
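+/*
+ * Worked example: min_length counts 32-bit words and includes the
+ * header, so EXT_CAP(0x01, 0x0c, ...) gives 0x0c/4 + 1 = 4 words,
+ * i.e. the header at offset 0x00 plus the three ID registers at
+ * offsets 0x04..0x0c.
+ */
+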
+static const struct hci_ext_caps ext_capabilities[] = {
+ EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
+ EXT_CAP(0x02, 0x04, hci_extcap_master_config),
+ EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
+ EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
+ EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
+ EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
+ EXT_CAP(0x0c, 0x10, hci_extcap_debug),
+ EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
+ EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master), /* TODO confirm size */
+ EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
+ EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
+ EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
+ EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
+};
+
+static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
+{
+ hci->vendor_data = (__force void *)base;
+ dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
+ /* reset the FPGA */
+ writel(0xdeadbeef, base + 1*4);
+ return 0;
+}
+
+struct hci_ext_cap_vendor_specific {
+ u32 vendor;
+ u8 cap;
+ u16 min_length;
+ int (*parser)(struct i3c_hci *hci, void __iomem *base);
+};
+
+#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
+ { .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
+ .parser = (hci_extcap_vendor_##_vendor), \
+ .min_length = (_highest_mandatory_reg_offset)/4 + 1 }
+
+static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
+ EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
+};
+
+static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
+ u32 cap_id, u32 cap_length)
+{
+ const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
+ int i;
+
+ vendor_cap_entry = NULL;
+ for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
+ if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
+ vendor_ext_caps[i].cap == cap_id) {
+ vendor_cap_entry = &vendor_ext_caps[i];
+ break;
+ }
+ }
+
+ if (!vendor_cap_entry) {
+ dev_notice(&hci->master.dev,
+ "unknown ext_cap 0x%02x for vendor 0x%02x\n",
+ cap_id, hci->vendor_mipi_id);
+ return 0;
+ }
+ if (cap_length < vendor_cap_entry->min_length) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (expecting >= %d)\n",
+ cap_id, cap_length, vendor_cap_entry->min_length);
+ return -EINVAL;
+ }
+ return vendor_cap_entry->parser(hci, base);
+}
+
+int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
+{
+ void __iomem *curr_cap = hci->EXTCAPS_regs;
+ void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
+ u32 cap_header, cap_id, cap_length;
+ const struct hci_ext_caps *cap_entry;
+ int i, err = 0;
+
+ if (!curr_cap)
+ return 0;
+
+ for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
+ cap_header = readl(curr_cap);
+ cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
+ cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
+ DBG("id=0x%02x length=%d", cap_id, cap_length);
+ if (!cap_length)
+ break;
+ if (curr_cap + cap_length * 4 >= end) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (too big)\n",
+ cap_id, cap_length);
+ err = -EINVAL;
+ break;
+ }
+
+ if (cap_id >= 0xc0 && cap_id <= 0xcf) {
+ err = hci_extcap_vendor_specific(hci, curr_cap,
+ cap_id, cap_length);
+ continue;
+ }
+
+ cap_entry = NULL;
+ for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
+ if (ext_capabilities[i].id == cap_id) {
+ cap_entry = &ext_capabilities[i];
+ break;
+ }
+ }
+ if (!cap_entry) {
+ dev_notice(&hci->master.dev,
+ "unknown ext_cap 0x%02x\n", cap_id);
+ } else if (cap_length < cap_entry->min_length) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (expecting >= %d)\n",
+ cap_id, cap_length, cap_entry->min_length);
+ err = -EINVAL;
+ } else {
+ err = cap_entry->parser(hci, curr_cap);
+ }
+ }
+ return err;
+}
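+
+/*
+ * Worked example of the walk above: a header word of 0x00000401 denotes
+ * capability ID 0x01 spanning 4 DWORDs, so the next header is read
+ * 16 bytes further on; a zero LENGTH field terminates the list.
+ */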
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.h b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
new file mode 100644
index 000000000..9df17822f
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Extended Capability Definitions
+ */
+
+#ifndef EXTCAPS_H
+#define EXTCAPS_H
+
+/* MIPI vendor IDs */
+#define MIPI_VENDOR_NXP 0x11b
+
+
+int i3c_hci_parse_ext_caps(struct i3c_hci *hci);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
new file mode 100644
index 000000000..f109923f6
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common HCI stuff
+ */
+
+#ifndef HCI_H
+#define HCI_H
+
+
+/* Handy logging macro to save on line length */
+#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
+
+/* 32-bit word aware bit and mask macros */
+#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
+#define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
+#define W2_MASK(h, l) GENMASK((h) - 64, (l) - 64)
+#define W3_MASK(h, l) GENMASK((h) - 96, (l) - 96)
+
+/* Same for single bit macros (trailing _ to align with W*_MASK width) */
+#define W0_BIT_(x) BIT((x) - 0)
+#define W1_BIT_(x) BIT((x) - 32)
+#define W2_BIT_(x) BIT((x) - 64)
+#define W3_BIT_(x) BIT((x) - 96)
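+
+/*
+ * Illustrative use (assumes <linux/bitfield.h>): a field documented at
+ * descriptor bits [66:64] lives in 32-bit word 2 of the descriptor and
+ * can be extracted with FIELD_GET(W2_MASK(66, 64), desc[2]).
+ */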
+
+
+struct hci_cmd_ops;
+
+/* Our main structure */
+struct i3c_hci {
+ struct i3c_master_controller master;
+ void __iomem *base_regs;
+ void __iomem *DAT_regs;
+ void __iomem *DCT_regs;
+ void __iomem *RHS_regs;
+ void __iomem *PIO_regs;
+ void __iomem *EXTCAPS_regs;
+ void __iomem *AUTOCMD_regs;
+ void __iomem *DEBUG_regs;
+ const struct hci_io_ops *io;
+ void *io_data;
+ const struct hci_cmd_ops *cmd;
+ atomic_t next_cmd_tid;
+ u32 caps;
+ unsigned int quirks;
+ unsigned int DAT_entries;
+ unsigned int DAT_entry_size;
+ void *DAT_data;
+ unsigned int DCT_entries;
+ unsigned int DCT_entry_size;
+ u8 version_major;
+ u8 version_minor;
+ u8 revision;
+ u32 vendor_mipi_id;
+ u32 vendor_version_id;
+ u32 vendor_product_id;
+ void *vendor_data;
+};
+
+
+/*
+ * Structure to represent a master initiated transfer.
+ * The rnw, data and data_len fields must be initialized before calling any
+ * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
+ * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
+ * be augmented with CMD_0_ROC and/or CMD_0_TOC.
+ * The completion field needs to be initialized before queueing with
+ * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
+ */
+struct hci_xfer {
+ u32 cmd_desc[4];
+ u32 response;
+ bool rnw;
+ void *data;
+ unsigned int data_len;
+ unsigned int cmd_tid;
+ struct completion *completion;
+ union {
+ struct {
+ /* PIO specific */
+ struct hci_xfer *next_xfer;
+ struct hci_xfer *next_data;
+ struct hci_xfer *next_resp;
+ unsigned int data_left;
+ u32 data_word_before_partial;
+ };
+ struct {
+ /* DMA specific */
+ dma_addr_t data_dma;
+ int ring_number;
+ int ring_entry;
+ };
+ };
+};
+
+static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
+{
+ return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
+}
+
+static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
+{
+ kfree(xfer);
+}
+
+
+/* This abstracts PIO vs DMA operations */
+struct hci_io_ops {
+ bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
+ int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
+};
+
+extern const struct hci_io_ops mipi_i3c_hci_pio;
+extern const struct hci_io_ops mipi_i3c_hci_dma;
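+
+/*
+ * Illustrative sketch of the hci_xfer lifecycle described above, not
+ * part of the driver (assumes <linux/completion.h>). The call building
+ * cmd_desc[] is elided as it lives in cmd.h, and a real caller would
+ * also dequeue the transfer on timeout.
+ */
+static inline int hci_xfer_example(struct i3c_hci *hci,
+				   void *buf, unsigned int len)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct hci_xfer *xfer = hci_alloc_xfer(1);
+	int ret;
+
+	if (!xfer)
+		return -ENOMEM;
+	xfer->rnw = true;		/* a read, for illustration */
+	xfer->data = buf;
+	xfer->data_len = len;
+	/* ... hci->cmd->...() builds cmd_desc[] and sets CMD_0_ROC ... */
+	xfer->completion = &done;
+	ret = hci->io->queue_xfer(hci, xfer, 1);
+	if (!ret && !wait_for_completion_timeout(&done, HZ))
+		ret = -ETIMEDOUT;
+	hci_free_xfer(xfer, 1);
+	return ret;
+}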
+
+
+/* Our per device master private data */
+struct i3c_hci_dev_data {
+ int dat_idx;
+ void *ibi_data;
+};
+
+
+/* list of quirks */
+#define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */
+
+
+/* global functions */
+void mipi_i3c_hci_resume(struct i3c_hci *hci);
+void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
+void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/ibi.h b/drivers/i3c/master/mipi-i3c-hci/ibi.h
new file mode 100644
index 000000000..e1f98e264
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ibi.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common IBI related stuff
+ */
+
+#ifndef IBI_H
+#define IBI_H
+
+/*
+ * IBI Status Descriptor bits
+ */
+#define IBI_STS BIT(31)
+#define IBI_ERROR BIT(30)
+#define IBI_STATUS_TYPE BIT(29)
+#define IBI_HW_CONTEXT GENMASK(28, 26)
+#define IBI_TS BIT(25)
+#define IBI_LAST_STATUS BIT(24)
+#define IBI_CHUNKS GENMASK(23, 16)
+#define IBI_ID GENMASK(15, 8)
+#define IBI_TARGET_ADDR GENMASK(15, 9)
+#define IBI_TARGET_RNW BIT(8)
+#define IBI_DATA_LENGTH GENMASK(7, 0)
+
+/* handy helpers */
+static inline struct i3c_dev_desc *
+i3c_hci_addr_to_dev(struct i3c_hci *hci, unsigned int addr)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+ struct i3c_dev_desc *dev;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ if (dev->info.dyn_addr == addr)
+ return dev;
+ }
+ return NULL;
+}
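+
+/*
+ * Illustrative decode of one IBI status DWORD with the fields above
+ * (assumes <linux/bitfield.h>; not used by the driver).
+ */
+static inline void ibi_status_decode_example(u32 sts)
+{
+	unsigned int addr = FIELD_GET(IBI_TARGET_ADDR, sts);
+	unsigned int len = FIELD_GET(IBI_DATA_LENGTH, sts);
+
+	pr_debug("IBI from %#x: %u bytes%s%s\n", addr, len,
+		 sts & IBI_LAST_STATUS ? " (last)" : "",
+		 sts & IBI_ERROR ? " (error)" : "");
+}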
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
new file mode 100644
index 000000000..d0272aa93
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "ibi.h"
+
+
+/*
+ * PIO Access Area
+ */
+
+#define pio_reg_read(r) readl(hci->PIO_regs + (PIO_##r))
+#define pio_reg_write(r, v) writel(v, hci->PIO_regs + (PIO_##r))
+
+#define PIO_COMMAND_QUEUE_PORT 0x00
+#define PIO_RESPONSE_QUEUE_PORT 0x04
+#define PIO_XFER_DATA_PORT 0x08
+#define PIO_IBI_PORT 0x0c
+
+#define PIO_QUEUE_THLD_CTRL 0x10
+#define QUEUE_IBI_STATUS_THLD GENMASK(31, 24)
+#define QUEUE_IBI_DATA_THLD GENMASK(23, 16)
+#define QUEUE_RESP_BUF_THLD GENMASK(15, 8)
+#define QUEUE_CMD_EMPTY_BUF_THLD GENMASK(7, 0)
+
+#define PIO_DATA_BUFFER_THLD_CTRL 0x14
+#define DATA_RX_START_THLD GENMASK(26, 24)
+#define DATA_TX_START_THLD GENMASK(18, 16)
+#define DATA_RX_BUF_THLD GENMASK(10, 8)
+#define DATA_TX_BUF_THLD GENMASK(2, 0)
+
+#define PIO_QUEUE_SIZE 0x18
+#define TX_DATA_BUFFER_SIZE GENMASK(31, 24)
+#define RX_DATA_BUFFER_SIZE GENMASK(23, 16)
+#define IBI_STATUS_SIZE GENMASK(15, 8)
+#define CR_QUEUE_SIZE GENMASK(7, 0)
+
+#define PIO_INTR_STATUS 0x20
+#define PIO_INTR_STATUS_ENABLE 0x24
+#define PIO_INTR_SIGNAL_ENABLE 0x28
+#define PIO_INTR_FORCE 0x2c
+#define STAT_TRANSFER_BLOCKED BIT(25)
+#define STAT_PERR_RESP_UFLOW BIT(24)
+#define STAT_PERR_CMD_OFLOW BIT(23)
+#define STAT_PERR_IBI_UFLOW BIT(22)
+#define STAT_PERR_RX_UFLOW BIT(21)
+#define STAT_PERR_TX_OFLOW BIT(20)
+#define STAT_ERR_RESP_QUEUE_FULL BIT(19)
+#define STAT_WARN_RESP_QUEUE_FULL BIT(18)
+#define STAT_ERR_IBI_QUEUE_FULL BIT(17)
+#define STAT_WARN_IBI_QUEUE_FULL BIT(16)
+#define STAT_ERR_RX_DATA_FULL BIT(15)
+#define STAT_WARN_RX_DATA_FULL BIT(14)
+#define STAT_ERR_TX_DATA_EMPTY BIT(13)
+#define STAT_WARN_TX_DATA_EMPTY BIT(12)
+#define STAT_TRANSFER_ERR BIT(9)
+#define STAT_WARN_INS_STOP_MODE BIT(7)
+#define STAT_TRANSFER_ABORT BIT(5)
+#define STAT_RESP_READY BIT(4)
+#define STAT_CMD_QUEUE_READY BIT(3)
+#define STAT_IBI_STATUS_THLD BIT(2)
+#define STAT_RX_THLD BIT(1)
+#define STAT_TX_THLD BIT(0)
+
+#define PIO_QUEUE_CUR_STATUS 0x38
+#define CUR_IBI_Q_LEVEL GENMASK(28, 20)
+#define CUR_RESP_Q_LEVEL GENMASK(18, 10)
+#define CUR_CMD_Q_EMPTY_LEVEL GENMASK(8, 0)
+
+#define PIO_DATA_BUFFER_CUR_STATUS 0x3c
+#define CUR_RX_BUF_LVL GENMASK(26, 16)
+#define CUR_TX_BUF_LVL GENMASK(10, 0)
+
+/*
+ * Handy status bit combinations
+ */
+
+#define STAT_LATENCY_WARNINGS (STAT_WARN_RESP_QUEUE_FULL | \
+ STAT_WARN_IBI_QUEUE_FULL | \
+ STAT_WARN_RX_DATA_FULL | \
+ STAT_WARN_TX_DATA_EMPTY | \
+ STAT_WARN_INS_STOP_MODE)
+
+#define STAT_LATENCY_ERRORS (STAT_ERR_RESP_QUEUE_FULL | \
+ STAT_ERR_IBI_QUEUE_FULL | \
+ STAT_ERR_RX_DATA_FULL | \
+ STAT_ERR_TX_DATA_EMPTY)
+
+#define STAT_PROG_ERRORS (STAT_TRANSFER_BLOCKED | \
+ STAT_PERR_RESP_UFLOW | \
+ STAT_PERR_CMD_OFLOW | \
+ STAT_PERR_IBI_UFLOW | \
+ STAT_PERR_RX_UFLOW | \
+ STAT_PERR_TX_OFLOW)
+
+#define STAT_ALL_ERRORS (STAT_TRANSFER_ABORT | \
+ STAT_TRANSFER_ERR | \
+ STAT_LATENCY_ERRORS | \
+ STAT_PROG_ERRORS)
+
+struct hci_pio_dev_ibi_data {
+ struct i3c_generic_ibi_pool *pool;
+ unsigned int max_len;
+};
+
+struct hci_pio_ibi_data {
+ struct i3c_ibi_slot *slot;
+ void *data_ptr;
+ unsigned int addr;
+ unsigned int seg_len, seg_cnt;
+ unsigned int max_len;
+ bool last_seg;
+};
+
+struct hci_pio_data {
+ spinlock_t lock;
+ struct hci_xfer *curr_xfer, *xfer_queue;
+ struct hci_xfer *curr_rx, *rx_queue;
+ struct hci_xfer *curr_tx, *tx_queue;
+ struct hci_xfer *curr_resp, *resp_queue;
+ struct hci_pio_ibi_data ibi;
+ unsigned int rx_thresh_size, tx_thresh_size;
+ unsigned int max_ibi_thresh;
+ u32 reg_queue_thresh;
+ u32 enabled_irqs;
+};
+
+static int hci_pio_init(struct i3c_hci *hci)
+{
+ struct hci_pio_data *pio;
+ u32 val, size_val, rx_thresh, tx_thresh, ibi_val;
+
+ pio = kzalloc(sizeof(*pio), GFP_KERNEL);
+ if (!pio)
+ return -ENOMEM;
+
+ hci->io_data = pio;
+ spin_lock_init(&pio->lock);
+
+ size_val = pio_reg_read(QUEUE_SIZE);
+ dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
+ FIELD_GET(CR_QUEUE_SIZE, size_val));
+ dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
+ 4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
+ dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
+ dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));
+
+ /*
+ * Let's initialize data thresholds to half of the actual FIFO size.
+ * The start thresholds aren't used (set to 0) as the FIFO is always
+ * serviced before the corresponding command is queued.
+ */
+ rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
+ tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
+ if (hci->version_major == 1) {
+		/* those are expressed as 2^(n+1), so just subtract 1 if not 0 */
+ if (rx_thresh)
+ rx_thresh -= 1;
+ if (tx_thresh)
+ tx_thresh -= 1;
+ pio->rx_thresh_size = 2 << rx_thresh;
+ pio->tx_thresh_size = 2 << tx_thresh;
+ } else {
+ /* size is 2^(n+1) and threshold is 2^n i.e. already halved */
+ pio->rx_thresh_size = 1 << rx_thresh;
+ pio->tx_thresh_size = 1 << tx_thresh;
+ }
+ val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
+ FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
+ pio_reg_write(DATA_BUFFER_THLD_CTRL, val);
+
+ /*
+ * Let's raise an interrupt as soon as there is one free cmd slot
+ * or one available response or IBI. For IBI data let's use half the
+ * IBI queue size within allowed bounds.
+ */
+ ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
+ pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
+ val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
+ FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
+ FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
+ FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
+ pio_reg_write(QUEUE_THLD_CTRL, val);
+ pio->reg_queue_thresh = val;
+
+ /* Disable all IRQs but allow all status bits */
+ pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+
+ /* Always accept error interrupts (will be activated on first xfer) */
+ pio->enabled_irqs = STAT_ALL_ERRORS;
+
+ return 0;
+}
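+
+/*
+ * Worked example of the v1 threshold encoding above: a size field of
+ * n = 3 means a 2 << 3 = 16-word (64-byte) FIFO; writing back n - 1 = 2
+ * as the threshold requests an interrupt at 2 << 2 = 8 words, i.e. half
+ * the FIFO.
+ */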
+
+static void hci_pio_cleanup(struct i3c_hci *hci)
+{
+ struct hci_pio_data *pio = hci->io_data;
+
+ pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
+
+ if (pio) {
+ DBG("status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ BUG_ON(pio->curr_xfer);
+ BUG_ON(pio->curr_rx);
+ BUG_ON(pio->curr_tx);
+ BUG_ON(pio->curr_resp);
+ kfree(pio);
+ hci->io_data = NULL;
+ }
+}
+
+static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
+{
+ DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
+ DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
+ DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
+ }
+}
+
+static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_rx;
+ unsigned int nr_words;
+ u32 *p;
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
+ /* bail out if FIFO hasn't reached the threshold value yet */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
+ return false;
+ nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
+ /* extract data from FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ *p++ = pio_reg_read(XFER_DATA_PORT);
+ }
+
+ /* trailing data is retrieved upon response reception */
+ return !xfer->data_left;
+}
+
+static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
+ struct hci_pio_data *pio, unsigned int count)
+{
+ struct hci_xfer *xfer = pio->curr_rx;
+ u32 *p;
+
+ DBG("%d remaining", count);
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ if (count >= 4) {
+ unsigned int nr_words = count / 4;
+ /* extract data from FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ *p++ = pio_reg_read(XFER_DATA_PORT);
+ }
+
+ count &= 3;
+ if (count) {
+ /*
+ * There are trailing bytes in the last word.
+ * Fetch it and extract bytes in an endian independent way.
+ * Unlike the TX case, we must not write memory past the
+ * end of the destination buffer.
+ */
+ u8 *p_byte = (u8 *)p;
+ u32 data = pio_reg_read(XFER_DATA_PORT);
+
+ xfer->data_word_before_partial = data;
+ xfer->data_left -= count;
+ data = (__force u32) cpu_to_le32(data);
+ while (count--) {
+ *p_byte++ = data;
+ data >>= 8;
+ }
+ }
+}
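+
+/*
+ * Illustrative sketch, not used by the driver: the byte loop above is
+ * endian independent because cpu_to_le32() undoes the swap that readl()
+ * performs on big-endian hosts, so successive right shifts by 8 always
+ * yield the FIFO bytes in bus order.
+ */
+static void __maybe_unused store_partial_word_example(u8 *dst, u32 word,
+						      unsigned int count)
+{
+	word = (__force u32) cpu_to_le32(word);	/* back to bus byte order */
+	while (count--) {	/* count is 1..3: never writes past dst end */
+		*dst++ = word;
+		word >>= 8;
+	}
+}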
+
+static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_tx;
+ unsigned int nr_words;
+ u32 *p;
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
+ /* bail out if FIFO free space is below set threshold */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ /* we can fill up to that TX threshold */
+ nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
+ /* push data into the FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ pio_reg_write(XFER_DATA_PORT, *p++);
+ }
+
+ if (xfer->data_left) {
+ /*
+ * There are trailing bytes to send. We can simply load
+ * them from memory as a word which will keep those bytes
+ * in their proper place even on a BE system. This will
+ * also get some bytes past the actual buffer but no one
+ * should care as they won't be sent out.
+ */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ DBG("trailing %d", xfer->data_left);
+ pio_reg_write(XFER_DATA_PORT, *p);
+ xfer->data_left = 0;
+ }
+
+ return true;
+}
+
+static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_rx && hci_pio_do_rx(hci, pio))
+ pio->curr_rx = pio->curr_rx->next_data;
+ return !pio->curr_rx;
+}
+
+static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_tx && hci_pio_do_tx(hci, pio))
+ pio->curr_tx = pio->curr_tx->next_data;
+ return !pio->curr_tx;
+}
+
+static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+ if (!xfer->data) {
+ xfer->data_len = xfer->data_left = 0;
+ return;
+ }
+
+ if (xfer->rnw) {
+ prev_queue_tail = pio->rx_queue;
+ pio->rx_queue = xfer;
+ if (pio->curr_rx) {
+ prev_queue_tail->next_data = xfer;
+ } else {
+ pio->curr_rx = xfer;
+ if (!hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs |= STAT_RX_THLD;
+ }
+ } else {
+ prev_queue_tail = pio->tx_queue;
+ pio->tx_queue = xfer;
+ if (pio->curr_tx) {
+ prev_queue_tail->next_data = xfer;
+ } else {
+ pio->curr_tx = xfer;
+ if (!hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs |= STAT_TX_THLD;
+ }
+ }
+}
+
+static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
+ unsigned int words_to_keep)
+{
+ u32 *from = xfer->data;
+ u32 from_last;
+ unsigned int received, count;
+
+ received = (xfer->data_len - xfer->data_left) / 4;
+ if ((xfer->data_len - xfer->data_left) & 3) {
+ from_last = xfer->data_word_before_partial;
+ received += 1;
+ } else {
+ from_last = from[received];
+ }
+ from += words_to_keep;
+ count = received - words_to_keep;
+
+ while (count) {
+ unsigned int room, left, chunk, bytes_to_move;
+ u32 last_word;
+
+ xfer = xfer->next_data;
+ if (!xfer) {
+ dev_err(&hci->master.dev, "pushing RX data to unexistent xfer\n");
+ return;
+ }
+
+ room = DIV_ROUND_UP(xfer->data_len, 4);
+ left = DIV_ROUND_UP(xfer->data_left, 4);
+ chunk = min(count, room);
+ if (chunk > left) {
+ hci_pio_push_to_next_rx(hci, xfer, chunk - left);
+ left = chunk;
+ xfer->data_left = left * 4;
+ }
+
+ bytes_to_move = xfer->data_len - xfer->data_left;
+ if (bytes_to_move & 3) {
+ /* preserve word to become partial */
+ u32 *p = xfer->data;
+
+ xfer->data_word_before_partial = p[bytes_to_move / 4];
+ }
+ memmove(xfer->data + chunk, xfer->data, bytes_to_move);
+
+ /* treat last word specially because of partial word issues */
+ chunk -= 1;
+
+ memcpy(xfer->data, from, chunk * 4);
+ xfer->data_left -= chunk * 4;
+ from += chunk;
+ count -= chunk;
+
+ last_word = (count == 1) ? from_last : *from++;
+ if (xfer->data_left < 4) {
+ /*
+			 * Like in hci_pio_do_trailing_rx(), preserve the
+			 * original word to be stored partially, then store
+			 * its bytes in an endian independent way.
+ */
+ u8 *p_byte = xfer->data;
+
+ p_byte += chunk * 4;
+ xfer->data_word_before_partial = last_word;
+ last_word = (__force u32) cpu_to_le32(last_word);
+ while (xfer->data_left--) {
+ *p_byte++ = last_word;
+ last_word >>= 8;
+ }
+ } else {
+ u32 *p = xfer->data;
+
+ p[chunk] = last_word;
+ xfer->data_left -= 4;
+ }
+ count--;
+ }
+}
+
+static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ u32 status);
+
+static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_resp &&
+ (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
+ struct hci_xfer *xfer = pio->curr_resp;
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+ unsigned int tid = RESP_TID(resp);
+
+ DBG("resp = 0x%08x", resp);
+ if (tid != xfer->cmd_tid) {
+ dev_err(&hci->master.dev,
+ "response tid=%d when expecting %d\n",
+ tid, xfer->cmd_tid);
+ /* let's pretend it is a prog error... any of them */
+ hci_pio_err(hci, pio, STAT_PROG_ERRORS);
+ return false;
+ }
+ xfer->response = resp;
+
+ if (pio->curr_rx == xfer) {
+ /*
+ * Response availability implies RX completion.
+ * Retrieve trailing RX data if any.
+ * Note that short reads are possible.
+ */
+ unsigned int received, expected, to_keep;
+
+ received = xfer->data_len - xfer->data_left;
+ expected = RESP_DATA_LENGTH(xfer->response);
+ if (expected > received) {
+ hci_pio_do_trailing_rx(hci, pio,
+ expected - received);
+ } else if (received > expected) {
+ /* we consumed data meant for next xfer */
+ to_keep = DIV_ROUND_UP(expected, 4);
+ hci_pio_push_to_next_rx(hci, xfer, to_keep);
+ }
+
+ /* then process the RX list pointer */
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+ }
+
+ /*
+ * We're about to give back ownership of the xfer structure
+ * to the waiting instance. Make sure no reference to it
+ * still exists.
+ */
+ if (pio->curr_rx == xfer) {
+ DBG("short RX ?");
+ pio->curr_rx = pio->curr_rx->next_data;
+ } else if (pio->curr_tx == xfer) {
+ DBG("short TX ?");
+ pio->curr_tx = pio->curr_tx->next_data;
+ } else if (xfer->data_left) {
+ DBG("PIO xfer count = %d after response",
+ xfer->data_left);
+ }
+
+ pio->curr_resp = xfer->next_resp;
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+ return !pio->curr_resp;
+}
+
+static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+ if (!(xfer->cmd_desc[0] & CMD_0_ROC))
+ return;
+
+ prev_queue_tail = pio->resp_queue;
+ pio->resp_queue = xfer;
+ if (pio->curr_resp) {
+ prev_queue_tail->next_resp = xfer;
+ } else {
+ pio->curr_resp = xfer;
+ if (!hci_pio_process_resp(hci, pio))
+ pio->enabled_irqs |= STAT_RESP_READY;
+ }
+}
+
+static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_xfer &&
+ (pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
+ /*
+ * Always process the data FIFO before sending the command
+ * so needed TX data or RX space is available upfront.
+ */
+ hci_pio_queue_data(hci, pio);
+ /*
+ * Then queue our response request. This will also process
+ * the response FIFO in case it got suddenly filled up
+ * with results from previous commands.
+ */
+ hci_pio_queue_resp(hci, pio);
+ /*
+ * Finally send the command.
+ */
+ hci_pio_write_cmd(hci, pio->curr_xfer);
+ /*
+ * And move on.
+ */
+ pio->curr_xfer = pio->curr_xfer->next_xfer;
+ }
+ return !pio->curr_xfer;
+}
+
+static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ struct hci_xfer *prev_queue_tail;
+ int i;
+
+ DBG("n = %d", n);
+
+ /* link xfer instances together and initialize data count */
+ for (i = 0; i < n; i++) {
+ xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
+ xfer[i].next_data = NULL;
+ xfer[i].next_resp = NULL;
+ xfer[i].data_left = xfer[i].data_len;
+ }
+
+ spin_lock_irq(&pio->lock);
+ prev_queue_tail = pio->xfer_queue;
+ pio->xfer_queue = &xfer[n - 1];
+ if (pio->curr_xfer) {
+ prev_queue_tail->next_xfer = xfer;
+ } else {
+ pio->curr_xfer = xfer;
+ if (!hci_pio_process_cmd(hci, pio))
+ pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ }
+ spin_unlock_irq(&pio->lock);
+ return 0;
+}
+
+static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
+ struct hci_pio_data *pio,
+ struct hci_xfer *xfer, int n)
+{
+ struct hci_xfer *p, **p_prev_next;
+ int i;
+
+ /*
+	 * To safely dequeue a transfer request, it must be either entirely
+	 * processed or not yet processed at all. If any of our requests is
+	 * reachable from either the data or resp list, that means the
+	 * command was submitted and has not yet completed.
+ */
+ for (p = pio->curr_resp; p; p = p->next_resp)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+ for (p = pio->curr_rx; p; p = p->next_data)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+ for (p = pio->curr_tx; p; p = p->next_data)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+
+ /*
+	 * The command either completed or was never submitted.
+	 * Unlink it from the queue in the latter case.
+ */
+ p_prev_next = &pio->curr_xfer;
+ for (p = pio->curr_xfer; p; p = p->next_xfer) {
+ if (p == &xfer[0]) {
+ *p_prev_next = xfer[n - 1].next_xfer;
+ break;
+ }
+ p_prev_next = &p->next_xfer;
+ }
+
+ /* return true if we actually unqueued something */
+ return !!p;
+
+pio_screwed:
+ /*
+ * Life is tough. We must invalidate the hardware state and
+ * discard everything that is still queued.
+ */
+ for (p = pio->curr_resp; p; p = p->next_resp) {
+ p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
+ if (p->completion)
+ complete(p->completion);
+ }
+ for (p = pio->curr_xfer; p; p = p->next_xfer) {
+ p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
+ if (p->completion)
+ complete(p->completion);
+ }
+ pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;
+
+ return true;
+}
+
+static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ int ret;
+
+ spin_lock_irq(&pio->lock);
+ DBG("n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ DBG("main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+
+ ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
+ spin_unlock_irq(&pio->lock);
+ return ret;
+}
+
+static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ u32 status)
+{
+ /* TODO: this ought to be more sophisticated eventually */
+
+ if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
+ /* this may happen when an error is signaled with ROC unset */
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+
+ dev_err(&hci->master.dev,
+ "orphan response (%#x) on error\n", resp);
+ }
+
+ /* dump states on programming errors */
+ if (status & STAT_PROG_ERRORS) {
+ u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
+ u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);
+
+ dev_err(&hci->master.dev,
+ "prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
+ status & STAT_PROG_ERRORS,
+ FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
+ FIELD_GET(CUR_RESP_Q_LEVEL, queue),
+ FIELD_GET(CUR_IBI_Q_LEVEL, queue),
+ FIELD_GET(CUR_TX_BUF_LVL, data),
+ FIELD_GET(CUR_RX_BUF_LVL, data));
+ }
+
+ /* just bust out everything with pending responses for now */
+ hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
+ /* ... and half-way TX transfers if any */
+ if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
+ hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
+ /* then reset the hardware */
+ mipi_i3c_hci_pio_reset(hci);
+ mipi_i3c_hci_resume(hci);
+
+ DBG("status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+}
+
+static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
+ struct hci_pio_data *pio,
+ unsigned int thresh_val)
+{
+ u32 regval = pio->reg_queue_thresh;
+
+ regval &= ~QUEUE_IBI_STATUS_THLD;
+ regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
+ /* write the threshold reg only if it changes */
+ if (regval != pio->reg_queue_thresh) {
+ pio_reg_write(QUEUE_THLD_CTRL, regval);
+ pio->reg_queue_thresh = regval;
+ DBG("%d", thresh_val);
+ }
+}
+
+static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
+ struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ unsigned int nr_words, thresh_val;
+ u32 *p;
+
+ p = ibi->data_ptr;
+ p += (ibi->seg_len - ibi->seg_cnt) / 4;
+
+ while ((nr_words = ibi->seg_cnt/4)) {
+ /* determine our IBI queue threshold value */
+ thresh_val = min(nr_words, pio->max_ibi_thresh);
+ hci_pio_set_ibi_thresh(hci, pio, thresh_val);
+ /* bail out if we don't have that amount of data ready */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ /* extract the data from the IBI port */
+ nr_words = thresh_val;
+ ibi->seg_cnt -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ while (nr_words--)
+ *p++ = pio_reg_read(IBI_PORT);
+ }
+
+ if (ibi->seg_cnt) {
+ /*
+ * There are trailing bytes in the last word.
+ * Fetch it and extract bytes in an endian independent way.
+ * Unlike the TX case, we must not write past the end of
+ * the destination buffer.
+ */
+ u32 data;
+ u8 *p_byte = (u8 *)p;
+
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ DBG("trailing %d", ibi->seg_cnt);
+ data = pio_reg_read(IBI_PORT);
+ data = (__force u32) cpu_to_le32(data);
+ while (ibi->seg_cnt--) {
+ *p_byte++ = data;
+ data >>= 8;
+ }
+ }
+
+ return true;
+}
+
+static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ struct i3c_dev_desc *dev;
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+ u32 ibi_status;
+
+ /*
+ * We have a new IBI. Try to set up its payload retrieval.
+ * When returning true, the IBI data has to be consumed whether
+ * or not we are set up to capture it. If we return true with
+ * ibi->slot == NULL that means the data payload has to be
+ * drained out of the IBI port and dropped.
+ */
+
+ ibi_status = pio_reg_read(IBI_PORT);
+ DBG("status = %#x", ibi_status);
+ ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ if (ibi_status & IBI_ERROR) {
+ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
+ return false;
+ }
+
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+ ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ ibi->seg_cnt = ibi->seg_len;
+
+ dev = i3c_hci_addr_to_dev(hci, ibi->addr);
+ if (!dev) {
+ dev_err(&hci->master.dev,
+ "IBI for unknown device %#x\n", ibi->addr);
+ return true;
+ }
+
+ dev_data = i3c_dev_get_master_data(dev);
+ dev_ibi = dev_data->ibi_data;
+ ibi->max_len = dev_ibi->max_len;
+
+ if (ibi->seg_len > ibi->max_len) {
+ dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+ ibi->seg_len, ibi->max_len);
+ return true;
+ }
+
+ ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+ if (!ibi->slot) {
+ dev_err(&hci->master.dev, "no free slot for IBI\n");
+ } else {
+ ibi->slot->len = 0;
+ ibi->data_ptr = ibi->slot->data;
+ }
+ return true;
+}
+
+static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+
+ if (ibi->slot) {
+ dev_ibi = ibi->slot->dev->common.master_priv;
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
+ ibi->slot = NULL;
+ }
+}
+
+static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+
+ if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
+ if (!hci_pio_prep_new_ibi(hci, pio))
+ return false;
+
+ for (;;) {
+ u32 ibi_status;
+ unsigned int ibi_addr;
+
+ if (ibi->slot) {
+ if (!hci_pio_get_ibi_segment(hci, pio))
+ return false;
+ ibi->slot->len += ibi->seg_len;
+ ibi->data_ptr += ibi->seg_len;
+ if (ibi->last_seg) {
+ /* was the last segment: submit it and leave */
+ i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
+ ibi->slot = NULL;
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ return true;
+ }
+ } else if (ibi->seg_cnt) {
+ /*
+ * No slot but a non-zero count. This is the result
+ * of some error and the payload must be drained.
+			 * This normally does not happen, so there is no
+			 * need for extra optimization here.
+ */
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ do {
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ pio_reg_read(IBI_PORT);
+ } while (--ibi->seg_cnt);
+ if (ibi->last_seg)
+ return true;
+ }
+
+ /* try to move to the next segment right away */
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ ibi_status = pio_reg_read(IBI_PORT);
+ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ if (ibi->addr != ibi_addr) {
+ /* target address changed before last segment */
+ dev_err(&hci->master.dev,
+ "unexp IBI address changed from %d to %d\n",
+ ibi->addr, ibi_addr);
+ hci_pio_free_ibi_slot(hci, pio);
+ }
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+ ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ ibi->seg_cnt = ibi->seg_len;
+ if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
+ dev_err(&hci->master.dev,
+ "IBI payload too big (%d > %d)\n",
+ ibi->slot->len + ibi->seg_len, ibi->max_len);
+ hci_pio_free_ibi_slot(hci, pio);
+ }
+ }
+
+ return false;
+}
+
+static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+ return -ENOMEM;
+ pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(pool)) {
+ kfree(dev_ibi);
+ return PTR_ERR(pool);
+ }
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
+ return 0;
+}
+
+static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ dev_data->ibi_data = NULL;
+ i3c_generic_ibi_free_pool(dev_ibi->pool);
+ kfree(dev_ibi);
+}
+
+static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
+}
+
+static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ u32 status;
+
+ spin_lock(&pio->lock);
+ status = pio_reg_read(INTR_STATUS);
+ DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
+ if (!status) {
+ spin_unlock(&pio->lock);
+ return false;
+ }
+
+ if (status & STAT_IBI_STATUS_THLD)
+ hci_pio_process_ibi(hci, pio);
+
+ if (status & STAT_RX_THLD)
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+ if (status & STAT_TX_THLD)
+ if (hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs &= ~STAT_TX_THLD;
+ if (status & STAT_RESP_READY)
+ if (hci_pio_process_resp(hci, pio))
+ pio->enabled_irqs &= ~STAT_RESP_READY;
+
+ if (unlikely(status & STAT_LATENCY_WARNINGS)) {
+ pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
+ dev_warn_ratelimited(&hci->master.dev,
+ "encountered warning condition %#lx\n",
+ status & STAT_LATENCY_WARNINGS);
+ }
+
+ if (unlikely(status & STAT_ALL_ERRORS)) {
+ pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
+ hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
+ }
+
+ if (status & STAT_CMD_QUEUE_READY)
+ if (hci_pio_process_cmd(hci, pio))
+ pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
+
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("(out) status: %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ spin_unlock(&pio->lock);
+ return true;
+}
+
+const struct hci_io_ops mipi_i3c_hci_pio = {
+ .init = hci_pio_init,
+ .cleanup = hci_pio_cleanup,
+ .queue_xfer = hci_pio_queue_xfer,
+ .dequeue_xfer = hci_pio_dequeue_xfer,
+ .irq_handler = hci_pio_irq_handler,
+ .request_ibi = hci_pio_request_ibi,
+ .free_ibi = hci_pio_free_ibi,
+ .recycle_ibi_slot = hci_pio_recycle_ibi_slot,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h b/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h
new file mode 100644
index 000000000..1e36b75af
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Transfer Mode/Rate Table definitions as found in extended capability
+ * sections 0x04 and 0x08.
+ * This applies starting from I3C HCI v2.0.
+ */
+
+#ifndef XFER_MODE_RATE_H
+#define XFER_MODE_RATE_H
+
+/*
+ * Master Transfer Mode Table Fixed Indexes.
+ *
+ * Indexes 0x0 and 0x8 are mandatory. Availability for the rest must be
+ * obtained from the mode table in the extended capability area.
+ * Presence and definitions for indexes beyond these ones may vary.
+ */
+#define XFERMODE_IDX_I3C_SDR 0x00 /* I3C SDR Mode */
+#define XFERMODE_IDX_I3C_HDR_DDR 0x01 /* I3C HDR-DDR Mode */
+#define XFERMODE_IDX_I3C_HDR_T 0x02 /* I3C HDR-Ternary Mode */
+#define XFERMODE_IDX_I3C_HDR_BT 0x03 /* I3C HDR-BT Mode */
+#define XFERMODE_IDX_I2C 0x08 /* Legacy I2C Mode */
+
+/*
+ * Transfer Mode Table Entry Bits Definitions
+ */
+#define XFERMODE_VALID_XFER_ADD_FUNC GENMASK(21, 16)
+#define XFERMODE_ML_DATA_XFER_CODING GENMASK(15, 11)
+#define XFERMODE_ML_ADDL_LANES GENMASK(10, 8)
+#define XFERMODE_SUPPORTED BIT(7)
+#define XFERMODE_MODE GENMASK(3, 0)
+
+/*
+ * Master Data Transfer Rate Selector Values.
+ *
+ * These are the values to be used in the command descriptor XFER_RATE field
+ * and found in the RATE_ID field below.
+ * The I3C_SDR0, I3C_SDR1, I3C_SDR2, I3C_SDR3, I3C_SDR4 and I2C_FM rates
+ * are required, everything else is optional and discoverable in the
+ * Data Transfer Rate Table. The rates indicated here are typical; the
+ * actual rates may vary slightly and are also specified in the Data
+ * Transfer Rate Table.
+ */
+#define XFERRATE_I3C_SDR0 0x00 /* 12.5 MHz */
+#define XFERRATE_I3C_SDR1 0x01 /* 8 MHz */
+#define XFERRATE_I3C_SDR2 0x02 /* 6 MHz */
+#define XFERRATE_I3C_SDR3 0x03 /* 4 MHz */
+#define XFERRATE_I3C_SDR4 0x04 /* 2 MHz */
+#define XFERRATE_I3C_SDR_FM_FMP		0x05	/* 400 kHz / 1 MHz */
+#define XFERRATE_I3C_SDR_USER6 0x06 /* User Defined */
+#define XFERRATE_I3C_SDR_USER7 0x07 /* User Defined */
+
+#define XFERRATE_I2C_FM			0x00	/* 400 kHz */
+#define XFERRATE_I2C_FMP 0x01 /* 1 MHz */
+#define XFERRATE_I2C_USER2 0x02 /* User Defined */
+#define XFERRATE_I2C_USER3 0x03 /* User Defined */
+#define XFERRATE_I2C_USER4 0x04 /* User Defined */
+#define XFERRATE_I2C_USER5 0x05 /* User Defined */
+#define XFERRATE_I2C_USER6 0x06 /* User Defined */
+#define XFERRATE_I2C_USER7 0x07 /* User Defined */
+
+/*
+ * Master Data Transfer Rate Table Mode ID values.
+ */
+#define XFERRATE_MODE_I3C 0x00
+#define XFERRATE_MODE_I2C 0x08
+
+/*
+ * Master Data Transfer Rate Table Entry Bits Definitions
+ */
+#define XFERRATE_MODE_ID GENMASK(31, 28)
+#define XFERRATE_RATE_ID GENMASK(22, 20)
+#define XFERRATE_ACTUAL_RATE_KHZ GENMASK(19, 0)
+
+#endif
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
new file mode 100644
index 000000000..f30d457e9
--- /dev/null
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -0,0 +1,1703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Silvaco dual-role I3C master driver
+ *
+ * Copyright (C) 2020 Silvaco
+ * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
+ * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/* Master Mode Registers */
+#define SVC_I3C_MCONFIG 0x000
+#define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
+#define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
+#define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
+#define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
+#define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
+#define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
+#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
+#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
+#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
+
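+/*
+ * Illustrative sketch, not part of the driver: the wrappers above let
+ * the whole MCONFIG word be composed in one expression. The divider
+ * values below are made up; real ones are derived from the fclk rate.
+ */
+static inline void svc_i3c_mconfig_example(void __iomem *regs)
+{
+	u32 cfg = SVC_I3C_MCONFIG_MASTER_EN |
+		  SVC_I3C_MCONFIG_ODSTOP(1) |
+		  SVC_I3C_MCONFIG_PPBAUD(3) |
+		  SVC_I3C_MCONFIG_ODBAUD(7) |
+		  SVC_I3C_MCONFIG_I2CBAUD(8);
+
+	writel(cfg, regs + SVC_I3C_MCONFIG);
+}
+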
+#define SVC_I3C_MCTRL 0x084
+#define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
+#define SVC_I3C_MCTRL_REQUEST_NONE 0
+#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
+#define SVC_I3C_MCTRL_REQUEST_STOP 2
+#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
+#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
+#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
+#define SVC_I3C_MCTRL_TYPE_I3C 0
+#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
+#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
+#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
+#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
+#define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
+#define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
+#define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
+#define SVC_I3C_MCTRL_DIR_WRITE 0
+#define SVC_I3C_MCTRL_DIR_READ 1
+#define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+#define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
+
+#define SVC_I3C_MSTATUS 0x088
+#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
+#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
+#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
+#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
+#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
+#define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
+#define SVC_I3C_MSTATUS_IBITYPE_IBI 1
+#define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
+#define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
+#define SVC_I3C_MINT_SLVSTART BIT(8)
+#define SVC_I3C_MINT_MCTRLDONE BIT(9)
+#define SVC_I3C_MINT_COMPLETE BIT(10)
+#define SVC_I3C_MINT_RXPEND BIT(11)
+#define SVC_I3C_MINT_TXNOTFULL BIT(12)
+#define SVC_I3C_MINT_IBIWON BIT(13)
+#define SVC_I3C_MINT_ERRWARN BIT(15)
+#define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
+#define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
+#define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
+#define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
+#define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
+#define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
+#define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
+#define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
+
+#define SVC_I3C_IBIRULES 0x08C
+#define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
+ ((addr) & 0x3F) << ((slot) * 6))
+#define SVC_I3C_IBIRULES_ADDRS 5
+#define SVC_I3C_IBIRULES_MSB0 BIT(30)
+#define SVC_I3C_IBIRULES_NOBYTE BIT(31)
+#define SVC_I3C_IBIRULES_MANDBYTE 0
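+
+/*
+ * Illustrative sketch, not part of the driver: each rule slot holds the
+ * low six bits of one address, six bits per slot in bits [29:0], e.g.
+ * slot 1 with address 0x0a sets bits [11:6] to 0x0a. The flag choice
+ * below is hypothetical.
+ */
+static inline void svc_i3c_ibirules_example(void __iomem *regs)
+{
+	u32 rules = SVC_I3C_IBIRULES_NOBYTE | SVC_I3C_IBIRULES_MSB0;
+
+	rules |= SVC_I3C_IBIRULES_ADDR(0, 0x09);	/* bits [5:0] */
+	rules |= SVC_I3C_IBIRULES_ADDR(1, 0x0a);	/* bits [11:6] */
+	writel(rules, regs + SVC_I3C_IBIRULES);
+}
+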
+#define SVC_I3C_MINTSET 0x090
+#define SVC_I3C_MINTCLR 0x094
+#define SVC_I3C_MINTMASKED 0x098
+#define SVC_I3C_MERRWARN 0x09C
+#define SVC_I3C_MERRWARN_NACK BIT(2)
+#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
+#define SVC_I3C_MDMACTRL 0x0A0
+#define SVC_I3C_MDATACTRL 0x0AC
+#define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+#define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
+#define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
+#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
+#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
+#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
+#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
+#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
+
+#define SVC_I3C_MWDATAB 0x0B0
+#define SVC_I3C_MWDATAB_END BIT(8)
+
+#define SVC_I3C_MWDATABE 0x0B4
+#define SVC_I3C_MWDATAH 0x0B8
+#define SVC_I3C_MWDATAHE 0x0BC
+#define SVC_I3C_MRDATAB 0x0C0
+#define SVC_I3C_MRDATAH 0x0C8
+#define SVC_I3C_MWMSG_SDR 0x0D0
+#define SVC_I3C_MRMSG_SDR 0x0D4
+#define SVC_I3C_MWMSG_DDR 0x0D8
+#define SVC_I3C_MRMSG_DDR 0x0DC
+
+#define SVC_I3C_MDYNADDR 0x0E4
+#define SVC_MDYNADDR_VALID BIT(0)
+#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
+
+#define SVC_I3C_MAX_DEVS 32
+#define SVC_I3C_PM_TIMEOUT_MS 1000
+
+/* This parameter depends on the implementation and may be tuned */
+#define SVC_I3C_FIFO_SIZE 16
+
+struct svc_i3c_cmd {
+ u8 addr;
+ bool rnw;
+ u8 *in;
+ const void *out;
+ unsigned int len;
+ unsigned int read_len;
+ bool continued;
+};
+
+struct svc_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int type;
+ unsigned int ncmds;
+ struct svc_i3c_cmd cmds[];
+};
+
+/**
+ * struct svc_i3c_master - Silvaco I3C Master structure
+ * @base: I3C master controller
+ * @dev: Corresponding device
+ * @regs: Memory mapping
+ * @free_slots: Bit array of available slots
+ * @addrs: Array containing the dynamic addresses of each attached device
+ * @descs: Array of descriptors, one per attached device
+ * @hj_work: Hot-join work
+ * @ibi_work: IBI work
+ * @irq: Main interrupt
+ * @pclk: System clock
+ * @fclk: Fast clock (bus)
+ * @sclk: Slow clock (other events)
+ * @xferqueue: Transfer queue structure
+ * @xferqueue.list: List member
+ * @xferqueue.cur: Current ongoing transfer
+ * @xferqueue.lock: Queue lock
+ * @ibi: IBI structure
+ * @ibi.num_slots: Number of slots available in @ibi.slots
+ * @ibi.slots: Available IBI slots
+ * @ibi.tbq_slot: To be queued IBI slot
+ * @ibi.lock: IBI lock
+ * @lock: Transfer lock, protects transfers from racing with the IBI work thread
+ */
+struct svc_i3c_master {
+ struct i3c_master_controller base;
+ struct device *dev;
+ void __iomem *regs;
+ u32 free_slots;
+ u8 addrs[SVC_I3C_MAX_DEVS];
+ struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
+ struct work_struct hj_work;
+ struct work_struct ibi_work;
+ int irq;
+ struct clk *pclk;
+ struct clk *fclk;
+ struct clk *sclk;
+ struct {
+ struct list_head list;
+ struct svc_i3c_xfer *cur;
+ /* Prevent races between transfers */
+ spinlock_t lock;
+ } xferqueue;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ struct i3c_ibi_slot *tbq_slot;
+ /* Prevent races within IBI handlers */
+ spinlock_t lock;
+ } ibi;
+ struct mutex lock;
+};
+
+/**
+ * struct svc_i3c_i2c_dev_data - Device specific data
+ * @index: Index in the master tables corresponding to this device
+ * @ibi: IBI slot index in the master structure
+ * @ibi_pool: IBI pool associated to this device
+ */
+struct svc_i3c_i2c_dev_data {
+ u8 index;
+ int ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
+static bool svc_i3c_master_error(struct svc_i3c_master *master)
+{
+ u32 mstatus, merrwarn;
+
+ mstatus = readl(master->regs + SVC_I3C_MSTATUS);
+ if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
+
+ /* Ignore timeout error */
+ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
+ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+ return false;
+ }
+
+ dev_err(master->dev,
+ "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
+{
+ writel(mask, master->regs + SVC_I3C_MINTSET);
+}
+
+static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
+{
+ u32 mask = readl(master->regs + SVC_I3C_MINTSET);
+
+ writel(mask, master->regs + SVC_I3C_MINTCLR);
+}
+
+static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
+{
+ /* Clear pending warnings */
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+}
+
+static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
+{
+ /* Flush FIFOs */
+ writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
+ master->regs + SVC_I3C_MDATACTRL);
+}
+
+static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+	/* Set RX and TX trigger levels, flush FIFOs */
+ reg = SVC_I3C_MDATACTRL_FLUSHTB |
+ SVC_I3C_MDATACTRL_FLUSHRB |
+ SVC_I3C_MDATACTRL_UNLOCK_TRIG |
+ SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
+ SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
+ writel(reg, master->regs + SVC_I3C_MDATACTRL);
+}
+
+static void svc_i3c_master_reset(struct svc_i3c_master *master)
+{
+ svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_reset_fifo_trigger(master);
+ svc_i3c_master_disable_interrupts(master);
+}
+
+static inline struct svc_i3c_master *
+to_svc_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct svc_i3c_master, base);
+}
+
+static void svc_i3c_master_hj_work(struct work_struct *work)
+{
+ struct svc_i3c_master *master;
+
+ master = container_of(work, struct svc_i3c_master, hj_work);
+ i3c_master_do_daa(&master->base);
+}
+
+static struct i3c_dev_desc *
+svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
+ unsigned int ibiaddr)
+{
+ int i;
+
+ for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
+ if (master->addrs[i] == ibiaddr)
+ break;
+
+ if (i == SVC_I3C_MAX_DEVS)
+ return NULL;
+
+ return master->descs[i];
+}
+
+static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
+{
+ writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
+
+	/*
+	 * This delay is necessary after the emission of a stop, otherwise
+	 * e.g. repeated IBIs do not get detected. There is a note in the
+	 * manual about it, stating that the stop condition might not be
+	 * settled correctly if a start condition follows too rapidly.
+	 */
+ udelay(1);
+}
+
+static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ struct i3c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_ibi_slot *slot;
+ unsigned int count;
+ u32 mdatactrl;
+ int ret, val;
+ u8 *buf;
+
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return -ENOSPC;
+
+ slot->len = 0;
+ buf = slot->data;
+
+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+ return ret;
+ }
+
+ while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
+ slot->len < SVC_I3C_FIFO_SIZE) {
+ mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+ count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
+		/* MRDATAB is a byte-wide register, so read one byte per access */
+		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
+ slot->len += count;
+ buf += count;
+ }
+
+ master->ibi.tbq_slot = slot;
+
+ return 0;
+}
+
+static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
+ bool mandatory_byte)
+{
+ unsigned int ibi_ack_nack;
+
+ ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
+ if (mandatory_byte)
+ ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
+ else
+ ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
+
+ writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
+}
+
+static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
+{
+ writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
+ SVC_I3C_MCTRL_IBIRESP_NACK,
+ master->regs + SVC_I3C_MCTRL);
+}
+
+static void svc_i3c_master_ibi_work(struct work_struct *work)
+{
+ struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
+ struct svc_i3c_i2c_dev_data *data;
+ unsigned int ibitype, ibiaddr;
+ struct i3c_dev_desc *dev;
+ u32 status, val;
+ int ret;
+
+ mutex_lock(&master->lock);
+ /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ SVC_I3C_MCTRL_IBIRESP_AUTO,
+ master->regs + SVC_I3C_MCTRL);
+
+ /* Wait for IBIWON, should take approximately 100us */
+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for IBIWON\n");
+ svc_i3c_master_emit_stop(master);
+ goto reenable_ibis;
+ }
+
+ /* Clear the interrupt status */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+ status = readl(master->regs + SVC_I3C_MSTATUS);
+ ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
+ ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
+
+	/* Handle the critical responses to IBIs */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
+ if (!dev)
+ svc_i3c_master_nack_ibi(master);
+ else
+ svc_i3c_master_handle_ibi(master, dev);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ svc_i3c_master_ack_ibi(master, false);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ svc_i3c_master_nack_ibi(master);
+ break;
+ default:
+ break;
+ }
+
+	/*
+	 * If an error happened, we probably got interrupted and the exchange
+	 * timed out. In this case we just drop everything, emit a stop and
+	 * wait for the slave to interrupt again.
+	 */
+ if (svc_i3c_master_error(master)) {
+ if (master->ibi.tbq_slot) {
+ data = i3c_dev_get_master_data(dev);
+ i3c_generic_ibi_recycle_slot(data->ibi_pool,
+ master->ibi.tbq_slot);
+ master->ibi.tbq_slot = NULL;
+ }
+
+ svc_i3c_master_emit_stop(master);
+
+ goto reenable_ibis;
+ }
+
+	/* Handle the non-critical tasks */
+ switch (ibitype) {
+ case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ if (dev) {
+ i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
+ master->ibi.tbq_slot = NULL;
+ }
+ svc_i3c_master_emit_stop(master);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
+ queue_work(master->base.wq, &master->hj_work);
+ break;
+ case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ default:
+ break;
+ }
+
+reenable_ibis:
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+ mutex_unlock(&master->lock);
+}
+
+static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+{
+ struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
+
+ if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ return IRQ_NONE;
+
+ /* Clear the interrupt status */
+ writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
+
+ svc_i3c_master_disable_interrupts(master);
+
+ /* Handle the interrupt in a non atomic context */
+ queue_work(master->base.wq, &master->ibi_work);
+
+ return IRQ_HANDLED;
+}
+
+static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = {};
+ unsigned long fclk_rate, fclk_period_ns;
+ unsigned int high_period_ns, od_low_period_ns;
+ u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Timings derivation */
+ fclk_rate = clk_get_rate(master->fclk);
+ if (!fclk_rate) {
+ ret = -EINVAL;
+ goto rpm_out;
+ }
+
+ fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
+
+ /*
+ * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
+ * Simplest configuration is using a 50% duty-cycle of 40ns.
+ */
+ ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
+ pplow = 0;
+
+	/*
+	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
+	 * duty-cycle tuned so that high levels are filtered out by
+	 * the 50ns filter (target being 40ns).
+	 */
+ odhpp = 1;
+ high_period_ns = (ppbaud + 1) * fclk_period_ns;
+ odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
+ od_low_period_ns = (odbaud + 1) * high_period_ns;
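+
+	/*
+	 * Worked example, assuming fclk = 100MHz (10ns period): ppbaud is
+	 * 40/10 - 1 = 3, so both push-pull phases last (3 + 1) * 10 = 40ns,
+	 * matching the 12.5MHz/80ns target. odbaud is (240 - 40)/40 - 1 = 4,
+	 * so the open-drain low phase lasts (4 + 1) * 40 = 200ns for a full
+	 * 240ns period. In the I2C Fm+ case below, i2cbaud is then
+	 * 1000/200 - 2 = 3.
+	 */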
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_PURE:
+ i2cbaud = 0;
+ odstop = 0;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+		/*
+		 * Using I2C Fm+ mode, target is 1MHz/1000ns; the difference
+		 * between the high and low period does not really matter.
+		 */
+ i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ odstop = 1;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ /*
+ * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
+ * constraints as the FM+ mode.
+ */
+ i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ odstop = 1;
+ break;
+	default:
+		/* Unsupported bus mode: fail instead of reporting success */
+		ret = -EINVAL;
+		goto rpm_out;
+ }
+
+ reg = SVC_I3C_MCONFIG_MASTER_EN |
+ SVC_I3C_MCONFIG_DISTO(0) |
+ SVC_I3C_MCONFIG_HKEEP(0) |
+ SVC_I3C_MCONFIG_ODSTOP(odstop) |
+ SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
+ SVC_I3C_MCONFIG_PPLOW(pplow) |
+ SVC_I3C_MCONFIG_ODBAUD(odbaud) |
+ SVC_I3C_MCONFIG_ODHPP(odhpp) |
+ SVC_I3C_MCONFIG_SKEW(0) |
+ SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
+ writel(reg, master->regs + SVC_I3C_MCONFIG);
+
+	/* Register the master itself: self-assign a free dynamic address */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ goto rpm_out;
+
+ info.dyn_addr = ret;
+
+ writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
+ master->regs + SVC_I3C_MDYNADDR);
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ goto rpm_out;
+
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
+}
+
+static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
+
+ svc_i3c_master_disable_interrupts(master);
+
+ /* Disable master */
+ writel(0, master->regs + SVC_I3C_MCONFIG);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+}
+
+static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
+{
+ unsigned int slot;
+
+ if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
+ return -ENOSPC;
+
+ slot = ffs(master->free_slots) - 1;
+
+ master->free_slots &= ~BIT(slot);
+
+ return slot;
+}
+
+static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
+ unsigned int slot)
+{
+ master->free_slots |= BIT(slot);
+}
+
+static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = svc_i3c_master_reserve_slot(master);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ svc_i3c_master_release_slot(master, slot);
+ return -ENOMEM;
+ }
+
+ data->ibi = -1;
+ data->index = slot;
+ master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+ master->descs[slot] = dev;
+
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+
+ return 0;
+}
+
+static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ master->addrs[data->index] = 0;
+ svc_i3c_master_release_slot(master, data->index);
+
+ kfree(data);
+}
+
+static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = svc_i3c_master_reserve_slot(master);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ svc_i3c_master_release_slot(master, slot);
+ return -ENOMEM;
+ }
+
+ data->index = slot;
+ master->addrs[slot] = dev->addr;
+
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ svc_i3c_master_release_slot(master, data->index);
+
+ kfree(data);
+}
+
+static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
+ unsigned int len)
+{
+ int ret, i;
+ u32 reg;
+
+ for (i = 0; i < len; i++) {
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_RXPEND(reg),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ u8 *addrs, unsigned int *count)
+{
+ u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
+ unsigned int dev_nb = 0, last_addr = 0;
+ u32 reg;
+ int ret, i;
+
+ while (true) {
+ /* Enter/proceed with DAA */
+ writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
+ master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * Either one slave will send its ID, or the assignment process
+ * is done.
+ */
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_RXPEND(reg) |
+ SVC_I3C_MSTATUS_MCTRLDONE(reg),
+ 1, 1000);
+ if (ret)
+ return ret;
+
+ if (SVC_I3C_MSTATUS_RXPEND(reg)) {
+ u8 data[6];
+
+			/*
+			 * For now we only care about the 48-bit provisional
+			 * ID, which lets us check that a device does not nack
+			 * an address twice. Otherwise, we would just need to
+			 * flush the RX FIFO.
+			 */
+ ret = svc_i3c_master_readb(master, data, 6);
+ if (ret)
+ return ret;
+
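+			/* data[0] is the MSB of the big-endian 48-bit PID */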
+ for (i = 0; i < 6; i++)
+ prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
+
+ /* We do not care about the BCR and DCR yet */
+ ret = svc_i3c_master_readb(master, data, 2);
+ if (ret)
+ return ret;
+ } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
+ if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
+ SVC_I3C_MSTATUS_COMPLETE(reg)) {
+				/*
+				 * All devices received and acked their
+				 * dynamic address, this is the natural end
+				 * of the DAA procedure.
+				 */
+ break;
+ } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
+ /* No I3C devices attached */
+ if (dev_nb == 0)
+ break;
+
+				/*
+				 * A slave device nacked the address, which
+				 * is allowed only once: DAA will be stopped
+				 * and then resumed. The same device is
+				 * supposed to answer again immediately and
+				 * shall ack the address this time.
+				 */
+ if (prov_id[dev_nb] == nacking_prov_id)
+ return -EIO;
+
+ dev_nb--;
+ nacking_prov_id = prov_id[dev_nb];
+ svc_i3c_master_emit_stop(master);
+
+ continue;
+ } else {
+ return -EIO;
+ }
+ }
+
+ /* Wait for the slave to be ready to receive its address */
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
+ SVC_I3C_MSTATUS_STATE_DAA(reg) &&
+ SVC_I3C_MSTATUS_BETWEEN(reg),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ /* Give the slave device a suitable dynamic address */
+ ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
+ if (ret < 0)
+ return ret;
+
+ addrs[dev_nb] = ret;
+ dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
+ dev_nb, addrs[dev_nb]);
+
+ writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
+ last_addr = addrs[dev_nb++];
+ }
+
+ *count = dev_nb;
+
+ return 0;
+}
+
+static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
+{
+ struct i3c_dev_desc *dev;
+ u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
+ unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
+ nobyte_addr_ko = 0;
+ bool list_mbyte = false, list_nobyte = false;
+
+ /* Create the IBIRULES register for both cases */
+ i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
+ if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
+ continue;
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
+ reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
+ dev->info.dyn_addr);
+
+ /* IBI rules cannot be applied to devices with MSb=1 */
+ if (dev->info.dyn_addr & BIT(7))
+ mbyte_addr_ko++;
+ else
+ mbyte_addr_ok++;
+ } else {
+ reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
+ dev->info.dyn_addr);
+
+ /* IBI rules cannot be applied to devices with MSb=1 */
+ if (dev->info.dyn_addr & BIT(7))
+ nobyte_addr_ko++;
+ else
+ nobyte_addr_ok++;
+ }
+ }
+
+	/* Check whether each list can be handled by the hardware */
+ if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
+ list_mbyte = true;
+
+ if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
+ list_nobyte = true;
+
+ /* No list can be properly handled, return an error */
+ if (!list_mbyte && !list_nobyte)
+ return -ERANGE;
+
+	/* Pick the list the hardware can handle, preferring the mandatory-byte one */
+ if (list_mbyte)
+ writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
+ else
+ writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
+
+ return 0;
+}
+
+static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ u8 addrs[SVC_I3C_MAX_DEVS];
+ unsigned long flags;
+ unsigned int dev_nb;
+ int ret, i;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+ if (ret) {
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+ goto rpm_out;
+ }
+
+	/* Register with the core all devices that took part in the DAA */
+ for (i = 0; i < dev_nb; i++) {
+ ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
+ if (ret)
+ goto rpm_out;
+ }
+
+ /* Configure IBI auto-rules */
+ ret = svc_i3c_update_ibirules(master);
+ if (ret)
+		dev_err(master->dev, "Cannot handle such a list of devices\n");
+
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
+}
+
+static int svc_i3c_master_read(struct svc_i3c_master *master,
+ u8 *in, unsigned int len)
+{
+ int offset = 0, i;
+ u32 mdctrl, mstatus;
+ bool completed = false;
+ unsigned int count;
+ unsigned long start = jiffies;
+
+ while (!completed) {
+ mstatus = readl(master->regs + SVC_I3C_MSTATUS);
+ if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
+ completed = true;
+
+ if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
+ dev_dbg(master->dev, "I3C read timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
+ count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
+ if (offset + count > len) {
+ dev_err(master->dev, "I3C receive length too long!\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < count; i++)
+ in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
+
+ offset += count;
+ }
+
+ return offset;
+}
+
+static int svc_i3c_master_write(struct svc_i3c_master *master,
+ const u8 *out, unsigned int len)
+{
+ int offset = 0, ret;
+ u32 mdctrl;
+
+ while (offset < len) {
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
+ mdctrl,
+ !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
+ 0, 1000);
+ if (ret)
+ return ret;
+
+ /*
+ * The last byte to be sent over the bus must either have the
+ * "end" bit set or be written in MWDATABE.
+ */
+ if (likely(offset < (len - 1)))
+ writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
+ else
+ writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
+ }
+
+ return 0;
+}
+
+static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ bool rnw, unsigned int xfer_type, u8 addr,
+ u8 *in, const u8 *out, unsigned int xfer_len,
+ unsigned int *read_len, bool continued)
+{
+ u32 reg;
+ int ret;
+
+	/* Clear the IBIWON (write-1-to-clear) bit before starting the frame */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
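+	/* Emit the start and the address, NACKing any IBI arbitrated meanwhile */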
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+ SVC_I3C_MCTRL_DIR(rnw) |
+ SVC_I3C_MCTRL_ADDR(addr) |
+ SVC_I3C_MCTRL_RDTERM(*read_len),
+ master->regs + SVC_I3C_MCTRL);
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+ if (ret)
+ goto emit_stop;
+
+ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+ ret = -ENXIO;
+ goto emit_stop;
+ }
+
+ if (rnw)
+ ret = svc_i3c_master_read(master, in, xfer_len);
+ else
+ ret = svc_i3c_master_write(master, out, xfer_len);
+ if (ret < 0)
+ goto emit_stop;
+
+	/*
+	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller
+	 * Starting a Frame with I3C Target Address:
+	 *
+	 * The I3C Controller normally should start a Frame, the Address may be
+	 * arbitrated, and so the Controller shall monitor to see whether an
+	 * In-Band Interrupt request, a Controller Role Request (i.e., Secondary
+	 * Controller requests to become the Active Controller), or a Hot-Join
+	 * Request has been made.
+	 *
+	 * If the IBIWON check is missed, wrong data will be returned. When
+	 * IBIWON happens, fail the transfer and yield to the event handlers
+	 * above.
+	 */
+ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
+ ret = -ENXIO;
+ goto emit_stop;
+ }
+
+ if (rnw)
+ *read_len = ret;
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
+ if (ret)
+ goto emit_stop;
+
+ writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
+
+ if (!continued) {
+ svc_i3c_master_emit_stop(master);
+
+		/* Wait for the bus to return to idle once the stop is sent */
+ readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
+ }
+
+ return 0;
+
+emit_stop:
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+
+ return ret;
+}
+
+static struct svc_i3c_xfer *
+svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
+{
+ struct svc_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+}
+
+static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ svc_i3c_master_dequeue_xfer_locked(master, xfer);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
+{
+ struct svc_i3c_xfer *xfer = master->xferqueue.cur;
+ int ret, i;
+
+ if (!xfer)
+ return;
+
+ svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_flush_fifo(master);
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
+ cmd->addr, cmd->in, cmd->out,
+ cmd->len, &cmd->read_len,
+ cmd->continued);
+ if (ret)
+ break;
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ if (ret < 0)
+ svc_i3c_master_dequeue_xfer_locked(master, xfer);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct svc_i3c_xfer,
+ node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ svc_i3c_master_start_xfer_locked(master);
+}
+
+static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
+ struct svc_i3c_xfer *xfer)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ svc_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+}
+
+static bool
+svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd)
+{
+ /* No software support for CCC commands targeting more than one slave */
+ return (cmd->ndests == 1);
+}
+
+static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ unsigned int xfer_len = ccc->dests[0].payload.len + 1;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ u8 *buf;
+ int ret;
+
+ xfer = svc_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ buf = kmalloc(xfer_len, GFP_KERNEL);
+ if (!buf) {
+ svc_i3c_master_free_xfer(xfer);
+ return -ENOMEM;
+ }
+
+ buf[0] = ccc->id;
+ memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ cmd = &xfer->cmds[0];
+ cmd->addr = ccc->dests[0].addr;
+ cmd->rnw = ccc->rnw;
+ cmd->in = NULL;
+ cmd->out = buf;
+ cmd->len = xfer_len;
+ cmd->read_len = 0;
+ cmd->continued = false;
+
+ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ kfree(buf);
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ unsigned int xfer_len = ccc->dests[0].payload.len;
+ unsigned int read_len = ccc->rnw ? xfer_len : 0;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ int ret;
+
+ xfer = svc_i3c_master_alloc_xfer(master, 2);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ /* Broadcasted message */
+ cmd = &xfer->cmds[0];
+ cmd->addr = I3C_BROADCAST_ADDR;
+ cmd->rnw = 0;
+ cmd->in = NULL;
+ cmd->out = &ccc->id;
+ cmd->len = 1;
+ cmd->read_len = 0;
+ cmd->continued = true;
+
+ /* Directed message */
+ cmd = &xfer->cmds[1];
+ cmd->addr = ccc->dests[0].addr;
+ cmd->rnw = ccc->rnw;
+ cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
+	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
+ cmd->len = xfer_len;
+ cmd->read_len = read_len;
+ cmd->continued = false;
+
+ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
+
+ if (cmd->read_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->read_len;
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ bool broadcast = cmd->id < 0x80;
+ int ret;
+
+ if (broadcast)
+ ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ else
+ ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+
+ if (ret)
+ cmd->err = I3C_ERROR_M2;
+
+ return ret;
+}
+
+static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct svc_i3c_xfer *xfer;
+ int ret, i;
+
+ xfer = svc_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].rnw;
+ cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
+ cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
+ cmd->len = xfers[i].len;
+ cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1) < nxfers;
+ }
+
+ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct svc_i3c_xfer *xfer;
+ int ret, i;
+
+ xfer = svc_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
+
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].flags & I2C_M_RD;
+ cmd->in = cmd->rnw ? xfers[i].buf : NULL;
+ cmd->out = cmd->rnw ? NULL : xfers[i].buf;
+ cmd->len = xfers[i].len;
+ cmd->read_len = cmd->rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1 < nxfers);
+ }
+
+ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
+ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
+		dev_err(master->dev, "IBI max payload %d should be <= %d\n",
+			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
+ return -ERANGE;
+ }
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ master->ibi.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ svc_i3c_master_disable_interrupts(master);
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
+}
+
+static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops svc_i3c_master_ops = {
+ .bus_init = svc_i3c_master_bus_init,
+ .bus_cleanup = svc_i3c_master_bus_cleanup,
+ .attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
+ .detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
+ .reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
+ .attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
+ .do_daa = svc_i3c_master_do_daa,
+ .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
+ .priv_xfers = svc_i3c_master_priv_xfers,
+ .i2c_xfers = svc_i3c_master_i2c_xfers,
+ .request_ibi = svc_i3c_master_request_ibi,
+ .free_ibi = svc_i3c_master_free_ibi,
+ .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
+ .enable_ibi = svc_i3c_master_enable_ibi,
+ .disable_ibi = svc_i3c_master_disable_ibi,
+};
+
+static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
+{
+ int ret = 0;
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->fclk);
+ if (ret) {
+ clk_disable_unprepare(master->pclk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(master->sclk);
+ if (ret) {
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
+{
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ clk_disable_unprepare(master->sclk);
+}
+
+static int svc_i3c_master_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct svc_i3c_master *master;
+ int ret;
+
+ master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
+ master->fclk = devm_clk_get(dev, "fast_clk");
+ if (IS_ERR(master->fclk))
+ return PTR_ERR(master->fclk);
+
+ master->sclk = devm_clk_get(dev, "slow_clk");
+ if (IS_ERR(master->sclk))
+ return PTR_ERR(master->sclk);
+
+ master->irq = platform_get_irq(pdev, 0);
+ if (master->irq <= 0)
+ return -ENOENT;
+
+ master->dev = dev;
+
+ ret = svc_i3c_master_prepare_clks(master);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
+ mutex_init(&master->lock);
+
+ ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ if (ret)
+ goto err_disable_clks;
+
+ master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = SVC_I3C_MAX_DEVS;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
+ goto err_disable_clks;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ svc_i3c_master_reset(master);
+
+ /* Register the master */
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &svc_i3c_master_ops, false);
+ if (ret)
+ goto rpm_disable;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+err_disable_clks:
+ svc_i3c_master_unprepare_clks(master);
+
+ return ret;
+}
+
+static int svc_i3c_master_remove(struct platform_device *pdev)
+{
+ struct svc_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
+{
+ struct svc_i3c_master *master = dev_get_drvdata(dev);
+
+ svc_i3c_master_unprepare_clks(master);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
+{
+ struct svc_i3c_master *master = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+
+	/* Propagate a clock preparation failure to the PM core */
+	return svc_i3c_master_prepare_clks(master);
+}
+
+static const struct dev_pm_ops svc_i3c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
+ svc_i3c_runtime_resume, NULL)
+};
+
+static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
+ { .compatible = "silvaco,i3c-master" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
+
+static struct platform_driver svc_i3c_master = {
+ .probe = svc_i3c_master_probe,
+ .remove = svc_i3c_master_remove,
+ .driver = {
+ .name = "silvaco-i3c-master",
+ .of_match_table = svc_i3c_master_of_match_tbl,
+ .pm = &svc_i3c_pm_ops,
+ },
+};
+module_platform_driver(svc_i3c_master);
+
+MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
+MODULE_LICENSE("GPL v2");