path: root/drivers/media/pci/intel
author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit:    ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree:      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/media/pci/intel
parent:    Initial commit.
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/pci/intel')
-rw-r--r--  drivers/media/pci/intel/Kconfig              19
-rw-r--r--  drivers/media/pci/intel/Makefile              7
-rw-r--r--  drivers/media/pci/intel/ipu-bridge.c        816
-rw-r--r--  drivers/media/pci/intel/ipu3/Kconfig         20
-rw-r--r--  drivers/media/pci/intel/ipu3/Makefile         2
-rw-r--r--  drivers/media/pci/intel/ipu3/ipu3-cio2.c   2067
-rw-r--r--  drivers/media/pci/intel/ipu3/ipu3-cio2.h    462
-rw-r--r--  drivers/media/pci/intel/ivsc/Kconfig         15
-rw-r--r--  drivers/media/pci/intel/ivsc/Makefile         9
-rw-r--r--  drivers/media/pci/intel/ivsc/mei_ace.c      579
-rw-r--r--  drivers/media/pci/intel/ivsc/mei_csi.c      825
11 files changed, 4821 insertions, 0 deletions
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
new file mode 100644
index 0000000000..ee4684159d
--- /dev/null
+++ b/drivers/media/pci/intel/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ivsc/Kconfig"
+
+config IPU_BRIDGE
+ tristate "Intel IPU Bridge"
+ depends on I2C && ACPI
+ help
+	  The IPU bridge is a helper library that allows Intel IPU drivers to
+	  function on systems shipped with Windows.
+
+ Currently used by the ipu3-cio2 and atomisp drivers.
+
+ Supported systems include:
+
+ - Microsoft Surface models (except Surface Pro 3)
+ - The Lenovo Miix line (for example the 510, 520, 710 and 720)
+ - Dell 7285
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
new file mode 100644
index 0000000000..f199a97e1d
--- /dev/null
+++ b/drivers/media/pci/intel/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the IPU drivers
+#
+obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o
+obj-y += ipu3/
+obj-y += ivsc/
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
new file mode 100644
index 0000000000..e38198e259
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <media/ipu-bridge.h>
+#include <media/v4l2-fwnode.h>
+
+/*
+ * 92335fcf-3203-4472-af93-7b4453ac29da
+ *
+ * Used to build the MEI CSI device name, in order to look up the MEI CSI
+ * device via device_find_child_by_name().
+ */
+#define MEI_CSI_UUID \
+ UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+ 0xAF, 0x93, 0x7B, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+/*
+ * IVSC device name
+ *
+ * Used to match IVSC device by ipu_bridge_match_ivsc_dev()
+ */
+#define IVSC_DEV_NAME "intel_vsc"
+
+/*
+ * Extend this array with ACPI Hardware IDs of devices known to be working
+ * plus the number of link-frequencies expected by their drivers, along with
+ * the frequency values in hertz. This is a somewhat opportunistic way of
+ * adding support for now, in the hope that a better source for the
+ * information (possibly some encoded value in the SSDB buffer that we're
+ * unaware of) becomes apparent in the future.
+ *
+ * Do not add an entry for a sensor that is not actually supported.
+ */
+static const struct ipu_sensor_config ipu_supported_sensors[] = {
+ /* Omnivision OV5693 */
+ IPU_SENSOR_CONFIG("INT33BE", 1, 419200000),
+ /* Omnivision OV8865 */
+ IPU_SENSOR_CONFIG("INT347A", 1, 360000000),
+ /* Omnivision OV7251 */
+ IPU_SENSOR_CONFIG("INT347E", 1, 319200000),
+ /* Omnivision OV2680 */
+ IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000),
+ /* Omnivision ov8856 */
+ IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
+ /* Omnivision ov2740 */
+ IPU_SENSOR_CONFIG("INT3474", 1, 360000000),
+ /* Hynix hi556 */
+ IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
+ /* Omnivision ov13b10 */
+ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
+ /* GalaxyCore GC0310 */
+ IPU_SENSOR_CONFIG("INT0310", 0),
+};
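
Each entry above pairs an ACPI HID with the CSI-2 link frequencies the
sensor's driver expects. As a rough sketch of what such an entry carries
(an assumption based on how cfg->nr_link_freqs and cfg->link_freqs are
consumed below; the real macro and struct live in media/ipu-bridge.h):

	/* Hypothetical expansion of IPU_SENSOR_CONFIG("INT347A", 1, 360000000) */
	{
		.hid = "INT347A",		/* ACPI Hardware ID to match */
		.nr_link_freqs = 1,		/* number of valid frequencies */
		.link_freqs = { 360000000 },	/* link frequencies in Hz */
	}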
+
+static const struct ipu_property_names prop_names = {
+ .clock_frequency = "clock-frequency",
+ .rotation = "rotation",
+ .orientation = "orientation",
+ .bus_type = "bus-type",
+ .data_lanes = "data-lanes",
+ .remote_endpoint = "remote-endpoint",
+ .link_frequencies = "link-frequencies",
+};
+
+static const char * const ipu_vcm_types[] = {
+ "ad5823",
+ "dw9714",
+ "ad5816",
+ "dw9719",
+ "dw9718",
+ "dw9806b",
+ "wv517s",
+ "lc898122xa",
+ "lc898212axb",
+};
+
+/*
+ * Used by ipu_bridge_get_ivsc_acpi_dev() to look up the IVSC ACPI device,
+ * instead of relying on a device/driver match to probe the IVSC device.
+ */
+static const struct acpi_device_id ivsc_acpi_ids[] = {
+ { "INTC1059" },
+ { "INTC1095" },
+ { "INTC100A" },
+ { "INTC10CF" },
+};
+
+static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ struct acpi_device *consumer, *ivsc_adev;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
+ const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
+
+ for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
+ /* the camera sensor depends on the IVSC in the DSDT, if one exists */
+ for_each_acpi_consumer_dev(ivsc_adev, consumer)
+ if (consumer->handle == handle) {
+ acpi_dev_put(consumer);
+ return ivsc_adev;
+ }
+ }
+
+ return NULL;
+}
+
+static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
+{
+ if (ACPI_COMPANION(dev) != adev)
+ return 0;
+
+ if (!sysfs_streq(dev_name(dev), IVSC_DEV_NAME))
+ return 0;
+
+ return 1;
+}
+
+static struct device *ipu_bridge_get_ivsc_csi_dev(struct acpi_device *adev)
+{
+ struct device *dev, *csi_dev;
+ uuid_le uuid = MEI_CSI_UUID;
+ char name[64];
+
+ /* IVSC device on platform bus */
+ dev = bus_find_device(&platform_bus_type, NULL, adev,
+ ipu_bridge_match_ivsc_dev);
+ if (dev) {
+ snprintf(name, sizeof(name), "%s-%pUl", dev_name(dev), &uuid);
+
+ csi_dev = device_find_child_by_name(dev, name);
+
+ put_device(dev);
+
+ return csi_dev;
+ }
+
+ return NULL;
+}
+
+static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
+ struct acpi_device *sensor_adev)
+{
+ struct acpi_device *adev;
+ struct device *csi_dev;
+
+ adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
+ if (adev) {
+ csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
+ if (!csi_dev) {
+ acpi_dev_put(adev);
+ dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
+ return -ENODEV;
+ }
+
+ sensor->csi_dev = csi_dev;
+ sensor->ivsc_adev = adev;
+ }
+
+ return 0;
+}
+
+static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ void *data, u32 size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = buffer.pointer;
+ if (!obj) {
+ dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
+ return -ENODEV;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&adev->dev, "Not an ACPI buffer\n");
+ ret = -ENODEV;
+ goto out_free_buff;
+ }
+
+ if (obj->buffer.length > size) {
+ dev_err(&adev->dev, "Given buffer is too small\n");
+ ret = -EINVAL;
+ goto out_free_buff;
+ }
+
+ memcpy(data, obj->buffer.pointer, obj->buffer.length);
+
+out_free_buff:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
+ struct ipu_sensor_ssdb *ssdb)
+{
+ switch (ssdb->degree) {
+ case IPU_SENSOR_ROTATION_NORMAL:
+ return 0;
+ case IPU_SENSOR_ROTATION_INVERTED:
+ return 180;
+ default:
+ dev_warn(&adev->dev,
+ "Unknown rotation %d. Assume 0 degree rotation\n",
+ ssdb->degree);
+ return 0;
+ }
+}
+
+static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
+{
+ enum v4l2_fwnode_orientation orientation;
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ status = acpi_get_physical_device_location(adev->handle, &pld);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ }
+
+ switch (pld->panel) {
+ case ACPI_PLD_PANEL_FRONT:
+ orientation = V4L2_FWNODE_ORIENTATION_FRONT;
+ break;
+ case ACPI_PLD_PANEL_BACK:
+ orientation = V4L2_FWNODE_ORIENTATION_BACK;
+ break;
+ case ACPI_PLD_PANEL_TOP:
+ case ACPI_PLD_PANEL_LEFT:
+ case ACPI_PLD_PANEL_RIGHT:
+ case ACPI_PLD_PANEL_UNKNOWN:
+ orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ break;
+ default:
+ dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
+ orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ break;
+ }
+
+ ACPI_FREE(pld);
+ return orientation;
+}
+
+int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
+{
+ struct ipu_sensor_ssdb ssdb = {};
+ int ret;
+
+ ret = ipu_bridge_read_acpi_buffer(adev, "SSDB", &ssdb, sizeof(ssdb));
+ if (ret)
+ return ret;
+
+ if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
+ dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
+ ssdb.vcmtype = 0;
+ }
+
+ if (ssdb.lanes > IPU_MAX_LANES) {
+ dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
+ return -EINVAL;
+ }
+
+ sensor->link = ssdb.link;
+ sensor->lanes = ssdb.lanes;
+ sensor->mclkspeed = ssdb.mclkspeed;
+ sensor->rotation = ipu_bridge_parse_rotation(adev, &ssdb);
+ sensor->orientation = ipu_bridge_parse_orientation(adev);
+
+ if (ssdb.vcmtype)
+ sensor->vcm_type = ipu_vcm_types[ssdb.vcmtype - 1];
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_bridge_parse_ssdb, INTEL_IPU_BRIDGE);
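
Note that SSDB's vcmtype field is a 1-based index into ipu_vcm_types[]:
vcmtype 2, for example, selects "dw9714", while 0 means no VCM is fitted;
out-of-range values are warned about and treated as 0 above.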
+
+static void ipu_bridge_create_fwnode_properties(
+ struct ipu_sensor *sensor,
+ struct ipu_bridge *bridge,
+ const struct ipu_sensor_config *cfg)
+{
+ struct ipu_property_names *names = &sensor->prop_names;
+ struct software_node *nodes = sensor->swnodes;
+
+ sensor->prop_names = prop_names;
+
+ if (sensor->csi_dev) {
+ sensor->local_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_SENSOR_ENDPOINT]);
+ sensor->remote_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_IPU_ENDPOINT]);
+ sensor->ivsc_sensor_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
+ sensor->ivsc_ipu_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
+
+ sensor->ivsc_sensor_ep_properties[0] =
+ PROPERTY_ENTRY_U32(names->bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ivsc_sensor_ep_properties[1] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN(names->data_lanes,
+ bridge->data_lanes,
+ sensor->lanes);
+ sensor->ivsc_sensor_ep_properties[2] =
+ PROPERTY_ENTRY_REF_ARRAY(names->remote_endpoint,
+ sensor->ivsc_sensor_ref);
+
+ sensor->ivsc_ipu_ep_properties[0] =
+ PROPERTY_ENTRY_U32(names->bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ivsc_ipu_ep_properties[1] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN(names->data_lanes,
+ bridge->data_lanes,
+ sensor->lanes);
+ sensor->ivsc_ipu_ep_properties[2] =
+ PROPERTY_ENTRY_REF_ARRAY(names->remote_endpoint,
+ sensor->ivsc_ipu_ref);
+ } else {
+ sensor->local_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
+ sensor->remote_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
+ }
+
+ sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.clock_frequency,
+ sensor->mclkspeed);
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.rotation,
+ sensor->rotation);
+ sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.orientation,
+ sensor->orientation);
+ if (sensor->vcm_type) {
+ sensor->vcm_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
+ sensor->dev_properties[3] =
+ PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
+ }
+
+ sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ sensor->prop_names.data_lanes,
+ bridge->data_lanes, sensor->lanes);
+ sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
+ sensor->prop_names.remote_endpoint,
+ sensor->local_ref);
+
+ if (cfg->nr_link_freqs > 0)
+ sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
+ sensor->prop_names.link_frequencies,
+ cfg->link_freqs,
+ cfg->nr_link_freqs);
+
+ sensor->ipu_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ sensor->prop_names.data_lanes,
+ bridge->data_lanes, sensor->lanes);
+ sensor->ipu_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
+ sensor->prop_names.remote_endpoint,
+ sensor->remote_ref);
+}
+
+static void ipu_bridge_init_swnode_names(struct ipu_sensor *sensor)
+{
+ snprintf(sensor->node_names.remote_port,
+ sizeof(sensor->node_names.remote_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, sensor->link);
+ snprintf(sensor->node_names.port,
+ sizeof(sensor->node_names.port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
+ snprintf(sensor->node_names.endpoint,
+ sizeof(sensor->node_names.endpoint),
+ SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
+ if (sensor->vcm_type) {
+ /* Append the link to distinguish nodes with the same VCM model */
+ snprintf(sensor->node_names.vcm, sizeof(sensor->node_names.vcm),
+ "%s-%u", sensor->vcm_type, sensor->link);
+ }
+
+ if (sensor->csi_dev) {
+ snprintf(sensor->node_names.ivsc_sensor_port,
+ sizeof(sensor->node_names.ivsc_sensor_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 0);
+ snprintf(sensor->node_names.ivsc_ipu_port,
+ sizeof(sensor->node_names.ivsc_ipu_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 1);
+ }
+}
+
+static void ipu_bridge_init_swnode_group(struct ipu_sensor *sensor)
+{
+ struct software_node *nodes = sensor->swnodes;
+
+ sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
+ sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
+ sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
+ sensor->group[SWNODE_IPU_PORT] = &nodes[SWNODE_IPU_PORT];
+ sensor->group[SWNODE_IPU_ENDPOINT] = &nodes[SWNODE_IPU_ENDPOINT];
+ if (sensor->vcm_type)
+ sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
+
+ if (sensor->csi_dev) {
+ sensor->group[SWNODE_IVSC_HID] =
+ &nodes[SWNODE_IVSC_HID];
+ sensor->group[SWNODE_IVSC_SENSOR_PORT] =
+ &nodes[SWNODE_IVSC_SENSOR_PORT];
+ sensor->group[SWNODE_IVSC_SENSOR_ENDPOINT] =
+ &nodes[SWNODE_IVSC_SENSOR_ENDPOINT];
+ sensor->group[SWNODE_IVSC_IPU_PORT] =
+ &nodes[SWNODE_IVSC_IPU_PORT];
+ sensor->group[SWNODE_IVSC_IPU_ENDPOINT] =
+ &nodes[SWNODE_IVSC_IPU_ENDPOINT];
+
+ if (sensor->vcm_type)
+ sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
+ } else {
+ if (sensor->vcm_type)
+ sensor->group[SWNODE_IVSC_HID] = &nodes[SWNODE_VCM];
+ }
+}
+
+static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
+ struct ipu_sensor *sensor)
+{
+ struct ipu_node_names *names = &sensor->node_names;
+ struct software_node *nodes = sensor->swnodes;
+
+ ipu_bridge_init_swnode_names(sensor);
+
+ nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
+ sensor->dev_properties);
+ nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
+ &nodes[SWNODE_SENSOR_HID]);
+ nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
+ sensor->node_names.endpoint,
+ &nodes[SWNODE_SENSOR_PORT],
+ sensor->ep_properties);
+ nodes[SWNODE_IPU_PORT] = NODE_PORT(sensor->node_names.remote_port,
+ &bridge->ipu_hid_node);
+ nodes[SWNODE_IPU_ENDPOINT] = NODE_ENDPOINT(
+ sensor->node_names.endpoint,
+ &nodes[SWNODE_IPU_PORT],
+ sensor->ipu_properties);
+
+ if (sensor->csi_dev) {
+ snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
+ acpi_device_hid(sensor->ivsc_adev), sensor->link);
+
+ nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
+ sensor->ivsc_properties);
+ nodes[SWNODE_IVSC_SENSOR_PORT] =
+ NODE_PORT(names->ivsc_sensor_port,
+ &nodes[SWNODE_IVSC_HID]);
+ nodes[SWNODE_IVSC_SENSOR_ENDPOINT] =
+ NODE_ENDPOINT(names->endpoint,
+ &nodes[SWNODE_IVSC_SENSOR_PORT],
+ sensor->ivsc_sensor_ep_properties);
+ nodes[SWNODE_IVSC_IPU_PORT] =
+ NODE_PORT(names->ivsc_ipu_port,
+ &nodes[SWNODE_IVSC_HID]);
+ nodes[SWNODE_IVSC_IPU_ENDPOINT] =
+ NODE_ENDPOINT(names->endpoint,
+ &nodes[SWNODE_IVSC_IPU_PORT],
+ sensor->ivsc_ipu_ep_properties);
+ }
+
+ nodes[SWNODE_VCM] = NODE_VCM(sensor->node_names.vcm);
+
+ ipu_bridge_init_swnode_group(sensor);
+}
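
Taken together, the registered software nodes describe the following
fwnode graph (a sketch based on the remote-endpoint references set up in
ipu_bridge_create_fwnode_properties() above):

	sensor "<HID>-<link>"        IVSC "<IVSC HID>-<link>"        IPU
	port@0/endpoint@0 <--------> port@0/endpoint@0
	                             port@1/endpoint@0 <----------> port@<link>/endpoint@0

Without an IVSC device, the sensor's endpoint@0 and the IPU's
port@<link>/endpoint@0 reference each other directly.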
+
+/*
+ * The actual instantiation must be done from a workqueue to avoid
+ * a deadlock on taking list_lock from v4l2-async twice.
+ */
+struct ipu_bridge_instantiate_vcm_work_data {
+ struct work_struct work;
+ struct device *sensor;
+ char name[16];
+ struct i2c_board_info board_info;
+};
+
+static void ipu_bridge_instantiate_vcm_work(struct work_struct *work)
+{
+ struct ipu_bridge_instantiate_vcm_work_data *data =
+ container_of(work, struct ipu_bridge_instantiate_vcm_work_data,
+ work);
+ struct acpi_device *adev = ACPI_COMPANION(data->sensor);
+ struct i2c_client *vcm_client;
+ bool put_fwnode = true;
+ int ret;
+
+ /*
+	 * The client may get probed before the device_link gets added below;
+	 * make sure the sensor is powered up during probe.
+ */
+ ret = pm_runtime_get_sync(data->sensor);
+ if (ret < 0) {
+ dev_err(data->sensor, "Error %d runtime-resuming sensor, cannot instantiate VCM\n",
+ ret);
+ goto out_pm_put;
+ }
+
+ /*
+ * Note the client is created only once and then kept around
+ * even after a rmmod, just like the software-nodes.
+ */
+ vcm_client = i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(adev),
+ 1, &data->board_info);
+ if (IS_ERR(vcm_client)) {
+ dev_err(data->sensor, "Error instantiating VCM client: %ld\n",
+ PTR_ERR(vcm_client));
+ goto out_pm_put;
+ }
+
+ device_link_add(&vcm_client->dev, data->sensor, DL_FLAG_PM_RUNTIME);
+
+ dev_info(data->sensor, "Instantiated %s VCM\n", data->board_info.type);
+ put_fwnode = false; /* Ownership has passed to the i2c-client */
+
+out_pm_put:
+ pm_runtime_put(data->sensor);
+ put_device(data->sensor);
+ if (put_fwnode)
+ fwnode_handle_put(data->board_info.fwnode);
+ kfree(data);
+}
+
+int ipu_bridge_instantiate_vcm(struct device *sensor)
+{
+ struct ipu_bridge_instantiate_vcm_work_data *data;
+ struct fwnode_handle *vcm_fwnode;
+ struct i2c_client *vcm_client;
+ struct acpi_device *adev;
+ char *sep;
+
+ adev = ACPI_COMPANION(sensor);
+ if (!adev)
+ return 0;
+
+ vcm_fwnode = fwnode_find_reference(dev_fwnode(sensor), "lens-focus", 0);
+ if (IS_ERR(vcm_fwnode))
+ return 0;
+
+ /* When reloading modules the client will already exist */
+ vcm_client = i2c_find_device_by_fwnode(vcm_fwnode);
+ if (vcm_client) {
+ fwnode_handle_put(vcm_fwnode);
+ put_device(&vcm_client->dev);
+ return 0;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ fwnode_handle_put(vcm_fwnode);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&data->work, ipu_bridge_instantiate_vcm_work);
+ data->sensor = get_device(sensor);
+ snprintf(data->name, sizeof(data->name), "%s-VCM",
+ acpi_dev_name(adev));
+ data->board_info.dev_name = data->name;
+ data->board_info.fwnode = vcm_fwnode;
+ snprintf(data->board_info.type, sizeof(data->board_info.type),
+ "%pfwP", vcm_fwnode);
+ /* Strip "-<link>" postfix */
+ sep = strchrnul(data->board_info.type, '-');
+ *sep = 0;
+
+ queue_work(system_long_wq, &data->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_bridge_instantiate_vcm, INTEL_IPU_BRIDGE);
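
As a worked example of the naming above: with vcm_type "dw9714" on link 1,
the "lens-focus" reference points at the software node named "dw9714-1".
"%pfwP" prints that node name into board_info.type, and the strchrnul()
truncation strips the "-1" back off, leaving "dw9714" as the I2C device
type the VCM driver matches on, while the device name itself becomes
"<ACPI device name>-VCM".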
+
+static int ipu_bridge_instantiate_ivsc(struct ipu_sensor *sensor)
+{
+ struct fwnode_handle *fwnode;
+
+ if (!sensor->csi_dev)
+ return 0;
+
+ fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_IVSC_HID]);
+ if (!fwnode)
+ return -ENODEV;
+
+ set_secondary_fwnode(sensor->csi_dev, fwnode);
+
+ return 0;
+}
+
+static void ipu_bridge_unregister_sensors(struct ipu_bridge *bridge)
+{
+ struct ipu_sensor *sensor;
+ unsigned int i;
+
+ for (i = 0; i < bridge->n_sensors; i++) {
+ sensor = &bridge->sensors[i];
+ software_node_unregister_node_group(sensor->group);
+ acpi_dev_put(sensor->adev);
+ put_device(sensor->csi_dev);
+ acpi_dev_put(sensor->ivsc_adev);
+ }
+}
+
+static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
+ struct ipu_bridge *bridge)
+{
+ struct fwnode_handle *fwnode, *primary;
+ struct ipu_sensor *sensor;
+ struct acpi_device *adev;
+ int ret;
+
+ for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+ if (!adev->status.enabled)
+ continue;
+
+ if (bridge->n_sensors >= IPU_MAX_PORTS) {
+ acpi_dev_put(adev);
+ dev_err(bridge->dev, "Exceeded available IPU ports\n");
+ return -EINVAL;
+ }
+
+ sensor = &bridge->sensors[bridge->n_sensors];
+
+ ret = bridge->parse_sensor_fwnode(adev, sensor);
+ if (ret)
+ goto err_put_adev;
+
+ snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
+ cfg->hid, sensor->link);
+
+ ret = ipu_bridge_check_ivsc_dev(sensor, adev);
+ if (ret)
+ goto err_put_adev;
+
+ ipu_bridge_create_fwnode_properties(sensor, bridge, cfg);
+ ipu_bridge_create_connection_swnodes(bridge, sensor);
+
+ ret = software_node_register_node_group(sensor->group);
+ if (ret)
+ goto err_put_ivsc;
+
+ fwnode = software_node_fwnode(&sensor->swnodes[
+ SWNODE_SENSOR_HID]);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto err_free_swnodes;
+ }
+
+ sensor->adev = acpi_dev_get(adev);
+
+ primary = acpi_fwnode_handle(adev);
+ primary->secondary = fwnode;
+
+ ret = ipu_bridge_instantiate_ivsc(sensor);
+ if (ret)
+ goto err_free_swnodes;
+
+ dev_info(bridge->dev, "Found supported sensor %s\n",
+ acpi_dev_name(adev));
+
+ bridge->n_sensors++;
+ }
+
+ return 0;
+
+err_free_swnodes:
+ software_node_unregister_node_group(sensor->group);
+err_put_ivsc:
+ put_device(sensor->csi_dev);
+ acpi_dev_put(sensor->ivsc_adev);
+err_put_adev:
+ acpi_dev_put(adev);
+ return ret;
+}
+
+static int ipu_bridge_connect_sensors(struct ipu_bridge *bridge)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
+ const struct ipu_sensor_config *cfg =
+ &ipu_supported_sensors[i];
+
+ ret = ipu_bridge_connect_sensor(cfg, bridge);
+ if (ret)
+ goto err_unregister_sensors;
+ }
+
+ return 0;
+
+err_unregister_sensors:
+ ipu_bridge_unregister_sensors(bridge);
+ return ret;
+}
+
+static int ipu_bridge_ivsc_is_ready(void)
+{
+ struct acpi_device *sensor_adev, *adev;
+ struct device *csi_dev;
+ bool ready = true;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
+ const struct ipu_sensor_config *cfg =
+ &ipu_supported_sensors[i];
+
+ for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
+ if (!sensor_adev->status.enabled)
+ continue;
+
+ adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
+ if (!adev)
+ continue;
+
+ csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
+ if (!csi_dev)
+ ready = false;
+
+ put_device(csi_dev);
+ acpi_dev_put(adev);
+ }
+ }
+
+ return ready;
+}
+
+int ipu_bridge_init(struct device *dev,
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode)
+{
+ struct fwnode_handle *fwnode;
+ struct ipu_bridge *bridge;
+ unsigned int i;
+ int ret;
+
+ if (!ipu_bridge_ivsc_is_ready())
+ return -EPROBE_DEFER;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ strscpy(bridge->ipu_node_name, IPU_HID,
+ sizeof(bridge->ipu_node_name));
+ bridge->ipu_hid_node.name = bridge->ipu_node_name;
+ bridge->dev = dev;
+ bridge->parse_sensor_fwnode = parse_sensor_fwnode;
+
+ ret = software_node_register(&bridge->ipu_hid_node);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register the IPU HID node\n");
+ goto err_free_bridge;
+ }
+
+ /*
+ * Map the lane arrangement, which is fixed for the IPU3 (meaning we
+ * only need one, rather than one per sensor). We include it as a
+ * member of the struct ipu_bridge rather than a global variable so
+ * that it survives if the module is unloaded along with the rest of
+ * the struct.
+ */
+ for (i = 0; i < IPU_MAX_LANES; i++)
+ bridge->data_lanes[i] = i + 1;
+
+ ret = ipu_bridge_connect_sensors(bridge);
+ if (ret || bridge->n_sensors == 0)
+ goto err_unregister_ipu;
+
+ dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
+
+ fwnode = software_node_fwnode(&bridge->ipu_hid_node);
+ if (!fwnode) {
+ dev_err(dev, "Error getting fwnode from ipu software_node\n");
+ ret = -ENODEV;
+ goto err_unregister_sensors;
+ }
+
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+
+err_unregister_sensors:
+ ipu_bridge_unregister_sensors(bridge);
+err_unregister_ipu:
+ software_node_unregister(&bridge->ipu_hid_node);
+err_free_bridge:
+ kfree(bridge);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_bridge_init, INTEL_IPU_BRIDGE);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU Sensors Bridge driver");
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
new file mode 100644
index 0000000000..c0a250daa9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_IPU3_CIO2
+ tristate "Intel ipu3-cio2 driver"
+ depends on VIDEO_DEV && PCI
+ depends on IPU_BRIDGE || !IPU_BRIDGE
+ depends on ACPI || COMPILE_TEST
+ depends on X86
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ select VIDEOBUF2_DMA_SG
+	help
+ This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel
+ Skylake and Kaby Lake SoCs and used for capturing images and
+ video from a camera sensor.
+
+	  Say Y or M here if you have a Skylake/Kaby Lake SoC with a MIPI
+	  CSI-2 connected camera.
+ The module will be called ipu3-cio2.
diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
new file mode 100644
index 0000000000..98ddd5beaf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
new file mode 100644
index 0000000000..5dd69a251b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -0,0 +1,2067 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017,2020 Intel Corporation
+ *
+ * Based partially on Intel IPU4 driver written by
+ * Sakari Ailus <sakari.ailus@linux.intel.com>
+ * Samu Onkalo <samu.onkalo@intel.com>
+ * Jouni Högander <jouni.hogander@intel.com>
+ * Jouni Ukkonen <jouni.ukkonen@intel.com>
+ * Antti Laakso <antti.laakso@intel.com>
+ * et al.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pfn.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/vmalloc.h>
+
+#include <media/ipu-bridge.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "ipu3-cio2.h"
+
+struct ipu3_cio2_fmt {
+ u32 mbus_code;
+ u32 fourcc;
+ u8 mipicode;
+ u8 bpp;
+};
+
+/*
+ * These are the raw formats used by Intel's third-generation Image
+ * Processing Unit, known as IPU3: 10-bit raw Bayer, packed so that
+ * 32 bytes carry every 25 pixels, with the last 6 bits unused.
+ */
+static const struct ipu3_cio2_fmt formats[] = {
+ { /* put default entry at beginning */
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
+ .mipicode = 0x2b,
+ .bpp = 10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
+ .mipicode = 0x2b,
+ .bpp = 10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
+ .mipicode = 0x2b,
+ .bpp = 10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
+ .mipicode = 0x2b,
+ .bpp = 10,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
+ .fourcc = V4L2_PIX_FMT_IPU3_Y10,
+ .mipicode = 0x2b,
+ .bpp = 10,
+ },
+};
+
+/*
+ * cio2_find_format - look up a color format by fourcc and/or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ */
+static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
+ const u32 *mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (pixelformat && *pixelformat != formats[i].fourcc)
+ continue;
+ if (mbus_code && *mbus_code != formats[i].mbus_code)
+ continue;
+
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static inline u32 cio2_bytesperline(const unsigned int width)
+{
+ /*
+	 * 64 bytes for every 50 pixels; the line length in bytes is a
+	 * multiple of 64 (line-end alignment).
+ */
+ return DIV_ROUND_UP(width, 50) * 64;
+}
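
For example, a 2592-pixel-wide line occupies DIV_ROUND_UP(2592, 50) * 64 =
52 * 64 = 3328 bytes: the IPU3 packing stores 25 pixels in each 32-byte
group, so ceil(2592 / 25) = 104 groups of 32 bytes, which already lands on
the required 64-byte line-end alignment.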
+
+/**************** FBPT operations ****************/
+
+static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+
+ if (cio2->dummy_lop) {
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
+ cio2->dummy_lop_bus_addr);
+ cio2->dummy_lop = NULL;
+ }
+ if (cio2->dummy_page) {
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
+ cio2->dummy_page_bus_addr);
+ cio2->dummy_page = NULL;
+ }
+}
+
+static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+ unsigned int i;
+
+ cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
+ &cio2->dummy_page_bus_addr,
+ GFP_KERNEL);
+ cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
+ &cio2->dummy_lop_bus_addr,
+ GFP_KERNEL);
+ if (!cio2->dummy_page || !cio2->dummy_lop) {
+ cio2_fbpt_exit_dummy(cio2);
+ return -ENOMEM;
+ }
+ /*
+	 * A List of Pointers (LOP) contains 1024 32-bit pointers, each
+	 * addressing a 4 KiB page. Initialize every entry to the dummy_page
+	 * bus base address.
+ */
+ for (i = 0; i < CIO2_LOP_ENTRIES; i++)
+ cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
+
+ return 0;
+}
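
Each LOP page therefore maps CIO2_LOP_ENTRIES * PAGE_SIZE = 1024 * 4 KiB =
4 MiB of buffer, and a plane of `length` bytes needs
DIV_ROUND_UP(PFN_UP(length) + 1, CIO2_LOP_ENTRIES) LOP pages, the extra
entry being the trailing dummy-page pointer (see cio2_vb2_buf_init()
below).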
+
+static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
+ struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
+{
+ /*
+	 * The CPU first initializes some fields in the FBPT and then sets the
+	 * VALID bit. This barrier ensures that the DMA (device) does not see
+	 * the VALID bit enabled before the other fields are initialized;
+	 * otherwise it could lead to havoc.
+ */
+ dma_wmb();
+
+ /*
+	 * Request interrupts for start and completion.
+	 * The VALID bit is applicable only to the 1st entry.
+ */
+ entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
+ CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
+}
+
+/* Initialize FBPT entries to point to the dummy frame */
+static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
+ struct cio2_fbpt_entry
+ entry[CIO2_MAX_LOPS])
+{
+ unsigned int i;
+
+ entry[0].first_entry.first_page_offset = 0;
+ entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
+ entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
+
+ for (i = 0; i < CIO2_MAX_LOPS; i++)
+ entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
+
+ cio2_fbpt_entry_enable(cio2, entry);
+}
+
+/* Initialize FBPT entries to point to a given buffer */
+static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
+ struct cio2_buffer *b,
+ struct cio2_fbpt_entry
+ entry[CIO2_MAX_LOPS])
+{
+ struct vb2_buffer *vb = &b->vbb.vb2_buf;
+ unsigned int length = vb->planes[0].length;
+ int remaining, i;
+
+ entry[0].first_entry.first_page_offset = b->offset;
+ remaining = length + entry[0].first_entry.first_page_offset;
+ entry[1].second_entry.num_of_pages = PFN_UP(remaining);
+ /*
+ * last_page_available_bytes has the offset of the last byte in the
+ * last page which is still accessible by DMA. DMA cannot access
+ * beyond this point. Valid range for this is from 0 to 4095.
+ * 0 indicates 1st byte in the page is DMA accessible.
+ * 4095 (PAGE_SIZE - 1) means every single byte in the last page
+ * is available for DMA transfer.
+ */
+ remaining = offset_in_page(remaining) ?: PAGE_SIZE;
+ entry[1].second_entry.last_page_available_bytes = remaining - 1;
+ /* Fill FBPT */
+ remaining = length;
+ i = 0;
+ while (remaining > 0) {
+ entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
+ remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
+ entry++;
+ i++;
+ }
+
+	/* The first FBPT entry past the buffer must still point to a valid LOP */
+ entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
+
+ cio2_fbpt_entry_enable(cio2, entry);
+}
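
As a worked example: a 102400-byte (100 KiB) plane at offset 0 gives
num_of_pages = PFN_UP(102400) = 25, and since offset_in_page(102400) == 0
the remaining value becomes PAGE_SIZE, so last_page_available_bytes =
4095, i.e. every byte of the 25th page is DMA-accessible.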
+
+static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+
+ q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+ GFP_KERNEL);
+ if (!q->fbpt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
+{
+ dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
+}
+
+/**************** CSI2 hardware setup ****************/
+
+/*
+ * The CSI2 receiver has several parameters affecting
+ * the receiver timings. These depend on the MIPI bus frequency
+ * F in Hz (sensor transmitter rate) as follows:
+ * register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ * UI = 1 / (2 * F) in seconds
+ * COUNT_ACC = counter accuracy in seconds
+ * For IPU3 COUNT_ACC = 0.0625
+ *
+ * A and B are coefficients from the table below,
+ * depending whether the register minimum or maximum value is
+ * calculated.
+ * Minimum Maximum
+ * Clock lane A B A B
+ * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
+ * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
+ *
+ * We use the minimum values of both A and B.
+ */
+
+/*
+ * shift for keeping value range suitable for 32-bit integer arithmetic
+ */
+#define LIMIT_SHIFT 8
+
+static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
+{
+	const u32 accinv = 16; /* inverse of the counter resolution */
+ const u32 uiinv = 500000000; /* 1e9 / 2 */
+ s32 r;
+
+ freq >>= LIMIT_SHIFT;
+
+ if (WARN_ON(freq <= 0 || freq > S32_MAX))
+ return def;
+ /*
+ * b could be 0, -2 or -8, so |accinv * b| is always
+	 * less than or equal to (1 << 7) and thus |r| < 500000000.
+ */
+ r = accinv * b * (uiinv >> LIMIT_SHIFT);
+ r = r / (s32)freq;
+ /* max value of a is 95 */
+ r += accinv * a;
+
+ return r;
+};
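
As a worked example, take the 419200000 Hz link frequency from the OV5693
entry in ipu-bridge.c and the clock-lane settle minimum coefficients
a = 95, b = -8 (with LIMIT_SHIFT = 8 and C integer division truncating
toward zero):

	r = 16 * (-8) * (500000000 >> 8) / (419200000 >> 8) + 16 * 95
	  = -250000000 / 1637500 + 1520
	  = -152 + 1520
	  = 1368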
+
+/* Calculate the delay value for termination enable of clock lane HS Rx */
+static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
+ struct cio2_csi2_timing *timing,
+ unsigned int bpp, unsigned int lanes)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+ s64 freq;
+
+ if (!q->sensor)
+ return -ENODEV;
+
+ freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
+ if (freq < 0) {
+ dev_err(dev, "error %lld, invalid link_freq\n", freq);
+ return freq;
+ }
+
+ timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
+ CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
+ freq,
+ CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
+ timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
+ CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
+ freq,
+ CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
+ timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
+ CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
+ freq,
+ CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
+ timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
+ CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
+ freq,
+ CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
+
+ dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
+ dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
+ dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
+ dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
+
+ return 0;
+};
+
+static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ static const int NUM_VCS = 4;
+ static const int SID; /* Stream id */
+ static const int ENTRY;
+ static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
+ CIO2_FBPT_SUBENTRY_UNIT);
+ const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
+ const struct ipu3_cio2_fmt *fmt;
+ void __iomem *const base = cio2->base;
+ u8 lanes, csi2bus = q->csi2.port;
+ u8 sensor_vc = SENSOR_VIR_CH_DFLT;
+ struct cio2_csi2_timing timing = { 0 };
+ int i, r;
+
+ fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
+ if (!fmt)
+ return -EINVAL;
+
+ lanes = q->csi2.lanes;
+
+ r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
+ if (r)
+ return r;
+
+ writel(timing.clk_termen, q->csi_rx_base +
+ CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
+ writel(timing.clk_settle, q->csi_rx_base +
+ CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
+
+ for (i = 0; i < lanes; i++) {
+ writel(timing.dat_termen, q->csi_rx_base +
+ CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
+ writel(timing.dat_settle, q->csi_rx_base +
+ CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
+ }
+
+ writel(CIO2_PBM_WMCTRL1_MIN_2CK |
+ CIO2_PBM_WMCTRL1_MID1_2CK |
+ CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
+ writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
+ CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
+ CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
+ CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
+ CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
+ CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
+ writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
+ CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
+ CIO2_PBM_ARB_CTRL_LE_EN |
+ CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
+ CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
+ CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
+ CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
+ base + CIO2_REG_PBM_ARB_CTRL);
+ writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
+ q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
+ writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
+ q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
+
+ writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
+ writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
+
+ /* Configure MIPI backend */
+ for (i = 0; i < NUM_VCS; i++)
+ writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
+
+	/* There are 16 short packet LUT entries */
+ for (i = 0; i < 16; i++)
+ writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
+ q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
+ writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
+ q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
+
+ writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
+ writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
+ writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
+ writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
+ writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
+ writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
+
+ writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
+ CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
+ base + CIO2_REG_INT_EN);
+
+ writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
+ << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
+ base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
+ writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
+ sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
+ fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
+ q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
+ writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
+ writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
+ writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
+
+ writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
+ writel(CIO2_CGC_PRIM_TGE |
+ CIO2_CGC_SIDE_TGE |
+ CIO2_CGC_XOSC_TGE |
+ CIO2_CGC_D3I3_TGE |
+ CIO2_CGC_CSI2_INTERFRAME_TGE |
+ CIO2_CGC_CSI2_PORT_DCGE |
+ CIO2_CGC_SIDE_DCGE |
+ CIO2_CGC_PRIM_DCGE |
+ CIO2_CGC_ROSC_DCGE |
+ CIO2_CGC_XOSC_DCGE |
+ CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
+ CIO2_CGC_CSI_CLKGATE_HOLDOFF
+ << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
+ writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
+ writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
+ CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
+ CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
+ CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
+ base + CIO2_REG_LTRVAL01);
+ writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
+ CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
+ CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
+ CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
+ base + CIO2_REG_LTRVAL23);
+
+ for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
+ writel(0, base + CIO2_REG_CDMABA(i));
+ writel(0, base + CIO2_REG_CDMAC0(i));
+ writel(0, base + CIO2_REG_CDMAC1(i));
+ }
+
+ /* Enable DMA */
+ writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
+
+ writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
+ FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
+ CIO2_CDMAC0_DMA_INTR_ON_FE |
+ CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
+ CIO2_CDMAC0_DMA_EN |
+ CIO2_CDMAC0_DMA_INTR_ON_FS |
+ CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
+
+ writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
+ base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
+
+ writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
+
+ writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
+ CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
+ CIO2_PXM_FRF_CFG_MSK_ECC_RE |
+ CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
+ base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
+
+ /* Clear interrupts */
+ writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
+ writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
+ writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
+ writel(~0, base + CIO2_REG_INT_STS);
+
+ /* Enable devices, starting from the last device in the pipe */
+ writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
+ writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
+
+ return 0;
+}
+
+static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+ void __iomem *const base = cio2->base;
+ unsigned int i;
+ u32 value;
+ int ret;
+
+ /* Disable CSI receiver and MIPI backend devices */
+ writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
+ writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
+ writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
+ writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
+
+ /* Halt DMA */
+ writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
+ ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
+ value, value & CIO2_CDMAC0_DMA_HALTED,
+ 4000, 2000000);
+ if (ret)
+ dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
+
+ for (i = 0; i < CIO2_NUM_PORTS; i++) {
+ writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
+ CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
+ writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
+ CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
+ }
+}
+
+static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_queue *q = cio2->cur_queue;
+ struct cio2_fbpt_entry *entry;
+ u64 ns = ktime_get_ns();
+
+ if (dma_chan >= CIO2_QUEUES) {
+ dev_err(dev, "bad DMA channel %i\n", dma_chan);
+ return;
+ }
+
+ entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+ if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
+ dev_warn(dev, "no ready buffers found on DMA channel %u\n",
+ dma_chan);
+ return;
+ }
+
+ /* Find out which buffer(s) are ready */
+ do {
+ struct cio2_buffer *b;
+
+ b = q->bufs[q->bufs_first];
+ if (b) {
+ unsigned int received = entry[1].second_entry.num_of_bytes;
+ unsigned long payload =
+ vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
+
+ q->bufs[q->bufs_first] = NULL;
+ atomic_dec(&q->bufs_queued);
+ dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
+
+ b->vbb.vb2_buf.timestamp = ns;
+ b->vbb.field = V4L2_FIELD_NONE;
+ b->vbb.sequence = atomic_read(&q->frame_sequence);
+ if (payload != received)
+ dev_warn(dev,
+ "payload length is %lu, received %u\n",
+ payload, received);
+ vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+ atomic_inc(&q->frame_sequence);
+ cio2_fbpt_entry_init_dummy(cio2, entry);
+ q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
+ entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+ } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
+}
+
+static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ /*
+ * For the user space camera control algorithms it is essential
+ * to know when the reception of a frame has begun. That's often
+ * the best timing information to get from the hardware.
+ */
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
+ };
+
+ v4l2_event_queue(q->subdev.devnode, &event);
+}
+
+static const char *const cio2_irq_errs[] = {
+ "single packet header error corrected",
+ "multiple packet header errors detected",
+ "payload checksum (CRC) error",
+ "fifo overflow",
+ "reserved short packet data type detected",
+ "reserved long packet data type detected",
+ "incomplete long packet detected",
+ "frame sync error",
+ "line sync error",
+ "DPHY start of transmission error",
+ "DPHY synchronization error",
+ "escape mode error",
+ "escape mode trigger event",
+ "escape mode ultra-low power state for data lane(s)",
+ "escape mode ultra-low power state exit for clock lane",
+ "inter-frame short packet discarded",
+ "inter-frame long packet discarded",
+ "non-matching Long Packet stalled",
+};
+
+static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long csi2_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
+ dev_err(dev, "CSI-2 receiver port %i: %s\n",
+ port, cio2_irq_errs[i]);
+
+ if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
+ dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
+ csi2_status, port);
+}
+
+static const char *const cio2_port_errs[] = {
+ "ECC recoverable",
+ "DPHY not recoverable",
+ "ECC not recoverable",
+ "CRC error",
+ "INTERFRAMEDATA",
+ "PKT2SHORT",
+ "PKT2LONG",
+};
+
+static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long port_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
+ dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
+}
+
+static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+ void __iomem *const base = cio2->base;
+
+ if (int_status & CIO2_INT_IOOE) {
+ /*
+ * Interrupt on Output Error:
+ * 1) SRAM is full and FS received, or
+ * 2) An invalid bit detected by DMA.
+ */
+ u32 oe_status, oe_clear;
+
+ oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
+ oe_status = oe_clear;
+
+ if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
+ dev_err(dev, "DMA output error: 0x%x\n",
+ (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
+ >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
+ oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
+ }
+ if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
+ dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
+ (oe_status & CIO2_INT_EXT_OE_OES_MASK)
+ >> CIO2_INT_EXT_OE_OES_SHIFT);
+ oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
+ }
+ writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
+ if (oe_status)
+ dev_warn(dev, "unknown interrupt 0x%x on OE\n",
+ oe_status);
+ int_status &= ~CIO2_INT_IOOE;
+ }
+
+ if (int_status & CIO2_INT_IOC_MASK) {
+ /* DMA IO done -- frame ready */
+ u32 clr = 0;
+ unsigned int d;
+
+ for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
+ if (int_status & CIO2_INT_IOC(d)) {
+ clr |= CIO2_INT_IOC(d);
+ cio2_buffer_done(cio2, d);
+ }
+ int_status &= ~clr;
+ }
+
+ if (int_status & CIO2_INT_IOS_IOLN_MASK) {
+ /* DMA IO starts or reached specified line */
+ u32 clr = 0;
+ unsigned int d;
+
+ for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
+ if (int_status & CIO2_INT_IOS_IOLN(d)) {
+ clr |= CIO2_INT_IOS_IOLN(d);
+ if (d == CIO2_DMA_CHAN)
+ cio2_queue_event_sof(cio2,
+ cio2->cur_queue);
+ }
+ int_status &= ~clr;
+ }
+
+ if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
+ /* CSI2 receiver (error) interrupt */
+ unsigned int port;
+ u32 ie_status;
+
+ ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
+
+ for (port = 0; port < CIO2_NUM_PORTS; port++) {
+ u32 port_status = (ie_status >> (port * 8)) & 0xff;
+
+ cio2_irq_log_port_errs(dev, port, port_status);
+
+ if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
+ void __iomem *csi_rx_base =
+ base + CIO2_REG_PIPE_BASE(port);
+ u32 csi2_status;
+
+ csi2_status = readl(csi_rx_base +
+ CIO2_REG_IRQCTRL_STATUS);
+
+ cio2_irq_log_irq_errs(dev, port, csi2_status);
+
+ writel(csi2_status,
+ csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
+ }
+ }
+
+ writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
+
+ int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
+ }
+
+ if (int_status)
+ dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
+}
+
+static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
+{
+ struct cio2_device *cio2 = cio2_ptr;
+ void __iomem *const base = cio2->base;
+ struct device *dev = &cio2->pci_dev->dev;
+ u32 int_status;
+
+ int_status = readl(base + CIO2_REG_INT_STS);
+ dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
+ if (!int_status)
+ return IRQ_NONE;
+
+ do {
+ writel(int_status, base + CIO2_REG_INT_STS);
+ cio2_irq_handle_once(cio2, int_status);
+ int_status = readl(base + CIO2_REG_INT_STS);
+ if (int_status)
+ dev_dbg(dev, "pending status 0x%x\n", int_status);
+ } while (int_status);
+
+ return IRQ_HANDLED;
+}
+
+/**************** Videobuf2 interface ****************/
+
+static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
+ enum vb2_buffer_state state)
+{
+ unsigned int i;
+
+ for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
+ if (q->bufs[i]) {
+ atomic_dec(&q->bufs_queued);
+ vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
+ state);
+ q->bufs[i] = NULL;
+ }
+ }
+}
+
+static int cio2_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_queue *q = vb2q_to_cio2_queue(vq);
+ unsigned int i;
+
+ if (*num_planes && *num_planes < q->format.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < q->format.num_planes; ++i) {
+ if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ sizes[i] = q->format.plane_fmt[i].sizeimage;
+ alloc_devs[i] = dev;
+ }
+
+ *num_planes = q->format.num_planes;
+ *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
+
+ /* Initialize buffer queue */
+ for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
+ q->bufs[i] = NULL;
+ cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
+ }
+ atomic_set(&q->bufs_queued, 0);
+ q->bufs_first = 0;
+ q->bufs_next = 0;
+
+ return 0;
+}
+
+/* Called after each buffer is allocated */
+static int cio2_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_buffer *b = to_cio2_buffer(vb);
+ unsigned int pages = PFN_UP(vb->planes[0].length);
+ unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
+ struct sg_table *sg;
+ struct sg_dma_page_iter sg_iter;
+ unsigned int i, j;
+
+ if (lops <= 0 || lops > CIO2_MAX_LOPS) {
+ dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
+ vb->planes[0].length);
+ return -ENOSPC; /* Should never happen */
+ }
+
+ memset(b->lop, 0, sizeof(b->lop));
+ /* Allocate LOP table */
+ for (i = 0; i < lops; i++) {
+ b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
+ &b->lop_bus_addr[i], GFP_KERNEL);
+ if (!b->lop[i])
+ goto fail;
+ }
+
+ /* Fill LOP */
+ sg = vb2_dma_sg_plane_desc(vb, 0);
+ if (!sg)
+ return -ENOMEM;
+
+ if (sg->nents && sg->sgl)
+ b->offset = sg->sgl->offset;
+
+ i = j = 0;
+ for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
+ if (!pages--)
+ break;
+ b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
+ j++;
+ if (j == CIO2_LOP_ENTRIES) {
+ i++;
+ j = 0;
+ }
+ }
+
+ b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
+ return 0;
+fail:
+ while (i--)
+ dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
+ return -ENOMEM;
+}
+
+/* Transfer buffer ownership to cio2 */
+static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_queue *q =
+ container_of(vb->vb2_queue, struct cio2_queue, vbq);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
+ struct cio2_fbpt_entry *entry;
+ unsigned long flags;
+ unsigned int i, j, next = q->bufs_next;
+ int bufs_queued = atomic_inc_return(&q->bufs_queued);
+ u32 fbpt_rp;
+
+ dev_dbg(dev, "queue buffer %d\n", vb->index);
+
+ /*
+ * This code queues the buffer to the CIO2 DMA engine, which starts
+ * running once streaming has started. It is possible that this code
+	 * gets preempted due to increased CPU load. When that happens, the
+	 * driver does not get an opportunity to queue new buffers to the CIO2
+	 * DMA engine. When the DMA engine encounters an FBPT entry without the
+	 * VALID bit set, it halts, and a restart of the DMA engine and the
+	 * sensor is required to continue streaming.
+	 * This is undesirable but highly unlikely, given that the DMA engine
+	 * has 32 FBPT entries to process before it can run into an FBPT entry
+	 * without the VALID bit set. We try to mitigate this
+ * by disabling interrupts for the duration of this queueing.
+ */
+ local_irq_save(flags);
+
+ fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
+ >> CIO2_CDMARI_FBPT_RP_SHIFT)
+ & CIO2_CDMARI_FBPT_RP_MASK;
+
+ /*
+	 * fbpt_rp is the FBPT entry that the DMA is currently working on, but
+	 * since it could advance to the next entry at any time, assume that we
+	 * might already be there.
+ */
+ fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
+
+ if (bufs_queued <= 1 || fbpt_rp == next)
+ /* Buffers were drained */
+ next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
+
+ for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
+ /*
+		 * We have allocated CIO2_MAX_BUFFERS circularly for the
+		 * hardware, while the user has requested a queue of N buffers.
+		 * The driver ensures N <= CIO2_MAX_BUFFERS and guarantees that
+		 * whenever the user queues a buffer, there necessarily is a
+		 * free slot.
+ */
+ if (!q->bufs[next]) {
+ q->bufs[next] = b;
+ entry = &q->fbpt[next * CIO2_MAX_LOPS];
+ cio2_fbpt_entry_init_buf(cio2, b, entry);
+ local_irq_restore(flags);
+ q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
+ for (j = 0; j < vb->num_planes; j++)
+ vb2_set_plane_payload(vb, j,
+ q->format.plane_fmt[j].sizeimage);
+ return;
+ }
+
+ dev_dbg(dev, "entry %i was full!\n", next);
+ next = (next + 1) % CIO2_MAX_BUFFERS;
+ }
+
+ local_irq_restore(flags);
+ dev_err(dev, "error: all cio2 entries were full!\n");
+ atomic_dec(&q->bufs_queued);
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+/* Called when each buffer is freed */
+static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_buffer *b = to_cio2_buffer(vb);
+ unsigned int i;
+
+ /* Free LOP table */
+ for (i = 0; i < CIO2_MAX_LOPS; i++) {
+ if (b->lop[i])
+ dma_free_coherent(dev, PAGE_SIZE,
+ b->lop[i], b->lop_bus_addr[i]);
+ }
+}
+
+static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cio2_queue *q = vb2q_to_cio2_queue(vq);
+ struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
+ int r;
+
+ cio2->cur_queue = q;
+ atomic_set(&q->frame_sequence, 0);
+
+ r = pm_runtime_resume_and_get(dev);
+ if (r < 0) {
+ dev_info(dev, "failed to set power %d\n", r);
+ return r;
+ }
+
+ r = video_device_pipeline_start(&q->vdev, &q->pipe);
+ if (r)
+ goto fail_pipeline;
+
+ r = cio2_hw_init(cio2, q);
+ if (r)
+ goto fail_hw;
+
+ /* Start streaming on sensor */
+ r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
+ if (r)
+ goto fail_csi2_subdev;
+
+ cio2->streaming = true;
+
+ return 0;
+
+fail_csi2_subdev:
+ cio2_hw_exit(cio2, q);
+fail_hw:
+ video_device_pipeline_stop(&q->vdev);
+fail_pipeline:
+ dev_dbg(dev, "failed to start streaming (%d)\n", r);
+ cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
+ pm_runtime_put(dev);
+
+ return r;
+}
+
+static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct cio2_queue *q = vb2q_to_cio2_queue(vq);
+ struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
+
+ if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
+ dev_err(dev, "failed to stop sensor streaming\n");
+
+ cio2_hw_exit(cio2, q);
+ synchronize_irq(cio2->pci_dev->irq);
+ cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
+ video_device_pipeline_stop(&q->vdev);
+ pm_runtime_put(dev);
+ cio2->streaming = false;
+}
+
+static const struct vb2_ops cio2_vb2_ops = {
+ .buf_init = cio2_vb2_buf_init,
+ .buf_queue = cio2_vb2_buf_queue,
+ .buf_cleanup = cio2_vb2_buf_cleanup,
+ .queue_setup = cio2_vb2_queue_setup,
+ .start_streaming = cio2_vb2_start_streaming,
+ .stop_streaming = cio2_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/**************** V4L2 interface ****************/
+
+static int cio2_v4l2_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
+ strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+
+ return 0;
+}
+
+/* The format is validated in cio2_video_link_validate() */
+static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct cio2_queue *q = file_to_cio2_queue(file);
+
+ f->fmt.pix_mp = q->format;
+
+ return 0;
+}
+
+static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ const struct ipu3_cio2_fmt *fmt;
+ struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
+
+ fmt = cio2_find_format(&mpix->pixelformat, NULL);
+ if (!fmt)
+ fmt = &formats[0];
+
+ /* Only supports up to 4224x3136 */
+ if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
+ mpix->width = CIO2_IMAGE_MAX_WIDTH;
+ if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
+ mpix->height = CIO2_IMAGE_MAX_HEIGHT;
+
+ mpix->num_planes = 1;
+ mpix->pixelformat = fmt->fourcc;
+ mpix->colorspace = V4L2_COLORSPACE_RAW;
+ mpix->field = V4L2_FIELD_NONE;
+ mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
+ mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
+ mpix->height;
+
+ /* use default */
+ mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
+}
+
+static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct cio2_queue *q = file_to_cio2_queue(file);
+
+ cio2_v4l2_try_fmt(file, fh, f);
+ q->format = f->fmt.pix_mp;
+
+ return 0;
+}
+
+static int
+cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ strscpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int
+cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int
+cio2_video_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+static const struct v4l2_file_operations cio2_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
+ .vidioc_querycap = cio2_v4l2_querycap,
+ .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_enum_input = cio2_video_enum_input,
+ .vidioc_g_input = cio2_video_g_input,
+ .vidioc_s_input = cio2_video_s_input,
+};
+
+static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* Line number. For now only zero is accepted. */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct v4l2_mbus_framefmt fmt_default = {
+ .width = 1936,
+ .height = 1096,
+ .code = formats[0].mbus_code,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+ };
+
+ /* Initialize try_fmt */
+ format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
+ *format = fmt_default;
+
+ /* same as sink */
+ format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
+ *format = fmt_default;
+
+ return 0;
+}
+
+/*
+ * cio2_subdev_get_fmt - Handle get format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @sd_state: V4L2 subdev state
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
+
+ mutex_lock(&q->subdev_lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
+ fmt->pad);
+ else
+ fmt->format = q->subdev_fmt;
+
+ mutex_unlock(&q->subdev_lock);
+
+ return 0;
+}
+
+/*
+ * cio2_subdev_set_fmt - Handle set format by pads subdev method
+ * @sd: pointer to v4l2 subdev structure
+ * @sd_state: V4L2 subdev state
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
+ struct v4l2_mbus_framefmt *mbus;
+ u32 mbus_code = fmt->format.code;
+ unsigned int i;
+
+ /*
+ * Only allow setting sink pad format;
+ * source always propagates from sink
+ */
+ if (fmt->pad == CIO2_PAD_SOURCE)
+ return cio2_subdev_get_fmt(sd, sd_state, fmt);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ else
+ mbus = &q->subdev_fmt;
+
+ fmt->format.code = formats[0].mbus_code;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].mbus_code == mbus_code) {
+ fmt->format.code = mbus_code;
+ break;
+ }
+ }
+
+ fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
+ fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ mutex_lock(&q->subdev_lock);
+ *mbus = fmt->format;
+ mutex_unlock(&q->subdev_lock);
+
+ return 0;
+}
+
+static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ code->code = formats[code->index].mbus_code;
+ return 0;
+}
+
+static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ }
+
+ return -EINVAL;
+}
+
+static int cio2_video_link_validate(struct media_link *link)
+{
+ struct media_entity *entity = link->sink->entity;
+ struct video_device *vd = media_entity_to_video_device(entity);
+ struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
+ struct cio2_device *cio2 = video_get_drvdata(vd);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct v4l2_subdev_format source_fmt;
+ int ret;
+
+ if (!media_pad_remote_pad_first(entity->pads)) {
+ dev_info(dev, "video node %s pad not connected\n", vd->name);
+ return -ENOTCONN;
+ }
+
+ ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
+ if (ret < 0)
+ return 0;
+
+ if (source_fmt.format.width != q->format.width ||
+ source_fmt.format.height != q->format.height) {
+ dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
+ q->format.width, q->format.height,
+ source_fmt.format.width, source_fmt.format.height);
+ return -EINVAL;
+ }
+
+ if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
+ .subscribe_event = cio2_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
+ .open = cio2_subdev_open,
+};
+
+static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
+ .link_validate = v4l2_subdev_link_validate_default,
+ .get_fmt = cio2_subdev_get_fmt,
+ .set_fmt = cio2_subdev_set_fmt,
+ .enum_mbus_code = cio2_subdev_enum_mbus_code,
+};
+
+static const struct v4l2_subdev_ops cio2_subdev_ops = {
+ .core = &cio2_subdev_core_ops,
+ .pad = &cio2_subdev_pad_ops,
+};
+
+/**************** V4L2 sub-device asynchronous registration callbacks ****************/
+
+struct sensor_async_subdev {
+ struct v4l2_async_connection asd;
+ struct csi2_bus_info csi2;
+};
+
+#define to_sensor_asd(__asd) \
+ container_of_const(__asd, struct sensor_async_subdev, asd)
+
+/* The .bound() notifier callback when a match is found */
+static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+ struct cio2_queue *q;
+ int ret;
+
+ if (cio2->queue[s_asd->csi2.port].sensor)
+ return -EBUSY;
+
+ ret = ipu_bridge_instantiate_vcm(sd->dev);
+ if (ret)
+ return ret;
+
+ q = &cio2->queue[s_asd->csi2.port];
+
+ q->csi2 = s_asd->csi2;
+ q->sensor = sd;
+ q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
+
+ return 0;
+}
+
+/* The .unbind callback */
+static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+
+ cio2->queue[s_asd->csi2.port].sensor = NULL;
+}
+
+/* .complete() is called after all subdevices have been located */
+static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct sensor_async_subdev *s_asd;
+ struct v4l2_async_connection *asd;
+ struct cio2_queue *q;
+ int ret;
+
+ list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
+ s_asd = to_sensor_asd(asd);
+ q = &cio2->queue[s_asd->csi2.port];
+
+ ret = media_entity_get_fwnode_pad(&q->sensor->entity,
+ s_asd->asd.match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "no pad for endpoint %pfw (%d)\n",
+ s_asd->asd.match.fwnode, ret);
+ return ret;
+ }
+
+ ret = media_create_pad_link(&q->sensor->entity, ret,
+ &q->subdev.entity, CIO2_PAD_SINK,
+ 0);
+ if (ret) {
+ dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
+ q->sensor->name, s_asd->asd.match.fwnode, ret);
+ return ret;
+ }
+ }
+
+ return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations cio2_async_ops = {
+ .bound = cio2_notifier_bound,
+ .unbind = cio2_notifier_unbind,
+ .complete = cio2_notifier_complete,
+};
+
+static int cio2_parse_firmware(struct cio2_device *cio2)
+{
+ struct device *dev = &cio2->pci_dev->dev;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < CIO2_NUM_PORTS; i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct sensor_async_subdev *s_asd;
+ struct fwnode_handle *ep;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ continue;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
+ struct
+ sensor_async_subdev);
+ if (IS_ERR(s_asd)) {
+ ret = PTR_ERR(s_asd);
+ goto err_parse;
+ }
+
+ s_asd->csi2.port = vep.base.port;
+ s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ fwnode_handle_put(ep);
+
+ continue;
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ /*
+ * Proceed even without sensors connected to allow the device to
+ * suspend.
+ */
+ cio2->notifier.ops = &cio2_async_ops;
+ ret = v4l2_async_nf_register(&cio2->notifier);
+ if (ret)
+ dev_err(dev, "failed to register async notifier : %d\n", ret);
+
+ return ret;
+}
+
+/**************** Queue initialization ****************/
+static const struct media_entity_operations cio2_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct media_entity_operations cio2_video_entity_ops = {
+ .link_validate = cio2_video_link_validate,
+};
+
+static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ static const u32 default_width = 1936;
+ static const u32 default_height = 1096;
+ const struct ipu3_cio2_fmt dflt_fmt = formats[0];
+ struct device *dev = &cio2->pci_dev->dev;
+ struct video_device *vdev = &q->vdev;
+ struct vb2_queue *vbq = &q->vbq;
+ struct v4l2_subdev *subdev = &q->subdev;
+ struct v4l2_mbus_framefmt *fmt;
+ int r;
+
+ /* Initialize miscellaneous variables */
+ mutex_init(&q->lock);
+ mutex_init(&q->subdev_lock);
+
+ /* Initialize formats to default values */
+ fmt = &q->subdev_fmt;
+ fmt->width = default_width;
+ fmt->height = default_height;
+ fmt->code = dflt_fmt.mbus_code;
+ fmt->field = V4L2_FIELD_NONE;
+
+ q->format.width = default_width;
+ q->format.height = default_height;
+ q->format.pixelformat = dflt_fmt.fourcc;
+ q->format.colorspace = V4L2_COLORSPACE_RAW;
+ q->format.field = V4L2_FIELD_NONE;
+ q->format.num_planes = 1;
+ q->format.plane_fmt[0].bytesperline =
+ cio2_bytesperline(q->format.width);
+ q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
+ q->format.height;
+
+ /* Initialize fbpt */
+ r = cio2_fbpt_init(cio2, q);
+ if (r)
+ goto fail_fbpt;
+
+ /* Initialize media entities */
+ q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &cio2_media_ops;
+ subdev->internal_ops = &cio2_subdev_internal_ops;
+ r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
+ if (r) {
+ dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
+ goto fail_subdev_media_entity;
+ }
+
+ q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ vdev->entity.ops = &cio2_video_entity_ops;
+ r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
+ if (r) {
+ dev_err(dev, "failed initialize videodev media entity (%d)\n",
+ r);
+ goto fail_vdev_media_entity;
+ }
+
+ /* Initialize subdev */
+ v4l2_subdev_init(subdev, &cio2_subdev_ops);
+ subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ subdev->owner = THIS_MODULE;
+ snprintf(subdev->name, sizeof(subdev->name),
+ CIO2_ENTITY_NAME " %td", q - cio2->queue);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ v4l2_set_subdevdata(subdev, cio2);
+ r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
+ if (r) {
+ dev_err(dev, "failed initialize subdev (%d)\n", r);
+ goto fail_subdev;
+ }
+
+ /* Initialize vbq */
+ vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
+ vbq->ops = &cio2_vb2_ops;
+ vbq->mem_ops = &vb2_dma_sg_memops;
+ vbq->buf_struct_size = sizeof(struct cio2_buffer);
+ vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vbq->min_buffers_needed = 1;
+ vbq->drv_priv = cio2;
+ vbq->lock = &q->lock;
+ r = vb2_queue_init(vbq);
+ if (r) {
+ dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
+ goto fail_subdev;
+ }
+
+ /* Initialize vdev */
+ snprintf(vdev->name, sizeof(vdev->name),
+ "%s %td", CIO2_NAME, q - cio2->queue);
+ vdev->release = video_device_release_empty;
+ vdev->fops = &cio2_v4l2_fops;
+ vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
+ vdev->lock = &cio2->lock;
+ vdev->v4l2_dev = &cio2->v4l2_dev;
+ vdev->queue = &q->vbq;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
+ video_set_drvdata(vdev, cio2);
+ r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (r) {
+ dev_err(dev, "failed to register video device (%d)\n", r);
+ goto fail_vdev;
+ }
+
+ /* Create link from CIO2 subdev to output node */
+ r = media_create_pad_link(
+ &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (r)
+ goto fail_link;
+
+ return 0;
+
+fail_link:
+ vb2_video_unregister_device(&q->vdev);
+fail_vdev:
+ v4l2_device_unregister_subdev(subdev);
+fail_subdev:
+ media_entity_cleanup(&vdev->entity);
+fail_vdev_media_entity:
+ media_entity_cleanup(&subdev->entity);
+fail_subdev_media_entity:
+ cio2_fbpt_exit(q, dev);
+fail_fbpt:
+ mutex_destroy(&q->subdev_lock);
+ mutex_destroy(&q->lock);
+
+ return r;
+}
+
+static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ vb2_video_unregister_device(&q->vdev);
+ media_entity_cleanup(&q->vdev.entity);
+ v4l2_device_unregister_subdev(&q->subdev);
+ media_entity_cleanup(&q->subdev.entity);
+ cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+ mutex_destroy(&q->subdev_lock);
+ mutex_destroy(&q->lock);
+}
+
+static int cio2_queues_init(struct cio2_device *cio2)
+{
+ int i, r;
+
+ for (i = 0; i < CIO2_QUEUES; i++) {
+ r = cio2_queue_init(cio2, &cio2->queue[i]);
+ if (r)
+ break;
+ }
+
+ if (i == CIO2_QUEUES)
+ return 0;
+
+ for (i--; i >= 0; i--)
+ cio2_queue_exit(cio2, &cio2->queue[i]);
+
+ return r;
+}
+
+static void cio2_queues_exit(struct cio2_device *cio2)
+{
+ unsigned int i;
+
+ for (i = 0; i < CIO2_QUEUES; i++)
+ cio2_queue_exit(cio2, &cio2->queue[i]);
+}
+
+static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *endpoint;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (endpoint) {
+ fwnode_handle_put(endpoint);
+ return 0;
+ }
+
+ return cio2_check_fwnode_graph(fwnode->secondary);
+}
+
+/**************** PCI interface ****************/
+
+static int cio2_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pci_dev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct cio2_device *cio2;
+ int r;
+
+ /*
+ * On some platforms no connections to sensors are defined in firmware.
+ * If the device has no endpoints then we can try to build those as
+ * software nodes parsed from SSDB.
+ */
+ r = cio2_check_fwnode_graph(fwnode);
+ if (r) {
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
+ dev_err(dev, "fwnode graph has no endpoints connected\n");
+ return -EINVAL;
+ }
+
+ r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
+ if (r)
+ return r;
+ }
+
+ cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
+ if (!cio2)
+ return -ENOMEM;
+ cio2->pci_dev = pci_dev;
+
+ r = pcim_enable_device(pci_dev);
+ if (r) {
+ dev_err(dev, "failed to enable device (%d)\n", r);
+ return r;
+ }
+
+ dev_info(dev, "device 0x%x (rev: 0x%x)\n",
+ pci_dev->device, pci_dev->revision);
+
+ r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
+ if (r) {
+ dev_err(dev, "failed to remap I/O memory (%d)\n", r);
+ return -ENODEV;
+ }
+
+ cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
+
+ pci_set_drvdata(pci_dev, cio2);
+
+ pci_set_master(pci_dev);
+
+ r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
+ if (r) {
+ dev_err(dev, "failed to set DMA mask (%d)\n", r);
+ return -ENODEV;
+ }
+
+ r = pci_enable_msi(pci_dev);
+ if (r) {
+ dev_err(dev, "failed to enable MSI (%d)\n", r);
+ return r;
+ }
+
+ r = cio2_fbpt_init_dummy(cio2);
+ if (r)
+ return r;
+
+ mutex_init(&cio2->lock);
+
+ cio2->media_dev.dev = dev;
+ strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
+ sizeof(cio2->media_dev.model));
+ cio2->media_dev.hw_revision = 0;
+
+ media_device_init(&cio2->media_dev);
+ r = media_device_register(&cio2->media_dev);
+ if (r < 0)
+ goto fail_mutex_destroy;
+
+ cio2->v4l2_dev.mdev = &cio2->media_dev;
+ r = v4l2_device_register(dev, &cio2->v4l2_dev);
+ if (r) {
+ dev_err(dev, "failed to register V4L2 device (%d)\n", r);
+ goto fail_media_device_unregister;
+ }
+
+ r = cio2_queues_init(cio2);
+ if (r)
+ goto fail_v4l2_device_unregister;
+
+ v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
+
+ /* Register notifier for the subdevices we care about */
+ r = cio2_parse_firmware(cio2);
+ if (r)
+ goto fail_clean_notifier;
+
+ r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ CIO2_NAME, cio2);
+ if (r) {
+ dev_err(dev, "failed to request IRQ (%d)\n", r);
+ goto fail_clean_notifier;
+ }
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
+
+ return 0;
+
+fail_clean_notifier:
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
+ cio2_queues_exit(cio2);
+fail_v4l2_device_unregister:
+ v4l2_device_unregister(&cio2->v4l2_dev);
+fail_media_device_unregister:
+ media_device_unregister(&cio2->media_dev);
+ media_device_cleanup(&cio2->media_dev);
+fail_mutex_destroy:
+ mutex_destroy(&cio2->lock);
+ cio2_fbpt_exit_dummy(cio2);
+
+ return r;
+}
+
+static void cio2_pci_remove(struct pci_dev *pci_dev)
+{
+ struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
+
+ media_device_unregister(&cio2->media_dev);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
+ cio2_queues_exit(cio2);
+ cio2_fbpt_exit_dummy(cio2);
+ v4l2_device_unregister(&cio2->v4l2_dev);
+ media_device_cleanup(&cio2->media_dev);
+ mutex_destroy(&cio2->lock);
+
+ pm_runtime_forbid(&pci_dev->dev);
+ pm_runtime_get_noresume(&pci_dev->dev);
+}
+
+static int __maybe_unused cio2_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
+ void __iomem *const base = cio2->base;
+ u16 pm;
+
+ writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
+ dev_dbg(dev, "cio2 runtime suspend.\n");
+
+ pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
+ pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
+ pm |= CIO2_PMCSR_D3;
+ pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
+
+ return 0;
+}
+
+static int __maybe_unused cio2_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
+ void __iomem *const base = cio2->base;
+ u16 pm;
+
+ writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
+ dev_dbg(dev, "cio2 runtime resume.\n");
+
+ pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
+ pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
+ pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
+
+ return 0;
+}
+
+/*
+ * Helper function to rotate all the elements of a circular buffer left
+ * by "start" positions, so that the element at index "start" ends up
+ * first
+ */
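+/*
+ * For example (an illustrative sketch, not a call made by this driver),
+ * arrange(buf, elem_size, 5, 2) would turn elements {A, B, C, D, E}
+ * into {C, D, E, A, B}.
+ */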
+static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
+{
+ struct {
+ size_t begin, end;
+ } arr[2] = {
+ { 0, start - 1 },
+ { start, elems - 1 },
+ };
+
+#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
+
+ /* Loop as long as we have out-of-place entries */
+ while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
+ size_t size0, i;
+
+ /*
+ * Find the number of entries that can be arranged on this
+ * iteration.
+ */
+ size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
+
+ /* Swap the entries in two parts of the array. */
+ for (i = 0; i < size0; i++) {
+ u8 *d = ptr + elem_size * (arr[1].begin + i);
+ u8 *s = ptr + elem_size * (arr[0].begin + i);
+ size_t j;
+
+ for (j = 0; j < elem_size; j++)
+ swap(d[j], s[j]);
+ }
+
+ if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
+ /* The end of the first array remains unarranged. */
+ arr[0].begin += size0;
+ } else {
+ /*
+ * The first array is fully arranged so we proceed
+ * handling the next one.
+ */
+ arr[0].begin = arr[1].begin;
+ arr[0].end = arr[1].begin + size0 - 1;
+ arr[1].begin += size0;
+ }
+ }
+}
+
+static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
+{
+ unsigned int i, j;
+
+ for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
+ i++, j = (j + 1) % CIO2_MAX_BUFFERS)
+ if (q->bufs[j])
+ break;
+
+ if (i == CIO2_MAX_BUFFERS)
+ return;
+
+ if (j) {
+ arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
+ CIO2_MAX_BUFFERS, j);
+ arrange(q->bufs, sizeof(struct cio2_buffer *),
+ CIO2_MAX_BUFFERS, j);
+ }
+
+ /*
+ * The DMA engine clears the valid bit when accessing a buffer. When
+ * stopping the stream in the suspend callback, some of the buffers
+ * may therefore be in an invalid state. After resume, when the DMA
+ * engine meets an invalid buffer, it will halt and stop receiving
+ * new data. To avoid that, set the valid bit for all buffers in the
+ * FBPT.
+ */
+ for (i = 0; i < CIO2_MAX_BUFFERS; i++)
+ cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
+}
+
+static int __maybe_unused cio2_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
+ struct cio2_queue *q = cio2->cur_queue;
+ int r;
+
+ dev_dbg(dev, "cio2 suspend\n");
+ if (!cio2->streaming)
+ return 0;
+
+ /* Stop stream */
+ r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
+ if (r) {
+ dev_err(dev, "failed to stop sensor streaming\n");
+ return r;
+ }
+
+ cio2_hw_exit(cio2, q);
+ synchronize_irq(pci_dev->irq);
+
+ pm_runtime_force_suspend(dev);
+
+ /*
+ * Upon resume, the hardware starts to process the FBPT entries from
+ * the beginning, so relocate the queued buffers to the FBPT head
+ * before suspend.
+ */
+ cio2_fbpt_rearrange(cio2, q);
+ q->bufs_first = 0;
+ q->bufs_next = 0;
+
+ return 0;
+}
+
+static int __maybe_unused cio2_resume(struct device *dev)
+{
+ struct cio2_device *cio2 = dev_get_drvdata(dev);
+ struct cio2_queue *q = cio2->cur_queue;
+ int r;
+
+ dev_dbg(dev, "cio2 resume\n");
+ if (!cio2->streaming)
+ return 0;
+ /* Start stream */
+ r = pm_runtime_force_resume(dev);
+ if (r < 0) {
+ dev_err(dev, "failed to set power %d\n", r);
+ return r;
+ }
+
+ r = cio2_hw_init(cio2, q);
+ if (r) {
+ dev_err(dev, "fail to init cio2 hw\n");
+ return r;
+ }
+
+ r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
+ if (r) {
+ dev_err(dev, "fail to start sensor streaming\n");
+ cio2_hw_exit(cio2, q);
+ }
+
+ return r;
+}
+
+static const struct dev_pm_ops cio2_pm_ops = {
+ SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
+};
+
+static const struct pci_device_id cio2_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
+
+static struct pci_driver cio2_pci_driver = {
+ .name = CIO2_NAME,
+ .id_table = cio2_pci_id_table,
+ .probe = cio2_pci_probe,
+ .remove = cio2_pci_remove,
+ .driver = {
+ .pm = &cio2_pm_ops,
+ },
+};
+
+module_pci_driver(cio2_pci_driver);
+
+MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng");
+MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
+MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPU3 CIO2 driver");
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
new file mode 100644
index 0000000000..d731ce8adb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Intel Corporation */
+
+#ifndef __IPU3_CIO2_H
+#define __IPU3_CIO2_H
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+struct cio2_fbpt_entry; /* defined here, after the first usage */
+struct pci_dev;
+
+#define CIO2_NAME "ipu3-cio2"
+#define CIO2_DEVICE_NAME "Intel IPU3 CIO2"
+#define CIO2_ENTITY_NAME "ipu3-csi2"
+#define CIO2_PCI_ID 0x9d32
+#define CIO2_PCI_BAR 0
+#define CIO2_DMA_MASK DMA_BIT_MASK(39)
+
+#define CIO2_IMAGE_MAX_WIDTH 4224U
+#define CIO2_IMAGE_MAX_HEIGHT 3136U
+
+/* a 32 MB max frame needs 8 LOPs (FBPT sub-entries), each LOP mapping 4 MB */
+#define CIO2_MAX_LOPS 8
+#define CIO2_MAX_BUFFERS (PAGE_SIZE / 16 / CIO2_MAX_LOPS)
+#define CIO2_LOP_ENTRIES (PAGE_SIZE / sizeof(u32))
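+/*
+ * With 4 KiB pages: CIO2_MAX_BUFFERS = 4096 / 16 / 8 = 32 (each FBPT
+ * entry is 16 bytes) and CIO2_LOP_ENTRIES = 1024 page pointers per LOP.
+ */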
+
+#define CIO2_PAD_SINK 0U
+#define CIO2_PAD_SOURCE 1U
+#define CIO2_PADS 2U
+
+#define CIO2_NUM_DMA_CHAN 20U
+#define CIO2_NUM_PORTS 4U /* DPHYs */
+
+/* 1 for each sensor */
+#define CIO2_QUEUES CIO2_NUM_PORTS
+
+/* Register and bit field definitions */
+#define CIO2_REG_PIPE_BASE(n) ((n) * 0x0400) /* n = 0..3 */
+#define CIO2_REG_CSIRX_BASE 0x000
+#define CIO2_REG_MIPIBE_BASE 0x100
+#define CIO2_REG_PIXELGEN_BAS 0x200
+#define CIO2_REG_IRQCTRL_BASE 0x300
+#define CIO2_REG_GPREG_BASE 0x1000
+
+/* base register: CIO2_REG_PIPE_BASE(pipe) + CIO2_REG_CSIRX_BASE */
+#define CIO2_REG_CSIRX_ENABLE (CIO2_REG_CSIRX_BASE + 0x0)
+#define CIO2_REG_CSIRX_NOF_ENABLED_LANES (CIO2_REG_CSIRX_BASE + 0x4)
+#define CIO2_REG_CSIRX_SP_IF_CONFIG (CIO2_REG_CSIRX_BASE + 0x10)
+#define CIO2_REG_CSIRX_LP_IF_CONFIG (CIO2_REG_CSIRX_BASE + 0x14)
+#define CIO2_CSIRX_IF_CONFIG_FILTEROUT 0x00
+#define CIO2_CSIRX_IF_CONFIG_FILTEROUT_VC_INACTIVE 0x01
+#define CIO2_CSIRX_IF_CONFIG_PASS 0x02
+#define CIO2_CSIRX_IF_CONFIG_FLAG_ERROR BIT(2)
+#define CIO2_REG_CSIRX_STATUS (CIO2_REG_CSIRX_BASE + 0x18)
+#define CIO2_REG_CSIRX_STATUS_DLANE_HS (CIO2_REG_CSIRX_BASE + 0x1c)
+#define CIO2_CSIRX_STATUS_DLANE_HS_MASK 0xff
+#define CIO2_REG_CSIRX_STATUS_DLANE_LP (CIO2_REG_CSIRX_BASE + 0x20)
+#define CIO2_CSIRX_STATUS_DLANE_LP_MASK 0xffffff
+/* Termination enable and settle in 0.0625ns units, lane=0..3 or -1 for clock */
+#define CIO2_REG_CSIRX_DLY_CNT_TERMEN(lane) \
+ (CIO2_REG_CSIRX_BASE + 0x2c + 8 * (lane))
+#define CIO2_REG_CSIRX_DLY_CNT_SETTLE(lane) \
+ (CIO2_REG_CSIRX_BASE + 0x30 + 8 * (lane))
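+/*
+ * For the clock lane (lane == -1) the macros above resolve to
+ * CIO2_REG_CSIRX_BASE + 0x24 and + 0x28 respectively.
+ */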
+/* base register: CIO2_REG_PIPE_BASE(pipe) + CIO2_REG_MIPIBE_BASE */
+#define CIO2_REG_MIPIBE_ENABLE (CIO2_REG_MIPIBE_BASE + 0x0)
+#define CIO2_REG_MIPIBE_STATUS (CIO2_REG_MIPIBE_BASE + 0x4)
+#define CIO2_REG_MIPIBE_COMP_FORMAT(vc) \
+ (CIO2_REG_MIPIBE_BASE + 0x8 + 0x4 * (vc))
+#define CIO2_REG_MIPIBE_FORCE_RAW8 (CIO2_REG_MIPIBE_BASE + 0x20)
+#define CIO2_REG_MIPIBE_FORCE_RAW8_ENABLE BIT(0)
+#define CIO2_REG_MIPIBE_FORCE_RAW8_USE_TYPEID BIT(1)
+#define CIO2_REG_MIPIBE_FORCE_RAW8_TYPEID_SHIFT 2U
+
+#define CIO2_REG_MIPIBE_IRQ_STATUS (CIO2_REG_MIPIBE_BASE + 0x24)
+#define CIO2_REG_MIPIBE_IRQ_CLEAR (CIO2_REG_MIPIBE_BASE + 0x28)
+#define CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD (CIO2_REG_MIPIBE_BASE + 0x68)
+#define CIO2_MIPIBE_GLOBAL_LUT_DISREGARD 1U
+#define CIO2_REG_MIPIBE_PKT_STALL_STATUS (CIO2_REG_MIPIBE_BASE + 0x6c)
+#define CIO2_REG_MIPIBE_PARSE_GSP_THROUGH_LP_LUT_REG_IDX \
+ (CIO2_REG_MIPIBE_BASE + 0x70)
+#define CIO2_REG_MIPIBE_SP_LUT_ENTRY(vc) \
+ (CIO2_REG_MIPIBE_BASE + 0x74 + 4 * (vc))
+#define CIO2_REG_MIPIBE_LP_LUT_ENTRY(m) /* m = 0..15 */ \
+ (CIO2_REG_MIPIBE_BASE + 0x84 + 4 * (m))
+#define CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD 1U
+#define CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT 1U
+#define CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT 5U
+#define CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT 7U
+
+/* base register: CIO2_REG_PIPE_BASE(pipe) + CIO2_REG_IRQCTRL_BASE */
+/* IRQ registers are 18-bit wide, see cio2_irq_error for bit definitions */
+#define CIO2_REG_IRQCTRL_EDGE (CIO2_REG_IRQCTRL_BASE + 0x00)
+#define CIO2_REG_IRQCTRL_MASK (CIO2_REG_IRQCTRL_BASE + 0x04)
+#define CIO2_REG_IRQCTRL_STATUS (CIO2_REG_IRQCTRL_BASE + 0x08)
+#define CIO2_REG_IRQCTRL_CLEAR (CIO2_REG_IRQCTRL_BASE + 0x0c)
+#define CIO2_REG_IRQCTRL_ENABLE (CIO2_REG_IRQCTRL_BASE + 0x10)
+#define CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE (CIO2_REG_IRQCTRL_BASE + 0x14)
+
+#define CIO2_REG_GPREG_SRST (CIO2_REG_GPREG_BASE + 0x0)
+#define CIO2_GPREG_SRST_ALL 0xffff /* Reset all */
+#define CIO2_REG_FB_HPLL_FREQ (CIO2_REG_GPREG_BASE + 0x08)
+#define CIO2_REG_ISCLK_RATIO (CIO2_REG_GPREG_BASE + 0xc)
+
+#define CIO2_REG_CGC 0x1400
+#define CIO2_CGC_CSI2_TGE BIT(0)
+#define CIO2_CGC_PRIM_TGE BIT(1)
+#define CIO2_CGC_SIDE_TGE BIT(2)
+#define CIO2_CGC_XOSC_TGE BIT(3)
+#define CIO2_CGC_MPLL_SHUTDOWN_EN BIT(4)
+#define CIO2_CGC_D3I3_TGE BIT(5)
+#define CIO2_CGC_CSI2_INTERFRAME_TGE BIT(6)
+#define CIO2_CGC_CSI2_PORT_DCGE BIT(8)
+#define CIO2_CGC_CSI2_DCGE BIT(9)
+#define CIO2_CGC_SIDE_DCGE BIT(10)
+#define CIO2_CGC_PRIM_DCGE BIT(11)
+#define CIO2_CGC_ROSC_DCGE BIT(12)
+#define CIO2_CGC_XOSC_DCGE BIT(13)
+#define CIO2_CGC_FLIS_DCGE BIT(14)
+#define CIO2_CGC_CLKGATE_HOLDOFF_SHIFT 20U
+#define CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT 24U
+#define CIO2_REG_D0I3C 0x1408
+#define CIO2_D0I3C_I3 BIT(2) /* Set D0I3 */
+#define CIO2_D0I3C_RR BIT(3) /* Restore? */
+#define CIO2_REG_SWRESET 0x140c
+#define CIO2_SWRESET_SWRESET 1U
+#define CIO2_REG_SENSOR_ACTIVE 0x1410
+#define CIO2_REG_INT_STS 0x1414
+#define CIO2_REG_INT_STS_EXT_OE 0x1418
+#define CIO2_INT_EXT_OE_DMAOE_SHIFT 0U
+#define CIO2_INT_EXT_OE_DMAOE_MASK 0x7ffff
+#define CIO2_INT_EXT_OE_OES_SHIFT 24U
+#define CIO2_INT_EXT_OE_OES_MASK (0xf << CIO2_INT_EXT_OE_OES_SHIFT)
+#define CIO2_REG_INT_EN 0x1420
+#define CIO2_REG_INT_EN_IRQ (1 << 24)
+#define CIO2_REG_INT_EN_IOS(dma) (1U << (((dma) >> 1U) + 12U))
+/*
+ * Interrupt on completion bit, e.g. DMA 0-3 map to bits 0-3,
+ * DMA4 & DMA5 map to bit 4, ..., DMA18 & DMA19 map to bit 11
+ */
+#define CIO2_INT_IOC(dma) (1U << ((dma) < 4U ? (dma) : ((dma) >> 1U) + 2U))
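+/*
+ * For instance, CIO2_INT_IOC(3) evaluates to BIT(3), while both
+ * CIO2_INT_IOC(4) and CIO2_INT_IOC(5) evaluate to BIT(4).
+ */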
+#define CIO2_INT_IOC_SHIFT 0
+#define CIO2_INT_IOC_MASK (0x7ff << CIO2_INT_IOC_SHIFT)
+#define CIO2_INT_IOS_IOLN(dma) (1U << (((dma) >> 1U) + 12U))
+#define CIO2_INT_IOS_IOLN_SHIFT 12
+#define CIO2_INT_IOS_IOLN_MASK (0x3ff << CIO2_INT_IOS_IOLN_SHIFT)
+#define CIO2_INT_IOIE BIT(22)
+#define CIO2_INT_IOOE BIT(23)
+#define CIO2_INT_IOIRQ BIT(24)
+#define CIO2_REG_INT_EN_EXT_OE 0x1424
+#define CIO2_REG_DMA_DBG 0x1448
+#define CIO2_REG_DMA_DBG_DMA_INDEX_SHIFT 0U
+#define CIO2_REG_PBM_ARB_CTRL 0x1460
+#define CIO2_PBM_ARB_CTRL_LANES_DIV 0U /* 4-4-2-2 lanes */
+#define CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT 0U
+#define CIO2_PBM_ARB_CTRL_LE_EN BIT(7)
+#define CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN 2U
+#define CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT 8U
+#define CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP 480U
+#define CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT 16U
+#define CIO2_REG_PBM_WMCTRL1 0x1464
+#define CIO2_PBM_WMCTRL1_MIN_2CK_SHIFT 0U
+#define CIO2_PBM_WMCTRL1_MID1_2CK_SHIFT 8U
+#define CIO2_PBM_WMCTRL1_MID2_2CK_SHIFT 16U
+#define CIO2_PBM_WMCTRL1_TS_COUNT_DISABLE BIT(31)
+#define CIO2_PBM_WMCTRL1_MIN_2CK (4 << CIO2_PBM_WMCTRL1_MIN_2CK_SHIFT)
+#define CIO2_PBM_WMCTRL1_MID1_2CK (16 << CIO2_PBM_WMCTRL1_MID1_2CK_SHIFT)
+#define CIO2_PBM_WMCTRL1_MID2_2CK (21 << CIO2_PBM_WMCTRL1_MID2_2CK_SHIFT)
+#define CIO2_REG_PBM_WMCTRL2 0x1468
+#define CIO2_PBM_WMCTRL2_HWM_2CK 40U
+#define CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT 0U
+#define CIO2_PBM_WMCTRL2_LWM_2CK 22U
+#define CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT 8U
+#define CIO2_PBM_WMCTRL2_OBFFWM_2CK 2U
+#define CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT 16U
+#define CIO2_PBM_WMCTRL2_TRANSDYN 1U
+#define CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT 24U
+#define CIO2_PBM_WMCTRL2_DYNWMEN BIT(28)
+#define CIO2_PBM_WMCTRL2_OBFF_MEM_EN BIT(29)
+#define CIO2_PBM_WMCTRL2_OBFF_CPU_EN BIT(30)
+#define CIO2_PBM_WMCTRL2_DRAINNOW BIT(31)
+#define CIO2_REG_PBM_TS_COUNT 0x146c
+#define CIO2_REG_PBM_FOPN_ABORT 0x1474
+/* below n = 0..3 */
+#define CIO2_PBM_FOPN_ABORT(n) (0x1 << 8U * (n))
+#define CIO2_PBM_FOPN_FORCE_ABORT(n) (0x2 << 8U * (n))
+#define CIO2_PBM_FOPN_FRAMEOPEN(n) (0x8 << 8U * (n))
+#define CIO2_REG_LTRCTRL 0x1480
+#define CIO2_LTRCTRL_LTRDYNEN BIT(16)
+#define CIO2_LTRCTRL_LTRSTABLETIME_SHIFT 8U
+#define CIO2_LTRCTRL_LTRSTABLETIME_MASK 0xff
+#define CIO2_LTRCTRL_LTRSEL1S3 BIT(7)
+#define CIO2_LTRCTRL_LTRSEL1S2 BIT(6)
+#define CIO2_LTRCTRL_LTRSEL1S1 BIT(5)
+#define CIO2_LTRCTRL_LTRSEL1S0 BIT(4)
+#define CIO2_LTRCTRL_LTRSEL2S3 BIT(3)
+#define CIO2_LTRCTRL_LTRSEL2S2 BIT(2)
+#define CIO2_LTRCTRL_LTRSEL2S1 BIT(1)
+#define CIO2_LTRCTRL_LTRSEL2S0 BIT(0)
+#define CIO2_REG_LTRVAL23 0x1484
+#define CIO2_REG_LTRVAL01 0x1488
+#define CIO2_LTRVAL02_VAL_SHIFT 0U
+#define CIO2_LTRVAL02_SCALE_SHIFT 10U
+#define CIO2_LTRVAL13_VAL_SHIFT 16U
+#define CIO2_LTRVAL13_SCALE_SHIFT 26U
+
+#define CIO2_LTRVAL0_VAL 175U
+/* Value times 1024 ns */
+#define CIO2_LTRVAL0_SCALE 2U
+#define CIO2_LTRVAL1_VAL 90U
+#define CIO2_LTRVAL1_SCALE 2U
+#define CIO2_LTRVAL2_VAL 90U
+#define CIO2_LTRVAL2_SCALE 2U
+#define CIO2_LTRVAL3_VAL 90U
+#define CIO2_LTRVAL3_SCALE 2U
+
+#define CIO2_REG_CDMABA(n) (0x1500 + 0x10 * (n)) /* n = 0..19 */
+#define CIO2_REG_CDMARI(n) (0x1504 + 0x10 * (n))
+#define CIO2_CDMARI_FBPT_RP_SHIFT 0U
+#define CIO2_CDMARI_FBPT_RP_MASK 0xff
+#define CIO2_REG_CDMAC0(n) (0x1508 + 0x10 * (n))
+#define CIO2_CDMAC0_FBPT_LEN_SHIFT 0U
+#define CIO2_CDMAC0_FBPT_WIDTH_SHIFT 8U
+#define CIO2_CDMAC0_FBPT_NS BIT(25)
+#define CIO2_CDMAC0_DMA_INTR_ON_FS BIT(26)
+#define CIO2_CDMAC0_DMA_INTR_ON_FE BIT(27)
+#define CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL BIT(28)
+#define CIO2_CDMAC0_FBPT_FIFO_FULL_FIX_DIS BIT(29)
+#define CIO2_CDMAC0_DMA_EN BIT(30)
+#define CIO2_CDMAC0_DMA_HALTED BIT(31)
+#define CIO2_REG_CDMAC1(n) (0x150c + 0x10 * (n))
+#define CIO2_CDMAC1_LINENUMINT_SHIFT 0U
+#define CIO2_CDMAC1_LINENUMUPDATE_SHIFT 16U
+/* n = 0..3 */
+#define CIO2_REG_PXM_PXF_FMT_CFG0(n) (0x1700 + 0x30 * (n))
+#define CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT 0U
+#define CIO2_PXM_PXF_FMT_CFG_SID1_SHIFT 16U
+#define CIO2_PXM_PXF_FMT_CFG_PCK_64B (0 << 0)
+#define CIO2_PXM_PXF_FMT_CFG_PCK_32B (1 << 0)
+#define CIO2_PXM_PXF_FMT_CFG_BPP_08 (0 << 2)
+#define CIO2_PXM_PXF_FMT_CFG_BPP_10 (1 << 2)
+#define CIO2_PXM_PXF_FMT_CFG_BPP_12 (2 << 2)
+#define CIO2_PXM_PXF_FMT_CFG_BPP_14 (3 << 2)
+#define CIO2_PXM_PXF_FMT_CFG_SPEC_4PPC (0 << 4)
+#define CIO2_PXM_PXF_FMT_CFG_SPEC_3PPC_RGBA (1 << 4)
+#define CIO2_PXM_PXF_FMT_CFG_SPEC_3PPC_ARGB (2 << 4)
+#define CIO2_PXM_PXF_FMT_CFG_SPEC_PLANAR2 (3 << 4)
+#define CIO2_PXM_PXF_FMT_CFG_SPEC_PLANAR3 (4 << 4)
+#define CIO2_PXM_PXF_FMT_CFG_SPEC_NV16 (5 << 4)
+#define CIO2_PXM_PXF_FMT_CFG_PSWAP4_1ST_AB (1 << 7)
+#define CIO2_PXM_PXF_FMT_CFG_PSWAP4_1ST_CD (1 << 8)
+#define CIO2_PXM_PXF_FMT_CFG_PSWAP4_2ND_AC (1 << 9)
+#define CIO2_PXM_PXF_FMT_CFG_PSWAP4_2ND_BD (1 << 10)
+#define CIO2_REG_INT_STS_EXT_IE 0x17e4
+#define CIO2_REG_INT_EN_EXT_IE 0x17e8
+#define CIO2_INT_EXT_IE_ECC_RE(n) (0x01 << (8U * (n)))
+#define CIO2_INT_EXT_IE_DPHY_NR(n) (0x02 << (8U * (n)))
+#define CIO2_INT_EXT_IE_ECC_NR(n) (0x04 << (8U * (n)))
+#define CIO2_INT_EXT_IE_CRCERR(n) (0x08 << (8U * (n)))
+#define CIO2_INT_EXT_IE_INTERFRAMEDATA(n) (0x10 << (8U * (n)))
+#define CIO2_INT_EXT_IE_PKT2SHORT(n) (0x20 << (8U * (n)))
+#define CIO2_INT_EXT_IE_PKT2LONG(n) (0x40 << (8U * (n)))
+#define CIO2_INT_EXT_IE_IRQ(n) (0x80 << (8U * (n)))
+#define CIO2_REG_PXM_FRF_CFG(n) (0x1720 + 0x30 * (n))
+#define CIO2_PXM_FRF_CFG_FNSEL BIT(0)
+#define CIO2_PXM_FRF_CFG_FN_RST BIT(1)
+#define CIO2_PXM_FRF_CFG_ABORT BIT(2)
+#define CIO2_PXM_FRF_CFG_CRC_TH_SHIFT 3U
+#define CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR BIT(8)
+#define CIO2_PXM_FRF_CFG_MSK_ECC_RE BIT(9)
+#define CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE BIT(10)
+#define CIO2_PXM_FRF_CFG_EVEN_ODD_MODE_SHIFT 11U
+#define CIO2_PXM_FRF_CFG_MASK_CRC_THRES BIT(13)
+#define CIO2_PXM_FRF_CFG_MASK_CSI_ACCEPT BIT(14)
+#define CIO2_PXM_FRF_CFG_CIOHC_FS_MODE BIT(15)
+#define CIO2_PXM_FRF_CFG_CIOHC_FRST_FRM_SHIFT 16U
+#define CIO2_REG_PXM_SID2BID0(n) (0x1724 + 0x30 * (n))
+#define CIO2_FB_HPLL_FREQ 0x2
+#define CIO2_ISCLK_RATIO 0xc
+
+#define CIO2_IRQCTRL_MASK 0x3ffff
+
+#define CIO2_INT_EN_EXT_OE_MASK 0x8f0fffff
+
+#define CIO2_CGC_CLKGATE_HOLDOFF 3U
+#define CIO2_CGC_CSI_CLKGATE_HOLDOFF 5U
+
+#define CIO2_PXM_FRF_CFG_CRC_TH 16
+
+#define CIO2_INT_EN_EXT_IE_MASK 0xffffffff
+
+#define CIO2_DMA_CHAN 0U
+
+#define CIO2_CSIRX_DLY_CNT_CLANE_IDX -1
+
+#define CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A 0
+#define CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B 0
+#define CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A 95
+#define CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B -8
+
+#define CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A 0
+#define CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B 0
+#define CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A 85
+#define CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B -2
+
+#define CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT 0x4
+#define CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT 0x570
+
+#define CIO2_PMCSR_OFFSET 4U
+#define CIO2_PMCSR_D0D3_SHIFT 2U
+#define CIO2_PMCSR_D3 0x3
+
+struct cio2_csi2_timing {
+ s32 clk_termen;
+ s32 clk_settle;
+ s32 dat_termen;
+ s32 dat_settle;
+};
+
+struct cio2_buffer {
+ struct vb2_v4l2_buffer vbb;
+ u32 *lop[CIO2_MAX_LOPS];
+ dma_addr_t lop_bus_addr[CIO2_MAX_LOPS];
+ unsigned int offset;
+};
+
+#define to_cio2_buffer(vb) container_of(vb, struct cio2_buffer, vbb.vb2_buf)
+
+struct csi2_bus_info {
+ u32 port;
+ u32 lanes;
+};
+
+struct cio2_queue {
+ /* mutex to be used by vb2_queue */
+ struct mutex lock;
+ struct media_pipeline pipe;
+ struct csi2_bus_info csi2;
+ struct v4l2_subdev *sensor;
+ void __iomem *csi_rx_base;
+
+ /* Subdev, /dev/v4l-subdevX */
+ struct v4l2_subdev subdev;
+ struct mutex subdev_lock; /* Serialise access to subdev_fmt field */
+ struct media_pad subdev_pads[CIO2_PADS];
+ struct v4l2_mbus_framefmt subdev_fmt;
+ atomic_t frame_sequence;
+
+ /* Video device, /dev/videoX */
+ struct video_device vdev;
+ struct media_pad vdev_pad;
+ struct v4l2_pix_format_mplane format;
+ struct vb2_queue vbq;
+
+ /* Buffer queue handling */
+ struct cio2_fbpt_entry *fbpt; /* Frame buffer pointer table */
+ dma_addr_t fbpt_bus_addr;
+ struct cio2_buffer *bufs[CIO2_MAX_BUFFERS];
+ unsigned int bufs_first; /* Index of the first used entry */
+ unsigned int bufs_next; /* Index of the first unused entry */
+ atomic_t bufs_queued;
+};
+
+struct cio2_device {
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+ struct v4l2_device v4l2_dev;
+ struct cio2_queue queue[CIO2_QUEUES];
+ struct cio2_queue *cur_queue;
+ /* mutex to be used by video_device */
+ struct mutex lock;
+
+ bool streaming;
+ struct v4l2_async_notifier notifier;
+ struct media_device media_dev;
+
+ /*
+ * Safety net to catch DMA fetch ahead
+ * when reaching the end of LOP
+ */
+ void *dummy_page;
+ /* DMA handle of dummy_page */
+ dma_addr_t dummy_page_bus_addr;
+ /* single List of Pointers (LOP) page */
+ u32 *dummy_lop;
+ /* DMA handle of dummy_lop */
+ dma_addr_t dummy_lop_bus_addr;
+};
+
+#define to_cio2_device(n) container_of(n, struct cio2_device, notifier)
+
+/**************** Virtual channel ****************/
+/*
+ * This should come from the sensor driver; there is no
+ * driver interface or requirement yet.
+ */
+#define SENSOR_VIR_CH_DFLT 0
+
+/**************** FBPT operations ****************/
+#define CIO2_FBPT_SIZE (CIO2_MAX_BUFFERS * CIO2_MAX_LOPS * \
+ sizeof(struct cio2_fbpt_entry))
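+/*
+ * With 4 KiB pages this is 32 * 8 * 16 bytes = 4 KiB, i.e. the whole
+ * FBPT fits in a single page.
+ */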
+
+#define CIO2_FBPT_SUBENTRY_UNIT 4
+
+/* cio2 fbpt first_entry ctrl status */
+#define CIO2_FBPT_CTRL_VALID BIT(0)
+#define CIO2_FBPT_CTRL_IOC BIT(1)
+#define CIO2_FBPT_CTRL_IOS BIT(2)
+#define CIO2_FBPT_CTRL_SUCCXFAIL BIT(3)
+#define CIO2_FBPT_CTRL_CMPLCODE_SHIFT 4
+
+/*
+ * Frame Buffer Pointer Table (FBPT) entry.
+ * Each entry describes an output buffer and consists of
+ * several sub-entries.
+ */
+struct __packed cio2_fbpt_entry {
+ union {
+ struct __packed {
+ u32 ctrl; /* status ctrl */
+ u16 cur_line_num; /* current line # written to DDR */
+ u16 frame_num; /* updated by DMA upon FE */
+ u32 first_page_offset; /* offset for 1st page in LOP */
+ } first_entry;
+ /* Second entry per buffer */
+ struct __packed {
+ u32 timestamp;
+ u32 num_of_bytes;
+ /* the number of bytes for write on last page */
+ u16 last_page_available_bytes;
+ /* the number of pages allocated for this buf */
+ u16 num_of_pages;
+ } second_entry;
+ };
+ u32 lop_page_addr; /* Points to list of pointers (LOP) table */
+};
+
+static inline struct cio2_queue *file_to_cio2_queue(struct file *file)
+{
+ return container_of(video_devdata(file), struct cio2_queue, vdev);
+}
+
+static inline struct cio2_queue *vb2q_to_cio2_queue(struct vb2_queue *vq)
+{
+ return container_of(vq, struct cio2_queue, vbq);
+}
+
+#endif
diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig
new file mode 100644
index 0000000000..a8cb981544
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023, Intel Corporation. All rights reserved.
+
+config INTEL_VSC
+ tristate "Intel Visual Sensing Controller"
+ depends on INTEL_MEI && ACPI && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This adds support for Intel Visual Sensing Controller (IVSC).
+
+ This enables the IVSC firmware services required for controlling
+ camera sensor ownership and the CSI-2 link through Intel's Image
+ Processing Unit (IPU) driver.
diff --git a/drivers/media/pci/intel/ivsc/Makefile b/drivers/media/pci/intel/ivsc/Makefile
new file mode 100644
index 0000000000..00fad29a6e
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2023, Intel Corporation. All rights reserved.
+
+obj-$(CONFIG_INTEL_VSC) += ivsc-csi.o
+ivsc-csi-y += mei_csi.o
+
+obj-$(CONFIG_INTEL_VSC) += ivsc-ace.o
+ivsc-ace-y += mei_ace.o
diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c
new file mode 100644
index 0000000000..a0491f3078
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/mei_ace.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Intel Corporation. All rights reserved.
+ * Intel Visual Sensing Controller ACE Linux driver
+ */
+
+/*
+ * To set the ownership of the camera sensor, there is a specific
+ * command, which is sent via the MEI protocol. It is a two-step scheme
+ * where the firmware first acks receipt of the command and later
+ * responds that the command was executed. The command sending function
+ * uses a "completion" as the synchronization mechanism. The
+ * notification for a command is received via a MEI callback which
+ * wakes up the caller. There can be only one outstanding command at a
+ * time.
+ *
+ * The power line of the camera sensor is connected directly to the
+ * IVSC instead of the host, so when camera sensor ownership is
+ * switched to the host, the sensor has already been powered up by the
+ * firmware.
+ */
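+/*
+ * Sketch of the resulting exchange, as implemented by mei_ace_send()
+ * below:
+ *
+ *   driver                              firmware
+ *     mei_cldev_send(cmd)    -------->
+ *     wait for completion    <--------  ack (receipt, carries status)
+ *     wait for completion    <--------  response (command executed)
+ */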
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#define MEI_ACE_DRIVER_NAME "ivsc_ace"
+
+/* indicating driver message */
+#define ACE_DRV_MSG 1
+/* indicating set command */
+#define ACE_CMD_SET 4
+/* command timeout determined experimentally */
+#define ACE_CMD_TIMEOUT (5 * HZ)
+/* indicating the first command block */
+#define ACE_CMD_INIT_BLOCK 1
+/* indicating the last command block */
+#define ACE_CMD_FINAL_BLOCK 1
+/* size of camera status notification content */
+#define ACE_CAMERA_STATUS_SIZE 5
+
+/* UUID used to get firmware id */
+#define ACE_GET_FW_ID_UUID UUID_LE(0x6167DCFB, 0x72F1, 0x4584, 0xBF, \
+ 0xE3, 0x84, 0x17, 0x71, 0xAA, 0x79, 0x0B)
+
+/* UUID used to get csi device */
+#define MEI_CSI_UUID UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+ 0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+/* identify firmware event type */
+enum ace_event_type {
+ /* firmware ready */
+ ACE_FW_READY = 0x8,
+
+ /* command response */
+ ACE_CMD_RESPONSE = 0x10,
+};
+
+/* identify camera sensor ownership */
+enum ace_camera_owner {
+ ACE_CAMERA_IVSC,
+ ACE_CAMERA_HOST,
+};
+
+/* identify the command id supported by firmware IPC */
+enum ace_cmd_id {
+ /* used to switch camera sensor to host */
+ ACE_SWITCH_CAMERA_TO_HOST = 0x13,
+
+ /* used to switch camera sensor to IVSC */
+ ACE_SWITCH_CAMERA_TO_IVSC = 0x14,
+
+ /* used to get firmware id */
+ ACE_GET_FW_ID = 0x1A,
+};
+
+/* ACE command header structure */
+struct ace_cmd_hdr {
+ u32 firmware_id : 16;
+ u32 instance_id : 8;
+ u32 type : 5;
+ u32 rsp : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 param_size : 20;
+ u32 cmd_id : 8;
+ u32 final_block : 1;
+ u32 init_block : 1;
+ u32 _hw_rsvd_2 : 2;
+} __packed;
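+/*
+ * Note: the bitfields above pack into exactly two 32-bit words
+ * (16 + 8 + 5 + 1 + 1 + 1 and 20 + 8 + 1 + 1 + 2 bits respectively).
+ */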
+
+/* ACE command parameter structure */
+union ace_cmd_param {
+ uuid_le uuid;
+ u32 param;
+};
+
+/* ACE command structure */
+struct ace_cmd {
+ struct ace_cmd_hdr hdr;
+ union ace_cmd_param param;
+} __packed;
+
+/* ACE notification header */
+union ace_notif_hdr {
+ struct _confirm {
+ u32 status : 24;
+ u32 type : 5;
+ u32 rsp : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 param_size : 20;
+ u32 cmd_id : 8;
+ u32 final_block : 1;
+ u32 init_block : 1;
+ u32 _hw_rsvd_2 : 2;
+ } __packed ack;
+
+ struct _event {
+ u32 rsvd1 : 16;
+ u32 event_type : 8;
+ u32 type : 5;
+ u32 ack : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 rsvd2 : 30;
+ u32 _hw_rsvd_2 : 2;
+ } __packed event;
+
+ struct _response {
+ u32 event_id : 16;
+ u32 notif_type : 8;
+ u32 type : 5;
+ u32 rsp : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 event_data_size : 16;
+ u32 request_target : 1;
+ u32 request_type : 5;
+ u32 cmd_id : 8;
+ u32 _hw_rsvd_2 : 2;
+ } __packed response;
+};
+
+/* ACE notification content */
+union ace_notif_cont {
+ u16 firmware_id;
+ u8 state_notif;
+ u8 camera_status[ACE_CAMERA_STATUS_SIZE];
+};
+
+/* ACE notification structure */
+struct ace_notif {
+ union ace_notif_hdr hdr;
+ union ace_notif_cont cont;
+} __packed;
+
+struct mei_ace {
+ struct mei_cl_device *cldev;
+
+ /* command ack */
+ struct ace_notif cmd_ack;
+ /* command response */
+ struct ace_notif cmd_response;
+ /* used to wait for command ack and response */
+ struct completion cmd_completion;
+ /* lock used to prevent multiple concurrent calls to send a command */
+ struct mutex lock;
+
+ /* used to construct command */
+ u16 firmware_id;
+
+ struct device *csi_dev;
+
+ /* runtime PM link from ace to csi */
+ struct device_link *csi_link;
+
+ struct work_struct work;
+};
+
+static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
+{
+ memset(hdr, 0, sizeof(struct ace_cmd_hdr));
+
+ hdr->type = ACE_CMD_SET;
+ hdr->msg_tgt = ACE_DRV_MSG;
+ hdr->init_block = ACE_CMD_INIT_BLOCK;
+ hdr->final_block = ACE_CMD_FINAL_BLOCK;
+}
+
+static int construct_command(struct mei_ace *ace, struct ace_cmd *cmd,
+ enum ace_cmd_id cmd_id)
+{
+ union ace_cmd_param *param = &cmd->param;
+ struct ace_cmd_hdr *hdr = &cmd->hdr;
+
+ init_cmd_hdr(hdr);
+
+ hdr->cmd_id = cmd_id;
+ switch (cmd_id) {
+ case ACE_GET_FW_ID:
+ param->uuid = ACE_GET_FW_ID_UUID;
+ hdr->param_size = sizeof(param->uuid);
+ break;
+ case ACE_SWITCH_CAMERA_TO_IVSC:
+ param->param = 0;
+ hdr->firmware_id = ace->firmware_id;
+ hdr->param_size = sizeof(param->param);
+ break;
+ case ACE_SWITCH_CAMERA_TO_HOST:
+ hdr->firmware_id = ace->firmware_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return hdr->param_size + sizeof(cmd->hdr);
+}
+
+/* send command to firmware */
+static int mei_ace_send(struct mei_ace *ace, struct ace_cmd *cmd,
+ size_t len, bool only_ack)
+{
+ union ace_notif_hdr *resp_hdr = &ace->cmd_response.hdr;
+ union ace_notif_hdr *ack_hdr = &ace->cmd_ack.hdr;
+ struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
+ int ret;
+
+ mutex_lock(&ace->lock);
+
+ reinit_completion(&ace->cmd_completion);
+
+ ret = mei_cldev_send(ace->cldev, (u8 *)cmd, len);
+ if (ret < 0)
+ goto out;
+
+ ret = wait_for_completion_killable_timeout(&ace->cmd_completion,
+ ACE_CMD_TIMEOUT);
+ if (ret < 0) {
+ goto out;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ack_hdr->ack.cmd_id != cmd_hdr->cmd_id) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* command ack status */
+ ret = ack_hdr->ack.status;
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (only_ack)
+ goto out;
+
+ ret = wait_for_completion_killable_timeout(&ace->cmd_completion,
+ ACE_CMD_TIMEOUT);
+ if (ret < 0) {
+ goto out;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ } else {
+ ret = 0;
+ }
+
+ if (resp_hdr->response.cmd_id != cmd_hdr->cmd_id)
+ ret = -EINVAL;
+
+out:
+ mutex_unlock(&ace->lock);
+
+ return ret;
+}
+
+static int ace_set_camera_owner(struct mei_ace *ace,
+ enum ace_camera_owner owner)
+{
+ enum ace_cmd_id cmd_id;
+ struct ace_cmd cmd;
+ int cmd_size;
+ int ret;
+
+ if (owner == ACE_CAMERA_IVSC)
+ cmd_id = ACE_SWITCH_CAMERA_TO_IVSC;
+ else
+ cmd_id = ACE_SWITCH_CAMERA_TO_HOST;
+
+ cmd_size = construct_command(ace, &cmd, cmd_id);
+ if (cmd_size >= 0)
+ ret = mei_ace_send(ace, &cmd, cmd_size, false);
+ else
+ ret = cmd_size;
+
+ return ret;
+}
+
+/* the first command sent to the firmware */
+static inline int ace_get_firmware_id(struct mei_ace *ace)
+{
+ struct ace_cmd cmd;
+ int cmd_size;
+ int ret;
+
+ cmd_size = construct_command(ace, &cmd, ACE_GET_FW_ID);
+ if (cmd_size >= 0)
+ ret = mei_ace_send(ace, &cmd, cmd_size, true);
+ else
+ ret = cmd_size;
+
+ return ret;
+}
+
+static void handle_command_response(struct mei_ace *ace,
+ struct ace_notif *resp, int len)
+{
+ union ace_notif_hdr *hdr = &resp->hdr;
+
+ switch (hdr->response.cmd_id) {
+ case ACE_SWITCH_CAMERA_TO_IVSC:
+ case ACE_SWITCH_CAMERA_TO_HOST:
+ memcpy(&ace->cmd_response, resp, len);
+ complete(&ace->cmd_completion);
+ break;
+ case ACE_GET_FW_ID:
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_command_ack(struct mei_ace *ace,
+ struct ace_notif *ack, int len)
+{
+ union ace_notif_hdr *hdr = &ack->hdr;
+
+ switch (hdr->ack.cmd_id) {
+ case ACE_GET_FW_ID:
+ ace->firmware_id = ack->cont.firmware_id;
+ fallthrough;
+ case ACE_SWITCH_CAMERA_TO_IVSC:
+ case ACE_SWITCH_CAMERA_TO_HOST:
+ memcpy(&ace->cmd_ack, ack, len);
+ complete(&ace->cmd_completion);
+ break;
+ default:
+ break;
+ }
+}
+
+/* callback for receive */
+static void mei_ace_rx(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+ struct ace_notif event;
+ union ace_notif_hdr *hdr = &event.hdr;
+ int ret;
+
+ ret = mei_cldev_recv(cldev, (u8 *)&event, sizeof(event));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "recv error: %d\n", ret);
+ return;
+ }
+
+ if (hdr->event.ack) {
+ handle_command_ack(ace, &event, ret);
+ return;
+ }
+
+ switch (hdr->event.event_type) {
+ case ACE_CMD_RESPONSE:
+ handle_command_response(ace, &event, ret);
+ break;
+ case ACE_FW_READY:
+ /*
+ * the firmware ready notification is sent to the driver
+ * after the HECI client has connected to the firmware.
+ */
+ dev_dbg(&cldev->dev, "firmware ready\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static int mei_ace_setup_dev_link(struct mei_ace *ace)
+{
+ struct device *dev = &ace->cldev->dev;
+ uuid_le uuid = MEI_CSI_UUID;
+ struct device *csi_dev;
+ char name[64];
+ int ret;
+
+ snprintf(name, sizeof(name), "%s-%pUl", dev_name(dev->parent), &uuid);
+
+ csi_dev = device_find_child_by_name(dev->parent, name);
+ if (!csi_dev) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+ /* setup link between mei_ace and mei_csi */
+ ace->csi_link = device_link_add(csi_dev, dev, DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE | DL_FLAG_STATELESS);
+ if (!ace->csi_link) {
+ ret = -EINVAL;
+ dev_err(dev, "failed to link to %s\n", dev_name(csi_dev));
+ goto err_put;
+ }
+
+ ace->csi_dev = csi_dev;
+
+ return 0;
+
+err_put:
+ put_device(csi_dev);
+
+err:
+ return ret;
+}
+
+/* switch camera to host before probing the sensor device */
+static void mei_ace_post_probe_work(struct work_struct *work)
+{
+ struct acpi_device *adev;
+ struct mei_ace *ace;
+ struct device *dev;
+ int ret;
+
+ ace = container_of(work, struct mei_ace, work);
+ dev = &ace->cldev->dev;
+
+ ret = ace_set_camera_owner(ace, ACE_CAMERA_HOST);
+ if (ret) {
+ dev_err(dev, "switch camera to host failed: %d\n", ret);
+ return;
+ }
+
+ adev = ACPI_COMPANION(dev->parent);
+ if (!adev)
+ return;
+
+ acpi_dev_clear_dependencies(adev);
+}
+
+static int mei_ace_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct device *dev = &cldev->dev;
+ struct mei_ace *ace;
+ int ret;
+
+ ace = devm_kzalloc(dev, sizeof(struct mei_ace), GFP_KERNEL);
+ if (!ace)
+ return -ENOMEM;
+
+ ace->cldev = cldev;
+ mutex_init(&ace->lock);
+ init_completion(&ace->cmd_completion);
+ INIT_WORK(&ace->work, mei_ace_post_probe_work);
+
+ mei_cldev_set_drvdata(cldev, ace);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(dev, "mei_cldev_enable failed: %d\n", ret);
+ goto destroy_mutex;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_ace_rx);
+ if (ret) {
+ dev_err(dev, "event cb registration failed: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = ace_get_firmware_id(ace);
+ if (ret) {
+ dev_err(dev, "get firmware id failed: %d\n", ret);
+ goto err_disable;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = mei_ace_setup_dev_link(ace);
+ if (ret)
+ goto disable_pm;
+
+ schedule_work(&ace->work);
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+destroy_mutex:
+ mutex_destroy(&ace->lock);
+
+ return ret;
+}
+
+static void mei_ace_remove(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+
+ cancel_work_sync(&ace->work);
+
+ device_link_del(ace->csi_link);
+ put_device(ace->csi_dev);
+
+ pm_runtime_disable(&cldev->dev);
+ pm_runtime_set_suspended(&cldev->dev);
+
+ ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+
+ mutex_destroy(&ace->lock);
+}
+
+static int __maybe_unused mei_ace_runtime_suspend(struct device *dev)
+{
+ struct mei_ace *ace = dev_get_drvdata(dev);
+
+ return ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+}
+
+static int __maybe_unused mei_ace_runtime_resume(struct device *dev)
+{
+ struct mei_ace *ace = dev_get_drvdata(dev);
+
+ return ace_set_camera_owner(ace, ACE_CAMERA_HOST);
+}
+
+static const struct dev_pm_ops mei_ace_pm_ops = {
+ SET_RUNTIME_PM_OPS(mei_ace_runtime_suspend,
+ mei_ace_runtime_resume, NULL)
+};
+
+#define MEI_ACE_UUID UUID_LE(0x5DB76CF6, 0x0A68, 0x4ED6, \
+ 0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
+
+static const struct mei_cl_device_id mei_ace_tbl[] = {
+ { MEI_ACE_DRIVER_NAME, MEI_ACE_UUID, MEI_CL_VERSION_ANY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
+
+static struct mei_cl_driver mei_ace_driver = {
+ .id_table = mei_ace_tbl,
+ .name = MEI_ACE_DRIVER_NAME,
+
+ .probe = mei_ace_probe,
+ .remove = mei_ace_remove,
+
+ .driver = {
+ .pm = &mei_ace_pm_ops,
+ },
+};
+
+module_mei_cl_driver(mei_ace_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Device driver for IVSC ACE");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
new file mode 100644
index 0000000000..00ba611e0f
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Intel Corporation. All rights reserved.
+ * Intel Visual Sensing Controller CSI Linux driver
+ */
+
+/*
+ * Specific commands, sent via the MEI protocol, are used to take
+ * ownership of the CSI-2 link and to configure it. The send-command
+ * function uses a completion as its synchronization mechanism; the
+ * response to a command is received via an MEI callback, which wakes
+ * up the caller. There can be only one outstanding command at a time.
+ */
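+
+/*
+ * In a typical media graph the IVSC CSI subdevice sits between the
+ * camera sensor and the IPU CSI-2 receiver (a sketch of the expected
+ * topology, not something this driver enforces on its own):
+ *
+ *   sensor --> [sink] Intel IVSC CSI [source] --> IPU CSI-2 receiver
+ */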
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define MEI_CSI_DRIVER_NAME "ivsc_csi"
+#define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
+
+#define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
+
+/* the 5 s timeout used here was determined experimentally */
+#define CSI_CMD_TIMEOUT (5 * HZ)
+/* an extra delay, determined experimentally, is needed to set up the CSI-2 link */
+#define CSI_FW_READY_DELAY_MS 100
+/* the firmware expects the link frequency in units of 100 kHz */
+#define CSI_LINK_FREQ(x) ((u32)(div_u64(x, 100 * HZ_PER_KHZ)))
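+/*
+ * For example, the default 400 MHz link frequency is reported to the
+ * firmware as CSI_LINK_FREQ(400000000ULL) == 4000, i.e. 4000 * 100 kHz.
+ */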
+
+/*
+ * Command IDs supported by the firmware IPC, along with the privacy
+ * notification ID used when processing privacy events.
+ */
+enum csi_cmd_id {
+	/* used to set CSI-2 link ownership */
+ CSI_SET_OWNER = 0,
+
+ /* used to configure CSI-2 link */
+ CSI_SET_CONF = 2,
+
+ /* privacy notification id used when privacy state changes */
+ CSI_PRIVACY_NOTIF = 6,
+};
+
+/* CSI-2 link ownership definition */
+enum csi_link_owner {
+ CSI_LINK_IVSC,
+ CSI_LINK_HOST,
+};
+
+/* privacy status definition */
+enum ivsc_privacy_status {
+ CSI_PRIVACY_OFF,
+ CSI_PRIVACY_ON,
+ CSI_PRIVACY_MAX,
+};
+
+enum csi_pads {
+ CSI_PAD_SOURCE,
+ CSI_PAD_SINK,
+ CSI_NUM_PADS
+};
+
+/* configuration of the CSI-2 link between host and IVSC */
+struct csi_link_cfg {
+ /* number of data lanes used on the CSI-2 link */
+ u32 nr_of_lanes;
+
+ /* frequency of the CSI-2 link */
+ u32 link_freq;
+
+ /* for future use */
+ u32 rsvd[2];
+} __packed;
+
+/* CSI command structure */
+struct csi_cmd {
+ u32 cmd_id;
+ union _cmd_param {
+ u32 param;
+ struct csi_link_cfg conf;
+ } param;
+} __packed;
+
+/* CSI notification structure */
+struct csi_notif {
+ u32 cmd_id;
+ int status;
+ union _resp_cont {
+ u32 cont;
+ struct csi_link_cfg conf;
+ } cont;
+} __packed;
+
+struct mei_csi {
+ struct mei_cl_device *cldev;
+
+ /* command response */
+ struct csi_notif cmd_response;
+ /* used to wait for command response from firmware */
+ struct completion cmd_completion;
+	/* serializes command submission to the firmware */
+ struct mutex lock;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_subdev *remote;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *freq_ctrl;
+ struct v4l2_ctrl *privacy_ctrl;
+ unsigned int remote_pad;
+	/* whether streaming is enabled */
+ int streaming;
+
+ struct media_pad pads[CSI_NUM_PADS];
+ struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
+
+ /* number of data lanes used on the CSI-2 link */
+ u32 nr_of_lanes;
+ /* frequency of the CSI-2 link */
+ u64 link_freq;
+
+ /* privacy status */
+ enum ivsc_privacy_status status;
+};
+
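+/* a 1x1 Y8 placeholder; the actual format is set by the connected sensor */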
+static const struct v4l2_mbus_framefmt mei_csi_format_mbus_default = {
+ .width = 1,
+ .height = 1,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .field = V4L2_FIELD_NONE,
+};
+
+static s64 link_freq_menu_items[] = {
+ MEI_CSI_LINK_FREQ_400MHZ
+};
+
+static inline struct mei_csi *notifier_to_csi(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct mei_csi, notifier);
+}
+
+static inline struct mei_csi *sd_to_csi(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mei_csi, subdev);
+}
+
+static inline struct mei_csi *ctrl_to_csi(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mei_csi, ctrl_handler);
+}
+
+/* send a command to the firmware; the caller must hold csi->lock */
+static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len)
+{
+ struct csi_cmd *cmd = (struct csi_cmd *)buf;
+ int ret;
+
+ reinit_completion(&csi->cmd_completion);
+
+ ret = mei_cldev_send(csi->cldev, buf, len);
+ if (ret < 0)
+ goto out;
+
+ ret = wait_for_completion_killable_timeout(&csi->cmd_completion,
+ CSI_CMD_TIMEOUT);
+ if (ret < 0) {
+ goto out;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* command response status */
+ ret = csi->cmd_response.status;
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (csi->cmd_response.cmd_id != cmd->cmd_id)
+ ret = -EINVAL;
+
+out:
+ return ret;
+}
+
+/* set CSI-2 link ownership */
+static int csi_set_link_owner(struct mei_csi *csi, enum csi_link_owner owner)
+{
+ struct csi_cmd cmd = { 0 };
+ size_t cmd_size;
+ int ret;
+
+ cmd.cmd_id = CSI_SET_OWNER;
+ cmd.param.param = owner;
+ cmd_size = sizeof(cmd.cmd_id) + sizeof(cmd.param.param);
+
+ mutex_lock(&csi->lock);
+
+ ret = mei_csi_send(csi, (u8 *)&cmd, cmd_size);
+
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+/* configure CSI-2 link between host and IVSC */
+static int csi_set_link_cfg(struct mei_csi *csi)
+{
+ struct csi_cmd cmd = { 0 };
+ size_t cmd_size;
+ int ret;
+
+ cmd.cmd_id = CSI_SET_CONF;
+ cmd.param.conf.nr_of_lanes = csi->nr_of_lanes;
+ cmd.param.conf.link_freq = CSI_LINK_FREQ(csi->link_freq);
+ cmd_size = sizeof(cmd.cmd_id) + sizeof(cmd.param.conf);
+
+ mutex_lock(&csi->lock);
+
+ ret = mei_csi_send(csi, (u8 *)&cmd, cmd_size);
+	/*
+	 * If the command was sent successfully, wait for the configuration
+	 * to become ready. Keeping the delay under the mutex ensures the
+	 * current command flow has completed before a new one can start.
+	 */
+ if (!ret)
+ msleep(CSI_FW_READY_DELAY_MS);
+
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+/* callback invoked when a notification is received from the firmware */
+static void mei_csi_rx(struct mei_cl_device *cldev)
+{
+ struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+ struct csi_notif notif = { 0 };
+ int ret;
+
+ ret = mei_cldev_recv(cldev, (u8 *)&notif, sizeof(notif));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "recv error: %d\n", ret);
+ return;
+ }
+
+ switch (notif.cmd_id) {
+ case CSI_PRIVACY_NOTIF:
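+		/* propagate the new privacy state to the V4L2 control */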
+ if (notif.cont.cont < CSI_PRIVACY_MAX) {
+ csi->status = notif.cont.cont;
+ v4l2_ctrl_s_ctrl(csi->privacy_ctrl, csi->status);
+ }
+ break;
+ case CSI_SET_OWNER:
+ case CSI_SET_CONF:
+ memcpy(&csi->cmd_response, &notif, ret);
+
+ complete(&csi->cmd_completion);
+ break;
+ default:
+ break;
+ }
+}
+
+static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mei_csi *csi = sd_to_csi(sd);
+ s64 freq;
+ int ret;
+
+ if (enable && csi->streaming == 0) {
+ freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
+ if (freq < 0) {
+ dev_err(&csi->cldev->dev,
+ "error %lld, invalid link_freq\n", freq);
+ ret = freq;
+ goto err;
+ }
+ csi->link_freq = freq;
+
+ /* switch CSI-2 link to host */
+ ret = csi_set_link_owner(csi, CSI_LINK_HOST);
+ if (ret < 0)
+ goto err;
+
+ /* configure CSI-2 link */
+ ret = csi_set_link_cfg(csi);
+ if (ret < 0)
+ goto err_switch;
+
+ ret = v4l2_subdev_call(csi->remote, video, s_stream, 1);
+ if (ret)
+ goto err_switch;
+ } else if (!enable && csi->streaming == 1) {
+ v4l2_subdev_call(csi->remote, video, s_stream, 0);
+
+ /* switch CSI-2 link to IVSC */
+ ret = csi_set_link_owner(csi, CSI_LINK_IVSC);
+ if (ret < 0)
+ dev_warn(&csi->cldev->dev,
+ "failed to switch CSI2 link: %d\n", ret);
+ }
+
+ csi->streaming = enable;
+
+ return 0;
+
+err_switch:
+ csi_set_link_owner(csi, CSI_LINK_IVSC);
+
+err:
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+mei_csi_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, u32 which)
+{
+ struct mei_csi *csi = sd_to_csi(sd);
+
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &csi->format_mbus[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int mei_csi_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct mei_csi *csi = sd_to_csi(sd);
+ unsigned int i;
+
+ mutex_lock(&csi->lock);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
+ *mbusformat = mei_csi_format_mbus_default;
+ }
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int mei_csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct mei_csi *csi = sd_to_csi(sd);
+
+ mutex_lock(&csi->lock);
+
+ mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
+ format->which);
+ if (mbusformat)
+ format->format = *mbusformat;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int mei_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *source_mbusformat;
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct mei_csi *csi = sd_to_csi(sd);
+ struct media_pad *pad;
+
+ mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
+ format->which);
+ if (!mbusformat)
+ return -EINVAL;
+
+ source_mbusformat = mei_csi_get_pad_format(sd, sd_state, CSI_PAD_SOURCE,
+ format->which);
+ if (!source_mbusformat)
+ return -EINVAL;
+
+ v4l_bound_align_image(&format->format.width, 1, 65536, 0,
+ &format->format.height, 1, 65536, 0, 0);
+
+ switch (format->format.code) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ case MEDIA_BUS_FMT_RGB888_1X32_PADHI:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UYVY8_1_5X8:
+ case MEDIA_BUS_FMT_VYUY8_1_5X8:
+ case MEDIA_BUS_FMT_YUYV8_1_5X8:
+ case MEDIA_BUS_FMT_YVYU8_1_5X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_VYUY10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YVYU10_2X10:
+ case MEDIA_BUS_FMT_Y12_1X12:
+ case MEDIA_BUS_FMT_UYVY12_2X12:
+ case MEDIA_BUS_FMT_VYUY12_2X12:
+ case MEDIA_BUS_FMT_YUYV12_2X12:
+ case MEDIA_BUS_FMT_YVYU12_2X12:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VYUY10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YVYU10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ case MEDIA_BUS_FMT_VYUY12_1X24:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ case MEDIA_BUS_FMT_YVYU12_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SBGGR14_1X14:
+ case MEDIA_BUS_FMT_SGBRG14_1X14:
+ case MEDIA_BUS_FMT_SGRBG14_1X14:
+ case MEDIA_BUS_FMT_SRGGB14_1X14:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ break;
+ default:
+ format->format.code = MEDIA_BUS_FMT_Y8_1X8;
+ break;
+ }
+
+ if (format->format.field == V4L2_FIELD_ANY)
+ format->format.field = V4L2_FIELD_NONE;
+
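+	/*
+	 * The bridge cannot convert formats: the source pad always mirrors
+	 * the sink pad, so the source format is effectively read-only and
+	 * a format set on the sink is propagated to the source.
+	 */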
+ mutex_lock(&csi->lock);
+
+ pad = &csi->pads[format->pad];
+ if (pad->flags & MEDIA_PAD_FL_SOURCE)
+ format->format = csi->format_mbus[CSI_PAD_SINK];
+
+ *mbusformat = format->format;
+
+ if (pad->flags & MEDIA_PAD_FL_SINK)
+ *source_mbusformat = format->format;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int mei_csi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mei_csi *csi = ctrl_to_csi(ctrl);
+ s64 freq;
+
+ if (ctrl->id == V4L2_CID_LINK_FREQ) {
+ if (!csi->remote)
+ return -EINVAL;
+
+ freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
+ if (freq < 0) {
+ dev_err(&csi->cldev->dev,
+ "error %lld, invalid link_freq\n", freq);
+ return -EINVAL;
+ }
+
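+		/*
+		 * The menu holds a single entry; update it with the remote
+		 * sensor's current link frequency so that index 0 is always
+		 * reported with a valid value.
+		 */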
+ link_freq_menu_items[0] = freq;
+ ctrl->val = 0;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops mei_csi_ctrl_ops = {
+ .g_volatile_ctrl = mei_csi_g_volatile_ctrl,
+};
+
+static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
+ .s_stream = mei_csi_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
+ .init_cfg = mei_csi_init_cfg,
+ .get_fmt = mei_csi_get_fmt,
+ .set_fmt = mei_csi_set_fmt,
+};
+
+static const struct v4l2_subdev_ops mei_csi_subdev_ops = {
+ .video = &mei_csi_video_ops,
+ .pad = &mei_csi_pad_ops,
+};
+
+static const struct media_entity_operations mei_csi_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct mei_csi *csi = notifier_to_csi(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0)
+ return pad;
+
+ csi->remote = subdev;
+ csi->remote_pad = pad;
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &csi->subdev.entity, 1,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void mei_csi_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct mei_csi *csi = notifier_to_csi(notifier);
+
+ csi->remote = NULL;
+}
+
+static const struct v4l2_async_notifier_operations mei_csi_notify_ops = {
+ .bound = mei_csi_notify_bound,
+ .unbind = mei_csi_notify_unbind,
+};
+
+static int mei_csi_init_controls(struct mei_csi *csi)
+{
+ u32 max;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2);
+ if (ret)
+ return ret;
+
+ csi->ctrl_handler.lock = &csi->lock;
+
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler,
+ &mei_csi_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ max,
+ 0,
+ link_freq_menu_items);
+ if (csi->freq_ctrl)
+ csi->freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY |
+ V4L2_CTRL_FLAG_VOLATILE;
+
+ csi->privacy_ctrl = v4l2_ctrl_new_std(&csi->ctrl_handler, NULL,
+ V4L2_CID_PRIVACY, 0, 1, 1, 0);
+ if (csi->privacy_ctrl)
+ csi->privacy_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (csi->ctrl_handler.error)
+ return csi->ctrl_handler.error;
+
+ csi->subdev.ctrl_handler = &csi->ctrl_handler;
+
+ return 0;
+}
+
+static int mei_csi_parse_firmware(struct mei_csi *csi)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct device *dev = &csi->cldev->dev;
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
+ if (!ep) {
+ dev_err(dev, "not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ if (ret) {
+ dev_err(dev, "could not parse v4l2 endpoint\n");
+ fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ v4l2_async_subdev_nf_init(&csi->notifier, &csi->subdev);
+ csi->notifier.ops = &mei_csi_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asd)) {
+ fwnode_handle_put(fwnode);
+ return PTR_ERR(asd);
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(fwnode, &v4l2_ep);
+ fwnode_handle_put(fwnode);
+ if (ret)
+ return ret;
+ csi->nr_of_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+
+ ret = v4l2_async_nf_register(&csi->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&csi->notifier);
+
+ v4l2_fwnode_endpoint_free(&v4l2_ep);
+
+ return ret;
+}
+
+static int mei_csi_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct device *dev = &cldev->dev;
+ struct mei_csi *csi;
+ int ret;
+
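+	/* the fwnode is created by the IPU bridge; defer until it appears */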
+ if (!dev_fwnode(dev))
+ return -EPROBE_DEFER;
+
+ csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->cldev = cldev;
+ mutex_init(&csi->lock);
+ init_completion(&csi->cmd_completion);
+
+ mei_cldev_set_drvdata(cldev, csi);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(dev, "mei_cldev_enable failed: %d\n", ret);
+ goto destroy_mutex;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_csi_rx);
+ if (ret) {
+ dev_err(dev, "event cb registration failed: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = mei_csi_parse_firmware(csi);
+ if (ret)
+ goto err_disable;
+
+ csi->subdev.dev = &cldev->dev;
+ v4l2_subdev_init(&csi->subdev, &mei_csi_subdev_ops);
+ v4l2_set_subdevdata(&csi->subdev, csi);
+ csi->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ csi->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi->subdev.entity.ops = &mei_csi_entity_ops;
+
+ snprintf(csi->subdev.name, sizeof(csi->subdev.name),
+ MEI_CSI_ENTITY_NAME);
+
+ ret = mei_csi_init_controls(csi);
+ if (ret)
+ goto err_ctrl_handler;
+
+ csi->format_mbus[CSI_PAD_SOURCE] = mei_csi_format_mbus_default;
+ csi->format_mbus[CSI_PAD_SINK] = mei_csi_format_mbus_default;
+
+ csi->pads[CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ csi->pads[CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&csi->subdev.entity, CSI_NUM_PADS,
+ csi->pads);
+ if (ret)
+ goto err_ctrl_handler;
+
+ ret = v4l2_subdev_init_finalize(&csi->subdev);
+ if (ret < 0)
+ goto err_entity;
+
+ ret = v4l2_async_register_subdev(&csi->subdev);
+ if (ret < 0)
+ goto err_subdev;
+
+ pm_runtime_enable(&cldev->dev);
+
+ return 0;
+
+err_subdev:
+ v4l2_subdev_cleanup(&csi->subdev);
+
+err_entity:
+ media_entity_cleanup(&csi->subdev.entity);
+
+err_ctrl_handler:
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+destroy_mutex:
+ mutex_destroy(&csi->lock);
+
+ return ret;
+}
+
+static void mei_csi_remove(struct mei_cl_device *cldev)
+{
+ struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
+ v4l2_async_unregister_subdev(&csi->subdev);
+ v4l2_subdev_cleanup(&csi->subdev);
+ media_entity_cleanup(&csi->subdev.entity);
+
+ pm_runtime_disable(&cldev->dev);
+
+ mutex_destroy(&csi->lock);
+}
+
+#define MEI_CSI_UUID UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+			     0xAF, 0x93, 0x7B, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+static const struct mei_cl_device_id mei_csi_tbl[] = {
+ { MEI_CSI_DRIVER_NAME, MEI_CSI_UUID, MEI_CL_VERSION_ANY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
+
+static struct mei_cl_driver mei_csi_driver = {
+ .id_table = mei_csi_tbl,
+ .name = MEI_CSI_DRIVER_NAME,
+
+ .probe = mei_csi_probe,
+ .remove = mei_csi_remove,
+};
+
+module_mei_cl_driver(mei_csi_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Device driver for IVSC CSI");
+MODULE_LICENSE("GPL");