Diffstat (limited to 'drivers/bus/fsl-mc')
-rw-r--r--   drivers/bus/fsl-mc/Kconfig              23
-rw-r--r--   drivers/bus/fsl-mc/Makefile             22
-rw-r--r--   drivers/bus/fsl-mc/dpbp.c              185
-rw-r--r--   drivers/bus/fsl-mc/dpcon.c             221
-rw-r--r--   drivers/bus/fsl-mc/dpmcp.c              99
-rw-r--r--   drivers/bus/fsl-mc/dprc-driver.c       889
-rw-r--r--   drivers/bus/fsl-mc/dprc.c              704
-rw-r--r--   drivers/bus/fsl-mc/fsl-mc-allocator.c  663
-rw-r--r--   drivers/bus/fsl-mc/fsl-mc-bus.c       1303
-rw-r--r--   drivers/bus/fsl-mc/fsl-mc-msi.c        233
-rw-r--r--   drivers/bus/fsl-mc/fsl-mc-private.h    695
-rw-r--r--   drivers/bus/fsl-mc/fsl-mc-uapi.c       597
-rw-r--r--   drivers/bus/fsl-mc/mc-io.c             285
-rw-r--r--   drivers/bus/fsl-mc/mc-sys.c            297
-rw-r--r--   drivers/bus/fsl-mc/obj-api.c           103
15 files changed, 6319 insertions, 0 deletions
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
new file mode 100644
index 0000000000..9492342e7d
--- /dev/null
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DPAA2 fsl-mc bus
+#
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+#
+
+config FSL_MC_BUS
+ bool "QorIQ DPAA2 fsl-mc bus driver"
+ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
+ select GENERIC_MSI_IRQ
+ help
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
+
+config FSL_MC_UAPI_SUPPORT
+ bool "Management Complex (MC) userspace support"
+ depends on FSL_MC_BUS
+ help
+ Provides userspace support for interrogating, creating, destroying or
+ configuring DPAA2 objects exported by the Management Complex.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
new file mode 100644
index 0000000000..8929462455
--- /dev/null
+++ b/drivers/bus/fsl-mc/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Freescale Management Complex (MC) bus drivers
+#
+# Copyright (C) 2014 Freescale Semiconductor, Inc.
+#
+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
+
+mc-bus-driver-objs := fsl-mc-bus.o \
+ mc-sys.o \
+ mc-io.o \
+ dpbp.o \
+ dpcon.o \
+ dprc.o \
+ dprc-driver.o \
+ fsl-mc-allocator.o \
+ fsl-mc-msi.o \
+ dpmcp.o \
+ obj-api.o
+
+# MC userspace support
+obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/dpbp.c b/drivers/bus/fsl-mc/dpbp.c
new file mode 100644
index 0000000000..9003cd3698
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpbp.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpbp_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpbp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpbp_open);
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_close);
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_enable);
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_disable);
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_reset);
+
+/**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpbp_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpbp_get_attributes);
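
The DPBP commands above all follow the same open/command/close pattern against an MC portal. As a minimal sketch only (it is not part of this patch and assumes a caller that already owns an initialized struct fsl_mc_io and includes linux/fsl/mc.h), a consumer could drive them like this:

/* Hypothetical consumer sketch: open a DPBP, enable it, read back its
 * attributes, then drop the control session.
 */
static int example_setup_dpbp(struct fsl_mc_io *mc_io, int dpbp_id)
{
	struct dpbp_attr attr;
	u16 token;
	int err;

	/* The token returned here authenticates every later command. */
	err = dpbp_open(mc_io, 0, dpbp_id, &token);
	if (err)
		return err;

	err = dpbp_enable(mc_io, 0, token);
	if (err)
		goto out_close;

	err = dpbp_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		pr_info("dpbp.%d: buffer pool id %u\n", attr.id, attr.bpid);

out_close:
	/* Ends the control session only; a new dpbp_open() is needed later. */
	dpbp_close(mc_io, 0, token);
	return err;
}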
diff --git a/drivers/bus/fsl-mc/dpcon.c b/drivers/bus/fsl-mc/dpcon.c
new file mode 100644
index 0000000000..97b6fa605e
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpcon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpcon_open);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_close);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_enable);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_disable);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_reset);
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpcon_get_attributes);
+
+/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @cfg: Notification parameters
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
+ cmd_flags,
+ token);
+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
+ dpcon_cmd->priority = cfg->priority;
+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_set_notification);
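
dpcon_set_notification() is the only call in this file that carries a payload beyond the token. Below is a hypothetical sketch (not part of this patch; the DPIO id and user context are placeholders) of pointing a DPCON at a DPIO and then enabling the channel:

/* Hypothetical sketch: route DPCON notifications to a DPIO, then enable. */
static int example_arm_dpcon(struct fsl_mc_io *mc_io, u16 dpcon_token,
			     int dpio_id, u64 user_ctx)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id = dpio_id,	/* DPIO object receiving notifications */
		.priority = 0,		/* must be below the DPCON's num_priorities */
		.user_ctx = user_ctx,	/* opaque value returned with notifications */
	};
	int err;

	err = dpcon_set_notification(mc_io, 0, dpcon_token, &cfg);
	if (err)
		return err;

	return dpcon_enable(mc_io, 0, dpcon_token);
}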
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
new file mode 100644
index 0000000000..5fbd0dbde2
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpmcp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmcp_id: DPMCP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmcp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpmcp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmcp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
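
None of the DPMCP helpers are exported; per the Makefile they are linked into the same mc-bus-driver module that uses them when MC portal objects are handed out and recycled. A minimal, hypothetical sketch of that open/reset/close lifecycle (not part of this patch):

/* Hypothetical sketch: return a DPMCP portal object to its initial state. */
static int example_recycle_dpmcp(struct fsl_mc_io *mc_io, int dpmcp_id)
{
	u16 token;
	int err;

	err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
	if (err)
		return err;

	err = dpmcp_reset(mc_io, 0, token);

	dpmcp_close(mc_io, 0, token);
	return err;
}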
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
new file mode 100644
index 0000000000..4b68c84ef4
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale data path resource container (DPRC) driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+
+struct fsl_mc_child_objs {
+ int child_count;
+ struct fsl_mc_obj_desc *child_array;
+};
+
+static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ return mc_dev->obj_desc.id == obj_desc->id &&
+ strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
+}
+
+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+{
+ int i;
+ struct fsl_mc_child_objs *objs;
+ struct fsl_mc_device *mc_dev;
+
+ if (!dev_is_fsl_mc(dev))
+ return 0;
+
+ mc_dev = to_fsl_mc_device(dev);
+ objs = data;
+
+ for (i = 0; i < objs->child_count; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
+
+ if (strlen(obj_desc->type) != 0 &&
+ fsl_mc_device_match(mc_dev, obj_desc))
+ break;
+ }
+
+ if (i == objs->child_count)
+ fsl_mc_device_remove(mc_dev);
+
+ return 0;
+}
+
+static int __fsl_mc_device_remove(struct device *dev, void *data)
+{
+ if (!dev_is_fsl_mc(dev))
+ return 0;
+
+ fsl_mc_device_remove(to_fsl_mc_device(dev));
+ return 0;
+}
+
+/**
+ * dprc_remove_devices - Removes devices for objects removed from a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of object descriptors for child objects currently
+ * present in the DPRC in the MC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual state of
+ * the MC by removing devices that represent MC objects that have
+ * been dynamically removed in the physical DPRC.
+ */
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ if (num_child_objects_in_mc != 0) {
+ /*
+ * Remove child objects that are in the DPRC in Linux,
+ * but not in the MC:
+ */
+ struct fsl_mc_child_objs objs;
+
+ objs.child_count = num_child_objects_in_mc;
+ objs.child_array = obj_desc_array;
+ device_for_each_child(&mc_bus_dev->dev, &objs,
+ __fsl_mc_device_remove_if_not_in_mc);
+ } else {
+ /*
+ * There are no child objects for this DPRC in the MC.
+ * So, remove all the child devices from Linux:
+ */
+ device_for_each_child(&mc_bus_dev->dev, NULL,
+ __fsl_mc_device_remove);
+ }
+}
+EXPORT_SYMBOL_GPL(dprc_remove_devices);
+
+static int __fsl_mc_device_match(struct device *dev, void *data)
+{
+ struct fsl_mc_obj_desc *obj_desc = data;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return fsl_mc_device_match(mc_dev, obj_desc);
+}
+
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ struct device *dev;
+
+ dev = device_find_child(&mc_bus_dev->dev, obj_desc,
+ __fsl_mc_device_match);
+
+ return dev ? to_fsl_mc_device(dev) : NULL;
+}
+
+/**
+ * check_plugged_state_change - Check change in an MC object's plugged state
+ *
+ * @mc_dev: pointer to the fsl-mc device for a given MC object
+ * @obj_desc: pointer to the MC object's descriptor in the MC
+ *
+ * If the plugged state has changed from unplugged to plugged, the fsl-mc
+ * device is bound to the corresponding device driver.
+ * If the plugged state has changed from plugged to unplugged, the fsl-mc
+ * device is unbound from the corresponding device driver.
+ */
+static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ u32 plugged_flag_at_mc =
+ obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
+
+ if (plugged_flag_at_mc !=
+ (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
+ if (plugged_flag_at_mc) {
+ mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
+ error = device_attach(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "device_attach() failed: %d\n",
+ error);
+ }
+ } else {
+ mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
+ device_release_driver(&mc_dev->dev);
+ }
+ }
+}
+
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
+/**
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual
+ * state of the MC by adding objects that have been newly discovered
+ * in the physical DPRC.
+ */
+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ int i;
+
+ /* probe the allocable objects first */
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
+
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
+}
+
+/**
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+ * state of the Linux bus driver with the MC by adding and removing
+ * devices accordingly.
+ * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
+ * All allocatable devices need to be probed before all non-allocatable
+ * devices, to ensure that device drivers for non-allocatable
+ * devices can allocate any type of allocatable devices.
+ * That is, we need to ensure that the corresponding resource pools are
+ * populated before they can get allocation requests from probe callbacks
+ * of the device drivers for the non-allocatable devices.
+ */
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
+{
+ int num_child_objects;
+ int dprc_get_obj_failures;
+ int error;
+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
+ struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ error = dprc_get_obj_count(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ &num_child_objects);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
+ error);
+ return error;
+ }
+
+ if (num_child_objects != 0) {
+ int i;
+
+ child_obj_desc_array =
+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
+ sizeof(*child_obj_desc_array),
+ GFP_KERNEL);
+ if (!child_obj_desc_array)
+ return -ENOMEM;
+
+ /*
+ * Discover objects currently present in the physical DPRC:
+ */
+ dprc_get_obj_failures = 0;
+ for (i = 0; i < num_child_objects; i++) {
+ struct fsl_mc_obj_desc *obj_desc =
+ &child_obj_desc_array[i];
+
+ error = dprc_get_obj(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ i, obj_desc);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev,
+ "dprc_get_obj(i=%d) failed: %d\n",
+ i, error);
+ /*
+ * Mark the obj entry as "invalid", by using the
+ * empty string as obj type:
+ */
+ obj_desc->type[0] = '\0';
+ obj_desc->id = error;
+ dprc_get_obj_failures++;
+ continue;
+ }
+
+ /*
+ * add a quirk for all versions of dpseci < 4.0...none
+ * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
+ irq_count += obj_desc->irq_count;
+ dev_dbg(&mc_bus_dev->dev,
+ "Discovered object: type %s, id %d\n",
+ obj_desc->type, obj_desc->id);
+ }
+
+ if (dprc_get_obj_failures != 0) {
+ dev_err(&mc_bus_dev->dev,
+ "%d out of %d devices could not be retrieved\n",
+ dprc_get_obj_failures, num_child_objects);
+ }
+ }
+
+ /*
+ * Allocate IRQ's before binding the scanned devices with their
+ * respective drivers.
+ */
+ if (dev_get_msi_domain(&mc_bus_dev->dev)) {
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(&mc_bus_dev->dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+
+ if (alloc_interrupts && !mc_bus->irq_resources) {
+ error = fsl_mc_populate_irq_pool(mc_bus_dev,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
+ }
+
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
+
+ return 0;
+}
+
+/**
+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
+ * Scans the physical DPRC and synchronizes the state of the Linux
+ * bus driver with the actual state of the MC by adding and removing
+ * devices as appropriate.
+ */
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
+{
+ int error = 0;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ fsl_mc_init_all_resource_pools(mc_bus_dev);
+
+ /*
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+ error = dprc_scan_objects(mc_bus_dev, alloc_interrupts);
+ mutex_unlock(&mc_bus->scan_mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_scan_container);
+
+/**
+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+{
+ int error;
+ u32 status;
+ struct device *dev = arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+ int irq = mc_dev->irqs[0]->virq;
+
+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+ irq_num, smp_processor_id());
+
+ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
+ return IRQ_HANDLED;
+
+ mutex_lock(&mc_bus->scan_mutex);
+ if (irq != (u32)irq_num)
+ goto out;
+
+ status = 0;
+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ &status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_get_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_clear_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+ DPRC_IRQ_EVENT_OBJ_REMOVED |
+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+
+ error = dprc_scan_objects(mc_dev, true);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+ * that the object scan was aborted, as we detected that
+ * an object was removed from the DPRC in the MC, while
+ * we were scanning the DPRC.
+ */
+ if (error != -ENXIO) {
+ dev_err(dev, "dprc_scan_objects() failed: %d\n",
+ error);
+ }
+
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mc_bus->scan_mutex);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Disable and clear interrupt for a given DPRC object
+ */
+int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+
+ /*
+ * Disable generation of interrupt, while we configure it:
+ */
+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Disable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Clear any leftover interrupts:
+ */
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+ error);
+ return error;
+ }
+
+ mc_bus->irq_enabled = 0;
+
+ return 0;
+}
+
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ return mc_bus->irq_enabled;
+}
+
+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ /*
+ * NOTE: devm_request_threaded_irq() invokes the device-specific
+ * function that programs the MSI physically in the device
+ */
+ error = devm_request_threaded_irq(&mc_dev->dev,
+ irq->virq,
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&mc_dev->dev),
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "devm_request_threaded_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+
+ /*
+ * Enable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+ ~0x0u);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ /*
+ * Enable generation of the interrupt:
+ */
+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ mc_bus->irq_enabled = 1;
+
+ return 0;
+}
+
+/*
+ * Setup interrupt for a given DPRC device
+ */
+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = fsl_mc_allocate_irqs(mc_dev);
+ if (error < 0)
+ return error;
+
+ error = disable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = register_dprc_irq_handler(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = enable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ return 0;
+
+error_free_irqs:
+ fsl_mc_free_irqs(mc_dev);
+ return error;
+}
+
+/**
+ * dprc_setup - opens a DPRC and creates an mc_io for it
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It configures the DPRC portal used to communicate with MC
+ */
+
+int dprc_setup(struct fsl_mc_device *mc_dev)
+{
+ struct device *parent_dev = mc_dev->dev.parent;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct irq_domain *mc_msi_domain;
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
+ bool uapi_created = false;
+ u16 major_ver, minor_ver;
+ size_t region_size;
+ int error;
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ return -EINVAL;
+
+ if (!mc_dev->mc_io) {
+ /*
+ * This is a child DPRC:
+ */
+ if (!dev_is_fsl_mc(parent_dev))
+ return -EINVAL;
+
+ if (mc_dev->obj_desc.region_count == 0)
+ return -EINVAL;
+
+ region_size = resource_size(mc_dev->regions);
+
+ error = fsl_create_mc_io(&mc_dev->dev,
+ mc_dev->regions[0].start,
+ region_size,
+ NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
+
+ mc_io_created = true;
+ } else {
+ error = fsl_mc_uapi_create_device_file(mc_bus);
+ if (error < 0)
+ return -EPROBE_DEFER;
+ uapi_created = true;
+ }
+
+ mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+ if (!mc_msi_domain) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
+ } else {
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
+ }
+
+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
+ goto error_cleanup_msi_domain;
+ }
+
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ error = dprc_get_api_version(mc_dev->mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (major_ver < DPRC_MIN_VER_MAJOR) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ major_ver, minor_ver);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
+ return 0;
+
+error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ if (uapi_created)
+ fsl_mc_uapi_remove_device_file(mc_bus);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_setup);
+
+/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It scans the DPRC to discover the MC objects contained in it.
+ * It creates the interrupt pool for the MC bus associated with the DPRC.
+ * It configures the interrupts for the DPRC device itself.
+ */
+static int dprc_probe(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = dprc_setup(mc_dev);
+ if (error < 0)
+ return error;
+
+ /*
+ * Discover MC objects in DPRC object:
+ */
+ error = dprc_scan_container(mc_dev, true);
+ if (error < 0)
+ goto dprc_cleanup;
+
+ /*
+ * Configure interrupt for the DPRC object associated with this MC bus:
+ */
+ error = dprc_setup_irq(mc_dev);
+ if (error < 0)
+ goto scan_cleanup;
+
+ dev_info(&mc_dev->dev, "DPRC device bound to driver");
+ return 0;
+
+scan_cleanup:
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+dprc_cleanup:
+ dprc_cleanup(mc_dev);
+ return error;
+}
+
+/*
+ * Tear down interrupt for a given DPRC object
+ */
+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ (void)disable_dprc_irq(mc_dev);
+
+ devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);
+
+ fsl_mc_free_irqs(mc_dev);
+}
+
+/**
+ * dprc_cleanup - function that cleanups a DPRC
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It closes the DPRC device in the MC.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+
+ /* this function should be called only for DPRCs; it
+ * is an error to call it for regular objects
+ */
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_dev);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
+ fsl_mc_cleanup_all_resource_pools(mc_dev);
+
+ /* if this step fails we cannot go further with cleanup as there is no way of
+ * communicating with the firmware
+ */
+ if (!mc_dev->mc_io) {
+ dev_err(&mc_dev->dev, "mc_io is NULL, tear down cannot be performed in firmware\n");
+ return -EINVAL;
+ }
+
+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ if (error < 0)
+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ } else {
+ fsl_mc_uapi_remove_device_file(mc_bus);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_cleanup);
+
+/**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It removes the DPRC's child objects from Linux (not from the MC) and
+ * closes the DPRC device in the MC.
+ * It tears down the interrupts that were configured for the DPRC device.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+static void dprc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (!mc_bus->irq_resources) {
+ dev_err(&mc_dev->dev, "No irq resources, so unbinding the device failed\n");
+ return;
+ }
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ dprc_cleanup(mc_dev);
+
+ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprc"},
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver dprc_driver = {
+ .driver = {
+ .name = FSL_MC_DPRC_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = dprc_probe,
+ .remove = dprc_remove,
+};
+
+int __init dprc_driver_init(void)
+{
+ return fsl_mc_driver_register(&dprc_driver);
+}
+
+void dprc_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&dprc_driver);
+}
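
dprc_driver above shows the registration pattern every fsl-mc object driver follows: a vendor/object-type match table plus probe/remove callbacks handed to fsl_mc_driver_register(). As a hypothetical skeleton only (all names are placeholders and not part of this patch), a driver binding to some other object type would look like:

static int example_probe(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "bound to %s.%d\n",
		 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
	return 0;
}

static void example_remove(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "unbound\n");
}

static const struct fsl_mc_device_id example_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpbp",	/* placeholder: object type to bind */
	},
	{ .vendor = 0x0 },
};

static struct fsl_mc_driver example_driver = {
	.driver = {
		.name = "example_fsl_mc_driver",
	},
	.match_id_table = example_match_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

/* Registered/unregistered with fsl_mc_driver_register()/_unregister(),
 * exactly as dprc_driver_init()/dprc_driver_exit() do above.
 */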
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
new file mode 100644
index 0000000000..d129338b8b
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * cache the DPRC version to reduce the number of commands
+ * towards the mc firmware
+ */
+static u16 dprc_major_ver;
+static u16 dprc_minor_ver;
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_open);
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_close);
+
+/**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @child_container_id: ID of the container to reset
+ * @options: 32 bit options:
+ * - 0 (no bits set) - all the objects inside the container are
+ * reset. The child containers are entered recursively and the
+ * objects reset. All the objects (including the child containers)
+ * are closed.
+ * - bit 0 set - all the objects inside the container are reset.
+ * However the child containers are not entered recursively.
+ * This option is supported for API versions >= 6.5
+ * In case a software context crashes or becomes non-responsive, the parent
+ * may wish to reset its resources container before the software context is
+ * restarted.
+ *
+ * This routine informs all objects assigned to the child container that the
+ * container is being reset, so they may perform any cleanup operations that are
+ * needed. All objects handles that were owned by the child container shall be
+ * closed.
+ *
+ * Note that such request may be submitted even if the child software context
+ * has not crashed, but the resulting object cleanup operations will not be
+ * aware of that.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
+ u32 cmdid = DPRC_CMDID_RESET_CONT;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ /*
+ * MC API 6.5 introduced a new field in the command used to pass
+ * some flags.
+ * Bit 0 indicates that the child containers are not recursively reset.
+ */
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 5))
+ cmdid = DPRC_CMDID_RESET_CONT_V2;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_reset_container);
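
A hypothetical sketch of the two reset flavours documented above (not part of this patch); the non-recursive behaviour relies on bit 0 of the options word, which is only honoured by MC firmware with API version >= 6.5:

/* Hypothetical sketch: reset a child container, optionally without
 * recursing into its own child containers.
 */
static int example_reset_child(struct fsl_mc_io *mc_io, u16 dprc_token,
			       int child_id, bool recurse)
{
	u32 options = recurse ? 0 : BIT(0);	/* bit 0: do not recurse */

	return dprc_reset_container(mc_io, 0, dprc_token, child_id, options);
}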
+
+/**
+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: Identifies the interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no cause will trigger
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPRC_ENABLE;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting irq
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_status *cmd_params;
+ struct dprc_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprc_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_attributes() - Obtains container attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @attr: Returned container attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+ attr->container_id = le32_to_cpu(rsp_params->container_id);
+ attr->icid = le32_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+
+ return 0;
+}
+
+/**
+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_count: Number of objects assigned to the DPRC
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_rsp_get_obj_count *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+ *obj_count = le32_to_cpu(rsp_params->obj_count);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_count);
+
+/**
+ * dprc_get_obj() - Get general information on an object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_index: Index of the object to be queried (< obj_count)
+ * @obj_desc: Returns the requested object descriptor
+ *
+ * The object descriptors are retrieved one by one by incrementing
+ * obj_index up to (not including) the value of obj_count returned
+ * from dprc_get_obj_count(). dprc_get_obj_count() must
+ * be called prior to dprc_get_obj().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj *cmd_params;
+ struct dprc_rsp_get_obj *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+ cmd_params->obj_index = cpu_to_le32(obj_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
+ obj_desc->type[15] = '\0';
+ strncpy(obj_desc->label, rsp_params->label, 16);
+ obj_desc->label[15] = '\0';
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj);
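
The enumeration pattern described in the dprc_get_obj() comment, reduced to a hypothetical sketch (not part of this patch); the real loop lives in dprc_scan_objects() in dprc-driver.c:

/* Hypothetical sketch: list every object in an already-opened DPRC. */
static int example_list_objects(struct fsl_mc_io *mc_io, u16 dprc_token)
{
	struct fsl_mc_obj_desc obj_desc;
	int i, obj_count, err;

	err = dprc_get_obj_count(mc_io, 0, dprc_token, &obj_count);
	if (err)
		return err;

	for (i = 0; i < obj_count; i++) {
		err = dprc_get_obj(mc_io, 0, dprc_token, i, &obj_desc);
		if (err)
			continue;	/* skip descriptors that cannot be read */
		pr_info("object %d: %s.%d\n", i, obj_desc.type, obj_desc.id);
	}

	return 0;
}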
+
+/**
+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Type of the object to set its IRQ
+ * @obj_id: ID of the object to set its IRQ
+ * @irq_index: The interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
+
+/**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Object type as returned in dprc_get_obj()
+ * @obj_id: Unique object instance as returned in dprc_get_obj()
+ * @region_index: The specific region to query
+ * @region_desc: Returns the requested region descriptor
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_region *cmd_params;
+ struct dprc_rsp_get_obj_region *rsp_params;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 6)) {
+ /*
+ * MC API version 6.6 changed the size of the MC portals and software
+ * portals to 64K (as implemented by hardware). If older API is in use the
+ * size reported is less (64 bytes for mc portals and 4K for software
+ * portals).
+ */
+
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V3,
+ cmd_flags, token);
+
+ } else if (dprc_major_ver == 6 && dprc_minor_ver >= 3) {
+ /*
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address. If the older API is in use then the base
+ * address is set to zero to indicate it needs to be obtained elsewhere
+ * (typically the device tree).
+ */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+ cmd_flags, token);
+ } else {
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+ }
+
+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->region_index = region_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
+ region_desc->size = le32_to_cpu(rsp_params->size);
+ region_desc->type = rsp_params->type;
+ region_desc->flags = le32_to_cpu(rsp_params->flags);
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
+ region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
+ else
+ region_desc->base_address = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_region);
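+
+/*
+ * Illustrative sketch (not part of the driver): a caller holding an open
+ * DPRC handle queries a child object's region roughly like this, where
+ * "dpio", object id 0 and region index 0 are made-up values:
+ *
+ *	struct dprc_region_desc desc;
+ *	int err = dprc_get_obj_region(mc_bus_dev->mc_io, 0,
+ *				      mc_bus_dev->mc_handle,
+ *				      "dpio", 0, 0, &desc);
+ *
+ * A zero desc.base_address afterwards means an older API was used and the
+ * base address must be obtained elsewhere, as described above.
+ */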
+
+/**
+ * dprc_get_api_version() - Get Data Path Resource Container API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Resource Container API
+ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dprc_get_container_id() - Get container ID associated with a given portal.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Returned container ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+ 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENOTCONN if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return -ENOTCONN;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
new file mode 100644
index 0000000000..b5e8c021fa
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fsl-mc object allocator driver
+ *
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
+{
+ return is_fsl_mc_bus_dpbp(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev) ||
+ is_fsl_mc_bus_dpcon(mc_dev);
+}
+
+/**
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
+ *
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
+ */
+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ *mc_bus,
+ enum fsl_mc_pool_type
+ pool_type,
+ struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+ if (!fsl_mc_is_allocatable(mc_dev))
+ goto out;
+ if (mc_dev->resource)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->type != pool_type)
+ goto out;
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count < 0)
+ goto out_unlock;
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
+ GFP_KERNEL);
+ if (!resource) {
+ error = -ENOMEM;
+ dev_err(&mc_bus_dev->dev,
+ "Failed to allocate memory for fsl_mc_resource\n");
+ goto out_unlock;
+ }
+
+ resource->type = pool_type;
+ resource->id = mc_dev->obj_desc.id;
+ resource->data = mc_dev;
+ resource->parent_pool = res_pool;
+ INIT_LIST_HEAD(&resource->node);
+ list_add_tail(&resource->node, &res_pool->free_list);
+ mc_dev->resource = resource;
+ res_pool->free_count++;
+ res_pool->max_count++;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+/**
+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+ * resource pool
+ *
+ * @mc_dev: pointer to allocatable fsl-mc device
+ *
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
+ */
+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ int error = -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ resource = mc_dev->resource;
+ if (!resource || resource->data != mc_dev) {
+ dev_err(&mc_bus_dev->dev, "resource mismatch\n");
+ goto out;
+ }
+
+ res_pool = resource->parent_pool;
+ if (res_pool != &mc_bus->resource_pools[resource->type]) {
+ dev_err(&mc_bus_dev->dev, "pool mismatch\n");
+ goto out;
+ }
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count <= 0) {
+ dev_err(&mc_bus_dev->dev, "max_count underflow\n");
+ goto out_unlock;
+ }
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count) {
+ dev_err(&mc_bus_dev->dev, "free_count mismatch\n");
+ goto out_unlock;
+ }
+
+ /*
+ * If the device is currently allocated, its resource is not
+ * in the free list and thus, the device cannot be removed.
+ */
+ if (list_empty(&resource->node)) {
+ error = -EBUSY;
+ dev_err(&mc_bus_dev->dev,
+ "Device %s cannot be removed from resource pool\n",
+ dev_name(&mc_dev->dev));
+ goto out_unlock;
+ }
+
+ list_del_init(&resource->node);
+ res_pool->free_count--;
+ res_pool->max_count--;
+
+ devm_kfree(&mc_bus_dev->dev, resource);
+ mc_dev->resource = NULL;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+static const char *const fsl_mc_pool_type_strings[] = {
+ [FSL_MC_POOL_DPMCP] = "dpmcp",
+ [FSL_MC_POOL_DPBP] = "dpbp",
+ [FSL_MC_POOL_DPCON] = "dpcon",
+ [FSL_MC_POOL_IRQ] = "irq",
+};
+
+static int __must_check object_type_to_pool_type(const char *object_type,
+ enum fsl_mc_pool_type
+ *pool_type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
+ *pool_type = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource **new_resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
+ FSL_MC_NUM_POOL_TYPES);
+
+ *new_resource = NULL;
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+ resource = list_first_entry_or_null(&res_pool->free_list,
+ struct fsl_mc_resource, node);
+
+ if (!resource) {
+ error = -ENXIO;
+ dev_err(&mc_bus_dev->dev,
+ "No more resources of type %s left\n",
+ fsl_mc_pool_type_strings[pool_type]);
+ goto out_unlock;
+ }
+
+ if (resource->type != pool_type)
+ goto out_unlock;
+ if (resource->parent_pool != res_pool)
+ goto out_unlock;
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ list_del_init(&resource->node);
+
+ res_pool->free_count--;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+ *new_resource = resource;
+out:
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+
+ res_pool = resource->parent_pool;
+ if (resource->type != res_pool->type)
+ return;
+
+ mutex_lock(&res_pool->mutex);
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count >= res_pool->max_count)
+ goto out_unlock;
+
+ if (!list_empty(&resource->node))
+ goto out_unlock;
+
+ list_add_tail(&resource->node, &res_pool->free_list);
+ res_pool->free_count++;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+
+/**
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
+ *
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_adev: pointer to area where the pointer to the allocated device
+ * is to be returned
+ *
+ * Allocatable objects are always used in conjunction with some functional
+ * device. This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
+ *
+ * NOTE: pool_type must be different from FSL_MC_POOL_DPMCP, since MC
+ * portals are allocated using fsl_mc_portal_allocate(), instead of
+ * this function.
+ */
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device *mc_adev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+
+ *new_mc_adev = NULL;
+ if (mc_dev->flags & FSL_MC_IS_DPRC)
+ goto error;
+
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ goto error;
+
+ if (pool_type == FSL_MC_POOL_DPMCP)
+ goto error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
+ if (error < 0)
+ goto error;
+
+ mc_adev = resource->data;
+ if (!mc_adev) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ mc_adev->consumer_link = device_link_add(&mc_dev->dev,
+ &mc_adev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!mc_adev->consumer_link) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ *new_mc_adev = mc_adev;
+ return 0;
+error:
+ if (resource)
+ fsl_mc_resource_free(resource);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
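+
+/*
+ * Illustrative usage sketch (not taken from an in-tree driver): a functional
+ * driver, for instance one bound to a dpni object, would allocate a DPBP
+ * from its parent container in probe() and return it on remove():
+ *
+ *	struct fsl_mc_device *dpbp_dev;
+ *	int err = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
+ *
+ *	if (err)
+ *		return err;
+ *	...
+ *	fsl_mc_object_free(dpbp_dev);
+ */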
+
+/**
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
+ */
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+{
+ struct fsl_mc_resource *resource;
+
+ resource = mc_adev->resource;
+ if (resource->type == FSL_MC_POOL_DPMCP)
+ return;
+ if (resource->data != mc_adev)
+ return;
+
+ fsl_mc_resource_free(resource);
+
+ mc_adev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
+/*
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID. A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
+ */
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count)
+{
+ unsigned int i;
+ struct fsl_mc_device_irq *irq_resources;
+ struct fsl_mc_device_irq *mc_dev_irq;
+ int error;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ /* do nothing if the IRQ pool is already populated */
+ if (mc_bus->irq_resources)
+ return 0;
+
+ if (irq_count == 0 ||
+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
+ return -EINVAL;
+
+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+ if (error < 0)
+ return error;
+
+ irq_resources = devm_kcalloc(&mc_bus_dev->dev,
+ irq_count, sizeof(*irq_resources),
+ GFP_KERNEL);
+ if (!irq_resources) {
+ error = -ENOMEM;
+ goto cleanup_msi_irqs;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ mc_dev_irq = &irq_resources[i];
+
+ /*
+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+ * by the fsl_mc_msi_write_msg() callback
+ */
+ mc_dev_irq->resource.type = res_pool->type;
+ mc_dev_irq->resource.data = mc_dev_irq;
+ mc_dev_irq->resource.parent_pool = res_pool;
+ mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
+ mc_dev_irq->resource.id = mc_dev_irq->virq;
+ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+ }
+
+ res_pool->max_count = irq_count;
+ res_pool->free_count = irq_count;
+ mc_bus->irq_resources = irq_resources;
+ return 0;
+
+cleanup_msi_irqs:
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
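+
+/*
+ * Sketch of the expected caller (the DPRC driver, when scanning a
+ * container), using the pool limit referenced above:
+ *
+ *	err = fsl_mc_populate_irq_pool(mc_bus_dev,
+ *				       FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ */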
+
+/*
+ * Teardown the interrupt pool associated with an fsl-mc bus.
+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ if (res_pool->max_count == 0)
+ return;
+
+ if (res_pool->free_count != res_pool->max_count)
+ return;
+
+ INIT_LIST_HEAD(&res_pool->free_list);
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ mc_bus->irq_resources = NULL;
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+/*
+ * Allocate the IRQs required by a given fsl-mc device.
+ */
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ int res_allocated_count = 0;
+ int error = -EINVAL;
+ struct fsl_mc_device_irq **irqs = NULL;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+
+ if (mc_dev->irqs)
+ return -EINVAL;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+ if (irq_count == 0)
+ return -EINVAL;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ if (res_pool->free_count < irq_count) {
+ dev_err(&mc_dev->dev,
+ "Not able to allocate %u irqs for device\n", irq_count);
+ return -ENOSPC;
+ }
+
+ irqs = devm_kcalloc(&mc_dev->dev, irq_count, sizeof(irqs[0]),
+ GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_count; i++) {
+ struct fsl_mc_resource *resource;
+
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+ &resource);
+ if (error < 0)
+ goto error_resource_alloc;
+
+ irqs[i] = to_fsl_mc_irq(resource);
+ res_allocated_count++;
+
+ irqs[i]->mc_dev = mc_dev;
+ irqs[i]->dev_irq_index = i;
+ }
+
+ mc_dev->irqs = irqs;
+ return 0;
+
+error_resource_alloc:
+ for (i = 0; i < res_allocated_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
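+
+/*
+ * Illustrative sketch (handler name is hypothetical): after a successful
+ * call, each entry of mc_dev->irqs[] carries a Linux virq that can be wired
+ * up with the usual request_irq() machinery:
+ *
+ *	err = fsl_mc_allocate_irqs(mc_dev);
+ *	if (err)
+ *		return err;
+ *
+ *	err = devm_request_irq(&mc_dev->dev, mc_dev->irqs[0]->virq,
+ *			       my_irq_handler, IRQF_NO_SUSPEND,
+ *			       dev_name(&mc_dev->dev), mc_dev);
+ */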
+
+/*
+ * Frees the IRQs that were allocated for an fsl-mc device.
+ */
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
+ if (!irqs)
+ return;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ for (i = 0; i < irq_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ mc_dev->irqs = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ res_pool->type = pool_type;
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ res_pool->mc_bus = mc_bus;
+ INIT_LIST_HEAD(&res_pool->free_list);
+ mutex_init(&res_pool->mutex);
+ }
+}
+
+static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
+ enum fsl_mc_pool_type pool_type)
+{
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_resource *next;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
+ devm_kfree(&mc_bus_dev->dev, resource);
+}
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
+ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+}
+
+/*
+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
+ * being added to the system
+ */
+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+{
+ enum fsl_mc_pool_type pool_type;
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ return -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
+ return -EINVAL;
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
+ if (error < 0)
+ return error;
+
+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
+ if (error < 0)
+ return error;
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
+ return 0;
+}
+
+/*
+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is
+ * being removed from the system
+ */
+static void fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ if (mc_dev->resource) {
+ error = fsl_mc_resource_pool_remove_device(mc_dev);
+ if (error < 0)
+ return;
+ }
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpbp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmcp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpcon",
+ },
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver fsl_mc_allocator_driver = {
+ .driver = {
+ .name = "fsl_mc_allocator",
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = fsl_mc_allocator_probe,
+ .remove = fsl_mc_allocator_remove,
+};
+
+int __init fsl_mc_allocator_driver_init(void)
+{
+ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+}
+
+void fsl_mc_allocator_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
new file mode 100644
index 0000000000..2f6d5002e4
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -0,0 +1,1303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * Default DMA mask for devices on a fsl-mc bus
+ */
+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+
+static struct fsl_mc_version mc_version;
+
+/**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ * @fsl_mc_regs: base address of register bank
+ */
+struct fsl_mc {
+ struct fsl_mc_device *root_mc_bus_dev;
+ u8 num_translation_ranges;
+ struct fsl_mc_addr_translation_range *translation_ranges;
+ void __iomem *fsl_mc_regs;
+};
+
+/**
+ * struct fsl_mc_addr_translation_range - bus to system address translation
+ * range
+ * @mc_region_type: Type of MC region for the range being translated
+ * @start_mc_offset: Start MC offset of the range being translated
+ * @end_mc_offset: MC offset of the first byte after the range (last MC
+ * offset of the range is end_mc_offset - 1)
+ * @start_phys_addr: system physical address corresponding to start_mc_offset
+ */
+struct fsl_mc_addr_translation_range {
+ enum dprc_region_type mc_region_type;
+ u64 start_mc_offset;
+ u64 end_mc_offset;
+ phys_addr_t start_phys_addr;
+};
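+
+/*
+ * Worked example (numbers are made up): a range with start_mc_offset
+ * 0x20000000, end_mc_offset 0x28000000 and start_phys_addr 0x80c000000
+ * translates MC offset 0x20040000 to physical address 0x80c040000, i.e.
+ * phys = start_phys_addr + (mc_offset - start_mc_offset).
+ */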
+
+#define FSL_MC_GCR1 0x0
+#define GCR1_P1_STOP BIT(31)
+#define GCR1_P2_STOP BIT(30)
+
+#define FSL_MC_FAPR 0x28
+#define MC_FAPR_PL BIT(18)
+#define MC_FAPR_BMT BIT(17)
+
+static phys_addr_t mc_portal_base_phys_addr;
+
+/**
+ * fsl_mc_bus_match - device to driver matching callback
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
+ * structures
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ const struct fsl_mc_device_id *id;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (mc_dev->driver_override) {
+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
+ }
+
+ if (!mc_drv->match_id_table)
+ goto out;
+
+ /*
+ * If the object is not 'plugged', don't match.
+ * The only exception is the root DPRC, which is a special case.
+ */
+ if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
+ !fsl_mc_is_root_dprc(&mc_dev->dev))
+ goto out;
+
+ /*
+ * Traverse the match_id table of the given driver, trying to find
+ * a match for the given device.
+ */
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
+ found = true;
+
+ break;
+ }
+ }
+
+out:
+ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
+ return found;
+}
+
+/*
+ * fsl_mc_bus_uevent - callback invoked when a device is added
+ */
+static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+ mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type))
+ return -ENOMEM;
+
+ return 0;
+}
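+
+/*
+ * For example, assuming the Freescale vendor ID 0x1957, a dpni object
+ * produces the uevent string "MODALIAS=fsl-mc:v00001957ddpni"; the modalias
+ * sysfs attribute below emits the same format.
+ */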
+
+static int fsl_mc_dma_configure(struct device *dev)
+{
+ struct device *dma_dev = dev;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ u32 input_id = mc_dev->icid;
+ int ret;
+
+ while (dev_is_fsl_mc(dma_dev))
+ dma_dev = dma_dev->parent;
+
+ if (dev_of_node(dma_dev))
+ ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+ else
+ ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+
+ if (!ret && !mc_drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return ret;
+}
+
+static void fsl_mc_dma_cleanup(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+
+ if (!mc_drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int ret;
+
+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
+ ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
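+
+/*
+ * Illustrative use of the attribute (device path is an example): writing a
+ * driver name such as "vfio-fsl-mc" to
+ * /sys/bus/fsl-mc/devices/dprc.1/driver_override makes the device bind only
+ * to that driver on the next probe; writing an empty string clears the
+ * override.
+ */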
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_dev);
+
+static int scan_fsl_mc_bus(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ struct fsl_mc_bus *root_mc_bus;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
+ mutex_lock(&root_mc_bus->scan_mutex);
+ dprc_scan_objects(root_mc_dev, false);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+
+exit:
+ return 0;
+}
+
+static ssize_t rescan_store(const struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static int fsl_mc_bus_set_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ unsigned long val;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ enable_dprc_irq(root_mc_dev);
+ else
+ disable_dprc_irq(root_mc_dev);
+
+exit:
+ return 0;
+}
+
+static int fsl_mc_bus_get_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ sprintf(buf, "%d\n", get_dprc_irq_state(root_mc_dev));
+exit:
+ return 0;
+}
+
+static ssize_t autorescan_store(const struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
+
+ return count;
+}
+
+static ssize_t autorescan_show(const struct bus_type *bus, char *buf)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
+ return strlen(buf);
+}
+
+static BUS_ATTR_RW(autorescan);
+
+static struct attribute *fsl_mc_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ &bus_attr_autorescan.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_bus);
+
+struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dma_configure = fsl_mc_dma_configure,
+ .dma_cleanup = fsl_mc_dma_cleanup,
+ .dev_groups = fsl_mc_dev_groups,
+ .bus_groups = fsl_mc_bus_groups,
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
+struct device_type fsl_mc_bus_dprc_type = {
+ .name = "fsl_mc_bus_dprc"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
+
+struct device_type fsl_mc_bus_dpni_type = {
+ .name = "fsl_mc_bus_dpni"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
+
+struct device_type fsl_mc_bus_dpio_type = {
+ .name = "fsl_mc_bus_dpio"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
+
+struct device_type fsl_mc_bus_dpsw_type = {
+ .name = "fsl_mc_bus_dpsw"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
+
+struct device_type fsl_mc_bus_dpbp_type = {
+ .name = "fsl_mc_bus_dpbp"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
+
+struct device_type fsl_mc_bus_dpcon_type = {
+ .name = "fsl_mc_bus_dpcon"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
+
+struct device_type fsl_mc_bus_dpmcp_type = {
+ .name = "fsl_mc_bus_dpmcp"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
+
+struct device_type fsl_mc_bus_dpmac_type = {
+ .name = "fsl_mc_bus_dpmac"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
+
+struct device_type fsl_mc_bus_dprtc_type = {
+ .name = "fsl_mc_bus_dprtc"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
+
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
+struct device_type fsl_mc_bus_dpdbg_type = {
+ .name = "fsl_mc_bus_dpdbg"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
+
+static struct device_type *fsl_mc_get_device_type(const char *type)
+{
+ static const struct {
+ struct device_type *dev_type;
+ const char *type;
+ } dev_types[] = {
+ { &fsl_mc_bus_dprc_type, "dprc" },
+ { &fsl_mc_bus_dpni_type, "dpni" },
+ { &fsl_mc_bus_dpio_type, "dpio" },
+ { &fsl_mc_bus_dpsw_type, "dpsw" },
+ { &fsl_mc_bus_dpbp_type, "dpbp" },
+ { &fsl_mc_bus_dpcon_type, "dpcon" },
+ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
+ { &fsl_mc_bus_dpmac_type, "dpmac" },
+ { &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
+ { &fsl_mc_bus_dpdbg_type, "dpdbg" },
+ { NULL, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_types[i].dev_type; i++)
+ if (!strcmp(dev_types[i].type, type))
+ return dev_types[i].dev_type;
+
+ return NULL;
+}
+
+static int fsl_mc_driver_probe(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int error;
+
+ mc_drv = to_fsl_mc_driver(dev->driver);
+
+ error = mc_drv->probe(mc_dev);
+ if (error < 0) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int fsl_mc_driver_remove(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ mc_drv->remove(mc_dev);
+
+ return 0;
+}
+
+static void fsl_mc_driver_shutdown(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ mc_drv->shutdown(mc_dev);
+}
+
+/*
+ * __fsl_mc_driver_register - registers a child device driver with the
+ * MC bus
+ *
+ * This function is implicitly invoked from the registration function of
+ * fsl_mc device drivers, which is generated by the
+ * module_fsl_mc_driver() macro.
+ */
+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+ struct module *owner)
+{
+ int error;
+
+ mc_driver->driver.owner = owner;
+ mc_driver->driver.bus = &fsl_mc_bus_type;
+
+ if (mc_driver->probe)
+ mc_driver->driver.probe = fsl_mc_driver_probe;
+
+ if (mc_driver->remove)
+ mc_driver->driver.remove = fsl_mc_driver_remove;
+
+ if (mc_driver->shutdown)
+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
+
+ error = driver_register(&mc_driver->driver);
+ if (error < 0) {
+ pr_err("driver_register() failed for %s: %d\n",
+ mc_driver->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
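+
+/*
+ * Minimal registration sketch (driver and callback names are hypothetical);
+ * the module_fsl_mc_driver() macro mentioned above expands to the usual
+ * module init/exit boilerplate around this registration:
+ *
+ *	static const struct fsl_mc_device_id my_match_id_table[] = {
+ *		{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
+ *		{ .vendor = 0x0 },
+ *	};
+ *
+ *	static struct fsl_mc_driver my_driver = {
+ *		.driver = { .name = "my-dpni-driver" },
+ *		.match_id_table = my_match_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *	module_fsl_mc_driver(my_driver);
+ */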
+
+/*
+ * fsl_mc_driver_unregister - unregisters a device driver from the
+ * MC bus
+ */
+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+{
+ driver_unregister(&mc_driver->driver);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+static int mc_get_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ struct fsl_mc_version *mc_ver_info)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * fsl_mc_get_version - function to retrieve the MC f/w version information
+ *
+ * Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
+ */
+struct fsl_mc_version *fsl_mc_get_version(void)
+{
+ if (mc_version.major)
+ return &mc_version;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_version);
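+
+/*
+ * Illustrative check (version numbers are arbitrary): a consumer that
+ * depends on a firmware feature can gate it on the cached version:
+ *
+ *	struct fsl_mc_version *ver = fsl_mc_get_version();
+ *
+ *	if (ver && (ver->major > 10 ||
+ *		    (ver->major == 10 && ver->minor >= 14)))
+ *		use_new_feature = true;
+ */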
+
+/*
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
+{
+ if (!dev) {
+ *root_dprc_dev = NULL;
+ } else if (!dev_is_fsl_mc(dev)) {
+ *root_dprc_dev = NULL;
+ } else {
+ *root_dprc_dev = dev;
+ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
+ *root_dprc_dev = (*root_dprc_dev)->parent;
+ }
+}
+
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
+{
+ u16 dprc_handle;
+ int error;
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
+ return error;
+ }
+
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto common_cleanup;
+ }
+
+ error = 0;
+
+common_cleanup:
+ (void)dprc_close(mc_io, 0, dprc_handle);
+ return error;
+}
+
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u32 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+{
+ int i;
+ struct device *root_dprc_dev;
+ struct fsl_mc *mc;
+
+ fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
+ mc = dev_get_drvdata(root_dprc_dev->parent);
+
+ if (mc->num_translation_ranges == 0) {
+ /*
+ * Do identity mapping:
+ */
+ *phys_addr = mc_offset;
+ return 0;
+ }
+
+ for (i = 0; i < mc->num_translation_ranges; i++) {
+ struct fsl_mc_addr_translation_range *range =
+ &mc->translation_ranges[i];
+
+ if (mc_region_type == range->mc_region_type &&
+ mc_offset >= range->start_mc_offset &&
+ mc_offset < range->end_mc_offset) {
+ *phys_addr = range->start_phys_addr +
+ (mc_offset - range->start_mc_offset);
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ int i;
+ int error;
+ struct resource *regions;
+ struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
+ struct device *parent_dev = mc_dev->dev.parent;
+ enum dprc_region_type mc_region_type;
+
+ if (is_fsl_mc_bus_dprc(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
+ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
+ } else {
+ /*
+ * This function should not have been called for this MC object
+ * type, as this object type is not supposed to have MMIO
+ * regions
+ */
+ return -EINVAL;
+ }
+
+ regions = kmalloc_array(obj_desc->region_count,
+ sizeof(regions[0]), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ for (i = 0; i < obj_desc->region_count; i++) {
+ struct dprc_region_desc region_desc;
+
+ error = dprc_get_obj_region(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ obj_desc->type,
+ obj_desc->id, i, &region_desc);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "dprc_get_obj_region() failed: %d\n", error);
+ goto error_cleanup_regions;
+ }
+ /*
+ * Older MC firmware only returned the region offset, with no base
+ * address. If a base address is present in region_desc, use it;
+ * otherwise fall back to the old mechanism.
+ */
+ if (region_desc.base_address) {
+ regions[i].start = region_desc.base_address +
+ region_desc.base_offset;
+ } else {
+ error = translate_mc_addr(mc_dev, mc_region_type,
+ region_desc.base_offset,
+ &regions[i].start);
+
+ /*
+ * Some versions of the MC firmware wrongly report
+ * 0 for register base address of the DPMCP associated
+ * with child DPRC objects thus rendering them unusable.
+ * This is particularly troublesome in ACPI boot
+ * scenarios where the legacy way of extracting this
+ * base address from the device tree does not apply.
+ * Given that DPMCPs share the same base address,
+ * workaround this by using the base address extracted
+ * from the root DPRC container.
+ */
+ if (is_fsl_mc_bus_dprc(mc_dev) &&
+ regions[i].start == region_desc.base_offset)
+ regions[i].start += mc_portal_base_phys_addr;
+ }
+
+ if (error < 0) {
+ dev_err(parent_dev,
+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
+ region_desc.base_offset,
+ obj_desc->type, obj_desc->id, i);
+ goto error_cleanup_regions;
+ }
+
+ regions[i].end = regions[i].start + region_desc.size - 1;
+ regions[i].name = "fsl-mc object MMIO region";
+ regions[i].flags = region_desc.flags & IORESOURCE_BITS;
+ regions[i].flags |= IORESOURCE_MEM;
+ }
+
+ mc_dev->regions = regions;
+ return 0;
+
+error_cleanup_regions:
+ kfree(regions);
+ return error;
+}
+
+/*
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+ struct device *root_dprc_dev;
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+ return dev == root_dprc_dev;
+}
+
+static void fsl_mc_device_release(struct device *dev)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ kfree(mc_dev->regions);
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ kfree(to_fsl_mc_bus(mc_dev));
+ else
+ kfree(mc_dev);
+}
+
+/*
+ * Add a newly discovered fsl-mc device to be visible in Linux
+ */
+int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev)
+{
+ int error;
+ struct fsl_mc_device *mc_dev = NULL;
+ struct fsl_mc_bus *mc_bus = NULL;
+ struct fsl_mc_device *parent_mc_dev;
+
+ if (dev_is_fsl_mc(parent_dev))
+ parent_mc_dev = to_fsl_mc_device(parent_dev);
+ else
+ parent_mc_dev = NULL;
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ /*
+ * Allocate an MC bus device object:
+ */
+ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
+ if (!mc_bus)
+ return -ENOMEM;
+
+ mutex_init(&mc_bus->scan_mutex);
+ mc_dev = &mc_bus->mc_dev;
+ } else {
+ /*
+ * Allocate a regular fsl_mc_device object:
+ */
+ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
+ if (!mc_dev)
+ return -ENOMEM;
+ }
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
+ mc_dev->dev.release = fsl_mc_device_release;
+ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
+ if (!mc_dev->dev.type) {
+ error = -ENODEV;
+ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
+ goto error_cleanup_dev;
+ }
+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ struct fsl_mc_io *mc_io2;
+
+ mc_dev->flags |= FSL_MC_IS_DPRC;
+
+ /*
+ * To get the DPRC's ICID, we need to open the DPRC
+ * in get_dprc_icid(). For child DPRCs, we do so using the
+ * parent DPRC's MC portal instead of the child DPRC's MC
+ * portal, in case the child DPRC is already opened with
+ * its own portal (e.g., the DPRC used by AIOP).
+ *
+ * NOTE: There cannot be more than one active open for a
+ * given MC object, using the same MC portal.
+ */
+ if (parent_mc_dev) {
+ /*
+ * device being added is a child DPRC device
+ */
+ mc_io2 = parent_mc_dev->mc_io;
+ } else {
+ /*
+ * device being added is the root DPRC device
+ */
+ if (!mc_io) {
+ error = -EINVAL;
+ goto error_cleanup_dev;
+ }
+
+ mc_io2 = mc_io;
+ }
+
+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+ if (error < 0)
+ goto error_cleanup_dev;
+ } else {
+ /*
+ * A non-DPRC object has to be a child of a DPRC, use the
+ * parent's ICID and interrupt domain.
+ */
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(&parent_mc_dev->dev));
+ }
+
+ /*
+ * Get MMIO regions for the device from the MC:
+ *
+ * NOTE: the root DPRC is a special case as its MMIO region is
+ * obtained from the device tree
+ */
+ if (parent_mc_dev && obj_desc->region_count != 0) {
+ error = fsl_mc_device_get_mmio_regions(mc_dev,
+ parent_mc_dev);
+ if (error < 0)
+ goto error_cleanup_dev;
+ }
+
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+ */
+ error = device_add(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "device_add() failed for device %s: %d\n",
+ dev_name(&mc_dev->dev), error);
+ goto error_cleanup_dev;
+ }
+
+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
+
+ *new_mc_dev = mc_dev;
+ return 0;
+
+error_cleanup_dev:
+ kfree(mc_dev->regions);
+ kfree(mc_bus);
+ kfree(mc_dev);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+
+static struct notifier_block fsl_mc_nb;
+
+/**
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
+ * Linux
+ *
+ * @mc_dev: Pointer to an fsl-mc device
+ */
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+{
+ kfree(mc_dev->driver_override);
+ mc_dev->driver_override = NULL;
+
+ /*
+ * The device-specific remove callback will get invoked by device_del()
+ */
+ device_del(&mc_dev->dev);
+ put_device(&mc_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id)
+{
+ struct fsl_mc_device *mc_bus_dev, *endpoint;
+ struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+ struct dprc_endpoint endpoint1 = {{ 0 }};
+ struct dprc_endpoint endpoint2 = {{ 0 }};
+ int state, err;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ strcpy(endpoint1.type, mc_dev->obj_desc.type);
+ endpoint1.id = mc_dev->obj_desc.id;
+ endpoint1.if_id = if_id;
+
+ err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+ mc_bus_dev->mc_handle,
+ &endpoint1, &endpoint2,
+ &state);
+
+ if (err == -ENOTCONN || state == -1)
+ return ERR_PTR(-ENOTCONN);
+
+ if (err < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ strcpy(endpoint_desc.type, endpoint2.type);
+ endpoint_desc.id = endpoint2.id;
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+ /*
+ * We know from interrogating the firmware that the device has an
+ * endpoint; a NULL lookup result only means the endpoint has not yet
+ * been discovered by the fsl-mc bus. Force a rescan of the devices in
+ * this container and retry the lookup.
+ */
+ if (!endpoint) {
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
+ }
+
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * If the endpoint is still not found, it might reside in a different
+ * isolation context (DPRC/container); there is not much we can do, so
+ * return a permission error.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPERM);
+
+ return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
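+
+/*
+ * Illustrative sketch: a driver bound to one end of a connection (e.g. a
+ * dpni looking up its dpmac peer on interface 0) resolves the other end and
+ * must be prepared for an ERR_PTR() return:
+ *
+ *	struct fsl_mc_device *peer = fsl_mc_get_endpoint(mc_dev, 0);
+ *
+ *	if (IS_ERR(peer))
+ *		return PTR_ERR(peer);
+ */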
+
+static int get_mc_addr_translation_ranges(struct device *dev,
+ struct fsl_mc_addr_translation_range
+ **ranges,
+ u8 *num_ranges)
+{
+ struct fsl_mc_addr_translation_range *r;
+ struct of_range_parser parser;
+ struct of_range range;
+
+ of_range_parser_init(&parser, dev->of_node);
+ *num_ranges = of_range_count(&parser);
+ if (!*num_ranges) {
+ /*
+ * Missing or empty ranges property ("ranges;") for the
+ * 'fsl,qoriq-mc' node. In this case, identity mapping
+ * will be used.
+ */
+ *ranges = NULL;
+ return 0;
+ }
+
+ *ranges = devm_kcalloc(dev, *num_ranges,
+ sizeof(struct fsl_mc_addr_translation_range),
+ GFP_KERNEL);
+ if (!(*ranges))
+ return -ENOMEM;
+
+ r = *ranges;
+ for_each_of_range(&parser, &range) {
+ r->mc_region_type = range.flags;
+ r->start_mc_offset = range.bus_addr;
+ r->end_mc_offset = range.bus_addr + range.size;
+ r->start_phys_addr = range.cpu_addr;
+ r++;
+ }
+
+ return 0;
+}
+
+/*
+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being
+ * added
+ */
+static int fsl_mc_bus_probe(struct platform_device *pdev)
+{
+ struct fsl_mc_obj_desc obj_desc;
+ int error;
+ struct fsl_mc *mc;
+ struct fsl_mc_device *mc_bus_dev = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+ int container_id;
+ phys_addr_t mc_portal_phys_addr;
+ u32 mc_portal_size, mc_stream_id;
+ struct resource *plat_res;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mc);
+
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (plat_res) {
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+ if (IS_ERR(mc->fsl_mc_regs))
+ return PTR_ERR(mc->fsl_mc_regs);
+ }
+
+ if (mc->fsl_mc_regs) {
+ if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+ mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+ /*
+ * HW ORs the PL and BMT bits, places the result in bit
+ * 14 of the StreamID and ORs in the ICID. Calculate it
+ * accordingly.
+ */
+ mc_stream_id = (mc_stream_id & 0xffff) |
+ ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+ BIT(14) : 0);
+ error = acpi_dma_configure_id(&pdev->dev,
+ DEV_DMA_COHERENT,
+ &mc_stream_id);
+ if (error == -EPROBE_DEFER)
+ return error;
+ if (error)
+ dev_warn(&pdev->dev,
+ "failed to configure dma: %d.\n",
+ error);
+ }
+
+ /*
+ * Some bootloaders pause the MC firmware before booting the
+ * kernel so that the MC does not cause faults as soon as the
+ * SMMU probes, because no configuration is in place for the MC
+ * yet.
+ * At this point the MC should have all its SMMU setup done, so
+ * make sure it is resumed.
+ */
+ writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) &
+ (~(GCR1_P1_STOP | GCR1_P2_STOP)),
+ mc->fsl_mc_regs + FSL_MC_GCR1);
+ }
+
+ /*
+ * Get physical address of MC portal for the root DPRC:
+ */
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mc_portal_phys_addr = plat_res->start;
+ mc_portal_size = resource_size(plat_res);
+ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
+
+ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
+ mc_portal_size, NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
+ if (error < 0)
+ return error;
+
+ error = mc_get_version(mc_io, 0, &mc_version);
+ if (error != 0) {
+ dev_err(&pdev->dev,
+ "mc_get_version() failed with error %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+ if (dev_of_node(&pdev->dev)) {
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+ }
+
+ error = dprc_get_container_id(mc_io, 0, &container_id);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+ "dprc_get_container_id() failed: %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
+ error = dprc_get_api_version(mc_io, 0,
+ &obj_desc.ver_major,
+ &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
+ strcpy(obj_desc.type, "dprc");
+ obj_desc.id = container_id;
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ mc->root_mc_bus_dev = mc_bus_dev;
+ mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/*
+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being
+ * removed
+ */
+static int fsl_mc_bus_remove(struct platform_device *pdev)
+{
+ struct fsl_mc *mc = platform_get_drvdata(pdev);
+ struct fsl_mc_io *mc_io;
+
+ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
+ return -EINVAL;
+
+ mc_io = mc->root_mc_bus_dev->mc_io;
+ fsl_mc_device_remove(mc->root_mc_bus_dev);
+ fsl_destroy_mc_io(mc_io);
+
+ bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
+
+ if (mc->fsl_mc_regs) {
+ /*
+ * Pause the MC firmware so that it doesn't crash in certain
+ * scenarios, such as kexec.
+ */
+ writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) |
+ (GCR1_P1_STOP | GCR1_P2_STOP),
+ mc->fsl_mc_regs + FSL_MC_GCR1);
+ }
+
+ return 0;
+}
+
+static void fsl_mc_bus_shutdown(struct platform_device *pdev)
+{
+ fsl_mc_bus_remove(pdev);
+}
+
+static const struct of_device_id fsl_mc_bus_match_table[] = {
+ {.compatible = "fsl,qoriq-mc",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+ {"NXP0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
+static struct platform_driver fsl_mc_bus_driver = {
+ .driver = {
+ .name = "fsl_mc_bus",
+ .pm = NULL,
+ .of_match_table = fsl_mc_bus_match_table,
+ .acpi_match_table = fsl_mc_bus_acpi_match_table,
+ },
+ .probe = fsl_mc_bus_probe,
+ .remove = fsl_mc_bus_remove,
+ .shutdown = fsl_mc_bus_shutdown,
+};
+
+static int fsl_mc_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct resource *res;
+ void __iomem *fsl_mc_regs;
+
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ if (!of_match_device(fsl_mc_bus_match_table, dev) &&
+ !acpi_match_device(fsl_mc_bus_acpi_match_table, dev))
+ return 0;
+
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
+ if (!res)
+ return 0;
+
+ fsl_mc_regs = ioremap(res->start, resource_size(res));
+ if (!fsl_mc_regs)
+ return 0;
+
+ /*
+ * Make sure that the MC firmware is paused before the IOMMU setup for
+ * it is done or otherwise the firmware will crash right after the SMMU
+ * gets probed and enabled.
+ */
+ writel(readl(fsl_mc_regs + FSL_MC_GCR1) | (GCR1_P1_STOP | GCR1_P2_STOP),
+ fsl_mc_regs + FSL_MC_GCR1);
+ iounmap(fsl_mc_regs);
+
+ return 0;
+}
+
+static struct notifier_block fsl_mc_nb = {
+ .notifier_call = fsl_mc_bus_notifier,
+};
+
+static int __init fsl_mc_bus_driver_init(void)
+{
+ int error;
+
+ error = bus_register(&fsl_mc_bus_type);
+ if (error < 0) {
+ pr_err("bus type registration failed: %d\n", error);
+ goto error_cleanup_cache;
+ }
+
+ error = platform_driver_register(&fsl_mc_bus_driver);
+ if (error < 0) {
+ pr_err("platform_driver_register() failed: %d\n", error);
+ goto error_cleanup_bus;
+ }
+
+ error = dprc_driver_init();
+ if (error < 0)
+ goto error_cleanup_driver;
+
+ error = fsl_mc_allocator_driver_init();
+ if (error < 0)
+ goto error_cleanup_dprc_driver;
+
+ return bus_register_notifier(&platform_bus_type, &fsl_mc_nb);
+
+error_cleanup_dprc_driver:
+ dprc_driver_exit();
+
+error_cleanup_driver:
+ platform_driver_unregister(&fsl_mc_bus_driver);
+
+error_cleanup_bus:
+ bus_unregister(&fsl_mc_bus_type);
+
+error_cleanup_cache:
+ return error;
+}
+postcore_initcall(fsl_mc_bus_driver_init);
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
new file mode 100644
index 0000000000..82cd69f788
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/acpi_iort.h>
+
+#include "fsl-mc-private.h"
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain): combine the ICID with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+ struct msi_desc *desc)
+{
+ /*
+ * Make the base hwirq value ICID * 10000 so it is readable
+ * as a decimal value in /proc/interrupts.
+ */
+ return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
+}
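+
+/*
+ * For example, ICID 27 and MSI index 2 yield hwirq 270002
+ * (27 * 10000 + 2), which is what shows up in /proc/interrupts.
+ */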
+
+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+ desc);
+}
+#else
+#define fsl_mc_msi_set_desc NULL
+#endif
+
+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (!ops)
+ return;
+
+ /*
+ * set_desc should not be set by the caller
+ */
+ if (!ops->set_desc)
+ ops->set_desc = fsl_mc_msi_set_desc;
+}
+
+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_device_irq *mc_dev_irq,
+ struct msi_desc *msi_desc)
+{
+ int error;
+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+ struct dprc_irq_cfg irq_cfg;
+
+ /*
+ * msi_desc->msg.address is 0x0 when this function is invoked in
+ * the free_irq() code path. In this case, for the MC, we don't
+ * really need to "unprogram" the MSI, so we just return.
+ */
+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+ return;
+
+ if (!owner_mc_dev)
+ return;
+
+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ irq_cfg.val = msi_desc->msg.data;
+ irq_cfg.irq_num = msi_desc->irq;
+
+ if (owner_mc_dev == mc_bus_dev) {
+ /*
+ * IRQ is for the mc_bus_dev's DPRC itself
+ */
+ error = dprc_set_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_set_irq() failed: %d\n", error);
+ }
+ } else {
+ /*
+ * IRQ is for a child device of mc_bus_dev
+ */
+ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ owner_mc_dev->obj_desc.type,
+ owner_mc_dev->obj_desc.id,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+				"dprc_set_obj_irq() failed: %d\n", error);
+ }
+ }
+}
+
+/*
+ * NOTE: This function is invoked with interrupts disabled
+ */
+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+ struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_device_irq *mc_dev_irq =
+ &mc_bus->irq_resources[msi_desc->msi_index];
+
+ msi_desc->msg = *msg;
+
+ /*
+ * Program the MSI (paddr, value) pair in the device:
+ */
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
+}
+
+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!chip)
+ return;
+
+ /*
+ * irq_write_msi_msg should not be set by the caller
+ */
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
+/**
+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
+ * @fwnode: Optional firmware node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a fsl-mc MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE)))
+ info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ fsl_mc_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ fsl_mc_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
+
+ return domain;
+}
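For orientation, here is a minimal sketch of how an irqchip driver might feed its domain info to this helper; all sample_* names and the chip/ops contents are assumptions, not code from this driver (the in-tree user is the GIC-ITS fsl-mc glue).

	/* illustrative only: wiring an fsl-mc MSI domain on top of a parent domain */
	static struct irq_chip sample_fsl_mc_msi_chip = {
		.name = "fsl-mc-msi-sample",	/* a real chip also wires mask/unmask/write_msg */
	};

	static struct msi_domain_ops sample_fsl_mc_msi_ops;	/* a real driver sets .msi_prepare here */

	static struct msi_domain_info sample_fsl_mc_msi_domain_info = {
		.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
		.ops   = &sample_fsl_mc_msi_ops,
		.chip  = &sample_fsl_mc_msi_chip,
	};

	static int sample_register_fsl_mc_msi_domain(struct fwnode_handle *fwnode,
						      struct irq_domain *parent)
	{
		struct irq_domain *domain;

		domain = fsl_mc_msi_create_irq_domain(fwnode,
						      &sample_fsl_mc_msi_domain_info,
						      parent);
		return domain ? 0 : -ENOMEM;
	}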
+
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
+{
+ struct device *root_dprc_dev;
+ struct device *bus_dev;
+ struct irq_domain *msi_domain;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ bus_dev = root_dprc_dev->parent;
+
+ if (bus_dev->of_node) {
+ msi_domain = of_msi_map_get_device_domain(dev,
+ mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
+
+		/*
+		 * If the msi-map property is missing, assume that all the
+		 * child containers inherit the domain from the parent.
+		 */
+		if (!msi_domain)
+			msi_domain = of_msi_get_domain(bus_dev,
+						       bus_dev->of_node,
+						       DOMAIN_BUS_FSL_MC_MSI);
+ } else {
+ msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
+ }
+
+ return msi_domain;
+}
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
+{
+ int error = msi_setup_device_data(dev);
+
+ if (error)
+ return error;
+
+ /*
+ * NOTE: Calling this function will trigger the invocation of the
+ * its_fsl_mc_msi_prepare() callback
+ */
+ error = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, irq_count - 1);
+
+ if (error)
+ dev_err(dev, "Failed to allocate IRQs\n");
+ return error;
+}
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
+ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+}
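A short usage sketch of the two wrappers above, assuming the device's MSI domain has already been looked up with fsl_mc_find_msi_domain() and set with dev_set_msi_domain(); the count of 16 and the sample_* name are illustrative.

	static int sample_setup_irqs(struct fsl_mc_device *mc_bus_dev)
	{
		int error;

		error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, 16);
		if (error)
			return error;

		/* ... hand the allocated interrupts out to child devices ... */

		fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
		return 0;
	}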
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
new file mode 100644
index 0000000000..b3520ea1b9
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -0,0 +1,695 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus private declarations
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_MC_PRIVATE_H_
+#define _FSL_MC_PRIVATE_H_
+
+#include <linux/fsl/mc.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+
+/*
+ * Data Path Management Complex (DPMNG) General API
+ */
+
+/* DPMNG command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* DPMNG command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
+/*
+ * Data Path Management Command Portal (DPMCP) API
+ */
+
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* DPMCP command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+
+/* DPMCP command IDs */
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+/*
+ * Initialization and runtime control APIs for DPMCP
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token);
+
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/*
+ * Data Path Resource Container (DPRC) API
+ */
+
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 6
+#define DPRC_MIN_VER_MINOR 0
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_3RD_VERSION 3
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+#define DPRC_CMD_V3(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_3RD_VERSION)
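The macros above simply pack the command ID and the command version into one 16-bit value, (id << 4) | version; for example:

	/* DPRC_CMDID_CLOSE          == DPRC_CMD(0x800)    == (0x800 << 4) | 1 == 0x8001 */
	/* DPRC_CMDID_GET_OBJ_REG_V2 == DPRC_CMD_V2(0x15E) == (0x15E << 4) | 2 == 0x15e2 */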
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+#define DPRC_CMDID_RESET_CONT_V2 DPRC_CMD_V2(0x005)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V3 DPRC_CMD_V3(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+ __le32 options;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le32 icid;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le64 base_offset;
+ /* response word 2 */
+ __le32 size;
+ u8 type;
+ u8 pad2[3];
+ /* response word 3 */
+ __le32 flags;
+ __le32 pad3;
+ /* response word 4 */
+ /* base_addr may be zero if older MC firmware is used */
+ __le64 base_addr;
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_get_connection {
+ __le32 ep1_id;
+ __le16 ep1_interface_id;
+ u8 pad[2];
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ __le64 pad[3];
+ __le32 ep2_id;
+ __le16 ep2_interface_id;
+ __le16 pad1;
+ u8 ep2_type[16];
+ __le32 state;
+};
+
+/*
+ * DPRC API for managing and querying DPAA resources
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPRC IRQ events */
+
+/* IRQ event - Indicates that a new object was added to the container */
+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+/* IRQ event - Indicates that an object was removed from the container */
+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container was destroyed
+ */
+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+/*
+ * IRQ event - Indicates that one of the objects opened by this container
+ * was destroyed
+ */
+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+
+/* IRQ event - Indicates that an object was created in the container */
+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+
+/**
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+};
+
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ */
+struct dprc_attributes {
+ int container_id;
+ u32 icid;
+ int portal_id;
+ u64 options;
+};
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+/**
+ * enum dprc_region_type - Region type
+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
+ * @DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL: Qbman memory backed portal region
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
+};
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_offset: Region offset from region's base address.
+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+ * base address; For DPIO, region base is offset from SoC QMan portals
+ * base address
+ * @size: Region size (in bytes)
+ * @flags: Region attributes
+ * @type: Portal region type
+ * @base_address: Region base address; may be zero if an older MC firmware is used
+ */
+struct dprc_region_desc {
+ u32 base_offset;
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
+ u64 base_address;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
+
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
+/*
+ * Data Path Buffer Pool (DPBP) API
+ */
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 3
+#define DPBP_VER_MINOR 2
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+
+struct dpbp_cmd_open {
+ __le32 dpbp_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_get_attributes {
+ /* response word 0 */
+ __le16 pad;
+ __le16 bpid;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+/*
+ * Data Path Concentrator (DPCON) API
+ */
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 2
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+struct dpcon_cmd_open {
+ __le32 dpcon_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_get_attr {
+ /* response word 0 */
+ __le32 id;
+ __le16 qbman_ch_id;
+ u8 num_priorities;
+ u8 pad;
+};
+
+struct dpcon_cmd_set_notification {
+ /* cmd word 0 */
+ __le32 dpio_id;
+ u8 priority;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 user_ctx;
+};
+
+/*
+ * Generic FSL MC API
+ */
+
+/* generic command versioning */
+#define OBJ_CMD_BASE_VERSION 1
+#define OBJ_CMD_ID_OFFSET 4
+
+#define OBJ_CMD(id) (((id) << OBJ_CMD_ID_OFFSET) | OBJ_CMD_BASE_VERSION)
+
+/* open command codes */
+#define DPRTC_CMDID_OPEN OBJ_CMD(0x810)
+#define DPNI_CMDID_OPEN OBJ_CMD(0x801)
+#define DPSW_CMDID_OPEN OBJ_CMD(0x802)
+#define DPIO_CMDID_OPEN OBJ_CMD(0x803)
+#define DPBP_CMDID_OPEN OBJ_CMD(0x804)
+#define DPRC_CMDID_OPEN OBJ_CMD(0x805)
+#define DPDMUX_CMDID_OPEN OBJ_CMD(0x806)
+#define DPCI_CMDID_OPEN OBJ_CMD(0x807)
+#define DPCON_CMDID_OPEN OBJ_CMD(0x808)
+#define DPSECI_CMDID_OPEN OBJ_CMD(0x809)
+#define DPAIOP_CMDID_OPEN OBJ_CMD(0x80a)
+#define DPMCP_CMDID_OPEN OBJ_CMD(0x80b)
+#define DPMAC_CMDID_OPEN OBJ_CMD(0x80c)
+#define DPDCEI_CMDID_OPEN OBJ_CMD(0x80d)
+#define DPDMAI_CMDID_OPEN OBJ_CMD(0x80e)
+#define DPDBG_CMDID_OPEN OBJ_CMD(0x80f)
+
+/* Generic object command IDs */
+#define OBJ_CMDID_CLOSE OBJ_CMD(0x800)
+#define OBJ_CMDID_RESET OBJ_CMD(0x005)
+
+struct fsl_mc_obj_cmd_open {
+ __le32 obj_id;
+};
+
+/**
+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
+ * type
+ * @type: type of resources in the pool
+ * @max_count: maximum number of resources in the pool
+ * @free_count: number of free resources in the pool
+ * @mutex: mutex to serialize access to the pool's free list
+ * @free_list: anchor node of list of free resources in the pool
+ * @mc_bus: pointer to the MC bus that owns this resource pool
+ */
+struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+ int max_count;
+ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+};
+
+/**
+ * struct fsl_mc_uapi - information associated with a device file
+ * @misc: struct miscdevice linked to the root dprc
+ * @device: newly created device in /dev
+ * @mutex: mutex lock to serialize the open/release operations
+ * @local_instance_in_use: local MC I/O instance in use or not
+ * @static_mc_io: pointer to the static MC I/O object
+ */
+struct fsl_mc_uapi {
+ struct miscdevice misc;
+ struct device *device;
+ struct mutex mutex; /* serialize open/release operations */
+ u32 local_instance_in_use;
+ struct fsl_mc_io *static_mc_io;
+};
+
+/**
+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+ * @mc_dev: fsl-mc device for the bus device itself.
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
+ * @uapi_misc: struct that abstracts the interaction with userspace
+ * @irq_enabled: flag that keeps track of whether the DPRC IRQ is enabled
+ */
+struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
+ struct fsl_mc_uapi uapi_misc;
+ int irq_enabled;
+};
+
+#define to_fsl_mc_bus(_mc_dev) \
+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+
+int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev);
+
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+
+int __init dprc_driver_init(void);
+
+void dprc_driver_exit(void);
+
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
+int __init fsl_mc_allocator_driver_init(void);
+
+void fsl_mc_allocator_driver_exit(void);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource
+ **new_resource);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
+
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io);
+
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
+bool fsl_mc_is_root_dprc(struct device *dev);
+
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
+
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev);
+
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_UAPI_SUPPORT
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus);
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus);
+
+#else
+
+static inline int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ return 0;
+}
+
+static inline void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+}
+
+#endif
+
+int disable_dprc_irq(struct fsl_mc_device *mc_dev);
+int enable_dprc_irq(struct fsl_mc_device *mc_dev);
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev);
+
+#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
new file mode 100644
index 0000000000..9c4c1395fc
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Complex (MC) userspace support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "fsl-mc-private.h"
+
+struct uapi_priv_data {
+ struct fsl_mc_uapi *uapi;
+ struct fsl_mc_io *mc_io;
+};
+
+struct fsl_mc_cmd_desc {
+ u16 cmdid_value;
+ u16 cmdid_mask;
+ int size;
+ bool token;
+ int flags;
+};
+
+#define FSL_MC_CHECK_MODULE_ID BIT(0)
+#define FSL_MC_CAP_NET_ADMIN_NEEDED BIT(1)
+
+enum fsl_mc_cmd_index {
+ DPDBG_DUMP = 0,
+ DPDBG_SET,
+ DPRC_GET_CONTAINER_ID,
+ DPRC_CREATE_CONT,
+ DPRC_DESTROY_CONT,
+ DPRC_ASSIGN,
+ DPRC_UNASSIGN,
+ DPRC_GET_OBJ_COUNT,
+ DPRC_GET_OBJ,
+ DPRC_GET_RES_COUNT,
+ DPRC_GET_RES_IDS,
+ DPRC_SET_OBJ_LABEL,
+ DPRC_SET_LOCKED,
+ DPRC_CONNECT,
+ DPRC_DISCONNECT,
+ DPRC_GET_POOL,
+ DPRC_GET_POOL_COUNT,
+ DPRC_GET_CONNECTION,
+ DPCI_GET_LINK_STATE,
+ DPCI_GET_PEER_ATTR,
+ DPAIOP_GET_SL_VERSION,
+ DPAIOP_GET_STATE,
+ DPMNG_GET_VERSION,
+ DPSECI_GET_TX_QUEUE,
+ DPMAC_GET_COUNTER,
+ DPMAC_GET_MAC_ADDR,
+ DPNI_SET_PRIM_MAC,
+ DPNI_GET_PRIM_MAC,
+ DPNI_GET_STATISTICS,
+ DPNI_GET_LINK_STATE,
+ DPNI_GET_MAX_FRAME_LENGTH,
+ DPSW_GET_TAILDROP,
+ DPSW_SET_TAILDROP,
+ DPSW_IF_GET_COUNTER,
+ DPSW_IF_GET_MAX_FRAME_LENGTH,
+ DPDMUX_GET_COUNTER,
+ DPDMUX_IF_GET_MAX_FRAME_LENGTH,
+ GET_ATTR,
+ GET_IRQ_MASK,
+ GET_IRQ_STATUS,
+ CLOSE,
+ OPEN,
+ GET_API_VERSION,
+ DESTROY,
+ CREATE,
+};
+
+static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
+ [DPDBG_DUMP] = {
+ .cmdid_value = 0x1300,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPDBG_SET] = {
+ .cmdid_value = 0x1400,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPRC_GET_CONTAINER_ID] = {
+ .cmdid_value = 0x8300,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPRC_CREATE_CONT] = {
+ .cmdid_value = 0x1510,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DESTROY_CONT] = {
+ .cmdid_value = 0x1520,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_ASSIGN] = {
+ .cmdid_value = 0x1570,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_UNASSIGN] = {
+ .cmdid_value = 0x1580,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_OBJ_COUNT] = {
+ .cmdid_value = 0x1590,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ },
+ [DPRC_GET_OBJ] = {
+ .cmdid_value = 0x15A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_RES_COUNT] = {
+ .cmdid_value = 0x15B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+ [DPRC_GET_RES_IDS] = {
+ .cmdid_value = 0x15C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ },
+ [DPRC_SET_OBJ_LABEL] = {
+ .cmdid_value = 0x1610,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 48,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_SET_LOCKED] = {
+ .cmdid_value = 0x16B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_CONNECT] = {
+ .cmdid_value = 0x1670,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 56,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DISCONNECT] = {
+ .cmdid_value = 0x1680,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_POOL] = {
+ .cmdid_value = 0x1690,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_POOL_COUNT] = {
+ .cmdid_value = 0x16A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPRC_GET_CONNECTION] = {
+ .cmdid_value = 0x16C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+
+ [DPCI_GET_LINK_STATE] = {
+ .cmdid_value = 0x0E10,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPCI_GET_PEER_ATTR] = {
+ .cmdid_value = 0x0E20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_SL_VERSION] = {
+ .cmdid_value = 0x2820,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_STATE] = {
+ .cmdid_value = 0x2830,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPMNG_GET_VERSION] = {
+ .cmdid_value = 0x8310,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPSECI_GET_TX_QUEUE] = {
+ .cmdid_value = 0x1970,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPMAC_GET_COUNTER] = {
+ .cmdid_value = 0x0c40,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 9,
+ },
+ [DPMAC_GET_MAC_ADDR] = {
+ .cmdid_value = 0x0c50,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_SET_PRIM_MAC] = {
+ .cmdid_value = 0x2240,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPNI_GET_PRIM_MAC] = {
+ .cmdid_value = 0x2250,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_STATISTICS] = {
+ .cmdid_value = 0x25D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPNI_GET_LINK_STATE] = {
+ .cmdid_value = 0x2150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x2170,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPSW_GET_TAILDROP] = {
+ .cmdid_value = 0x0A80,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPSW_SET_TAILDROP] = {
+ .cmdid_value = 0x0A90,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 24,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPSW_IF_GET_COUNTER] = {
+ .cmdid_value = 0x0340,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPSW_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0450,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPDMUX_GET_COUNTER] = {
+ .cmdid_value = 0x0b20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPDMUX_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0a20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [GET_ATTR] = {
+ .cmdid_value = 0x0040,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [GET_IRQ_MASK] = {
+ .cmdid_value = 0x0150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [GET_IRQ_STATUS] = {
+ .cmdid_value = 0x0160,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [CLOSE] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+
+ /* Common commands amongst all types of objects. Must be checked last. */
+ [OPEN] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [GET_API_VERSION] = {
+ .cmdid_value = 0xA000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 8,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [DESTROY] = {
+ .cmdid_value = 0x9800,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [CREATE] = {
+ .cmdid_value = 0x9000,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 64,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+};
+
+#define FSL_MC_NUM_ACCEPTED_CMDS ARRAY_SIZE(fsl_mc_accepted_cmds)
+
+#define FSL_MC_MAX_MODULE_ID 0x10
+
+static int fsl_mc_command_check(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_command *mc_cmd)
+{
+ struct fsl_mc_cmd_desc *desc = NULL;
+ int mc_cmd_max_size, i;
+ bool token_provided;
+ u16 cmdid, module_id;
+ char *mc_cmd_end;
+ char sum = 0;
+
+ /* Check if this is an accepted MC command */
+ cmdid = mc_cmd_hdr_read_cmdid(mc_cmd);
+ for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
+ desc = &fsl_mc_accepted_cmds[i];
+ if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
+ break;
+ }
+ if (i == FSL_MC_NUM_ACCEPTED_CMDS) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: cmdid not accepted\n", cmdid);
+ return -EACCES;
+ }
+
+ /* Check if the size of the command is honored. Anything beyond the
+ * last valid byte of the command should be zeroed.
+ */
+ mc_cmd_max_size = sizeof(*mc_cmd);
+ mc_cmd_end = ((char *)mc_cmd) + desc->size;
+ for (i = desc->size; i < mc_cmd_max_size; i++)
+ sum |= *mc_cmd_end++;
+ if (sum) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: garbage beyond max size of %d bytes!\n",
+ cmdid, desc->size);
+ return -EACCES;
+ }
+
+ /* Some MC commands request a token to be passed so that object
+ * identification is possible. Check if the token passed in the command
+ * is as expected.
+ */
+ token_provided = mc_cmd_hdr_read_token(mc_cmd) ? true : false;
+ if (token_provided != desc->token) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: token 0x%04x is invalid!\n",
+ cmdid, mc_cmd_hdr_read_token(mc_cmd));
+ return -EACCES;
+ }
+
+ /* If needed, check if the module ID passed is valid */
+ if (desc->flags & FSL_MC_CHECK_MODULE_ID) {
+ /* The module ID is represented by bits [4:9] from the cmdid */
+ module_id = (cmdid & GENMASK(9, 4)) >> 4;
+ if (module_id == 0 || module_id > FSL_MC_MAX_MODULE_ID) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: unknown module ID 0x%x\n",
+ cmdid, module_id);
+ return -EACCES;
+ }
+ }
+
+ /* Some commands alter how hardware resources are managed. For these
+ * commands, check for CAP_NET_ADMIN.
+ */
+ if (desc->flags & FSL_MC_CAP_NET_ADMIN_NEEDED) {
+ if (!capable(CAP_NET_ADMIN)) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: needs CAP_NET_ADMIN!\n",
+ cmdid);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
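As a worked illustration of the filtering above (values derived from the descriptor table; the command itself is assumed): a DPBP open command carries cmdid 0x8041. It does not hit the CLOSE descriptor (0x8041 & 0xFFF0 == 0x8040), does match the catch-all OPEN descriptor (0x8041 & 0xFC00 == 0x8000), and its module ID is within FSL_MC_MAX_MODULE_ID, so only the size and token checks remain.

	/* illustrative: how a DPBP open (cmdid 0x8041) walks the checks above */
	u16 cmdid = 0x8041;
	bool matches_open = (cmdid & 0xFC00) == 0x8000;	/* true */
	u16 module_id = (cmdid & GENMASK(9, 4)) >> 4;		/* 0x4  */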
+
+static int fsl_mc_uapi_send_command(struct fsl_mc_device *mc_dev, unsigned long arg,
+ struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_command mc_cmd;
+ int error;
+
+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ error = fsl_mc_command_check(mc_dev, &mc_cmd);
+ if (error)
+ return error;
+
+ error = mc_send_command(mc_io, &mc_cmd);
+ if (error)
+ return error;
+
+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int fsl_mc_uapi_dev_open(struct inode *inode, struct file *filep)
+{
+ struct fsl_mc_device *root_mc_device;
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_io *dynamic_mc_io;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ mc_uapi = container_of(filep->private_data, struct fsl_mc_uapi, misc);
+ mc_bus = container_of(mc_uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (!mc_uapi->local_instance_in_use) {
+ priv_data->mc_io = mc_uapi->static_mc_io;
+ mc_uapi->local_instance_in_use = 1;
+ } else {
+ error = fsl_mc_portal_allocate(root_mc_device, 0,
+ &dynamic_mc_io);
+ if (error) {
+ dev_dbg(&root_mc_device->dev,
+ "Could not allocate MC portal\n");
+ goto error_portal_allocate;
+ }
+
+ priv_data->mc_io = dynamic_mc_io;
+ }
+ priv_data->uapi = mc_uapi;
+ filep->private_data = priv_data;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+
+error_portal_allocate:
+ mutex_unlock(&mc_uapi->mutex);
+ kfree(priv_data);
+
+ return error;
+}
+
+static int fsl_mc_uapi_dev_release(struct inode *inode, struct file *filep)
+{
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_io *mc_io;
+
+ priv_data = filep->private_data;
+ mc_uapi = priv_data->uapi;
+ mc_io = priv_data->mc_io;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (mc_io == mc_uapi->static_mc_io)
+ mc_uapi->local_instance_in_use = 0;
+ else
+ fsl_mc_portal_free(mc_io);
+
+ kfree(filep->private_data);
+ filep->private_data = NULL;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+}
+
+static long fsl_mc_uapi_dev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct uapi_priv_data *priv_data = file->private_data;
+ struct fsl_mc_device *root_mc_device;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ mc_bus = container_of(priv_data->uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ switch (cmd) {
+ case FSL_MC_SEND_MC_COMMAND:
+ error = fsl_mc_uapi_send_command(root_mc_device, arg, priv_data->mc_io);
+ break;
+ default:
+ dev_dbg(&root_mc_device->dev, "unexpected ioctl call number\n");
+ error = -EINVAL;
+ }
+
+ return error;
+}
+
+static const struct file_operations fsl_mc_uapi_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = fsl_mc_uapi_dev_open,
+ .release = fsl_mc_uapi_dev_release,
+ .unlocked_ioctl = fsl_mc_uapi_dev_ioctl,
+};
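For context, a minimal userspace sketch of driving this interface; it assumes the UAPI definitions (struct fsl_mc_command, FSL_MC_SEND_MC_COMMAND) are exported through <linux/fsl_mc.h> and that the misc device is named after the root DPRC, e.g. /dev/dprc.1 — adjust both to the actual system.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fsl_mc.h>

	int send_raw_mc_command(void)
	{
		struct fsl_mc_command cmd;
		int fd, err;

		fd = open("/dev/dprc.1", O_RDWR);	/* device node name assumed */
		if (fd < 0)
			return -1;

		memset(&cmd, 0, sizeof(cmd));
		/* fill cmd.header and cmd.params[] with one of the accepted commands */
		err = ioctl(fd, FSL_MC_SEND_MC_COMMAND, &cmd);

		close(fd);
		return err;
	}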
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
+ struct fsl_mc_uapi *mc_uapi = &mc_bus->uapi_misc;
+ int error;
+
+ mc_uapi->misc.minor = MISC_DYNAMIC_MINOR;
+ mc_uapi->misc.name = dev_name(&mc_dev->dev);
+ mc_uapi->misc.fops = &fsl_mc_uapi_dev_fops;
+
+ error = misc_register(&mc_uapi->misc);
+ if (error)
+ return error;
+
+ mc_uapi->static_mc_io = mc_bus->mc_dev.mc_io;
+
+ mutex_init(&mc_uapi->mutex);
+
+ return 0;
+}
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+ misc_deregister(&mc_bus->uapi_misc.misc);
+}
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
new file mode 100644
index 0000000000..95b10a6cf3
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+ struct fsl_mc_device *dpmcp_dev)
+{
+ int error;
+
+ if (mc_io->dpmcp_dev)
+ return -EINVAL;
+
+ if (dpmcp_dev->mc_io)
+ return -EINVAL;
+
+ error = dpmcp_open(mc_io,
+ 0,
+ dpmcp_dev->obj_desc.id,
+ &dpmcp_dev->mc_handle);
+ if (error < 0)
+ return error;
+
+ mc_io->dpmcp_dev = dpmcp_dev;
+ dpmcp_dev->mc_io = mc_io;
+ return 0;
+}
+
+static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_close(mc_io,
+ 0,
+ dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
+ error);
+ }
+
+ mc_io->dpmcp_dev = NULL;
+ dpmcp_dev->mc_io = NULL;
+}
+
+/**
+ * fsl_create_mc_io() - Creates an MC I/O object
+ *
+ * @dev: device to be associated with the MC I/O object
+ * @mc_portal_phys_addr: physical address of the MC portal to use
+ * @mc_portal_size: size in bytes of the MC portal
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
+ * object or NULL if none.
+ * @flags: flags for the new MC I/O object
+ * @new_mc_io: Area to return pointer to newly created MC I/O object
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io)
+{
+ int error;
+ struct fsl_mc_io *mc_io;
+ void __iomem *mc_portal_virt_addr;
+ struct resource *res;
+
+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
+ if (!mc_io)
+ return -ENOMEM;
+
+ mc_io->dev = dev;
+ mc_io->flags = flags;
+ mc_io->portal_phys_addr = mc_portal_phys_addr;
+ mc_io->portal_size = mc_portal_size;
+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_lock_init(&mc_io->spinlock);
+ else
+ mutex_init(&mc_io->mutex);
+
+ res = devm_request_mem_region(dev,
+ mc_portal_phys_addr,
+ mc_portal_size,
+ "mc_portal");
+ if (!res) {
+ dev_err(dev,
+ "devm_request_mem_region failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -EBUSY;
+ }
+
+ mc_portal_virt_addr = devm_ioremap(dev,
+ mc_portal_phys_addr,
+ mc_portal_size);
+ if (!mc_portal_virt_addr) {
+ dev_err(dev,
+ "devm_ioremap failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -ENXIO;
+ }
+
+ mc_io->portal_virt_addr = mc_portal_virt_addr;
+ if (dpmcp_dev) {
+ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
+ if (error < 0)
+ goto error_destroy_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_destroy_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * fsl_destroy_mc_io() - Destroys an MC I/O object
+ *
+ * @mc_io: MC I/O object to destroy
+ */
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+
+ if (!mc_io)
+ return;
+
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (dpmcp_dev)
+ fsl_mc_io_unset_dpmcp(mc_io);
+
+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
+ devm_release_mem_region(mc_io->dev,
+ mc_io->portal_phys_addr,
+ mc_io->portal_size);
+
+ mc_io->portal_virt_addr = NULL;
+ devm_kfree(mc_io->dev, mc_io);
+}
+
+/**
+ * fsl_mc_portal_allocate - Allocates an MC portal
+ *
+ * @mc_dev: MC device for which the MC portal is to be allocated
+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
+ * MC portal.
+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
+ * that wraps the allocated MC portal is to be returned
+ *
+ * This function allocates an MC portal from the device's parent DPRC,
+ * from the corresponding MC bus' pool of MC portals and wraps
+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
+ * portal is allocated from its own MC bus.
+ */
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ phys_addr_t mc_portal_phys_addr;
+ size_t mc_portal_size;
+ struct fsl_mc_device *dpmcp_dev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+
+ if (mc_dev->flags & FSL_MC_IS_DPRC) {
+ mc_bus_dev = mc_dev;
+ } else {
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ return error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ }
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ *new_mc_io = NULL;
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
+ if (error < 0)
+ return error;
+
+ error = -EINVAL;
+ dpmcp_dev = resource->data;
+
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
+ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
+ mc_portal_size = resource_size(dpmcp_dev->regions);
+
+ error = fsl_create_mc_io(&mc_bus_dev->dev,
+ mc_portal_phys_addr,
+ mc_portal_size, dpmcp_dev,
+ mc_io_flags, &mc_io);
+ if (error < 0)
+ goto error_cleanup_resource;
+
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+error_cleanup_resource:
+ fsl_mc_resource_free(resource);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
+
+/**
+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
+ * of a given MC bus
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+ struct fsl_mc_resource *resource;
+
+ /*
+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
+	 * to have a DPMCP object associated with it.
+ */
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ resource = dpmcp_dev->resource;
+ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
+ return;
+
+ if (resource->data != dpmcp_dev)
+ return;
+
+ fsl_destroy_mc_io(mc_io);
+ fsl_mc_resource_free(resource);
+
+ dpmcp_dev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
+/**
+ * fsl_mc_portal_reset - Resets the DPMCP object for a given fsl_mc_io object
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to reset
+ */
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
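Taken together, the three helpers above form the usual portal life cycle in a DPAA2 object driver's probe path; the sketch below is illustrative (the sample_probe name and error label are not from this file).

	static int sample_probe(struct fsl_mc_device *mc_dev)
	{
		int err;

		err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
		if (err)
			return err;

		err = fsl_mc_portal_reset(mc_dev->mc_io);
		if (err)
			goto err_free_portal;

		/* ... open the object and configure it through mc_dev->mc_io ... */
		return 0;

	err_free_portal:
		fsl_mc_portal_free(mc_dev->mc_io);
		mc_dev->mc_io = NULL;
		return err;
	}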
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
new file mode 100644
index 0000000000..f2052cd0a0
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * I/O services to send MC commands to the MC hardware
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * Timeout in milliseconds to wait for the completion of an MC command
+ */
+#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+
+/*
+ * usleep_range() min and max values used to throttle down polling
+ * iterations while waiting for MC command completion
+ */
+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
+
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+ return (enum mc_cmd_status)hdr->status;
+}
+
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+ return cmd_id;
+}
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ static const int mc_status_to_error_map[] = {
+ [MC_CMD_STATUS_OK] = 0,
+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
+ [MC_CMD_STATUS_DMA_ERR] = -EIO,
+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
+ [MC_CMD_STATUS_BUSY] = -EBUSY,
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
+ };
+
+ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
+ return -EINVAL;
+
+ return mc_status_to_error_map[status];
+}
+
+static const char *mc_status_to_string(enum mc_cmd_status status)
+{
+ static const char *const status_strings[] = {
+ [MC_CMD_STATUS_OK] = "Command completed successfully",
+ [MC_CMD_STATUS_READY] = "Command ready to be processed",
+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
+ [MC_CMD_STATUS_BUSY] = "Device is busy",
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
+ };
+
+ if ((unsigned int)status >= ARRAY_SIZE(status_strings))
+ return "Unknown MC error";
+
+ return status_strings[status];
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
+ struct fsl_mc_command *cmd)
+{
+ int i;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is already in the expected LE byte-order. Do an
+ * extra LE -> CPU conversion so that the CPU -> LE done in
+ * the device io write api puts it back in the right order.
+ */
+ writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
+
+ /* submit the command by writing the header */
+ writeq(le64_to_cpu(cmd->header), &portal->header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
+ *portal,
+ struct fsl_mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = cpu_to_le64(readq_relaxed(&portal->header));
+ status = mc_cmd_hdr_read_status(resp);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is expected to be in LE byte-order. Do an
+ * extra CPU -> LE to revert the LE -> CPU done in
+ * the device io read api.
+ */
+ resp->params[i] =
+ cpu_to_le64(readq_relaxed(&portal->params[i]));
+
+ return status;
+}
+
+/**
+ * mc_polling_wait_preemptible() - Waits for the completion of an MC
+ *                                 command doing preemptible polling.
+ *                                 usleep_range() is called between
+ *                                 polling iterations.
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+ struct fsl_mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long jiffies_until_timeout =
+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ /*
+ * TODO: When MC command completion interrupts are supported
+ * call wait function here instead of usleep_range()
+ */
+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * mc_polling_wait_atomic() - Waits for the completion of an MC command
+ * doing atomic polling. udelay() is called
+ * between polling iterations.
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+ struct fsl_mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
+
+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
+
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * mc_send_command() - Sends a command to the MC device using the given
+ * MC I/O object
+ * @mc_io: MC I/O object to be used
+ * @cmd: command to be sent
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
+{
+ int error;
+ enum mc_cmd_status status;
+ unsigned long irq_flags = 0;
+
+ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ return -EINVAL;
+
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ else
+ mutex_lock(&mc_io->mutex);
+
+ /*
+ * Send command to the MC hardware:
+ */
+ mc_write_command(mc_io->portal_virt_addr, cmd);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ error = mc_polling_wait_preemptible(mc_io, cmd, &status);
+ else
+ error = mc_polling_wait_atomic(mc_io, cmd, &status);
+
+ if (error < 0)
+ goto common_exit;
+
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+ mc_status_to_string(status),
+ (unsigned int)status);
+
+ error = mc_status_to_error(status);
+ goto common_exit;
+ }
+
+ error = 0;
+common_exit:
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ else
+ mutex_unlock(&mc_io->mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(mc_send_command);
diff --git a/drivers/bus/fsl-mc/obj-api.c b/drivers/bus/fsl-mc/obj-api.c
new file mode 100644
index 0000000000..06c1dd84e3
--- /dev/null
+++ b/drivers/bus/fsl-mc/obj-api.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_get_open_cmd_id(const char *type)
+{
+ static const struct {
+ int cmd_id;
+ const char *type;
+ } dev_ids[] = {
+ { DPRTC_CMDID_OPEN, "dprtc" },
+ { DPRC_CMDID_OPEN, "dprc" },
+ { DPNI_CMDID_OPEN, "dpni" },
+ { DPIO_CMDID_OPEN, "dpio" },
+ { DPSW_CMDID_OPEN, "dpsw" },
+ { DPBP_CMDID_OPEN, "dpbp" },
+ { DPCON_CMDID_OPEN, "dpcon" },
+ { DPMCP_CMDID_OPEN, "dpmcp" },
+ { DPMAC_CMDID_OPEN, "dpmac" },
+ { DPSECI_CMDID_OPEN, "dpseci" },
+ { DPDMUX_CMDID_OPEN, "dpdmux" },
+ { DPDCEI_CMDID_OPEN, "dpdcei" },
+ { DPAIOP_CMDID_OPEN, "dpaiop" },
+ { DPCI_CMDID_OPEN, "dpci" },
+ { DPDMAI_CMDID_OPEN, "dpdmai" },
+ { DPDBG_CMDID_OPEN, "dpdbg" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_ids[i].type; i++)
+ if (!strcmp(dev_ids[i].type, type))
+ return dev_ids[i].cmd_id;
+
+ return -1;
+}
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct fsl_mc_obj_cmd_open *cmd_params;
+ int err = 0;
+ int cmd_id = fsl_mc_get_open_cmd_id(obj_type);
+
+ if (cmd_id == -1)
+ return -ENODEV;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
+ cmd_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+
+	/* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags,
+ token);
+
+	/* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags,
+ token);
+
+ /* send command to mc*/
+	/* send command to MC */
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
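A small sketch of how a driver might use the three generic helpers above on its own object; everything except fsl_mc_obj_open/reset/close and the fsl_mc_device fields is assumed.

	static int sample_reset_object(struct fsl_mc_device *mc_dev)
	{
		u16 token;
		int err;

		err = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
				      mc_dev->obj_desc.type, &token);
		if (err)
			return err;

		err = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);

		fsl_mc_obj_close(mc_dev->mc_io, 0, token);
		return err;
	}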