author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/dca
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dca')
-rw-r--r--  drivers/dca/Kconfig       8
-rw-r--r--  drivers/dca/Makefile      3
-rw-r--r--  drivers/dca/dca-core.c  455
-rw-r--r--  drivers/dca/dca-sysfs.c  90
4 files changed, 556 insertions, 0 deletions
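For orientation before the diff itself: dca-core.c exports a small client-side API (dca_add_requester(), dca3_get_tag()/dca_get_tag(), dca_remove_requester()) that DMA-capable devices, typically network controllers, use to obtain per-CPU Direct Cache Access tags; CONFIG_DCA itself has no Kconfig prompt, so it is only built when a provider driver such as ioatdma selects it. The fragment below is a minimal, hypothetical sketch of the requester flow, not code from this commit: the example_* names, the register write and the surrounding driver are assumptions, and only the exported functions visible in the diff are real.

#include <linux/dca.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/smp.h>

/* Hypothetical requester: ask the DCA core for service and program the
 * per-CPU tag into a made-up device register. */
static int example_enable_dca(struct device *dev)
{
        /* Fails with -ENODEV until a provider covers this root complex,
         * or -EEXIST if the device is already registered. */
        return dca_add_requester(dev);
}

static void example_program_tag(struct device *dev, void __iomem *reg)
{
        int cpu = get_cpu();
        u8 tag = dca3_get_tag(dev, cpu);   /* tag for the current CPU */

        writel(tag, reg);                  /* hypothetical tag register */
        put_cpu();
}

static void example_disable_dca(struct device *dev)
{
        dca_remove_requester(dev);
}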
diff --git a/drivers/dca/Kconfig b/drivers/dca/Kconfig
new file mode 100644
index 0000000000..fd33481319
--- /dev/null
+++ b/drivers/dca/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# DCA server configuration
+#
+
+config DCA
+ tristate
+
diff --git a/drivers/dca/Makefile b/drivers/dca/Makefile
new file mode 100644
index 0000000000..f3f0f02c70
--- /dev/null
+++ b/drivers/dca/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DCA) += dca.o
+dca-objs := dca-core.o dca-sysfs.o
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
new file mode 100644
index 0000000000..ed3dac546d
--- /dev/null
+++ b/drivers/dca/dca-core.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * This driver supports an interface for DCA clients and providers to meet.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/dca.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define DCA_VERSION "1.12.1"
+
+MODULE_VERSION(DCA_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static DEFINE_RAW_SPINLOCK(dca_lock);
+
+static LIST_HEAD(dca_domains);
+
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
+static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->bus;
+
+ while (bus->parent)
+ bus = bus->parent;
+
+ return bus;
+}
+
+static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
+{
+ struct dca_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
+ if (!domain)
+ return NULL;
+
+ INIT_LIST_HEAD(&domain->dca_providers);
+ domain->pci_rc = rc;
+
+ return domain;
+}
+
+static void dca_free_domain(struct dca_domain *domain)
+{
+ list_del(&domain->node);
+ kfree(domain);
+}
+
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+ struct dca_provider *dca, *_dca;
+ struct list_head unregistered_providers;
+ struct dca_domain *domain;
+ unsigned long flags;
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
+
+ INIT_LIST_HEAD(&unregistered_providers);
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
+ if (list_empty(&dca_domains)) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
+ /* at this point only one domain in the list is expected */
+ domain = list_first_entry(&dca_domains, struct dca_domain, node);
+
+ list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
+ list_move(&dca->node, &unregistered_providers);
+
+ dca_free_domain(domain);
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+ dca_sysfs_remove_provider(dca);
+ list_del(&dca->node);
+ }
+}
+
+static struct dca_domain *dca_find_domain(struct pci_bus *rc)
+{
+ struct dca_domain *domain;
+
+ list_for_each_entry(domain, &dca_domains, node)
+ if (domain->pci_rc == rc)
+ return domain;
+
+ return NULL;
+}
+
+static struct dca_domain *dca_get_domain(struct device *dev)
+{
+ struct pci_bus *rc;
+ struct dca_domain *domain;
+
+ rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(rc);
+
+ if (!domain) {
+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
+ dca_providers_blocked = 1;
+ }
+
+ return domain;
+}
+
+static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
+{
+ struct dca_provider *dca;
+ struct pci_bus *rc;
+ struct dca_domain *domain;
+
+ if (dev) {
+ rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(rc);
+ if (!domain)
+ return NULL;
+ } else {
+ if (!list_empty(&dca_domains))
+ domain = list_first_entry(&dca_domains,
+ struct dca_domain,
+ node);
+ else
+ return NULL;
+ }
+
+ list_for_each_entry(dca, &domain->dca_providers, node)
+ if ((!dev) || (dca->ops->dev_managed(dca, dev)))
+ return dca;
+
+ return NULL;
+}
+
+/**
+ * dca_add_requester - add a dca client to the list
+ * @dev - the device that wants dca service
+ */
+int dca_add_requester(struct device *dev)
+{
+ struct dca_provider *dca;
+ int err, slot = -ENODEV;
+ unsigned long flags;
+ struct pci_bus *pci_rc;
+ struct dca_domain *domain;
+
+ if (!dev)
+ return -EFAULT;
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
+ /* check if the requester has not been added already */
+ dca = dca_find_provider_by_dev(dev);
+ if (dca) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -EEXIST;
+ }
+
+ pci_rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(pci_rc);
+ if (!domain) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(dca, &domain->dca_providers, node) {
+ slot = dca->ops->add_requester(dca, dev);
+ if (slot >= 0)
+ break;
+ }
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ if (slot < 0)
+ return slot;
+
+ err = dca_sysfs_add_req(dca, dev, slot);
+ if (err) {
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ if (dca == dca_find_provider_by_dev(dev))
+ dca->ops->remove_requester(dca, dev);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dca_add_requester);
+
+/**
+ * dca_remove_requester - remove a dca client from the list
+ * @dev - the device that wants dca service
+ */
+int dca_remove_requester(struct device *dev)
+{
+ struct dca_provider *dca;
+ int slot;
+ unsigned long flags;
+
+ if (!dev)
+ return -EFAULT;
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ slot = dca->ops->remove_requester(dca, dev);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ if (slot < 0)
+ return slot;
+
+ dca_sysfs_remove_req(dca, slot);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dca_remove_requester);
+
+/**
+ * dca_common_get_tag - return the dca tag (serves both new and old api)
+ * @dev - the device that wants dca service
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+static u8 dca_common_get_tag(struct device *dev, int cpu)
+{
+ struct dca_provider *dca;
+ u8 tag;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ tag = dca->ops->get_tag(dca, dev, cpu);
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return tag;
+}
+
+/**
+ * dca3_get_tag - return the dca tag to the requester device
+ * for the given cpu (new api)
+ * @dev - the device that wants dca service
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca3_get_tag(struct device *dev, int cpu)
+{
+ if (!dev)
+ return -EFAULT;
+
+ return dca_common_get_tag(dev, cpu);
+}
+EXPORT_SYMBOL_GPL(dca3_get_tag);
+
+/**
+ * dca_get_tag - return the dca tag for the given cpu (old api)
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca_get_tag(int cpu)
+{
+ return dca_common_get_tag(NULL, cpu);
+}
+EXPORT_SYMBOL_GPL(dca_get_tag);
+
+/**
+ * alloc_dca_provider - get data struct for describing a dca provider
+ * @ops - pointer to struct of dca operation function pointers
+ * @priv_size - size of extra mem to be added for provider's needs
+ */
+struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
+ int priv_size)
+{
+ struct dca_provider *dca;
+ int alloc_size;
+
+ alloc_size = (sizeof(*dca) + priv_size);
+ dca = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dca)
+ return NULL;
+ dca->ops = ops;
+
+ return dca;
+}
+EXPORT_SYMBOL_GPL(alloc_dca_provider);
+
+/**
+ * free_dca_provider - release the dca provider data struct
+ * @dca - struct created by alloc_dca_provider(); the private area
+ * allocated alongside it is freed together with the struct
+ */
+void free_dca_provider(struct dca_provider *dca)
+{
+ kfree(dca);
+}
+EXPORT_SYMBOL_GPL(free_dca_provider);
+
+/**
+ * register_dca_provider - register a dca provider
+ * @dca - struct created by alloc_dca_provider()
+ * @dev - device providing dca services
+ */
+int register_dca_provider(struct dca_provider *dca, struct device *dev)
+{
+ int err;
+ unsigned long flags;
+ struct dca_domain *domain, *newdomain = NULL;
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ err = dca_sysfs_add_provider(dca, dev);
+ if (err)
+ return err;
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ domain = dca_get_domain(dev);
+ if (!domain) {
+ struct pci_bus *rc;
+
+ if (dca_providers_blocked) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ dca_sysfs_remove_provider(dca);
+ unregister_dca_providers();
+ return -ENODEV;
+ }
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ rc = dca_pci_rc_from_dev(dev);
+ newdomain = dca_allocate_domain(rc);
+ if (!newdomain)
+ return -ENODEV;
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ /* Recheck, we might have raced after dropping the lock */
+ domain = dca_get_domain(dev);
+ if (!domain) {
+ domain = newdomain;
+ newdomain = NULL;
+ list_add(&domain->node, &dca_domains);
+ }
+ }
+ list_add(&dca->node, &domain->dca_providers);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_ADD, NULL);
+ kfree(newdomain);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_dca_provider);
+
+/**
+ * unregister_dca_provider - remove a dca provider
+ * @dca - struct created by alloc_dca_provider()
+ */
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+{
+ unsigned long flags;
+ struct pci_bus *pci_rc;
+ struct dca_domain *domain;
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
+ if (list_empty(&dca_domains)) {
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
+ list_del(&dca->node);
+
+ pci_rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(pci_rc);
+ if (list_empty(&domain->dca_providers))
+ dca_free_domain(domain);
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ dca_sysfs_remove_provider(dca);
+}
+EXPORT_SYMBOL_GPL(unregister_dca_provider);
+
+/**
+ * dca_register_notify - register a client's notifier callback
+ */
+void dca_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&dca_provider_chain, nb);
+}
+EXPORT_SYMBOL_GPL(dca_register_notify);
+
+/**
+ * dca_unregister_notify - remove a client's notifier callback
+ */
+void dca_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&dca_provider_chain, nb);
+}
+EXPORT_SYMBOL_GPL(dca_unregister_notify);
+
+static int __init dca_init(void)
+{
+ pr_info("dca service started, version %s\n", DCA_VERSION);
+ return dca_sysfs_init();
+}
+
+static void __exit dca_exit(void)
+{
+ dca_sysfs_exit();
+}
+
+arch_initcall(dca_init);
+module_exit(dca_exit);
+
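The provider side of the interface above is driven through struct dca_ops. The callback names and signatures in the sketch below are inferred from how dca-core.c invokes them (dev_managed(), add_requester(), remove_requester(), get_tag()); the my_* identifiers and the empty callback bodies are placeholders loosely modelled on what a DMA-engine driver such as ioatdma would do, not code from this commit.

#include <linux/dca.h>
#include <linux/device.h>
#include <linux/errno.h>

struct my_provider_priv {                  /* hypothetical per-provider state */
        struct device *requesters[8];
};

static int my_add_requester(struct dca_provider *dca, struct device *dev)
{
        /* claim a hardware slot for 'dev'; return the slot index or -EBUSY */
        return 0;
}

static int my_remove_requester(struct dca_provider *dca, struct device *dev)
{
        /* release the slot previously handed to 'dev' and return it */
        return 0;
}

static u8 my_get_tag(struct dca_provider *dca, struct device *dev, int cpu)
{
        /* translate 'cpu' into whatever tag value the chipset expects */
        return 0;
}

static int my_dev_managed(struct dca_provider *dca, struct device *dev)
{
        /* non-zero if this provider already serves 'dev' */
        return 0;
}

static const struct dca_ops my_dca_ops = {
        .add_requester    = my_add_requester,
        .remove_requester = my_remove_requester,
        .get_tag          = my_get_tag,
        .dev_managed      = my_dev_managed,
};

static int my_probe(struct device *dev)
{
        struct dca_provider *dca;
        int err;

        dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_provider_priv));
        if (!dca)
                return -ENOMEM;

        err = register_dca_provider(dca, dev);
        if (err) {
                free_dca_provider(dca);
                return err;
        }
        return 0;
}

static void my_remove(struct device *dev, struct dca_provider *dca)
{
        unregister_dca_provider(dca, dev);
        free_dca_provider(dca);
}

register_dca_provider() is what ties the provider into the per-root-complex domain list above and, via dca-sysfs.c below, creates the /sys/class/dca/dcaN node.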
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
new file mode 100644
index 0000000000..fcc83ede09
--- /dev/null
+++ b/drivers/dca/dca-sysfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+#include <linux/dca.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+
+static struct class *dca_class;
+static struct idr dca_idr;
+static spinlock_t dca_idr_lock;
+
+int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
+{
+ struct device *cd;
+ static int req_count;
+
+ cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), NULL,
+ "requester%d", req_count++);
+ return PTR_ERR_OR_ZERO(cd);
+}
+
+void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
+{
+ device_destroy(dca_class, MKDEV(0, slot + 1));
+}
+
+int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
+{
+ struct device *cd;
+ int ret;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&dca_idr_lock);
+
+ ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ dca->id = ret;
+
+ spin_unlock(&dca_idr_lock);
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
+
+ cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
+ if (IS_ERR(cd)) {
+ spin_lock(&dca_idr_lock);
+ idr_remove(&dca_idr, dca->id);
+ spin_unlock(&dca_idr_lock);
+ return PTR_ERR(cd);
+ }
+ dca->cd = cd;
+ return 0;
+}
+
+void dca_sysfs_remove_provider(struct dca_provider *dca)
+{
+ device_unregister(dca->cd);
+ dca->cd = NULL;
+ spin_lock(&dca_idr_lock);
+ idr_remove(&dca_idr, dca->id);
+ spin_unlock(&dca_idr_lock);
+}
+
+int __init dca_sysfs_init(void)
+{
+ idr_init(&dca_idr);
+ spin_lock_init(&dca_idr_lock);
+
+ dca_class = class_create("dca");
+ if (IS_ERR(dca_class)) {
+ idr_destroy(&dca_idr);
+ return PTR_ERR(dca_class);
+ }
+ return 0;
+}
+
+void __exit dca_sysfs_exit(void)
+{
+ class_destroy(dca_class);
+ idr_destroy(&dca_idr);
+}
+
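Two closing notes. First, the sysfs glue above gives every registered provider a /sys/class/dca/dcaN device with one requesterM child per bound client. Second, clients that want to react when providers appear or disappear can hook the notifier chain exported by dca-core.c; the sketch below is a hypothetical illustration of that hook, where the example_* names and the commented-out re-registration logic are assumptions and only dca_register_notify(), dca_unregister_notify(), DCA_PROVIDER_ADD and DCA_PROVIDER_REMOVE come from the code above.

#include <linux/dca.h>
#include <linux/notifier.h>

static int example_dca_notify(struct notifier_block *nb,
                              unsigned long event, void *data)
{
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* a provider arrived: retry dca_add_requester() and
                 * reprogram per-CPU tags (driver-specific, omitted) */
                break;
        case DCA_PROVIDER_REMOVE:
                /* provider going away: call dca_remove_requester() and
                 * stop using stale tags */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_dca_nb = {
        .notifier_call = example_dca_notify,
};

static void example_dca_init(void)
{
        dca_register_notify(&example_dca_nb);
}

static void example_dca_exit(void)
{
        dca_unregister_notify(&example_dca_nb);
}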