Diffstat (limited to 'drivers/platform/x86/intel/speed_select_if')
7 files changed, 1533 insertions, 0 deletions
diff --git a/drivers/platform/x86/intel/speed_select_if/Kconfig b/drivers/platform/x86/intel/speed_select_if/Kconfig
new file mode 100644
index 000000000..ce3e3dc07
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/Kconfig
@@ -0,0 +1,17 @@
+menu "Intel Speed Select Technology interface support"
+	depends on PCI
+	depends on X86_64 || COMPILE_TEST
+
+config INTEL_SPEED_SELECT_INTERFACE
+	tristate "Intel(R) Speed Select Technology interface drivers"
+	help
+	  This config enables the Intel(R) Speed Select Technology interface
+	  drivers. The Intel(R) Speed Select Technology features are
+	  non-architectural and only supported on specific Xeon(R) servers.
+	  These drivers provide an interface to communicate directly with the
+	  hardware via MMIO and mailboxes to enumerate and control all the
+	  speed select features.
+
+	  Enable this config if there is a need to enable and control the
+	  Intel(R) Speed Select Technology features from user space.
+endmenu
diff --git a/drivers/platform/x86/intel/speed_select_if/Makefile b/drivers/platform/x86/intel/speed_select_if/Makefile
new file mode 100644
index 000000000..856076206
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Speed Select Interface drivers
+# Copyright (c) 2019, Intel Corporation.
+#
+
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o
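All four objects built above funnel user requests through one misc character device registered by isst_if_common.c. As a quick orientation before the driver code, here is a minimal user-space sketch of the capability handshake; it assumes the uapi header ships as <linux/isst_if.h>, that the node appears as /dev/isst_interface, and that at least one interface driver below is loaded:

    /* Query interface capabilities before issuing any commands. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/isst_if.h>

    int main(void)
    {
            struct isst_if_platform_info info;
            int fd = open("/dev/isst_interface", O_RDWR);

            if (fd < 0)
                    return 1;

            if (ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info) == 0)
                    printf("api:%d driver:%d max_cmds:%d mbox:%d mmio:%d\n",
                           info.api_version, info.driver_version,
                           info.max_cmds_per_ioctl, info.mbox_supported,
                           info.mmio_supported);
            return 0;
    }

The mbox_supported and mmio_supported flags simply reflect which of the provider modules below managed to register.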
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
new file mode 100644
index 000000000..f6b32d31c
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Common functions
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define MSR_THREAD_ID_INFO	0x53
+#define MSR_CPU_BUS_NUMBER	0x128
+
+static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
+
+static int punit_msr_white_list[] = {
+	MSR_TURBO_RATIO_LIMIT,
+	MSR_CONFIG_TDP_CONTROL,
+	MSR_TURBO_RATIO_LIMIT1,
+	MSR_TURBO_RATIO_LIMIT2,
+};
+
+struct isst_valid_cmd_ranges {
+	u16 cmd;
+	u16 sub_cmd_beg;
+	u16 sub_cmd_end;
+};
+
+struct isst_cmd_set_req_type {
+	u16 cmd;
+	u16 sub_cmd;
+	u16 param;
+};
+
+static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
+	{0xD0, 0x00, 0x03},
+	{0x7F, 0x00, 0x0B},
+	{0x7F, 0x10, 0x12},
+	{0x7F, 0x20, 0x23},
+	{0x94, 0x03, 0x03},
+	{0x95, 0x03, 0x03},
+};
+
+static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
+	{0xD0, 0x00, 0x08},
+	{0xD0, 0x01, 0x08},
+	{0xD0, 0x02, 0x08},
+	{0xD0, 0x03, 0x08},
+	{0x7F, 0x02, 0x00},
+	{0x7F, 0x08, 0x00},
+	{0x95, 0x03, 0x03},
+};
+
+struct isst_cmd {
+	struct hlist_node hnode;
+	u64 data;
+	u32 cmd;
+	int cpu;
+	int mbox_cmd_type;
+	u32 param;
+};
+
+static DECLARE_HASHTABLE(isst_hash, 8);
+static DEFINE_MUTEX(isst_hash_lock);
+
+static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+			      u32 data)
+{
+	struct isst_cmd *sst_cmd;
+
+	sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
+	if (!sst_cmd)
+		return -ENOMEM;
+
+	sst_cmd->cpu = cpu;
+	sst_cmd->cmd = cmd;
+	sst_cmd->mbox_cmd_type = mbox_cmd_type;
+	sst_cmd->param = param;
+	sst_cmd->data = data;
+
+	hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
+
+	return 0;
+}
+
+static void isst_delete_hash(void)
+{
+	struct isst_cmd *sst_cmd;
+	struct hlist_node *tmp;
+	int i;
+
+	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
+		hash_del(&sst_cmd->hnode);
+		kfree(sst_cmd);
+	}
+}
+
+/**
+ * isst_store_cmd() - Store command to a hash table
+ * @cmd: Mailbox command.
+ * @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target CPU for the command.
+ * @mbox_cmd_type: Mailbox or MSR command.
+ * @param: Mailbox parameter.
+ * @data: Mailbox request data or MSR data.
+ *
+ * Stores the command to a hash table if there is no such command already
+ * stored. If already stored, update the latest parameter and data for the
+ * command.
+ *
+ * Return: Result of storing to the hash table; 0 for success, others for
+ * failure.
+ */
+int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
+		   u32 param, u64 data)
+{
+	struct isst_cmd *sst_cmd;
+	int full_cmd, ret;
+
+	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
+	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
+	mutex_lock(&isst_hash_lock);
+	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
+		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
+		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
+			sst_cmd->param = param;
+			sst_cmd->data = data;
+			mutex_unlock(&isst_hash_lock);
+			return 0;
+		}
+	}
+
+	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
+	mutex_unlock(&isst_hash_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(isst_store_cmd);
+
+static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
+				     struct isst_cmd *sst_cmd)
+{
+	struct isst_if_mbox_cmd mbox_cmd;
+	int wr_only;
+
+	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
+	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
+	mbox_cmd.parameter = sst_cmd->param;
+	mbox_cmd.req_data = sst_cmd->data;
+	mbox_cmd.logical_cpu = sst_cmd->cpu;
+	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
+}
+
+/**
+ * isst_resume_common() - Process Resume request
+ *
+ * On resume, replay all the stored mailbox commands and MSR writes.
+ *
+ * Return: None.
+ */
+void isst_resume_common(void)
+{
+	struct isst_cmd *sst_cmd;
+	int i;
+
+	hash_for_each(isst_hash, i, sst_cmd, hnode) {
+		struct isst_if_cmd_cb *cb;
+
+		if (sst_cmd->mbox_cmd_type) {
+			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+			if (cb->registered)
+				isst_mbox_resume_command(cb, sst_cmd);
+		} else {
+			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
+					   sst_cmd->data);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(isst_resume_common);
+
+static void isst_restore_msr_local(int cpu)
+{
+	struct isst_cmd *sst_cmd;
+	int i;
+
+	mutex_lock(&isst_hash_lock);
+	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+		if (!punit_msr_white_list[i])
+			break;
+
+		hash_for_each_possible(isst_hash, sst_cmd, hnode,
+				       punit_msr_white_list[i]) {
+			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
+				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
+		}
+	}
+	mutex_unlock(&isst_hash_lock);
+}
+
+/**
+ * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * An invalid command to the PUNIT may result in instability of the platform.
+ * This function validates the command against a whitelist of allowed
+ * commands.
+ *
+ * Return: true if the command is invalid, false otherwise.
+ */
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
+{
+	int i;
+
+	if (cmd->logical_cpu >= nr_cpu_ids)
+		return true;
+
+	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
+		if (cmd->command == isst_valid_cmds[i].cmd &&
+		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
+		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
+			return false;
+		}
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
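The 32-bit hash key built by isst_store_cmd() above carries the command in the upper half and the sub-command (or the MSR index, for stored MSR writes) in the lower half; isst_mbox_resume_command() reverses the packing during replay. A standalone restatement of the layout, for reference:

    #include <stdint.h>

    /* Key layout used by isst_store_cmd(): command in bits 31:16,
     * sub-command (or MSR index for MSR writes) in bits 15:0.
     */
    static inline uint32_t isst_pack_cmd(uint16_t cmd, uint16_t sub_cmd)
    {
            return ((uint32_t)cmd << 16) | sub_cmd;
    }

    static inline uint16_t isst_unpack_cmd(uint32_t key)
    {
            return key >> 16;
    }

    static inline uint16_t isst_unpack_sub_cmd(uint32_t key)
    {
            return key & 0xffff;
    }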
+
+/**
+ * isst_if_mbox_cmd_set_req() - Check if a mailbox command is a set request
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * Check if the given mailbox command is a set request and not a get request.
+ *
+ * Return: true if the command is a set request, false otherwise.
+ */
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
+		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
+		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
+		    cmd->parameter == isst_cmd_set_reqs[i].param) {
+			return true;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
+
+static int isst_if_get_platform_info(void __user *argp)
+{
+	struct isst_if_platform_info info;
+
+	info.api_version = ISST_IF_API_VERSION;
+	info.driver_version = ISST_IF_DRIVER_VERSION;
+	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
+	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
+	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
+
+	if (copy_to_user(argp, &info, sizeof(info)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#define ISST_MAX_BUS_NUMBER	2
+
+struct isst_if_cpu_info {
+	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
+	int bus_info[ISST_MAX_BUS_NUMBER];
+	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+	int punit_cpu_id;
+	int numa_node;
+};
+
+struct isst_if_pkg_info {
+	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
+};
+
+static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
+static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+	struct pci_dev *matched_pci_dev = NULL;
+	struct pci_dev *pci_dev = NULL;
+	struct pci_dev *_pci_dev = NULL;
+	int no_matches = 0, pkg_id;
+	int bus_number;
+
+	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+		return NULL;
+
+	pkg_id = topology_physical_package_id(cpu);
+
+	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
+	if (bus_number < 0)
+		return NULL;
+
+	for_each_pci_dev(_pci_dev) {
+		int node;
+
+		if (_pci_dev->bus->number != bus_number ||
+		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
+			continue;
+
+		++no_matches;
+		if (!matched_pci_dev)
+			matched_pci_dev = _pci_dev;
+
+		node = dev_to_node(&_pci_dev->dev);
+		if (node == NUMA_NO_NODE) {
+			pr_info("Failed to get NUMA node for CPU:%d bus:%d dev:%d fn:%d\n",
+				cpu, bus_no, dev, fn);
+			continue;
+		}
+
+		if (node == isst_cpu_info[cpu].numa_node) {
+			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
+			pci_dev = _pci_dev;
+			break;
+		}
+	}
+
+	/*
+	 * If there is no NUMA-matched pci_dev, there can be the following
+	 * cases:
+	 * 1. CONFIG_NUMA is not defined: In this case, if there is only a
+	 *    single device match, then we don't need NUMA information.
+	 *    Simply return the last match. Otherwise return NULL.
+	 * 2. NUMA information is not exposed via the _SEG method. This case
+	 *    is similar to case 1.
+	 * 3. The NUMA information doesn't match the CPU's NUMA node and
+	 *    there is more than one match: return NULL.
+	 */
+	if (!pci_dev && no_matches == 1)
+		pci_dev = matched_pci_dev;
+
+	/* Return the pci_dev pointer for any matched CPU in the package */
+	if (!pci_dev)
+		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
+	return pci_dev;
+}
+
+/**
+ * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
+ * @cpu: Logical CPU number.
+ * @bus_no: The bus number assigned by the hardware.
+ * @dev: The device number assigned by the hardware.
+ * @fn: The function number assigned by the hardware.
+ *
+ * Using cached bus information, find out the PCI device for a bus number,
+ * device and function.
+ *
+ * Return: pci_dev pointer or NULL.
+ */
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+	struct pci_dev *pci_dev;
+
+	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
+	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+		return NULL;
+
+	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
+
+	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
+		return pci_dev;
+
+	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
+}
+EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
+
+static int isst_if_cpu_online(unsigned int cpu)
+{
+	u64 data;
+	int ret;
+
+	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
+
+	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+	if (ret) {
+		/* This is not a fatal error on MSR-mailbox-only interfaces */
+		isst_cpu_info[cpu].bus_info[0] = -1;
+		isst_cpu_info[cpu].bus_info[1] = -1;
+	} else {
+		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
+		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
+		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
+		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
+	}
+
+	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+	if (ret) {
+		isst_cpu_info[cpu].punit_cpu_id = -1;
+		return ret;
+	}
+	isst_cpu_info[cpu].punit_cpu_id = data;
+
+	isst_restore_msr_local(cpu);
+
+	return 0;
+}
+
+static int isst_if_online_id;
+
+static int isst_if_cpu_info_init(void)
+{
+	int ret;
+
+	isst_cpu_info = kcalloc(num_possible_cpus(),
+				sizeof(*isst_cpu_info),
+				GFP_KERNEL);
+	if (!isst_cpu_info)
+		return -ENOMEM;
+
+	isst_pkg_info = kcalloc(topology_max_packages(),
+				sizeof(*isst_pkg_info),
+				GFP_KERNEL);
+	if (!isst_pkg_info) {
+		kfree(isst_cpu_info);
+		return -ENOMEM;
+	}
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"platform/x86/isst-if:online",
+				isst_if_cpu_online, NULL);
+	if (ret < 0) {
+		kfree(isst_pkg_info);
+		kfree(isst_cpu_info);
+		return ret;
+	}
+
+	isst_if_online_id = ret;
+
+	return 0;
+}
+
+static void isst_if_cpu_info_exit(void)
+{
+	cpuhp_remove_state(isst_if_online_id);
+	kfree(isst_pkg_info);
+	kfree(isst_cpu_info);
+}
+
+static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+	struct isst_if_cpu_map *cpu_map;
+
+	cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
+	if (cpu_map->logical_cpu >= nr_cpu_ids ||
+	    cpu_map->logical_cpu >= num_possible_cpus())
+		return -EINVAL;
+
+	*write_only = 0;
+	cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;
+
+	return 0;
+}
+
+static bool match_punit_msr_white_list(int msr)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+		if (punit_msr_white_list[i] == msr)
+			return true;
+	}
+
+	return false;
+}
+
+static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+	struct isst_if_msr_cmd *msr_cmd;
+	int ret;
+
+	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;
+
+	if (!match_punit_msr_white_list(msr_cmd->msr))
+		return -EINVAL;
+
+	if (msr_cmd->logical_cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (msr_cmd->read_write) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+					 msr_cmd->msr,
+					 msr_cmd->data);
+		*write_only = 1;
+		if (!ret && !resume)
+			ret = isst_store_cmd(0, msr_cmd->msr,
+					     msr_cmd->logical_cpu,
+					     0, 0, msr_cmd->data);
+	} else {
+		u64 data;
+
+		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+					 msr_cmd->msr, &data);
+		if (!ret) {
+			msr_cmd->data = data;
+			*write_only = 0;
+		}
+	}
+
+	return ret;
+}
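isst_if_msr_cmd_req() is reached through the multi-command dispatch loop implemented next: user space passes a u32 count followed by an array of fixed-size records, and the ioctl returns the number of commands processed. A user-space sketch reading MSR_TURBO_RATIO_LIMIT (0x1AD, the first entry of the whitelist above) on CPU 0, under the same header and device-node assumptions as the earlier example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/isst_if.h>

    int main(void)
    {
            struct isst_if_msr_cmds cmds = { .cmd_count = 1 };
            int fd = open("/dev/isst_interface", O_RDWR);

            if (fd < 0)
                    return 1;

            cmds.msr_cmd[0].read_write = 0;         /* read request */
            cmds.msr_cmd[0].logical_cpu = 0;
            cmds.msr_cmd[0].msr = 0x1AD;            /* MSR_TURBO_RATIO_LIMIT */

            /* Success returns the count of processed commands (1 here) */
            if (ioctl(fd, ISST_IF_MSR_COMMAND, &cmds) == 1)
                    printf("msr 0x1AD: %llx\n",
                           (unsigned long long)cmds.msr_cmd[0].data);
            return 0;
    }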
+
+static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
+{
+	unsigned char __user *ptr;
+	u32 cmd_count;
+	u8 *cmd_ptr;
+	long ret;
+	int i;
+
+	/* Each multi-command buffer has a u32 command count as the first field */
+	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
+		return -EFAULT;
+
+	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
+		return -EINVAL;
+
+	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
+	if (!cmd_ptr)
+		return -ENOMEM;
+
+	/* cb->offset points to the start of the commands, after the count */
+	ptr = argp + cb->offset;
+
+	for (i = 0; i < cmd_count; ++i) {
+		int wr_only;
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
+		if (ret)
+			break;
+
+		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ptr += cb->cmd_size;
+	}
+
+	kfree(cmd_ptr);
+
+	return i ? i : ret;
+}
+
+static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct isst_if_cmd_cb cmd_cb;
+	struct isst_if_cmd_cb *cb;
+	long ret = -ENOTTY;
+
+	switch (cmd) {
+	case ISST_IF_GET_PLATFORM_INFO:
+		ret = isst_if_get_platform_info(argp);
+		break;
+	case ISST_IF_GET_PHY_ID:
+		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
+		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
+		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
+		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+		break;
+	case ISST_IF_IO_CMD:
+		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
+		if (cb->registered)
+			ret = isst_if_exec_multi_cmd(argp, cb);
+		break;
+	case ISST_IF_MBOX_COMMAND:
+		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+		if (cb->registered)
+			ret = isst_if_exec_multi_cmd(argp, cb);
+		break;
+	case ISST_IF_MSR_COMMAND:
+		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
+		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
+		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
+		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one shared misc device for all ISST interfaces */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
+static int misc_usage_count;
+static int misc_device_ret;
+static int misc_device_open;
+
+static int isst_if_open(struct inode *inode, struct file *file)
+{
+	int i, ret = 0;
+
+	/* Fail the open if a module is going away */
+	mutex_lock(&punit_misc_dev_open_lock);
+	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+		struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+		if (cb->registered && !try_module_get(cb->owner)) {
+			ret = -ENODEV;
+			break;
+		}
+	}
+	if (ret) {
+		int j;
+
+		for (j = 0; j < i; ++j) {
+			struct isst_if_cmd_cb *cb;
+
+			cb = &punit_callbacks[j];
+			if (cb->registered)
+				module_put(cb->owner);
+		}
+	} else {
+		misc_device_open++;
+	}
+	mutex_unlock(&punit_misc_dev_open_lock);
+
+	return ret;
+}
+
+static int isst_if_release(struct inode *inode, struct file *f)
+{
+	int i;
+
+	mutex_lock(&punit_misc_dev_open_lock);
+	misc_device_open--;
+	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+		struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+		if (cb->registered)
+			module_put(cb->owner);
+	}
+	mutex_unlock(&punit_misc_dev_open_lock);
+
+	return 0;
+}
+
+static const struct file_operations isst_if_char_driver_ops = {
+	.open = isst_if_open,
+	.unlocked_ioctl = isst_if_def_ioctl,
+	.release = isst_if_release,
+};
+
+static struct miscdevice isst_if_char_driver = {
+	.minor		= MISC_DYNAMIC_MINOR,
"isst_interface", + .fops = &isst_if_char_driver_ops, +}; + +static int isst_misc_reg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_device_ret) + goto unlock_exit; + + if (!misc_usage_count) { + misc_device_ret = isst_if_cpu_info_init(); + if (misc_device_ret) + goto unlock_exit; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) { + isst_if_cpu_info_exit(); + goto unlock_exit; + } + } + misc_usage_count++; + +unlock_exit: + mutex_unlock(&punit_misc_dev_reg_lock); + + return misc_device_ret; +} + +static void isst_misc_unreg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_usage_count) + misc_usage_count--; + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_reg_lock); +} + +/** + * isst_if_cdev_register() - Register callback for IOCTL + * @device_type: The device type this callback handling. + * @cb: Callback structure. + * + * This function registers a callback to device type. On very first call + * it will register a misc device, which is used for user kernel interface. + * Other calls simply increment ref count. Registry will fail, if the user + * already opened misc device for operation. Also if the misc device + * creation failed, then it will not try again and all callers will get + * failure code. + * + * Return: Return the return value from the misc creation device or -EINVAL + * for unsupported device type. + */ +int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) +{ + int ret; + + if (device_type >= ISST_IF_DEV_MAX) + return -EINVAL; + + mutex_lock(&punit_misc_dev_open_lock); + /* Device is already open, we don't want to add new callbacks */ + if (misc_device_open) { + mutex_unlock(&punit_misc_dev_open_lock); + return -EAGAIN; + } + memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); + punit_callbacks[device_type].registered = 1; + mutex_unlock(&punit_misc_dev_open_lock); + + ret = isst_misc_reg(); + if (ret) { + /* + * No need of mutex as the misc device register failed + * as no one can open device yet. Hence no contention. + */ + punit_callbacks[device_type].registered = 0; + return ret; + } + return 0; +} +EXPORT_SYMBOL_GPL(isst_if_cdev_register); + +/** + * isst_if_cdev_unregister() - Unregister callback for IOCTL + * @device_type: The device type to unregister. + * + * This function unregisters the previously registered callback. If this + * is the last callback unregistering, then misc device is removed. + * + * Return: None. + */ +void isst_if_cdev_unregister(int device_type) +{ + isst_misc_unreg(); + mutex_lock(&punit_misc_dev_open_lock); + punit_callbacks[device_type].registered = 0; + if (device_type == ISST_IF_DEV_MBOX) + isst_delete_hash(); + mutex_unlock(&punit_misc_dev_open_lock); +} +EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h new file mode 100644 index 000000000..fdecdae24 --- /dev/null +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel Speed Select Interface: Drivers Internal defines + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. 
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
new file mode 100644
index 000000000..fdecdae24
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Speed Select Interface: Drivers Internal defines
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#ifndef __ISST_IF_COMMON_H
+#define __ISST_IF_COMMON_H
+
+#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_0	0x3451
+#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_0	0x3459
+
+#define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1	0x3251
+#define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1	0x3259
+
+/*
+ * Validate maximum commands in a single request.
+ * This is enough to handle a command to every core in one ioctl, or all
+ * possible message ids to one CPU. The limit also helps bound the response
+ * time per IOCTL request, as the PUNIT may take different times to process
+ * each request and may stall for long with too many commands.
+ */
+#define ISST_IF_CMD_LIMIT	64
+
+#define ISST_IF_API_VERSION	0x01
+#define ISST_IF_DRIVER_VERSION	0x01
+
+#define ISST_IF_DEV_MBOX	0
+#define ISST_IF_DEV_MMIO	1
+#define ISST_IF_DEV_MAX		2
+
+/**
+ * struct isst_if_cmd_cb - Used to register an IOCTL handler
+ * @registered:	Used by the common code to store registration state.
+ *		Callers should not touch this field
+ * @cmd_size:	The size of an individual command in the IOCTL buffer
+ * @offset:	Offset to the first valid member in the command structure.
+ *		This will be the offset of the start of the commands,
+ *		after the command count field
+ * @cmd_callback: Callback function to handle the IOCTL. The callback gets
+ *		the command pointer with data for the command. There is a
+ *		pointer called write_only, which when set, will not copy
+ *		the response to the user ioctl buffer. The "resume"
+ *		argument can be used to avoid storing the command for
+ *		replay during system resume
+ *
+ * This structure is used to register a handler for an IOCTL. To avoid
+ * code duplication, the common code handles all the IOCTL command
+ * read/write, including handling multiple commands in a single IOCTL.
+ * The caller just needs to execute a command via the registered callback.
+ */
+struct isst_if_cmd_cb {
+	int registered;
+	int cmd_size;
+	int offset;
+	struct module *owner;
+	long (*cmd_callback)(u8 *ptr, int *write_only, int resume);
+};
+
+/* Internal interface functions */
+int isst_if_cdev_register(int type, struct isst_if_cmd_cb *cb);
+void isst_if_cdev_unregister(int type);
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn);
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *mbox_cmd);
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd);
+int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd,
+		   u32 param, u64 data);
+void isst_resume_common(void);
+#endif
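The registration contract defined by this header is small: fill a struct isst_if_cmd_cb and hand it to isst_if_cdev_register(). A hypothetical provider skeleton follows; my_provider_init(), my_provider_exit() and my_proc_cmd() are illustrative names, not part of the interface, and a real provider supplies its own command processing:

    #include <linux/module.h>
    #include <uapi/linux/isst_if.h>

    #include "isst_if_common.h"

    /* Process one command record; set *write_only = 1 when there is
     * no response data to copy back to user space.
     */
    static long my_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
    {
            *write_only = 1;
            return 0;
    }

    static int __init my_provider_init(void)
    {
            struct isst_if_cmd_cb cb;

            memset(&cb, 0, sizeof(cb));
            /* Reuses the uapi mailbox command layout for illustration */
            cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
            cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
            cb.cmd_callback = my_proc_cmd;
            cb.owner = THIS_MODULE;
            return isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
    }
    module_init(my_provider_init);

    static void __exit my_provider_exit(void)
    {
            isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
    }
    module_exit(my_provider_exit);

    MODULE_LICENSE("GPL v2");

The two real providers of ISST_IF_DEV_MBOX follow this exact pattern, as the next two files show.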
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
new file mode 100644
index 000000000..1b6eab071
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via MSR Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/cpuhotplug.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/topology.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "isst_if_common.h"
+
+#define MSR_OS_MAILBOX_INTERFACE	0xB0
+#define MSR_OS_MAILBOX_DATA		0xB1
+#define MSR_OS_MAILBOX_BUSY_BIT		31
+
+/*
+ * Based on experiments, the retry count never exceeds 1, as the MSR access
+ * overhead is enough time for the command to finish. So this is the
+ * worst-case number.
+ */
+#define OS_MAILBOX_RETRY_COUNT		3
+
+static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
+				 u32 command_data, u32 *response_data)
+{
+	u32 retries;
+	u64 data;
+	int ret;
+
+	/* Poll for rb bit == 0 */
+	retries = OS_MAILBOX_RETRY_COUNT;
+	do {
+		rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+		if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+			ret = -EBUSY;
+			continue;
+		}
+		ret = 0;
+		break;
+	} while (--retries);
+
+	if (ret)
+		return ret;
+
+	/* Write DATA register */
+	wrmsrl(MSR_OS_MAILBOX_DATA, command_data);
+
+	/* Write command register */
+	data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) |
+	       (parameter & GENMASK_ULL(13, 0)) << 16 |
+	       (sub_command << 8) |
+	       command;
+	wrmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+
+	/* Poll for rb bit == 0 */
+	retries = OS_MAILBOX_RETRY_COUNT;
+	do {
+		rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+		if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+			ret = -EBUSY;
+			continue;
+		}
+
+		if (data & 0xff)
+			return -ENXIO;
+
+		if (response_data) {
+			rdmsrl(MSR_OS_MAILBOX_DATA, data);
+			*response_data = data;
+		}
+		ret = 0;
+		break;
+	} while (--retries);
+
+	return ret;
+}
+
+struct msrl_action {
+	int err;
+	struct isst_if_mbox_cmd *mbox_cmd;
+};
+
+/* revisit, smp_call_function_single should be enough for atomic mailbox! */
+static void msrl_update_func(void *info)
+{
+	struct msrl_action *act = info;
+
+	act->err = isst_if_send_mbox_cmd(act->mbox_cmd->command,
+					 act->mbox_cmd->sub_command,
+					 act->mbox_cmd->parameter,
+					 act->mbox_cmd->req_data,
+					 &act->mbox_cmd->resp_data);
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+	struct msrl_action action;
+	int ret;
+
+	action.mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+	if (isst_if_mbox_cmd_invalid(action.mbox_cmd))
+		return -EINVAL;
+
+	if (isst_if_mbox_cmd_set_req(action.mbox_cmd) &&
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/*
+	 * To complete a mailbox command, we need to access two MSRs.
+	 * So we don't want a race while completing a mailbox transaction.
+	 * Here smp_call_function_single() ensures that msrl_update_func()
+	 * has no race and, with the wait flag set, waits for completion.
+	 * smp_call_function_single() uses get_cpu() and put_cpu().
+	 */
+	ret = smp_call_function_single(action.mbox_cmd->logical_cpu,
+				       msrl_update_func, &action, 1);
+	if (ret)
+		return ret;
+
+	if (!action.err && !resume && isst_if_mbox_cmd_set_req(action.mbox_cmd))
+		action.err = isst_store_cmd(action.mbox_cmd->command,
+					    action.mbox_cmd->sub_command,
+					    action.mbox_cmd->logical_cpu, 1,
+					    action.mbox_cmd->parameter,
+					    action.mbox_cmd->req_data);
+	*write_only = 0;
+
+	return action.err;
+}
+
+static int isst_pm_notify(struct notifier_block *nb,
+			  unsigned long mode, void *_unused)
+{
+	switch (mode) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+	case PM_POST_SUSPEND:
+		isst_resume_common();
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block isst_pm_nb = {
+	.notifier_call = isst_pm_notify,
+};
+
+static const struct x86_cpu_id isst_if_cpu_ids[] = {
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
+
+static int __init isst_if_mbox_init(void)
+{
+	struct isst_if_cmd_cb cb;
+	const struct x86_cpu_id *id;
+	u64 data;
+	int ret;
+
+	id = x86_match_cpu(isst_if_cpu_ids);
+	if (!id)
+		return -ENODEV;
+
+	/* Check presence of mailbox MSRs */
+	ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+	if (ret)
+		return ret;
+
+	ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+	if (ret)
+		return ret;
+
+	memset(&cb, 0, sizeof(cb));
+	cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+	cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+	cb.cmd_callback = isst_if_mbox_proc_cmd;
+	cb.owner = THIS_MODULE;
+	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+	if (ret)
+		return ret;
+
+	ret = register_pm_notifier(&isst_pm_nb);
+	if (ret)
+		isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+
+	return ret;
+}
+module_init(isst_if_mbox_init)
+
+static void __exit isst_if_mbox_exit(void)
+{
+	unregister_pm_notifier(&isst_pm_nb);
+	isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+}
+module_exit(isst_if_mbox_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mailbox driver");
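Both mailbox transports compose the same 32-bit interface word that isst_if_send_mbox_cmd() writes above: run/busy in bit 31, the 14-bit parameter in bits 29:16, the sub-command in bits 15:8 and the command in bits 7:0. A standalone encoder restating that layout:

    #include <stdint.h>

    #define MBOX_BUSY_BIT   (1u << 31)

    /* Compose the mailbox interface word written to MSR 0xB0 here
     * (or to PCI config offset 0xA4 in the PCI variant that follows).
     */
    static inline uint32_t mbox_encode(uint8_t cmd, uint8_t sub_cmd,
                                       uint32_t param)
    {
            return MBOX_BUSY_BIT |
                   ((param & 0x3fff) << 16) |
                   ((uint32_t)sub_cmd << 8) |
                   cmd;
    }

On completion the hardware clears the busy bit, and a non-zero low byte of the interface word signals a firmware error (the -ENXIO path above).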
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
new file mode 100644
index 000000000..df1fc6c71
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_pci.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via PCI Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define PUNIT_MAILBOX_DATA		0xA0
+#define PUNIT_MAILBOX_INTERFACE		0xA4
+#define PUNIT_MAILBOX_BUSY_BIT		31
+
+/*
+ * The average time to complete mailbox commands is less than 40us. Most of
+ * the commands complete in a few microseconds. But the same firmware handles
+ * requests from all power management features.
+ * We can create a scenario where we flood the firmware with requests; then
+ * the mailbox response can be delayed for hundreds of microseconds. So
+ * define two timeouts: one for the average case and one for the long case.
+ * If the firmware is taking longer than average, just call cond_resched().
+ */
+#define OS_MAILBOX_TIMEOUT_AVG_US	40
+#define OS_MAILBOX_TIMEOUT_MAX_US	1000
+
+struct isst_if_device {
+	struct mutex mutex;
+};
+
+static int isst_if_mbox_cmd(struct pci_dev *pdev,
+			    struct isst_if_mbox_cmd *mbox_cmd)
+{
+	s64 tm_delta = 0;
+	ktime_t tm;
+	u32 data;
+	int ret;
+
+	/* Poll for rb bit == 0 */
+	tm = ktime_get();
+	do {
+		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+					    &data);
+		if (ret)
+			return ret;
+
+		if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+			ret = -EBUSY;
+			tm_delta = ktime_us_delta(ktime_get(), tm);
+			if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+				cond_resched();
+			continue;
+		}
+		ret = 0;
+		break;
+	} while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);

+	if (ret)
+		return ret;
+
+	/* Write DATA register */
+	ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_DATA,
+				     mbox_cmd->req_data);
+	if (ret)
+		return ret;
+
+	/* Write command register */
+	data = BIT_ULL(PUNIT_MAILBOX_BUSY_BIT) |
+	       (mbox_cmd->parameter & GENMASK_ULL(13, 0)) << 16 |
+	       (mbox_cmd->sub_command << 8) |
+	       mbox_cmd->command;
+
+	ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, data);
+	if (ret)
+		return ret;
+
+	/* Poll for rb bit == 0 */
+	tm_delta = 0;
+	tm = ktime_get();
+	do {
+		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+					    &data);
+		if (ret)
+			return ret;
+
+		if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+			ret = -EBUSY;
+			tm_delta = ktime_us_delta(ktime_get(), tm);
+			if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
+				cond_resched();
+			continue;
+		}
+
+		if (data & 0xff)
+			return -ENXIO;
+
+		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_DATA, &data);
+		if (ret)
+			return ret;
+
+		mbox_cmd->resp_data = data;
+		ret = 0;
+		break;
+	} while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+
+	return ret;
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+	struct isst_if_mbox_cmd *mbox_cmd;
+	struct isst_if_device *punit_dev;
+	struct pci_dev *pdev;
+	int ret;
+
+	mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+	if (isst_if_mbox_cmd_invalid(mbox_cmd))
+		return -EINVAL;
+
+	if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	pdev = isst_if_get_pci_dev(mbox_cmd->logical_cpu, 1, 30, 1);
+	if (!pdev)
+		return -EINVAL;
+
+	punit_dev = pci_get_drvdata(pdev);
+	if (!punit_dev)
+		return -EINVAL;
+
+	/*
+	 * Allow only one complete mailbox transaction on a mapped PCI
+	 * device at a time.
+	 */
+	mutex_lock(&punit_dev->mutex);
+	ret = isst_if_mbox_cmd(pdev, mbox_cmd);
+	if (!ret && !resume && isst_if_mbox_cmd_set_req(mbox_cmd))
+		ret = isst_store_cmd(mbox_cmd->command,
+				     mbox_cmd->sub_command,
+				     mbox_cmd->logical_cpu, 1,
+				     mbox_cmd->parameter,
+				     mbox_cmd->req_data);
+	mutex_unlock(&punit_dev->mutex);
+	if (ret)
+		return ret;
+
+	*write_only = 0;
+
+	return 0;
+}
+
+static const struct pci_device_id isst_if_mbox_ids[] = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_0)},
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1)},
+	{ 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
+
+static int isst_if_mbox_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct isst_if_device *punit_dev;
+	struct isst_if_cmd_cb cb;
+	int ret;
+
+	punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+	if (!punit_dev)
+		return -ENOMEM;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	mutex_init(&punit_dev->mutex);
+	pci_set_drvdata(pdev, punit_dev);
+
+	memset(&cb, 0, sizeof(cb));
+	cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+	cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+	cb.cmd_callback = isst_if_mbox_proc_cmd;
+	cb.owner = THIS_MODULE;
+	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+
+	if (ret)
+		mutex_destroy(&punit_dev->mutex);
+
+	return ret;
+}
+
+static void isst_if_mbox_remove(struct pci_dev *pdev)
+{
+	struct isst_if_device *punit_dev;
+
+	punit_dev = pci_get_drvdata(pdev);
+	isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+	mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+	isst_resume_common();
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, NULL, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+	.name			= "isst_if_mbox_pci",
+	.id_table		= isst_if_mbox_ids,
+	.probe			= isst_if_mbox_probe,
+	.remove			= isst_if_mbox_remove,
+	.driver.pm		= &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface pci mailbox driver");
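From user space the MSR and PCI mailbox providers are indistinguishable; both serve ISST_IF_MBOX_COMMAND. A sketch issuing command 0x7F, sub-command 0x00 (a get request inside the whitelist ranges in isst_if_common.c; per the intel-speed-select tool this pair reads the CONFIG_TDP levels info), with the usual header and device-node assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/isst_if.h>

    int main(void)
    {
            struct isst_if_mbox_cmds cmds = { .cmd_count = 1 };
            int fd = open("/dev/isst_interface", O_RDWR);

            if (fd < 0)
                    return 1;

            cmds.mbox_cmd[0].logical_cpu = 0;
            cmds.mbox_cmd[0].command = 0x7F;        /* CONFIG_TDP */
            cmds.mbox_cmd[0].sub_command = 0x00;    /* GET_LEVELS_INFO */

            /* Success returns the count of processed commands (1 here) */
            if (ioctl(fd, ISST_IF_MBOX_COMMAND, &cmds) == 1)
                    printf("levels info: 0x%x\n", cmds.mbox_cmd[0].resp_data);
            return 0;
    }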
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
new file mode 100644
index 000000000..ff49025ec
--- /dev/null
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: MMIO Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+struct isst_mmio_range {
+	int beg;
+	int end;
+};
+
+static struct isst_mmio_range mmio_range_devid_0[] = {
+	{0x04, 0x14},
+	{0x20, 0xD0},
+};
+
+static struct isst_mmio_range mmio_range_devid_1[] = {
+	{0x04, 0x14},
+	{0x20, 0x11C},
+};
+
+struct isst_if_device {
+	void __iomem *punit_mmio;
+	u32 range_0[5];
+	u32 range_1[64];
+	struct isst_mmio_range *mmio_range;
+	struct mutex mutex;
+};
+
+static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
+{
+	struct isst_if_device *punit_dev;
+	struct isst_if_io_reg *io_reg;
+	struct pci_dev *pdev;
+
+	io_reg = (struct isst_if_io_reg *)cmd_ptr;
+
+	if (io_reg->reg % 4)
+		return -EINVAL;
+
+	if (io_reg->read_write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	pdev = isst_if_get_pci_dev(io_reg->logical_cpu, 0, 0, 1);
+	if (!pdev)
+		return -EINVAL;
+
+	punit_dev = pci_get_drvdata(pdev);
+	if (!punit_dev)
+		return -EINVAL;
+
+	if (io_reg->reg < punit_dev->mmio_range[0].beg ||
+	    io_reg->reg > punit_dev->mmio_range[1].end)
+		return -EINVAL;
+
+	/*
+	 * Ensure that operation is complete on a PCI device to avoid read
+	 * write race by using per PCI device mutex.
+	 */
+	mutex_lock(&punit_dev->mutex);
+	if (io_reg->read_write) {
+		writel(io_reg->value, punit_dev->punit_mmio + io_reg->reg);
+		*write_only = 1;
+	} else {
+		io_reg->value = readl(punit_dev->punit_mmio + io_reg->reg);
+		*write_only = 0;
+	}
+	mutex_unlock(&punit_dev->mutex);
+
+	return 0;
+}
+
+static const struct pci_device_id isst_if_ids[] = {
+	{ PCI_DEVICE_DATA(INTEL, RAPL_PRIO_DEVID_0, &mmio_range_devid_0)},
+	{ PCI_DEVICE_DATA(INTEL, RAPL_PRIO_DEVID_1, &mmio_range_devid_1)},
+	{ 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_ids);
+
+static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct isst_if_device *punit_dev;
+	struct isst_if_cmd_cb cb;
+	u32 mmio_base, pcu_base;
+	u64 base_addr;
+	int ret;
+
+	punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+	if (!punit_dev)
+		return -ENOMEM;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_read_config_dword(pdev, 0xD0, &mmio_base);
+	if (ret)
+		return ret;
+
+	ret = pci_read_config_dword(pdev, 0xFC, &pcu_base);
+	if (ret)
+		return ret;
+
+	pcu_base &= GENMASK(10, 0);
+	base_addr = (u64)mmio_base << 23 | (u64)pcu_base << 12;
+	punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256);
+	if (!punit_dev->punit_mmio)
+		return -ENOMEM;
+
+	mutex_init(&punit_dev->mutex);
+	pci_set_drvdata(pdev, punit_dev);
+	punit_dev->mmio_range = (struct isst_mmio_range *)ent->driver_data;
+
+	memset(&cb, 0, sizeof(cb));
+	cb.cmd_size = sizeof(struct isst_if_io_reg);
+	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
+	cb.cmd_callback = isst_if_mmio_rd_wr;
+	cb.owner = THIS_MODULE;
+	ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb);
+	if (ret)
+		mutex_destroy(&punit_dev->mutex);
+
+	return ret;
+}
+
+static void isst_if_remove(struct pci_dev *pdev)
+{
+	struct isst_if_device *punit_dev;
+
+	punit_dev = pci_get_drvdata(pdev);
+	isst_if_cdev_unregister(ISST_IF_DEV_MMIO);
+	mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_suspend(struct device *device)
+{
+	struct isst_if_device *punit_dev = dev_get_drvdata(device);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+		punit_dev->range_0[i] = readl(punit_dev->punit_mmio +
+					      punit_dev->mmio_range[0].beg + 4 * i);
+	for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) {
+		u32 addr;
+
+		addr = punit_dev->mmio_range[1].beg + 4 * i;
+		if (addr > punit_dev->mmio_range[1].end)
+			break;
+		punit_dev->range_1[i] = readl(punit_dev->punit_mmio + addr);
+	}
+
+	return 0;
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+	struct isst_if_device *punit_dev = dev_get_drvdata(device);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+		writel(punit_dev->range_0[i], punit_dev->punit_mmio +
+		       punit_dev->mmio_range[0].beg + 4 * i);
+	for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i) {
+		u32 addr;
+
+		addr = punit_dev->mmio_range[1].beg + 4 * i;
+		if (addr > punit_dev->mmio_range[1].end)
+			break;
+
+		writel(punit_dev->range_1[i], punit_dev->punit_mmio + addr);
+	}
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, isst_if_suspend, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+	.name			= "isst_if_pci",
+	.id_table		= isst_if_ids,
+	.probe			= isst_if_probe,
+	.remove			= isst_if_remove,
+	.driver.pm		= &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mmio driver");
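The PUNIT MMIO window mapped in isst_if_probe() above is spliced from two config-space reads: the value at offset 0xD0 supplies the upper address bits (shifted left by 23) and the low 11 bits of the value at offset 0xFC supply the PCU page (shifted left by 12), with 256 bytes mapped from the result. A standalone restatement of that arithmetic:

    #include <stdint.h>

    /* Reconstruct the PUNIT MMIO base the way isst_if_probe() does;
     * 256 bytes are mapped starting at the returned address.
     */
    static inline uint64_t punit_mmio_base(uint32_t mmio_base, uint32_t pcu_base)
    {
            pcu_base &= 0x7ff;                      /* GENMASK(10, 0) */
            return ((uint64_t)mmio_base << 23) | ((uint64_t)pcu_base << 12);
    }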