author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
commit     8665bd53f2f2e27e5511d90428cb3f60e6d0ce15
tree       8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2  /drivers/platform/x86/intel
parent     Adding debian version 6.7.12-1.
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/platform/x86/intel')
30 files changed, 2460 insertions, 254 deletions
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 848baecc1b..93f75ba1da 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -136,7 +136,7 @@ static const struct software_node altmodes_node = {
 };
 
 static const struct property_entry dp_altmode_properties[] = {
-	PROPERTY_ENTRY_U32("svid", 0xff01),
+	PROPERTY_ENTRY_U16("svid", 0xff01),
 	PROPERTY_ENTRY_U32("vdo", 0x0c0086),
 	{ }
 };
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b27..9ffbdc988f 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -504,6 +504,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	struct platform_device *device = context;
 	struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
 	unsigned long long ev_index;
+	struct key_entry *ke;
 	int err;
 
 	/*
@@ -545,11 +546,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	if (event == 0xc0 || !priv->array)
 		return;
 
-	if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+	ke = sparse_keymap_entry_from_scancode(priv->array, event);
+	if (!ke) {
 		dev_info(&device->dev, "unknown event 0x%x\n", event);
 		return;
 	}
 
+	if (ke->type == KE_IGNORE)
+		return;
+
 wakeup:
 	pm_wakeup_hard_event(&device->dev);
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index b6708bab7c..527d8fbc7c 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
 	 * IRQs into gpiolib.
 	 */
 	ret = devm_request_irq(dev, irq, int0002_irq,
-			       IRQF_SHARED, "INT0002", chip);
+			       IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
 	if (ret) {
 		dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
 		return ret;
diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig
index b526597e4d..d2f651fbec 100644
--- a/drivers/platform/x86/intel/pmc/Kconfig
+++ b/drivers/platform/x86/intel/pmc/Kconfig
@@ -7,6 +7,7 @@ config INTEL_PMC_CORE
 	tristate "Intel PMC Core driver"
 	depends on PCI
 	depends on ACPI
+	depends on INTEL_PMT_TELEMETRY
 	help
 	  The Intel Platform Controller Hub for Intel Core SoCs provides access
 	  to Power Management Controller registers via various interfaces.
This diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile index 3a4cf1cbc1..389e5419da 100644 --- a/drivers/platform/x86/intel/pmc/Makefile +++ b/drivers/platform/x86/intel/pmc/Makefile @@ -4,7 +4,7 @@ # intel_pmc_core-y := core.o core_ssram.o spt.o cnp.o \ - icl.o tgl.o adl.o mtl.o + icl.o tgl.o adl.o mtl.o arl.o lnl.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv-y := pltdrv.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c index 606f7678bc..e7878558fd 100644 --- a/drivers/platform/x86/intel/pmc/adl.c +++ b/drivers/platform/x86/intel/pmc/adl.c @@ -307,6 +307,8 @@ const struct pmc_reg_map adl_reg_map = { .lpm_sts = adl_lpm_maps, .lpm_status_offset = ADL_LPM_STATUS_OFFSET, .lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET, + .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, + .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, }; int adl_core_init(struct pmc_dev *pmcdev) @@ -322,5 +324,7 @@ int adl_core_init(struct pmc_dev *pmcdev) if (ret) return ret; + pmc_core_get_low_power_modes(pmcdev); + return 0; } diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c new file mode 100644 index 0000000000..34b4cd23bf --- /dev/null +++ b/drivers/platform/x86/intel/pmc/arl.c @@ -0,0 +1,731 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains platform specific structure definitions + * and init function used by Meteor Lake PCH. + * + * Copyright (c) 2022, Intel Corporation. + * All Rights Reserved. + * + */ + +#include <linux/pci.h> +#include "core.h" +#include "../pmt/telemetry.h" + +/* PMC SSRAM PMT Telemetry GUID */ +#define IOEP_LPM_REQ_GUID 0x5077612 +#define SOCS_LPM_REQ_GUID 0x8478657 +#define PCHS_LPM_REQ_GUID 0x9684572 + +static const u8 ARL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20}; + +const struct pmc_bit_map arl_socs_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + /* EVA is Enterprise Value Add, doesn't really exist on PCH */ + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + /* + * Check intel_pmc_core_ids[] users of cnp_reg_map for + * a list of core SoCs using this. 
+ */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"Reserved", ARL_SOCS_PMC_LTR_RESERVED}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +const struct pmc_bit_map arl_socs_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON5_OFF_STS", BIT(3)}, + {"AON1_OFF_STS", BIT(4)}, + {"XTAL_LVM_OFF_STS", BIT(5)}, + {"AON3_SPL_OFF_STS", BIT(9)}, + {"DMI3FPW_0_PLL_OFF_STS", BIT(10)}, + {"DMI3FPW_1_PLL_OFF_STS", BIT(11)}, + {"G5X16FPW_0_PLL_OFF_STS", BIT(14)}, + {"G5X16FPW_1_PLL_OFF_STS", BIT(15)}, + {"G5X16FPW_2_PLL_OFF_STS", BIT(16)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"USB2_PLL_OFF_STS", BIT(18)}, + {"G5X16FPW_3_PLL_OFF_STS", BIT(19)}, + {"BCLK_EXT_INJ_CLK_OFF_STS", BIT(20)}, + {"PHY_OC_EXT_INJ_CLK_OFF_STS", BIT(21)}, + {"FILTER_PLL_OFF_STS", BIT(22)}, + {"FABRIC_PLL_OFF_STS", BIT(25)}, + {"SOC_PLL_OFF_STS", BIT(26)}, + {"PCIEFAB_PLL_OFF_STS", BIT(27)}, + {"REF_PLL_OFF_STS", BIT(28)}, + {"GENLOCK_FILTER_PLL_OFF_STS", BIT(30)}, + {"RTC_PLL_OFF_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"DMI_PGD0_PG_STS", BIT(1)}, + {"ESPISPI_PGD0_PG_STS", BIT(2)}, + {"XHCI_PGD0_PG_STS", BIT(3)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPC_PGD0_PG_STS", BIT(6)}, + {"GBE_PGD0_PG_STS", BIT(7)}, + {"SATA_PGD0_PG_STS", BIT(8)}, + {"FIACPCB_P5x16_PGD0_PG_STS", BIT(9)}, + {"G5x16FPW_PGD0_PG_STS", BIT(10)}, + {"FIA_D_PGD0_PG_STS", BIT(11)}, + {"MPFPW2_PGD0_PG_STS", BIT(12)}, + {"SPD_PGD0_PG_STS", BIT(13)}, + {"LPSS_PGD0_PG_STS", BIT(14)}, + {"LPC_PGD0_PG_STS", BIT(15)}, + {"SMB_PGD0_PG_STS", BIT(16)}, + {"ISH_PGD0_PG_STS", BIT(17)}, + {"P2S_PGD0_PG_STS", BIT(18)}, + {"NPK_PGD0_PG_STS", BIT(19)}, + {"DMI3FPW_PGD0_PG_STS", BIT(20)}, + {"GBETSN1_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"FIACPCB_D_PGD0_PG_STS", BIT(23)}, + {"FUSEGPSB_PGD0_PG_STS", BIT(24)}, + {"XDCI_PGD0_PG_STS", BIT(25)}, + {"EXI_PGD0_PG_STS", BIT(26)}, + {"CSE_PGD0_PG_STS", BIT(27)}, + {"KVMCC_PGD0_PG_STS", BIT(28)}, + {"PMT_PGD0_PG_STS", BIT(29)}, + {"CLINK_PGD0_PG_STS", BIT(30)}, + {"PTIO_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0)}, + {"SUSRAM_PGD0_PG_STS", BIT(1)}, + {"SMT1_PGD0_PG_STS", BIT(2)}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3)}, + {"SMS2_PGD0_PG_STS", BIT(4)}, + {"SMS1_PGD0_PG_STS", BIT(5)}, + {"CSMERTC_PGD0_PG_STS", BIT(6)}, + {"CSMEPSF_PGD0_PG_STS", BIT(7)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"MPFPW1_PGD0_PG_STS", BIT(12)}, + {"SBR5_PGD0_PG_STS", BIT(13)}, + {"FIA_X_PGD0_PG_STS", BIT(14)}, + {"FIACPCB_X_PGD0_PG_STS", BIT(15)}, + {"SBRG_PGD0_PG_STS", BIT(16)}, + {"SOC_D2D_PGD1_PG_STS", BIT(17)}, + {"PSF4_PGD0_PG_STS", BIT(18)}, + {"CNVI_PGD0_PG_STS", BIT(19)}, + {"UFSX2_PGD0_PG_STS", BIT(20)}, + {"ENDBG_PGD0_PG_STS", BIT(21)}, + {"DBG_PSF_PGD0_PG_STS", BIT(22)}, + {"SBR6_PGD0_PG_STS", BIT(23)}, + {"SOC_D2D_PGD2_PG_STS", BIT(24)}, + {"NPK_PGD1_PG_STS", BIT(25)}, + {"DMI3_PGD0_PG_STS", BIT(26)}, + {"DBG_SBR_PGD0_PG_STS", BIT(27)}, + {"SOC_D2D_PGD0_PG_STS", BIT(28)}, + {"PSF6_PGD0_PG_STS", BIT(29)}, + {"PSF7_PGD0_PG_STS", 
BIT(30)}, + {"MPFPW3_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0)}, + {"FIA_PGD0_PG_STS", BIT(1)}, + {"SOC_D2D_PGD3_PG_STS", BIT(2)}, + {"FIA_U_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"GBETSN_PGD0_PG_STS", BIT(5)}, + {"TBTLSX_PGD0_PG_STS", BIT(6)}, + {"THC0_PGD0_PG_STS", BIT(7)}, + {"THC1_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD1_PG_STS", BIT(9)}, + {"FIA_P5x16_PGD0_PG_STS", BIT(10)}, + {"GNA_PGD0_PG_STS", BIT(11)}, + {"ACE_PGD0_PG_STS", BIT(12)}, + {"ACE_PGD1_PG_STS", BIT(13)}, + {"ACE_PGD2_PG_STS", BIT(14)}, + {"ACE_PGD3_PG_STS", BIT(15)}, + {"ACE_PGD4_PG_STS", BIT(16)}, + {"ACE_PGD5_PG_STS", BIT(17)}, + {"ACE_PGD6_PG_STS", BIT(18)}, + {"ACE_PGD7_PG_STS", BIT(19)}, + {"ACE_PGD8_PG_STS", BIT(20)}, + {"FIA_PGS_PGD0_PG_STS", BIT(21)}, + {"FIACPCB_PGS_PGD0_PG_STS", BIT(22)}, + {"FUSEPMSB_PGD0_PG_STS", BIT(23)}, + {} +}; + +const struct pmc_bit_map arl_socs_d3_status_2_map[] = { + {"CSMERTC_D3_STS", BIT(1)}, + {"SUSRAM_D3_STS", BIT(2)}, + {"CSE_D3_STS", BIT(4)}, + {"KVMCC_D3_STS", BIT(5)}, + {"USBR0_D3_STS", BIT(6)}, + {"ISH_D3_STS", BIT(7)}, + {"SMT1_D3_STS", BIT(8)}, + {"SMT2_D3_STS", BIT(9)}, + {"SMT3_D3_STS", BIT(10)}, + {"GNA_D3_STS", BIT(12)}, + {"CLINK_D3_STS", BIT(14)}, + {"PTIO_D3_STS", BIT(16)}, + {"PMT_D3_STS", BIT(17)}, + {"SMS1_D3_STS", BIT(18)}, + {"SMS2_D3_STS", BIT(19)}, + {} +}; + +const struct pmc_bit_map arl_socs_d3_status_3_map[] = { + {"GBETSN_D3_STS", BIT(13)}, + {"THC0_D3_STS", BIT(14)}, + {"THC1_D3_STS", BIT(15)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = { + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, + {} +}; + +const struct pmc_bit_map *arl_socs_lpm_maps[] = { + arl_socs_clocksource_status_map, + arl_socs_power_gating_status_0_map, + arl_socs_power_gating_status_1_map, + arl_socs_power_gating_status_2_map, + mtl_socm_d3_status_0_map, + mtl_socm_d3_status_1_map, + arl_socs_d3_status_2_map, + arl_socs_d3_status_3_map, + mtl_socm_vnn_req_status_0_map, + mtl_socm_vnn_req_status_1_map, + mtl_socm_vnn_req_status_2_map, + arl_socs_vnn_req_status_3_map, + mtl_socm_vnn_misc_status_map, + mtl_socm_signal_status_map, + NULL +}; + +const struct pmc_bit_map arl_socs_pfear_map[] = { + {"RSVD64", BIT(0)}, + {"RSVD65", BIT(1)}, + {"RSVD66", BIT(2)}, + {"RSVD67", BIT(3)}, + {"RSVD68", BIT(4)}, + {"GBETSN", BIT(5)}, + {"TBTLSX", BIT(6)}, + {} +}; + +const struct pmc_bit_map *ext_arl_socs_pfear_map[] = { + mtl_socm_pfear_map, + arl_socs_pfear_map, + NULL +}; + +const struct pmc_reg_map arl_socs_reg_map = { + .pfear_sts = ext_arl_socs_pfear_map, + .ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .lpm_sts = arl_socs_lpm_maps, + .ltr_ignore_max = ARL_SOCS_NUM_IP_IGN_ALLOWED, + .ltr_show_sts = arl_socs_ltr_show_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = MTL_SOC_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_live_status_offset = 
MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_reg_index = ARL_LPM_REG_INDEX, + .etr3_offset = ETR3_OFFSET, + .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, + .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, +}; + +const struct pmc_bit_map arl_pchs_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + /* EVA is Enterprise Value Add, doesn't really exist on PCH */ + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + /* + * Check intel_pmc_core_ids[] users of cnp_reg_map for + * a list of core SoCs using this. + */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", MTL_PMC_LTR_SPG}, + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +const struct pmc_bit_map arl_pchs_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON2_SPL_OFF_STS", BIT(3)}, + {"AONL_OFF_STS", BIT(4)}, + {"XTAL_LVM_OFF_STS", BIT(5)}, + {"AON5_ACRO_OFF_STS", BIT(6)}, + {"AON6_ACRO_OFF_STS", BIT(7)}, + {"USB3_PLL_OFF_STS", BIT(8)}, + {"ACRO_OFF_STS", BIT(9)}, + {"AUDIO_PLL_OFF_STS", BIT(10)}, + {"MAIN_CRO_OFF_STS", BIT(11)}, + {"MAIN_DIVIDER_OFF_STS", BIT(12)}, + {"REF_PLL_NON_OC_OFF_STS", BIT(13)}, + {"DMI_PLL_OFF_STS", BIT(14)}, + {"PHY_EXT_INJ_OFF_STS", BIT(15)}, + {"AON6_MCRO_OFF_STS", BIT(16)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"USB2_PLL_OFF_STS", BIT(18)}, + {"TSN0_PLL_OFF_STS", BIT(19)}, + {"TSN1_PLL_OFF_STS", BIT(20)}, + {"GBE_PLL_OFF_STS", BIT(21)}, + {"SATA_PLL_OFF_STS", BIT(22)}, + {"PCIE0_PLL_OFF_STS", BIT(23)}, + {"PCIE1_PLL_OFF_STS", BIT(24)}, + {"PCIE2_PLL_OFF_STS", BIT(26)}, + {"PCIE3_PLL_OFF_STS", BIT(27)}, + {"REF_PLL_OFF_STS", BIT(28)}, + {"PCIE4_PLL_OFF_STS", BIT(29)}, + {"PCIE5_PLL_OFF_STS", BIT(30)}, + {"REF38P4_PLL_OFF_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"DMI_PGD0_PG_STS", BIT(1)}, + {"ESPISPI_PGD0_PG_STS", BIT(2)}, + {"XHCI_PGD0_PG_STS", BIT(3)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPC_PGD0_PG_STS", BIT(6)}, + {"GBE_PGD0_PG_STS", BIT(7)}, + {"SATA_PGD0_PG_STS", BIT(8)}, + {"FIA_X_PGD0_PG_STS", BIT(9)}, + {"MPFPW4_PGD0_PG_STS", BIT(10)}, + {"EAH_PGD0_PG_STS", BIT(11)}, + {"MPFPW1_PGD0_PG_STS", BIT(12)}, + {"SPD_PGD0_PG_STS", BIT(13)}, + {"LPSS_PGD0_PG_STS", BIT(14)}, + {"LPC_PGD0_PG_STS", BIT(15)}, + {"SMB_PGD0_PG_STS", BIT(16)}, + {"ISH_PGD0_PG_STS", BIT(17)}, + {"P2S_PGD0_PG_STS", BIT(18)}, + {"NPK_PGD0_PG_STS", BIT(19)}, + {"U3FPW1_PGD0_PG_STS", BIT(20)}, + {"PECI_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"SBR8_PGD0_PG_STS", BIT(23)}, + {"EXE_PGD0_PG_STS", BIT(24)}, + {"XDCI_PGD0_PG_STS", BIT(25)}, + {"EXI_PGD0_PG_STS", BIT(26)}, + {"CSE_PGD0_PG_STS", 
BIT(27)}, + {"KVMCC_PGD0_PG_STS", BIT(28)}, + {"PMT_PGD0_PG_STS", BIT(29)}, + {"CLINK_PGD0_PG_STS", BIT(30)}, + {"PTIO_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0)}, + {"SUSRAM_PGD0_PG_STS", BIT(1)}, + {"SMT1_PGD0_PG_STS", BIT(2)}, + {"SMT4_PGD0_PG_STS", BIT(3)}, + {"SMS2_PGD0_PG_STS", BIT(4)}, + {"SMS1_PGD0_PG_STS", BIT(5)}, + {"CSMERTC_PGD0_PG_STS", BIT(6)}, + {"CSMEPSF_PGD0_PG_STS", BIT(7)}, + {"SBR0_PGD0_PG_STS", BIT(8)}, + {"SBR1_PGD0_PG_STS", BIT(9)}, + {"SBR2_PGD0_PG_STS", BIT(10)}, + {"SBR3_PGD0_PG_STS", BIT(11)}, + {"SBR4_PGD0_PG_STS", BIT(12)}, + {"SBR5_PGD0_PG_STS", BIT(13)}, + {"MPFPW3_PGD0_PG_STS", BIT(14)}, + {"PSF1_PGD0_PG_STS", BIT(15)}, + {"PSF2_PGD0_PG_STS", BIT(16)}, + {"PSF3_PGD0_PG_STS", BIT(17)}, + {"PSF4_PGD0_PG_STS", BIT(18)}, + {"CNVI_PGD0_PG_STS", BIT(19)}, + {"DMI3_PGD0_PG_STS", BIT(20)}, + {"ENDBG_PGD0_PG_STS", BIT(21)}, + {"DBG_SBR_PGD0_PG_STS", BIT(22)}, + {"SBR6_PGD0_PG_STS", BIT(23)}, + {"SBR7_PGD0_PG_STS", BIT(24)}, + {"NPK_PGD1_PG_STS", BIT(25)}, + {"U3FPW3_PGD0_PG_STS", BIT(26)}, + {"MPFPW2_PGD0_PG_STS", BIT(27)}, + {"MPFPW7_PGD0_PG_STS", BIT(28)}, + {"GBETSN1_PGD0_PG_STS", BIT(29)}, + {"PSF7_PGD0_PG_STS", BIT(30)}, + {"FIA2_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = { + {"U3FPW2_PGD0_PG_STS", BIT(0)}, + {"FIA_PGD0_PG_STS", BIT(1)}, + {"FIACPCB_X_PGD0_PG_STS", BIT(2)}, + {"FIA1_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"GBETSN_PGD0_PG_STS", BIT(5)}, + {"SBR9_PGD0_PG_STS", BIT(6)}, + {"THC0_PGD0_PG_STS", BIT(7)}, + {"THC1_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD1_PG_STS", BIT(9)}, + {"DBC_PGD0_PG_STS", BIT(10)}, + {"DBG_PSF_PGD0_PG_STS", BIT(11)}, + {"SPF_PGD0_PG_STS", BIT(12)}, + {"ACE_PGD0_PG_STS", BIT(13)}, + {"ACE_PGD1_PG_STS", BIT(14)}, + {"ACE_PGD2_PG_STS", BIT(15)}, + {"ACE_PGD3_PG_STS", BIT(16)}, + {"ACE_PGD4_PG_STS", BIT(17)}, + {"ACE_PGD5_PG_STS", BIT(18)}, + {"ACE_PGD6_PG_STS", BIT(19)}, + {"ACE_PGD7_PG_STS", BIT(20)}, + {"SPE_PGD0_PG_STS", BIT(21)}, + {"MPFPW5_PG_STS", BIT(22)}, + {} +}; + +const struct pmc_bit_map arl_pchs_d3_status_0_map[] = { + {"SPF_D3_STS", BIT(0)}, + {"LPSS_D3_STS", BIT(3)}, + {"XDCI_D3_STS", BIT(4)}, + {"XHCI_D3_STS", BIT(5)}, + {"SPA_D3_STS", BIT(12)}, + {"SPB_D3_STS", BIT(13)}, + {"SPC_D3_STS", BIT(14)}, + {"SPD_D3_STS", BIT(15)}, + {"SPE_D3_STS", BIT(16)}, + {"ESPISPI_D3_STS", BIT(18)}, + {"SATA_D3_STS", BIT(20)}, + {"PSTH_D3_STS", BIT(21)}, + {"DMI_D3_STS", BIT(22)}, + {} +}; + +const struct pmc_bit_map arl_pchs_d3_status_1_map[] = { + {"GBETSN1_D3_STS", BIT(14)}, + {"GBE_D3_STS", BIT(19)}, + {"ITSS_D3_STS", BIT(23)}, + {"P2S_D3_STS", BIT(24)}, + {"CNVI_D3_STS", BIT(27)}, + {} +}; + +const struct pmc_bit_map arl_pchs_d3_status_2_map[] = { + {"CSMERTC_D3_STS", BIT(1)}, + {"SUSRAM_D3_STS", BIT(2)}, + {"CSE_D3_STS", BIT(4)}, + {"KVMCC_D3_STS", BIT(5)}, + {"USBR0_D3_STS", BIT(6)}, + {"ISH_D3_STS", BIT(7)}, + {"SMT1_D3_STS", BIT(8)}, + {"SMT2_D3_STS", BIT(9)}, + {"SMT3_D3_STS", BIT(10)}, + {"SMT4_D3_STS", BIT(11)}, + {"SMT5_D3_STS", BIT(12)}, + {"SMT6_D3_STS", BIT(13)}, + {"CLINK_D3_STS", BIT(14)}, + {"PTIO_D3_STS", BIT(16)}, + {"PMT_D3_STS", BIT(17)}, + {"SMS1_D3_STS", BIT(18)}, + {"SMS2_D3_STS", BIT(19)}, + {} +}; + +const struct pmc_bit_map arl_pchs_d3_status_3_map[] = { + {"ESE_D3_STS", BIT(3)}, + {"GBETSN_D3_STS", BIT(13)}, + {"THC0_D3_STS", BIT(14)}, + {"THC1_D3_STS", BIT(15)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +const struct pmc_bit_map 
arl_pchs_vnn_req_status_0_map[] = { + {"FIA_VNN_REQ_STS", BIT(17)}, + {"ESPISPI_VNN_REQ_STS", BIT(18)}, + {} +}; + +const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4)}, + {"DFXAGG_VNN_REQ_STS", BIT(8)}, + {"EXI_VNN_REQ_STS", BIT(9)}, + {"GBE_VNN_REQ_STS", BIT(19)}, + {"SMB_VNN_REQ_STS", BIT(25)}, + {"LPC_VNN_REQ_STS", BIT(26)}, + {"CNVI_VNN_REQ_STS", BIT(27)}, + {} +}; + +const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = { + {"FIA2_VNN_REQ_STS", BIT(0)}, + {"CSMERTC_VNN_REQ_STS", BIT(1)}, + {"CSE_VNN_REQ_STS", BIT(4)}, + {"ISH_VNN_REQ_STS", BIT(7)}, + {"SMT1_VNN_REQ_STS", BIT(8)}, + {"SMT4_VNN_REQ_STS", BIT(11)}, + {"CLINK_VNN_REQ_STS", BIT(14)}, + {"SMS1_VNN_REQ_STS", BIT(18)}, + {"SMS2_VNN_REQ_STS", BIT(19)}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20)}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21)}, + {"GPIOCOM2_VNN_REQ_STS", BIT(22)}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23)}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24)}, + {} +}; + +const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = { + {"ESE_VNN_REQ_STS", BIT(3)}, + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, + {"FIA1_VNN_REQ_STS", BIT(12)}, + {} +}; + +const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0)}, + {"TS_OFF_REQ_STS", BIT(1)}, + {"PNDE_MET_REQ_STS", BIT(2)}, + {"PCIE_DEEP_PM_REQ_STS", BIT(3)}, + {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4)}, + {"ISH_VNNAON_REQ_STS", BIT(7)}, + {"IOE_COND_MET_S02I2_0_REQ_STS", BIT(8)}, + {"IOE_COND_MET_S02I2_1_REQ_STS", BIT(9)}, + {"IOE_COND_MET_S02I2_2_REQ_STS", BIT(10)}, + {"PLT_GREATER_REQ_STS", BIT(11)}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)}, + {"PM_SYNC_STATES_REQ_STS", BIT(14)}, + {"EA_REQ_STS", BIT(15)}, + {"DMI_CLKREQ_B_REQ_STS", BIT(16)}, + {"BRK_EV_EN_REQ_STS", BIT(17)}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18)}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19)}, + {"ARC_IDLE_REQ_STS", BIT(21)}, + {"DMI_IN_REQ_STS", BIT(22)}, + {"FIA_DEEP_PM_REQ_STS", BIT(23)}, + {"XDCI_ATTACHED_REQ_STS", BIT(24)}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)}, + {"PRE_WAKE0_REQ_STS", BIT(27)}, + {"PRE_WAKE1_REQ_STS", BIT(28)}, + {"PRE_WAKE2_EN_REQ_STS", BIT(29)}, + {"CNVI_V1P05_REQ_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map arl_pchs_signal_status_map[] = { + {"LSX_Wake0_STS", BIT(0)}, + {"LSX_Wake1_STS", BIT(1)}, + {"LSX_Wake2_STS", BIT(2)}, + {"LSX_Wake3_STS", BIT(3)}, + {"LSX_Wake4_STS", BIT(4)}, + {"LSX_Wake5_STS", BIT(5)}, + {"LSX_Wake6_STS", BIT(6)}, + {"LSX_Wake7_STS", BIT(7)}, + {"Int_Timer_SS_Wake0_STS", BIT(8)}, + {"Int_Timer_SS_Wake1_STS", BIT(9)}, + {"Int_Timer_SS_Wake0_STS", BIT(10)}, + {"Int_Timer_SS_Wake1_STS", BIT(11)}, + {"Int_Timer_SS_Wake2_STS", BIT(12)}, + {"Int_Timer_SS_Wake3_STS", BIT(13)}, + {"Int_Timer_SS_Wake4_STS", BIT(14)}, + {"Int_Timer_SS_Wake5_STS", BIT(15)}, + {} +}; + +const struct pmc_bit_map *arl_pchs_lpm_maps[] = { + arl_pchs_clocksource_status_map, + arl_pchs_power_gating_status_0_map, + arl_pchs_power_gating_status_1_map, + arl_pchs_power_gating_status_2_map, + arl_pchs_d3_status_0_map, + arl_pchs_d3_status_1_map, + arl_pchs_d3_status_2_map, + arl_pchs_d3_status_3_map, + arl_pchs_vnn_req_status_0_map, + arl_pchs_vnn_req_status_1_map, + arl_pchs_vnn_req_status_2_map, + arl_pchs_vnn_req_status_3_map, + arl_pchs_vnn_misc_status_map, + arl_pchs_signal_status_map, + NULL +}; + +const struct pmc_reg_map arl_pchs_reg_map = { + .pfear_sts = ext_arl_socs_pfear_map, + .ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = 
ARL_SOCS_NUM_IP_IGN_ALLOWED, + .lpm_sts = arl_pchs_lpm_maps, + .ltr_show_sts = arl_pchs_ltr_show_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = ARL_PCH_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_reg_index = ARL_LPM_REG_INDEX, + .etr3_offset = ETR3_OFFSET, +}; + +#define PMC_DEVID_SOCS 0xae7f +#define PMC_DEVID_IOEP 0x7ecf +#define PMC_DEVID_PCHS 0x7f27 +static struct pmc_info arl_pmc_info_list[] = { + { + .guid = IOEP_LPM_REQ_GUID, + .devid = PMC_DEVID_IOEP, + .map = &mtl_ioep_reg_map, + }, + { + .guid = SOCS_LPM_REQ_GUID, + .devid = PMC_DEVID_SOCS, + .map = &arl_socs_reg_map, + }, + { + .guid = PCHS_LPM_REQ_GUID, + .devid = PMC_DEVID_PCHS, + .map = &arl_pchs_reg_map, + }, + {} +}; + +#define ARL_NPU_PCI_DEV 0xad1d +#define ARL_GNA_PCI_DEV 0xae4c +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. + */ +static void arl_d3_fixup(void) +{ + pmc_core_set_device_d3(ARL_NPU_PCI_DEV); + pmc_core_set_device_d3(ARL_GNA_PCI_DEV); +} + +static int arl_resume(struct pmc_dev *pmcdev) +{ + arl_d3_fixup(); + pmc_core_send_ltr_ignore(pmcdev, 3, 0); + + return pmc_core_resume_common(pmcdev); +} + +int arl_core_init(struct pmc_dev *pmcdev) +{ + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC]; + int ret; + int func = 0; + bool ssram_init = true; + + arl_d3_fixup(); + pmcdev->suspend = cnl_suspend; + pmcdev->resume = arl_resume; + pmcdev->regmap_list = arl_pmc_info_list; + + /* + * If ssram init fails use legacy method to at least get the + * primary PMC + */ + ret = pmc_core_ssram_init(pmcdev, func); + if (ret) { + ssram_init = false; + pmc->map = &arl_socs_reg_map; + + ret = get_primary_reg_base(pmc); + if (ret) + return ret; + } + + pmc_core_get_low_power_modes(pmcdev); + pmc_core_punit_pmt_init(pmcdev, ARL_PMT_DMU_GUID); + + if (ssram_init) { + ret = pmc_core_ssram_get_lpm_reqs(pmcdev); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index 98b3665120..dd72974bf7 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ b/drivers/platform/x86/intel/pmc/cnp.c @@ -234,5 +234,7 @@ int cnp_core_init(struct pmc_dev *pmcdev) if (ret) return ret; + pmc_core_get_low_power_modes(pmcdev); + return 0; } diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 022afb97d5..8f9c036809 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -20,6 +20,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/suspend.h> +#include <linux/units.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> @@ -27,6 +28,7 @@ #include <asm/tsc.h> #include "core.h" +#include "../pmt/telemetry.h" /* Maximum number of modes supported by platfoms that has low power mode capability */ const char *pmc_lpm_modes[] = { @@ -206,6 +208,20 @@ static int pmc_core_dev_state_get(void 
*data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n"); +static int pmc_core_pson_residency_get(void *data, u64 *val) +{ + struct pmc *pmc = data; + const struct pmc_reg_map *map = pmc->map; + u32 value; + + value = pmc_core_reg_read(pmc, map->pson_residency_offset); + *val = (u64)value * map->pson_residency_counter_step; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n"); + static int pmc_core_check_read_lock_bit(struct pmc *pmc) { u32 value; @@ -731,7 +747,7 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs); -static void pmc_core_substate_req_header_show(struct seq_file *s) +static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index) { struct pmc_dev *pmcdev = s->private; int i, mode; @@ -746,71 +762,125 @@ static void pmc_core_substate_req_header_show(struct seq_file *s) static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; - const struct pmc_bit_map **maps = pmc->map->lpm_sts; - const struct pmc_bit_map *map; - const int num_maps = pmc->map->lpm_num_maps; - u32 sts_offset = pmc->map->lpm_status_offset; - u32 *lpm_req_regs = pmc->lpm_req_regs; - int mp; + u32 sts_offset; + u32 *lpm_req_regs; + int num_maps, mp, pmc_index; + + for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) { + struct pmc *pmc = pmcdev->pmcs[pmc_index]; + const struct pmc_bit_map **maps; - /* Display the header */ - pmc_core_substate_req_header_show(s); + if (!pmc) + continue; - /* Loop over maps */ - for (mp = 0; mp < num_maps; mp++) { - u32 req_mask = 0; - u32 lpm_status; - int mode, idx, i, len = 32; + maps = pmc->map->lpm_sts; + num_maps = pmc->map->lpm_num_maps; + sts_offset = pmc->map->lpm_status_offset; + lpm_req_regs = pmc->lpm_req_regs; /* - * Capture the requirements and create a mask so that we only - * show an element if it's required for at least one of the - * enabled low power modes + * When there are multiple PMCs, though the PMC may exist, the + * requirement register discovery could have failed so check + * before accessing. 
*/ - pmc_for_each_mode(idx, mode, pmcdev) - req_mask |= lpm_req_regs[mp + (mode * num_maps)]; - - /* Get the last latched status for this map */ - lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4)); - - /* Loop over elements in this map */ - map = maps[mp]; - for (i = 0; map[i].name && i < len; i++) { - u32 bit_mask = map[i].bit_mask; - - if (!(bit_mask & req_mask)) - /* - * Not required for any enabled states - * so don't display - */ - continue; - - /* Display the element name in the first column */ - seq_printf(s, "%30s |", map[i].name); - - /* Loop over the enabled states and display if required */ - pmc_for_each_mode(idx, mode, pmcdev) { - if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask) - seq_printf(s, " %9s |", - "Required"); - else - seq_printf(s, " %9s |", " "); + if (!lpm_req_regs) + continue; + + /* Display the header */ + pmc_core_substate_req_header_show(s, pmc_index); + + /* Loop over maps */ + for (mp = 0; mp < num_maps; mp++) { + u32 req_mask = 0; + u32 lpm_status; + const struct pmc_bit_map *map; + int mode, idx, i, len = 32; + + /* + * Capture the requirements and create a mask so that we only + * show an element if it's required for at least one of the + * enabled low power modes + */ + pmc_for_each_mode(idx, mode, pmcdev) + req_mask |= lpm_req_regs[mp + (mode * num_maps)]; + + /* Get the last latched status for this map */ + lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4)); + + /* Loop over elements in this map */ + map = maps[mp]; + for (i = 0; map[i].name && i < len; i++) { + u32 bit_mask = map[i].bit_mask; + + if (!(bit_mask & req_mask)) { + /* + * Not required for any enabled states + * so don't display + */ + continue; + } + + /* Display the element name in the first column */ + seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name); + + /* Loop over the enabled states and display if required */ + pmc_for_each_mode(idx, mode, pmcdev) { + bool required = lpm_req_regs[mp + (mode * num_maps)] & + bit_mask; + seq_printf(s, " %9s |", required ? "Required" : " "); + } + + /* In Status column, show the last captured state of this agent */ + seq_printf(s, " %9s |", lpm_status & bit_mask ? 
"Yes" : " "); + + seq_puts(s, "\n"); } + } + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs); - /* In Status column, show the last captured state of this agent */ - if (lpm_status & bit_mask) - seq_printf(s, " %9s |", "Yes"); - else - seq_printf(s, " %9s |", " "); +static unsigned int pmc_core_get_crystal_freq(void) +{ + unsigned int eax_denominator, ebx_numerator, ecx_hz, edx; - seq_puts(s, "\n"); - } + if (boot_cpu_data.cpuid_level < 0x15) + return 0; + + eax_denominator = ebx_numerator = ecx_hz = edx = 0; + + /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */ + cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx); + + if (ebx_numerator == 0 || eax_denominator == 0) + return 0; + + return ecx_hz; +} + +static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + u64 die_c6_res, count; + int ret; + + if (!pmcdev->crystal_freq) { + dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n"); + return -ENXIO; } + ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset, + &count, 1); + if (ret) + return ret; + + die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq); + seq_printf(s, "%llu\n", die_c6_res); + return 0; } -DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs); +DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us); static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) { @@ -969,9 +1039,8 @@ static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order) return true; } -static void pmc_core_get_low_power_modes(struct platform_device *pdev) +void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev) { - struct pmc_dev *pmcdev = platform_get_drvdata(pdev); struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI; u8 mode_order[LPM_MAX_NUM_MODES]; @@ -1003,7 +1072,8 @@ static void pmc_core_get_low_power_modes(struct platform_device *pdev) for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++) pri_order[mode_order[mode]] = mode; else - dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n"); + dev_warn(&pmcdev->pdev->dev, + "Assuming a default substate order for this platform\n"); /* * Loop through all modes from lowest to highest priority, @@ -1039,6 +1109,69 @@ int get_primary_reg_base(struct pmc *pmc) return 0; } +void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid) +{ + struct telem_endpoint *ep; + struct pci_dev *pcidev; + + pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0)); + if (!pcidev) { + dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found."); + return; + } + + ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0); + pci_dev_put(pcidev); + if (IS_ERR(ep)) { + dev_err(&pmcdev->pdev->dev, + "pmc_core: couldn't get DMU telem endpoint %ld", + PTR_ERR(ep)); + return; + } + + pmcdev->punit_ep = ep; + + pmcdev->has_die_c6 = true; + pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET; +} + +void pmc_core_set_device_d3(unsigned int device) +{ + struct pci_dev *pcidev; + + pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + if (pcidev) { + if (!device_trylock(&pcidev->dev)) { + pci_dev_put(pcidev); + return; + } + if (!pcidev->dev.driver) { + dev_info(&pcidev->dev, "Setting to D3hot\n"); + pci_set_power_state(pcidev, PCI_D3hot); + } + device_unlock(&pcidev->dev); + pci_dev_put(pcidev); + } +} + +static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev) +{ + struct platform_device *pdev = pmcdev->pdev; + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + u8 
val; + + if (!adev) + return false; + + if (fwnode_property_read_u8(acpi_fwnode_handle(adev), + "intel-cec-pson-switching-enabled-in-s0", + &val)) + return false; + + return val == 1; +} + + static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { debugfs_remove_recursive(pmcdev->dbgfs_dir); @@ -1108,6 +1241,17 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) pmcdev->dbgfs_dir, pmcdev, &pmc_core_substate_req_regs_fops); } + + if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) { + debugfs_create_file("pson_residency_usec", 0444, + pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency); + } + + if (pmcdev->has_die_c6) { + debugfs_create_file("die_c6_us_show", 0444, + pmcdev->dbgfs_dir, pmcdev, + &pmc_core_die_c6_us_fops); + } } static const struct x86_cpu_id intel_pmc_core_ids[] = { @@ -1120,18 +1264,20 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init), X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init), - X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_core_init), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_l_core_init), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_core_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_l_core_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init), X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_core_init), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_l_core_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_l_core_init), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_core_init), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_l_core_init), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init), X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init), + X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, arl_core_init), + X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, lnl_core_init), {} }; @@ -1202,6 +1348,10 @@ static void pmc_core_clean_structure(struct platform_device *pdev) pci_dev_put(pmcdev->ssram_pcidev); pci_disable_device(pmcdev->ssram_pcidev); } + + if (pmcdev->punit_ep) + pmt_telem_unregister_endpoint(pmcdev->punit_ep); + platform_set_drvdata(pdev, NULL); mutex_destroy(&pmcdev->lock); } @@ -1222,6 +1372,8 @@ static int pmc_core_probe(struct platform_device *pdev) if (!pmcdev) return -ENOMEM; + pmcdev->crystal_freq = pmc_core_get_crystal_freq(); + platform_set_drvdata(pdev, pmcdev); pmcdev->pdev = pdev; @@ -1253,7 +1405,6 @@ static int pmc_core_probe(struct platform_device *pdev) } pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc); - pmc_core_get_low_power_modes(pdev); pmc_core_do_dmi_quirks(primary_pmc); pmc_core_dbgfs_register(pmcdev); diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index b66dacbfb9..54137faaae 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -16,6 +16,8 @@ #include <linux/bits.h> #include <linux/platform_device.h> +struct telem_endpoint; + #define SLP_S0_RES_COUNTER_MASK GENMASK(31, 0) #define PMC_BASE_ADDR_DEFAULT 0xFE000000 @@ -221,6 +223,10 @@ enum ppfear_regs { #define TGL_LPM_PRI_OFFSET 0x1C7C #define TGL_LPM_NUM_MAPS 6 +/* Tigerlake PSON residency register */ 
+#define TGL_PSON_RESIDENCY_OFFSET 0x18f8 +#define TGL_PSON_RES_COUNTER_STEP 0x7A + /* Extended Test Mode Register 3 (CNL and later) */ #define ETR3_OFFSET 0x1048 #define ETR3_CF9GR BIT(20) @@ -257,10 +263,25 @@ enum ppfear_regs { #define MTL_SOCM_NUM_IP_IGN_ALLOWED 25 #define MTL_SOC_PMC_MMIO_REG_LEN 0x2708 #define MTL_PMC_LTR_SPG 0x1B74 +#define ARL_SOCS_PMC_LTR_RESERVED 0x1B88 +#define ARL_SOCS_NUM_IP_IGN_ALLOWED 26 +#define ARL_PMC_LTR_DMI3 0x1BE4 +#define ARL_PCH_PMC_MMIO_REG_LEN 0x2720 /* Meteor Lake PGD PFET Enable Ack Status */ #define MTL_SOCM_PPFEAR_NUM_ENTRIES 8 #define MTL_IOE_PPFEAR_NUM_ENTRIES 10 +#define ARL_SOCS_PPFEAR_NUM_ENTRIES 9 + +/* Die C6 from PUNIT telemetry */ +#define MTL_PMT_DMU_DIE_C6_OFFSET 15 +#define MTL_PMT_DMU_GUID 0x1A067102 +#define ARL_PMT_DMU_GUID 0x1A06A000 + +#define LNL_PMC_MMIO_REG_LEN 0x2708 +#define LNL_PMC_LTR_OSSE 0x1B88 +#define LNL_NUM_IP_IGN_ALLOWED 27 +#define LNL_PPFEAR_NUM_ENTRIES 12 extern const char *pmc_lpm_modes[]; @@ -320,6 +341,9 @@ struct pmc_reg_map { const u32 lpm_status_offset; const u32 lpm_live_status_offset; const u32 etr3_offset; + const u8 *lpm_reg_index; + const u32 pson_residency_offset; + const u32 pson_residency_counter_step; }; /** @@ -329,6 +353,7 @@ struct pmc_reg_map { * specific attributes */ struct pmc_info { + u32 guid; u16 devid; const struct pmc_reg_map *map; }; @@ -355,6 +380,7 @@ struct pmc { * @devs: pointer to an array of pmc pointers * @pdev: pointer to platform_device struct * @ssram_pcidev: pointer to pci device struct for the PMC SSRAM + * @crystal_freq: crystal frequency from cpuid * @dbgfs_dir: path to debugfs interface * @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers * used to read MPHY PG and PLL status are available @@ -373,6 +399,7 @@ struct pmc_dev { struct dentry *dbgfs_dir; struct platform_device *pdev; struct pci_dev *ssram_pcidev; + unsigned int crystal_freq; int pmc_xram_read_bit; struct mutex lock; /* generic mutex lock for PMC Core */ @@ -425,6 +452,7 @@ extern const struct pmc_bit_map tgl_vnn_misc_status_map[]; extern const struct pmc_bit_map tgl_signal_status_map[]; extern const struct pmc_bit_map *tgl_lpm_maps[]; extern const struct pmc_reg_map tgl_reg_map; +extern const struct pmc_reg_map tgl_h_reg_map; extern const struct pmc_bit_map adl_pfear_map[]; extern const struct pmc_bit_map *ext_adl_pfear_map[]; extern const struct pmc_bit_map adl_ltr_show_map[]; @@ -486,21 +514,80 @@ extern const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[]; extern const struct pmc_bit_map mtl_ioem_vnn_req_status_1_map[]; extern const struct pmc_bit_map *mtl_ioem_lpm_maps[]; extern const struct pmc_reg_map mtl_ioem_reg_map; +extern const struct pmc_reg_map lnl_socm_reg_map; + +/* LNL */ +extern const struct pmc_bit_map lnl_ltr_show_map[]; +extern const struct pmc_bit_map lnl_clocksource_status_map[]; +extern const struct pmc_bit_map lnl_power_gating_status_0_map[]; +extern const struct pmc_bit_map lnl_power_gating_status_1_map[]; +extern const struct pmc_bit_map lnl_power_gating_status_2_map[]; +extern const struct pmc_bit_map lnl_d3_status_0_map[]; +extern const struct pmc_bit_map lnl_d3_status_1_map[]; +extern const struct pmc_bit_map lnl_d3_status_2_map[]; +extern const struct pmc_bit_map lnl_d3_status_3_map[]; +extern const struct pmc_bit_map lnl_vnn_req_status_0_map[]; +extern const struct pmc_bit_map lnl_vnn_req_status_1_map[]; +extern const struct pmc_bit_map lnl_vnn_req_status_2_map[]; +extern const struct pmc_bit_map lnl_vnn_req_status_3_map[]; +extern const struct 
pmc_bit_map lnl_vnn_misc_status_map[]; +extern const struct pmc_bit_map *lnl_lpm_maps[]; +extern const struct pmc_bit_map lnl_pfear_map[]; +extern const struct pmc_bit_map *ext_lnl_pfear_map[]; + +/* ARL */ +extern const struct pmc_bit_map arl_socs_ltr_show_map[]; +extern const struct pmc_bit_map arl_socs_clocksource_status_map[]; +extern const struct pmc_bit_map arl_socs_power_gating_status_0_map[]; +extern const struct pmc_bit_map arl_socs_power_gating_status_1_map[]; +extern const struct pmc_bit_map arl_socs_power_gating_status_2_map[]; +extern const struct pmc_bit_map arl_socs_d3_status_2_map[]; +extern const struct pmc_bit_map arl_socs_d3_status_3_map[]; +extern const struct pmc_bit_map arl_socs_vnn_req_status_3_map[]; +extern const struct pmc_bit_map *arl_socs_lpm_maps[]; +extern const struct pmc_bit_map arl_socs_pfear_map[]; +extern const struct pmc_bit_map *ext_arl_socs_pfear_map[]; +extern const struct pmc_reg_map arl_socs_reg_map; +extern const struct pmc_bit_map arl_pchs_ltr_show_map[]; +extern const struct pmc_bit_map arl_pchs_clocksource_status_map[]; +extern const struct pmc_bit_map arl_pchs_power_gating_status_0_map[]; +extern const struct pmc_bit_map arl_pchs_power_gating_status_1_map[]; +extern const struct pmc_bit_map arl_pchs_power_gating_status_2_map[]; +extern const struct pmc_bit_map arl_pchs_d3_status_0_map[]; +extern const struct pmc_bit_map arl_pchs_d3_status_1_map[]; +extern const struct pmc_bit_map arl_pchs_d3_status_2_map[]; +extern const struct pmc_bit_map arl_pchs_d3_status_3_map[]; +extern const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[]; +extern const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[]; +extern const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[]; +extern const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[]; +extern const struct pmc_bit_map arl_pchs_vnn_misc_status_map[]; +extern const struct pmc_bit_map arl_pchs_signal_status_map[]; +extern const struct pmc_bit_map *arl_pchs_lpm_maps[]; +extern const struct pmc_reg_map arl_pchs_reg_map; extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev); +extern int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev); int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore); int pmc_core_resume_common(struct pmc_dev *pmcdev); int get_primary_reg_base(struct pmc *pmc); +extern void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev); +extern void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid); +extern void pmc_core_set_device_d3(unsigned int device); -extern void pmc_core_ssram_init(struct pmc_dev *pmcdev); +extern int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func); int spt_core_init(struct pmc_dev *pmcdev); int cnp_core_init(struct pmc_dev *pmcdev); int icl_core_init(struct pmc_dev *pmcdev); int tgl_core_init(struct pmc_dev *pmcdev); +int tgl_l_core_init(struct pmc_dev *pmcdev); +int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp); int adl_core_init(struct pmc_dev *pmcdev); int mtl_core_init(struct pmc_dev *pmcdev); +int arl_core_init(struct pmc_dev *pmcdev); +int lnl_core_init(struct pmc_dev *pmcdev); void cnl_suspend(struct pmc_dev *pmcdev); int cnl_resume(struct pmc_dev *pmcdev); diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c index 13fa16f0d5..1bde86c54e 100644 --- a/drivers/platform/x86/intel/pmc/core_ssram.c +++ b/drivers/platform/x86/intel/pmc/core_ssram.c @@ -8,10 +8,13 @@ * */ +#include <linux/cleanup.h> #include <linux/pci.h> #include 
<linux/io-64-nonatomic-lo-hi.h> #include "core.h" +#include "../vsec.h" +#include "../pmt/telemetry.h" #define SSRAM_HDR_SIZE 0x100 #define SSRAM_PWRM_OFFSET 0x14 @@ -21,6 +24,185 @@ #define SSRAM_IOE_OFFSET 0x68 #define SSRAM_DEVID_OFFSET 0x70 +/* PCH query */ +#define LPM_HEADER_OFFSET 1 +#define LPM_REG_COUNT 28 +#define LPM_MODE_OFFSET 1 + +DEFINE_FREE(pmc_core_iounmap, void __iomem *, iounmap(_T)); + +static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map) +{ + for (; list->map; ++list) + if (list->map == map) + return list->guid; + + return 0; +} + +static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc) +{ + struct telem_endpoint *ep; + const u8 *lpm_indices; + int num_maps, mode_offset = 0; + int ret, mode, i; + int lpm_size; + u32 guid; + + lpm_indices = pmc->map->lpm_reg_index; + num_maps = pmc->map->lpm_num_maps; + lpm_size = LPM_MAX_NUM_MODES * num_maps; + + guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map); + if (!guid) + return -ENXIO; + + ep = pmt_telem_find_and_register_endpoint(pmcdev->ssram_pcidev, guid, 0); + if (IS_ERR(ep)) { + dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %ld", + PTR_ERR(ep)); + return -EPROBE_DEFER; + } + + pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev, + lpm_size * sizeof(u32), + GFP_KERNEL); + if (!pmc->lpm_req_regs) { + ret = -ENOMEM; + goto unregister_ep; + } + + /* + * PMC Low Power Mode (LPM) table + * + * In telemetry space, the LPM table contains a 4 byte header followed + * by 8 consecutive mode blocks (one for each LPM mode). Each block + * has a 4 byte header followed by a set of registers that describe the + * IP state requirements for the given mode. The IP mapping is platform + * specific but the same for each block, making for easy analysis. + * Platforms only use a subset of the space to track the requirements + * for their IPs. Callers provide the requirement registers they use as + * a list of indices. Each requirement register is associated with an + * IP map that's maintained by the caller. + * + * Header + * +----+----------------------------+----------------------------+ + * | 0 | REVISION | ENABLED MODES | + * +----+--------------+-------------+-------------+--------------+ + * + * Low Power Mode 0 Block + * +----+--------------+-------------+-------------+--------------+ + * | 1 | SUB ID | SIZE | MAJOR | MINOR | + * +----+--------------+-------------+-------------+--------------+ + * | 2 | LPM0 Requirements 0 | + * +----+---------------------------------------------------------+ + * | | ... | + * +----+---------------------------------------------------------+ + * | 29 | LPM0 Requirements 27 | + * +----+---------------------------------------------------------+ + * + * ... + * + * Low Power Mode 7 Block + * +----+--------------+-------------+-------------+--------------+ + * | | SUB ID | SIZE | MAJOR | MINOR | + * +----+--------------+-------------+-------------+--------------+ + * | 60 | LPM7 Requirements 0 | + * +----+---------------------------------------------------------+ + * | | ... 
| + * +----+---------------------------------------------------------+ + * | 87 | LPM7 Requirements 27 | + * +----+---------------------------------------------------------+ + * + */ + mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET; + pmc_for_each_mode(i, mode, pmcdev) { + u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps); + int m; + + for (m = 0; m < num_maps; m++) { + u8 sample_id = lpm_indices[m] + mode_offset; + + ret = pmt_telem_read32(ep, sample_id, req_offset, 1); + if (ret) { + dev_err(&pmcdev->pdev->dev, + "couldn't read Low Power Mode requirements: %d\n", ret); + devm_kfree(&pmcdev->pdev->dev, pmc->lpm_req_regs); + goto unregister_ep; + } + ++req_offset; + } + mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET; + } + +unregister_ep: + pmt_telem_unregister_endpoint(ep); + + return ret; +} + +int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev) +{ + int ret, i; + + if (!pmcdev->ssram_pcidev) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { + if (!pmcdev->pmcs[i]) + continue; + + ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i]); + if (ret) + return ret; + } + + return 0; +} + +static void +pmc_add_pmt(struct pmc_dev *pmcdev, u64 ssram_base, void __iomem *ssram) +{ + struct pci_dev *pcidev = pmcdev->ssram_pcidev; + struct intel_vsec_platform_info info = {}; + struct intel_vsec_header *headers[2] = {}; + struct intel_vsec_header header; + void __iomem *dvsec; + u32 dvsec_offset; + u32 table, hdr; + + ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); + if (!ssram) + return; + + dvsec_offset = readl(ssram + SSRAM_DVSEC_OFFSET); + iounmap(ssram); + + dvsec = ioremap(ssram_base + dvsec_offset, SSRAM_DVSEC_SIZE); + if (!dvsec) + return; + + hdr = readl(dvsec + PCI_DVSEC_HEADER1); + header.id = readw(dvsec + PCI_DVSEC_HEADER2); + header.rev = PCI_DVSEC_HEADER1_REV(hdr); + header.length = PCI_DVSEC_HEADER1_LEN(hdr); + header.num_entries = readb(dvsec + INTEL_DVSEC_ENTRIES); + header.entry_size = readb(dvsec + INTEL_DVSEC_SIZE); + + table = readl(dvsec + INTEL_DVSEC_TABLE); + header.tbir = INTEL_DVSEC_TABLE_BAR(table); + header.offset = INTEL_DVSEC_TABLE_OFFSET(table); + iounmap(dvsec); + + headers[0] = &header; + info.caps = VSEC_CAP_TELEMETRY; + info.headers = headers; + info.base_addr = ssram_base; + info.parent = &pmcdev->pdev->dev; + + intel_vsec_register(pcidev, &info); +} + static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid) { for (; list->map; ++list) @@ -35,20 +217,20 @@ static inline u64 get_base(void __iomem *addr, u32 offset) return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3); } -static void +static int pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base, const struct pmc_reg_map *reg_map, int pmc_index) { struct pmc *pmc = pmcdev->pmcs[pmc_index]; if (!pwrm_base) - return; + return -ENODEV; /* Memory for primary PMC has been allocated in core.c */ if (!pmc) { pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL); if (!pmc) - return; + return -ENOMEM; } pmc->map = reg_map; @@ -57,77 +239,88 @@ pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base, if (!pmc->regbase) { devm_kfree(&pmcdev->pdev->dev, pmc); - return; + return -ENOMEM; } pmcdev->pmcs[pmc_index] = pmc; + + return 0; } -static void -pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, void __iomem *ssram, u32 offset, - int pmc_idx) +static int +pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset) { - u64 pwrm_base; + struct pci_dev *ssram_pcidev = pmcdev->ssram_pcidev; + void __iomem __free(pmc_core_iounmap) *tmp_ssram = 
NULL; + void __iomem __free(pmc_core_iounmap) *ssram = NULL; + const struct pmc_reg_map *map; + u64 ssram_base, pwrm_base; u16 devid; - if (pmc_idx != PMC_IDX_SOC) { - u64 ssram_base = get_base(ssram, offset); + if (!pmcdev->regmap_list) + return -ENOENT; - if (!ssram_base) - return; + ssram_base = ssram_pcidev->resource[0].start; + tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); + if (pmc_idx != PMC_IDX_MAIN) { + /* + * The secondary PMC BARS (which are behind hidden PCI devices) + * are read from fixed offsets in MMIO of the primary PMC BAR. + */ + ssram_base = get_base(tmp_ssram, offset); ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); if (!ssram) - return; + return -ENOMEM; + + } else { + ssram = no_free_ptr(tmp_ssram); } pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET); devid = readw(ssram + SSRAM_DEVID_OFFSET); - if (pmcdev->regmap_list) { - const struct pmc_reg_map *map; + /* Find and register and PMC telemetry entries */ + pmc_add_pmt(pmcdev, ssram_base, ssram); - map = pmc_core_find_regmap(pmcdev->regmap_list, devid); - if (map) - pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx); - } + map = pmc_core_find_regmap(pmcdev->regmap_list, devid); + if (!map) + return -ENODEV; - if (pmc_idx != PMC_IDX_SOC) - iounmap(ssram); + return pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx); } -void pmc_core_ssram_init(struct pmc_dev *pmcdev) +int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func) { - void __iomem *ssram; struct pci_dev *pcidev; - u64 ssram_base; int ret; - pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, 2)); + pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func)); if (!pcidev) - goto out; + return -ENODEV; ret = pcim_enable_device(pcidev); if (ret) goto release_dev; - ssram_base = pcidev->resource[0].start; - ssram = ioremap(ssram_base, SSRAM_HDR_SIZE); - if (!ssram) - goto disable_dev; - pmcdev->ssram_pcidev = pcidev; - pmc_core_ssram_get_pmc(pmcdev, ssram, 0, PMC_IDX_SOC); - pmc_core_ssram_get_pmc(pmcdev, ssram, SSRAM_IOE_OFFSET, PMC_IDX_IOE); - pmc_core_ssram_get_pmc(pmcdev, ssram, SSRAM_PCH_OFFSET, PMC_IDX_PCH); + ret = pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_MAIN, 0); + if (ret) + goto disable_dev; - iounmap(ssram); -out: - return; + pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_IOE, SSRAM_IOE_OFFSET); + pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_PCH, SSRAM_PCH_OFFSET); + + return 0; disable_dev: + pmcdev->ssram_pcidev = NULL; pci_disable_device(pcidev); release_dev: pci_dev_put(pcidev); + + return ret; } +MODULE_IMPORT_NS(INTEL_VSEC); +MODULE_IMPORT_NS(INTEL_PMT_TELEMETRY); diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c index d08e317423..71b0fd6cb7 100644 --- a/drivers/platform/x86/intel/pmc/icl.c +++ b/drivers/platform/x86/intel/pmc/icl.c @@ -53,7 +53,15 @@ const struct pmc_reg_map icl_reg_map = { int icl_core_init(struct pmc_dev *pmcdev) { struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + int ret; pmc->map = &icl_reg_map; - return get_primary_reg_base(pmc); + + ret = get_primary_reg_base(pmc); + if (ret) + return ret; + + pmc_core_get_low_power_modes(pmcdev); + + return ret; } diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c new file mode 100644 index 0000000000..068d725046 --- /dev/null +++ b/drivers/platform/x86/intel/pmc/lnl.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains platform specific structure definitions + * and init function used by Meteor Lake PCH. + * + * Copyright (c) 2022, Intel Corporation. + * All Rights Reserved. 
+ * + */ + +#include <linux/cpu.h> +#include <linux/pci.h> + +#include "core.h" + +const struct pmc_bit_map lnl_ltr_show_map[] = { + {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, + {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, + {"SATA", CNP_PMC_LTR_SATA}, + {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, + {"XHCI", CNP_PMC_LTR_XHCI}, + {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, + {"ME", CNP_PMC_LTR_ME}, + /* EVA is Enterprise Value Add, doesn't really exist on PCH */ + {"SATA1", CNP_PMC_LTR_EVA}, + {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, + {"HD_AUDIO", CNP_PMC_LTR_AZ}, + {"CNV", CNP_PMC_LTR_CNV}, + {"LPSS", CNP_PMC_LTR_LPSS}, + {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, + {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, + {"SATA2", CNP_PMC_LTR_CAM}, + {"ESPI", CNP_PMC_LTR_ESPI}, + {"SCC", CNP_PMC_LTR_SCC}, + {"ISH", CNP_PMC_LTR_ISH}, + {"UFSX2", CNP_PMC_LTR_UFSX2}, + {"EMMC", CNP_PMC_LTR_EMMC}, + /* + * Check intel_pmc_core_ids[] users of cnp_reg_map for + * a list of core SoCs using this. + */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, + {"THC0", TGL_PMC_LTR_THC0}, + {"THC1", TGL_PMC_LTR_THC1}, + {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED}, + + {"ESE", MTL_PMC_LTR_ESE}, + {"IOE_PMC", MTL_PMC_LTR_IOE_PMC}, + {"DMI3", ARL_PMC_LTR_DMI3}, + {"OSSE", LNL_PMC_LTR_OSSE}, + + /* Below two cannot be used for LTR_IGNORE */ + {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, + {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, + {} +}; + +const struct pmc_bit_map lnl_power_gating_status_0_map[] = { + {"PMC_PGD0_PG_STS", BIT(0)}, + {"FUSE_OSSE_PGD0_PG_STS", BIT(1)}, + {"ESPISPI_PGD0_PG_STS", BIT(2)}, + {"XHCI_PGD0_PG_STS", BIT(3)}, + {"SPA_PGD0_PG_STS", BIT(4)}, + {"SPB_PGD0_PG_STS", BIT(5)}, + {"SPR16B0_PGD0_PG_STS", BIT(6)}, + {"GBE_PGD0_PG_STS", BIT(7)}, + {"SBR8B7_PGD0_PG_STS", BIT(8)}, + {"SBR8B6_PGD0_PG_STS", BIT(9)}, + {"SBR16B1_PGD0_PG_STS", BIT(10)}, + {"SBR8B8_PGD0_PG_STS", BIT(11)}, + {"ESE_PGD3_PG_STS", BIT(12)}, + {"D2D_DISP_PGD0_PG_STS", BIT(13)}, + {"LPSS_PGD0_PG_STS", BIT(14)}, + {"LPC_PGD0_PG_STS", BIT(15)}, + {"SMB_PGD0_PG_STS", BIT(16)}, + {"ISH_PGD0_PG_STS", BIT(17)}, + {"SBR8B2_PGD0_PG_STS", BIT(18)}, + {"NPK_PGD0_PG_STS", BIT(19)}, + {"D2D_NOC_PGD0_PG_STS", BIT(20)}, + {"SAFSS_PGD0_PG_STS", BIT(21)}, + {"FUSE_PGD0_PG_STS", BIT(22)}, + {"D2D_DISP_PGD1_PG_STS", BIT(23)}, + {"MPFPW1_PGD0_PG_STS", BIT(24)}, + {"XDCI_PGD0_PG_STS", BIT(25)}, + {"EXI_PGD0_PG_STS", BIT(26)}, + {"CSE_PGD0_PG_STS", BIT(27)}, + {"KVMCC_PGD0_PG_STS", BIT(28)}, + {"PMT_PGD0_PG_STS", BIT(29)}, + {"CLINK_PGD0_PG_STS", BIT(30)}, + {"PTIO_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map lnl_power_gating_status_1_map[] = { + {"USBR0_PGD0_PG_STS", BIT(0)}, + {"SUSRAM_PGD0_PG_STS", BIT(1)}, + {"SMT1_PGD0_PG_STS", BIT(2)}, + {"U3FPW1_PGD0_PG_STS", BIT(3)}, + {"SMS2_PGD0_PG_STS", BIT(4)}, + {"SMS1_PGD0_PG_STS", BIT(5)}, + {"CSMERTC_PGD0_PG_STS", BIT(6)}, + {"CSMEPSF_PGD0_PG_STS", BIT(7)}, + {"FIA_PG_PGD0_PG_STS", BIT(8)}, + {"SBR16B4_PGD0_PG_STS", BIT(9)}, + {"P2SB8B_PGD0_PG_STS", BIT(10)}, + {"DBG_SBR_PGD0_PG_STS", BIT(11)}, + {"SBR8B9_PGD0_PG_STS", BIT(12)}, + {"OSSE_SMT1_PGD0_PG_STS", BIT(13)}, + {"SBR8B10_PGD0_PG_STS", BIT(14)}, + {"SBR16B3_PGD0_PG_STS", BIT(15)}, + {"G5FPW1_PGD0_PG_STS", BIT(16)}, + {"SBRG_PGD0_PG_STS", BIT(17)}, + {"PSF4_PGD0_PG_STS", BIT(18)}, + {"CNVI_PGD0_PG_STS", BIT(19)}, + {"USFX2_PGD0_PG_STS", BIT(20)}, + {"ENDBG_PGD0_PG_STS", BIT(21)}, + {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22)}, + {"SBR8B3_PGD0_PG_STS", BIT(23)}, + {"SBR8B0_PGD0_PG_STS", BIT(24)}, + {"NPK_PGD1_PG_STS", BIT(25)}, + {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26)}, + {"D2D_NOC_PGD2_PG_STS", BIT(27)}, + 
{"SBR8B1_PGD0_PG_STS", BIT(28)}, + {"PSF6_PGD0_PG_STS", BIT(29)}, + {"PSF7_PGD0_PG_STS", BIT(30)}, + {"FIA_U_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map lnl_power_gating_status_2_map[] = { + {"PSF8_PGD0_PG_STS", BIT(0)}, + {"SBR16B2_PGD0_PG_STS", BIT(1)}, + {"D2D_IPU_PGD0_PG_STS", BIT(2)}, + {"FIACPCB_U_PGD0_PG_STS", BIT(3)}, + {"TAM_PGD0_PG_STS", BIT(4)}, + {"D2D_NOC_PGD1_PG_STS", BIT(5)}, + {"TBTLSX_PGD0_PG_STS", BIT(6)}, + {"THC0_PGD0_PG_STS", BIT(7)}, + {"THC1_PGD0_PG_STS", BIT(8)}, + {"PMC_PGD0_PG_STS", BIT(9)}, + {"SBR8B5_PGD0_PG_STS", BIT(10)}, + {"UFSPW1_PGD0_PG_STS", BIT(11)}, + {"DBC_PGD0_PG_STS", BIT(12)}, + {"TCSS_PGD0_PG_STS", BIT(13)}, + {"FIA_P5X4_PGD0_PG_STS", BIT(14)}, + {"DISP_PGA_PGD0_PG_STS", BIT(15)}, + {"DISP_PSF_PGD0_PG_STS", BIT(16)}, + {"PSF0_PGD0_PG_STS", BIT(17)}, + {"P2SB16B_PGD0_PG_STS", BIT(18)}, + {"ACE_PGD0_PG_STS", BIT(19)}, + {"ACE_PGD1_PG_STS", BIT(20)}, + {"ACE_PGD2_PG_STS", BIT(21)}, + {"ACE_PGD3_PG_STS", BIT(22)}, + {"ACE_PGD4_PG_STS", BIT(23)}, + {"ACE_PGD5_PG_STS", BIT(24)}, + {"ACE_PGD6_PG_STS", BIT(25)}, + {"ACE_PGD7_PG_STS", BIT(26)}, + {"ACE_PGD8_PG_STS", BIT(27)}, + {"ACE_PGD9_PG_STS", BIT(28)}, + {"ACE_PGD10_PG_STS", BIT(29)}, + {"FIACPCB_PG_PGD0_PG_STS", BIT(30)}, + {"OSSE_PGD0_PG_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map lnl_d3_status_0_map[] = { + {"LPSS_D3_STS", BIT(3)}, + {"XDCI_D3_STS", BIT(4)}, + {"XHCI_D3_STS", BIT(5)}, + {"SPA_D3_STS", BIT(12)}, + {"SPB_D3_STS", BIT(13)}, + {"OSSE_D3_STS", BIT(15)}, + {"ESPISPI_D3_STS", BIT(18)}, + {"PSTH_D3_STS", BIT(21)}, + {} +}; + +const struct pmc_bit_map lnl_d3_status_1_map[] = { + {"OSSE_SMT1_D3_STS", BIT(7)}, + {"GBE_D3_STS", BIT(19)}, + {"ITSS_D3_STS", BIT(23)}, + {"CNVI_D3_STS", BIT(27)}, + {"UFSX2_D3_STS", BIT(28)}, + {"OSSE_HOTHAM_D3_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map lnl_d3_status_2_map[] = { + {"ESE_D3_STS", BIT(0)}, + {"CSMERTC_D3_STS", BIT(1)}, + {"SUSRAM_D3_STS", BIT(2)}, + {"CSE_D3_STS", BIT(4)}, + {"KVMCC_D3_STS", BIT(5)}, + {"USBR0_D3_STS", BIT(6)}, + {"ISH_D3_STS", BIT(7)}, + {"SMT1_D3_STS", BIT(8)}, + {"SMT2_D3_STS", BIT(9)}, + {"SMT3_D3_STS", BIT(10)}, + {"OSSE_SMT2_D3_STS", BIT(13)}, + {"CLINK_D3_STS", BIT(14)}, + {"PTIO_D3_STS", BIT(16)}, + {"PMT_D3_STS", BIT(17)}, + {"SMS1_D3_STS", BIT(18)}, + {"SMS2_D3_STS", BIT(19)}, + {} +}; + +const struct pmc_bit_map lnl_d3_status_3_map[] = { + {"THC0_D3_STS", BIT(14)}, + {"THC1_D3_STS", BIT(15)}, + {"OSSE_SMT3_D3_STS", BIT(21)}, + {"ACE_D3_STS", BIT(23)}, + {} +}; + +const struct pmc_bit_map lnl_vnn_req_status_0_map[] = { + {"LPSS_VNN_REQ_STS", BIT(3)}, + {"OSSE_VNN_REQ_STS", BIT(15)}, + {"ESPISPI_VNN_REQ_STS", BIT(18)}, + {} +}; + +const struct pmc_bit_map lnl_vnn_req_status_1_map[] = { + {"NPK_VNN_REQ_STS", BIT(4)}, + {"OSSE_SMT1_VNN_REQ_STS", BIT(7)}, + {"DFXAGG_VNN_REQ_STS", BIT(8)}, + {"EXI_VNN_REQ_STS", BIT(9)}, + {"P2D_VNN_REQ_STS", BIT(18)}, + {"GBE_VNN_REQ_STS", BIT(19)}, + {"SMB_VNN_REQ_STS", BIT(25)}, + {"LPC_VNN_REQ_STS", BIT(26)}, + {} +}; + +const struct pmc_bit_map lnl_vnn_req_status_2_map[] = { + {"eSE_VNN_REQ_STS", BIT(0)}, + {"CSMERTC_VNN_REQ_STS", BIT(1)}, + {"CSE_VNN_REQ_STS", BIT(4)}, + {"ISH_VNN_REQ_STS", BIT(7)}, + {"SMT1_VNN_REQ_STS", BIT(8)}, + {"CLINK_VNN_REQ_STS", BIT(14)}, + {"SMS1_VNN_REQ_STS", BIT(18)}, + {"SMS2_VNN_REQ_STS", BIT(19)}, + {"GPIOCOM4_VNN_REQ_STS", BIT(20)}, + {"GPIOCOM3_VNN_REQ_STS", BIT(21)}, + {"GPIOCOM2_VNN_REQ_STS", BIT(22)}, + {"GPIOCOM1_VNN_REQ_STS", BIT(23)}, + {"GPIOCOM0_VNN_REQ_STS", BIT(24)}, + {} +}; + +const struct pmc_bit_map 
lnl_vnn_req_status_3_map[] = { + {"DISP_SHIM_VNN_REQ_STS", BIT(2)}, + {"DTS0_VNN_REQ_STS", BIT(7)}, + {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, + {} +}; + +const struct pmc_bit_map lnl_vnn_misc_status_map[] = { + {"CPU_C10_REQ_STS", BIT(0)}, + {"TS_OFF_REQ_STS", BIT(1)}, + {"PNDE_MET_REQ_STS", BIT(2)}, + {"PCIE_DEEP_PM_REQ_STS", BIT(3)}, + {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)}, + {"NPK_VNNAON_REQ_STS", BIT(5)}, + {"VNN_SOC_REQ_STS", BIT(6)}, + {"ISH_VNNAON_REQ_STS", BIT(7)}, + {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8)}, + {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9)}, + {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10)}, + {"PLT_GREATER_REQ_STS", BIT(11)}, + {"PCIE_CLKREQ_REQ_STS", BIT(12)}, + {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)}, + {"PM_SYNC_STATES_REQ_STS", BIT(14)}, + {"EA_REQ_STS", BIT(15)}, + {"MPHY_CORE_OFF_REQ_STS", BIT(16)}, + {"BRK_EV_EN_REQ_STS", BIT(17)}, + {"AUTO_DEMO_EN_REQ_STS", BIT(18)}, + {"ITSS_CLK_SRC_REQ_STS", BIT(19)}, + {"LPC_CLK_SRC_REQ_STS", BIT(20)}, + {"ARC_IDLE_REQ_STS", BIT(21)}, + {"MPHY_SUS_REQ_STS", BIT(22)}, + {"FIA_DEEP_PM_REQ_STS", BIT(23)}, + {"UXD_CONNECTED_REQ_STS", BIT(24)}, + {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)}, + {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26)}, + {"PRE_WAKE0_REQ_STS", BIT(27)}, + {"PRE_WAKE1_REQ_STS", BIT(28)}, + {"PRE_WAKE2_EN_REQ_STS", BIT(29)}, + {"WOV_REQ_STS", BIT(30)}, + {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31)}, + {} +}; + +const struct pmc_bit_map lnl_clocksource_status_map[] = { + {"AON2_OFF_STS", BIT(0)}, + {"AON3_OFF_STS", BIT(1)}, + {"AON4_OFF_STS", BIT(2)}, + {"AON5_OFF_STS", BIT(3)}, + {"AON1_OFF_STS", BIT(4)}, + {"MPFPW1_0_PLL_OFF_STS", BIT(6)}, + {"USB3_PLL_OFF_STS", BIT(8)}, + {"AON3_SPL_OFF_STS", BIT(9)}, + {"G5FPW1_PLL_OFF_STS", BIT(15)}, + {"XTAL_AGGR_OFF_STS", BIT(17)}, + {"USB2_PLL_OFF_STS", BIT(18)}, + {"SAF_PLL_OFF_STS", BIT(19)}, + {"SE_TCSS_PLL_OFF_STS", BIT(20)}, + {"DDI_PLL_OFF_STS", BIT(21)}, + {"FILTER_PLL_OFF_STS", BIT(22)}, + {"ACE_PLL_OFF_STS", BIT(24)}, + {"FABRIC_PLL_OFF_STS", BIT(25)}, + {"SOC_PLL_OFF_STS", BIT(26)}, + {"REF_OFF_STS", BIT(28)}, + {"IMG_OFF_STS", BIT(29)}, + {"RTC_PLL_OFF_STS", BIT(31)}, + {} +}; + +const struct pmc_bit_map *lnl_lpm_maps[] = { + lnl_clocksource_status_map, + lnl_power_gating_status_0_map, + lnl_power_gating_status_1_map, + lnl_power_gating_status_2_map, + lnl_d3_status_0_map, + lnl_d3_status_1_map, + lnl_d3_status_2_map, + lnl_d3_status_3_map, + lnl_vnn_req_status_0_map, + lnl_vnn_req_status_1_map, + lnl_vnn_req_status_2_map, + lnl_vnn_req_status_3_map, + lnl_vnn_misc_status_map, + mtl_socm_signal_status_map, + NULL +}; + +const struct pmc_bit_map lnl_pfear_map[] = { + {"PMC_0", BIT(0)}, + {"FUSE_OSSE", BIT(1)}, + {"ESPISPI", BIT(2)}, + {"XHCI", BIT(3)}, + {"SPA", BIT(4)}, + {"SPB", BIT(5)}, + {"SBR16B0", BIT(6)}, + {"GBE", BIT(7)}, + + {"SBR8B7", BIT(0)}, + {"SBR8B6", BIT(1)}, + {"SBR16B1", BIT(1)}, + {"SBR8B8", BIT(2)}, + {"ESE", BIT(3)}, + {"SBR8B10", BIT(4)}, + {"D2D_DISP_0", BIT(5)}, + {"LPSS", BIT(6)}, + {"LPC", BIT(7)}, + + {"SMB", BIT(0)}, + {"ISH", BIT(1)}, + {"SBR8B2", BIT(2)}, + {"NPK_0", BIT(3)}, + {"D2D_NOC_0", BIT(4)}, + {"SAFSS", BIT(5)}, + {"FUSE", BIT(6)}, + {"D2D_DISP_1", BIT(7)}, + + {"MPFPW1", BIT(0)}, + {"XDCI", BIT(1)}, + {"EXI", BIT(2)}, + {"CSE", BIT(3)}, + {"KVMCC", BIT(4)}, + {"PMT", BIT(5)}, + {"CLINK", BIT(6)}, + {"PTIO", BIT(7)}, + + {"USBR", BIT(0)}, + {"SUSRAM", BIT(1)}, + {"SMT1", BIT(2)}, + {"U3FPW1", BIT(3)}, + {"SMS2", BIT(4)}, + {"SMS1", BIT(5)}, + {"CSMERTC", BIT(6)}, + {"CSMEPSF", BIT(7)}, + + {"FIA_PG", BIT(0)}, + {"SBR16B4", BIT(1)}, + 
{"P2SB8B", BIT(2)}, + {"DBG_SBR", BIT(3)}, + {"SBR8B9", BIT(4)}, + {"OSSE_SMT1", BIT(5)}, + {"SBR8B10", BIT(6)}, + {"SBR16B3", BIT(7)}, + + {"G5FPW1", BIT(0)}, + {"SBRG", BIT(1)}, + {"PSF4", BIT(2)}, + {"CNVI", BIT(3)}, + {"UFSX2", BIT(4)}, + {"ENDBG", BIT(5)}, + {"FIACPCB_P5X4", BIT(6)}, + {"SBR8B3", BIT(7)}, + + {"SBR8B0", BIT(0)}, + {"NPK_1", BIT(1)}, + {"OSSE_HOTHAM", BIT(2)}, + {"D2D_NOC_2", BIT(3)}, + {"SBR8B1", BIT(4)}, + {"PSF6", BIT(5)}, + {"PSF7", BIT(6)}, + {"FIA_U", BIT(7)}, + + {"PSF8", BIT(0)}, + {"SBR16B2", BIT(1)}, + {"D2D_IPU", BIT(2)}, + {"FIACPCB_U", BIT(3)}, + {"TAM", BIT(4)}, + {"D2D_NOC_1", BIT(5)}, + {"TBTLSX", BIT(6)}, + {"THC0", BIT(7)}, + + {"THC1", BIT(0)}, + {"PMC_1", BIT(1)}, + {"SBR8B5", BIT(2)}, + {"UFSPW1", BIT(3)}, + {"DBC", BIT(4)}, + {"TCSS", BIT(5)}, + {"FIA_P5X4", BIT(6)}, + {"DISP_PGA", BIT(7)}, + + {"DBG_PSF", BIT(0)}, + {"PSF0", BIT(1)}, + {"P2SB16B", BIT(2)}, + {"ACE0", BIT(3)}, + {"ACE1", BIT(4)}, + {"ACE2", BIT(5)}, + {"ACE3", BIT(6)}, + {"ACE4", BIT(7)}, + + {"ACE5", BIT(0)}, + {"ACE6", BIT(1)}, + {"ACE7", BIT(2)}, + {"ACE8", BIT(3)}, + {"ACE9", BIT(4)}, + {"ACE10", BIT(5)}, + {"FIACPCB", BIT(6)}, + {"OSSE", BIT(7)}, + {} +}; + +const struct pmc_bit_map *ext_lnl_pfear_map[] = { + lnl_pfear_map, + NULL +}; + +const struct pmc_reg_map lnl_socm_reg_map = { + .pfear_sts = ext_lnl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .ltr_show_sts = lnl_ltr_show_map, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = LNL_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .etr3_offset = ETR3_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_sts = lnl_lpm_maps, + .lpm_status_offset = MTL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, +}; + +#define LNL_NPU_PCI_DEV 0x643e +#define LNL_IPU_PCI_DEV 0x645d + +/* + * Set power state of select devices that do not have drivers to D3 + * so that they do not block Package C entry. 
+ */ +static void lnl_d3_fixup(void) +{ + pmc_core_set_device_d3(LNL_IPU_PCI_DEV); + pmc_core_set_device_d3(LNL_NPU_PCI_DEV); +} + +static int lnl_resume(struct pmc_dev *pmcdev) +{ + lnl_d3_fixup(); + pmc_core_send_ltr_ignore(pmcdev, 3, 0); + + return pmc_core_resume_common(pmcdev); +} + +int lnl_core_init(struct pmc_dev *pmcdev) +{ + int ret; + struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC]; + + lnl_d3_fixup(); + + pmcdev->suspend = cnl_suspend; + pmcdev->resume = lnl_resume; + + pmc->map = &lnl_socm_reg_map; + ret = get_primary_reg_base(pmc); + if (ret) + return ret; + + pmc_core_get_low_power_modes(pmcdev); + + return 0; +} diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index 504e3e273c..c7d15d8640 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -10,6 +10,14 @@ #include <linux/pci.h> #include "core.h" +#include "../pmt/telemetry.h" + +/* PMC SSRAM PMT Telemetry GUIDS */ +#define SOCP_LPM_REQ_GUID 0x2625030 +#define IOEM_LPM_REQ_GUID 0x4357464 +#define IOEP_LPM_REQ_GUID 0x5077612 + +static const u8 MTL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20}; /* * Die Mapping to Product. @@ -465,6 +473,7 @@ const struct pmc_reg_map mtl_socm_reg_map = { .lpm_sts = mtl_socm_lpm_maps, .lpm_status_offset = MTL_LPM_STATUS_OFFSET, .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, + .lpm_reg_index = MTL_LPM_REG_INDEX, }; const struct pmc_bit_map mtl_ioep_pfear_map[] = { @@ -782,6 +791,13 @@ const struct pmc_reg_map mtl_ioep_reg_map = { .ltr_show_sts = mtl_ioep_ltr_show_map, .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_reg_index = MTL_LPM_REG_INDEX, }; const struct pmc_bit_map mtl_ioem_pfear_map[] = { @@ -922,6 +938,13 @@ const struct pmc_reg_map mtl_ioem_reg_map = { .ltr_show_sts = mtl_ioep_ltr_show_map, .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, + .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, + .lpm_num_maps = ADL_LPM_NUM_MAPS, + .lpm_priority_offset = MTL_LPM_PRI_OFFSET, + .lpm_en_offset = MTL_LPM_EN_OFFSET, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, + .lpm_reg_index = MTL_LPM_REG_INDEX, }; #define PMC_DEVID_SOCM 0x7e7f @@ -929,16 +952,19 @@ const struct pmc_reg_map mtl_ioem_reg_map = { #define PMC_DEVID_IOEM 0x7ebf static struct pmc_info mtl_pmc_info_list[] = { { - .devid = PMC_DEVID_SOCM, - .map = &mtl_socm_reg_map, + .guid = SOCP_LPM_REQ_GUID, + .devid = PMC_DEVID_SOCM, + .map = &mtl_socm_reg_map, }, { - .devid = PMC_DEVID_IOEP, - .map = &mtl_ioep_reg_map, + .guid = IOEP_LPM_REQ_GUID, + .devid = PMC_DEVID_IOEP, + .map = &mtl_ioep_reg_map, }, { - .devid = PMC_DEVID_IOEM, - .map = &mtl_ioem_reg_map + .guid = IOEM_LPM_REQ_GUID, + .devid = PMC_DEVID_IOEM, + .map = &mtl_ioem_reg_map }, {} }; @@ -946,34 +972,15 @@ static struct pmc_info mtl_pmc_info_list[] = { #define MTL_GNA_PCI_DEV 0x7e4c #define MTL_IPU_PCI_DEV 0x7d19 #define MTL_VPU_PCI_DEV 0x7d1d -static void mtl_set_device_d3(unsigned int device) -{ - struct pci_dev *pcidev; - - pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); - if (pcidev) { - if 
(!device_trylock(&pcidev->dev)) { - pci_dev_put(pcidev); - return; - } - if (!pcidev->dev.driver) { - dev_info(&pcidev->dev, "Setting to D3hot\n"); - pci_set_power_state(pcidev, PCI_D3hot); - } - device_unlock(&pcidev->dev); - pci_dev_put(pcidev); - } -} - /* * Set power state of select devices that do not have drivers to D3 * so that they do not block Package C entry. */ static void mtl_d3_fixup(void) { - mtl_set_device_d3(MTL_GNA_PCI_DEV); - mtl_set_device_d3(MTL_IPU_PCI_DEV); - mtl_set_device_d3(MTL_VPU_PCI_DEV); + pmc_core_set_device_d3(MTL_GNA_PCI_DEV); + pmc_core_set_device_d3(MTL_IPU_PCI_DEV); + pmc_core_set_device_d3(MTL_VPU_PCI_DEV); } static int mtl_resume(struct pmc_dev *pmcdev) @@ -987,23 +994,36 @@ static int mtl_resume(struct pmc_dev *pmcdev) int mtl_core_init(struct pmc_dev *pmcdev) { struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC]; - int ret = 0; + int ret; + int func = 2; + bool ssram_init = true; mtl_d3_fixup(); pmcdev->suspend = cnl_suspend; pmcdev->resume = mtl_resume; - pmcdev->regmap_list = mtl_pmc_info_list; - pmc_core_ssram_init(pmcdev); - /* If regbase not assigned, set map and discover using legacy method */ - if (!pmc->regbase) { + /* + * If ssram init fails use legacy method to at least get the + * primary PMC + */ + ret = pmc_core_ssram_init(pmcdev, func); + if (ret) { + ssram_init = false; + dev_warn(&pmcdev->pdev->dev, + "ssram init failed, %d, using legacy init\n", ret); pmc->map = &mtl_socm_reg_map; ret = get_primary_reg_base(pmc); if (ret) return ret; } + pmc_core_get_low_power_modes(pmcdev); + pmc_core_punit_pmt_init(pmcdev, MTL_PMT_DMU_GUID); + + if (ssram_init) + return pmc_core_ssram_get_lpm_reqs(pmcdev); + return 0; } diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c index 4b6f5cbda1..ab993a69e3 100644 --- a/drivers/platform/x86/intel/pmc/spt.c +++ b/drivers/platform/x86/intel/pmc/spt.c @@ -137,7 +137,15 @@ const struct pmc_reg_map spt_reg_map = { int spt_core_init(struct pmc_dev *pmcdev) { struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; + int ret; pmc->map = &spt_reg_map; - return get_primary_reg_base(pmc); + + ret = get_primary_reg_base(pmc); + if (ret) + return ret; + + pmc_core_get_low_power_modes(pmcdev); + + return ret; } diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c index e88d3d00c8..e0580de180 100644 --- a/drivers/platform/x86/intel/pmc/tgl.c +++ b/drivers/platform/x86/intel/pmc/tgl.c @@ -13,6 +13,11 @@ #define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972" #define ACPI_GET_LOW_MODE_REGISTERS 1 +enum pch_type { + PCH_H, + PCH_LP +}; + const struct pmc_bit_map tgl_pfear_map[] = { {"PSF9", BIT(0)}, {"RES_66", BIT(1)}, @@ -205,6 +210,33 @@ const struct pmc_reg_map tgl_reg_map = { .etr3_offset = ETR3_OFFSET, }; +const struct pmc_reg_map tgl_h_reg_map = { + .pfear_sts = ext_tgl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, + .lpm_num_maps = TGL_LPM_NUM_MAPS, + .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, + .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET, + .lpm_en_offset = TGL_LPM_EN_OFFSET, + 
.lpm_priority_offset = TGL_LPM_PRI_OFFSET, + .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET, + .lpm_sts = tgl_lpm_maps, + .lpm_status_offset = TGL_LPM_STATUS_OFFSET, + .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET, + .etr3_offset = ETR3_OFFSET, + .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, + .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, +}; + void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); @@ -253,12 +285,25 @@ free_acpi_obj: ACPI_FREE(out_obj); } +int tgl_l_core_init(struct pmc_dev *pmcdev) +{ + return tgl_core_generic_init(pmcdev, PCH_LP); +} + int tgl_core_init(struct pmc_dev *pmcdev) { + return tgl_core_generic_init(pmcdev, PCH_H); +} + +int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp) +{ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; int ret; - pmc->map = &tgl_reg_map; + if (pch_tp == PCH_H) + pmc->map = &tgl_h_reg_map; + else + pmc->map = &tgl_reg_map; pmcdev->suspend = cnl_suspend; pmcdev->resume = cnl_resume; @@ -267,6 +312,7 @@ int tgl_core_init(struct pmc_dev *pmcdev) if (ret) return ret; + pmc_core_get_low_power_modes(pmcdev); pmc_core_get_tgl_lpm_reqs(pmcdev->pdev); return 0; diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index f32a233470..4b53940a64 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -17,7 +17,7 @@ #include "../vsec.h" #include "class.h" -#define PMT_XA_START 0 +#define PMT_XA_START 1 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) #define GUID_SPR_PUNIT 0x9956f43f @@ -31,7 +31,7 @@ bool intel_pmt_is_early_client_hw(struct device *dev) * differences from the server platforms (which use the Out Of Band * Management Services Module OOBMSM). */ - return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW); + return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW); } EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT); @@ -159,11 +159,12 @@ static struct class intel_pmt_class = { }; static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, - struct device *dev, + struct intel_vsec_device *ivdev, struct resource *disc_res) { - struct pci_dev *pci_dev = to_pci_dev(dev->parent); + struct pci_dev *pci_dev = ivdev->pcidev; + struct device *dev = &ivdev->auxdev.dev; + struct intel_pmt_header *header = &entry->header; u8 bir; /* @@ -215,6 +216,13 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, break; case ACCESS_BARID: + /* Use the provided base address if it exists */ + if (ivdev->base_addr) { + entry->base_addr = ivdev->base_addr + + GET_ADDRESS(header->base_offset); + break; + } + /* * If another BAR was specified then the base offset * represents the offset within that BAR. 
SO retrieve the @@ -239,6 +247,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { + struct intel_vsec_device *ivdev = dev_to_ivdev(parent); struct resource res = {0}; struct device *dev; int ret; @@ -262,7 +271,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, if (ns->attr_grp) { ret = sysfs_create_group(entry->kobj, ns->attr_grp); if (ret) - goto fail_sysfs; + goto fail_sysfs_create_group; } /* if size is 0 assume no data buffer, so no file needed */ @@ -287,13 +296,23 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, entry->pmt_bin_attr.size = entry->size; ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr); - if (!ret) - return 0; + if (ret) + goto fail_ioremap; + if (ns->pmt_add_endpoint) { + ret = ns->pmt_add_endpoint(entry, ivdev->pcidev); + if (ret) + goto fail_add_endpoint; + } + + return 0; + +fail_add_endpoint: + sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); fail_ioremap: if (ns->attr_grp) sysfs_remove_group(entry->kobj, ns->attr_grp); -fail_sysfs: +fail_sysfs_create_group: device_unregister(dev); fail_dev_create: xa_erase(ns->xa, entry->devid); @@ -305,7 +324,6 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa struct intel_vsec_device *intel_vsec_dev, int idx) { struct device *dev = &intel_vsec_dev->auxdev.dev; - struct intel_pmt_header header; struct resource *disc_res; int ret; @@ -315,16 +333,15 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa if (IS_ERR(entry->disc_table)) return PTR_ERR(entry->disc_table); - ret = ns->pmt_header_decode(entry, &header, dev); + ret = ns->pmt_header_decode(entry, dev); if (ret) return ret; - ret = intel_pmt_populate_entry(entry, &header, dev, disc_res); + ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res); if (ret) return ret; return intel_pmt_dev_register(entry, ns, dev); - } EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT); diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index db11d58867..d23c63b73a 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -9,6 +9,7 @@ #include <linux/io.h> #include "../vsec.h" +#include "telemetry.h" /* PMT access types */ #define ACCESS_BARID 2 @@ -18,7 +19,26 @@ #define GET_BIR(v) ((v) & GENMASK(2, 0)) #define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) +struct pci_dev; + +struct telem_endpoint { + struct pci_dev *pcidev; + struct telem_header header; + void __iomem *base; + bool present; + struct kref kref; +}; + +struct intel_pmt_header { + u32 base_offset; + u32 size; + u32 guid; + u8 access_type; +}; + struct intel_pmt_entry { + struct telem_endpoint *ep; + struct intel_pmt_header header; struct bin_attribute pmt_bin_attr; struct kobject *kobj; void __iomem *disc_table; @@ -29,20 +49,14 @@ struct intel_pmt_entry { int devid; }; -struct intel_pmt_header { - u32 base_offset; - u32 size; - u32 guid; - u8 access_type; -}; - struct intel_pmt_namespace { const char *name; struct xarray *xa; const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev); + int (*pmt_add_endpoint)(struct intel_pmt_entry *entry, + struct pci_dev *pdev); }; bool intel_pmt_is_early_client_hw(struct device *dev); diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index bbb3d61d09..4014c02caf 100644 
--- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -223,10 +223,10 @@ static const struct attribute_group pmt_crashlog_group = { }; static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; struct crashlog_entry *crashlog; if (!pmt_crashlog_supported(entry)) diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 39cbc87cc2..09258564df 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -30,6 +30,15 @@ /* Used by client hardware to identify a fixed telemetry entry*/ #define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000 +#define NUM_BYTES_QWORD(v) ((v) << 3) +#define SAMPLE_ID_OFFSET(v) ((v) << 3) + +#define NUM_BYTES_DWORD(v) ((v) << 2) +#define SAMPLE_ID_OFFSET32(v) ((v) << 2) + +/* Protects access to the xarray of telemetry endpoint handles */ +static DEFINE_MUTEX(ep_lock); + enum telem_type { TELEM_TYPE_PUNIT = 0, TELEM_TYPE_CRASHLOG, @@ -58,10 +67,10 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry, } static int pmt_telem_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; if (pmt_telem_region_overlaps(entry, dev)) return 1; @@ -84,21 +93,195 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, return 0; } +static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, + struct pci_dev *pdev) +{ + struct telem_endpoint *ep; + + /* Endpoint lifetimes are managed by kref, not devres */ + entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL); + if (!entry->ep) + return -ENOMEM; + + ep = entry->ep; + ep->pcidev = pdev; + ep->header.access_type = entry->header.access_type; + ep->header.guid = entry->header.guid; + ep->header.base_offset = entry->header.base_offset; + ep->header.size = entry->header.size; + ep->base = entry->base; + ep->present = true; + + kref_init(&ep->kref); + + return 0; +} + static DEFINE_XARRAY_ALLOC(telem_array); static struct intel_pmt_namespace pmt_telem_ns = { .name = "telem", .xa = &telem_array, .pmt_header_decode = pmt_telem_header_decode, + .pmt_add_endpoint = pmt_telem_add_endpoint, }; +/* Called when all users unregister and the device is removed */ +static void pmt_telem_ep_release(struct kref *kref) +{ + struct telem_endpoint *ep; + + ep = container_of(kref, struct telem_endpoint, kref); + kfree(ep); +} + +unsigned long pmt_telem_get_next_endpoint(unsigned long start) +{ + struct intel_pmt_entry *entry; + unsigned long found_idx; + + mutex_lock(&ep_lock); + xa_for_each_start(&telem_array, found_idx, entry, start) { + /* + * Return first found index after start. + * 0 is not valid id. + */ + if (found_idx > start) + break; + } + mutex_unlock(&ep_lock); + + return found_idx == start ? 
0 : found_idx; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, INTEL_PMT_TELEMETRY); + +struct telem_endpoint *pmt_telem_register_endpoint(int devid) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + mutex_unlock(&ep_lock); + return ERR_PTR(-ENXIO); + } + + kref_get(&entry->ep->kref); + mutex_unlock(&ep_lock); + + return entry->ep; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, INTEL_PMT_TELEMETRY); + +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep) +{ + kref_put(&ep->kref, pmt_telem_ep_release); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, INTEL_PMT_TELEMETRY); + +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + int err = 0; + + if (!info) + return -EINVAL; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + err = -ENXIO; + goto unlock; + } + + info->pdev = entry->ep->pcidev; + info->header = entry->ep->header; + +unlock: + mutex_unlock(&ep_lock); + return err; + +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, INTEL_PMT_TELEMETRY); + +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET(id); + size = ep->header.size; + + if (offset + NUM_BYTES_QWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count)); + + return ep->present ? 0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read, INTEL_PMT_TELEMETRY); + +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET32(id); + size = ep->header.size; + + if (offset + NUM_BYTES_DWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count)); + + return ep->present ? 
0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, INTEL_PMT_TELEMETRY); + +struct telem_endpoint * +pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos) +{ + int devid = 0; + int inst = 0; + int err = 0; + + while ((devid = pmt_telem_get_next_endpoint(devid))) { + struct telem_endpoint_info ep_info; + + err = pmt_telem_get_endpoint_info(devid, &ep_info); + if (err) + return ERR_PTR(err); + + if (ep_info.header.guid == guid && ep_info.pdev == pcidev) { + if (inst == pos) + return pmt_telem_register_endpoint(devid); + ++inst; + } + } + + return ERR_PTR(-ENXIO); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, INTEL_PMT_TELEMETRY); + static void pmt_telem_remove(struct auxiliary_device *auxdev) { struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev); int i; - for (i = 0; i < priv->num_entries; i++) - intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns); -} + mutex_lock(&ep_lock); + for (i = 0; i < priv->num_entries; i++) { + struct intel_pmt_entry *entry = &priv->entry[i]; + + kref_put(&entry->ep->kref, pmt_telem_ep_release); + intel_pmt_dev_destroy(entry, &pmt_telem_ns); + } + mutex_unlock(&ep_lock); +}; static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { @@ -117,7 +300,9 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia for (i = 0; i < intel_vsec_dev->num_resources; i++) { struct intel_pmt_entry *entry = &priv->entry[priv->num_entries]; + mutex_lock(&ep_lock); ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i); + mutex_unlock(&ep_lock); if (ret < 0) goto abort_probe; if (ret) diff --git a/drivers/platform/x86/intel/pmt/telemetry.h b/drivers/platform/x86/intel/pmt/telemetry.h new file mode 100644 index 0000000000..d45af5512b --- /dev/null +++ b/drivers/platform/x86/intel/pmt/telemetry.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TELEMETRY_H +#define _TELEMETRY_H + +/* Telemetry types */ +#define PMT_TELEM_TELEMETRY 0 +#define PMT_TELEM_CRASHLOG 1 + +struct telem_endpoint; +struct pci_dev; + +struct telem_header { + u8 access_type; + u16 size; + u32 guid; + u32 base_offset; +}; + +struct telem_endpoint_info { + struct pci_dev *pdev; + struct telem_header header; +}; + +/** + * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint + * @start: starting devid to look from + * + * This functions can be used in a while loop predicate to retrieve the devid + * of all available telemetry endpoints. Functions pmt_telem_get_next_endpoint() + * and pmt_telem_register_endpoint() can be used inside of the loop to examine + * endpoint info and register to receive a pointer to the endpoint. The pointer + * is then usable in the telemetry read calls to access the telemetry data. + * + * Return: + * * devid - devid of the next present endpoint from start + * * 0 - when no more endpoints are present after start + */ +unsigned long pmt_telem_get_next_endpoint(unsigned long start); + +/** + * pmt_telem_register_endpoint() - Register a telemetry endpoint + * @devid: device id/handle of the telemetry endpoint + * + * Increments the kref usage counter for the endpoint. + * + * Return: + * * endpoint - On success returns pointer to the telemetry endpoint + * * -ENXIO - telemetry endpoint not found + */ +struct telem_endpoint *pmt_telem_register_endpoint(int devid); + +/** + * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint + * @ep: ep structure to populate. 
+ * + * Decrements the kref usage counter for the endpoint. + */ +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep); + +/** + * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid + * @devid: device id/handle of the telemetry endpoint + * @info: Endpoint info structure to be populated + * + * Return: + * * 0 - Success + * * -ENXIO - telemetry endpoint not found for the devid + * * -EINVAL - @info is NULL + */ +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info); + +/** + * pmt_telem_find_and_register_endpoint() - Get a telemetry endpoint from + * pci_dev device, guid and pos + * @pdev: PCI device inside the Intel vsec + * @guid: GUID of the telemetry space + * @pos: Instance of the guid + * + * Return: + * * endpoint - On success returns pointer to the telemetry endpoint + * * -ENXIO - telemetry endpoint not found + */ +struct telem_endpoint *pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, + u32 guid, u16 pos); + +/** + * pmt_telem_read() - Read qwords from counter sram using sample id + * @ep: Telemetry endpoint to be read + * @id: The beginning sample id of the metric(s) to be read + * @data: Allocated qword buffer + * @count: Number of qwords requested + * + * Callers must ensure reads are aligned. When the call returns -ENODEV, + * the device has been removed and callers should unregister the telemetry + * endpoint. + * + * Return: + * * 0 - Success + * * -ENODEV - The device is not present. + * * -EINVAL - The offset is out bounds + * * -EPIPE - The device was removed during the read. Data written + * but should be considered invalid. + */ +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count); + +/** + * pmt_telem_read32() - Read qwords from counter sram using sample id + * @ep: Telemetry endpoint to be read + * @id: The beginning sample id of the metric(s) to be read + * @data: Allocated dword buffer + * @count: Number of dwords requested + * + * Callers must ensure reads are aligned. When the call returns -ENODEV, + * the device has been removed and callers should unregister the telemetry + * endpoint. + * + * Return: + * * 0 - Success + * * -ENODEV - The device is not present. + * * -EINVAL - The offset is out bounds + * * -EPIPE - The device was removed during the read. Data written + * but should be considered invalid. + */ +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count); + +#endif diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 0b6d2c8644..2662fbbddf 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -234,6 +234,7 @@ struct perf_level { * @saved_clos_configs: Save SST-CP CLOS configuration to store restore for suspend/resume * @saved_clos_assocs: Save SST-CP CLOS association to store restore for suspend/resume * @saved_pp_control: Save SST-PP control information to store restore for suspend/resume + * @write_blocked: Write operation is blocked, so can't change SST state * * This structure is used store complete SST information for a power_domain. This information * is used to read/write request for any SST IOCTL. 
Each physical CPU package can have multiple @@ -259,6 +260,7 @@ struct tpmi_per_power_domain_info { u64 saved_clos_configs[4]; u64 saved_clos_assocs[4]; u64 saved_pp_control; + bool write_blocked; }; /** @@ -515,6 +517,9 @@ static long isst_if_clos_param(void __user *argp) return -EINVAL; if (clos_param.get_set) { + if (power_domain_info->write_blocked) + return -EPERM; + _write_cp_info("clos.min_freq", clos_param.min_freq_mhz, (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, @@ -602,6 +607,9 @@ static long isst_if_clos_assoc(void __user *argp) power_domain_info = &sst_inst->power_domain_info[punit_id]; + if (assoc_cmds.get_set && power_domain_info->write_blocked) + return -EPERM; + offset = SST_CLOS_ASSOC_0_OFFSET + (punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE; shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG; @@ -752,6 +760,9 @@ static int isst_if_set_perf_level(void __user *argp) if (!power_domain_info) return -EINVAL; + if (power_domain_info->write_blocked) + return -EPERM; + if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level))) return -EINVAL; @@ -809,6 +820,9 @@ static int isst_if_set_perf_feature(void __user *argp) if (!power_domain_info) return -EINVAL; + if (power_domain_info->write_blocked) + return -EPERM; + _write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET, SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) @@ -1257,11 +1271,21 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, int tpmi_sst_dev_add(struct auxiliary_device *auxdev) { + bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct tpmi_sst_struct *tpmi_sst; int i, ret, pkg = 0, inst = 0; int num_resources; + ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked); + if (ret) + dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n"); + + if (read_blocked) { + dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + plat_info = tpmi_get_platform_data(auxdev); if (!plat_info) { dev_err(&auxdev->dev, "No platform info\n"); @@ -1306,6 +1330,7 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) tpmi_sst->power_domain_info[i].package_id = pkg; tpmi_sst->power_domain_info[i].power_domain_id = i; tpmi_sst->power_domain_info[i].auxdev = auxdev; + tpmi_sst->power_domain_info[i].write_blocked = write_blocked; tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res); if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base)) return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base); diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index c2f6e20b45..910df7c654 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -171,19 +171,6 @@ struct tpmi_feature_state { } __packed; /* - * List of supported TMPI IDs. - * Some TMPI IDs are not used by Linux, so the numbers are not consecutive. - */ -enum intel_tpmi_id { - TPMI_ID_RAPL = 0, /* Running Average Power Limit */ - TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ - TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ - TPMI_ID_SST = 5, /* Speed Select Technology */ - TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ - TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ -}; - -/* * The size from hardware is in u32 units. 
This size is from a trusted hardware, * but better to verify for pre silicon platforms. Set size to 0, when invalid. */ @@ -345,8 +332,8 @@ err_unlock: return ret; } -int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, - int *locked, int *disabled) +int tpmi_get_feature_status(struct auxiliary_device *auxdev, + int feature_id, bool *read_blocked, bool *write_blocked) { struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); @@ -357,8 +344,8 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, if (ret) return ret; - *locked = feature_state.locked; - *disabled = !feature_state.enabled; + *read_blocked = feature_state.read_blocked; + *write_blocked = feature_state.write_blocked; return 0; } @@ -599,9 +586,21 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev; char feature_id_name[TPMI_FEATURE_NAME_LEN]; struct intel_vsec_device *feature_vsec_dev; + struct tpmi_feature_state feature_state; struct resource *res, *tmp; const char *name; - int i; + int i, ret; + + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); + if (ret) + return ret; + + /* + * If not enabled, continue to look at other features in the PFS, so return -EOPNOTSUPP. + * This will not cause failure of loading of this driver. + */ + if (!feature_state.enabled) + return -EOPNOTSUPP; name = intel_tpmi_name(pfs->pfs_header.tpmi_id); if (!name) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 4fb790552c..bd75d61ff8 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -66,6 +66,7 @@ struct tpmi_uncore_struct { int min_ratio; struct tpmi_uncore_power_domain_info *pd_info; struct tpmi_uncore_cluster_info root_cluster; + bool write_blocked; }; #define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15) @@ -157,6 +158,9 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); uncore_root = cluster_info->uncore_root; + if (uncore_root->write_blocked) + return -EPERM; + /* Update each cluster in a package */ if (cluster_info->root_domain) { struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; @@ -233,11 +237,21 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { + bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct tpmi_uncore_struct *tpmi_uncore; int ret, i, pkg = 0; int num_resources; + ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked); + if (ret) + dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n"); + + if (read_blocked) { + dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + /* Get number of power domains, which is equal to number of resources */ num_resources = tpmi_get_resource_count(auxdev); if (!num_resources) @@ -266,6 +280,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ } tpmi_uncore->power_domain_count = num_resources; + tpmi_uncore->write_blocked = 
write_blocked; /* Get the package ID from the TPMI core */ plat_info = tpmi_get_platform_data(auxdev); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index a3b25253b6..a5e0f5c221 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -205,6 +205,16 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), @@ -212,6 +222,9 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL), X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL), X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, NULL), + X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids); diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index 084c355c86..5d13452bb9 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device) priv->switches_dev->id.bustype = BUS_HOST; if (priv->has_switches) { - detect_tablet_mode(&device->dev); - ret = input_register_device(priv->switches_dev); if (ret) return ret; @@ -316,6 +314,9 @@ static int intel_vbtn_probe(struct platform_device *device) if (ACPI_FAILURE(status)) dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status); } + // Check switches after buttons since VBDL may have side effects. + if (has_switches) + detect_tablet_mode(&device->dev); device_init_wakeup(&device->dev, true); /* diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 343ab6a82c..778eb0aa34 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -15,6 +15,7 @@ #include <linux/auxiliary_bus.h> #include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/idr.h> @@ -24,13 +25,6 @@ #include "vsec.h" -/* Intel DVSEC offsets */ -#define INTEL_DVSEC_ENTRIES 0xA -#define INTEL_DVSEC_SIZE 0xB -#define INTEL_DVSEC_TABLE 0xC -#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) -#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) -#define TABLE_OFFSET_SHIFT 3 #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) @@ -39,34 +33,6 @@ static DEFINE_IDA(intel_vsec_ida); static DEFINE_IDA(intel_vsec_sdsi_ida); static DEFINE_XARRAY_ALLOC(auxdev_array); -/** - * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. 
- * @rev: Revision ID of the VSEC/DVSEC register space - * @length: Length of the VSEC/DVSEC register space - * @id: ID of the feature - * @num_entries: Number of instances of the feature - * @entry_size: Size of the discovery table for each feature - * @tbir: BAR containing the discovery tables - * @offset: BAR offset of start of the first discovery table - */ -struct intel_vsec_header { - u8 rev; - u16 length; - u16 id; - u8 num_entries; - u8 entry_size; - u8 tbir; - u32 offset; -}; - -enum intel_vsec_id { - VSEC_ID_TELEMETRY = 2, - VSEC_ID_WATCHER = 3, - VSEC_ID_CRASHLOG = 4, - VSEC_ID_SDSI = 65, - VSEC_ID_TPMI = 66, -}; - static const char *intel_vsec_name(enum intel_vsec_id id) { switch (id) { @@ -137,6 +103,9 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev; int ret, id; + if (!parent) + return -EINVAL; + ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev, PMT_XA_LIMIT, GFP_KERNEL); if (ret < 0) { @@ -155,9 +124,6 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return id; } - if (!parent) - parent = &pdev->dev; - auxdev->id = id; auxdev->name = name; auxdev->dev.parent = parent; @@ -175,23 +141,27 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return ret; } - ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux, + return devm_add_action_or_reset(parent, intel_vsec_remove_aux, auxdev); - if (ret < 0) - return ret; - - return 0; } EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC); static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header, struct intel_vsec_platform_info *info) { - struct intel_vsec_device *intel_vsec_dev; - struct resource *res, *tmp; + struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL; + struct resource __free(kfree) *res = NULL; + struct resource *tmp; + struct device *parent; unsigned long quirks = info->quirks; + u64 base_addr; int i; + if (info->parent) + parent = info->parent; + else + parent = &pdev->dev; + if (!intel_vsec_supported(header->id, info->caps)) return -EINVAL; @@ -210,37 +180,50 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he return -ENOMEM; res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL); - if (!res) { - kfree(intel_vsec_dev); + if (!res) return -ENOMEM; - } if (quirks & VSEC_QUIRK_TABLE_SHIFT) header->offset >>= TABLE_OFFSET_SHIFT; + if (info->base_addr) + base_addr = info->base_addr; + else + base_addr = pdev->resource[header->tbir].start; + /* * The DVSEC/VSEC contains the starting offset and count for a block of * discovery tables. Create a resource array of these tables to the * auxiliary device driver. 
*/ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) { - tmp->start = pdev->resource[header->tbir].start + - header->offset + i * (header->entry_size * sizeof(u32)); + tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32)); tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1; tmp->flags = IORESOURCE_MEM; + + /* Check resource is not in use */ + if (!request_mem_region(tmp->start, resource_size(tmp), "")) + return -EBUSY; + + release_mem_region(tmp->start, resource_size(tmp)); } intel_vsec_dev->pcidev = pdev; - intel_vsec_dev->resource = res; + intel_vsec_dev->resource = no_free_ptr(res); intel_vsec_dev->num_resources = header->num_entries; - intel_vsec_dev->info = info; + intel_vsec_dev->quirks = info->quirks; + intel_vsec_dev->base_addr = info->base_addr; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; else intel_vsec_dev->ida = &intel_vsec_ida; - return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev, + /* + * Pass the ownership of intel_vsec_dev and resource within it to + * intel_vsec_add_aux() + */ + return intel_vsec_add_aux(pdev, parent, no_free_ptr(intel_vsec_dev), intel_vsec_name(header->id)); } @@ -358,6 +341,16 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, return have_devices; } +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ + if (!pdev || !info) + return; + + intel_vsec_walk_header(pdev, info); +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_register, INTEL_VSEC); + static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_vsec_platform_info *info; @@ -426,6 +419,11 @@ static const struct intel_vsec_platform_info tgl_info = { .quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW, }; +/* LNL info */ +static const struct intel_vsec_platform_info lnl_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_WATCHER, +}; + #define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d #define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e #define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d @@ -433,6 +431,7 @@ static const struct intel_vsec_platform_info tgl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7 #define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d #define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d +#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) }, @@ -441,6 +440,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, { } }; MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids); diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index 0a6201b4a0..e23e761296 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -11,9 +11,45 @@ #define VSEC_CAP_SDSI BIT(3) #define VSEC_CAP_TPMI BIT(4) +/* Intel DVSEC offsets */ +#define INTEL_DVSEC_ENTRIES 0xA +#define INTEL_DVSEC_SIZE 0xB +#define INTEL_DVSEC_TABLE 0xC +#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) +#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) +#define TABLE_OFFSET_SHIFT 3 + struct pci_dev; struct resource; +enum intel_vsec_id { + VSEC_ID_TELEMETRY = 2, + VSEC_ID_WATCHER = 3, + VSEC_ID_CRASHLOG = 4, + VSEC_ID_SDSI = 65, + VSEC_ID_TPMI = 66, +}; + +/** + * struct 
intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. + * @rev: Revision ID of the VSEC/DVSEC register space + * @length: Length of the VSEC/DVSEC register space + * @id: ID of the feature + * @num_entries: Number of instances of the feature + * @entry_size: Size of the discovery table for each feature + * @tbir: BAR containing the discovery tables + * @offset: BAR offset of start of the first discovery table + */ +struct intel_vsec_header { + u8 rev; + u16 length; + u16 id; + u8 num_entries; + u8 entry_size; + u8 tbir; + u32 offset; +}; + enum intel_vsec_quirks { /* Watcher feature not supported */ VSEC_QUIRK_NO_WATCHER = BIT(0), @@ -33,9 +69,11 @@ enum intel_vsec_quirks { /* Platform specific data */ struct intel_vsec_platform_info { + struct device *parent; struct intel_vsec_header **headers; unsigned long caps; unsigned long quirks; + u64 base_addr; }; struct intel_vsec_device { @@ -43,11 +81,12 @@ struct intel_vsec_device { struct pci_dev *pcidev; struct resource *resource; struct ida *ida; - struct intel_vsec_platform_info *info; int num_resources; int id; /* xa */ void *priv_data; size_t priv_data_size; + unsigned long quirks; + u64 base_addr; }; int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, @@ -63,4 +102,7 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device { return container_of(auxdev, struct intel_vsec_device, auxdev); } + +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info); #endif diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c index 3c86e0108a..040153ad67 100644 --- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c +++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c @@ -25,19 +25,14 @@ static int get_fwu_request(struct device *dev, u32 *out) { - struct acpi_buffer result = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; - acpi_status status; - status = wmi_query_block(INTEL_WMI_SBL_GUID, 0, &result); - if (ACPI_FAILURE(status)) { - dev_err(dev, "wmi_query_block failed\n"); + obj = wmidev_block_query(to_wmi_device(dev), 0); + if (!obj) return -ENODEV; - } - obj = (union acpi_object *)result.pointer; - if (!obj || obj->type != ACPI_TYPE_INTEGER) { - dev_warn(dev, "wmi_query_block returned invalid value\n"); + if (obj->type != ACPI_TYPE_INTEGER) { + dev_warn(dev, "wmidev_block_query returned invalid value\n"); kfree(obj); return -EINVAL; } @@ -58,9 +53,9 @@ static int set_fwu_request(struct device *dev, u32 in) input.length = sizeof(u32); input.pointer = &value; - status = wmi_set_block(INTEL_WMI_SBL_GUID, 0, &input); + status = wmidev_block_set(to_wmi_device(dev), 0, &input); if (ACPI_FAILURE(status)) { - dev_err(dev, "wmi_set_block failed\n"); + dev_err(dev, "wmidev_block_set failed\n"); return -ENODEV; } diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c index fc333ff82d..e2ad3f46f3 100644 --- a/drivers/platform/x86/intel/wmi/thunderbolt.c +++ b/drivers/platform/x86/intel/wmi/thunderbolt.c @@ -32,8 +32,7 @@ static ssize_t force_power_store(struct device *dev, mode = hex_to_bin(buf[0]); dev_dbg(dev, "force_power: storing %#x\n", mode); if (mode == 0 || mode == 1) { - status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1, - &input, NULL); + status = wmidev_evaluate_method(to_wmi_device(dev), 0, 1, &input, NULL); if (ACPI_FAILURE(status)) { dev_dbg(dev, "force_power: failed to evaluate ACPI method\n"); return -ENODEV; |
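
The most visible new interface in this merge is the INTEL_PMT_TELEMETRY endpoint API declared in the added drivers/platform/x86/intel/pmt/telemetry.h (pmt_telem_find_and_register_endpoint(), pmt_telem_read(), pmt_telem_unregister_endpoint()), which the PMC core now uses to pull LPM requirement data out of PMT sample regions. The following is a minimal consumer sketch, not part of the patch: it only illustrates the call sequence implied by the header above. The GUID value, the function name example_read_samples(), and the relative include path are hypothetical placeholders; an in-tree consumer would use its real telemetry GUID and include path.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

/* Include path assumes a sibling directory of pmt/, as the PMC driver does */
#include "../pmt/telemetry.h"

/* The exports are namespaced, so a consumer module must import the namespace */
MODULE_IMPORT_NS(INTEL_PMT_TELEMETRY);

#define EXAMPLE_TELEM_GUID	0x12345678	/* hypothetical GUID, not a real one */

static int example_read_samples(struct pci_dev *pcidev)
{
	struct telem_endpoint *ep;
	u64 samples[4];
	int err;

	/* Take a reference on the first endpoint instance matching the GUID */
	ep = pmt_telem_find_and_register_endpoint(pcidev, EXAMPLE_TELEM_GUID, 0);
	if (IS_ERR(ep))
		return PTR_ERR(ep);

	/*
	 * Read four qwords starting at sample id 0. A return of -EPIPE means
	 * the device was removed during the read and the data is invalid.
	 */
	err = pmt_telem_read(ep, 0, samples, ARRAY_SIZE(samples));

	/* Drop the kref taken by the register call */
	pmt_telem_unregister_endpoint(ep);

	return err;
}

This mirrors how mtl.c and arl.c in this series consume the endpoints: find-and-register by GUID against the SSRAM PCI device, read the samples, and unregister so the kref added in pmt_telem_add_endpoint() can be released when the telemetry device goes away.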