From 85c675d0d09a45a135bddd15d7b385f8758c32fb Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:35:05 +0200
Subject: Adding upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 drivers/pci/pcie/aspm.c | 104 +++++++++++++++++++++++++++---------------------
 1 file changed, 59 insertions(+), 45 deletions(-)

(limited to 'drivers/pci/pcie/aspm.c')

diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7e3b342215..eb2c77d522 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -7,7 +7,9 @@
  * Copyright (C) Shaohua Li (shaohua.li@intel.com)
  */
 
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
+#include <linux/limits.h>
 #include <linux/math.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -16,9 +18,10 @@
 #include <linux/errno.h>
 #include <linux/pm.h>
 #include <linux/init.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
+#include <linux/time.h>
+
 #include "../pci.h"
 
 #ifdef MODULE_PARAM_PREFIX
@@ -267,10 +270,10 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
 /* Convert L0s latency encoding to ns */
 static u32 calc_l0s_latency(u32 lnkcap)
 {
-        u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
+        u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);
 
         if (encoding == 0x7)
-                return (5 * 1000);      /* > 4us */
+                return 5 * NSEC_PER_USEC;       /* > 4us */
         return (64 << encoding);
 }
 
@@ -278,26 +281,26 @@ static u32 calc_l0s_latency(u32 lnkcap)
 static u32 calc_l0s_acceptable(u32 encoding)
 {
         if (encoding == 0x7)
-                return -1U;
+                return U32_MAX;
         return (64 << encoding);
 }
 
 /* Convert L1 latency encoding to ns */
 static u32 calc_l1_latency(u32 lnkcap)
 {
-        u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
+        u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);
 
         if (encoding == 0x7)
-                return (65 * 1000);     /* > 64us */
-        return (1000 << encoding);
+                return 65 * NSEC_PER_USEC;      /* > 64us */
+        return NSEC_PER_USEC << encoding;
 }
 
 /* Convert L1 acceptable latency encoding to ns */
 static u32 calc_l1_acceptable(u32 encoding)
 {
         if (encoding == 0x7)
-                return -1U;
-        return (1000 << encoding);
+                return U32_MAX;
+        return NSEC_PER_USEC << encoding;
 }
 
 /* Convert L1SS T_pwr encoding to usec */
@@ -325,33 +328,33 @@ static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
  */
 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
 {
-        u64 threshold_ns = (u64) threshold_us * 1000;
+        u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;
 
         /*
          * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
          * value of 0x3ff.
          */
-        if (threshold_ns <= 0x3ff * 1) {
+        if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
                 *scale = 0;             /* Value times 1ns */
                 *value = threshold_ns;
-        } else if (threshold_ns <= 0x3ff * 32) {
+        } else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
                 *scale = 1;             /* Value times 32ns */
                 *value = roundup(threshold_ns, 32) / 32;
-        } else if (threshold_ns <= 0x3ff * 1024) {
+        } else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
                 *scale = 2;             /* Value times 1024ns */
                 *value = roundup(threshold_ns, 1024) / 1024;
-        } else if (threshold_ns <= 0x3ff * 32768) {
+        } else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
                 *scale = 3;             /* Value times 32768ns */
                 *value = roundup(threshold_ns, 32768) / 32768;
-        } else if (threshold_ns <= 0x3ff * 1048576) {
+        } else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
                 *scale = 4;             /* Value times 1048576ns */
                 *value = roundup(threshold_ns, 1048576) / 1048576;
-        } else if (threshold_ns <= 0x3ff * (u64) 33554432) {
+        } else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
                 *scale = 5;             /* Value times 33554432ns */
                 *value = roundup(threshold_ns, 33554432) / 33554432;
         } else {
                 *scale = 5;
-                *value = 0x3ff;         /* Max representable value */
+                *value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
         }
 }
 
@@ -371,11 +374,11 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
         link = endpoint->bus->self->link_state;
 
         /* Calculate endpoint L0s acceptable latency */
-        encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
+        encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
         acceptable_l0s = calc_l0s_acceptable(encoding);
 
         /* Calculate endpoint L1 acceptable latency */
-        encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
+        encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
         acceptable_l1 = calc_l1_acceptable(encoding);
 
         while (link) {
@@ -417,7 +420,7 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
                 if ((link->aspm_capable & ASPM_STATE_L1) &&
                     (latency + l1_switch_latency > acceptable_l1))
                         link->aspm_capable &= ~ASPM_STATE_L1;
-                l1_switch_latency += 1000;
+                l1_switch_latency += NSEC_PER_USEC;
 
                 link = link->parent;
         }
@@ -446,22 +449,24 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
         u32 pl1_2_enables, cl1_2_enables;
 
         /* Choose the greater of the two Port Common_Mode_Restore_Times */
-        val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
-        val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+        val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
+        val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
         t_common_mode = max(val1, val2);
 
         /* Choose the greater of the two Port T_POWER_ON times */
-        val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
-        scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
-        val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
-        scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+        val1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
+        scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
+        val2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
+        scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);
 
         if (calc_l12_pwron(parent, scale1, val1) >
             calc_l12_pwron(child, scale2, val2)) {
-                ctl2 |= scale1 | (val1 << 3);
+                ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
+                        FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
                 t_power_on = calc_l12_pwron(parent, scale1, val1);
         } else {
-                ctl2 |= scale2 | (val2 << 3);
+                ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
+                        FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
                 t_power_on = calc_l12_pwron(child, scale2, val2);
         }
 
@@ -477,7 +482,9 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
          */
         l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
         encode_l12_threshold(l1_2_threshold, &scale, &value);
-        ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+        ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
+                FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
+                FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);
 
         /* Some broken devices only support dword access to L1 SS */
         pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
@@ -689,10 +696,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
          * in pcie_config_aspm_link().
          */
         if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
-                pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
-                                                   PCI_EXP_LNKCTL_ASPM_L1, 0);
-                pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
-                                                   PCI_EXP_LNKCTL_ASPM_L1, 0);
+                pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
+                                           PCI_EXP_LNKCTL_ASPM_L1);
+                pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+                                           PCI_EXP_LNKCTL_ASPM_L1);
         }
 
         val = 0;
@@ -1001,8 +1008,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
         up_read(&pci_bus_sem);
 }
 
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
 {
         struct pcie_link_state *link = pdev->link_state;
 
@@ -1012,12 +1022,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
          * Devices changed PM state, we should recheck if latency
          * meets all functions' requirement
          */
-        down_read(&pci_bus_sem);
+        if (!locked)
+                down_read(&pci_bus_sem);
         mutex_lock(&aspm_lock);
         pcie_update_aspm_capable(link->root);
         pcie_config_aspm_path(link);
         mutex_unlock(&aspm_lock);
-        up_read(&pci_bus_sem);
+        if (!locked)
+                up_read(&pci_bus_sem);
 }
 
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
@@ -1053,7 +1065,7 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
         return bridge->link_state;
 }
 
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
 {
         struct pcie_link_state *link = pcie_aspm_get_link(pdev);
 
@@ -1072,7 +1084,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
                 return -EPERM;
         }
 
-        if (sem)
+        if (!locked)
                 down_read(&pci_bus_sem);
         mutex_lock(&aspm_lock);
         if (state & PCIE_LINK_STATE_L0S)
@@ -1094,7 +1106,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
                 link->clkpm_disable = 1;
         pcie_set_clkpm(link, policy_to_clkpm_state(link));
         mutex_unlock(&aspm_lock);
-        if (sem)
+        if (!locked)
                 up_read(&pci_bus_sem);
 
         return 0;
@@ -1102,7 +1114,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
 
 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
 {
-        return __pci_disable_link_state(pdev, state, false);
+        lockdep_assert_held_read(&pci_bus_sem);
+
+        return __pci_disable_link_state(pdev, state, true);
 }
 EXPORT_SYMBOL(pci_disable_link_state_locked);
 
@@ -1117,7 +1131,7 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
  */
 int pci_disable_link_state(struct pci_dev *pdev, int state)
 {
-        return __pci_disable_link_state(pdev, state, true);
+        return __pci_disable_link_state(pdev, state, false);
 }
 EXPORT_SYMBOL(pci_disable_link_state);
 
@@ -1410,10 +1424,10 @@ static int __init pcie_aspm_disable(char *str)
                 aspm_policy = POLICY_DEFAULT;
                 aspm_disabled = 1;
                 aspm_support_enabled = false;
-                printk(KERN_INFO "PCIe ASPM is disabled\n");
+                pr_info("PCIe ASPM is disabled\n");
         } else if (!strcmp(str, "force")) {
                 aspm_force = 1;
-                printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
+                pr_info("PCIe ASPM is forcibly enabled\n");
         }
         return 1;
 }
-- 
cgit v1.2.3
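
The bulk of the conversion in this patch replaces open-coded mask-and-shift register accesses with the FIELD_GET()/FIELD_PREP()/FIELD_MAX() helpers. The snippet below is a minimal, self-contained sketch of what those helpers do; it is an illustration only, not the kernel's <linux/bitfield.h> implementation. The two mask values are copied from include/uapi/linux/pci_regs.h, and the sample register value is invented.

/*
 * Standalone approximation of FIELD_GET()/FIELD_PREP()/FIELD_MAX().
 * The real kernel macros also add compile-time checks that the value
 * fits the field; the key idea shown here is that the shift is derived
 * from the mask, so literals like ">> 12" or "<< 16" disappear.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_L0SEL            0x00007000 /* L0s Exit Latency, bits 14:12 */
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE  0x03ff0000 /* LTR_L1.2_THRESHOLD_Value, bits 25:16 */

/* Shift implied by the mask: position of its lowest set bit */
#define FIELD_SHIFT(mask)      ((unsigned int)__builtin_ctz(mask))
/* Extract a field: mask the register, then shift the field down */
#define FIELD_GET(mask, reg)   (((reg) & (mask)) >> FIELD_SHIFT(mask))
/* Build a field: shift the value up into place, then mask it */
#define FIELD_PREP(mask, val)  (((val) << FIELD_SHIFT(mask)) & (mask))
/* Largest value the field can hold */
#define FIELD_MAX(mask)        ((mask) >> FIELD_SHIFT(mask))

int main(void)
{
	uint32_t lnkcap = 0x00477c03;	/* made-up example register value */

	/* Equivalent to the open-coded form the patch removes */
	assert(FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap) ==
	       ((lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12));

	/* FIELD_PREP() is the inverse, as used when building CTL1/CTL2 */
	assert(FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, 0x123) ==
	       ((uint32_t)0x123 << 16));

	/* Prints 0x3ff, the "max representable value" literal being replaced */
	printf("FIELD_MAX = 0x%x\n", FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE));
	return 0;
}

Deriving the shift from the mask is the point of the conversion: the register layout is stated once, in the mask definition, instead of being repeated in scattered shift constants (6, 9, 12, 15, 16, 19, 29).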