From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild | 15 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 716 +++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c | 48 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | 481 +++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | 517 ++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c | 657 ++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h | 159 +++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c | 1071 ++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 550 ++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h | 19 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c | 422 ++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c | 84 ++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c | 233 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c | 563 ++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h | 29 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h | 12 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c | 87 ++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c | 245 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h | 27 + drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h | 15 + 20 files changed, 5950 insertions(+) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/clk') diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild new file mode 100644 index 000000000..dcecd499d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/clk/base.o +nvkm-y += nvkm/subdev/clk/nv04.o +nvkm-y += nvkm/subdev/clk/nv40.o +nvkm-y += nvkm/subdev/clk/nv50.o +nvkm-y += nvkm/subdev/clk/g84.o +nvkm-y += nvkm/subdev/clk/gt215.o +nvkm-y += nvkm/subdev/clk/mcp77.o +nvkm-y += nvkm/subdev/clk/gf100.o +nvkm-y += nvkm/subdev/clk/gk104.o +nvkm-y += nvkm/subdev/clk/gk20a.o +nvkm-y += nvkm/subdev/clk/gm20b.o + +nvkm-y += nvkm/subdev/clk/pllnv04.o +nvkm-y += 
nvkm/subdev/clk/pllgt215.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c new file mode 100644 index 000000000..da07a2fbe --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c @@ -0,0 +1,716 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "priv.h" + +#include <subdev/bios.h> +#include <subdev/bios/boost.h> +#include <subdev/bios/cstep.h> +#include <subdev/bios/perf.h> +#include <subdev/bios/vpstate.h> +#include <subdev/fb.h> +#include <subdev/therm.h> +#include <subdev/volt.h> + +#include <core/option.h> + +/****************************************************************************** + * misc + *****************************************************************************/ +static u32 +nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust, + u8 pstate, u8 domain, u32 input) +{ + struct nvkm_bios *bios = clk->subdev.device->bios; + struct nvbios_boostE boostE; + u8 ver, hdr, cnt, len; + u32 data; + + data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE); + if (data) { + struct nvbios_boostS boostS; + u8 idx = 0, sver, shdr; + u32 subd; + + input = max(boostE.min, input); + input = min(boostE.max, input); + do { + sver = ver; + shdr = hdr; + subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr, + cnt, len, &boostS); + if (subd && boostS.domain == domain) { + if (adjust) + input = input * boostS.percent / 100; + input = max(boostS.min, input); + input = min(boostS.max, input); + break; + } + } while (subd); + } + + return input; +} + +/****************************************************************************** + * C-States + *****************************************************************************/ +static bool +nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate, + u32 max_volt, int temp) +{ + const struct nvkm_domain *domain = clk->domains; + struct nvkm_volt *volt = clk->subdev.device->volt; + int voltage; + + while (domain && domain->name != nv_clk_src_max) { + if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) { + u32 freq = cstate->domain[domain->name]; + switch (clk->boost_mode) { + case NVKM_CLK_BOOST_NONE: + if (clk->base_khz && freq > clk->base_khz) + return false; + fallthrough; + case NVKM_CLK_BOOST_BIOS: + if (clk->boost_khz && freq > clk->boost_khz) + return false; + } + } + domain++; + } + + if (!volt) + return true; + + voltage = nvkm_volt_map(volt, cstate->voltage, temp); + if (voltage < 0) + return false; + return voltage <= min(max_volt, volt->max_uv); +} + +static struct nvkm_cstate * +nvkm_cstate_find_best(struct nvkm_clk 
*clk, struct nvkm_pstate *pstate, + struct nvkm_cstate *cstate) +{ + struct nvkm_device *device = clk->subdev.device; + struct nvkm_volt *volt = device->volt; + int max_volt; + + if (!pstate || !cstate) + return NULL; + + if (!volt) + return cstate; + + max_volt = volt->max_uv; + if (volt->max0_id != 0xff) + max_volt = min(max_volt, + nvkm_volt_map(volt, volt->max0_id, clk->temp)); + if (volt->max1_id != 0xff) + max_volt = min(max_volt, + nvkm_volt_map(volt, volt->max1_id, clk->temp)); + if (volt->max2_id != 0xff) + max_volt = min(max_volt, + nvkm_volt_map(volt, volt->max2_id, clk->temp)); + + list_for_each_entry_from_reverse(cstate, &pstate->list, head) { + if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp)) + return cstate; + } + + return NULL; +} + +static struct nvkm_cstate * +nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) +{ + struct nvkm_cstate *cstate; + if (cstatei == NVKM_CLK_CSTATE_HIGHEST) + return list_last_entry(&pstate->list, typeof(*cstate), head); + else { + list_for_each_entry(cstate, &pstate->list, head) { + if (cstate->id == cstatei) + return cstate; + } + } + return NULL; +} + +static int +nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) +{ + struct nvkm_subdev *subdev = &clk->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_therm *therm = device->therm; + struct nvkm_volt *volt = device->volt; + struct nvkm_cstate *cstate; + int ret; + + if (!list_empty(&pstate->list)) { + cstate = nvkm_cstate_get(clk, pstate, cstatei); + cstate = nvkm_cstate_find_best(clk, pstate, cstate); + if (!cstate) + return -EINVAL; + } else { + cstate = &pstate->base; + } + + if (therm) { + ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1); + if (ret && ret != -ENODEV) { + nvkm_error(subdev, "failed to raise fan speed: %d\n", ret); + return ret; + } + } + + if (volt) { + ret = nvkm_volt_set_id(volt, cstate->voltage, + pstate->base.voltage, clk->temp, +1); + if (ret && ret != -ENODEV) { + nvkm_error(subdev, "failed to raise voltage: %d\n", ret); + return ret; + } + } + + ret = clk->func->calc(clk, cstate); + if (ret == 0) { + ret = clk->func->prog(clk); + clk->func->tidy(clk); + } + + if (volt) { + ret = nvkm_volt_set_id(volt, cstate->voltage, + pstate->base.voltage, clk->temp, -1); + if (ret && ret != -ENODEV) + nvkm_error(subdev, "failed to lower voltage: %d\n", ret); + } + + if (therm) { + ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1); + if (ret && ret != -ENODEV) + nvkm_error(subdev, "failed to lower fan speed: %d\n", ret); + } + + return ret; +} + +static void +nvkm_cstate_del(struct nvkm_cstate *cstate) +{ + list_del(&cstate->head); + kfree(cstate); +} + +static int +nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate) +{ + struct nvkm_bios *bios = clk->subdev.device->bios; + struct nvkm_volt *volt = clk->subdev.device->volt; + const struct nvkm_domain *domain = clk->domains; + struct nvkm_cstate *cstate = NULL; + struct nvbios_cstepX cstepX; + u8 ver, hdr; + u32 data; + + data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX); + if (!data) + return -ENOENT; + + if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv) + return -EINVAL; + + cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); + if (!cstate) + return -ENOMEM; + + *cstate = pstate->base; + cstate->voltage = cstepX.voltage; + cstate->id = idx; + + while (domain && domain->name != nv_clk_src_max) { + if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { + u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate, + 
domain->bios, cstepX.freq); + cstate->domain[domain->name] = freq; + } + domain++; + } + + list_add(&cstate->head, &pstate->list); + return 0; +} + +/****************************************************************************** + * P-States + *****************************************************************************/ +static int +nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei) +{ + struct nvkm_subdev *subdev = &clk->subdev; + struct nvkm_fb *fb = subdev->device->fb; + struct nvkm_pci *pci = subdev->device->pci; + struct nvkm_pstate *pstate; + int ret, idx = 0; + + list_for_each_entry(pstate, &clk->states, head) { + if (idx++ == pstatei) + break; + } + + nvkm_debug(subdev, "setting performance state %d\n", pstatei); + clk->pstate = pstatei; + + nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width); + + if (fb && fb->ram && fb->ram->func->calc) { + struct nvkm_ram *ram = fb->ram; + int khz = pstate->base.domain[nv_clk_src_mem]; + do { + ret = ram->func->calc(ram, khz); + if (ret == 0) + ret = ram->func->prog(ram); + } while (ret > 0); + ram->func->tidy(ram); + } + + return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST); +} + +static void +nvkm_pstate_work(struct work_struct *work) +{ + struct nvkm_clk *clk = container_of(work, typeof(*clk), work); + struct nvkm_subdev *subdev = &clk->subdev; + int pstate; + + if (!atomic_xchg(&clk->waiting, 0)) + return; + clk->pwrsrc = power_supply_is_system_supplied(); + + nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n", + clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc, + clk->astate, clk->temp, clk->dstate); + + pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc; + if (clk->state_nr && pstate != -1) { + pstate = (pstate < 0) ? clk->astate : pstate; + pstate = min(pstate, clk->state_nr - 1); + pstate = max(pstate, clk->dstate); + } else { + pstate = clk->pstate = -1; + } + + nvkm_trace(subdev, "-> %d\n", pstate); + if (pstate != clk->pstate) { + int ret = nvkm_pstate_prog(clk, pstate); + if (ret) { + nvkm_error(subdev, "error setting pstate %d: %d\n", + pstate, ret); + } + } + + wake_up_all(&clk->wait); +} + +static int +nvkm_pstate_calc(struct nvkm_clk *clk, bool wait) +{ + atomic_set(&clk->waiting, 1); + schedule_work(&clk->work); + if (wait) + wait_event(clk->wait, !atomic_read(&clk->waiting)); + return 0; +} + +static void +nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate) +{ + const struct nvkm_domain *clock = clk->domains - 1; + struct nvkm_cstate *cstate; + struct nvkm_subdev *subdev = &clk->subdev; + char info[3][32] = { "", "", "" }; + char name[4] = "--"; + int i = -1; + + if (pstate->pstate != 0xff) + snprintf(name, sizeof(name), "%02x", pstate->pstate); + + while ((++clock)->name != nv_clk_src_max) { + u32 lo = pstate->base.domain[clock->name]; + u32 hi = lo; + if (hi == 0) + continue; + + nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo); + list_for_each_entry(cstate, &pstate->list, head) { + u32 freq = cstate->domain[clock->name]; + lo = min(lo, freq); + hi = max(hi, freq); + nvkm_debug(subdev, "%10d KHz\n", freq); + } + + if (clock->mname && ++i < ARRAY_SIZE(info)) { + lo /= clock->mdiv; + hi /= clock->mdiv; + if (lo == hi) { + snprintf(info[i], sizeof(info[i]), "%s %d MHz", + clock->mname, lo); + } else { + snprintf(info[i], sizeof(info[i]), + "%s %d-%d MHz", clock->mname, lo, hi); + } + } + } + + nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]); +} + +static void +nvkm_pstate_del(struct nvkm_pstate *pstate) +{ + struct nvkm_cstate 
*cstate, *temp; + + list_for_each_entry_safe(cstate, temp, &pstate->list, head) { + nvkm_cstate_del(cstate); + } + + list_del(&pstate->head); + kfree(pstate); +} + +static int +nvkm_pstate_new(struct nvkm_clk *clk, int idx) +{ + struct nvkm_bios *bios = clk->subdev.device->bios; + const struct nvkm_domain *domain = clk->domains - 1; + struct nvkm_pstate *pstate; + struct nvkm_cstate *cstate; + struct nvbios_cstepE cstepE; + struct nvbios_perfE perfE; + u8 ver, hdr, cnt, len; + u32 data; + + data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE); + if (!data) + return -EINVAL; + if (perfE.pstate == 0xff) + return 0; + + pstate = kzalloc(sizeof(*pstate), GFP_KERNEL); + cstate = &pstate->base; + if (!pstate) + return -ENOMEM; + + INIT_LIST_HEAD(&pstate->list); + + pstate->pstate = perfE.pstate; + pstate->fanspeed = perfE.fanspeed; + pstate->pcie_speed = perfE.pcie_speed; + pstate->pcie_width = perfE.pcie_width; + cstate->voltage = perfE.voltage; + cstate->domain[nv_clk_src_core] = perfE.core; + cstate->domain[nv_clk_src_shader] = perfE.shader; + cstate->domain[nv_clk_src_mem] = perfE.memory; + cstate->domain[nv_clk_src_vdec] = perfE.vdec; + cstate->domain[nv_clk_src_dom6] = perfE.disp; + + while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) { + struct nvbios_perfS perfS; + u8 sver = ver, shdr = hdr; + u32 perfSe = nvbios_perfSp(bios, data, domain->bios, + &sver, &shdr, cnt, len, &perfS); + if (perfSe == 0 || sver != 0x40) + continue; + + if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { + perfS.v40.freq = nvkm_clk_adjust(clk, false, + pstate->pstate, + domain->bios, + perfS.v40.freq); + } + + cstate->domain[domain->name] = perfS.v40.freq; + } + + data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE); + if (data) { + int idx = cstepE.index; + do { + nvkm_cstate_new(clk, idx, pstate); + } while(idx--); + } + + nvkm_pstate_info(clk, pstate); + list_add_tail(&pstate->head, &clk->states); + clk->state_nr++; + return 0; +} + +/****************************************************************************** + * Adjustment triggers + *****************************************************************************/ +static int +nvkm_clk_ustate_update(struct nvkm_clk *clk, int req) +{ + struct nvkm_pstate *pstate; + int i = 0; + + if (!clk->allow_reclock) + return -ENOSYS; + + if (req != -1 && req != -2) { + list_for_each_entry(pstate, &clk->states, head) { + if (pstate->pstate == req) + break; + i++; + } + + if (pstate->pstate != req) + return -EINVAL; + req = i; + } + + return req + 2; +} + +static int +nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen) +{ + int ret = 1; + + if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen)) + return -2; + + if (strncasecmpz(mode, "disabled", arglen)) { + char save = mode[arglen]; + long v; + + ((char *)mode)[arglen] = '\0'; + if (!kstrtol(mode, 0, &v)) { + ret = nvkm_clk_ustate_update(clk, v); + if (ret < 0) + ret = 1; + } + ((char *)mode)[arglen] = save; + } + + return ret - 2; +} + +int +nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr) +{ + int ret = nvkm_clk_ustate_update(clk, req); + if (ret >= 0) { + if (ret -= 2, pwr) clk->ustate_ac = ret; + else clk->ustate_dc = ret; + return nvkm_pstate_calc(clk, true); + } + return ret; +} + +int +nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait) +{ + if (!rel) clk->astate = req; + if ( rel) clk->astate += rel; + clk->astate = min(clk->astate, clk->state_nr - 1); + clk->astate = max(clk->astate, 0); + return nvkm_pstate_calc(clk, wait); +} + +int 
+nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp) +{ + if (clk->temp == temp) + return 0; + clk->temp = temp; + return nvkm_pstate_calc(clk, false); +} + +int +nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel) +{ + if (!rel) clk->dstate = req; + if ( rel) clk->dstate += rel; + clk->dstate = min(clk->dstate, clk->state_nr - 1); + clk->dstate = max(clk->dstate, 0); + return nvkm_pstate_calc(clk, true); +} + +int +nvkm_clk_pwrsrc(struct nvkm_device *device) +{ + if (device->clk) + return nvkm_pstate_calc(device->clk, false); + return 0; +} + +/****************************************************************************** + * subdev base class implementation + *****************************************************************************/ + +int +nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src) +{ + return clk->func->read(clk, src); +} + +static int +nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct nvkm_clk *clk = nvkm_clk(subdev); + flush_work(&clk->work); + if (clk->func->fini) + clk->func->fini(clk); + return 0; +} + +static int +nvkm_clk_init(struct nvkm_subdev *subdev) +{ + struct nvkm_clk *clk = nvkm_clk(subdev); + const struct nvkm_domain *clock = clk->domains; + int ret; + + memset(&clk->bstate, 0x00, sizeof(clk->bstate)); + INIT_LIST_HEAD(&clk->bstate.list); + clk->bstate.pstate = 0xff; + + while (clock->name != nv_clk_src_max) { + ret = nvkm_clk_read(clk, clock->name); + if (ret < 0) { + nvkm_error(subdev, "%02x freq unknown\n", clock->name); + return ret; + } + clk->bstate.base.domain[clock->name] = ret; + clock++; + } + + nvkm_pstate_info(clk, &clk->bstate); + + if (clk->func->init) + return clk->func->init(clk); + + clk->astate = clk->state_nr - 1; + clk->dstate = 0; + clk->pstate = -1; + clk->temp = 90; /* reasonable default value */ + nvkm_pstate_calc(clk, true); + return 0; +} + +static void * +nvkm_clk_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_clk *clk = nvkm_clk(subdev); + struct nvkm_pstate *pstate, *temp; + + /* Early return if the pstates have been provided statically */ + if (clk->func->pstates) + return clk; + + list_for_each_entry_safe(pstate, temp, &clk->states, head) { + nvkm_pstate_del(pstate); + } + + return clk; +} + +static const struct nvkm_subdev_func +nvkm_clk = { + .dtor = nvkm_clk_dtor, + .init = nvkm_clk_init, + .fini = nvkm_clk_fini, +}; + +int +nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->subdev; + struct nvkm_bios *bios = device->bios; + int ret, idx, arglen; + const char *mode; + struct nvbios_vpstate_header h; + + nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev); + + if (bios && !nvbios_vpstate_parse(bios, &h)) { + struct nvbios_vpstate_entry base, boost; + if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost)) + clk->boost_khz = boost.clock_mhz * 1000; + if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base)) + clk->base_khz = base.clock_mhz * 1000; + } + + clk->func = func; + INIT_LIST_HEAD(&clk->states); + clk->domains = func->domains; + clk->ustate_ac = -1; + clk->ustate_dc = -1; + clk->allow_reclock = allow_reclock; + + INIT_WORK(&clk->work, nvkm_pstate_work); + init_waitqueue_head(&clk->wait); + atomic_set(&clk->waiting, 0); + + /* If no pstates are provided, try and fetch them from the BIOS */ + if (!func->pstates) { + idx = 0; + do { + ret = nvkm_pstate_new(clk, idx++); + } while (ret == 0); + } else { + for (idx = 0; idx < func->nr_pstates; idx++) + 
list_add_tail(&func->pstates[idx].head, &clk->states); + clk->state_nr = func->nr_pstates; + } + + mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen); + if (mode) { + clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen); + clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen); + } + + mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen); + if (mode) + clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen); + + mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen); + if (mode) + clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen); + + clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost", + NVKM_CLK_BOOST_NONE); + return 0; +} + +int +nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk) +{ + if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL))) + return -ENOMEM; + return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c new file mode 100644 index 000000000..07157cf53 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c @@ -0,0 +1,48 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "nv50.h" + +static const struct nvkm_clk_func +g84_clk = { + .read = nv50_clk_read, + .calc = nv50_clk_calc, + .prog = nv50_clk_prog, + .tidy = nv50_clk_tidy, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_core , 0xff, 0, "core", 1000 }, + { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, + { nv_clk_src_mem , 0xff, 0, "memory", 1000 }, + { nv_clk_src_vdec , 0xff }, + { nv_clk_src_max } + } +}; + +int +g84_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + return nv50_clk_new_(&g84_clk, device, type, inst, (device->chipset >= 0x94), pclk); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c new file mode 100644 index 000000000..6eea11aef --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c @@ -0,0 +1,481 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define gf100_clk(p) container_of((p), struct gf100_clk, base) +#include "priv.h" +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> +#include <subdev/timer.h> + +struct gf100_clk_info { + u32 freq; + u32 ssel; + u32 mdiv; + u32 dsrc; + u32 ddiv; + u32 coef; +}; + +struct gf100_clk { + struct nvkm_clk base; + struct gf100_clk_info eng[16]; +}; + +static u32 read_div(struct gf100_clk *, int, u32, u32); + +static u32 +read_vco(struct gf100_clk *clk, u32 dsrc) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ssrc = nvkm_rd32(device, dsrc); + if (!(ssrc & 0x00000100)) + return nvkm_clk_read(&clk->base, nv_clk_src_sppll0); + return nvkm_clk_read(&clk->base, nv_clk_src_sppll1); +} + +static u32 +read_pll(struct gf100_clk *clk, u32 pll) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ctrl = nvkm_rd32(device, pll + 0x00); + u32 coef = nvkm_rd32(device, pll + 0x04); + u32 P = (coef & 0x003f0000) >> 16; + u32 N = (coef & 0x0000ff00) >> 8; + u32 M = (coef & 0x000000ff) >> 0; + u32 sclk; + + if (!(ctrl & 0x00000001)) + return 0; + + switch (pll) { + case 0x00e800: + case 0x00e820: + sclk = device->crystal; + P = 1; + break; + case 0x132000: + sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc); + break; + case 0x132020: + sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref); + break; + case 0x137000: + case 0x137020: + case 0x137040: + case 0x1370e0: + sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140); + break; + default: + return 0; + } + + return sclk * N / M / P; +} + +static u32 +read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); + u32 sclk, sctl, sdiv = 2; + + switch (ssrc & 0x00000003) { + case 0: + if ((ssrc & 0x00030000) != 0x00030000) + return device->crystal; + return 108000; + case 2: + return 100000; + case 3: + sclk = read_vco(clk, dsrc + (doff * 4)); + + /* Memclk has doff of 0 despite its alt. 
location */ + if (doff <= 2) { + sctl = nvkm_rd32(device, dctl + (doff * 4)); + + if (sctl & 0x80000000) { + if (ssrc & 0x100) + sctl >>= 8; + + sdiv = (sctl & 0x3f) + 2; + } + } + + return (sclk * 2) / sdiv; + default: + return 0; + } +} + +static u32 +read_clk(struct gf100_clk *clk, int idx) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4)); + u32 ssel = nvkm_rd32(device, 0x137100); + u32 sclk, sdiv; + + if (ssel & (1 << idx)) { + if (idx < 7) + sclk = read_pll(clk, 0x137000 + (idx * 0x20)); + else + sclk = read_pll(clk, 0x1370e0); + sdiv = ((sctl & 0x00003f00) >> 8) + 2; + } else { + sclk = read_div(clk, idx, 0x137160, 0x1371d0); + sdiv = ((sctl & 0x0000003f) >> 0) + 2; + } + + if (sctl & 0x80000000) + return (sclk * 2) / sdiv; + + return sclk; +} + +static int +gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct gf100_clk *clk = gf100_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_href: + return 100000; + case nv_clk_src_sppll0: + return read_pll(clk, 0x00e800); + case nv_clk_src_sppll1: + return read_pll(clk, 0x00e820); + + case nv_clk_src_mpllsrcref: + return read_div(clk, 0, 0x137320, 0x137330); + case nv_clk_src_mpllsrc: + return read_pll(clk, 0x132020); + case nv_clk_src_mpll: + return read_pll(clk, 0x132000); + case nv_clk_src_mdiv: + return read_div(clk, 0, 0x137300, 0x137310); + case nv_clk_src_mem: + if (nvkm_rd32(device, 0x1373f0) & 0x00000002) + return nvkm_clk_read(&clk->base, nv_clk_src_mpll); + return nvkm_clk_read(&clk->base, nv_clk_src_mdiv); + + case nv_clk_src_gpc: + return read_clk(clk, 0x00); + case nv_clk_src_rop: + return read_clk(clk, 0x01); + case nv_clk_src_hubk07: + return read_clk(clk, 0x02); + case nv_clk_src_hubk06: + return read_clk(clk, 0x07); + case nv_clk_src_hubk01: + return read_clk(clk, 0x08); + case nv_clk_src_copy: + return read_clk(clk, 0x09); + case nv_clk_src_pmu: + return read_clk(clk, 0x0c); + case nv_clk_src_vdec: + return read_clk(clk, 0x0e); + default: + nvkm_error(subdev, "invalid clock source %d\n", src); + return -EINVAL; + } +} + +static u32 +calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv) +{ + u32 div = min((ref * 2) / freq, (u32)65); + if (div < 2) + div = 2; + + *ddiv = div - 2; + return (ref * 2) / div; +} + +static u32 +calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv) +{ + u32 sclk; + + /* use one of the fixed frequencies if possible */ + *ddiv = 0x00000000; + switch (freq) { + case 27000: + case 108000: + *dsrc = 0x00000000; + if (freq == 108000) + *dsrc |= 0x00030000; + return freq; + case 100000: + *dsrc = 0x00000002; + return freq; + default: + *dsrc = 0x00000003; + break; + } + + /* otherwise, calculate the closest divider */ + sclk = read_vco(clk, 0x137160 + (idx * 4)); + if (idx < 7) + sclk = calc_div(clk, idx, sclk, freq, ddiv); + return sclk; +} + +static u32 +calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_bios *bios = subdev->device->bios; + struct nvbios_pll limits; + int N, M, P, ret; + + ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits); + if (ret) + return 0; + + limits.refclk = read_div(clk, idx, 0x137120, 0x137140); + if (!limits.refclk) + return 0; + + ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P); + if (ret <= 0) + return 0; + + 
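/* pack the chosen N, M and P into the coefficient register layout decoded by read_pll() above (P at bit 16, N at bit 8, M at bit 0) */ + 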
*coef = (P << 16) | (N << 8) | M; + return ret; +} + +static int +calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom) +{ + struct gf100_clk_info *info = &clk->eng[idx]; + u32 freq = cstate->domain[dom]; + u32 src0, div0, div1D, div1P = 0; + u32 clk0, clk1 = 0; + + /* invalid clock domain */ + if (!freq) + return 0; + + /* first possible path, using only dividers */ + clk0 = calc_src(clk, idx, freq, &src0, &div0); + clk0 = calc_div(clk, idx, clk0, freq, &div1D); + + /* see if we can get any closer using PLLs */ + if (clk0 != freq && (0x00004387 & (1 << idx))) { + if (idx <= 7) + clk1 = calc_pll(clk, idx, freq, &info->coef); + else + clk1 = cstate->domain[nv_clk_src_hubk06]; + clk1 = calc_div(clk, idx, clk1, freq, &div1P); + } + + /* select the method which gets closest to target freq */ + if (abs((int)freq - clk0) <= abs((int)freq - clk1)) { + info->dsrc = src0; + if (div0) { + info->ddiv |= 0x80000000; + info->ddiv |= div0 << 8; + info->ddiv |= div0; + } + if (div1D) { + info->mdiv |= 0x80000000; + info->mdiv |= div1D; + } + info->ssel = info->coef = 0; + info->freq = clk0; + } else { + if (div1P) { + info->mdiv |= 0x80000000; + info->mdiv |= div1P << 8; + } + info->ssel = (1 << idx); + info->freq = clk1; + } + + return 0; +} + +static int +gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gf100_clk *clk = gf100_clk(base); + int ret; + + if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) || + (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) || + (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) || + (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) || + (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) || + (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) || + (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) || + (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec))) + return ret; + + return 0; +} + +static void +gf100_clk_prog_0(struct gf100_clk *clk, int idx) +{ + struct gf100_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + if (idx < 7 && !info->ssel) { + nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv); + nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc); + } +} + +static void +gf100_clk_prog_1(struct gf100_clk *clk, int idx) +{ + struct nvkm_device *device = clk->base.subdev.device; + nvkm_mask(device, 0x137100, (1 << idx), 0x00000000); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x137100) & (1 << idx))) + break; + ); +} + +static void +gf100_clk_prog_2(struct gf100_clk *clk, int idx) +{ + struct gf100_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + const u32 addr = 0x137000 + (idx * 0x20); + if (idx <= 7) { + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000); + nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000); + if (info->coef) { + nvkm_wr32(device, addr + 0x04, info->coef); + nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); + + /* Test PLL lock */ + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000); + nvkm_msec(device, 2000, + if (nvkm_rd32(device, addr + 0x00) & 0x00020000) + break; + ); + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010); + + /* Enable sync mode */ + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004); + } + } +} + +static void +gf100_clk_prog_3(struct gf100_clk *clk, int idx) +{ + struct gf100_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + if (info->ssel) { + nvkm_mask(device, 0x137100, (1 << 
idx), info->ssel); + nvkm_msec(device, 2000, + u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx); + if (tmp == info->ssel) + break; + ); + } +} + +static void +gf100_clk_prog_4(struct gf100_clk *clk, int idx) +{ + struct gf100_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv); +} + +static int +gf100_clk_prog(struct nvkm_clk *base) +{ + struct gf100_clk *clk = gf100_clk(base); + struct { + void (*exec)(struct gf100_clk *, int); + } stage[] = { + { gf100_clk_prog_0 }, /* div programming */ + { gf100_clk_prog_1 }, /* select div mode */ + { gf100_clk_prog_2 }, /* (maybe) program pll */ + { gf100_clk_prog_3 }, /* (maybe) select pll mode */ + { gf100_clk_prog_4 }, /* final divider */ + }; + int i, j; + + for (i = 0; i < ARRAY_SIZE(stage); i++) { + for (j = 0; j < ARRAY_SIZE(clk->eng); j++) { + if (!clk->eng[j].freq) + continue; + stage[i].exec(clk, j); + } + } + + return 0; +} + +static void +gf100_clk_tidy(struct nvkm_clk *base) +{ + struct gf100_clk *clk = gf100_clk(base); + memset(clk->eng, 0x00, sizeof(clk->eng)); +} + +static const struct nvkm_clk_func +gf100_clk = { + .read = gf100_clk_read, + .calc = gf100_clk_calc, + .prog = gf100_clk_prog, + .tidy = gf100_clk_tidy, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_hubk06 , 0x00 }, + { nv_clk_src_hubk01 , 0x01 }, + { nv_clk_src_copy , 0x02 }, + { nv_clk_src_gpc , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 }, + { nv_clk_src_rop , 0x04 }, + { nv_clk_src_mem , 0x05, 0, "memory", 1000 }, + { nv_clk_src_vdec , 0x06 }, + { nv_clk_src_pmu , 0x0a }, + { nv_clk_src_hubk07 , 0x0b }, + { nv_clk_src_max } + } +}; + +int +gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct gf100_clk *clk; + + if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) + return -ENOMEM; + *pclk = &clk->base; + + return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c new file mode 100644 index 000000000..0d8e2ddcc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c @@ -0,0 +1,517 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#define gk104_clk(p) container_of((p), struct gk104_clk, base) +#include "priv.h" +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> +#include <subdev/timer.h> + +struct gk104_clk_info { + u32 freq; + u32 ssel; + u32 mdiv; + u32 dsrc; + u32 ddiv; + u32 coef; +}; + +struct gk104_clk { + struct nvkm_clk base; + struct gk104_clk_info eng[16]; +}; + +static u32 read_div(struct gk104_clk *, int, u32, u32); +static u32 read_pll(struct gk104_clk *, u32); + +static u32 +read_vco(struct gk104_clk *clk, u32 dsrc) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ssrc = nvkm_rd32(device, dsrc); + if (!(ssrc & 0x00000100)) + return read_pll(clk, 0x00e800); + return read_pll(clk, 0x00e820); +} + +static u32 +read_pll(struct gk104_clk *clk, u32 pll) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ctrl = nvkm_rd32(device, pll + 0x00); + u32 coef = nvkm_rd32(device, pll + 0x04); + u32 P = (coef & 0x003f0000) >> 16; + u32 N = (coef & 0x0000ff00) >> 8; + u32 M = (coef & 0x000000ff) >> 0; + u32 sclk; + u16 fN = 0xf000; + + if (!(ctrl & 0x00000001)) + return 0; + + switch (pll) { + case 0x00e800: + case 0x00e820: + sclk = device->crystal; + P = 1; + break; + case 0x132000: + sclk = read_pll(clk, 0x132020); + P = (coef & 0x10000000) ? 2 : 1; + break; + case 0x132020: + sclk = read_div(clk, 0, 0x137320, 0x137330); + fN = nvkm_rd32(device, pll + 0x10) >> 16; + break; + case 0x137000: + case 0x137020: + case 0x137040: + case 0x1370e0: + sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140); + break; + default: + return 0; + } + + if (P == 0) + P = 1; + + sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13); + return sclk / (M * P); +} + +static u32 +read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); + u32 sctl = nvkm_rd32(device, dctl + (doff * 4)); + + switch (ssrc & 0x00000003) { + case 0: + if ((ssrc & 0x00030000) != 0x00030000) + return device->crystal; + return 108000; + case 2: + return 100000; + case 3: + if (sctl & 0x80000000) { + u32 sclk = read_vco(clk, dsrc + (doff * 4)); + u32 sdiv = (sctl & 0x0000003f) + 2; + return (sclk * 2) / sdiv; + } + + return read_vco(clk, dsrc + (doff * 4)); + default: + return 0; + } +} + +static u32 +read_mem(struct gk104_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) { + case 1: return read_pll(clk, 0x132020); + case 2: return read_pll(clk, 0x132000); + default: + return 0; + } +} + +static u32 +read_clk(struct gk104_clk *clk, int idx) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4)); + u32 sclk, sdiv; + + if (idx < 7) { + u32 ssel = nvkm_rd32(device, 0x137100); + if (ssel & (1 << idx)) { + sclk = read_pll(clk, 0x137000 + (idx * 0x20)); + sdiv = 1; + } else { + sclk = read_div(clk, idx, 0x137160, 0x1371d0); + sdiv = 0; + } + } else { + u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04)); + if ((ssrc & 0x00000003) == 0x00000003) { + sclk = read_div(clk, idx, 0x137160, 0x1371d0); + if (ssrc & 0x00000100) { + if (ssrc & 0x40000000) + sclk = read_pll(clk, 0x1370e0); + sdiv = 1; + } else { + sdiv = 0; + } + } else { + sclk = read_div(clk, idx, 0x137160, 0x1371d0); + sdiv = 0; + } + } + + if (sctl & 0x80000000) { + if (sdiv) + sdiv = ((sctl & 0x00003f00) >> 8) + 2; + else + sdiv = ((sctl & 0x0000003f) >> 0) + 2; + return (sclk * 2) / sdiv; + } + + return sclk; +} + +static int 
+gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct gk104_clk *clk = gk104_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_href: + return 100000; + case nv_clk_src_mem: + return read_mem(clk); + case nv_clk_src_gpc: + return read_clk(clk, 0x00); + case nv_clk_src_rop: + return read_clk(clk, 0x01); + case nv_clk_src_hubk07: + return read_clk(clk, 0x02); + case nv_clk_src_hubk06: + return read_clk(clk, 0x07); + case nv_clk_src_hubk01: + return read_clk(clk, 0x08); + case nv_clk_src_pmu: + return read_clk(clk, 0x0c); + case nv_clk_src_vdec: + return read_clk(clk, 0x0e); + default: + nvkm_error(subdev, "invalid clock source %d\n", src); + return -EINVAL; + } +} + +static u32 +calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv) +{ + u32 div = min((ref * 2) / freq, (u32)65); + if (div < 2) + div = 2; + + *ddiv = div - 2; + return (ref * 2) / div; +} + +static u32 +calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv) +{ + u32 sclk; + + /* use one of the fixed frequencies if possible */ + *ddiv = 0x00000000; + switch (freq) { + case 27000: + case 108000: + *dsrc = 0x00000000; + if (freq == 108000) + *dsrc |= 0x00030000; + return freq; + case 100000: + *dsrc = 0x00000002; + return freq; + default: + *dsrc = 0x00000003; + break; + } + + /* otherwise, calculate the closest divider */ + sclk = read_vco(clk, 0x137160 + (idx * 4)); + if (idx < 7) + sclk = calc_div(clk, idx, sclk, freq, ddiv); + return sclk; +} + +static u32 +calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_bios *bios = subdev->device->bios; + struct nvbios_pll limits; + int N, M, P, ret; + + ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits); + if (ret) + return 0; + + limits.refclk = read_div(clk, idx, 0x137120, 0x137140); + if (!limits.refclk) + return 0; + + ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P); + if (ret <= 0) + return 0; + + *coef = (P << 16) | (N << 8) | M; + return ret; +} + +static int +calc_clk(struct gk104_clk *clk, + struct nvkm_cstate *cstate, int idx, int dom) +{ + struct gk104_clk_info *info = &clk->eng[idx]; + u32 freq = cstate->domain[dom]; + u32 src0, div0, div1D, div1P = 0; + u32 clk0, clk1 = 0; + + /* invalid clock domain */ + if (!freq) + return 0; + + /* first possible path, using only dividers */ + clk0 = calc_src(clk, idx, freq, &src0, &div0); + clk0 = calc_div(clk, idx, clk0, freq, &div1D); + + /* see if we can get any closer using PLLs */ + if (clk0 != freq && (0x0000ff87 & (1 << idx))) { + if (idx <= 7) + clk1 = calc_pll(clk, idx, freq, &info->coef); + else + clk1 = cstate->domain[nv_clk_src_hubk06]; + clk1 = calc_div(clk, idx, clk1, freq, &div1P); + } + + /* select the method which gets closest to target freq */ + if (abs((int)freq - clk0) <= abs((int)freq - clk1)) { + info->dsrc = src0; + if (div0) { + info->ddiv |= 0x80000000; + info->ddiv |= div0; + } + if (div1D) { + info->mdiv |= 0x80000000; + info->mdiv |= div1D; + } + info->ssel = 0; + info->freq = clk0; + } else { + if (div1P) { + info->mdiv |= 0x80000000; + info->mdiv |= div1P << 8; + } + info->ssel = (1 << idx); + info->dsrc = 0x40000100; + info->freq = clk1; + } + + return 0; +} + +static int +gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gk104_clk *clk = gk104_clk(base); + int ret; + + if ((ret = 
calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) || + (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) || + (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) || + (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) || + (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) || + (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) || + (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec))) + return ret; + + return 0; +} + +static void +gk104_clk_prog_0(struct gk104_clk *clk, int idx) +{ + struct gk104_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + if (!info->ssel) { + nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv); + nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc); + } +} + +static void +gk104_clk_prog_1_0(struct gk104_clk *clk, int idx) +{ + struct nvkm_device *device = clk->base.subdev.device; + nvkm_mask(device, 0x137100, (1 << idx), 0x00000000); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x137100) & (1 << idx))) + break; + ); +} + +static void +gk104_clk_prog_1_1(struct gk104_clk *clk, int idx) +{ + struct nvkm_device *device = clk->base.subdev.device; + nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000); +} + +static void +gk104_clk_prog_2(struct gk104_clk *clk, int idx) +{ + struct gk104_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + const u32 addr = 0x137000 + (idx * 0x20); + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000); + nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000); + if (info->coef) { + nvkm_wr32(device, addr + 0x04, info->coef); + nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); + + /* Test PLL lock */ + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000); + nvkm_msec(device, 2000, + if (nvkm_rd32(device, addr + 0x00) & 0x00020000) + break; + ); + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010); + + /* Enable sync mode */ + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004); + } +} + +static void +gk104_clk_prog_3(struct gk104_clk *clk, int idx) +{ + struct gk104_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + if (info->ssel) + nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv); + else + nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv); +} + +static void +gk104_clk_prog_4_0(struct gk104_clk *clk, int idx) +{ + struct gk104_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + if (info->ssel) { + nvkm_mask(device, 0x137100, (1 << idx), info->ssel); + nvkm_msec(device, 2000, + u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx); + if (tmp == info->ssel) + break; + ); + } +} + +static void +gk104_clk_prog_4_1(struct gk104_clk *clk, int idx) +{ + struct gk104_clk_info *info = &clk->eng[idx]; + struct nvkm_device *device = clk->base.subdev.device; + if (info->ssel) { + nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000); + nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100); + } +} + +static int +gk104_clk_prog(struct nvkm_clk *base) +{ + struct gk104_clk *clk = gk104_clk(base); + struct { + u32 mask; + void (*exec)(struct gk104_clk *, int); + } stage[] = { + { 0x007f, gk104_clk_prog_0 }, /* div programming */ + { 0x007f, gk104_clk_prog_1_0 }, /* select div mode */ + { 0xff80, gk104_clk_prog_1_1 }, + { 0x00ff, gk104_clk_prog_2 }, /* (maybe) program pll */ + { 0xff80, gk104_clk_prog_3 }, /* final divider */ + { 0x007f, gk104_clk_prog_4_0 }, /* (maybe) select pll 
mode */ + { 0xff80, gk104_clk_prog_4_1 }, + }; + int i, j; + + for (i = 0; i < ARRAY_SIZE(stage); i++) { + for (j = 0; j < ARRAY_SIZE(clk->eng); j++) { + if (!(stage[i].mask & (1 << j))) + continue; + if (!clk->eng[j].freq) + continue; + stage[i].exec(clk, j); + } + } + + return 0; +} + +static void +gk104_clk_tidy(struct nvkm_clk *base) +{ + struct gk104_clk *clk = gk104_clk(base); + memset(clk->eng, 0x00, sizeof(clk->eng)); +} + +static const struct nvkm_clk_func +gk104_clk = { + .read = gk104_clk_read, + .calc = gk104_clk_calc, + .prog = gk104_clk_prog, + .tidy = gk104_clk_tidy, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_gpc , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 }, + { nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE }, + { nv_clk_src_rop , 0x02, NVKM_CLK_DOM_FLAG_CORE }, + { nv_clk_src_mem , 0x03, 0, "memory", 500 }, + { nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE }, + { nv_clk_src_hubk01 , 0x05 }, + { nv_clk_src_vdec , 0x06 }, + { nv_clk_src_pmu , 0x07 }, + { nv_clk_src_max } + } +}; + +int +gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct gk104_clk *clk; + + if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) + return -ENOMEM; + *pclk = &clk->base; + + return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c new file mode 100644 index 000000000..d573fb091 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -0,0 +1,657 @@ +/* + * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c + * + */ +#include "priv.h" +#include "gk20a.h" + +#include <core/tegra.h> +#include <subdev/timer.h> + +static const u8 _pl_to_div[] = { +/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ +/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32, +}; + +static u32 pl_to_div(u32 pl) +{ + if (pl >= ARRAY_SIZE(_pl_to_div)) + return 1; + + return _pl_to_div[pl]; +} + +static u32 div_to_pl(u32 div) +{ + u32 pl; + + for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) { + if (_pl_to_div[pl] >= div) + return pl; + } + + return ARRAY_SIZE(_pl_to_div) - 1; +} + +static const struct gk20a_clk_pllg_params gk20a_pllg_params = { + .min_vco = 1000000, .max_vco = 2064000, + .min_u = 12000, .max_u = 38000, + .min_m = 1, .max_m = 255, + .min_n = 8, .max_n = 255, + .min_pl = 1, .max_pl = 32, +}; + +void +gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = nvkm_rd32(device, GPCPLL_COEFF); + pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); + pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH); + pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); +} + +void +gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT; + val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; + val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT; + nvkm_wr32(device, GPCPLL_COEFF, val); +} + +u32 +gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll) +{ + u32 rate; + u32 divider; + + rate = clk->parent_rate * pll->n; + divider = pll->m * clk->pl_to_div(pll->pl); + + return rate / divider / 2; +} + +int +gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate, + struct gk20a_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + u32 target_clk_f, ref_clk_f, target_freq; + u32 min_vco_f, max_vco_f; + u32 low_pl, high_pl, best_pl; + u32 target_vco_f; + u32 best_m, best_n; + u32 best_delta = ~0; + u32 pl; + + target_clk_f = rate * 2 / KHZ; + ref_clk_f = clk->parent_rate / KHZ; + + target_vco_f = target_clk_f + target_clk_f / 50; + max_vco_f = max(clk->params->max_vco, target_vco_f); + min_vco_f = clk->params->min_vco; + best_m = clk->params->max_m; + best_n = clk->params->min_n; + best_pl = clk->params->min_pl; + + /* min_pl <= high_pl <= max_pl */ + high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; + high_pl = min(high_pl, clk->params->max_pl); + high_pl = max(high_pl, clk->params->min_pl); + high_pl = clk->div_to_pl(high_pl); + + /* min_pl <= low_pl <= max_pl */ + low_pl = min_vco_f / target_vco_f; + low_pl = min(low_pl, clk->params->max_pl); + low_pl = max(low_pl, clk->params->min_pl); + low_pl = clk->div_to_pl(low_pl); + + nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl, + clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl)); + + /* Select lowest possible VCO */ + for (pl = low_pl; pl <= high_pl; pl++) { + u32 m, n, n2; + + target_vco_f = target_clk_f * clk->pl_to_div(pl); + + for (m = clk->params->min_m; m <= clk->params->max_m; m++) { + u32 u_f = ref_clk_f / m; + + if (u_f < clk->params->min_u) + break; + if (u_f > clk->params->max_u) + continue; + + n = (target_vco_f * m) / ref_clk_f; + n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f; + + if (n > clk->params->max_n) + break; + + for (; n 
<= n2; n++) { + u32 vco_f; + + if (n < clk->params->min_n) + continue; + if (n > clk->params->max_n) + break; + + vco_f = ref_clk_f * n / m; + + if (vco_f >= min_vco_f && vco_f <= max_vco_f) { + u32 delta, lwv; + + lwv = (vco_f + (clk->pl_to_div(pl) / 2)) + / clk->pl_to_div(pl); + delta = abs(lwv - target_clk_f); + + if (delta < best_delta) { + best_delta = delta; + best_m = m; + best_n = n; + best_pl = pl; + + if (best_delta == 0) + goto found_match; + } + } + } + } + } + +found_match: + WARN_ON(best_delta == ~0); + + if (best_delta != 0) + nvkm_debug(subdev, + "no best match for target @ %dMHz on gpc_pll", + target_clk_f / KHZ); + + pll->m = best_m; + pll->n = best_n; + pll->pl = best_pl; + + target_freq = gk20a_pllg_calc_rate(clk, pll); + + nvkm_debug(subdev, + "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n", + target_freq / KHZ, pll->m, pll->n, pll->pl, + clk->pl_to_div(pll->pl)); + return 0; +} + +static int +gk20a_pllg_slide(struct gk20a_clk *clk, u32 n) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + struct gk20a_pll pll; + int ret = 0; + + /* get old coefficients */ + gk20a_pllg_read_mnp(clk, &pll); + /* do nothing if NDIV is the same */ + if (n == pll.n) + return 0; + + /* pll slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); + + /* new ndiv ready for ramp */ + pll.n = n; + udelay(1); + gk20a_pllg_write_mnp(clk, &pll); + + /* dynamic ramp to new ndiv */ + udelay(1); + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT)); + + /* wait for ramping to complete */ + if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0) + ret = -ETIMEDOUT; + + /* exit slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); + nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); + + return ret; +} + +static int +gk20a_pllg_enable(struct gk20a_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); + nvkm_rd32(device, GPCPLL_CFG); + + /* enable lock detection */ + val = nvkm_rd32(device, GPCPLL_CFG); + if (val & GPCPLL_CFG_LOCK_DET_OFF) { + val &= ~GPCPLL_CFG_LOCK_DET_OFF; + nvkm_wr32(device, GPCPLL_CFG, val); + } + + /* wait for lock */ + if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK, + GPCPLL_CFG_LOCK) < 0) + return -ETIMEDOUT; + + /* switch to VCO mode */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), + BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + return 0; +} + +static void +gk20a_pllg_disable(struct gk20a_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + + /* put PLL in bypass before disabling it */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); + nvkm_rd32(device, GPCPLL_CFG); +} + +static int +gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + struct gk20a_pll cur_pll; + int ret; + + gk20a_pllg_read_mnp(clk, &cur_pll); + + /* split VCO-to-bypass jump in half by setting out divider 
1:2 */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + udelay(2); + + gk20a_pllg_disable(clk); + + gk20a_pllg_write_mnp(clk, pll); + + ret = gk20a_pllg_enable(clk); + if (ret) + return ret; + + /* restore out divider 1:1 */ + udelay(2); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + + return 0; +} + +static int +gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll) +{ + struct gk20a_pll cur_pll; + int ret; + + if (gk20a_pllg_is_enabled(clk)) { + gk20a_pllg_read_mnp(clk, &cur_pll); + + /* just do NDIV slide if there is no change to M and PL */ + if (pll->m == cur_pll.m && pll->pl == cur_pll.pl) + return gk20a_pllg_slide(clk, pll->n); + + /* slide down to current NDIV_LO */ + cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); + ret = gk20a_pllg_slide(clk, cur_pll.n); + if (ret) + return ret; + } + + /* program MNP with the new clock parameters and new NDIV_LO */ + cur_pll = *pll; + cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); + ret = gk20a_pllg_program_mnp(clk, &cur_pll); + if (ret) + return ret; + + /* slide up to new NDIV */ + return gk20a_pllg_slide(clk, pll->n); +} + +static struct nvkm_pstate +gk20a_pstates[] = { + { + .base = { + .domain[nv_clk_src_gpc] = 72000, + .voltage = 0, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 108000, + .voltage = 1, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 180000, + .voltage = 2, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 252000, + .voltage = 3, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 324000, + .voltage = 4, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 396000, + .voltage = 5, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 468000, + .voltage = 6, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 540000, + .voltage = 7, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 612000, + .voltage = 8, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 648000, + .voltage = 9, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 684000, + .voltage = 10, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 708000, + .voltage = 11, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 756000, + .voltage = 12, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 804000, + .voltage = 13, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 852000, + .voltage = 14, + }, + }, +}; + +int +gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct gk20a_clk *clk = gk20a_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + struct gk20a_pll pll; + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_gpc: + gk20a_pllg_read_mnp(clk, &pll); + return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV; + default: + nvkm_error(subdev, "invalid clock source %d\n", src); + return -EINVAL; + } +} + +int +gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gk20a_clk *clk = gk20a_clk(base); + + return gk20a_pllg_calc_mnp(clk, 
cstate->domain[nv_clk_src_gpc] * + GK20A_CLK_GPC_MDIV, &clk->pll); +} + +int +gk20a_clk_prog(struct nvkm_clk *base) +{ + struct gk20a_clk *clk = gk20a_clk(base); + int ret; + + ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll); + if (ret) + ret = gk20a_pllg_program_mnp(clk, &clk->pll); + + return ret; +} + +void +gk20a_clk_tidy(struct nvkm_clk *base) +{ +} + +int +gk20a_clk_setup_slide(struct gk20a_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 step_a, step_b; + + switch (clk->parent_rate) { + case 12000000: + case 12800000: + case 13000000: + step_a = 0x2b; + step_b = 0x0b; + break; + case 19200000: + step_a = 0x12; + step_b = 0x08; + break; + case 38400000: + step_a = 0x04; + step_b = 0x05; + break; + default: + nvkm_error(subdev, "invalid parent clock rate %u KHz", + clk->parent_rate / KHZ); + return -EINVAL; + } + + nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, + step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT); + nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, + step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT); + + return 0; +} + +void +gk20a_clk_fini(struct nvkm_clk *base) +{ + struct nvkm_device *device = base->subdev.device; + struct gk20a_clk *clk = gk20a_clk(base); + + /* slide to VCO min */ + if (gk20a_pllg_is_enabled(clk)) { + struct gk20a_pll pll; + u32 n_lo; + + gk20a_pllg_read_mnp(clk, &pll); + n_lo = gk20a_pllg_n_lo(clk, &pll); + gk20a_pllg_slide(clk, n_lo); + } + + gk20a_pllg_disable(clk); + + /* set IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1); +} + +static int +gk20a_clk_init(struct nvkm_clk *base) +{ + struct gk20a_clk *clk = gk20a_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + int ret; + + /* get out from IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); + nvkm_rd32(device, GPCPLL_CFG); + udelay(5); + + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, + GPC2CLK_OUT_INIT_VAL); + + ret = gk20a_clk_setup_slide(clk); + if (ret) + return ret; + + /* Start with lowest frequency */ + base->func->calc(base, &base->func->pstates[0].base); + ret = base->func->prog(&clk->base); + if (ret) { + nvkm_error(subdev, "cannot initialize clock\n"); + return ret; + } + + return 0; +} + +static const struct nvkm_clk_func +gk20a_clk = { + .init = gk20a_clk_init, + .fini = gk20a_clk_fini, + .read = gk20a_clk_read, + .calc = gk20a_clk_calc, + .prog = gk20a_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = gk20a_pstates, + .nr_pstates = ARRAY_SIZE(gk20a_pstates), + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max } + } +}; + +int +gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params, + struct gk20a_clk *clk) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + int ret; + int i; + + /* Finish initializing the pstates */ + for (i = 0; i < func->nr_pstates; i++) { + INIT_LIST_HEAD(&func->pstates[i].list); + func->pstates[i].pstate = i + 1; + } + + clk->params = params; + clk->parent_rate = clk_get_rate(tdev->clk); + + ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base); + if (ret) + return ret; + + nvkm_debug(&clk->base.subdev, "parent clock rate: %d Khz\n", + clk->parent_rate / KHZ); + + return 0; +} + +int +gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk 
**pclk) +{ + struct gk20a_clk *clk; + int ret; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base; + + ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk); + + clk->pl_to_div = pl_to_div; + clk->div_to_pl = div_to_pl; + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h new file mode 100644 index 000000000..286413ff4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NVKM_CLK_GK20A_H__ +#define __NVKM_CLK_GK20A_H__ + +#define KHZ (1000) +#define MHZ (KHZ * 1000) + +#define MASK(w) ((1 << (w)) - 1) + +#define GK20A_CLK_GPC_MDIV 1000 + +#define SYS_GPCPLL_CFG_BASE 0x00137000 +#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) +#define GPCPLL_CFG_ENABLE BIT(0) +#define GPCPLL_CFG_IDDQ BIT(1) +#define GPCPLL_CFG_LOCK_DET_OFF BIT(4) +#define GPCPLL_CFG_LOCK BIT(17) + +#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) +#define GPCPLL_CFG2_SETUP2_SHIFT 16 +#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 + +#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18) +#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0 +#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9 +#define GPCPLL_CFG3_VCO_CTRL_MASK \ + (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT) +#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 +#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8 + +#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) +#define GPCPLL_COEFF_M_SHIFT 0 +#define GPCPLL_COEFF_M_WIDTH 8 +#define GPCPLL_COEFF_N_SHIFT 8 +#define GPCPLL_COEFF_N_WIDTH 8 +#define GPCPLL_COEFF_N_MASK \ + (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT) +#define GPCPLL_COEFF_P_SHIFT 16 +#define GPCPLL_COEFF_P_WIDTH 6 + +#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) +#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 +#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 +#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 +#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 +#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 + +#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0) +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ + (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) + 
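+/*
+ * Worked example (hypothetical register value): with the COEFF field
+ * widths/shifts above, a GPCPLL_COEFF readout of 0x00014001 decodes to
+ * M = 1, N = 0x40 (64), PL = 1.  For a 12 MHz parent clock (one of the
+ * rates gk20a_clk_setup_slide() accepts) and _pl_to_div[1] = 2,
+ * gk20a_pllg_calc_rate() then gives:
+ *
+ *   gpcclk = 12 MHz * 64 / (1 * 2) / 2 = 192 MHz
+ */
+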
+#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) +#define SEL_VCO_GPC2CLK_OUT_SHIFT 0 + +#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) +#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 +#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 +#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 +#define GPC2CLK_OUT_VCODIV_WIDTH 6 +#define GPC2CLK_OUT_VCODIV_SHIFT 8 +#define GPC2CLK_OUT_VCODIV1 0 +#define GPC2CLK_OUT_VCODIV2 2 +#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ + GPC2CLK_OUT_VCODIV_SHIFT) +#define GPC2CLK_OUT_BYPDIV_WIDTH 6 +#define GPC2CLK_OUT_BYPDIV_SHIFT 0 +#define GPC2CLK_OUT_BYPDIV31 0x3c +#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ + | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ + | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) +#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ + | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ + | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) + +/* All frequencies in Khz */ +struct gk20a_clk_pllg_params { + u32 min_vco, max_vco; + u32 min_u, max_u; + u32 min_m, max_m; + u32 min_n, max_n; + u32 min_pl, max_pl; +}; + +struct gk20a_pll { + u32 m; + u32 n; + u32 pl; +}; + +struct gk20a_clk { + struct nvkm_clk base; + const struct gk20a_clk_pllg_params *params; + struct gk20a_pll pll; + u32 parent_rate; + + u32 (*div_to_pl)(u32); + u32 (*pl_to_div)(u32); +}; +#define gk20a_clk(p) container_of((p), struct gk20a_clk, base) + +u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *); +int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *); +void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *); +void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *); + +static inline bool +gk20a_pllg_is_enabled(struct gk20a_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = nvkm_rd32(device, GPCPLL_CFG); + return val & GPCPLL_CFG_ENABLE; +} + +static inline u32 +gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll) +{ + return DIV_ROUND_UP(pll->m * clk->params->min_vco, + clk->parent_rate / KHZ); +} + +int gk20a_clk_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, const struct nvkm_clk_func *, + const struct gk20a_clk_pllg_params *, struct gk20a_clk *); +void gk20a_clk_fini(struct nvkm_clk *); +int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src); +int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *); +int gk20a_clk_prog(struct nvkm_clk *); +void gk20a_clk_tidy(struct nvkm_clk *); + +int gk20a_clk_setup_slide(struct gk20a_clk *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c new file mode 100644 index 000000000..7c33542f6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c @@ -0,0 +1,1071 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/clk.h>
+#include <subdev/volt.h>
+#include <subdev/timer.h>
+#include <core/device.h>
+#include <core/tegra.h>
+
+#include "priv.h"
+#include "gk20a.h"
+
+#define GPCPLL_CFG_SYNC_MODE	BIT(2)
+
+#define BYPASSCTRL_SYS	(SYS_GPCPLL_CFG_BASE + 0x340)
+#define BYPASSCTRL_SYS_GPCPLL_SHIFT	0
+#define BYPASSCTRL_SYS_GPCPLL_WIDTH	1
+
+#define GPCPLL_CFG2_SDM_DIN_SHIFT	0
+#define GPCPLL_CFG2_SDM_DIN_WIDTH	8
+#define GPCPLL_CFG2_SDM_DIN_MASK \
+	(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
+#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT	8
+#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH	15
+#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
+	(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
+#define GPCPLL_CFG2_SETUP2_SHIFT	16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT	24
+
+#define GPCPLL_DVFS0	(SYS_GPCPLL_CFG_BASE + 0x10)
+#define GPCPLL_DVFS0_DFS_COEFF_SHIFT	0
+#define GPCPLL_DVFS0_DFS_COEFF_WIDTH	7
+#define GPCPLL_DVFS0_DFS_COEFF_MASK \
+	(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
+#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT	8
+#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH	7
+#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
+	(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
+
+#define GPCPLL_DVFS1	(SYS_GPCPLL_CFG_BASE + 0x14)
+#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT	0
+#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH	7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT	7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH	1
+#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT	8
+#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH	7
+#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT	15
+#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH	1
+#define GPCPLL_DVFS1_DFS_CTRL_SHIFT	16
+#define GPCPLL_DVFS1_DFS_CTRL_WIDTH	12
+#define GPCPLL_DVFS1_EN_SDM_SHIFT	28
+#define GPCPLL_DVFS1_EN_SDM_WIDTH	1
+#define GPCPLL_DVFS1_EN_SDM_BIT	BIT(28)
+#define GPCPLL_DVFS1_EN_DFS_SHIFT	29
+#define GPCPLL_DVFS1_EN_DFS_WIDTH	1
+#define GPCPLL_DVFS1_EN_DFS_BIT	BIT(29)
+#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT	30
+#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH	1
+#define GPCPLL_DVFS1_EN_DFS_CAL_BIT	BIT(30)
+#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT	31
+#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH	1
+#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT	BIT(31)
+
+#define GPC_BCAST_GPCPLL_DVFS2	(GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
+#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT	BIT(16)
+
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT	24
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH	7
+
+#define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE	12	/* -2^12 ...
2^12-1 */ + +struct gm20b_clk_dvfs_params { + s32 coeff_slope; + s32 coeff_offs; + u32 vco_ctrl; +}; + +static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = { + .coeff_slope = -165230, + .coeff_offs = 214007, + .vco_ctrl = 0x7 << 3, +}; + +/* + * base.n is now the *integer* part of the N factor. + * sdm_din contains n's decimal part. + */ +struct gm20b_pll { + struct gk20a_pll base; + u32 sdm_din; +}; + +struct gm20b_clk_dvfs { + u32 dfs_coeff; + s32 dfs_det_max; + s32 dfs_ext_cal; +}; + +struct gm20b_clk { + /* currently applied parameters */ + struct gk20a_clk base; + struct gm20b_clk_dvfs dvfs; + u32 uv; + + /* new parameters to apply */ + struct gk20a_pll new_pll; + struct gm20b_clk_dvfs new_dvfs; + u32 new_uv; + + const struct gm20b_clk_dvfs_params *dvfs_params; + + /* fused parameters */ + s32 uvdet_slope; + s32 uvdet_offs; + + /* safe frequency we can use at minimum voltage */ + u32 safe_fmax_vmin; +}; +#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base) + +static u32 pl_to_div(u32 pl) +{ + return pl; +} + +static u32 div_to_pl(u32 div) +{ + return div; +} + +static const struct gk20a_clk_pllg_params gm20b_pllg_params = { + .min_vco = 1300000, .max_vco = 2600000, + .min_u = 12000, .max_u = 38400, + .min_m = 1, .max_m = 255, + .min_n = 8, .max_n = 255, + .min_pl = 1, .max_pl = 31, +}; + +static void +gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + u32 val; + + gk20a_pllg_read_mnp(&clk->base, &pll->base); + val = nvkm_rd32(device, GPCPLL_CFG2); + pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) & + MASK(GPCPLL_CFG2_SDM_DIN_WIDTH); +} + +static void +gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK, + pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT); + gk20a_pllg_write_mnp(&clk->base, &pll->base); +} + +/* + * Determine DFS_COEFF for the requested voltage. Always select external + * calibration override equal to the voltage, and set maximum detection + * limit "0" (to make sure that PLL output remains under F/V curve when + * voltage increases). + */ +static void +gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv, + struct gm20b_clk_dvfs *dvfs) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + const struct gm20b_clk_dvfs_params *p = clk->dvfs_params; + u32 coeff; + /* Work with mv as uv would likely trigger an overflow */ + s32 mv = DIV_ROUND_CLOSEST(uv, 1000); + + /* coeff = slope * voltage + offset */ + coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs; + coeff = DIV_ROUND_CLOSEST(coeff, 1000); + dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH)); + + dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs, + clk->uvdet_slope); + /* should never happen */ + if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE)) + nvkm_error(subdev, "dfs_ext_cal overflow!\n"); + + dvfs->dfs_det_max = 0; + + nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n", + __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal, + dvfs->dfs_det_max); +} + +/* + * Solve equation for integer and fractional part of the effective NDIV: + * + * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) + + * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE + * + * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw. 
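+ *
+ * Illustrative check (hypothetical inputs): with det_delta = 0 and
+ * n_eff = 100, the function below yields n_int = 100 and sdm_din = 0xf0.
+ * 0xf0 is the top byte of the signed SDM value 0xf000 (-4096), so the
+ * fractional term is -4096/2^13 = -1/2, cancelling the constant 1/2:
+ * n_eff = 100 + 1/2 - 1/2 = 100.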
+ */ +static void +gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + const struct gk20a_clk_pllg_params *p = clk->base.params; + u32 n; + s32 det_delta; + u32 rem, rem_range; + + /* calculate current ext_cal and subtract previous one */ + det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs, + clk->uvdet_slope); + det_delta -= clk->dvfs.dfs_ext_cal; + det_delta = min(det_delta, clk->dvfs.dfs_det_max); + det_delta *= clk->dvfs.dfs_coeff; + + /* integer part of n */ + n = (n_eff << DFS_DET_RANGE) - det_delta; + /* should never happen! */ + if (n <= 0) { + nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n"); + n = 1 << DFS_DET_RANGE; + } + if (n >> DFS_DET_RANGE > p->max_n) { + nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n"); + n = p->max_n << DFS_DET_RANGE; + } + *n_int = n >> DFS_DET_RANGE; + + /* fractional part of n */ + rem = ((u32)n) & MASK(DFS_DET_RANGE); + rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE; + /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */ + rem = (rem << rem_range) - BIT(SDM_DIN_RANGE); + /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */ + *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH); + + nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__, + n_eff, *n_int, *sdm_din); +} + +static int +gm20b_pllg_slide(struct gm20b_clk *clk, u32 n) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + struct gm20b_pll pll; + u32 n_int, sdm_din; + int ret = 0; + + /* calculate the new n_int/sdm_din for this n/uv */ + gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din); + + /* get old coefficients */ + gm20b_pllg_read_mnp(clk, &pll); + /* do nothing if NDIV is the same */ + if (n_int == pll.base.n && sdm_din == pll.sdm_din) + return 0; + + /* pll slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); + + /* new ndiv ready for ramp */ + /* in DVFS mode SDM is updated via "new" field */ + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK, + sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT); + pll.base.n = n_int; + udelay(1); + gk20a_pllg_write_mnp(&clk->base, &pll.base); + + /* dynamic ramp to new ndiv */ + udelay(1); + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT)); + + /* wait for ramping to complete */ + if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0) + ret = -ETIMEDOUT; + + /* in DVFS mode complete SDM update */ + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK, + sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT); + + /* exit slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); + nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); + + return ret; +} + +static int +gm20b_pllg_enable(struct gm20b_clk *clk) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); + nvkm_rd32(device, GPCPLL_CFG); + + /* In DVFS mode lock cannot be used - so just delay */ + udelay(40); + + /* set SYNC_MODE for glitchless switch out of bypass */ + 
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, + GPCPLL_CFG_SYNC_MODE); + nvkm_rd32(device, GPCPLL_CFG); + + /* switch to VCO mode */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), + BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + return 0; +} + +static void +gm20b_pllg_disable(struct gm20b_clk *clk) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* put PLL in bypass before disabling it */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + + /* clear SYNC_MODE before disabling PLL */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0); + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); + nvkm_rd32(device, GPCPLL_CFG); +} + +static int +gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + struct gm20b_pll cur_pll; + u32 n_int, sdm_din; + /* if we only change pdiv, we can do a glitchless transition */ + bool pdiv_only; + int ret; + + gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din); + gm20b_pllg_read_mnp(clk, &cur_pll); + pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din && + cur_pll.base.m == pll->m; + + /* need full sequence if clock not enabled yet */ + if (!gk20a_pllg_is_enabled(&clk->base)) + pdiv_only = false; + + /* split VCO-to-bypass jump in half by setting out divider 1:2 */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + udelay(2); + + if (pdiv_only) { + u32 old = cur_pll.base.pl; + u32 new = pll->pl; + + /* + * we can do a glitchless transition only if the old and new PL + * parameters share at least one bit set to 1. If this is not + * the case, calculate and program an interim PL that will allow + * us to respect that rule. 
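+	 *
+	 * Illustrative case (hypothetical PL values): old = 1 (0b00001) and
+	 * new = 6 (0b00110) share no bit, so the interim PL is
+	 * min(1 | BIT(ffs(6) - 1), 6 | BIT(ffs(1) - 1)) = min(3, 7) = 3,
+	 * which shares bit 0 with the old PL and bit 1 with the new one;
+	 * 3 is written first, then the final PL 6.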
+ */ + if ((old & new) == 0) { + cur_pll.base.pl = min(old | BIT(ffs(new) - 1), + new | BIT(ffs(old) - 1)); + gk20a_pllg_write_mnp(&clk->base, &cur_pll.base); + } + + cur_pll.base.pl = new; + gk20a_pllg_write_mnp(&clk->base, &cur_pll.base); + } else { + /* disable before programming if more than pdiv changes */ + gm20b_pllg_disable(clk); + + cur_pll.base = *pll; + cur_pll.base.n = n_int; + cur_pll.sdm_din = sdm_din; + gm20b_pllg_write_mnp(clk, &cur_pll); + + ret = gm20b_pllg_enable(clk); + if (ret) + return ret; + } + + /* restore out divider 1:1 */ + udelay(2); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + + return 0; +} + +static int +gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll) +{ + struct gk20a_pll cur_pll; + int ret; + + if (gk20a_pllg_is_enabled(&clk->base)) { + gk20a_pllg_read_mnp(&clk->base, &cur_pll); + + /* just do NDIV slide if there is no change to M and PL */ + if (pll->m == cur_pll.m && pll->pl == cur_pll.pl) + return gm20b_pllg_slide(clk, pll->n); + + /* slide down to current NDIV_LO */ + cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); + ret = gm20b_pllg_slide(clk, cur_pll.n); + if (ret) + return ret; + } + + /* program MNP with the new clock parameters and new NDIV_LO */ + cur_pll = *pll; + cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); + ret = gm20b_pllg_program_mnp(clk, &cur_pll); + if (ret) + return ret; + + /* slide up to new NDIV */ + return gm20b_pllg_slide(clk, pll->n); +} + +static int +gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gm20b_clk *clk = gm20b_clk(base); + struct nvkm_subdev *subdev = &base->subdev; + struct nvkm_volt *volt = base->subdev.device->volt; + int ret; + + ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] * + GK20A_CLK_GPC_MDIV, &clk->new_pll); + if (ret) + return ret; + + clk->new_uv = volt->vid[cstate->voltage].uv; + gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs); + + nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv); + + return 0; +} + +/* + * Compute PLL parameters that are always safe for the current voltage + */ +static void +gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll) +{ + u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ; + u32 parent_rate = clk->base.parent_rate / KHZ; + u32 nmin, nsafe; + + /* remove a safe margin of 10% */ + if (rate > clk->safe_fmax_vmin) + rate = rate * (100 - 10) / 100; + + /* gpc2clk */ + rate *= 2; + + nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate); + nsafe = pll->m * rate / (clk->base.parent_rate); + + if (nsafe < nmin) { + pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate); + nsafe = nmin; + } + + pll->n = nsafe; +} + +static void +gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* strobe to read external DFS coefficient */ + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT); + + nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK, + coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT); + + udelay(1); + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0); +} + +static void 
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + u32 val; + + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1), + dfs_det_cal); + udelay(1); + + val = nvkm_rd32(device, GPCPLL_DVFS1); + if (!(val & BIT(25))) { + /* Use external value to overwrite calibration value */ + val |= BIT(25) | BIT(16); + nvkm_wr32(device, GPCPLL_DVFS1, val); + } +} + +static void +gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk, + struct gm20b_clk_dvfs *dvfs) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* strobe to read external DFS coefficient */ + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT); + + nvkm_mask(device, GPCPLL_DVFS0, + GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK, + dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT | + dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT); + + udelay(1); + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0); + + gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal); +} + +static int +gm20b_clk_prog(struct nvkm_clk *base) +{ + struct gm20b_clk *clk = gm20b_clk(base); + u32 cur_freq; + int ret; + + /* No change in DVFS settings? */ + if (clk->uv == clk->new_uv) + goto prog; + + /* + * Interim step for changing DVFS detection settings: low enough + * frequency to be safe at DVFS coeff = 0. + * + * 1. If voltage is increasing: + * - safe frequency target matches the lowest - old - frequency + * - DVFS settings are still old + * - Voltage already increased to new level by volt, but maximum + * detection limit assures PLL output remains under F/V curve + * + * 2. If voltage is decreasing: + * - safe frequency target matches the lowest - new - frequency + * - DVFS settings are still old + * - Voltage is also old, it will be lowered by volt afterwards + * + * Interim step can be skipped if old frequency is below safe minimum, + * i.e., it is low enough to be safe at any voltage in operating range + * with zero DVFS coefficient. 
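+	 *
+	 * E.g. stepping from the 614400 kHz / voltage 7 pstate below to
+	 * the 998400 kHz / voltage 12 pstate is case 1: the safe interim
+	 * target is derived from the current (lower) PLL settings, and
+	 * only then are the new DVFS settings and frequency applied.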
+ */ + cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc); + if (cur_freq > clk->safe_fmax_vmin) { + struct gk20a_pll pll_safe; + + if (clk->uv < clk->new_uv) + /* voltage will raise: safe frequency is current one */ + pll_safe = clk->base.pll; + else + /* voltage will drop: safe frequency is new one */ + pll_safe = clk->new_pll; + + gm20b_dvfs_calc_safe_pll(clk, &pll_safe); + ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe); + if (ret) + return ret; + } + + /* + * DVFS detection settings transition: + * - Set DVFS coefficient zero + * - Set calibration level to new voltage + * - Set DVFS coefficient to match new voltage + */ + gm20b_dvfs_program_coeff(clk, 0); + gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal); + gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff); + gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs); + +prog: + clk->uv = clk->new_uv; + clk->dvfs = clk->new_dvfs; + clk->base.pll = clk->new_pll; + + return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll); +} + +static struct nvkm_pstate +gm20b_pstates[] = { + { + .base = { + .domain[nv_clk_src_gpc] = 76800, + .voltage = 0, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 153600, + .voltage = 1, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 230400, + .voltage = 2, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 307200, + .voltage = 3, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 384000, + .voltage = 4, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 460800, + .voltage = 5, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 537600, + .voltage = 6, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 614400, + .voltage = 7, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 691200, + .voltage = 8, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 768000, + .voltage = 9, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 844800, + .voltage = 10, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 921600, + .voltage = 11, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 998400, + .voltage = 12, + }, + }, +}; + +static void +gm20b_clk_fini(struct nvkm_clk *base) +{ + struct nvkm_device *device = base->subdev.device; + struct gm20b_clk *clk = gm20b_clk(base); + + /* slide to VCO min */ + if (gk20a_pllg_is_enabled(&clk->base)) { + struct gk20a_pll pll; + u32 n_lo; + + gk20a_pllg_read_mnp(&clk->base, &pll); + n_lo = gk20a_pllg_n_lo(&clk->base, &pll); + gm20b_pllg_slide(clk, n_lo); + } + + gm20b_pllg_disable(clk); + + /* set IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1); +} + +static int +gm20b_clk_init_dvfs(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + bool fused = clk->uvdet_offs && clk->uvdet_slope; + static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */ + u32 data; + int ret; + + /* Enable NA DVFS */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT, + GPCPLL_DVFS1_EN_DFS_BIT); + + /* Set VCO_CTRL */ + if (clk->dvfs_params->vco_ctrl) + nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK, + clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT); + + if (fused) { + /* Start internal calibration, but ignore results */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT, + GPCPLL_DVFS1_EN_DFS_CAL_BIT); + + /* got uvdev parameters from fuse, skip calibration */ + goto calibrated; + } + + /* + * If calibration parameters are not fused, start internal calibration, + * wait for completion, and use results along with 
default slope to + * calculate ADC offset during boot. + */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT, + GPCPLL_DVFS1_EN_DFS_CAL_BIT); + + /* Wait for internal calibration done (spec < 2us). */ + ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1, + GPCPLL_DVFS1_DFS_CAL_DONE_BIT, + GPCPLL_DVFS1_DFS_CAL_DONE_BIT); + if (ret < 0) { + nvkm_error(subdev, "GPCPLL calibration timeout\n"); + return -ETIMEDOUT; + } + + data = nvkm_rd32(device, GPCPLL_CFG3) >> + GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT; + data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH); + + clk->uvdet_slope = ADC_SLOPE_UV; + clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV; + + nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n", + clk->uvdet_offs, clk->uvdet_slope); + +calibrated: + /* Compute and apply initial DVFS parameters */ + gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs); + gm20b_dvfs_program_coeff(clk, 0); + gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal); + gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff); + gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs); + + return 0; +} + +/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */ +static const struct nvkm_clk_func gm20b_clk; + +static int +gm20b_clk_init(struct nvkm_clk *base) +{ + struct gk20a_clk *clk = gk20a_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + int ret; + u32 data; + + /* get out from IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); + nvkm_rd32(device, GPCPLL_CFG); + udelay(5); + + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, + GPC2CLK_OUT_INIT_VAL); + + /* Set the global bypass control to VCO */ + nvkm_mask(device, BYPASSCTRL_SYS, + MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT, + 0); + + ret = gk20a_clk_setup_slide(clk); + if (ret) + return ret; + + /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */ + data = nvkm_rd32(device, 0x021944); + if (!(data & 0x3)) { + data |= 0x2; + nvkm_wr32(device, 0x021944, data); + + data = nvkm_rd32(device, 0x021948); + data |= 0x1; + nvkm_wr32(device, 0x021948, data); + } + + /* Disable idle slow down */ + nvkm_mask(device, 0x20160, 0x003f0000, 0x0); + + /* speedo >= 1? 
*/ + if (clk->base.func == &gm20b_clk) { + struct gm20b_clk *_clk = gm20b_clk(base); + struct nvkm_volt *volt = device->volt; + + /* Get current voltage */ + _clk->uv = nvkm_volt_get(volt); + + /* Initialize DVFS */ + ret = gm20b_clk_init_dvfs(_clk); + if (ret) + return ret; + } + + /* Start with lowest frequency */ + base->func->calc(base, &base->func->pstates[0].base); + ret = base->func->prog(base); + if (ret) { + nvkm_error(subdev, "cannot initialize clock\n"); + return ret; + } + + return 0; +} + +static const struct nvkm_clk_func +gm20b_clk_speedo0 = { + .init = gm20b_clk_init, + .fini = gk20a_clk_fini, + .read = gk20a_clk_read, + .calc = gk20a_clk_calc, + .prog = gk20a_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = gm20b_pstates, + /* Speedo 0 only supports 12 voltages */ + .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max }, + }, +}; + +static const struct nvkm_clk_func +gm20b_clk = { + .init = gm20b_clk_init, + .fini = gm20b_clk_fini, + .read = gk20a_clk_read, + .calc = gm20b_clk_calc, + .prog = gm20b_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = gm20b_pstates, + .nr_pstates = ARRAY_SIZE(gm20b_pstates), + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max }, + }, +}; + +static int +gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct gk20a_clk *clk; + int ret; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base; + + ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk); + clk->pl_to_div = pl_to_div; + clk->div_to_pl = div_to_pl; + return ret; +} + +/* FUSE register */ +#define FUSE_RESERVED_CALIB0 0x204 +#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0 +#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4 +#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4 +#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10 +#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14 +#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10 +#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24 +#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6 +#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30 +#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2 + +static int +gm20b_clk_init_fused_params(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + u32 val = 0; + u32 rev = 0; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA) + tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val); + rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH); +#endif + + /* No fused parameters, we will calibrate later */ + if (rev == 0) + return -EINVAL; + + /* Integer part in mV + fractional part in uV */ + clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 + + ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH)); + + /* Integer part in mV + fractional part in 100uV */ + clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 + + ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100; + + nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n", + clk->uvdet_slope, clk->uvdet_offs); + return 0; +} + +static int 
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_volt *volt = subdev->device->volt; + struct nvkm_pstate *pstates = clk->base.base.func->pstates; + int nr_pstates = clk->base.base.func->nr_pstates; + int vmin, id = 0; + u32 fmax = 0; + int i; + + /* find lowest voltage we can use */ + vmin = volt->vid[0].uv; + for (i = 1; i < volt->vid_nr; i++) { + if (volt->vid[i].uv <= vmin) { + vmin = volt->vid[i].uv; + id = volt->vid[i].vid; + } + } + + /* find max frequency at this voltage */ + for (i = 0; i < nr_pstates; i++) + if (pstates[i].base.voltage == id) + fmax = max(fmax, + pstates[i].base.domain[nv_clk_src_gpc]); + + if (!fmax) { + nvkm_error(subdev, "failed to evaluate safe fmax\n"); + return -EINVAL; + } + + /* we are safe at 90% of the max frequency */ + clk->safe_fmax_vmin = fmax * (100 - 10) / 100; + nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin); + + return 0; +} + +int +gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + struct gm20b_clk *clk; + struct nvkm_subdev *subdev; + struct gk20a_clk_pllg_params *clk_params; + int ret; + + /* Speedo 0 GPUs cannot use noise-aware PLL */ + if (tdev->gpu_speedo_id == 0) + return gm20b_clk_new_speedo0(device, type, inst, pclk); + + /* Speedo >= 1, use NAPLL */ + clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base.base; + subdev = &clk->base.base.subdev; + + /* duplicate the clock parameters since we will patch them below */ + clk_params = (void *) (clk + 1); + *clk_params = gm20b_pllg_params; + ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base); + if (ret) + return ret; + + /* + * NAPLL can only work with max_u, clamp the m range so + * gk20a_pllg_calc_mnp always uses it + */ + clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u, + (clk->base.parent_rate / KHZ)); + if (clk_params->max_m == 0) { + nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n"); + kfree(clk); + return gm20b_clk_new_speedo0(device, type, inst, pclk); + } + + clk->base.pl_to_div = pl_to_div; + clk->base.div_to_pl = div_to_pl; + + clk->dvfs_params = &gm20b_dvfs_params; + + ret = gm20b_clk_init_fused_params(clk); + /* + * we will calibrate during init - should never happen on + * prod parts + */ + if (ret) + nvkm_warn(subdev, "no fused calibration parameters\n"); + + ret = gm20b_clk_init_safe_fmax(clk); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c new file mode 100644 index 000000000..b5f396972 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c @@ -0,0 +1,550 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ *          Roy Spliet
+ */
+#define gt215_clk(p) container_of((p), struct gt215_clk, base)
+#include "gt215.h"
+#include "pll.h"
+
+#include <engine/fifo.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+
+struct gt215_clk {
+	struct nvkm_clk base;
+	struct gt215_clk_info eng[nv_clk_src_max];
+};
+
+static u32 read_clk(struct gt215_clk *, int, bool);
+static u32 read_pll(struct gt215_clk *, int, u32);
+
+static u32
+read_vco(struct gt215_clk *clk, int idx)
+{
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
+
+	switch (sctl & 0x00000030) {
+	case 0x00000000:
+		return device->crystal;
+	case 0x00000020:
+		return read_pll(clk, 0x41, 0x00e820);
+	case 0x00000030:
+		return read_pll(clk, 0x42, 0x00e8a0);
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
+{
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl, sdiv, sclk;
+
+	/* refclk for the 0xe8xx plls is a fixed frequency */
+	if (idx >= 0x40) {
+		if (device->chipset == 0xaf) {
+			/* no joke.. seriously.. sigh.. */
+			return nvkm_rd32(device, 0x00471c) * 1000;
+		}
+
+		return device->crystal;
+	}
+
+	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
+	if (!ignore_en && !(sctl & 0x00000100))
+		return 0;
+
+	/* out_alt */
+	if (sctl & 0x00000400)
+		return 108000;
+
+	/* vco_out */
+	switch (sctl & 0x00003000) {
+	case 0x00000000:
+		if (!(sctl & 0x00000200))
+			return device->crystal;
+		return 0;
+	case 0x00002000:
+		if (sctl & 0x00000040)
+			return 108000;
+		return 100000;
+	case 0x00003000:
+		/* vco_enable */
+		if (!(sctl & 0x00000001))
+			return 0;
+
+		sclk = read_vco(clk, idx);
+		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
+		return (sclk * 2) / sdiv;
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_pll(struct gt215_clk *clk, int idx, u32 pll)
+{
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0);
+	u32 sclk = 0, P = 1, N = 1, M = 1;
+	u32 MP;
+
+	if (!(ctrl & 0x00000008)) {
+		if (ctrl & 0x00000001) {
+			u32 coef = nvkm_rd32(device, pll + 4);
+			M = (coef & 0x000000ff) >> 0;
+			N = (coef & 0x0000ff00) >> 8;
+			P = (coef & 0x003f0000) >> 16;
+
+			/* no post-divider on these..
+ * XXX: it looks more like two post-"dividers" that + * cross each other out in the default RPLL config */ + if ((pll & 0x00ff00) == 0x00e800) + P = 1; + + sclk = read_clk(clk, 0x00 + idx, false); + } + } else { + sclk = read_clk(clk, 0x10 + idx, false); + } + + MP = M * P; + + if (!MP) + return 0; + + return sclk * N / MP; +} + +static int +gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct gt215_clk *clk = gt215_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 hsrc; + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_core: + case nv_clk_src_core_intm: + return read_pll(clk, 0x00, 0x4200); + case nv_clk_src_shader: + return read_pll(clk, 0x01, 0x4220); + case nv_clk_src_mem: + return read_pll(clk, 0x02, 0x4000); + case nv_clk_src_disp: + return read_clk(clk, 0x20, false); + case nv_clk_src_vdec: + return read_clk(clk, 0x21, false); + case nv_clk_src_pmu: + return read_clk(clk, 0x25, false); + case nv_clk_src_host: + hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28; + switch (hsrc) { + case 0: + return read_clk(clk, 0x1d, false); + case 2: + case 3: + return 277000; + default: + nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc); + return -EINVAL; + } + default: + nvkm_error(subdev, "invalid clock source %d\n", src); + return -EINVAL; + } + + return 0; +} + +static int +gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz, + struct gt215_clk_info *info) +{ + struct gt215_clk *clk = gt215_clk(base); + u32 oclk, sclk, sdiv; + s32 diff; + + info->clk = 0; + + switch (khz) { + case 27000: + info->clk = 0x00000100; + return khz; + case 100000: + info->clk = 0x00002100; + return khz; + case 108000: + info->clk = 0x00002140; + return khz; + default: + sclk = read_vco(clk, idx); + sdiv = min((sclk * 2) / khz, (u32)65); + oclk = (sclk * 2) / sdiv; + diff = ((khz + 3000) - oclk); + + /* When imprecise, play it safe and aim for a clock lower than + * desired rather than higher */ + if (diff < 0) { + sdiv++; + oclk = (sclk * 2) / sdiv; + } + + /* divider can go as low as 2, limited here because NVIDIA + * and the VBIOS on my NVA8 seem to prefer using the PLL + * for 810MHz - is there a good reason? + * XXX: PLLs with refclk 810MHz? */ + if (sdiv > 4) { + info->clk = (((sdiv - 2) << 16) | 0x00003100); + return oclk; + } + + break; + } + + return -ERANGE; +} + +int +gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz, + struct gt215_clk_info *info) +{ + struct gt215_clk *clk = gt215_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvbios_pll limits; + int P, N, M, diff; + int ret; + + info->pll = 0; + + /* If we can get a within [-2, 3) MHz of a divider, we'll disable the + * PLL and use the divider instead. */ + ret = gt215_clk_info(&clk->base, idx, khz, info); + diff = khz - ret; + if (!pll || (diff >= -2000 && diff < 3000)) { + goto out; + } + + /* Try with PLL */ + ret = nvbios_pll_parse(subdev->device->bios, pll, &limits); + if (ret) + return ret; + + ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info); + if (ret != limits.refclk) + return -EINVAL; + + ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P); + if (ret >= 0) { + info->pll = (P << 16) | (N << 8) | M; + } + +out: + info->fb_delay = max(((khz + 7566) / 15133), (u32) 18); + return ret ? 
ret : -ERANGE; +} + +static int +calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate, + int idx, u32 pll, int dom) +{ + int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom], + &clk->eng[dom]); + if (ret >= 0) + return 0; + return ret; +} + +static int +calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate) +{ + int ret = 0; + u32 kHz = cstate->domain[nv_clk_src_host]; + struct gt215_clk_info *info = &clk->eng[nv_clk_src_host]; + + if (kHz == 277000) { + info->clk = 0; + info->host_out = NVA3_HOST_277; + return 0; + } + + info->host_out = NVA3_HOST_CLK; + + ret = gt215_clk_info(&clk->base, 0x1d, kHz, info); + if (ret >= 0) + return 0; + + return ret; +} + +int +gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags) +{ + struct nvkm_device *device = clk->subdev.device; + struct nvkm_fifo *fifo = device->fifo; + + /* halt and idle execution engines */ + nvkm_mask(device, 0x020060, 0x00070000, 0x00000000); + nvkm_mask(device, 0x002504, 0x00000001, 0x00000001); + /* Wait until the interrupt handler is finished */ + if (nvkm_msec(device, 2000, + if (!nvkm_rd32(device, 0x000100)) + break; + ) < 0) + return -EBUSY; + + if (fifo) + nvkm_fifo_pause(fifo, flags); + + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x002504) & 0x00000010) + break; + ) < 0) + return -EIO; + + if (nvkm_msec(device, 2000, + u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f; + if (tmp == 0x0000003f) + break; + ) < 0) + return -EIO; + + return 0; +} + +void +gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags) +{ + struct nvkm_device *device = clk->subdev.device; + struct nvkm_fifo *fifo = device->fifo; + + if (fifo && flags) + nvkm_fifo_start(fifo, flags); + + nvkm_mask(device, 0x002504, 0x00000001, 0x00000000); + nvkm_mask(device, 0x020060, 0x00070000, 0x00040000); +} + +static void +disable_clk_src(struct gt215_clk *clk, u32 src) +{ + struct nvkm_device *device = clk->base.subdev.device; + nvkm_mask(device, src, 0x00000100, 0x00000000); + nvkm_mask(device, src, 0x00000001, 0x00000000); +} + +static void +prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom) +{ + struct gt215_clk_info *info = &clk->eng[dom]; + struct nvkm_device *device = clk->base.subdev.device; + const u32 src0 = 0x004120 + (idx * 4); + const u32 src1 = 0x004160 + (idx * 4); + const u32 ctrl = pll + 0; + const u32 coef = pll + 4; + u32 bypass; + + if (info->pll) { + /* Always start from a non-PLL clock */ + bypass = nvkm_rd32(device, ctrl) & 0x00000008; + if (!bypass) { + nvkm_mask(device, src1, 0x00000101, 0x00000101); + nvkm_mask(device, ctrl, 0x00000008, 0x00000008); + udelay(20); + } + + nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk); + nvkm_wr32(device, coef, info->pll); + nvkm_mask(device, ctrl, 0x00000015, 0x00000015); + nvkm_mask(device, ctrl, 0x00000010, 0x00000000); + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, ctrl) & 0x00020000) + break; + ) < 0) { + nvkm_mask(device, ctrl, 0x00000010, 0x00000010); + nvkm_mask(device, src0, 0x00000101, 0x00000000); + return; + } + nvkm_mask(device, ctrl, 0x00000010, 0x00000010); + nvkm_mask(device, ctrl, 0x00000008, 0x00000000); + disable_clk_src(clk, src1); + } else { + nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk); + nvkm_mask(device, ctrl, 0x00000018, 0x00000018); + udelay(20); + nvkm_mask(device, ctrl, 0x00000001, 0x00000000); + disable_clk_src(clk, src0); + } +} + +static void +prog_clk(struct gt215_clk *clk, int idx, int dom) +{ + struct gt215_clk_info *info = &clk->eng[dom]; + struct nvkm_device *device = 
clk->base.subdev.device; + nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk); +} + +static void +prog_host(struct gt215_clk *clk) +{ + struct gt215_clk_info *info = &clk->eng[nv_clk_src_host]; + struct nvkm_device *device = clk->base.subdev.device; + u32 hsrc = (nvkm_rd32(device, 0xc040)); + + switch (info->host_out) { + case NVA3_HOST_277: + if ((hsrc & 0x30000000) == 0) { + nvkm_wr32(device, 0xc040, hsrc | 0x20000000); + disable_clk_src(clk, 0x4194); + } + break; + case NVA3_HOST_CLK: + prog_clk(clk, 0x1d, nv_clk_src_host); + if ((hsrc & 0x30000000) >= 0x20000000) { + nvkm_wr32(device, 0xc040, hsrc & ~0x30000000); + } + break; + default: + break; + } + + /* This seems to be a clock gating factor on idle, always set to 64 */ + nvkm_wr32(device, 0xc044, 0x3e); +} + +static void +prog_core(struct gt215_clk *clk, int dom) +{ + struct gt215_clk_info *info = &clk->eng[dom]; + struct nvkm_device *device = clk->base.subdev.device; + u32 fb_delay = nvkm_rd32(device, 0x10002c); + + if (fb_delay < info->fb_delay) + nvkm_wr32(device, 0x10002c, info->fb_delay); + + prog_pll(clk, 0x00, 0x004200, dom); + + if (fb_delay > info->fb_delay) + nvkm_wr32(device, 0x10002c, info->fb_delay); +} + +static int +gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gt215_clk *clk = gt215_clk(base); + struct gt215_clk_info *core = &clk->eng[nv_clk_src_core]; + int ret; + + if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) || + (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) || + (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) || + (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) || + (ret = calc_host(clk, cstate))) + return ret; + + /* XXX: Should be reading the highest bit in the VBIOS clock to decide + * whether to use a PLL or not... 
but using a PLL defeats the purpose */ + if (core->pll) { + ret = gt215_clk_info(&clk->base, 0x10, + cstate->domain[nv_clk_src_core_intm], + &clk->eng[nv_clk_src_core_intm]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int +gt215_clk_prog(struct nvkm_clk *base) +{ + struct gt215_clk *clk = gt215_clk(base); + struct gt215_clk_info *core = &clk->eng[nv_clk_src_core]; + int ret = 0; + unsigned long flags; + unsigned long *f = &flags; + + ret = gt215_clk_pre(&clk->base, f); + if (ret) + goto out; + + if (core->pll) + prog_core(clk, nv_clk_src_core_intm); + + prog_core(clk, nv_clk_src_core); + prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader); + prog_clk(clk, 0x20, nv_clk_src_disp); + prog_clk(clk, 0x21, nv_clk_src_vdec); + prog_host(clk); + +out: + if (ret == -EBUSY) + f = NULL; + + gt215_clk_post(&clk->base, f); + return ret; +} + +static void +gt215_clk_tidy(struct nvkm_clk *base) +{ +} + +static const struct nvkm_clk_func +gt215_clk = { + .read = gt215_clk_read, + .calc = gt215_clk_calc, + .prog = gt215_clk_prog, + .tidy = gt215_clk_tidy, + .domains = { + { nv_clk_src_crystal , 0xff }, + { nv_clk_src_core , 0x00, 0, "core", 1000 }, + { nv_clk_src_shader , 0x01, 0, "shader", 1000 }, + { nv_clk_src_mem , 0x02, 0, "memory", 1000 }, + { nv_clk_src_vdec , 0x03 }, + { nv_clk_src_disp , 0x04 }, + { nv_clk_src_host , 0x05 }, + { nv_clk_src_core_intm, 0x06 }, + { nv_clk_src_max } + } +}; + +int +gt215_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct gt215_clk *clk; + + if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) + return -ENOMEM; + *pclk = &clk->base; + + return nvkm_clk_ctor(>215_clk, device, type, inst, true, &clk->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h new file mode 100644 index 000000000..34754efbf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_CLK_NVA3_H__ +#define __NVKM_CLK_NVA3_H__ +#include "priv.h" + +struct gt215_clk_info { + u32 clk; + u32 pll; + enum { + NVA3_HOST_277, + NVA3_HOST_CLK, + } host_out; + u32 fb_delay; +}; + +int gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *); +int gt215_clk_pre(struct nvkm_clk *, unsigned long *flags); +void gt215_clk_post(struct nvkm_clk *, unsigned long *flags); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c new file mode 100644 index 000000000..81f103f88 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c @@ -0,0 +1,422 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define mcp77_clk(p) container_of((p), struct mcp77_clk, base) +#include "gt215.h" +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> +#include <subdev/timer.h> + +struct mcp77_clk { + struct nvkm_clk base; + enum nv_clk_src csrc, ssrc, vsrc; + u32 cctrl, sctrl; + u32 ccoef, scoef; + u32 cpost, spost; + u32 vdiv; +}; + +static u32 +read_div(struct mcp77_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + return nvkm_rd32(device, 0x004600); +} + +static u32 +read_pll(struct mcp77_clk *clk, u32 base) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ctrl = nvkm_rd32(device, base + 0); + u32 coef = nvkm_rd32(device, base + 4); + u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href); + u32 post_div = 0; + u32 clock = 0; + int N1, M1; + + switch (base){ + case 0x4020: + post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16); + break; + case 0x4028: + post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16; + break; + default: + break; + } + + N1 = (coef & 0x0000ff00) >> 8; + M1 = (coef & 0x000000ff); + if ((ctrl & 0x80000000) && M1) { + clock = ref * N1 / M1; + clock = clock / post_div; + } + + return clock; +} + +static int +mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct mcp77_clk *clk = mcp77_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 mast = nvkm_rd32(device, 0x00c054); + u32 P = 0; + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_href: + return 100000; /* PCIE reference clock */ + case nv_clk_src_hclkm4: + return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4; + case nv_clk_src_hclkm2d3: + return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3; + case nv_clk_src_host: + switch (mast & 0x000c0000) { + case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3); + case 0x00040000: break; + case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4); + case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk); + } + break; + case nv_clk_src_core: + P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16; + + switch (mast & 0x00000003) { + case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P; + case 0x00000001: return 0; + case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P; + case 0x00000003: return read_pll(clk, 0x004028) >> P; + } + break; + case nv_clk_src_cclk: + if ((mast & 0x03000000) != 0x03000000) + return nvkm_clk_read(&clk->base, nv_clk_src_core); + + if ((mast & 0x00000200) == 0x00000000) + return nvkm_clk_read(&clk->base, nv_clk_src_core); + + switch (mast & 0x00000c00) { + case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href); + case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4); + case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3); + default: return 0; + } + case nv_clk_src_shader: + P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16; + switch (mast & 0x00000030) { + case 0x00000000: + if (mast & 0x00000040) + return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P; + return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P; + case 0x00000010: break; + case 0x00000020: return read_pll(clk, 0x004028) 
>> P; + case 0x00000030: return read_pll(clk, 0x004020) >> P; + } + break; + case nv_clk_src_mem: + return 0; + case nv_clk_src_vdec: + P = (read_div(clk) & 0x00000700) >> 8; + + switch (mast & 0x00400000) { + case 0x00400000: + return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P; + default: + return 500000 >> P; + } + break; + default: + break; + } + + nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast); + return 0; +} + +static u32 +calc_pll(struct mcp77_clk *clk, u32 reg, + u32 clock, int *N, int *M, int *P) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvbios_pll pll; + int ret; + + ret = nvbios_pll_parse(subdev->device->bios, reg, &pll); + if (ret) + return 0; + + pll.vco2.max_freq = 0; + pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href); + if (!pll.refclk) + return 0; + + return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P); +} + +static inline u32 +calc_P(u32 src, u32 target, int *div) +{ + u32 clk0 = src, clk1 = src; + for (*div = 0; *div <= 7; (*div)++) { + if (clk0 <= target) { + clk1 = clk0 << (*div ? 1 : 0); + break; + } + clk0 >>= 1; + } + + if (target - clk0 <= clk1 - target) + return clk0; + (*div)--; + return clk1; +} + +static int +mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct mcp77_clk *clk = mcp77_clk(base); + const int shader = cstate->domain[nv_clk_src_shader]; + const int core = cstate->domain[nv_clk_src_core]; + const int vdec = cstate->domain[nv_clk_src_vdec]; + struct nvkm_subdev *subdev = &clk->base.subdev; + u32 out = 0, clock = 0; + int N, M, P1, P2 = 0; + int divs = 0; + + /* cclk: find suitable source, disable PLL if we can */ + if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4)) + out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs); + + /* Calculate clock * 2, so shader clock can use it too */ + clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1); + + if (abs(core - out) <= abs(core - (clock >> 1))) { + clk->csrc = nv_clk_src_hclkm4; + clk->cctrl = divs << 16; + } else { + /* NVCTRL is actually used _after_ NVPOST, and after what we + * call NVPLL. To make matters worse, NVPOST is an integer + * divider instead of a right-shift number. */ + if(P1 > 2) { + P2 = P1 - 2; + P1 = 2; + } + + clk->csrc = nv_clk_src_core; + clk->ccoef = (N << 8) | M; + + clk->cctrl = (P2 + 1) << 16; + clk->cpost = (1 << P1) << 16; + } + + /* sclk: nvpll + divisor, href or spll */ + out = 0; + if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) { + clk->ssrc = nv_clk_src_href; + } else { + clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1); + if (clk->csrc == nv_clk_src_core) + out = calc_P((core << 1), shader, &divs); + + if (abs(shader - out) <= + abs(shader - clock) && + (divs + P2) <= 7) { + clk->ssrc = nv_clk_src_core; + clk->sctrl = (divs + P2) << 16; + } else { + clk->ssrc = nv_clk_src_shader; + clk->scoef = (N << 8) | M; + clk->sctrl = P1 << 16; + } + } + + /* vclk */ + out = calc_P(core, vdec, &divs); + clock = calc_P(500000, vdec, &P1); + if(abs(vdec - out) <= abs(vdec - clock)) { + clk->vsrc = nv_clk_src_cclk; + clk->vdiv = divs << 16; + } else { + clk->vsrc = nv_clk_src_vdec; + clk->vdiv = P1 << 16; + } + + /* Print strategy! 
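(dump the chosen PLL coefficients and source selections for debugging before committing them)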
*/ + nvkm_debug(subdev, "nvpll: %08x %08x %08x\n", + clk->ccoef, clk->cpost, clk->cctrl); + nvkm_debug(subdev, " spll: %08x %08x %08x\n", + clk->scoef, clk->spost, clk->sctrl); + nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv); + if (clk->csrc == nv_clk_src_hclkm4) + nvkm_debug(subdev, "core: hrefm4\n"); + else + nvkm_debug(subdev, "core: nvpll\n"); + + if (clk->ssrc == nv_clk_src_hclkm4) + nvkm_debug(subdev, "shader: hrefm4\n"); + else if (clk->ssrc == nv_clk_src_core) + nvkm_debug(subdev, "shader: nvpll\n"); + else + nvkm_debug(subdev, "shader: spll\n"); + + if (clk->vsrc == nv_clk_src_hclkm4) + nvkm_debug(subdev, "vdec: 500MHz\n"); + else + nvkm_debug(subdev, "vdec: core\n"); + + return 0; +} + +static int +mcp77_clk_prog(struct nvkm_clk *base) +{ + struct mcp77_clk *clk = mcp77_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 pllmask = 0, mast; + unsigned long flags; + unsigned long *f = &flags; + int ret = 0; + + ret = gt215_clk_pre(&clk->base, f); + if (ret) + goto out; + + /* First switch to safe clocks: href */ + mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640); + mast &= ~0x00400e73; + mast |= 0x03000000; + + switch (clk->csrc) { + case nv_clk_src_hclkm4: + nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl); + mast |= 0x00000002; + break; + case nv_clk_src_core: + nvkm_wr32(device, 0x402c, clk->ccoef); + nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl); + nvkm_wr32(device, 0x4040, clk->cpost); + pllmask |= (0x3 << 8); + mast |= 0x00000003; + break; + default: + nvkm_warn(subdev, "Reclocking failed: unknown core clock\n"); + goto resume; + } + + switch (clk->ssrc) { + case nv_clk_src_href: + nvkm_mask(device, 0x4020, 0x00070000, 0x00000000); + /* mast |= 0x00000000; */ + break; + case nv_clk_src_core: + nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl); + mast |= 0x00000020; + break; + case nv_clk_src_shader: + nvkm_wr32(device, 0x4024, clk->scoef); + nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl); + nvkm_wr32(device, 0x4070, clk->spost); + pllmask |= (0x3 << 12); + mast |= 0x00000030; + break; + default: + nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n"); + goto resume; + } + + if (nvkm_msec(device, 2000, + u32 tmp = nvkm_rd32(device, 0x004080) & pllmask; + if (tmp == pllmask) + break; + ) < 0) + goto resume; + + switch (clk->vsrc) { + case nv_clk_src_cclk: + mast |= 0x00400000; + fallthrough; + default: + nvkm_wr32(device, 0x4600, clk->vdiv); + } + + nvkm_wr32(device, 0xc054, mast); + +resume: + /* Disable some PLLs and dividers when unused */ + if (clk->csrc != nv_clk_src_core) { + nvkm_wr32(device, 0x4040, 0x00000000); + nvkm_mask(device, 0x4028, 0x80000000, 0x00000000); + } + + if (clk->ssrc != nv_clk_src_shader) { + nvkm_wr32(device, 0x4070, 0x00000000); + nvkm_mask(device, 0x4020, 0x80000000, 0x00000000); + } + +out: + if (ret == -EBUSY) + f = NULL; + + gt215_clk_post(&clk->base, f); + return ret; +} + +static void +mcp77_clk_tidy(struct nvkm_clk *base) +{ +} + +static const struct nvkm_clk_func +mcp77_clk = { + .read = mcp77_clk_read, + .calc = mcp77_clk_calc, + .prog = mcp77_clk_prog, + .tidy = mcp77_clk_tidy, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_core , 0xff, 0, "core", 1000 }, + { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, + { nv_clk_src_vdec , 0xff, 0, "vdec", 1000 }, + { nv_clk_src_max } + } +}; + +int +mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct 
mcp77_clk *clk; + + if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) + return -ENOMEM; + *pclk = &clk->base; + + return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c new file mode 100644 index 000000000..ca13598c2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c @@ -0,0 +1,84 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "priv.h" +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> +#include <subdev/devinit/nv04.h> + +int +nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info, + int clk, struct nvkm_pll_vals *pv) +{ + int N1, M1, N2, M2, P; + int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P); + if (ret) { + pv->refclk = info->refclk; + pv->N1 = N1; + pv->M1 = M1; + pv->N2 = N2; + pv->M2 = M2; + pv->log2P = P; + } + return ret; +} + +int +nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv) +{ + struct nvkm_device *device = clk->subdev.device; + struct nvkm_devinit *devinit = device->devinit; + int cv = device->bios->version.chip; + + if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || + cv >= 0x40) { + if (reg1 > 0x405c) + setPLL_double_highregs(devinit, reg1, pv); + else + setPLL_double_lowregs(devinit, reg1, pv); + } else + setPLL_single(devinit, reg1, pv); + + return 0; +} + +static const struct nvkm_clk_func +nv04_clk = { + .domains = { + { nv_clk_src_max } + } +}; + +int +nv04_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + int ret = nvkm_clk_new_(&nv04_clk, device, type, inst, false, pclk); + if (ret == 0) { + (*pclk)->pll_calc = nv04_clk_pll_calc; + (*pclk)->pll_prog = nv04_clk_pll_prog; + } + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c new file mode 100644 index 000000000..7ddd8cecb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c @@ -0,0 +1,233 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define nv40_clk(p) container_of((p), struct nv40_clk, base) +#include "priv.h" +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +struct nv40_clk { + struct nvkm_clk base; + u32 ctrl; + u32 npll_ctrl; + u32 npll_coef; + u32 spll; +}; + +static u32 +read_pll_1(struct nv40_clk *clk, u32 reg) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ctrl = nvkm_rd32(device, reg + 0x00); + int P = (ctrl & 0x00070000) >> 16; + int N = (ctrl & 0x0000ff00) >> 8; + int M = (ctrl & 0x000000ff) >> 0; + u32 ref = 27000, khz = 0; + + if (ctrl & 0x80000000) + khz = ref * N / M; + + return khz >> P; +} + +static u32 +read_pll_2(struct nv40_clk *clk, u32 reg) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 ctrl = nvkm_rd32(device, reg + 0x00); + u32 coef = nvkm_rd32(device, reg + 0x04); + int N2 = (coef & 0xff000000) >> 24; + int M2 = (coef & 0x00ff0000) >> 16; + int N1 = (coef & 0x0000ff00) >> 8; + int M1 = (coef & 0x000000ff) >> 0; + int P = (ctrl & 0x00070000) >> 16; + u32 ref = 27000, khz = 0; + + if ((ctrl & 0x80000000) && M1) { + khz = ref * N1 / M1; + if ((ctrl & 0x40000100) == 0x40000000) { + if (M2) + khz = khz * N2 / M2; + else + khz = 0; + } + } + + return khz >> P; +} + +static u32 +read_clk(struct nv40_clk *clk, u32 src) +{ + switch (src) { + case 3: + return read_pll_2(clk, 0x004000); + case 2: + return read_pll_1(clk, 0x004008); + default: + break; + } + + return 0; +} + +static int +nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct nv40_clk *clk = nv40_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 mast = nvkm_rd32(device, 0x00c040); + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_href: + return 100000; /*XXX: PCIE/AGP differ*/ + case nv_clk_src_core: + return read_clk(clk, (mast & 0x00000003) >> 0); + case nv_clk_src_shader: + return read_clk(clk, (mast & 0x00000030) >> 4); + case nv_clk_src_mem: + return read_pll_2(clk, 0x4020); + default: + break; + } + + nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast); + return -EINVAL; +} + +static int +nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz, + int *N1, int *M1, int *N2, int *M2, int *log2P) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvbios_pll pll; + int ret; + + ret = nvbios_pll_parse(subdev->device->bios, reg, &pll); + if (ret) + return ret; + 
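+ /* a target below vco1's ceiling can be reached with a single stage; clearing vco2.max_freq steers nv04_pll_calc into its single-stage path */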
+ if (khz < pll.vco1.max_freq) + pll.vco2.max_freq = 0; + + ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P); + if (ret == 0) + return -ERANGE; + + return ret; +} + +static int +nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct nv40_clk *clk = nv40_clk(base); + int gclk = cstate->domain[nv_clk_src_core]; + int sclk = cstate->domain[nv_clk_src_shader]; + int N1, M1, N2, M2, log2P; + int ret; + + /* core/geometric clock */ + ret = nv40_clk_calc_pll(clk, 0x004000, gclk, + &N1, &M1, &N2, &M2, &log2P); + if (ret < 0) + return ret; + + if (N2 == M2) { + clk->npll_ctrl = 0x80000100 | (log2P << 16); + clk->npll_coef = (N1 << 8) | M1; + } else { + clk->npll_ctrl = 0xc0000000 | (log2P << 16); + clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; + } + + /* use the second pll for shader/rop clock, if it differs from core */ + if (sclk && sclk != gclk) { + ret = nv40_clk_calc_pll(clk, 0x004008, sclk, + &N1, &M1, NULL, NULL, &log2P); + if (ret < 0) + return ret; + + clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1; + clk->ctrl = 0x00000223; + } else { + clk->spll = 0x00000000; + clk->ctrl = 0x00000333; + } + + return 0; +} + +static int +nv40_clk_prog(struct nvkm_clk *base) +{ + struct nv40_clk *clk = nv40_clk(base); + struct nvkm_device *device = clk->base.subdev.device; + nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000); + nvkm_wr32(device, 0x004004, clk->npll_coef); + nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl); + nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll); + mdelay(5); + nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl); + return 0; +} + +static void +nv40_clk_tidy(struct nvkm_clk *obj) +{ +} + +static const struct nvkm_clk_func +nv40_clk = { + .read = nv40_clk_read, + .calc = nv40_clk_calc, + .prog = nv40_clk_prog, + .tidy = nv40_clk_tidy, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_core , 0xff, 0, "core", 1000 }, + { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, + { nv_clk_src_mem , 0xff, 0, "memory", 1000 }, + { nv_clk_src_max } + } +}; + +int +nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct nv40_clk *clk; + + if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) + return -ENOMEM; + clk->base.pll_calc = nv04_clk_pll_calc; + clk->base.pll_prog = nv04_clk_pll_prog; + *pclk = &clk->base; + + return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c new file mode 100644 index 000000000..e1d31c62f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c @@ -0,0 +1,563 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "nv50.h" +#include "pll.h" +#include "seq.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +static u32 +read_div(struct nv50_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + switch (device->chipset) { + case 0x50: /* it exists, but only has bit 31, not the dividers.. */ + case 0x84: + case 0x86: + case 0x98: + case 0xa0: + return nvkm_rd32(device, 0x004700); + case 0x92: + case 0x94: + case 0x96: + return nvkm_rd32(device, 0x004800); + default: + return 0x00000000; + } +} + +static u32 +read_pll_src(struct nv50_clk *clk, u32 base) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal); + u32 rsel = nvkm_rd32(device, 0x00e18c); + int P, N, M, id; + + switch (device->chipset) { + case 0x50: + case 0xa0: + switch (base) { + case 0x4020: + case 0x4028: id = !!(rsel & 0x00000004); break; + case 0x4008: id = !!(rsel & 0x00000008); break; + case 0x4030: id = 0; break; + default: + nvkm_error(subdev, "ref: bad pll %06x\n", base); + return 0; + } + + coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c)); + ref *= (coef & 0x01000000) ? 2 : 4; + P = (coef & 0x00070000) >> 16; + N = ((coef & 0x0000ff00) >> 8) + 1; + M = ((coef & 0x000000ff) >> 0) + 1; + break; + case 0x84: + case 0x86: + case 0x92: + coef = nvkm_rd32(device, 0x00e81c); + P = (coef & 0x00070000) >> 16; + N = (coef & 0x0000ff00) >> 8; + M = (coef & 0x000000ff) >> 0; + break; + case 0x94: + case 0x96: + case 0x98: + rsel = nvkm_rd32(device, 0x00c050); + switch (base) { + case 0x4020: rsel = (rsel & 0x00000003) >> 0; break; + case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break; + case 0x4028: rsel = (rsel & 0x00001800) >> 11; break; + case 0x4030: rsel = 3; break; + default: + nvkm_error(subdev, "ref: bad pll %06x\n", base); + return 0; + } + + switch (rsel) { + case 0: id = 1; break; + case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal); + case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href); + case 3: id = 0; break; + } + + coef = nvkm_rd32(device, 0x00e81c + (id * 0x28)); + P = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7; + P += (coef & 0x00070000) >> 16; + N = (coef & 0x0000ff00) >> 8; + M = (coef & 0x000000ff) >> 0; + break; + default: + BUG(); + } + + if (M) + return (ref * N / M) >> P; + + return 0; +} + +static u32 +read_pll_ref(struct nv50_clk *clk, u32 base) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 src, mast = nvkm_rd32(device, 0x00c040); + + switch (base) { + case 0x004028: + src = !!(mast & 0x00200000); + break; + case 0x004020: + src = !!(mast & 0x00400000); + break; + case 0x004008: + src = !!(mast & 0x00010000); + break; + case 0x004030: + src = !!(mast & 0x02000000); + break; + case 0x00e810: + return nvkm_clk_read(&clk->base, nv_clk_src_crystal); + default: + nvkm_error(subdev, "bad pll %06x\n", base); + return 0; + } + + if (src) + return nvkm_clk_read(&clk->base, nv_clk_src_href); + + return 
read_pll_src(clk, base); +} + +static u32 +read_pll(struct nv50_clk *clk, u32 base) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 mast = nvkm_rd32(device, 0x00c040); + u32 ctrl = nvkm_rd32(device, base + 0); + u32 coef = nvkm_rd32(device, base + 4); + u32 ref = read_pll_ref(clk, base); + u32 freq = 0; + int N1, N2, M1, M2; + + if (base == 0x004028 && (mast & 0x00100000)) { + /* wtf, appears to only disable post-divider on gt200 */ + if (device->chipset != 0xa0) + return nvkm_clk_read(&clk->base, nv_clk_src_dom6); + } + + N2 = (coef & 0xff000000) >> 24; + M2 = (coef & 0x00ff0000) >> 16; + N1 = (coef & 0x0000ff00) >> 8; + M1 = (coef & 0x000000ff); + if ((ctrl & 0x80000000) && M1) { + freq = ref * N1 / M1; + if ((ctrl & 0x40000100) == 0x40000000) { + if (M2) + freq = freq * N2 / M2; + else + freq = 0; + } + } + + return freq; +} + +int +nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct nv50_clk *clk = nv50_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 mast = nvkm_rd32(device, 0x00c040); + u32 P = 0; + + switch (src) { + case nv_clk_src_crystal: + return device->crystal; + case nv_clk_src_href: + return 100000; /* PCIE reference clock */ + case nv_clk_src_hclk: + return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000); + case nv_clk_src_hclkm3: + return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3; + case nv_clk_src_hclkm3d2: + return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2; + case nv_clk_src_host: + switch (mast & 0x30000000) { + case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href); + case 0x10000000: break; + case 0x20000000: /* !0x50 */ + case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk); + } + break; + case nv_clk_src_core: + if (!(mast & 0x00100000)) + P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16; + switch (mast & 0x00000003) { + case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P; + case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6); + case 0x00000002: return read_pll(clk, 0x004020) >> P; + case 0x00000003: return read_pll(clk, 0x004028) >> P; + } + break; + case nv_clk_src_shader: + P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16; + switch (mast & 0x00000030) { + case 0x00000000: + if (mast & 0x00000080) + return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P; + return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P; + case 0x00000010: break; + case 0x00000020: return read_pll(clk, 0x004028) >> P; + case 0x00000030: return read_pll(clk, 0x004020) >> P; + } + break; + case nv_clk_src_mem: + P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16; + if (nvkm_rd32(device, 0x004008) & 0x00000200) { + switch (mast & 0x0000c000) { + case 0x00000000: + return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P; + case 0x00008000: + case 0x0000c000: + return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P; + } + } else { + return read_pll(clk, 0x004008) >> P; + } + break; + case nv_clk_src_vdec: + P = (read_div(clk) & 0x00000700) >> 8; + switch (device->chipset) { + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa0: + switch (mast & 0x00000c00) { + case 0x00000000: + if (device->chipset == 0xa0) /* wtf?? 
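(nva0 appears to feed vdec from the core clock even in this crystal-source encoding)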
*/ + return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P; + return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P; + case 0x00000400: + return 0; + case 0x00000800: + if (mast & 0x01000000) + return read_pll(clk, 0x004028) >> P; + return read_pll(clk, 0x004030) >> P; + case 0x00000c00: + return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P; + } + break; + case 0x98: + switch (mast & 0x00000c00) { + case 0x00000000: + return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P; + case 0x00000400: + return 0; + case 0x00000800: + return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P; + case 0x00000c00: + return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P; + } + break; + } + break; + case nv_clk_src_dom6: + switch (device->chipset) { + case 0x50: + case 0xa0: + return read_pll(clk, 0x00e810) >> 2; + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + P = (read_div(clk) & 0x00000007) >> 0; + switch (mast & 0x0c000000) { + case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href); + case 0x04000000: break; + case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk); + case 0x0c000000: + return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P; + } + break; + default: + break; + } + break; + default: + break; + } + + nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast); + return -EINVAL; +} + +static u32 +calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvbios_pll pll; + int ret; + + ret = nvbios_pll_parse(subdev->device->bios, reg, &pll); + if (ret) + return 0; + + pll.vco2.max_freq = 0; + pll.refclk = read_pll_ref(clk, reg); + if (!pll.refclk) + return 0; + + return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P); +} + +static inline u32 +calc_div(u32 src, u32 target, int *div) +{ + u32 clk0 = src, clk1 = src; + for (*div = 0; *div <= 7; (*div)++) { + if (clk0 <= target) { + clk1 = clk0 << (*div ? 1 : 0); + break; + } + clk0 >>= 1; + } + + if (target - clk0 <= clk1 - target) + return clk0; + (*div)--; + return clk1; +} + +static inline u32 +clk_same(u32 a, u32 b) +{ + return ((a / 1000) == (b / 1000)); +} + +int +nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct nv50_clk *clk = nv50_clk(base); + struct nv50_clk_hwsq *hwsq = &clk->hwsq; + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + const int shader = cstate->domain[nv_clk_src_shader]; + const int core = cstate->domain[nv_clk_src_core]; + const int vdec = cstate->domain[nv_clk_src_vdec]; + const int dom6 = cstate->domain[nv_clk_src_dom6]; + u32 mastm = 0, mastv = 0; + u32 divsm = 0, divsv = 0; + int N, M, P1, P2; + int freq, out; + + /* prepare a hwsq script from which we'll perform the reclock */ + out = clk_init(hwsq, subdev); + if (out) + return out; + + clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */ + clk_nsec(hwsq, 8000); + clk_setf(hwsq, 0x10, 0x00); /* disable fb */ + clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */ + + /* vdec: avoid modifying xpll until we know exactly how the other + * clock domains work, i suspect at least some of them can also be + * tied to xpll... 
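+ * (so the code below only picks a source and divider for vdec, reading xpll but never reprogramming it)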
+ */ + if (vdec) { + /* see how close we can get using nvclk as a source */ + freq = calc_div(core, vdec, &P1); + + /* see how close we can get using xpll/hclk as a source */ + if (device->chipset != 0x98) + out = read_pll(clk, 0x004030); + else + out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2); + out = calc_div(out, vdec, &P2); + + /* select whichever gets us closest */ + if (abs(vdec - freq) <= abs(vdec - out)) { + if (device->chipset != 0x98) + mastv |= 0x00000c00; + divsv |= P1 << 8; + } else { + mastv |= 0x00000800; + divsv |= P2 << 8; + } + + mastm |= 0x00000c00; + divsm |= 0x00000700; + } + + /* dom6: nfi what this is, but we're limited to various combinations + * of the host clock frequency + */ + if (dom6) { + if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) { + mastv |= 0x00000000; + } else + if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) { + mastv |= 0x08000000; + } else { + freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3; + calc_div(freq, dom6, &P1); + + mastv |= 0x0c000000; + divsv |= P1; + } + + mastm |= 0x0c000000; + divsm |= 0x00000007; + } + + /* vdec/dom6: switch to "safe" clocks temporarily, update dividers + * and then switch to target clocks + */ + clk_mask(hwsq, mast, mastm, 0x00000000); + clk_mask(hwsq, divs, divsm, divsv); + clk_mask(hwsq, mast, mastm, mastv); + + /* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6, + * sclk to hclk) before reprogramming + */ + if (device->chipset < 0x92) + clk_mask(hwsq, mast, 0x001000b0, 0x00100080); + else + clk_mask(hwsq, mast, 0x000000b3, 0x00000081); + + /* core: for the moment at least, always use nvpll */ + freq = calc_pll(clk, 0x4028, core, &N, &M, &P1); + if (freq == 0) + return -ERANGE; + + clk_mask(hwsq, nvpll[0], 0xc03f0100, + 0x80000000 | (P1 << 19) | (P1 << 16)); + clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M); + + /* shader: tie to nvclk if possible, otherwise use spll. have to be + * very careful that the shader clock is at least twice the core, or + * some chipsets will be very unhappy. 
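+ * (the sclk == core * 2 case below reuses nvpll with its post-divider dropped one step, which meets this requirement by construction)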
i expect most or all of these + * cases will be handled by tying to nvclk, but it's possible there's + * corners + */ + if (P1-- && shader == (core << 1)) { + clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16)); + clk_mask(hwsq, mast, 0x00100033, 0x00000023); + } else { + freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1); + if (freq == 0) + return -ERANGE; + + clk_mask(hwsq, spll[0], 0xc03f0100, + 0x80000000 | (P1 << 19) | (P1 << 16)); + clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M); + clk_mask(hwsq, mast, 0x00100033, 0x00000033); + } + + /* restore normal operation */ + clk_setf(hwsq, 0x10, 0x01); /* enable fb */ + clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */ + clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */ + return 0; +} + +int +nv50_clk_prog(struct nvkm_clk *base) +{ + struct nv50_clk *clk = nv50_clk(base); + return clk_exec(&clk->hwsq, true); +} + +void +nv50_clk_tidy(struct nvkm_clk *base) +{ + struct nv50_clk *clk = nv50_clk(base); + clk_exec(&clk->hwsq, false); +} + +int +nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk) +{ + struct nv50_clk *clk; + int ret; + + if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) + return -ENOMEM; + ret = nvkm_clk_ctor(func, device, type, inst, allow_reclock, &clk->base); + *pclk = &clk->base; + if (ret) + return ret; + + clk->hwsq.r_fifo = hwsq_reg(0x002504); + clk->hwsq.r_spll[0] = hwsq_reg(0x004020); + clk->hwsq.r_spll[1] = hwsq_reg(0x004024); + clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028); + clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c); + switch (device->chipset) { + case 0x92: + case 0x94: + case 0x96: + clk->hwsq.r_divs = hwsq_reg(0x004800); + break; + default: + clk->hwsq.r_divs = hwsq_reg(0x004700); + break; + } + clk->hwsq.r_mast = hwsq_reg(0x00c040); + return 0; +} + +static const struct nvkm_clk_func +nv50_clk = { + .read = nv50_clk_read, + .calc = nv50_clk_calc, + .prog = nv50_clk_prog, + .tidy = nv50_clk_tidy, + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_core , 0xff, 0, "core", 1000 }, + { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, + { nv_clk_src_mem , 0xff, 0, "memory", 1000 }, + { nv_clk_src_max } + } +}; + +int +nv50_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + return nv50_clk_new_(&nv50_clk, device, type, inst, false, pclk); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h new file mode 100644 index 000000000..5b4cb7e5c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NV50_CLK_H__ +#define __NV50_CLK_H__ +#define nv50_clk(p) container_of((p), struct nv50_clk, base) +#include "priv.h" + +#include <subdev/bus/hwsq.h> + +struct nv50_clk_hwsq { + struct hwsq base; + struct hwsq_reg r_fifo; + struct hwsq_reg r_spll[2]; + struct hwsq_reg r_nvpll[2]; + struct hwsq_reg r_divs; + struct hwsq_reg r_mast; +}; + +struct nv50_clk { + struct nvkm_clk base; + struct nv50_clk_hwsq hwsq; +}; + +int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + bool, struct nvkm_clk **); +int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src); +int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *); +int nv50_clk_prog(struct nvkm_clk *); +void nv50_clk_tidy(struct nvkm_clk *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h 
b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h new file mode 100644 index 000000000..631907564 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_PLL_H__ +#define __NVKM_PLL_H__ +#include <core/os.h> +struct nvkm_subdev; +struct nvbios_pll; + +int nv04_pll_calc(struct nvkm_subdev *, struct nvbios_pll *, u32 freq, + int *N1, int *M1, int *N2, int *M2, int *P); +int gt215_pll_calc(struct nvkm_subdev *, struct nvbios_pll *, u32 freq, + int *N, int *fN, int *M, int *P); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c new file mode 100644 index 000000000..c6fccd600 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c @@ -0,0 +1,87 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +int +gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, + u32 freq, int *pN, int *pfN, int *pM, int *P) +{ + u32 best_err = ~0, err; + int M, lM, hM, N, fN; + + *P = info->vco1.max_freq / freq; + if (*P > info->max_p) + *P = info->max_p; + if (*P < info->min_p) + *P = info->min_p; + + lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq; + lM = max(lM, (int)info->vco1.min_m); + hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq; + hM = min(hM, (int)info->vco1.max_m); + lM = min(lM, hM); + + for (M = lM; M <= hM; M++) { + u32 tmp = freq * *P * M; + N = tmp / info->refclk; + fN = tmp % info->refclk; + + if (!pfN) { + if (fN >= info->refclk / 2) + N++; + } else { + if (fN < info->refclk / 2) + N--; + fN = tmp - (N * info->refclk); + } + + if (N < info->vco1.min_n) + continue; + if (N > info->vco1.max_n) + break; + + err = abs(freq - (info->refclk * N / M / *P)); + if (err < best_err) { + best_err = err; + *pN = N; + *pM = M; + } + + if (pfN) { + *pfN = ((fN << 13) + info->refclk / 2) / info->refclk; + *pfN = (*pfN - 4096) & 0xffff; + return freq; + } + } + + if (unlikely(best_err == ~0)) { + nvkm_error(subdev, "unable to find matching pll values\n"); + return -EINVAL; + } + + return info->refclk * *pN / *pM / *P; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c new file mode 100644 index 000000000..5ad67879e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c @@ -0,0 +1,245 @@ +/* + * Copyright 1993-2003 NVIDIA, Corporation + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "pll.h" + +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +static int +getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk, + int *pN, int *pM, int *pP) +{ + /* Find M, N and P for a single stage PLL + * + * Note that some bioses (NV3x) have lookup tables of precomputed MNP + * values, but we're too lazy to use those atm + * + * "clk" parameter in kHz + * returns calculated clock + */ + struct nvkm_bios *bios = subdev->device->bios; + int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq; + int minM = info->vco1.min_m, maxM = info->vco1.max_m; + int minN = info->vco1.min_n, maxN = info->vco1.max_n; + int minU = info->vco1.min_inputfreq; + int maxU = info->vco1.max_inputfreq; + int minP = info->min_p; + int maxP = info->max_p_usable; + int crystal = info->refclk; + int M, N, thisP, P; + int clkP, calcclk; + int delta, bestdelta = INT_MAX; + int bestclk = 0; + + /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */ + /* possibly correlated with introduction of 27MHz crystal */ + if (bios->version.major < 0x60) { + int cv = bios->version.chip; + if (cv < 0x17 || cv == 0x1a || cv == 0x20) { + if (clk > 250000) + maxM = 6; + if (clk > 340000) + maxM = 2; + } else if (cv < 0x40) { + if (clk > 150000) + maxM = 6; + if (clk > 200000) + maxM = 4; + if (clk > 340000) + maxM = 2; + } + } + + P = 1 << maxP; + if ((clk * P) < minvco) { + minvco = clk * maxP; + maxvco = minvco * 2; + } + + if (clk + clk/200 > maxvco) /* +0.5% */ + maxvco = clk + clk/200; + + /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */ + for (thisP = minP; thisP <= maxP; thisP++) { + P = 1 << thisP; + clkP = clk * P; + + if (clkP < minvco) + continue; + if (clkP > maxvco) + return bestclk; + + for (M = minM; M <= maxM; M++) { + if (crystal/M < minU) + return bestclk; + if (crystal/M > maxU) + continue; + + /* add crystal/2 to round better */ + N = (clkP * M + crystal/2) / crystal; + + if (N < minN) + continue; + if (N > maxN) + break; + + /* more rounding additions */ + calcclk = ((N * crystal + P/2) / P + M/2) / M; + delta = abs(calcclk - clk); + /* we do an exhaustive search rather than terminating + * on an optimality condition... 
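+ * (bestdelta below tracks the closest match seen so far; we only return early on an exact hit)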
+ */ + if (delta < bestdelta) { + bestdelta = delta; + bestclk = calcclk; + *pN = N; + *pM = M; + *pP = thisP; + if (delta == 0) /* except this one */ + return bestclk; + } + } + } + + return bestclk; +} + +static int +getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk, + int *pN1, int *pM1, int *pN2, int *pM2, int *pP) +{ + /* Find M, N and P for a two stage PLL + * + * Note that some bioses (NV30+) have lookup tables of precomputed MNP + * values, but we're too lazy to use those atm + * + * "clk" parameter in kHz + * returns calculated clock + */ + int chip_version = subdev->device->bios->version.chip; + int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq; + int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq; + int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq; + int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq; + int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m; + int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n; + int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m; + int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n; + int maxlog2P = info->max_p_usable; + int crystal = info->refclk; + bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2); + int M1, N1, M2, N2, log2P; + int clkP, calcclk1, calcclk2, calcclkout; + int delta, bestdelta = INT_MAX; + int bestclk = 0; + + int vco2 = (maxvco2 - maxvco2/200) / 2; + for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++) + ; + clkP = clk << log2P; + + if (maxvco2 < clk + clk/200) /* +0.5% */ + maxvco2 = clk + clk/200; + + for (M1 = minM1; M1 <= maxM1; M1++) { + if (crystal/M1 < minU1) + return bestclk; + if (crystal/M1 > maxU1) + continue; + + for (N1 = minN1; N1 <= maxN1; N1++) { + calcclk1 = crystal * N1 / M1; + if (calcclk1 < minvco1) + continue; + if (calcclk1 > maxvco1) + break; + + for (M2 = minM2; M2 <= maxM2; M2++) { + if (calcclk1/M2 < minU2) + break; + if (calcclk1/M2 > maxU2) + continue; + + /* add calcclk1/2 to round better */ + N2 = (clkP * M2 + calcclk1/2) / calcclk1; + if (N2 < minN2) + continue; + if (N2 > maxN2) + break; + + if (!fixedgain2) { + if (chip_version < 0x60) + if (N2/M2 < 4 || N2/M2 > 10) + continue; + + calcclk2 = calcclk1 * N2 / M2; + if (calcclk2 < minvco2) + break; + if (calcclk2 > maxvco2) + continue; + } else + calcclk2 = calcclk1; + + calcclkout = calcclk2 >> log2P; + delta = abs(calcclkout - clk); + /* we do an exhaustive search rather than terminating + * on an optimality condition... 
+ if (delta < bestdelta) { + bestdelta = delta; + bestclk = calcclkout; + *pN1 = N1; + *pM1 = M1; + *pN2 = N2; + *pM2 = M2; + *pP = log2P; + if (delta == 0) /* except this one */ + return bestclk; + } + } + } + } + + return bestclk; +} + +int +nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq, + int *N1, int *M1, int *N2, int *M2, int *P) +{ + int ret; + + if (!info->vco2.max_freq || !N2) { + ret = getMNP_single(subdev, info, freq, N1, M1, P); + if (N2) { + *N2 = 1; + *M2 = 1; + } + } else { + ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P); + } + + if (!ret) + nvkm_error(subdev, "unable to compute acceptable pll values\n"); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h new file mode 100644 index 000000000..810cc572c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_CLK_PRIV_H__ +#define __NVKM_CLK_PRIV_H__ +#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev) +#include <subdev/clk.h> + +struct nvkm_clk_func { + int (*init)(struct nvkm_clk *); + void (*fini)(struct nvkm_clk *); + int (*read)(struct nvkm_clk *, enum nv_clk_src); + int (*calc)(struct nvkm_clk *, struct nvkm_cstate *); + int (*prog)(struct nvkm_clk *); + void (*tidy)(struct nvkm_clk *); + struct nvkm_pstate *pstates; + int nr_pstates; + struct nvkm_domain domains[]; +}; + +int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + bool allow_reclock, struct nvkm_clk *); +int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + bool allow_reclock, struct nvkm_clk **); + +int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk, + struct nvkm_pll_vals *); +int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h new file mode 100644 index 000000000..e4b362d34 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_CLK_SEQ_H__ +#define __NVKM_CLK_SEQ_H__ +#include <subdev/bus/hwsq.h> + +#define clk_init(s,p) hwsq_init(&(s)->base, (p)) +#define clk_exec(s,e) hwsq_exec(&(s)->base, (e)) +#define clk_have(s,r) ((s)->r_##r.addr != 0x000000) +#define clk_rd32(s,r) hwsq_rd32(&(s)->base, &(s)->r_##r) +#define clk_wr32(s,r,d) hwsq_wr32(&(s)->base, &(s)->r_##r, (d)) +#define clk_mask(s,r,m,d) hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d)) +#define clk_setf(s,f,d) hwsq_setf(&(s)->base, (f), (d)) +#define clk_wait(s,f,d) hwsq_wait(&(s)->base, (f), (d)) +#define clk_nsec(s,n) hwsq_nsec(&(s)->base, (n)) +#endif