Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 438
1 file changed, 201 insertions(+), 237 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 89550d3df..ced2e1bdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -56,6 +56,7 @@
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
+#include "umc_v12_0.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"
@@ -64,8 +65,6 @@
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
-#include "amdgpu_reset.h"
-
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -554,6 +553,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub;
const char *mmhub_cid;
const char *hub_name;
+ unsigned int vmhub;
u64 addr;
uint32_t cam_index = 0;
int ret, xcc_id = 0;
@@ -566,10 +566,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
hub_name = "mmhub0";
- hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)];
+ vmhub = AMDGPU_MMHUB0(node_id / 4);
} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
hub_name = "mmhub1";
- hub = &adev->vmhub[AMDGPU_MMHUB1(0)];
+ vmhub = AMDGPU_MMHUB1(0);
} else {
hub_name = "gfxhub0";
if (adev->gfx.funcs->ih_node_to_logical_xcc) {
@@ -578,8 +578,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (xcc_id < 0)
xcc_id = 0;
}
- hub = &adev->vmhub[xcc_id];
+ vmhub = xcc_id;
}
+ hub = &adev->vmhub[vmhub];
if (retry_fault) {
if (adev->irq.retry_cam_enabled) {
@@ -625,7 +626,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (!printk_ratelimit())
return 0;
-
memset(&task_info, 0, sizeof(struct amdgpu_task_info));
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
@@ -639,7 +639,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -653,7 +653,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* the new fast GRBM interface.
*/
if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
- (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
+ (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -661,6 +661,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+ amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
+
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
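The new amdgpu_vm_update_fault_cache() call records the fault before it is printed, so the most recent translation fault can be queried per VM later. A minimal sketch of the cached record, assuming a layout along these lines in amdgpu_vm.h:

/* Sketch (assumed layout): per-VM cache of the last protection fault. */
struct amdgpu_vm_fault_info {
	uint64_t	addr;	/* faulting GPU address */
	uint32_t	status;	/* raw VM_L2_PROTECTION_FAULT_STATUS value */
	uint32_t	vmhub;	/* index of the vmhub the fault arrived on */
};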
@@ -670,7 +672,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
gfxhub_client_ids[cid],
cid);
} else {
- switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
mmhub_cid = mmhub_client_ids_vega10[cid][rw];
break;
@@ -735,7 +737,8 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
if (!amdgpu_sriov_vf(adev) &&
- !adev->gmc.xgmi.connected_to_cpu) {
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !adev->gmc.is_app_apu) {
adev->gmc.ecc_irq.num_types = 1;
adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
@@ -770,8 +773,8 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -814,45 +817,32 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
+ u32 j, inv_req, tmp, sem, req, ack, inst;
const unsigned int eng = 17;
- u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
hub = &adev->vmhub[vmhub];
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
- /* Vega20+XGMI caches PTEs in TC and TLB. Add a
- * heavy-weight TLB flush (type 2), which flushes
- * both. Due to a race condition with concurrent
- * memory accesses using the same TLB cache line, we
- * still need a second TLB flush after this.
- */
- inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
- inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- } else if (flush_type == 2 &&
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0) {
- inv_req = gmc_v9_0_get_invalidate_req(vmid, 0);
- inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- } else {
- inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- inv_req2 = 0;
- }
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
+ req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
+ ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
*/
- if (adev->gfx.kiq[0].ring.sched.ready &&
- (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- down_read_trylock(&adev->reset_domain->sem)) {
+ if (vmhub >= AMDGPU_MMHUB0(0))
+ inst = GET_INST(GC, 0);
+ else
+ inst = vmhub;
+ if (adev->gfx.kiq[inst].ring.sched.ready &&
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
- 1 << vmid);
- up_read(&adev->reset_domain->sem);
+ 1 << vmid, inst);
return;
}
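When the KIQ ring is up, the flush has to go through it: under SRIOV the VF may not own these registers directly, and on bare metal it keeps GFXOFF sequencing correct. The refactor folds the per-hub instance selection into that path; condensed, and using the same helpers as the hunk above, it behaves roughly like this:

	/* MMHUBs are served by GC instance 0's KIQ; GFX hubs map 1:1. */
	inst = (vmhub >= AMDGPU_MMHUB0(0)) ? GET_INST(GC, 0) : vmhub;
	if (adev->gfx.kiq[inst].ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		/* one ring packet: write req, then poll ack for this vmid */
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid, inst);
		return;
	}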
@@ -870,9 +860,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
for (j = 0; j < adev->usec_timeout; j++) {
/* a read return value of 1 means semaphore acquire */
if (vmhub >= AMDGPU_MMHUB0(0))
- tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+ tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst);
else
- tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst);
if (tmp & 0x1)
break;
udelay(1);
@@ -882,35 +872,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- do {
- if (vmhub >= AMDGPU_MMHUB0(0))
- WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
- else
- WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
-
- /*
- * Issue a dummy read to wait for the ACK register to
- * be cleared to avoid a false ACK due to the new fast
- * GRBM interface.
- */
- if ((vmhub == AMDGPU_GFXHUB(0)) &&
- (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
- RREG32_NO_KIQ(hub->vm_inv_eng0_req +
- hub->eng_distance * eng);
+ if (vmhub >= AMDGPU_MMHUB0(0))
+ WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst);
+ else
+ WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (vmhub >= AMDGPU_MMHUB0(0))
- tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
- else
- tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
- if (tmp & (1 << vmid))
- break;
- udelay(1);
- }
+ /*
+ * Issue a dummy read to wait for the ACK register to
+ * be cleared to avoid a false ACK due to the new fast
+ * GRBM interface.
+ */
+ if ((vmhub == AMDGPU_GFXHUB(0)) &&
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
+ RREG32_NO_KIQ(req);
- inv_req = inv_req2;
- inv_req2 = 0;
- } while (inv_req);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (vmhub >= AMDGPU_MMHUB0(0))
+ tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst);
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst);
+ if (tmp & (1 << vmid))
+ break;
+ udelay(1);
+ }
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore) {
@@ -919,9 +903,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* write with 0 means semaphore release
*/
if (vmhub >= AMDGPU_MMHUB0(0))
- WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst);
else
- WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst);
}
spin_unlock(&adev->gmc.invalidate_lock);
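Stripped of the per-hub MMIO helper selection, the direct path that remains after the refactor is a plain acquire/request/ack/release sequence. A condensed sketch, with the sem/req/ack offsets precomputed as in the hunks above and generic RREG32/WREG32-style accessors standing in for the SOC15 helpers:

	/* acquire: a read of 1 means the invalidation semaphore is ours */
	if (use_semaphore)
		for (j = 0; j < adev->usec_timeout; j++) {
			if (RREG32(sem) & 0x1)
				break;
			udelay(1);
		}

	WREG32(req, inv_req);			/* kick the invalidation */

	for (j = 0; j < adev->usec_timeout; j++) {
		if (RREG32(ack) & (1 << vmid))	/* per-VMID ack bit */
			break;
		udelay(1);
	}

	if (use_semaphore)
		WREG32(sem, 0);			/* write 0 releases it */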
@@ -943,91 +927,32 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*
* Flush the TLB for the requested pasid.
*/
-static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
- uint16_t pasid, uint32_t flush_type,
- bool all_hub, uint32_t inst)
+static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub, uint32_t inst)
{
- int vmid, i;
- signed long r;
- uint32_t seq;
- uint16_t queried_pasid;
- bool ret;
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
- struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
-
- if (amdgpu_in_reset(adev))
- return -EIO;
-
- if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
- /* Vega20+XGMI caches PTEs in TC and TLB. Add a
- * heavy-weight TLB flush (type 2), which flushes
- * both. Due to a race condition with concurrent
- * memory accesses using the same TLB cache line, we
- * still need a second TLB flush after this.
- */
- bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
- /* 2 dwords flush + 8 dwords fence */
- unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
-
- if (vega20_xgmi_wa)
- ndw += kiq->pmf->invalidate_tlbs_size;
-
- spin_lock(&adev->gfx.kiq[inst].ring_lock);
- /* 2 dwords flush + 8 dwords fence */
- amdgpu_ring_alloc(ring, ndw);
- if (vega20_xgmi_wa)
- kiq->pmf->kiq_invalidate_tlbs(ring,
- pasid, 2, all_hub);
-
- if (flush_type == 2 &&
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0)
- kiq->pmf->kiq_invalidate_tlbs(ring,
- pasid, 0, all_hub);
-
- kiq->pmf->kiq_invalidate_tlbs(ring,
- pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- up_read(&adev->reset_domain->sem);
- return -ETIME;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- up_read(&adev->reset_domain->sem);
- return -ETIME;
- }
- up_read(&adev->reset_domain->sem);
- return 0;
- }
+ uint16_t queried;
+ int i, vmid;
for (vmid = 1; vmid < 16; vmid++) {
+ bool valid;
- ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
- &queried_pasid);
- if (ret && queried_pasid == pasid) {
- if (all_hub) {
- for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
- gmc_v9_0_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- } else {
- gmc_v9_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB(0), flush_type);
- }
- break;
+ valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+ &queried);
+ if (!valid || queried != pasid)
+ continue;
+
+ if (all_hub) {
+ for_each_set_bit(i, adev->vmhubs_mask,
+ AMDGPU_MAX_VMHUBS)
+ gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
+ flush_type);
+ } else {
+ gmc_v9_0_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB(0),
+ flush_type);
}
}
-
- return 0;
-
}
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
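The PASID flush no longer builds KIQ packets itself; the common GMC layer decides whether to route the flush through the KIQ (see the flush_pasid_uses_kiq flag set in gmc_v9_0_hw_init() below), and this function is reduced to scanning the ATC VMID-to-PASID mappings. For reference, the lookup helper it relies on reads one mapping register per VMID and reports whether the entry is valid; its shape in the surrounding driver is roughly this (shown as a sketch):

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						     uint8_t vmid,
						     uint16_t *p_pasid)
{
	uint32_t value;

	/* one ATC_VMID%d_PASID_MAPPING register per VMID */
	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}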
@@ -1183,14 +1108,15 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
- bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
+ bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+ bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
bool snoop = false;
bool is_local;
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
if (is_vram) {
@@ -1204,8 +1130,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
- if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&
+ if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 4, 3)) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
@@ -1251,6 +1179,11 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
snoop = true;
if (uncached) {
mtype = MTYPE_UC;
+ } else if (ext_coherent) {
+ if (adev->rev_id)
+ mtype = is_local ? MTYPE_CC : MTYPE_UC;
+ else
+ mtype = MTYPE_UC;
} else if (adev->flags & AMD_IS_APU) {
mtype = is_local ? mtype_local : MTYPE_NC;
} else {
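For GFX 9.4.3 VRAM mappings, the added branch distinguishes extended-coherent buffers: they get the cache-coherent MTYPE_CC only when the memory is local and the revision supports it, and fall back to uncached otherwise. The decision, flattened into one place as a sketch mirroring the hunk above:

	/* VRAM on GC 9.4.3: pick the MTYPE for this mapping */
	if (uncached)
		mtype = MTYPE_UC;
	else if (ext_coherent)		/* AMDGPU_GEM_CREATE_EXT_COHERENT */
		mtype = (adev->rev_id && is_local) ? MTYPE_CC : MTYPE_UC;
	else if (adev->flags & AMD_IS_APU)
		mtype = is_local ? mtype_local : MTYPE_NC;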
@@ -1314,23 +1247,26 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
* memory can use more efficient MTYPEs.
*/
- if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
return;
/* Only direct-mapped memory allows us to determine the NUMA node from
* the DMA address.
*/
if (!adev->ram_is_direct_mapped) {
- dev_dbg(adev->dev, "RAM is not direct mapped\n");
+ dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
return;
}
- /* Only override mappings with MTYPE_NC, which is the safe default for
- * cacheable memory.
+ /* MTYPE_NC is the same default and can be overridden.
+ * MTYPE_UC will be present if the memory is extended-coherent
+ * and can also be overridden.
*/
if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
- AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
- dev_dbg(adev->dev, "MTYPE is not NC\n");
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) &&
+ (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) {
+ dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
return;
}
@@ -1341,7 +1277,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
} else {
- dev_dbg(adev->dev, "Only native mode APU is supported.\n");
+ dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
return;
}
@@ -1349,25 +1285,33 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
* page or NUMA nodes.
*/
if (!page_is_ram(addr >> PAGE_SHIFT)) {
- dev_dbg(adev->dev, "Page is not RAM.\n");
+ dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
return;
}
nid = pfn_to_nid(addr >> PAGE_SHIFT);
- dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
- vm->mem_id, local_node, nid);
+ dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
+ vm->mem_id, local_node, nid);
if (nid == local_node) {
uint64_t old_flags = *flags;
- unsigned int mtype_local = MTYPE_RW;
-
- if (amdgpu_mtype_local == 1)
- mtype_local = MTYPE_NC;
- else if (amdgpu_mtype_local == 2)
- mtype_local = MTYPE_CC;
+ if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
+ unsigned int mtype_local = MTYPE_RW;
+
+ if (amdgpu_mtype_local == 1)
+ mtype_local = MTYPE_NC;
+ else if (amdgpu_mtype_local == 2)
+ mtype_local = MTYPE_CC;
+
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(mtype_local);
+ } else if (adev->rev_id) {
+ /* MTYPE_UC case */
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
+ }
- *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
- AMDGPU_PTE_MTYPE_VG10(mtype_local);
- dev_dbg(adev->dev, "flags updated from %llx to %llx\n",
- old_flags, *flags);
+ dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
+ old_flags, *flags);
}
}
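With the override now keyed on the existing MTYPE, NC pages take the MTYPE selected by the amdgpu_mtype_local module parameter, while UC pages (extended-coherent mappings) are promoted straight to CC on revisions that support it. The parameter mapping, pulled out as a hypothetical helper for clarity:

/* Hypothetical helper: amdgpu_mtype_local parameter -> local MTYPE
 * (0 -> MTYPE_RW, 1 -> MTYPE_NC, 2 -> MTYPE_CC, as in the hunk above). */
static unsigned int gmc_v9_0_local_mtype(void)
{
	switch (amdgpu_mtype_local) {
	case 1:
		return MTYPE_NC;
	case 2:
		return MTYPE_CC;
	default:
		return MTYPE_RW;
	}
}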
@@ -1383,7 +1327,7 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
} else {
u32 viewport;
- switch (adev->ip_versions[DCE_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
@@ -1454,7 +1398,7 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
- switch (adev->ip_versions[UMC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
adev->umc.funcs = &umc_v6_0_funcs;
break;
@@ -1490,6 +1434,19 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
else
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
break;
+ case IP_VERSION(12, 0, 0):
+ adev->umc.max_ras_err_cnt_per_query =
+ UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
+ adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
+ adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
+ adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
+ adev->umc.active_mask = adev->aid_mask;
+ adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
+ adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ adev->umc.ras = &umc_v12_0_ras;
+ break;
default:
break;
}
@@ -1497,7 +1454,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(9, 4, 1):
adev->mmhub.funcs = &mmhub_v9_4_funcs;
break;
@@ -1515,7 +1472,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
- switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(9, 4, 0):
adev->mmhub.ras = &mmhub_v1_0_ras;
break;
@@ -1536,7 +1493,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1552,7 +1509,7 @@ static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
struct amdgpu_mca *mca = &adev->mca;
/* is UMC the right IP to check for MCA? Maybe DF? */
- switch (adev->ip_versions[UMC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
case IP_VERSION(6, 7, 0):
if (!adev->gmc.xgmi.connected_to_cpu) {
mca->mp0.ras = &mca_v3_0_mp0_ras;
@@ -1579,18 +1536,18 @@ static int gmc_v9_0_early_init(void *handle)
* 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
* in their IP discovery tables
*/
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
adev->gmc.xgmi.supported = true;
- if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
+ if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
adev->gmc.xgmi.supported = true;
adev->gmc.xgmi.connected_to_cpu =
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
}
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
enum amdgpu_pkg_type pkg_type =
adev->smuio.funcs->get_pkg_type(adev);
/* On GFXIP 9.4.3. APU, there is no physical VRAM domain present
@@ -1639,7 +1596,7 @@ static int gmc_v9_0_late_init(void *handle)
* writes, while disables HBM ECC for vega10.
*/
if (!amdgpu_sriov_vf(adev) &&
- (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
+ (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
if (adev->df.funcs &&
adev->df.funcs->enable_ecc_force_par_wr_rmw)
@@ -1648,13 +1605,8 @@ static int gmc_v9_0_late_init(void *handle)
}
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
- if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
- adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
- adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
-
- if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
- adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
- adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+ amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
+ amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
}
r = amdgpu_gmc_ras_late_init(adev);
@@ -1669,14 +1621,17 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = adev->mmhub.funcs->get_fb_location(adev);
+ amdgpu_gmc_set_agp_default(adev, mc);
+
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
if (adev->gmc.xgmi.connected_to_cpu) {
amdgpu_gmc_sysvm_location(adev, mc);
} else {
amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc);
- amdgpu_gmc_agp_location(adev, mc);
+ amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+ if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
+ amdgpu_gmc_agp_location(adev, mc);
}
/* base offset of vram pages */
adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
@@ -1747,7 +1702,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
/* set the gart size */
if (amdgpu_gart_size == -1) {
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
case IP_VERSION(9, 4, 0):
@@ -1826,8 +1781,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
*/
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
- if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
- (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
+ if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
+ (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
@@ -1879,15 +1834,14 @@ static void
gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges)
{
- int num_ranges = 0, ret, mem_groups;
struct amdgpu_numa_info numa_info;
int node_ids[MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
int num_xcc, xcc_id;
uint32_t xcc_mask;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
xcc_mask = (1U << num_xcc) - 1;
- mem_groups = hweight32(adev->aid_mask);
for_each_inst(xcc_id, xcc_mask) {
ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
@@ -1912,12 +1866,6 @@ gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
}
adev->gmc.num_mem_partitions = num_ranges;
-
- /* If there is only partition, don't use entire size */
- if (adev->gmc.num_mem_partitions == 1) {
- mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1);
- do_div(mem_ranges[0].size, mem_groups);
- }
}
static void
@@ -1972,10 +1920,9 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
{
bool valid;
- adev->gmc.mem_partitions = kzalloc(
- MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info),
- GFP_KERNEL);
-
+ adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
if (!adev->gmc.mem_partitions)
return -ENOMEM;
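kcalloc() is the idiomatic replacement here: like the open-coded kzalloc(n * size, ...) it zero-initializes the allocation, but it additionally returns NULL if n * size would overflow instead of silently allocating a short buffer. The same pattern in isolation (parts is a stand-in for adev->gmc.mem_partitions):

	/* zeroed array allocation with overflow-checked n * size */
	parts = kcalloc(MAX_MEM_RANGES, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;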
@@ -2000,13 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
- static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
- u32 vram_info;
-
- if (!amdgpu_sriov_vf(adev)) {
- vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
- adev->gmc.vram_vendor = vram_info & 0xF;
- }
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
}
@@ -2023,7 +1963,7 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2063,7 +2003,7 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
}
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -2091,12 +2031,9 @@ static int gmc_v9_0_sw_init(void *handle)
* vm size is 256TB (48bit), maximum size of Vega10,
* block size 512 (9bit)
*/
- /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
- if (amdgpu_sriov_vf(adev))
- amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
- else
- amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
@@ -2128,7 +2065,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
if (r)
@@ -2142,7 +2079,8 @@ static int gmc_v9_0_sw_init(void *handle)
return r;
if (!amdgpu_sriov_vf(adev) &&
- !adev->gmc.xgmi.connected_to_cpu) {
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !adev->gmc.is_app_apu) {
/* interrupt sent to DF. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
&adev->gmc.ecc_irq);
@@ -2156,7 +2094,10 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
+ dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(9, 4, 2) ?
+ 48 :
+ 44;
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
if (r) {
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
@@ -2170,7 +2111,7 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_gmc_get_vbios_allocations(adev);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
r = gmc_v9_0_init_mem_ranges(adev);
if (r)
return r;
@@ -2196,9 +2137,11 @@ static int gmc_v9_0_sw_init(void *handle)
* for video processing.
*/
adev->vm_manager.first_kfd_vmid =
- (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
- adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) ?
+ 3 :
+ 8;
amdgpu_vm_manager_init(adev);
@@ -2208,7 +2151,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2218,7 +2161,7 @@ static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2241,8 +2184,7 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
-
- switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
if (amdgpu_sriov_vf(adev))
break;
@@ -2276,8 +2218,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
- if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
- (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
+ if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
+ (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
WARN_ON(adev->gmc.sdpif_register !=
RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
@@ -2330,6 +2272,24 @@ static int gmc_v9_0_hw_init(void *handle)
bool value;
int i, r;
+ adev->gmc.flush_pasid_uses_kiq = true;
+
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
+ * (type 2), which flushes both. Due to a race condition with
+ * concurrent memory accesses using the same TLB cache line, we still
+ * need a second TLB flush after this.
+ */
+ adev->gmc.flush_tlb_needs_extra_type_2 =
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
+ adev->gmc.xgmi.num_physical_nodes;
+ /*
+ * TODO: This workaround is badly documented and had a buggy
+ * implementation. We should probably verify what we do here.
+ */
+ adev->gmc.flush_tlb_needs_extra_type_0 =
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ adev->rev_id == 0;
+
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
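The two workaround decisions that used to live inside gmc_v9_0_flush_gpu_tlb() are now latched once at hw_init time, and flush_pasid_uses_kiq tells the common GMC layer to route PASID flushes through the KIQ. The expected consumer shape, assuming the generic flush helper in amdgpu_gmc.c acts on the flags roughly like this:

	/* Assumed consumer sketch: extra flushes before the requested one */
	if (adev->gmc.flush_tlb_needs_extra_type_2)
		adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, 2);

	if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
		adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, 0);

	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub, flush_type);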
@@ -2373,8 +2333,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
/**
@@ -2413,6 +2373,10 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
return 0;
}