1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
/*
 * rbmemptr() - GPU (iova) address of a field inside the ring's shared
 * msm_rbmemptrs block, for handing to the GPU in command packets.
 */
#define rbmemptr(ring, member) \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

/*
 * rbmemptr_stats() - like rbmemptr(), but addresses a field of one entry
 * of the per-ring stats[] array.  NOTE(review): how 'index' is derived
 * (presumably submit seqno % MSM_GPU_SUBMIT_STATS_COUNT) is the caller's
 * policy -- confirm at the call sites.
 */
#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	((index) * sizeof(struct msm_gpu_submit_stats)) + \
	offsetof(struct msm_gpu_submit_stats, member))
/*
 * Per-submit performance-counter samples.  Lives inside msm_rbmemptrs
 * (declared volatile there), so presumably the GPU writes these values
 * around each submit -- see rbmemptr_stats() for how they are addressed.
 */
struct msm_gpu_submit_stats {
	/* CP cycle counter sampled at submit start/end */
	u64 cpcycles_start;
	u64 cpcycles_end;
	/* always-on counter sampled at submit start/end */
	u64 alwayson_start;
	u64 alwayson_end;
};
/* Number of entries in msm_rbmemptrs::stats[] */
#define MSM_GPU_SUBMIT_STATS_COUNT 64

/*
 * Memory shared between the CPU and the GPU for a ring.  Fields are
 * volatile because they are updated asynchronously from outside CPU
 * control (presumably by the GPU/CP) -- do not reorder or resize this
 * layout; the GPU addresses fields via rbmemptr()/rbmemptr_stats().
 */
struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;

	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
	volatile u64 ttbr0;
};
/*
 * Snapshot of the CP's indirect-buffer (IB) state.  Stored in
 * msm_ringbuffer::last_cp_state and compared across hangcheck samples
 * to decide whether the GPU is making forward progress.
 */
struct msm_cp_state {
	/* base GPU addresses of the level-1/level-2 indirect buffers */
	uint64_t ib1_base, ib2_base;
	/* remaining dwords in each IB -- NOTE(review): units assumed, confirm */
	uint32_t ib1_rem, ib2_rem;
};
/*
 * Per-ring state: the ringbuffer GEM object, the drm_gpu_scheduler
 * instance that feeds it, fence/hangcheck bookkeeping, and the shared
 * memptrs block the GPU writes back into.
 */
struct msm_ringbuffer {
	struct msm_gpu *gpu;
	/* ring index (which priority/ring this is) */
	int id;
	/* GEM object backing the ring commands */
	struct drm_gem_object *bo;
	/*
	 * start/end bound the mapped ring; cur is the last flushed write
	 * position, next is the in-progress write position (see OUT_RING()).
	 */
	uint32_t *start, *end, *cur, *next;

	/*
	 * The job scheduler for this ring.
	 */
	struct drm_gpu_scheduler sched;

	/*
	 * List of in-flight submits on this ring. Protected by submit_lock.
	 *
	 * Currently just submits that are already written into the ring, not
	 * submits that are still in drm_gpu_scheduler's queues. At a later
	 * step we could probably move to letting drm_gpu_scheduler manage
	 * hangcheck detection and keep track of submit jobs that are in-
	 * flight.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	/* GPU address of the ring buffer itself */
	uint64_t iova;
	/* fence value observed at the last hangcheck timer expiry */
	uint32_t hangcheck_fence;
	/* CPU mapping and GPU address of the shared msm_rbmemptrs block */
	struct msm_rbmemptrs *memptrs;
	uint64_t memptrs_iova;
	struct msm_fence_context *fctx;

	/**
	 * hangcheck_progress_retries:
	 *
	 * The number of extra hangcheck duration cycles that we have given
	 * due to it appearing that the GPU is making forward progress.
	 *
	 * For GPU generations which support progress detection (see
	 * msm_gpu_funcs::progress()), if the GPU appears to be making progress
	 * (ie. the CP has advanced in the command stream), we'll allow up to
	 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
	 * before killing the job. But to detect progress we need two sample
	 * points, so the duration of the hangcheck timer is halved. In other
	 * words we'll let the submit run for up to:
	 *
	 * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
	 */
	int hangcheck_progress_retries;

	/**
	 * last_cp_state: The state of the CP at the last call to gpu->progress()
	 */
	struct msm_cp_state last_cp_state;

	/*
	 * preempt_lock protects preemption and serializes wptr updates against
	 * preemption. Can be acquired from irq context.
	 */
	spinlock_t preempt_lock;
};
/*
 * Allocate and initialize ring 'id' for @gpu; the caller supplies the
 * CPU mapping and GPU address of the shared memptrs block.  Ownership:
 * the returned ring is released with msm_ringbuffer_destroy().
 * NOTE(review): error-return convention (NULL vs ERR_PTR) not visible
 * here -- confirm in msm_ringbuffer.c before relying on it.
 */
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
	/*
	 * Commands are staged through ring->next; they only become visible
	 * as ring->cur once the ring is flushed.  Wrap back to the start
	 * of the buffer before writing when we've reached the end.
	 */
	if (ring->next == ring->end)
		ring->next = ring->start;

	*ring->next = data;
	ring->next++;
}
#endif /* __MSM_RINGBUFFER_H__ */
|