/* SPDX-License-Identifier: MIT */

#ifndef NOUVEAU_SCHED_H
#define NOUVEAU_SCHED_H

#include <linux/types.h>

#include <drm/drm_exec.h>
#include <drm/gpu_scheduler.h>

#include "nouveau_drv.h"

#define to_nouveau_job(sched_job)		\
		container_of((sched_job), struct nouveau_job, base)

struct nouveau_job_ops;
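
/*
 * Lifecycle states of a nouveau_job, advanced as the job is initialized,
 * submitted to the DRM GPU scheduler and run.
 */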
enum nouveau_job_state {
	NOUVEAU_JOB_UNINITIALIZED = 0,
	NOUVEAU_JOB_INITIALIZED,
	NOUVEAU_JOB_SUBMIT_SUCCESS,
	NOUVEAU_JOB_SUBMIT_FAILED,
	NOUVEAU_JOB_RUN_SUCCESS,
	NOUVEAU_JOB_RUN_FAILED,
};
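
/*
 * Arguments describing a job submission; consumed by nouveau_job_init() to
 * set up the embedded drm_sched_job and the in/out sync arrays.
 */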
struct nouveau_job_args {
	struct drm_file *file_priv;
	struct nouveau_sched_entity *sched_entity;

	enum dma_resv_usage resv_usage;
	bool sync;

	struct {
		struct drm_nouveau_sync *s;
		u32 count;
	} in_sync;

	struct {
		struct drm_nouveau_sync *s;
		u32 count;
	} out_sync;

	struct nouveau_job_ops *ops;
};
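
/*
 * A scheduler job wrapping struct drm_sched_job. Backend-specific job types
 * embed this structure and provide their own nouveau_job_ops.
 */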
struct nouveau_job {
	struct drm_sched_job base;

	enum nouveau_job_state state;

	struct nouveau_sched_entity *entity;

	struct drm_file *file_priv;
	struct nouveau_cli *cli;

	struct drm_exec exec;
	enum dma_resv_usage resv_usage;
	struct dma_fence *done_fence;

	bool sync;

	struct {
		struct drm_nouveau_sync *data;
		u32 count;
	} in_sync;

	struct {
		struct drm_nouveau_sync *data;
		struct drm_syncobj **objs;
		struct dma_fence_chain **chains;
		u32 count;
	} out_sync;

	struct nouveau_job_ops {
		/* If .submit() returns without any error, it is guaranteed that
		 * armed_submit() is called.
		 */
		int (*submit)(struct nouveau_job *);
		void (*armed_submit)(struct nouveau_job *);
		struct dma_fence *(*run)(struct nouveau_job *);
		void (*free)(struct nouveau_job *);
		enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
	} *ops;
};
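
/*
 * Copy the userspace drm_nouveau_sync arrays (inc entries at ins, outc
 * entries at outs) into args->in_sync and args->out_sync.
 */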
int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
			    u32 inc, u64 ins,
			    u32 outc, u64 outs);

int nouveau_job_init(struct nouveau_job *job,
		     struct nouveau_job_args *args);
void nouveau_job_free(struct nouveau_job *job);
int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
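
/*
 * A minimal usage sketch based on the prototypes above. The concrete job
 * type, its allocation and its nouveau_job_ops are backend-specific; the
 * names below (my_job, my_job_ops, in_count, in_user_ptr, ...) are purely
 * illustrative assumptions, not part of this header:
 *
 *	struct nouveau_job_args args = {
 *		.file_priv = file_priv,
 *		.sched_entity = entity,
 *		.resv_usage = DMA_RESV_USAGE_WRITE,
 *		.ops = &my_job_ops,			// hypothetical backend ops
 *	};
 *	int ret;
 *
 *	// Pull the userspace sync arrays into the args structure.
 *	ret = nouveau_job_ucopy_syncs(&args, in_count, in_user_ptr,
 *				      out_count, out_user_ptr);
 *	if (ret)
 *		return ret;
 *
 *	// my_job is assumed to embed struct nouveau_job as its "base" member.
 *	ret = nouveau_job_init(&my_job->base, &args);
 *	if (ret)
 *		return ret;
 *
 *	ret = nouveau_job_submit(&my_job->base);
 *
 * Cleanup is then handled through nouveau_job_fini()/nouveau_job_free() as
 * appropriate for the backend's error and completion paths.
 */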

#define to_nouveau_sched_entity(entity)		\
		container_of((entity), struct nouveau_sched_entity, base)
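
/*
 * A scheduler entity wrapping struct drm_sched_entity. The nested job.list
 * and job.wq presumably track the entity's in-flight jobs and let waiters
 * block until they complete.
 */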
struct nouveau_sched_entity {
	struct drm_sched_entity base;
	struct mutex mutex;

	struct workqueue_struct *sched_wq;

	struct {
		struct {
			struct list_head head;
			spinlock_t lock;
		} list;
		struct wait_queue_head wq;
	} job;
};

int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
			      struct drm_gpu_scheduler *sched,
			      struct workqueue_struct *sched_wq);
void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);

bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
				struct work_struct *work);

int nouveau_sched_init(struct nouveau_drm *drm);
void nouveau_sched_fini(struct nouveau_drm *drm);
#endif