// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

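/*
 * Module parameters, read-only (0444) through sysfs: job_hang_limit is how
 * many times a job may hang before it is dropped instead of resubmitted,
 * hw_job_limit caps the number of jobs in flight on the hardware ring.
 */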
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

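/*
 * Scheduler dependency callback. The scheduler calls this repeatedly before
 * running a job; each call hands back one still-unsignaled fence the job
 * must wait on (the explicit in-fence first, then each BO's exclusive and
 * shared reservation fences), and NULL once everything has signaled.
 */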
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}

static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

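	/*
	 * A finished fence that already carries an error means the job was
	 * flagged as guilty (or depended on a guilty job) during recovery,
	 * so it must not reach the hardware again.
	 */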
	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		goto out_no_timeout;
	}

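	/*
	 * The job really hung: raise its karma so the scheduler can mark it
	 * guilty once it exceeds the configured hang limit and cancel it on
	 * resubmission instead of replaying it.
	 */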
	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

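	/* Replay the surviving jobs, then unpark the scheduler thread. */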
	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->ctx);
	if (ret)
		goto out_unlock;

	drm_sched_job_arm(&submit->sched_job);

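	/*
	 * The scheduler fence's "finished" fence doubles as the submit's
	 * out-fence; the cyclic IDR id below is the handle handed back to
	 * userspace for fence waits.
	 */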
	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}

int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

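	/*
	 * One scheduler instance per GPU core: at most hw_jobs_limit jobs
	 * queued to the hardware ring, a fixed 500ms job timeout, and
	 * job_hang_limit bounding how often a job may hang before it is
	 * dropped.
	 */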
	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev));
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}