/linux/include/drm/

  gpu_scheduler.h
     60  struct drm_sched_entity {  (struct)
    216  struct drm_sched_entity *current_entity;  (argument)
    293  struct drm_sched_entity *entity;
    339  struct drm_sched_entity *s_entity);
    460  struct drm_sched_entity *entity,
    484  struct drm_sched_entity *entity);
    489  struct drm_sched_entity *entity);
    491  struct drm_sched_entity *entity);
    493  int drm_sched_entity_init(struct drm_sched_entity *entity,
    509  struct drm_sched_entity *s_entity, void *owner);
    [all …]

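The declarations above in gpu_scheduler.h are the driver-facing side of the entity API: a driver embeds a struct drm_sched_entity, binds it to one or more schedulers with drm_sched_entity_init(), and tears it down with drm_sched_entity_destroy(). Below is a minimal sketch of that lifecycle; the foo_* names are hypothetical, and the parameter lists assume the kernel vintage of this listing (they have shifted across releases). A submission-side sketch follows the amdgpu section further down.

#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

struct foo_file_priv {
	struct drm_sched_entity entity;		/* embedded, not a pointer */
};

/* Bind the entity to a single scheduler at normal priority. */
static int foo_entity_init(struct foo_file_priv *priv,
			   struct drm_gpu_scheduler *sched)
{
	struct drm_gpu_scheduler *sched_list[] = { sched };

	return drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list),
				     NULL /* no guilty counter */);
}

/* Flush outstanding jobs on the entity, then release its resources. */
static void foo_entity_fini(struct foo_file_priv *priv)
{
	drm_sched_entity_destroy(&priv->entity);
}
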
/linux/drivers/gpu/drm/scheduler/

  sched_entity.c
     59  int drm_sched_entity_init(struct drm_sched_entity *entity,  (in drm_sched_entity_init())
     68  memset(entity, 0, sizeof(struct drm_sched_entity));  (in drm_sched_entity_init())
    131  bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)  (in drm_sched_entity_is_ready())
    207  struct drm_sched_entity *entity)  (in drm_sched_job_dependency())
    265  void drm_sched_entity_fini(struct drm_sched_entity *entity)  (in drm_sched_entity_fini())
    308  void drm_sched_entity_destroy(struct drm_sched_entity *entity)  (in drm_sched_entity_destroy())
    319  struct drm_sched_entity *entity =  (in drm_sched_entity_clear_dep())
    320  container_of(cb, struct drm_sched_entity, cb);  (in drm_sched_entity_clear_dep())
    333  struct drm_sched_entity *entity =  (in drm_sched_entity_wakeup())
    334  container_of(cb, struct drm_sched_entity, cb);  (in drm_sched_entity_wakeup())
    [all …]

  sched_main.c
     91  struct drm_sched_entity *entity)  (in drm_sched_rq_add_entity())
    110  struct drm_sched_entity *entity)  (in drm_sched_rq_remove_entity())
    129  static struct drm_sched_entity *
    132  struct drm_sched_entity *entity;  (in drm_sched_rq_select_entity())
    209  struct drm_sched_entity *entity)  (in drm_sched_dependency_optimized())
    587  struct drm_sched_entity *entity,  (in drm_sched_job_init())
    788  static struct drm_sched_entity *
    791  struct drm_sched_entity *entity;  (in drm_sched_select_entity())
   1039  struct drm_sched_entity *s_entity;  (in drm_sched_fini())
   1083  struct drm_sched_entity *tmp;  (in drm_sched_increase_karma_ext())
    [all …]

  gpu_scheduler_trace.h
     36  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
     39  __field(struct drm_sched_entity *, entity)
     63  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
     66  __field(struct drm_sched_entity *, entity)

  sched_fence.c
    150  struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,  (in drm_sched_fence_alloc())
    166  struct drm_sched_entity *entity)  (in drm_sched_fence_init())

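sched_entity.c and sched_main.c above are the scheduler core: entities are queued on run-queues (drm_sched_rq_add_entity()/drm_sched_rq_select_entity()), and drm_sched_job_dependency() asks the driver which fences a job still has to wait on before run_job() is invoked. From the driver's side that interaction is a struct drm_sched_backend_ops. The sketch below uses the callback names matching this listing's vintage (the dependency hook corresponds to amdgpu_job_dependency()/etnaviv_sched_dependency() further down); the foo_* bodies are hypothetical placeholders.

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/*
 * Called repeatedly before run_job(): return the next fence the job must
 * wait on, or NULL once every dependency has been consumed.
 */
static struct dma_fence *foo_job_dependency(struct drm_sched_job *sched_job,
					    struct drm_sched_entity *s_entity)
{
	return NULL;	/* no driver-private dependencies in this sketch */
}

/* Hand the job to the hardware and return its hardware fence. */
static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
	return NULL;	/* placeholder: a real driver returns the HW fence */
}

/* Release driver-side job state once the scheduler is done with it. */
static void foo_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
}

/* Passed to drm_sched_init() when the scheduler instance is created. */
static const struct drm_sched_backend_ops foo_sched_ops = {
	.dependency	= foo_job_dependency,
	.run_job	= foo_run_job,
	.free_job	= foo_free_job,
};
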
/linux/drivers/gpu/drm/amd/amdgpu/

  amdgpu_ctx.h
     37  struct drm_sched_entity entity;
     71  u32 ring, struct drm_sched_entity **entity);
     73  struct drm_sched_entity *entity,
     76  struct drm_sched_entity *entity,
     85  struct drm_sched_entity *entity);

  amdgpu_ctx.c
    279  u32 ring, struct drm_sched_entity **entity)  (in amdgpu_ctx_get_entity())
    538  struct drm_sched_entity *entity,  (in amdgpu_ctx_add_fence())
    564  struct drm_sched_entity *entity,  (in amdgpu_ctx_get_fence())
    639  struct drm_sched_entity *entity)  (in amdgpu_ctx_wait_prev_fence())
    680  struct drm_sched_entity *entity;  (in amdgpu_ctx_mgr_entity_flush())
    710  struct drm_sched_entity *entity;  (in amdgpu_ctx_mgr_entity_fini())

  amdgpu_job.c
    173  int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,  (in amdgpu_job_submit())
    213  struct drm_sched_entity *s_entity)  (in amdgpu_job_dependency())
    284  struct drm_sched_entity *s_entity = NULL;  (in amdgpu_job_stop_all_jobs_on_sched())

  amdgpu_job.h
     77  int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,

  amdgpu_vce.h
     51  struct drm_sched_entity entity;

  amdgpu_uvd.h
     65  struct drm_sched_entity entity;

  amdgpu_vm.h
    284  struct drm_sched_entity immediate;
    285  struct drm_sched_entity delayed;

  amdgpu_ttm.h
     75  struct drm_sched_entity entity;

  amdgpu_cs.c
    884  struct drm_sched_entity *entity;  (in amdgpu_cs_ib_fill())
    965  struct drm_sched_entity *entity;  (in amdgpu_cs_process_fence_dep())
   1212  struct drm_sched_entity *entity = p->entity;  (in amdgpu_cs_submit())
   1388  struct drm_sched_entity *entity;  (in amdgpu_cs_wait_ioctl())
   1436  struct drm_sched_entity *entity;  (in amdgpu_cs_get_fence())

  amdgpu_vm_sdma.c
     94  struct drm_sched_entity *entity;  (in amdgpu_vm_sdma_commit())

  amdgpu.h
    537  struct drm_sched_entity *entity;

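amdgpu_job_submit() and the amdgpu_ctx fence helpers above illustrate what a submit path typically hands back: the job's finished fence, taken from job->s_fence after the job is armed and before it is pushed, since once pushed the job may complete and be freed at any time. A hedged sketch of that pattern follows, with a hypothetical helper and the same signature assumptions as the earlier sketches; the caller can store the returned fence per context or hand it to userspace.

#include <linux/dma-fence.h>
#include <linux/err.h>
#include <drm/gpu_scheduler.h>

/* Queue a prepared job on @entity and return its completion fence. */
static struct dma_fence *foo_submit_job(struct drm_sched_job *job,
					struct drm_sched_entity *entity,
					void *owner)
{
	struct dma_fence *finished;
	int ret;

	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ERR_PTR(ret);

	drm_sched_job_arm(job);

	/* Take the reference before pushing; afterwards @job may be gone. */
	finished = dma_fence_get(&job->s_fence->finished);

	drm_sched_entity_push_job(job);

	return finished;
}
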
/linux/drivers/gpu/drm/etnaviv/

  etnaviv_sched.c
     22  struct drm_sched_entity *entity)  (in etnaviv_sched_dependency())
    149  int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,  (in etnaviv_sched_push_job())

  etnaviv_sched.h
     21  int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,

  etnaviv_drv.h
     31  struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];

/linux/drivers/gpu/drm/msm/

  msm_submitqueue.c
     80  static struct drm_sched_entity *
     96  struct drm_sched_entity *entity;  (in get_sched_entity())

  msm_gpu.h
    301  struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
    385  struct drm_sched_entity *entity;

/linux/drivers/gpu/drm/lima/

  lima_sched.h
     37  struct drm_sched_entity base;

/linux/drivers/gpu/drm/panfrost/

  panfrost_device.h
    140  struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];

  panfrost_job.c
    878  struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];  (in panfrost_job_close())

/linux/drivers/gpu/drm/v3d/

  v3d_drv.h
    169  struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
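The etnaviv, msm, panfrost and v3d entries share one pattern: a small fixed array of entities embedded in per-file or per-GPU state, one entity per job slot, queue or priority, with the right entity picked at submit time. A hedged sketch of that pattern, using hypothetical baz_* names and queue count and the same drm_sched_entity_init() assumptions as above:

#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

#define BAZ_NUM_QUEUES	2	/* hypothetical, cf. NUM_JOB_SLOTS / V3D_MAX_QUEUES */

struct baz_file_priv {
	struct drm_sched_entity sched_entity[BAZ_NUM_QUEUES];
};

/* One entity per queue, each bound to that queue's scheduler. */
static int baz_open(struct baz_file_priv *priv,
		    struct drm_gpu_scheduler **scheds)
{
	int i, ret;

	for (i = 0; i < BAZ_NUM_QUEUES; i++) {
		struct drm_gpu_scheduler *list[] = { scheds[i] };

		ret = drm_sched_entity_init(&priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL,
					    list, ARRAY_SIZE(list), NULL);
		if (ret)
			goto err;
	}
	return 0;

err:
	/* Unwind the entities that were already initialized. */
	while (--i >= 0)
		drm_sched_entity_destroy(&priv->sched_entity[i]);
	return ret;
}

static void baz_close(struct baz_file_priv *priv)
{
	int i;

	for (i = 0; i < BAZ_NUM_QUEUES; i++)
		drm_sched_entity_destroy(&priv->sched_entity[i]);
}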