
Searched refs:job (Results 1 – 25 of 247) sorted by relevance

/linux/drivers/gpu/host1x/
job.c
52 if (!job) in host1x_job_alloc()
70 job->reloc_addr_phys = job->addr_phys; in host1x_job_alloc()
89 job->release(job); in job_free()
208 job->unpins[job->num_unpins].dev = dev; in pin_job()
209 job->unpins[job->num_unpins].dir = dir; in pin_job()
213 job->addr_phys[job->num_unpins] = phys_addr; in pin_job()
215 job->unpins[job->num_unpins].sgt = sgt; in pin_job()
296 job->addr_phys[job->num_unpins] = phys_addr; in pin_job()
299 job->unpins[job->num_unpins].bo = g->bo; in pin_job()
300 job->unpins[job->num_unpins].sgt = sgt; in pin_job()
[all …]
cdma.c
324 !job->cancelled) { in update_cdma_locked()
326 if (job->timeout) in update_cdma_locked()
340 if (job->num_slots) { in update_cdma_locked()
349 list_del(&job->list); in update_cdma_locked()
350 host1x_job_put(job); in update_cdma_locked()
398 job = NULL; in host1x_cdma_update_sync_queue()
413 if (!job) in host1x_cdma_update_sync_queue()
422 job->timeout = 0; in host1x_cdma_update_sync_queue()
432 job->num_slots); in host1x_cdma_update_sync_queue()
539 if (job->timeout) { in host1x_cdma_begin()
[all …]
/linux/drivers/md/
dm-kcopyd.c
427 job->master_job->write_offset += job->source.count; in pop_io_job()
453 return job; in pop()
493 if (job->pages && job->pages != &zero_page_list) in run_complete_job()
499 if (job->master_job == job) { in run_complete_job()
568 job->write_err = job->master_job->write_err; in run_io_job()
590 push(&job->kc->io_jobs, job); in run_pages_job()
708 if ((!job->read_err && !job->write_err) || in segment_complete()
822 memset(&job->source, 0, sizeof job->source); in dm_kcopyd_copy()
823 job->source.count = job->dests[0].count; in dm_kcopyd_copy()
839 job->master_job = job; in dm_kcopyd_copy()
[all …]
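
The dm-kcopyd hits above are all internal to the job engine; the client-facing pattern they serve looks roughly like the sketch below. This is a hedged illustration only: example_copy() and example_copy_done() are made-up names, and it assumes the modern void-returning dm_kcopyd_copy() and an unthrottled client.

/*
 * Sketch: copy `count` sectors from src to dst through kcopyd and wait
 * for the asynchronous job to finish. Error handling is trimmed.
 */
#include <linux/dm-kcopyd.h>
#include <linux/dm-io.h>
#include <linux/completion.h>
#include <linux/err.h>

static void example_copy_done(int read_err, unsigned long write_err,
			      void *context)
{
	/* Non-zero read_err/write_err means the copy job failed. */
	complete(context);
}

static int example_copy(struct block_device *src, struct block_device *dst,
			sector_t sector, sector_t count)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to;
	DECLARE_COMPLETION_ONSTACK(done);

	kc = dm_kcopyd_client_create(NULL);	/* NULL: no throttling */
	if (IS_ERR(kc))
		return PTR_ERR(kc);

	from.bdev = src;
	from.sector = sector;
	from.count = count;

	to.bdev = dst;
	to.sector = sector;
	to.count = count;

	/* One source, one destination; example_copy_done() runs on completion. */
	dm_kcopyd_copy(kc, &from, 1, &to, 0, example_copy_done, &done);

	wait_for_completion(&done);
	dm_kcopyd_client_destroy(kc);
	return 0;
}
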
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
89 if (!*job) in amdgpu_job_alloc()
97 (*job)->vm = vm; in amdgpu_job_alloc()
98 (*job)->ibs = (void *)&(*job)[1]; in amdgpu_job_alloc()
121 kfree(*job); in amdgpu_job_alloc_with_ib()
139 f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence; in amdgpu_job_free_resources()
157 kfree(job); in amdgpu_job_free_cb()
170 kfree(job); in amdgpu_job_free()
200 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence); in amdgpu_job_submit_direct()
206 amdgpu_job_free(job); in amdgpu_job_submit_direct()
231 job); in amdgpu_job_dependency()
[all …]
amdgpu_ib.c
149 if (job) { in amdgpu_ib_schedule()
150 vm = job->vm; in amdgpu_ib_schedule()
151 fence_ctx = job->base.s_fence ? in amdgpu_ib_schedule()
163 if (vm && !job->vmid) { in amdgpu_ib_schedule()
191 trace_amdgpu_ib_pipe_sync(job, tmp); in amdgpu_ib_schedule()
206 if (job) { in amdgpu_ib_schedule()
223 status |= job->preamble_status; in amdgpu_ib_schedule()
224 status |= job->preemption_status; in amdgpu_ib_schedule()
260 if (job && job->uf_addr) { in amdgpu_ib_schedule()
261 amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, in amdgpu_ib_schedule()
[all …]
amdgpu_job.h
38 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0) argument
72 struct amdgpu_job **job, struct amdgpu_vm *vm);
74 enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
75 void amdgpu_job_free_resources(struct amdgpu_job *job);
76 void amdgpu_job_free(struct amdgpu_job *job);
77 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
79 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
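
The prototypes above are easier to read with the usual call sequence in mind. The sketch below is a hedged illustration of that sequence (allocate a job with one IB, fill it, hand it to a scheduler entity), assuming the four-argument amdgpu_job_submit() of this kernel generation; example_submit_ib() is a made-up helper, not part of the driver.

/*
 * Sketch only: submit `ndw` dwords of command packets on an entity.
 * On submit failure the caller still owns the job and frees it,
 * mirroring error paths used elsewhere in the driver.
 */
#include "amdgpu.h"

static int example_submit_ib(struct amdgpu_device *adev,
			     struct drm_sched_entity *entity,
			     const u32 *packets, unsigned int ndw,
			     struct dma_fence **fence)
{
	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	/* Copy the packets into the job's single IB. */
	memcpy(job->ibs[0].ptr, packets, ndw * 4);
	job->ibs[0].length_dw = ndw;

	/* The scheduler owns the job once submission succeeds. */
	r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_UNDEFINED,
			      fence);
	if (r)
		amdgpu_job_free(job);

	return r;
}
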
amdgpu_trace.h
35 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ argument
36 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
165 TP_PROTO(struct amdgpu_job *job),
166 TP_ARGS(job),
183 __entry->num_ibs = job->num_ibs;
191 TP_PROTO(struct amdgpu_job *job),
192 TP_ARGS(job),
208 __entry->num_ibs = job->num_ibs;
218 struct amdgpu_job *job),
219 TP_ARGS(vm, ring, job),
[all …]
/linux/drivers/gpu/drm/v3d/
v3d_sched.c
62 v3d_job_cleanup(job); in v3d_sched_job_free()
71 if (job->perfmon && v3d->active_perfmon != job->perfmon) in v3d_switch_perfmon()
90 v3d->bin_job = job; in v3d_bin_job_run()
108 job->start, job->end); in v3d_bin_job_run()
115 if (job->qma) { in v3d_bin_job_run()
119 if (job->qts) { in v3d_bin_job_run()
122 job->qts); in v3d_bin_job_run()
159 job->start, job->end); in v3d_render_job_run()
186 v3d->tfu_job = job; in v3d_tfu_job_run()
314 &job->timedout_ctca, &job->timedout_ctra); in v3d_bin_job_timedout()
[all …]
v3d_gem.c
265 drm_gem_unlock_reservations(job->bo, job->bo_count, in v3d_lock_bo_reservations()
311 job->bo = kvmalloc_array(job->bo_count, in v3d_lookup_bos()
314 if (!job->bo) { in v3d_lookup_bos()
362 if (job->bo[i]) in v3d_job_free()
365 kvfree(job->bo); in v3d_job_free()
376 kfree(job); in v3d_job_free()
395 if (!job) in v3d_job_cleanup()
404 kref_put(&job->refcount, job->free); in v3d_job_put()
471 job->v3d = v3d; in v3d_job_init()
526 job->done_fence = dma_fence_get(&job->base.s_fence->finished); in v3d_push_job()
[all …]
/linux/block/
bsg-lib.c
31 struct bsg_job *job; in bsg_transport_sg_io_fn() local
50 reply = job->reply; in bsg_transport_sg_io_fn()
51 memset(job, 0, sizeof(*job)); in bsg_transport_sg_io_fn()
52 job->reply = reply; in bsg_transport_sg_io_fn()
54 job->dd_data = job + 1; in bsg_transport_sg_io_fn()
76 job->bidi_bio = job->bidi_rq->bio; in bsg_transport_sg_io_fn()
140 if (job->bidi_rq) in bsg_transport_sg_io_fn()
143 if (job->bidi_rq) in bsg_transport_sg_io_fn()
245 ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq); in bsg_prepare_job()
249 job->dev = dev; in bsg_prepare_job()
[all …]
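
bsg-lib.c is the library half of this interface; the other half is the bsg_job handler a transport driver registers through bsg_setup_queue(). The sketch below shows the minimal shape of such a handler, using only the bsg_job fields visible above; example_bsg_request() is a hypothetical name and the hardware handoff is elided.

#include <linux/bsg-lib.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_bsg_request(struct bsg_job *job)
{
	unsigned int len = job->request_payload.payload_len;
	void *buf;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Gather the caller's payload out of the scatterlist bsg-lib mapped. */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, buf, len);

	/* ... hand buf to the hardware, fill job->reply when it completes ... */

	kfree(buf);

	/* Complete the job: result 0, no reply payload bytes transferred back. */
	bsg_job_done(job, 0, 0);
	return 0;
}
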
/linux/drivers/gpu/drm/panfrost/
panfrost_job.c
160 WARN_ON(!job); in panfrost_dequeue_job()
164 return job; in panfrost_dequeue_job()
276 ret = drm_gem_lock_reservations(job->bos, job->bo_count, in panfrost_job_push()
286 ret = panfrost_acquire_object_fences(job->bos, job->bo_count, in panfrost_job_push()
299 panfrost_attach_object_fences(job->bos, job->bo_count, in panfrost_job_push()
335 kfree(job); in panfrost_job_cleanup()
365 if (!job->jc) in panfrost_job_run()
424 job->jc = 0; in panfrost_job_handle_err()
431 job->jc = 0; in panfrost_job_handle_err()
454 job->jc = 0; in panfrost_job_handle_done()
[all …]
panfrost_drv.c
138 if (!job->bo_count) in panfrost_lookup_bos()
143 job->bo_count, &job->bos); in panfrost_lookup_bos()
147 job->mappings = kvmalloc_array(job->bo_count, in panfrost_lookup_bos()
150 if (!job->mappings) in panfrost_lookup_bos()
238 struct panfrost_job *job; in panfrost_ioctl_submit() local
253 job = kzalloc(sizeof(*job), GFP_KERNEL); in panfrost_ioctl_submit()
254 if (!job) { in panfrost_ioctl_submit()
259 kref_init(&job->refcount); in panfrost_ioctl_submit()
261 job->pfdev = pfdev; in panfrost_ioctl_submit()
262 job->jc = args->jc; in panfrost_ioctl_submit()
[all …]
/linux/drivers/gpu/drm/
drm_writeback.c
275 job->prepared = true; in drm_writeback_prepare_job()
302 struct drm_writeback_job *job; in drm_writeback_queue_job() local
323 if (job->fb) in drm_writeback_cleanup_job()
324 drm_framebuffer_put(job->fb); in drm_writeback_cleanup_job()
326 if (job->out_fence) in drm_writeback_cleanup_job()
329 kfree(job); in drm_writeback_cleanup_job()
377 if (job) in drm_writeback_signal_completion()
378 list_del(&job->list_entry); in drm_writeback_signal_completion()
382 if (WARN_ON(!job)) in drm_writeback_signal_completion()
385 out_fence = job->out_fence; in drm_writeback_signal_completion()
[all …]
/linux/drivers/gpu/host1x/hw/
channel_hw.c
86 struct device *dev = job->channel->dev; in submit_gathers()
91 for (i = 0; i < job->num_cmds; i++) { in submit_gathers()
134 struct host1x_syncpt *sp = job->syncpt; in synchronize_syncpt_base()
141 host1x_cdma_push(&job->channel->cdma, in synchronize_syncpt_base()
174 job->num_cmds, job->num_relocs, in channel_submit()
175 job->syncpt->id, job->syncpt_incrs); in channel_submit()
201 if (job->serialize) { in channel_submit()
215 synchronize_syncpt_base(job); in channel_submit()
221 job->syncpt_end = syncval; in channel_submit()
224 if (job->class) in channel_submit()
[all …]
debug_hw.c
196 struct host1x_job *job; in show_channel_gathers() local
202 job->syncpt->id, job->syncpt_end, job->timeout, in show_channel_gathers()
203 job->num_slots, job->num_unpins); in show_channel_gathers()
205 show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma, in show_channel_gathers()
206 pb->dma + job->first_get, pb->mapped + job->first_get); in show_channel_gathers()
208 for (i = 0; i < job->num_cmds; i++) { in show_channel_gathers()
212 if (job->cmds[i].is_wait) in show_channel_gathers()
215 g = &job->cmds[i].gather; in show_channel_gathers()
217 if (job->gather_copy_mapped) in show_channel_gathers()
218 mapped = (u32 *)job->gather_copy_mapped; in show_channel_gathers()
[all …]
/linux/drivers/scsi/lpfc/
lpfc_bsg.c
319 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
340 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
375 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
594 if (job) { in lpfc_bsg_rport_els_cmp()
614 if (job) { in lpfc_bsg_rport_els_cmp()
645 if (job) { in lpfc_bsg_rport_els_cmp()
1390 if (job) { in lpfc_issue_ct_rsp_cmp()
1408 if (job) { in lpfc_issue_ct_rsp_cmp()
1440 if (job) { in lpfc_issue_ct_rsp_cmp()
3718 if (!job) in lpfc_bsg_issue_read_mbox_ext_cmpl()
[all …]
/linux/drivers/gpu/drm/scheduler/
sched_main.c
328 if (job) { in drm_sched_job_timedout()
337 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
344 job->sched->ops->free_job(job); in drm_sched_job_timedout()
594 job->entity = entity; in drm_sched_job_init()
596 if (!job->s_fence) in drm_sched_job_init()
629 job->sched = sched; in drm_sched_job_arm()
633 drm_sched_fence_init(job->s_fence, job->entity); in drm_sched_job_arm()
746 job->s_fence = NULL; in drm_sched_job_cleanup()
825 if (job && dma_fence_is_signaled(&job->s_fence->finished)) { in drm_sched_get_cleanup_job()
842 job = NULL; in drm_sched_get_cleanup_job()
[all …]
sched_entity.c
200 drm_sched_fence_finished(job->s_fence); in drm_sched_entity_kill_jobs_cb()
201 WARN_ON(job->s_fence->parent); in drm_sched_entity_kill_jobs_cb()
202 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_cb()
206 drm_sched_job_dependency(struct drm_sched_job *job, in drm_sched_job_dependency() argument
209 if (!xa_empty(&job->dependencies)) in drm_sched_job_dependency()
210 return xa_erase(&job->dependencies, job->last_dependency++); in drm_sched_job_dependency()
212 if (job->sched->ops->dependency) in drm_sched_job_dependency()
213 return job->sched->ops->dependency(job, entity); in drm_sched_job_dependency()
220 struct drm_sched_job *job; in drm_sched_entity_kill_jobs() local
225 struct drm_sched_fence *s_fence = job->s_fence; in drm_sched_entity_kill_jobs()
[all …]
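
Both scheduler files above consume jobs that a driver produced; the driver-side lifecycle is the part that is easy to lose in these fragments. The sketch below is a hedged illustration of that lifecycle (init, arm, push), assuming the single-argument drm_sched_entity_push_job() of this kernel generation; struct example_job and example_submit() are invented for illustration, and the backend ops (run_job, free_job, timedout_job) are left out.

#include <drm/gpu_scheduler.h>
#include <linux/slab.h>

struct example_job {
	struct drm_sched_job base;	/* embedded scheduler job */
	/* driver-specific command data would live here */
};

static int example_submit(struct drm_sched_entity *entity, void *owner)
{
	struct example_job *job;
	int ret;

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	/* Bind the job to an entity and allocate its scheduler fences. */
	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret) {
		kfree(job);
		return ret;
	}

	/* Commit to the job: picks the scheduler and arms s_fence. */
	drm_sched_job_arm(&job->base);

	/*
	 * Queue it. From here on the scheduler owns the job: run_job() is
	 * called once dependencies signal, and free_job() is expected to
	 * kfree() the wrapper when the job retires.
	 */
	drm_sched_entity_push_job(&job->base);
	return 0;
}
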
/linux/drivers/gpu/drm/tegra/
submit.c
375 struct host1x_job *job; in submit_create_job() local
389 if (!job) { in submit_create_job()
401 job->serialize = true; in submit_create_job()
457 host1x_job_put(job); in submit_create_job()
458 job = ERR_PTR(err); in submit_create_job()
463 return job; in submit_create_job()
550 if (IS_ERR(job)) { in tegra_drm_ioctl_channel_submit()
551 err = PTR_ERR(job); in tegra_drm_ioctl_channel_submit()
573 job->timeout = 10000; in tegra_drm_ioctl_channel_submit()
604 host1x_job_unpin(job); in tegra_drm_ioctl_channel_submit()
[all …]
/linux/drivers/scsi/ufs/
ufs_bsg.c
46 struct ufs_bsg_request *bsg_request = job->request; in ufs_bsg_alloc_desc_buffer()
60 if (*desc_len > job->request_payload.payload_len) { in ufs_bsg_alloc_desc_buffer()
70 sg_copy_to_buffer(job->request_payload.sg_list, in ufs_bsg_alloc_desc_buffer()
71 job->request_payload.sg_cnt, descp, in ufs_bsg_alloc_desc_buffer()
80 static int ufs_bsg_request(struct bsg_job *job) in ufs_bsg_request() argument
82 struct ufs_bsg_request *bsg_request = job->request; in ufs_bsg_request()
83 struct ufs_bsg_reply *bsg_reply = job->reply; in ufs_bsg_request()
85 unsigned int req_len = job->request_len; in ufs_bsg_request()
86 unsigned int reply_len = job->reply_len; in ufs_bsg_request()
149 job->request_payload.sg_cnt, in ufs_bsg_request()
[all …]
/linux/drivers/misc/habanalabs/common/
hw_queue.c
286 cb = job->patched_cb; in ext_queue_schedule_job()
287 len = job->job_cb_size; in ext_queue_schedule_job()
314 job->contains_dma_pkt); in ext_queue_schedule_job()
385 len = job->job_cb_size; in hw_queue_schedule_job()
393 if (job->patched_cb) in hw_queue_schedule_job()
411 q_idx = job->hw_queue_id; in init_signal_cs()
467 q_idx = job->hw_queue_id; in init_wait_cs()
547 struct hl_cs_job *job; in init_signal_wait_cs() local
627 struct hl_cs_job *job, *tmp; in hl_hw_queue_schedule_cs() local
748 switch (job->queue_type) { in hl_hw_queue_schedule_cs()
[all …]
command_submission.c
169 kfree(job); in cs_job_do_release()
261 job->job_cb_size = job->user_cb_size; in cs_parser()
1089 job = kzalloc(sizeof(*job), GFP_ATOMIC); in hl_cs_allocate_job()
1090 if (!job) in hl_cs_allocate_job()
1091 job = kzalloc(sizeof(*job), GFP_KERNEL); in hl_cs_allocate_job()
1093 if (!job) in hl_cs_allocate_job()
1106 return job; in hl_cs_allocate_job()
1358 if (!job) { in cs_ioctl_default()
1716 if (!job) { in cs_ioctl_signal_wait_create_jobs()
1753 job->patched_cb = job->user_cb; in cs_ioctl_signal_wait_create_jobs()
[all …]
/linux/drivers/scsi/libsas/
sas_host_smp.c
233 if (job->request_payload.payload_len < 8 || in sas_smp_host_handler()
234 job->reply_payload.payload_len < 8) in sas_smp_host_handler()
242 job->request_payload.sg_cnt, req_data, in sas_smp_host_handler()
243 job->request_payload.payload_len); in sas_smp_host_handler()
282 if (job->request_payload.payload_len < 16) in sas_smp_host_handler()
294 if (job->request_payload.payload_len < 16) in sas_smp_host_handler()
309 if (job->request_payload.payload_len < in sas_smp_host_handler()
326 if (job->request_payload.payload_len < 44) in sas_smp_host_handler()
344 job->reply_payload.sg_cnt, resp_data, in sas_smp_host_handler()
345 job->reply_payload.payload_len); in sas_smp_host_handler()
[all …]
/linux/Documentation/devicetree/bindings/powerpc/fsl/
raideng.txt
30 There must be a sub-node for each job queue present in RAID Engine
34 This identifies the job queue interface
35 - reg: offset and length of the register set for job queue
42 compatible = "fsl,raideng-v1.0-job-queue";
48 There must be a sub-node for each job ring present in RAID Engine
49 This node must be a sub-node of job queue node
51 - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
52 This identifies job ring. Should contain either
55 - reg: offset and length of the register set for job ring
56 - interrupts: interrupt mapping for job ring IRQ
[all …]
/linux/drivers/media/platform/
rcar_fdp1.c
660 if (job) in list_remove_job()
664 return job; in list_remove_job()
925 if (job->previous) in fdp1_configure_rpf()
934 if (job->next) in fdp1_configure_rpf()
1077 if (!job) { in fdp1_device_process()
1197 if (!job) { in fdp1_prepare_job()
1240 job->dst->vb->flags = job->active->vb->flags & in fdp1_prepare_job()
1253 return job; in fdp1_prepare_job()
1335 job->dst = NULL; in device_frame_end()
2006 while (job) { in fdp1_stop_streaming()
[all …]

