Lines matching refs: args (drivers/gpu/drm/amd/amdkfd/kfd_chardev.c)

161 struct kfd_ioctl_get_version_args *args = data; in kfd_ioctl_get_version() local
163 args->major_version = KFD_IOCTL_MAJOR_VERSION; in kfd_ioctl_get_version()
164 args->minor_version = KFD_IOCTL_MINOR_VERSION; in kfd_ioctl_get_version()
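
These two assignments are the entire version query. A minimal user-space caller, as a sketch (assuming the uapi header linux/kfd_ioctl.h and the /dev/kfd node; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>    /* kfd_ioctl_get_version_args, AMDKFD_IOC_GET_VERSION */

int main(void)
{
        struct kfd_ioctl_get_version_args args = {0};
        int fd = open("/dev/kfd", O_RDWR);

        if (fd < 0)
                return 1;
        /* The handler fills these from KFD_IOCTL_MAJOR/MINOR_VERSION. */
        if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
                printf("KFD ioctl interface %u.%u\n",
                       args.major_version, args.minor_version);
        close(fd);
        return 0;
}
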
170 struct kfd_ioctl_create_queue_args *args) in set_queue_properties_from_user() argument
172 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { in set_queue_properties_from_user()
177 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in set_queue_properties_from_user()
182 if ((args->ring_base_address) && in set_queue_properties_from_user()
183 (!access_ok((const void __user *) args->ring_base_address, in set_queue_properties_from_user()
189 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in set_queue_properties_from_user()
194 if (!access_ok((const void __user *) args->read_pointer_address, in set_queue_properties_from_user()
200 if (!access_ok((const void __user *) args->write_pointer_address, in set_queue_properties_from_user()
206 if (args->eop_buffer_address && in set_queue_properties_from_user()
207 !access_ok((const void __user *) args->eop_buffer_address, in set_queue_properties_from_user()
213 if (args->ctx_save_restore_address && in set_queue_properties_from_user()
214 !access_ok((const void __user *) args->ctx_save_restore_address, in set_queue_properties_from_user()
222 q_properties->queue_percent = args->queue_percentage; in set_queue_properties_from_user()
223 q_properties->priority = args->queue_priority; in set_queue_properties_from_user()
224 q_properties->queue_address = args->ring_base_address; in set_queue_properties_from_user()
225 q_properties->queue_size = args->ring_size; in set_queue_properties_from_user()
226 q_properties->read_ptr = (uint32_t *) args->read_pointer_address; in set_queue_properties_from_user()
227 q_properties->write_ptr = (uint32_t *) args->write_pointer_address; in set_queue_properties_from_user()
228 q_properties->eop_ring_buffer_address = args->eop_buffer_address; in set_queue_properties_from_user()
229 q_properties->eop_ring_buffer_size = args->eop_buffer_size; in set_queue_properties_from_user()
231 q_properties->ctx_save_restore_area_address = args->ctx_save_restore_address; in set_queue_properties_from_user()
232 q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size; in set_queue_properties_from_user()
233 q_properties->ctl_stack_size = args->ctl_stack_size; in set_queue_properties_from_user()
234 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE || in set_queue_properties_from_user()
235 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
237 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA) in set_queue_properties_from_user()
239 else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI) in set_queue_properties_from_user()
244 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL) in set_queue_properties_from_user()
250 q_properties->queue_percent, args->queue_percentage); in set_queue_properties_from_user()
253 q_properties->priority, args->queue_priority); in set_queue_properties_from_user()
256 q_properties->queue_address, args->ring_base_address); in set_queue_properties_from_user()
259 q_properties->queue_size, args->ring_size); in set_queue_properties_from_user()
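
set_queue_properties_from_user() follows one validate-before-trust pattern throughout: range-check every size-like field, probe every user pointer with access_ok(), and only then copy into the kernel-side queue_properties. A condensed, hypothetical helper showing the same checks (the name check_user_ring and the sizeof(uint64_t) probe size are illustrative, not from the driver):

/* Hypothetical condensation of the checks above. */
static int check_user_ring(const struct kfd_ioctl_create_queue_args *args)
{
        if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE)
                return -EINVAL;                 /* percentage out of range */
        if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY)
                return -EINVAL;                 /* priority out of range */
        if (args->ring_base_address &&
            !access_ok((const void __user *)args->ring_base_address,
                       sizeof(uint64_t)))
                return -EFAULT;                 /* unreadable ring base */
        if (args->ring_size && !is_power_of_2(args->ring_size))
                return -EINVAL;                 /* ring must be 0 or 2^n */
        return 0;
}
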
278 struct kfd_ioctl_create_queue_args *args = data; in kfd_ioctl_create_queue() local
290 err = set_queue_properties_from_user(&q_properties, args); in kfd_ioctl_create_queue()
294 pr_debug("Looking for gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
295 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_create_queue()
297 pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); in kfd_ioctl_create_queue()
318 args->queue_id = queue_id; in kfd_ioctl_create_queue()
322 args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; in kfd_ioctl_create_queue()
323 args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_create_queue()
328 args->doorbell_offset |= doorbell_offset_in_process; in kfd_ioctl_create_queue()
332 pr_debug("Queue id %d was created successfully\n", args->queue_id); in kfd_ioctl_create_queue()
335 args->ring_base_address); in kfd_ioctl_create_queue()
338 args->read_pointer_address); in kfd_ioctl_create_queue()
341 args->write_pointer_address); in kfd_ioctl_create_queue()
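
The doorbell_offset written back at lines 322-328 is an encoded mmap cookie (type tag | GPU id | per-process doorbell offset), not a byte offset the caller computes; user space passes it straight to mmap() on the same /dev/kfd descriptor. A hedged sketch (real runtimes query the doorbell page size per device; 4096 below is an assumption):

#include <stdint.h>
#include <sys/mman.h>
#include <linux/kfd_ioctl.h>

/* Map the doorbell page for a queue just created through
 * AMDKFD_IOC_CREATE_QUEUE; args is the struct the ioctl filled in. */
static volatile uint32_t *map_doorbell(int kfd_fd,
                                       struct kfd_ioctl_create_queue_args *args)
{
        void *p = mmap(NULL, 4096,              /* assumed doorbell page size */
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       kfd_fd, args->doorbell_offset);
        return p == MAP_FAILED ? NULL : (volatile uint32_t *)p;
}
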
355 struct kfd_ioctl_destroy_queue_args *args = data; in kfd_ioctl_destroy_queue() local
358 args->queue_id, in kfd_ioctl_destroy_queue()
363 retval = pqm_destroy_queue(&p->pqm, args->queue_id); in kfd_ioctl_destroy_queue()
373 struct kfd_ioctl_update_queue_args *args = data; in kfd_ioctl_update_queue() local
376 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { in kfd_ioctl_update_queue()
381 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { in kfd_ioctl_update_queue()
386 if ((args->ring_base_address) && in kfd_ioctl_update_queue()
387 (!access_ok((const void __user *) args->ring_base_address, in kfd_ioctl_update_queue()
393 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { in kfd_ioctl_update_queue()
398 properties.queue_address = args->ring_base_address; in kfd_ioctl_update_queue()
399 properties.queue_size = args->ring_size; in kfd_ioctl_update_queue()
400 properties.queue_percent = args->queue_percentage; in kfd_ioctl_update_queue()
401 properties.priority = args->queue_priority; in kfd_ioctl_update_queue()
404 args->queue_id, p->pasid); in kfd_ioctl_update_queue()
408 retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties); in kfd_ioctl_update_queue()
420 struct kfd_ioctl_set_cu_mask_args *args = data; in kfd_ioctl_set_cu_mask() local
422 uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr; in kfd_ioctl_set_cu_mask()
423 size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32); in kfd_ioctl_set_cu_mask()
425 if ((args->num_cu_mask % 32) != 0) { in kfd_ioctl_set_cu_mask()
427 args->num_cu_mask); in kfd_ioctl_set_cu_mask()
431 minfo.cu_mask.count = args->num_cu_mask; in kfd_ioctl_set_cu_mask()
462 retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo); in kfd_ioctl_set_cu_mask()
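
num_cu_mask counts compute units, not words: it must be a multiple of 32 so the mask packs exactly into uint32_t words, which is what the cu_mask_size arithmetic at line 423 computes. User-space sketch enabling the first 64 CUs (the 64-CU count and queue handle are illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int enable_first_64_cus(int kfd_fd, uint32_t queue_id)
{
        uint32_t mask[2] = { 0xffffffffu, 0xffffffffu };  /* 64 CUs = 2 words */
        struct kfd_ioctl_set_cu_mask_args args = {
                .queue_id    = queue_id,
                .num_cu_mask = 64,              /* must be a multiple of 32 */
                .cu_mask_ptr = (uint64_t)(uintptr_t)mask,
        };

        return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
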
474 struct kfd_ioctl_get_queue_wave_state_args *args = data; in kfd_ioctl_get_queue_wave_state() local
479 r = pqm_get_wave_state(&p->pqm, args->queue_id, in kfd_ioctl_get_queue_wave_state()
480 (void __user *)args->ctl_stack_address, in kfd_ioctl_get_queue_wave_state()
481 &args->ctl_stack_used_size, in kfd_ioctl_get_queue_wave_state()
482 &args->save_area_used_size); in kfd_ioctl_get_queue_wave_state()
492 struct kfd_ioctl_set_memory_policy_args *args = data; in kfd_ioctl_set_memory_policy() local
498 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
499 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
503 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT in kfd_ioctl_set_memory_policy()
504 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { in kfd_ioctl_set_memory_policy()
508 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_memory_policy()
520 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
524 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) in kfd_ioctl_set_memory_policy()
531 (void __user *)args->alternate_aperture_base, in kfd_ioctl_set_memory_policy()
532 args->alternate_aperture_size)) in kfd_ioctl_set_memory_policy()
544 struct kfd_ioctl_set_trap_handler_args *args = data; in kfd_ioctl_set_trap_handler() local
549 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_trap_handler()
561 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr); in kfd_ioctl_set_trap_handler()
572 struct kfd_ioctl_dbg_register_args *args = data; in kfd_ioctl_dbg_register() local
579 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_register()
626 struct kfd_ioctl_dbg_unregister_args *args = data; in kfd_ioctl_dbg_unregister() local
630 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_unregister()
664 struct kfd_ioctl_dbg_address_watch_args *args = data; in kfd_ioctl_dbg_address_watch() local
675 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_address_watch()
684 cmd_from_user = (void __user *) args->content_ptr; in kfd_ioctl_dbg_address_watch()
688 if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) || in kfd_ioctl_dbg_address_watch()
689 (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) || in kfd_ioctl_dbg_address_watch()
695 args->buf_size_in_bytes - sizeof(*args)); in kfd_ioctl_dbg_address_watch()
716 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { in kfd_ioctl_dbg_address_watch()
740 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { in kfd_ioctl_dbg_address_watch()
764 struct kfd_ioctl_dbg_wave_control_args *args = data; in kfd_ioctl_dbg_wave_control() local
776 computed_buff_size = sizeof(*args) + in kfd_ioctl_dbg_wave_control()
783 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_dbg_wave_control()
793 if (args->buf_size_in_bytes != computed_buff_size) { in kfd_ioctl_dbg_wave_control()
795 args->buf_size_in_bytes, computed_buff_size); in kfd_ioctl_dbg_wave_control()
799 cmd_from_user = (void __user *) args->content_ptr; in kfd_ioctl_dbg_wave_control()
807 args->buf_size_in_bytes - sizeof(*args)); in kfd_ioctl_dbg_wave_control()
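
Both debugger ioctls carry a variable-length command: content_ptr points at a user buffer of buf_size_in_bytes, the handler copies everything past the leading args struct, and each read is bounds-checked against buf_size_in_bytes - sizeof(*args) (the args_idx tests above). Caller-side shape, as a sketch only; the actual payload layout belonged to the old kfd debugger protocol and is not reproduced here:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int send_address_watch(int kfd_fd, uint32_t gpu_id,
                              const void *payload, uint32_t payload_bytes)
{
        struct kfd_ioctl_dbg_address_watch_args args = {
                .content_ptr       = (uint64_t)(uintptr_t)payload,
                .gpu_id            = gpu_id,
                /* header plus trailing payload, which is exactly what the
                 * kernel-side bounds checks verify */
                .buf_size_in_bytes = sizeof(args) + payload_bytes,
        };

        return ioctl(kfd_fd, AMDKFD_IOC_DBG_ADDRESS_WATCH, &args);
}
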
848 struct kfd_ioctl_get_clock_counters_args *args = data; in kfd_ioctl_get_clock_counters() local
851 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_get_clock_counters()
854 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd); in kfd_ioctl_get_clock_counters()
857 args->gpu_clock_counter = 0; in kfd_ioctl_get_clock_counters()
860 args->cpu_clock_counter = ktime_get_raw_ns(); in kfd_ioctl_get_clock_counters()
861 args->system_clock_counter = ktime_get_boottime_ns(); in kfd_ioctl_get_clock_counters()
864 args->system_clock_freq = 1000000000; in kfd_ioctl_get_clock_counters()
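
One call samples three clock domains: the GPU counter (forced to 0 when the device has vanished), ktime_get_raw_ns() (CLOCK_MONOTONIC_RAW) and ktime_get_boottime_ns() (CLOCK_BOOTTIME), with system_clock_freq pinned at 1 GHz because both kernel clocks tick in nanoseconds. Sketch:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static void sample_clocks(int kfd_fd, uint32_t gpu_id)
{
        struct kfd_ioctl_get_clock_counters_args args = { .gpu_id = gpu_id };

        if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args) == 0)
                printf("gpu %llu cpu %llu boot %llu freq %llu Hz\n",
                       (unsigned long long)args.gpu_clock_counter,
                       (unsigned long long)args.cpu_clock_counter,
                       (unsigned long long)args.system_clock_counter,
                       (unsigned long long)args.system_clock_freq);
}
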
873 struct kfd_ioctl_get_process_apertures_args *args = data; in kfd_ioctl_get_process_apertures() local
879 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures()
887 &args->process_apertures[args->num_of_nodes]; in kfd_ioctl_get_process_apertures()
897 "node id %u\n", args->num_of_nodes); in kfd_ioctl_get_process_apertures()
913 if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS) in kfd_ioctl_get_process_apertures()
924 struct kfd_ioctl_get_process_apertures_new_args *args = data; in kfd_ioctl_get_process_apertures_new() local
931 if (args->num_of_nodes == 0) { in kfd_ioctl_get_process_apertures_new()
936 args->num_of_nodes = p->n_pdds; in kfd_ioctl_get_process_apertures_new()
945 args->num_of_nodes), GFP_KERNEL); in kfd_ioctl_get_process_apertures_new()
952 args->num_of_nodes = 0; in kfd_ioctl_get_process_apertures_new()
958 for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) { in kfd_ioctl_get_process_apertures_new()
986 args->num_of_nodes = i; in kfd_ioctl_get_process_apertures_new()
988 (void __user *)args->kfd_process_device_apertures_ptr, in kfd_ioctl_get_process_apertures_new()
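
kfd_ioctl_get_process_apertures_new() implements the common two-call sizing protocol: a first call with num_of_nodes == 0 only reports the node count (lines 931 and 936); the caller then allocates and calls again, and the kernel clamps to min(p->n_pdds, num_of_nodes) and writes back the count actually copied. User-space sketch (error handling trimmed):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static struct kfd_process_device_apertures *
get_apertures(int kfd_fd, uint32_t *count)
{
        struct kfd_ioctl_get_process_apertures_new_args args = {0};
        struct kfd_process_device_apertures *buf;

        /* Call 1: num_of_nodes == 0 asks only for the count. */
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args))
                return NULL;
        buf = calloc(args.num_of_nodes, sizeof(*buf));
        if (!buf)
                return NULL;
        /* Call 2: pass the capacity; the kernel writes back what it filled. */
        args.kfd_process_device_apertures_ptr = (uint64_t)(uintptr_t)buf;
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args)) {
                free(buf);
                return NULL;
        }
        *count = args.num_of_nodes;
        return buf;
}
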
1002 struct kfd_ioctl_create_event_args *args = data; in kfd_ioctl_create_event() local
1009 if (args->event_page_offset) { in kfd_ioctl_create_event()
1015 kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); in kfd_ioctl_create_event()
1036 GET_IDR_HANDLE(args->event_page_offset)); in kfd_ioctl_create_event()
1039 args->event_page_offset); in kfd_ioctl_create_event()
1058 p->signal_handle = args->event_page_offset; in kfd_ioctl_create_event()
1063 err = kfd_event_create(filp, p, args->event_type, in kfd_ioctl_create_event()
1064 args->auto_reset != 0, args->node_id, in kfd_ioctl_create_event()
1065 &args->event_id, &args->event_trigger_data, in kfd_ioctl_create_event()
1066 &args->event_page_offset, in kfd_ioctl_create_event()
1067 &args->event_slot_index); in kfd_ioctl_create_event()
1079 struct kfd_ioctl_destroy_event_args *args = data; in kfd_ioctl_destroy_event() local
1081 return kfd_event_destroy(p, args->event_id); in kfd_ioctl_destroy_event()
1087 struct kfd_ioctl_set_event_args *args = data; in kfd_ioctl_set_event() local
1089 return kfd_set_event(p, args->event_id); in kfd_ioctl_set_event()
1095 struct kfd_ioctl_reset_event_args *args = data; in kfd_ioctl_reset_event() local
1097 return kfd_reset_event(p, args->event_id); in kfd_ioctl_reset_event()
1103 struct kfd_ioctl_wait_events_args *args = data; in kfd_ioctl_wait_events() local
1106 err = kfd_wait_on_events(p, args->num_events, in kfd_ioctl_wait_events()
1107 (void __user *)args->events_ptr, in kfd_ioctl_wait_events()
1108 (args->wait_for_all != 0), in kfd_ioctl_wait_events()
1109 args->timeout, &args->wait_result); in kfd_ioctl_wait_events()
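
kfd_wait_on_events() consumes an array of per-event records through events_ptr; wait_for_all selects all-of versus any-of semantics, and wait_result reports why the wait ended. Minimal single-event sketch (the 1000 ms timeout is arbitrary; milliseconds is my reading of the field):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int wait_one_event(int kfd_fd, uint32_t event_id)
{
        struct kfd_event_data ev = { .event_id = event_id };
        struct kfd_ioctl_wait_events_args args = {
                .events_ptr   = (uint64_t)(uintptr_t)&ev,
                .num_events   = 1,
                .wait_for_all = 1,              /* moot with one event */
                .timeout      = 1000,           /* assumed: milliseconds */
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args))
                return -1;
        return (int)args.wait_result;  /* e.g. KFD_IOC_WAIT_RESULT_COMPLETE */
}
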
1116 struct kfd_ioctl_set_scratch_backing_va_args *args = data; in kfd_ioctl_set_scratch_backing_va() local
1121 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
1133 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
1140 dev->kgd, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
1152 struct kfd_ioctl_get_tile_config_args *args = data; in kfd_ioctl_get_tile_config() local
1157 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_get_tile_config()
1163 args->gb_addr_config = config.gb_addr_config; in kfd_ioctl_get_tile_config()
1164 args->num_banks = config.num_banks; in kfd_ioctl_get_tile_config()
1165 args->num_ranks = config.num_ranks; in kfd_ioctl_get_tile_config()
1167 if (args->num_tile_configs > config.num_tile_configs) in kfd_ioctl_get_tile_config()
1168 args->num_tile_configs = config.num_tile_configs; in kfd_ioctl_get_tile_config()
1169 err = copy_to_user((void __user *)args->tile_config_ptr, in kfd_ioctl_get_tile_config()
1171 args->num_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
1173 args->num_tile_configs = 0; in kfd_ioctl_get_tile_config()
1177 if (args->num_macro_tile_configs > config.num_macro_tile_configs) in kfd_ioctl_get_tile_config()
1178 args->num_macro_tile_configs = config.num_macro_tile_configs; in kfd_ioctl_get_tile_config()
1180 err = copy_to_user((void __user *)args->macro_tile_config_ptr, in kfd_ioctl_get_tile_config()
1182 args->num_macro_tile_configs * sizeof(uint32_t)); in kfd_ioctl_get_tile_config()
1184 args->num_macro_tile_configs = 0; in kfd_ioctl_get_tile_config()
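
The tile-config path is a capacity-in, count-out exchange: the caller advertises how many uint32_t entries each buffer holds, the kernel clamps that to what the hardware reports, copies, and writes the clamped counts back (zeroing them when the copy faults). Caller-side sketch (the 64/16 capacities are arbitrary):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int query_tile_config(int kfd_fd, uint32_t gpu_id)
{
        uint32_t tile[64], macro_tile[16];      /* assumed capacities */
        struct kfd_ioctl_get_tile_config_args args = {
                .gpu_id                 = gpu_id,
                .tile_config_ptr        = (uint64_t)(uintptr_t)tile,
                .num_tile_configs       = 64,
                .macro_tile_config_ptr  = (uint64_t)(uintptr_t)macro_tile,
                .num_macro_tile_configs = 16,
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_GET_TILE_CONFIG, &args))
                return -1;
        /* args.num_tile_configs now holds the count actually copied. */
        return (int)args.num_tile_configs;
}
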
1194 struct kfd_ioctl_acquire_vm_args *args = data; in kfd_ioctl_acquire_vm() local
1200 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_acquire_vm()
1204 drm_file = fget(args->drm_fd); in kfd_ioctl_acquire_vm()
1257 struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; in kfd_ioctl_alloc_memory_of_gpu() local
1263 uint64_t offset = args->mmap_offset; in kfd_ioctl_alloc_memory_of_gpu()
1264 uint32_t flags = args->flags; in kfd_ioctl_alloc_memory_of_gpu()
1266 if (args->size == 0) in kfd_ioctl_alloc_memory_of_gpu()
1277 args->va_addr >> PAGE_SHIFT, in kfd_ioctl_alloc_memory_of_gpu()
1278 (args->va_addr + args->size - 1) >> PAGE_SHIFT)) { in kfd_ioctl_alloc_memory_of_gpu()
1280 args->va_addr); in kfd_ioctl_alloc_memory_of_gpu()
1286 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1306 if (args->size != kfd_doorbell_process_slice(dev)) { in kfd_ioctl_alloc_memory_of_gpu()
1312 if (args->size != PAGE_SIZE) { in kfd_ioctl_alloc_memory_of_gpu()
1324 dev->kgd, args->va_addr, args->size, in kfd_ioctl_alloc_memory_of_gpu()
1339 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size); in kfd_ioctl_alloc_memory_of_gpu()
1343 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_alloc_memory_of_gpu()
1344 args->mmap_offset = offset; in kfd_ioctl_alloc_memory_of_gpu()
1350 args->mmap_offset = KFD_MMAP_TYPE_MMIO in kfd_ioctl_alloc_memory_of_gpu()
1351 | KFD_MMAP_GPU_ID(args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
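
The handle returned at line 1343 packs the GPU id and the per-process IDR slot into one 64-bit value; MAKE_HANDLE here and GET_GPU_ID/GET_IDR_HANDLE in the free, map, and unmap paths below are inverses. An assumed restatement of those kfd_priv.h helpers (gpu_id in the high 32 bits):

#include <stdint.h>

#define MAKE_HANDLE(gpu_id, idr_handle) \
        (((uint64_t)(gpu_id) << 32) | (uint32_t)(idr_handle))
#define GET_GPU_ID(handle)      ((uint32_t)((handle) >> 32))
#define GET_IDR_HANDLE(handle)  ((uint32_t)((handle) & 0xffffffff))
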
1366 struct kfd_ioctl_free_memory_of_gpu_args *args = data; in kfd_ioctl_free_memory_of_gpu() local
1373 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1382 if (p->signal_handle && (p->signal_handle == args->handle)) { in kfd_ioctl_free_memory_of_gpu()
1396 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1410 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1422 struct kfd_ioctl_map_memory_to_gpu_args *args = data; in kfd_ioctl_map_memory_to_gpu() local
1431 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1435 if (!args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1439 if (args->n_success > args->n_devices) { in kfd_ioctl_map_memory_to_gpu()
1444 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_map_memory_to_gpu()
1450 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_map_memory_to_gpu()
1451 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_map_memory_to_gpu()
1466 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1472 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
1491 i, args->n_devices); in kfd_ioctl_map_memory_to_gpu()
1494 args->n_success = i+1; in kfd_ioctl_map_memory_to_gpu()
1507 for (i = 0; i < args->n_devices; i++) { in kfd_ioctl_map_memory_to_gpu()
1535 struct kfd_ioctl_unmap_memory_from_gpu_args *args = data; in kfd_ioctl_unmap_memory_from_gpu() local
1542 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1546 if (!args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1550 if (args->n_success > args->n_devices) { in kfd_ioctl_unmap_memory_from_gpu()
1555 devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), in kfd_ioctl_unmap_memory_from_gpu()
1561 (void __user *)args->device_ids_array_ptr, in kfd_ioctl_unmap_memory_from_gpu()
1562 args->n_devices * sizeof(*devices_arr)); in kfd_ioctl_unmap_memory_from_gpu()
1577 GET_IDR_HANDLE(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1583 for (i = args->n_success; i < args->n_devices; i++) { in kfd_ioctl_unmap_memory_from_gpu()
1599 i, args->n_devices); in kfd_ioctl_unmap_memory_from_gpu()
1602 args->n_success = i+1; in kfd_ioctl_unmap_memory_from_gpu()
1615 for (i = 0; i < args->n_devices; i++) { in kfd_ioctl_unmap_memory_from_gpu()
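
In both the map and unmap paths the per-device loop starts at args->n_success instead of 0, and the handler stores i + 1 back after each device succeeds. Since the ioctl writes args back to user space, a caller that hits a transient failure can fix the cause and reissue the very same args, skipping devices already done. Caller-side retry sketch (the three-attempt policy is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int map_on_devices(int kfd_fd, uint64_t handle,
                          const uint32_t *gpu_ids, uint32_t n)
{
        struct kfd_ioctl_map_memory_to_gpu_args args = {
                .handle               = handle,
                .device_ids_array_ptr = (uint64_t)(uintptr_t)gpu_ids,
                .n_devices            = n,
                .n_success            = 0,
        };
        int tries = 3, r;

        /* The kernel advances args.n_success as devices map; re-issuing
         * the same args resumes after the last success. */
        do {
                r = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args);
        } while (r && args.n_success < n && --tries);
        return r;
}
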
1643 struct kfd_ioctl_alloc_queue_gws_args *args = data; in kfd_ioctl_alloc_queue_gws() local
1648 q = pqm_get_user_queue(&p->pqm, args->queue_id); in kfd_ioctl_alloc_queue_gws()
1667 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL); in kfd_ioctl_alloc_queue_gws()
1670 args->first_gws = 0; in kfd_ioctl_alloc_queue_gws()
1681 struct kfd_ioctl_get_dmabuf_info_args *args = data; in kfd_ioctl_get_dmabuf_info() local
1696 if (args->metadata_ptr) { in kfd_ioctl_get_dmabuf_info()
1697 metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL); in kfd_ioctl_get_dmabuf_info()
1703 r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd, in kfd_ioctl_get_dmabuf_info()
1704 &dma_buf_kgd, &args->size, in kfd_ioctl_get_dmabuf_info()
1705 metadata_buffer, args->metadata_size, in kfd_ioctl_get_dmabuf_info()
1706 &args->metadata_size, &flags); in kfd_ioctl_get_dmabuf_info()
1716 args->gpu_id = dev->id; in kfd_ioctl_get_dmabuf_info()
1717 args->flags = flags; in kfd_ioctl_get_dmabuf_info()
1721 r = copy_to_user((void __user *)args->metadata_ptr, in kfd_ioctl_get_dmabuf_info()
1722 metadata_buffer, args->metadata_size); in kfd_ioctl_get_dmabuf_info()
1736 struct kfd_ioctl_import_dmabuf_args *args = data; in kfd_ioctl_import_dmabuf() local
1745 dev = kfd_device_by_id(args->gpu_id); in kfd_ioctl_import_dmabuf()
1749 dmabuf = dma_buf_get(args->dmabuf_fd); in kfd_ioctl_import_dmabuf()
1762 args->va_addr, pdd->drm_priv, in kfd_ioctl_import_dmabuf()
1777 args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); in kfd_ioctl_import_dmabuf()
1794 struct kfd_ioctl_smi_events_args *args = data; in kfd_ioctl_smi_events() local
1797 dev = kfd_device_by_id(args->gpuid); in kfd_ioctl_smi_events()
1801 return kfd_smi_event_open(dev, &args->anon_fd); in kfd_ioctl_smi_events()
1807 struct kfd_ioctl_set_xnack_mode_args *args = data; in kfd_ioctl_set_xnack_mode() local
1811 if (args->xnack_enabled >= 0) { in kfd_ioctl_set_xnack_mode()
1817 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) in kfd_ioctl_set_xnack_mode()
1820 p->xnack_enabled = args->xnack_enabled; in kfd_ioctl_set_xnack_mode()
1822 args->xnack_enabled = p->xnack_enabled; in kfd_ioctl_set_xnack_mode()
1832 struct kfd_ioctl_svm_args *args = data; in kfd_ioctl_svm() local
1836 args->start_addr, args->size, args->op, args->nattr); in kfd_ioctl_svm()
1838 if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK)) in kfd_ioctl_svm()
1840 if (!args->start_addr || !args->size) in kfd_ioctl_svm()
1845 r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, in kfd_ioctl_svm()
1846 args->attrs); in kfd_ioctl_svm()
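
kfd_ioctl_svm() rejects a range whose start or size has bits set below PAGE_MASK, i.e. both must be non-zero and page aligned. A caller-side helper that rounds an arbitrary buffer outward to page granularity before issuing the op (rounding outward is an illustrative policy; the driver does not do it for you):

#include <stdint.h>
#include <unistd.h>

/* Widen [addr, addr + size) to page boundaries so the range passes the
 * (start_addr & ~PAGE_MASK) and (size & ~PAGE_MASK) checks. */
static void page_align_range(uint64_t addr, uint64_t size,
                             uint64_t *start, uint64_t *len)
{
        uint64_t page = (uint64_t)sysconf(_SC_PAGESIZE);

        *start = addr & ~(page - 1);
        *len   = ((addr + size + page - 1) & ~(page - 1)) - *start;
}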