Lines matching refs: plr (cross-reference hits from the Linux kernel's arch/x86/kernel/cpu/resctrl/pseudo_lock.c; each hit shows its source line number and its enclosing function)

157 		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {  in region_find_by_minor()
175 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) in pseudo_lock_cstates_relax() argument
179 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { in pseudo_lock_cstates_relax()
204 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) in pseudo_lock_cstates_constrain() argument
210 for_each_cpu(cpu, &plr->d->cpu_mask) { in pseudo_lock_cstates_constrain()
228 list_add(&pm_req->list, &plr->pm_reqs); in pseudo_lock_cstates_constrain()
234 pseudo_lock_cstates_relax(plr); in pseudo_lock_cstates_constrain()
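A note on the pair above: pseudo_lock_cstates_constrain() and pseudo_lock_cstates_relax() act as an acquire/release pair. Constrain adds a DEV_PM_QOS_RESUME_LATENCY request for every CPU in the cache domain so that deep C-states cannot power down, and thereby lose, the pseudo-locked lines; relax walks plr->pm_reqs to drop those requests again. A condensed sketch of the constrain side, assuming the mainline dev_pm_qos API (the 30 us bound and the error reporting are paraphrased):

	/* Sketch: pin resume latency on each CPU of the cache domain. */
	static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
	{
		struct pseudo_lock_pm_req *pm_req;
		int cpu;
		int ret;

		for_each_cpu(cpu, &plr->d->cpu_mask) {
			pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
			if (!pm_req) {
				ret = -ENOMEM;
				goto out_err;
			}
			/* Forbid C-states whose resume latency exceeds 30 us. */
			ret = dev_pm_qos_add_request(get_cpu_device(cpu),
						     &pm_req->req,
						     DEV_PM_QOS_RESUME_LATENCY,
						     30);
			if (ret < 0) {
				kfree(pm_req);
				goto out_err;
			}
			list_add(&pm_req->list, &plr->pm_reqs);
		}
		return 0;

	out_err:
		/* Undo the requests added so far (the call on line 234 above). */
		pseudo_lock_cstates_relax(plr);
		return ret;
	}
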
247 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) in pseudo_lock_region_clear() argument
249 plr->size = 0; in pseudo_lock_region_clear()
250 plr->line_size = 0; in pseudo_lock_region_clear()
251 kfree(plr->kmem); in pseudo_lock_region_clear()
252 plr->kmem = NULL; in pseudo_lock_region_clear()
253 plr->s = NULL; in pseudo_lock_region_clear()
254 if (plr->d) in pseudo_lock_region_clear()
255 plr->d->plr = NULL; in pseudo_lock_region_clear()
256 plr->d = NULL; in pseudo_lock_region_clear()
257 plr->cbm = 0; in pseudo_lock_region_clear()
258 plr->debugfs_dir = NULL; in pseudo_lock_region_clear()
279 static int pseudo_lock_region_init(struct pseudo_lock_region *plr) in pseudo_lock_region_init() argument
286 plr->cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_region_init()
288 if (!cpu_online(plr->cpu)) { in pseudo_lock_region_init()
290 plr->cpu); in pseudo_lock_region_init()
295 ci = get_cpu_cacheinfo(plr->cpu); in pseudo_lock_region_init()
297 plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); in pseudo_lock_region_init()
300 if (ci->info_list[i].level == plr->s->res->cache_level) { in pseudo_lock_region_init()
301 plr->line_size = ci->info_list[i].coherency_line_size; in pseudo_lock_region_init()
309 pseudo_lock_region_clear(plr); in pseudo_lock_region_init()
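pseudo_lock_region_init() ties the hits above together: it picks the first CPU of the domain, checks that it is online, converts the CBM into a region size in bytes, and reads the cache line size from that CPU's cacheinfo at the resource's cache level. A sketch under those assumptions (error strings and the exact return code are paraphrased):

	static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
	{
		struct cpu_cacheinfo *ci;
		int ret = -ENXIO;	/* sketch value; mainline may differ */
		int i;

		/* All of the locking work happens on one CPU of the domain. */
		plr->cpu = cpumask_first(&plr->d->cpu_mask);
		if (!cpu_online(plr->cpu))
			goto out;

		ci = get_cpu_cacheinfo(plr->cpu);
		/* Translate the capacity bitmask into bytes of cache. */
		plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);

		for (i = 0; i < ci->num_leaves; i++) {
			if (ci->info_list[i].level == plr->s->res->cache_level) {
				plr->line_size = ci->info_list[i].coherency_line_size;
				return 0;
			}
		}
	out:
		/* No usable CPU or cache level: undo the partial setup. */
		pseudo_lock_region_clear(plr);
		return ret;
	}
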
326 struct pseudo_lock_region *plr; in pseudo_lock_init() local
328 plr = kzalloc(sizeof(*plr), GFP_KERNEL); in pseudo_lock_init()
329 if (!plr) in pseudo_lock_init()
332 init_waitqueue_head(&plr->lock_thread_wq); in pseudo_lock_init()
333 INIT_LIST_HEAD(&plr->pm_reqs); in pseudo_lock_init()
334 rdtgrp->plr = plr; in pseudo_lock_init()
348 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) in pseudo_lock_region_alloc() argument
352 ret = pseudo_lock_region_init(plr); in pseudo_lock_region_alloc()
360 if (plr->size > KMALLOC_MAX_SIZE) { in pseudo_lock_region_alloc()
366 plr->kmem = kzalloc(plr->size, GFP_KERNEL); in pseudo_lock_region_alloc()
367 if (!plr->kmem) { in pseudo_lock_region_alloc()
376 pseudo_lock_region_clear(plr); in pseudo_lock_region_alloc()
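pseudo_lock_region_alloc() backs the region with kzalloc() because both the locking loop and the later mmap path need physically contiguous memory; that choice also caps the region at KMALLOC_MAX_SIZE, which is the check visible on line 360. A sketch with the log messages elided:

	static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
	{
		int ret;

		ret = pseudo_lock_region_init(plr);
		if (ret < 0)
			return ret;

		/*
		 * kzalloc() yields physically contiguous memory but cannot
		 * serve requests larger than KMALLOC_MAX_SIZE.
		 */
		if (plr->size > KMALLOC_MAX_SIZE) {
			ret = -E2BIG;
			goto out_region;
		}

		plr->kmem = kzalloc(plr->size, GFP_KERNEL);
		if (!plr->kmem) {
			ret = -ENOMEM;
			goto out_region;
		}
		return 0;

	out_region:
		pseudo_lock_region_clear(plr);
		return ret;
	}
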
393 pseudo_lock_region_clear(rdtgrp->plr); in pseudo_lock_free()
394 kfree(rdtgrp->plr); in pseudo_lock_free()
395 rdtgrp->plr = NULL; in pseudo_lock_free()
420 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_fn() local
469 mem_r = plr->kmem; in pseudo_lock_fn()
470 size = plr->size; in pseudo_lock_fn()
471 line_size = plr->line_size; in pseudo_lock_fn()
520 plr->thread_done = 1; in pseudo_lock_fn()
521 wake_up_interruptible(&plr->lock_thread_wq); in pseudo_lock_fn()
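pseudo_lock_fn() is the bound kthread that actually loads the region: with interrupts and hardware prefetchers off it writes the group's closid into the PQR_ASSOC MSR and then streams through plr->kmem (mem_r, size and line_size are the values captured on lines 469-471). A heavily condensed sketch of the critical section; the MSR save/restore and the preceding cache flush are elided, and the two-pass stride is the mainline idea, where the first pass warms the paging-structure caches and the second pulls every line into the region:

	local_irq_disable();
	/* ... disable hw prefetchers, flush cache, save PQR_ASSOC ... */

	/* Activate the CLOS whose CBM describes the region to lock
	 * (rmid_p is the previously saved RMID). */
	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);

	/* Pass 1: page-size stride, populates TLB/paging-structure caches. */
	for (i = 0; i < size; i += PAGE_SIZE)
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     : : "r" (mem_r), "r" (i) : "%eax", "memory");

	/* Pass 2: cache-line stride, pulls every line into the region. */
	for (i = 0; i < size; i += line_size)
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     : : "r" (mem_r), "r" (i) : "%eax", "memory");

	/* ... restore PQR_ASSOC and the prefetcher MSRs ... */
	local_irq_enable();

	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
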
802 if (d->plr) { in rdtgroup_cbm_overlaps_pseudo_locked()
803 cbm_len = d->plr->s->res->cache.cbm_len; in rdtgroup_cbm_overlaps_pseudo_locked()
804 cbm_b = d->plr->cbm; in rdtgroup_cbm_overlaps_pseudo_locked()
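The overlap check reduces to a bitmap intersection between the candidate CBM and the pseudo-locked region's CBM, sized by the resource's cbm_len. A sketch completing the function around the two hits above:

	bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d,
						 unsigned long cbm)
	{
		unsigned int cbm_len;
		unsigned long cbm_b;

		if (d->plr) {
			cbm_len = d->plr->s->res->cache.cbm_len;
			cbm_b = d->plr->cbm;
			/* Any shared bit means the allocations overlap. */
			if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
				return true;
		}
		return false;
	}
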
840 if (d_i->plr) in rdtgroup_pseudo_locked_in_hierarchy()
873 struct pseudo_lock_region *plr = _plr; in measure_cycles_lat_fn() local
883 mem_r = READ_ONCE(plr->kmem); in measure_cycles_lat_fn()
889 for (i = 0; i < plr->size; i += 32) { in measure_cycles_lat_fn()
900 plr->thread_done = 1; in measure_cycles_lat_fn()
901 wake_up_interruptible(&plr->lock_thread_wq); in measure_cycles_lat_fn()
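measure_cycles_lat_fn() brackets a single read with rdtsc_ordered() and reports each delta through a tracepoint; the 32-byte stride is the one visible on line 889. A sketch of the measurement loop, with the prefetcher MSR save/restore elided:

	local_irq_disable();
	/* ... disable hw prefetchers ... */
	mem_r = READ_ONCE(plr->kmem);

	for (i = 0; i < plr->size; i += 32) {
		start = rdtsc_ordered();
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     : : "r" (mem_r), "r" (i) : "%eax", "memory");
		end = rdtsc_ordered();
		/* One sample per access; consumed via the tracepoint. */
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}

	/* ... restore prefetchers ... */
	local_irq_enable();
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);
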
937 struct pseudo_lock_region *plr, in measure_residency_fn() argument
949 miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu, in measure_residency_fn()
954 hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu, in measure_residency_fn()
985 line_size = READ_ONCE(plr->line_size); in measure_residency_fn()
986 mem_r = READ_ONCE(plr->kmem); in measure_residency_fn()
987 size = READ_ONCE(plr->size); in measure_residency_fn()
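measure_residency_fn() is shared by the L2 and L3 measurements: it opens a cache-miss and a cache-hit perf event as kernel counters pinned to plr->cpu, then replays the read loop and compares the two counts. A fragment sketch of the counter setup; a NULL task and a NULL overflow handler make these plain CPU-bound kernel counters:

	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
						      NULL, NULL, NULL);
	if (IS_ERR(miss_event))
		goto out;

	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
						     NULL, NULL, NULL);
	if (IS_ERR(hit_event))
		goto out_miss;
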
1053 struct pseudo_lock_region *plr = _plr; in measure_l2_residency() local
1076 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l2_residency()
1084 plr->thread_done = 1; in measure_l2_residency()
1085 wake_up_interruptible(&plr->lock_thread_wq); in measure_l2_residency()
1091 struct pseudo_lock_region *plr = _plr; in measure_l3_residency() local
1115 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l3_residency()
1140 plr->thread_done = 1; in measure_l3_residency()
1141 wake_up_interruptible(&plr->lock_thread_wq); in measure_l3_residency()
1159 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_measure_cycles() local
1172 if (!plr->d) { in pseudo_lock_measure_cycles()
1177 plr->thread_done = 0; in pseudo_lock_measure_cycles()
1178 cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_measure_cycles()
1184 plr->cpu = cpu; in pseudo_lock_measure_cycles()
1187 thread = kthread_create_on_node(measure_cycles_lat_fn, plr, in pseudo_lock_measure_cycles()
1192 thread = kthread_create_on_node(measure_l2_residency, plr, in pseudo_lock_measure_cycles()
1197 thread = kthread_create_on_node(measure_l3_residency, plr, in pseudo_lock_measure_cycles()
1211 ret = wait_event_interruptible(plr->lock_thread_wq, in pseudo_lock_measure_cycles()
1212 plr->thread_done == 1); in pseudo_lock_measure_cycles()
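The measurement trigger follows the same kthread pattern as the locking itself: create the thread on the domain's NUMA node, bind it to the first CPU of the domain, and sleep until the thread flips thread_done. A sketch showing only the latency case; in mainline a selector picks between the three measure functions seen above:

	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->cpu_mask);
	if (!cpu_online(cpu))
		goto out;
	plr->cpu = cpu;

	thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
					cpu_to_node(cpu),
					"pseudo_lock_measure/%u", cpu);
	if (IS_ERR(thread))
		goto out;

	kthread_bind(thread, cpu);	/* must run on the locked domain */
	wake_up_process(thread);

	/* Interruptible wait: a signal leaves the kthread to finish alone. */
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
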
1279 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_create() local
1285 ret = pseudo_lock_region_alloc(plr); in rdtgroup_pseudo_lock_create()
1289 ret = pseudo_lock_cstates_constrain(plr); in rdtgroup_pseudo_lock_create()
1295 plr->thread_done = 0; in rdtgroup_pseudo_lock_create()
1298 cpu_to_node(plr->cpu), in rdtgroup_pseudo_lock_create()
1299 "pseudo_lock/%u", plr->cpu); in rdtgroup_pseudo_lock_create()
1306 kthread_bind(thread, plr->cpu); in rdtgroup_pseudo_lock_create()
1309 ret = wait_event_interruptible(plr->lock_thread_wq, in rdtgroup_pseudo_lock_create()
1310 plr->thread_done == 1); in rdtgroup_pseudo_lock_create()
1342 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, in rdtgroup_pseudo_lock_create()
1344 if (!IS_ERR_OR_NULL(plr->debugfs_dir)) in rdtgroup_pseudo_lock_create()
1346 plr->debugfs_dir, rdtgrp, in rdtgroup_pseudo_lock_create()
1369 plr->minor = new_minor; in rdtgroup_pseudo_lock_create()
1382 debugfs_remove_recursive(plr->debugfs_dir); in rdtgroup_pseudo_lock_create()
1385 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_create()
1387 pseudo_lock_region_clear(plr); in rdtgroup_pseudo_lock_create()
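Taken together, the hits in rdtgroup_pseudo_lock_create() show a strict setup order with a matching reverse-order unwind: allocate the region, constrain C-states, run pseudo_lock_fn() bound to plr->cpu, then publish the region through debugfs and a character-device minor. A skeleton sketch of that ordering; the labels and intermediate steps are paraphrased from the hits above, with the middle elided:

	ret = pseudo_lock_region_alloc(plr);		/* contiguous kmem */
	if (ret < 0)
		return ret;

	ret = pseudo_lock_cstates_constrain(plr);	/* PM QoS per CPU */
	if (ret < 0)
		goto out_region;

	/*
	 * ... create/bind/run the pseudo_lock_fn kthread, wait on
	 * plr->lock_thread_wq, obtain a minor, create the debugfs dir and
	 * the character device; failures here jump to the labels below ...
	 */

	plr->minor = new_minor;
	return 0;

out_debugfs:
	debugfs_remove_recursive(plr->debugfs_dir);
out_cstates:
	pseudo_lock_cstates_relax(plr);
out_region:
	pseudo_lock_region_clear(plr);
	return ret;
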
1408 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_remove() local
1419 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_remove()
1420 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); in rdtgroup_pseudo_lock_remove()
1421 device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); in rdtgroup_pseudo_lock_remove()
1422 pseudo_lock_minor_release(plr->minor); in rdtgroup_pseudo_lock_remove()
1481 struct pseudo_lock_region *plr; in pseudo_lock_dev_mmap() local
1495 plr = rdtgrp->plr; in pseudo_lock_dev_mmap()
1497 if (!plr->d) { in pseudo_lock_dev_mmap()
1508 if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { in pseudo_lock_dev_mmap()
1513 physical = __pa(plr->kmem) >> PAGE_SHIFT; in pseudo_lock_dev_mmap()
1514 psize = plr->size - off; in pseudo_lock_dev_mmap()
1516 if (off > plr->size) { in pseudo_lock_dev_mmap()
1535 memset(plr->kmem + off, 0, vsize); in pseudo_lock_dev_mmap()
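The mmap handler finishes by zeroing the requested window and remapping the physical pages of plr->kmem into the caller's VMA; because the memory came from kzalloc() it is physically contiguous, so a single remap_pfn_range() suffices. A sketch of the tail of the function, continuing from the memset on line 1535:

	memset(plr->kmem + off, 0, vsize);

	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
			    vsize, vma->vm_page_prot)) {
		mutex_unlock(&rdtgroup_mutex);
		return -EAGAIN;
	}
	/* Custom vm_ops keep later remapping of the region in check. */
	vma->vm_ops = &pseudo_mmap_ops;
	mutex_unlock(&rdtgroup_mutex);
	return 0;
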