Lines Matching refs:pool
201 struct worker_pool *pool; /* I: the associated pool */ member
378 static void show_one_worker_pool(struct worker_pool *pool);
394 #define for_each_cpu_worker_pool(pool, cpu) \ argument
395 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
396 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
397 (pool)++)
411 #define for_each_pool(pool, pi) \ argument
412 idr_for_each_entry(&worker_pool_idr, pool, pi) \
426 #define for_each_pool_worker(worker, pool) \ argument
427 list_for_each_entry((worker), &(pool)->workers, node) \
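
The three iteration macros above walk the per-CPU pool pairs, every pool registered in worker_pool_idr, and the workers attached to one pool. As a hedged illustration only (not kernel code), here is a small standalone C model of the per-CPU variant, with a plain 2-D array standing in for the per_cpu() data and a reduced struct worker_pool:

/*
 * Standalone sketch: models how iteration macros of this shape are used.
 * The real macros walk per-cpu pool arrays, an IDR and pool->workers;
 * here a plain array stands in for the per-cpu data.
 */
#include <stdio.h>

#define NR_STD_WORKER_POOLS 2
#define NR_CPUS 4

struct worker_pool { int id; int cpu; };

/* stand-in for per_cpu(cpu_worker_pools, cpu)[...] */
static struct worker_pool cpu_worker_pools[NR_CPUS][NR_STD_WORKER_POOLS];

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &cpu_worker_pools[cpu][0];			\
	     (pool) < &cpu_worker_pools[cpu][NR_STD_WORKER_POOLS];	\
	     (pool)++)

int main(void)
{
	struct worker_pool *pool;
	int cpu, id = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for_each_cpu_worker_pool(pool, cpu) {
			pool->cpu = cpu;
			pool->id = id++;
		}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for_each_cpu_worker_pool(pool, cpu)
			printf("cpu %d pool %d\n", pool->cpu, pool->id);
	return 0;
}
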
551 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
557 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
560 pool->id = ret; in worker_pool_assign_id()
734 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; in get_work_pool()
756 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; in get_work_pool_id()
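
get_work_pool() and get_work_pool_id() above decode work->data, which holds either a pool_workqueue pointer or a bare pool id depending on the WORK_STRUCT flag bits. The following sketch models that tagged-word idea with assumed flag and mask values; it is not the kernel's actual bit layout.

/*
 * Illustrative sketch only: a work->data style tagged word holding either
 * a pool_workqueue pointer or a bare pool id.  Flag bit and mask values
 * are made up for this sketch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct worker_pool { int id; };
struct pool_workqueue { struct worker_pool *pool; };

#define DATA_IS_PWQ	0x1UL		/* assumed flag bit */
#define DATA_PTR_MASK	(~0x3UL)	/* pointers are at least 4-byte aligned */

static uintptr_t pack_pwq(struct pool_workqueue *pwq)
{
	return (uintptr_t)pwq | DATA_IS_PWQ;
}

static uintptr_t pack_pool_id(int id)
{
	return (uintptr_t)id << 2;	/* keep the flag bits clear */
}

static int data_to_pool_id(uintptr_t data)
{
	if (data & DATA_IS_PWQ)
		return ((struct pool_workqueue *)(data & DATA_PTR_MASK))->pool->id;
	return (int)(data >> 2);
}

int main(void)
{
	struct worker_pool pool = { .id = 7 };
	struct pool_workqueue pwq = { .pool = &pool };

	assert(data_to_pool_id(pack_pwq(&pwq)) == 7);
	assert(data_to_pool_id(pack_pool_id(42)) == 42);
	printf("tagged-word round trips ok\n");
	return 0;
}
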
782 static bool __need_more_worker(struct worker_pool *pool) in __need_more_worker() argument
784 return !atomic_read(&pool->nr_running); in __need_more_worker()
795 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
797 return !list_empty(&pool->worklist) && __need_more_worker(pool); in need_more_worker()
801 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
803 return pool->nr_idle; in may_start_working()
807 static bool keep_working(struct worker_pool *pool) in keep_working() argument
809 return !list_empty(&pool->worklist) && in keep_working()
810 atomic_read(&pool->nr_running) <= 1; in keep_working()
814 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
816 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
820 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
822 bool managing = pool->flags & POOL_MANAGER_ACTIVE; in too_many_workers()
823 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
824 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
834 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
836 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
839 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
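
Lines 782-839 are the pool's self-regulation predicates. The reduced userspace model below mirrors their decision logic with plain ints instead of atomics and list heads; the too_many_workers() ratio check is not part of the excerpt, so the 2-idle floor and 1:4 idle-to-busy ratio used here are an assumption based on the kernel's MAX_IDLE_WORKERS_RATIO heuristic.

/*
 * Sketch only: mirrors the decision logic of __need_more_worker(),
 * need_more_worker(), keep_working(), need_to_create_worker() and
 * too_many_workers() with plain ints instead of atomics and list heads.
 */
#include <stdbool.h>
#include <stdio.h>

struct pool_state {
	int nr_running;   /* workers currently on CPU for this pool */
	int nr_idle;      /* workers parked on the idle list */
	int nr_workers;   /* total workers attached to the pool */
	int worklist_len; /* pending work items */
	bool managing;    /* POOL_MANAGER_ACTIVE */
};

static bool need_more_worker(const struct pool_state *p)
{
	return p->worklist_len && !p->nr_running;
}

static bool may_start_working(const struct pool_state *p)
{
	return p->nr_idle;
}

static bool keep_working(const struct pool_state *p)
{
	return p->worklist_len && p->nr_running <= 1;
}

static bool need_to_create_worker(const struct pool_state *p)
{
	return need_more_worker(p) && !may_start_working(p);
}

static bool too_many_workers(const struct pool_state *p)
{
	int nr_idle = p->nr_idle + p->managing; /* manager counts as idle */
	int nr_busy = p->nr_workers - nr_idle;

	/* assumed heuristic: keep 2 idle, trim beyond a 1:4 idle/busy ratio */
	return nr_idle > 2 && (nr_idle - 2) * 4 >= nr_busy;
}

int main(void)
{
	struct pool_state p = { .nr_workers = 8, .nr_idle = 7, .worklist_len = 1 };

	printf("need_more_worker=%d need_to_create_worker=%d too_many_workers=%d\n",
	       need_more_worker(&p), need_to_create_worker(&p), too_many_workers(&p));
	return 0;
}
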
851 static void wake_up_worker(struct worker_pool *pool) in wake_up_worker() argument
853 struct worker *worker = first_idle_worker(pool); in wake_up_worker()
872 atomic_inc(&worker->pool->nr_running); in wq_worker_running()
887 struct worker_pool *pool; in wq_worker_sleeping() local
897 pool = worker->pool; in wq_worker_sleeping()
904 raw_spin_lock_irq(&pool->lock); in wq_worker_sleeping()
917 if (atomic_dec_and_test(&pool->nr_running) && in wq_worker_sleeping()
918 !list_empty(&pool->worklist)) { in wq_worker_sleeping()
919 next = first_idle_worker(pool); in wq_worker_sleeping()
923 raw_spin_unlock_irq(&pool->lock); in wq_worker_sleeping()
969 struct worker_pool *pool = worker->pool; in worker_set_flags() local
976 atomic_dec(&pool->nr_running); in worker_set_flags()
994 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
1008 atomic_inc(&pool->nr_running); in worker_clr_flags()
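
Lines 851-1008 keep nr_running in step with what the scheduler is doing so the pool runs just enough workers. A loose sketch of that bookkeeping, with atomics, the idle list and the scheduler hooks collapsed into plain ints and direct calls:

/*
 * Sketch of the nr_running bookkeeping: when a running worker blocks,
 * nr_running drops and, if work is still pending, the pool's first idle
 * worker is woken, cf. wq_worker_sleeping() and wake_up_worker() above.
 */
#include <stdbool.h>
#include <stdio.h>

struct worker_pool {
	int nr_running;
	int nr_idle;
	int worklist_len;
};

static bool wake_up_worker(struct worker_pool *pool)
{
	if (!pool->nr_idle)
		return false;
	/* an idle worker becomes runnable; the kernel bumps nr_running
	 * later, from wq_worker_running(), once it is actually on CPU */
	pool->nr_idle--;
	pool->nr_running++;
	return true;
}

/* called when a running worker is about to block, cf. wq_worker_sleeping() */
static void worker_sleeping(struct worker_pool *pool)
{
	if (--pool->nr_running == 0 && pool->worklist_len)
		wake_up_worker(pool);
}

int main(void)
{
	struct worker_pool pool = { .nr_running = 1, .nr_idle = 1, .worklist_len = 3 };

	worker_sleeping(&pool);
	printf("nr_running=%d nr_idle=%d\n", pool.nr_running, pool.nr_idle);
	return 0;
}
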
1044 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1049 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
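
find_worker_executing_work() above hashes on the work item's address to find the worker already running it. A standalone sketch of the same lookup pattern follows (tiny chained hash table, made-up sizes); the real comparison also checks the work function so a recycled address is not mistaken for the same item:

/*
 * Sketch, not kernel code: the busy_hash lookup pattern used by
 * find_worker_executing_work(), keyed on the work item's address.
 */
#include <stdio.h>
#include <stdint.h>

#define BUSY_HASH_SIZE 16

struct work_struct { void (*func)(struct work_struct *); };

struct worker {
	struct worker *hash_next;
	struct work_struct *current_work;
	void (*current_func)(struct work_struct *);
	int id;
};

static struct worker *busy_hash[BUSY_HASH_SIZE];

static unsigned int hash_work(struct work_struct *work)
{
	return ((uintptr_t)work >> 4) % BUSY_HASH_SIZE;
}

static void hash_add_worker(struct worker *w, struct work_struct *work)
{
	unsigned int b = hash_work(work);

	w->current_work = work;
	w->current_func = work->func;
	w->hash_next = busy_hash[b];
	busy_hash[b] = w;
}

static struct worker *find_worker_executing(struct work_struct *work)
{
	struct worker *w;

	for (w = busy_hash[hash_work(work)]; w; w = w->hash_next)
		if (w->current_work == work && w->current_func == work->func)
			return w;
	return NULL;
}

static void dummy_fn(struct work_struct *w) { (void)w; }

int main(void)
{
	struct work_struct work = { .func = dummy_fn };
	struct worker busy = { .id = 3 };

	hash_add_worker(&busy, &work);
	printf("executing worker id=%d\n", find_worker_executing(&work)->id);
	return 0;
}
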
1108 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1122 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1151 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1153 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1162 if (list_empty(&pwq->pool->worklist)) in pwq_activate_inactive_work()
1163 pwq->pool->watchdog_ts = jiffies; in pwq_activate_inactive_work()
1164 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1257 struct worker_pool *pool; in try_to_grab_pending() local
1284 pool = get_work_pool(work); in try_to_grab_pending()
1285 if (!pool) in try_to_grab_pending()
1288 raw_spin_lock(&pool->lock); in try_to_grab_pending()
1298 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1319 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1321 raw_spin_unlock(&pool->lock); in try_to_grab_pending()
1325 raw_spin_unlock(&pool->lock); in try_to_grab_pending()
1351 struct worker_pool *pool = pwq->pool; in insert_work() local
1368 if (__need_more_worker(pool)) in insert_work()
1369 wake_up_worker(pool); in insert_work()
1462 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1474 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1477 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1490 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1511 worklist = &pwq->pool->worklist; in __queue_work()
1513 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1523 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1796 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1805 pool->nr_idle++; in worker_enter_idle()
1809 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1811 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1812 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1820 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in worker_enter_idle()
1821 pool->nr_workers == pool->nr_idle && in worker_enter_idle()
1822 atomic_read(&pool->nr_running)); in worker_enter_idle()
1836 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1841 pool->nr_idle--; in worker_leave_idle()
1870 struct worker_pool *pool) in worker_attach_to_pool() argument
1879 if (pool->flags & POOL_DISASSOCIATED) in worker_attach_to_pool()
1882 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
1885 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1887 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1888 worker->pool = pool; in worker_attach_to_pool()
1903 struct worker_pool *pool = worker->pool; in worker_detach_from_pool() local
1910 worker->pool = NULL; in worker_detach_from_pool()
1912 if (list_empty(&pool->workers)) in worker_detach_from_pool()
1913 detach_completion = pool->detach_completion; in worker_detach_from_pool()
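
Lines 1870-1913 attach a worker to the pool it will serve and, on detach, let the last worker out signal whoever is tearing the pool down. The sketch below reduces the workers list to a counter and the completion to a bool; the kernel serializes both paths with a pool-attach mutex, omitted here.

/*
 * Sketch of the attach/detach bookkeeping: the last worker to detach
 * completes pool->detach_completion so pool destruction can proceed.
 */
#include <stdbool.h>
#include <stdio.h>

struct worker_pool {
	int nr_attached;
	bool *detach_done;	/* stands in for pool->detach_completion */
};

struct worker { struct worker_pool *pool; };

static void worker_attach(struct worker *w, struct worker_pool *pool)
{
	pool->nr_attached++;
	w->pool = pool;
}

static void worker_detach(struct worker *w)
{
	struct worker_pool *pool = w->pool;

	w->pool = NULL;
	if (--pool->nr_attached == 0 && pool->detach_done)
		*pool->detach_done = true;	/* complete(detach_completion) */
}

int main(void)
{
	bool done = false;
	struct worker_pool pool = { .detach_done = &done };
	struct worker a, b;

	worker_attach(&a, &pool);
	worker_attach(&b, &pool);
	worker_detach(&a);
	worker_detach(&b);
	printf("all workers detached: %s\n", done ? "yes" : "no");
	return 0;
}
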
1935 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
1942 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); in create_worker()
1946 worker = alloc_worker(pool->node); in create_worker()
1952 if (pool->cpu >= 0) in create_worker()
1953 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
1954 pool->attrs->nice < 0 ? "H" : ""); in create_worker()
1956 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
1958 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1963 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1964 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
1967 worker_attach_to_pool(worker, pool); in create_worker()
1970 raw_spin_lock_irq(&pool->lock); in create_worker()
1971 worker->pool->nr_workers++; in create_worker()
1974 raw_spin_unlock_irq(&pool->lock); in create_worker()
1979 ida_free(&pool->worker_ida, id); in create_worker()
1996 struct worker_pool *pool = worker->pool; in destroy_worker() local
1998 lockdep_assert_held(&pool->lock); in destroy_worker()
2006 pool->nr_workers--; in destroy_worker()
2007 pool->nr_idle--; in destroy_worker()
2016 struct worker_pool *pool = from_timer(pool, t, idle_timer); in idle_worker_timeout() local
2018 raw_spin_lock_irq(&pool->lock); in idle_worker_timeout()
2020 while (too_many_workers(pool)) { in idle_worker_timeout()
2025 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
2029 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
2036 raw_spin_unlock_irq(&pool->lock); in idle_worker_timeout()
2064 struct worker_pool *pool = from_timer(pool, t, mayday_timer); in pool_mayday_timeout() local
2067 raw_spin_lock_irq(&pool->lock); in pool_mayday_timeout()
2070 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
2077 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
2082 raw_spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
2084 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
2105 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
2106 __releases(&pool->lock) in maybe_create_worker()
2107 __acquires(&pool->lock) in maybe_create_worker()
2110 raw_spin_unlock_irq(&pool->lock); in maybe_create_worker()
2113 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
2116 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
2121 if (!need_to_create_worker(pool)) in maybe_create_worker()
2125 del_timer_sync(&pool->mayday_timer); in maybe_create_worker()
2126 raw_spin_lock_irq(&pool->lock); in maybe_create_worker()
2132 if (need_to_create_worker(pool)) in maybe_create_worker()
2160 struct worker_pool *pool = worker->pool; in manage_workers() local
2162 if (pool->flags & POOL_MANAGER_ACTIVE) in manage_workers()
2165 pool->flags |= POOL_MANAGER_ACTIVE; in manage_workers()
2166 pool->manager = worker; in manage_workers()
2168 maybe_create_worker(pool); in manage_workers()
2170 pool->manager = NULL; in manage_workers()
2171 pool->flags &= ~POOL_MANAGER_ACTIVE; in manage_workers()
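
manage_workers() at lines 2160-2171 lets whichever worker notices a shortage become the pool's manager, but only one at a time via POOL_MANAGER_ACTIVE. A minimal model of that arbitration (flags as plain bools, maybe_create_worker() stubbed out, no locking):

/*
 * Sketch of the manager arbitration: only one worker may hold
 * POOL_MANAGER_ACTIVE; everyone else backs off immediately.
 */
#include <stdbool.h>
#include <stdio.h>

struct worker_pool { bool manager_active; int nr_workers; };

static void maybe_create_worker(struct worker_pool *pool)
{
	pool->nr_workers++;	/* placeholder for the create/retry loop */
}

static bool manage_workers(struct worker_pool *pool)
{
	if (pool->manager_active)
		return false;	/* somebody else is already managing */

	pool->manager_active = true;
	maybe_create_worker(pool);
	pool->manager_active = false;
	return true;
}

int main(void)
{
	struct worker_pool pool = { 0 };

	printf("managed=%d nr_workers=%d\n", manage_workers(&pool), pool.nr_workers);
	return 0;
}
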
2191 __releases(&pool->lock) in process_one_work()
2192 __acquires(&pool->lock) in process_one_work()
2195 struct worker_pool *pool = worker->pool; in process_one_work() local
2212 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
2213 raw_smp_processor_id() != pool->cpu); in process_one_work()
2221 collision = find_worker_executing_work(pool, work); in process_one_work()
2229 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2260 if (need_more_worker(pool)) in process_one_work()
2261 wake_up_worker(pool); in process_one_work()
2269 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2271 raw_spin_unlock_irq(&pool->lock); in process_one_work()
2326 raw_spin_lock_irq(&pool->lock); in process_one_work()
2390 struct worker_pool *pool = worker->pool; in worker_thread() local
2395 raw_spin_lock_irq(&pool->lock); in worker_thread()
2399 raw_spin_unlock_irq(&pool->lock); in worker_thread()
2404 ida_free(&pool->worker_ida, worker->id); in worker_thread()
2413 if (!need_more_worker(pool)) in worker_thread()
2417 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2438 list_first_entry(&pool->worklist, in worker_thread()
2441 pool->watchdog_ts = jiffies; in worker_thread()
2452 } while (keep_working(pool)); in worker_thread()
2465 raw_spin_unlock_irq(&pool->lock); in worker_thread()
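
The worker_thread() lines above (2390-2465) show the shape of the main loop: leave idle, drain the pool's worklist while keep_working() allows, then go idle again. A toy version with work items as ints and all locking, flags and the manager path omitted:

/*
 * Sketch of the worker_thread() loop structure; work items are ints in a
 * fixed ring and keep_working() is reduced to "worklist non-empty".
 */
#include <stdbool.h>
#include <stdio.h>

#define QLEN 8

struct worker_pool {
	int worklist[QLEN];
	int head, tail;
	int nr_idle;
};

static bool worklist_empty(struct worker_pool *p) { return p->head == p->tail; }

static void process_one_work(int work) { printf("processing work %d\n", work); }

static void worker_loop_once(struct worker_pool *pool)
{
	pool->nr_idle--;			/* worker_leave_idle() */

	while (!worklist_empty(pool)) {		/* keep_working(), reduced */
		int work = pool->worklist[pool->head++ % QLEN];
		process_one_work(work);
	}

	pool->nr_idle++;			/* worker_enter_idle() */
}

int main(void)
{
	struct worker_pool pool = { .nr_idle = 1 };

	pool.worklist[pool.tail++ % QLEN] = 1;
	pool.worklist[pool.tail++ % QLEN] = 2;
	worker_loop_once(&pool);
	printf("idle workers: %d\n", pool.nr_idle);
	return 0;
}
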
2524 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
2533 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
2535 raw_spin_lock_irq(&pool->lock); in rescuer_thread()
2542 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
2545 pool->watchdog_ts = jiffies; in rescuer_thread()
2563 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
2588 if (need_more_worker(pool)) in rescuer_thread()
2589 wake_up_worker(pool); in rescuer_thread()
2591 raw_spin_unlock_irq(&pool->lock); in rescuer_thread()
2771 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs() local
2773 raw_spin_lock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2790 raw_spin_unlock_irq(&pool->lock); in flush_workqueue_prep_pwqs()
2990 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2992 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
3016 struct worker_pool *pool; in start_flush_work() local
3022 pool = get_work_pool(work); in start_flush_work()
3023 if (!pool) { in start_flush_work()
3028 raw_spin_lock_irq(&pool->lock); in start_flush_work()
3032 if (unlikely(pwq->pool != pool)) in start_flush_work()
3035 worker = find_worker_executing_work(pool, work); in start_flush_work()
3044 raw_spin_unlock_irq(&pool->lock); in start_flush_work()
3063 raw_spin_unlock_irq(&pool->lock); in start_flush_work()
3454 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
3456 raw_spin_lock_init(&pool->lock); in init_worker_pool()
3457 pool->id = -1; in init_worker_pool()
3458 pool->cpu = -1; in init_worker_pool()
3459 pool->node = NUMA_NO_NODE; in init_worker_pool()
3460 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
3461 pool->watchdog_ts = jiffies; in init_worker_pool()
3462 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
3463 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
3464 hash_init(pool->busy_hash); in init_worker_pool()
3466 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); in init_worker_pool()
3468 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); in init_worker_pool()
3470 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
3472 ida_init(&pool->worker_ida); in init_worker_pool()
3473 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
3474 pool->refcnt = 1; in init_worker_pool()
3477 pool->attrs = alloc_workqueue_attrs(); in init_worker_pool()
3478 if (!pool->attrs) in init_worker_pool()
3538 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
3540 ida_destroy(&pool->worker_ida); in rcu_free_pool()
3541 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
3542 kfree(pool); in rcu_free_pool()
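
init_worker_pool() at lines 3454-3478 starts every pool as unassigned and disassociated; rcu_free_pool() is its RCU-deferred teardown counterpart. A stripped-down sketch of those defaults (list heads, timers, busy hash and the worker IDA left out):

/*
 * Sketch of the initialization defaults: a fresh pool starts unbound
 * (-1 cpu, -1 id, no NUMA node), disassociated and with one reference.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct worker_pool {
	int id, cpu, node;
	bool disassociated;
	int refcnt;
};

static int init_worker_pool(struct worker_pool *pool)
{
	pool->id = -1;
	pool->cpu = -1;
	pool->node = NUMA_NO_NODE;
	pool->disassociated = true;	/* POOL_DISASSOCIATED */
	pool->refcnt = 1;
	return 0;
}

int main(void)
{
	struct worker_pool *pool = calloc(1, sizeof(*pool));

	if (!pool || init_worker_pool(pool) < 0)
		return 1;
	printf("pool id=%d cpu=%d refcnt=%d\n", pool->id, pool->cpu, pool->refcnt);
	free(pool);
	return 0;
}
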
3546 static bool wq_manager_inactive(struct worker_pool *pool) in wq_manager_inactive() argument
3548 raw_spin_lock_irq(&pool->lock); in wq_manager_inactive()
3550 if (pool->flags & POOL_MANAGER_ACTIVE) { in wq_manager_inactive()
3551 raw_spin_unlock_irq(&pool->lock); in wq_manager_inactive()
3568 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
3575 if (--pool->refcnt) in put_unbound_pool()
3579 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
3580 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
3584 if (pool->id >= 0) in put_unbound_pool()
3585 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
3586 hash_del(&pool->hash_node); in put_unbound_pool()
3595 rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool), in put_unbound_pool()
3597 pool->flags |= POOL_MANAGER_ACTIVE; in put_unbound_pool()
3599 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3601 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
3602 raw_spin_unlock_irq(&pool->lock); in put_unbound_pool()
3605 if (!list_empty(&pool->workers)) in put_unbound_pool()
3606 pool->detach_completion = &detach_completion; in put_unbound_pool()
3609 if (pool->detach_completion) in put_unbound_pool()
3610 wait_for_completion(pool->detach_completion); in put_unbound_pool()
3613 del_timer_sync(&pool->idle_timer); in put_unbound_pool()
3614 del_timer_sync(&pool->mayday_timer); in put_unbound_pool()
3617 call_rcu(&pool->rcu, rcu_free_pool); in put_unbound_pool()
3637 struct worker_pool *pool; in get_unbound_pool() local
3644 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
3645 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
3646 pool->refcnt++; in get_unbound_pool()
3647 return pool; in get_unbound_pool()
3663 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node); in get_unbound_pool()
3664 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
3667 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ in get_unbound_pool()
3668 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
3669 pool->node = target_node; in get_unbound_pool()
3675 pool->attrs->no_numa = false; in get_unbound_pool()
3677 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
3681 if (wq_online && !create_worker(pool)) in get_unbound_pool()
3685 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
3687 return pool; in get_unbound_pool()
3689 if (pool) in get_unbound_pool()
3690 put_unbound_pool(pool); in get_unbound_pool()
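
get_unbound_pool() at lines 3637-3690 shares pools between workqueues with equal attributes: a hash hit only bumps the refcount, and only a miss allocates and registers a new pool. A sketch of that get pattern with a single chained list standing in for unbound_pool_hash and nice as the only attribute:

/*
 * Sketch: unbound pools are shared by attribute; a lookup hit takes a
 * reference, a miss allocates and registers a new pool.
 */
#include <stdio.h>
#include <stdlib.h>

struct workqueue_attrs { int nice; };

struct worker_pool {
	struct workqueue_attrs attrs;
	int refcnt;
	struct worker_pool *hash_next;
};

static struct worker_pool *unbound_pools;	/* single chain for the sketch */

static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
	struct worker_pool *pool;

	for (pool = unbound_pools; pool; pool = pool->hash_next)
		if (pool->attrs.nice == attrs->nice) {
			pool->refcnt++;
			return pool;
		}

	pool = calloc(1, sizeof(*pool));
	if (!pool)
		return NULL;
	pool->attrs = *attrs;
	pool->refcnt = 1;
	pool->hash_next = unbound_pools;
	unbound_pools = pool;
	return pool;
}

int main(void)
{
	struct workqueue_attrs attrs = { .nice = -20 };
	struct worker_pool *a = get_unbound_pool(&attrs);
	struct worker_pool *b = get_unbound_pool(&attrs);

	printf("shared=%d refcnt=%d\n", a == b, a->refcnt);
	return 0;
}
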
3709 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn() local
3727 put_unbound_pool(pool); in pwq_unbound_release_workfn()
3764 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3789 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3794 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3799 struct worker_pool *pool) in init_pwq() argument
3805 pwq->pool = pool; in init_pwq()
3840 struct worker_pool *pool; in alloc_unbound_pwq() local
3845 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
3846 if (!pool) in alloc_unbound_pwq()
3849 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3851 put_unbound_pool(pool); in alloc_unbound_pwq()
3855 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
4174 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { in wq_update_unbound_numa()
4175 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4196 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4198 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4443 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
4448 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4454 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4613 struct worker_pool *pool; in work_busy() local
4621 pool = get_work_pool(work); in work_busy()
4622 if (pool) { in work_busy()
4623 raw_spin_lock_irqsave(&pool->lock, flags); in work_busy()
4624 if (find_worker_executing_work(pool, work)) in work_busy()
4626 raw_spin_unlock_irqrestore(&pool->lock, flags); in work_busy()
4706 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
4708 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
4709 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
4710 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
4711 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); in pr_cont_pool_info()
4730 struct worker_pool *pool = pwq->pool; in show_pwq() local
4736 pr_info(" pwq %d:", pool->id); in show_pwq()
4737 pr_cont_pool_info(pool); in show_pwq()
4743 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4753 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4768 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4778 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4822 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in show_one_workqueue()
4833 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_one_workqueue()
4848 static void show_one_worker_pool(struct worker_pool *pool) in show_one_worker_pool() argument
4854 raw_spin_lock_irqsave(&pool->lock, flags); in show_one_worker_pool()
4855 if (pool->nr_workers == pool->nr_idle) in show_one_worker_pool()
4863 pr_info("pool %d:", pool->id); in show_one_worker_pool()
4864 pr_cont_pool_info(pool); in show_one_worker_pool()
4866 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, in show_one_worker_pool()
4867 pool->nr_workers); in show_one_worker_pool()
4868 if (pool->manager) in show_one_worker_pool()
4870 task_pid_nr(pool->manager->task)); in show_one_worker_pool()
4871 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
4879 raw_spin_unlock_irqrestore(&pool->lock, flags); in show_one_worker_pool()
4898 struct worker_pool *pool; in show_all_workqueues() local
4908 for_each_pool(pool, pi) in show_all_workqueues()
4909 show_one_worker_pool(pool); in show_all_workqueues()
4929 struct worker_pool *pool = worker->pool; in wq_worker_comm() local
4931 if (pool) { in wq_worker_comm()
4932 raw_spin_lock_irq(&pool->lock); in wq_worker_comm()
4946 raw_spin_unlock_irq(&pool->lock); in wq_worker_comm()
4972 struct worker_pool *pool; in unbind_workers() local
4975 for_each_cpu_worker_pool(pool, cpu) { in unbind_workers()
4977 raw_spin_lock_irq(&pool->lock); in unbind_workers()
4986 for_each_pool_worker(worker, pool) in unbind_workers()
4989 pool->flags |= POOL_DISASSOCIATED; in unbind_workers()
4991 raw_spin_unlock_irq(&pool->lock); in unbind_workers()
4993 for_each_pool_worker(worker, pool) { in unbind_workers()
5016 atomic_set(&pool->nr_running, 0); in unbind_workers()
5023 raw_spin_lock_irq(&pool->lock); in unbind_workers()
5024 wake_up_worker(pool); in unbind_workers()
5025 raw_spin_unlock_irq(&pool->lock); in unbind_workers()
5035 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
5048 for_each_pool_worker(worker, pool) { in rebind_workers()
5049 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
5051 pool->attrs->cpumask) < 0); in rebind_workers()
5054 raw_spin_lock_irq(&pool->lock); in rebind_workers()
5056 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
5058 for_each_pool_worker(worker, pool) { in rebind_workers()
5093 raw_spin_unlock_irq(&pool->lock); in rebind_workers()
5106 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
5114 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
5117 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
5120 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
5126 struct worker_pool *pool; in workqueue_prepare_cpu() local
5128 for_each_cpu_worker_pool(pool, cpu) { in workqueue_prepare_cpu()
5129 if (pool->nr_workers) in workqueue_prepare_cpu()
5131 if (!create_worker(pool)) in workqueue_prepare_cpu()
5139 struct worker_pool *pool; in workqueue_online_cpu() local
5145 for_each_pool(pool, pi) { in workqueue_online_cpu()
5148 if (pool->cpu == cpu) in workqueue_online_cpu()
5149 rebind_workers(pool); in workqueue_online_cpu()
5150 else if (pool->cpu < 0) in workqueue_online_cpu()
5151 restore_unbound_workers_cpumask(pool, cpu); in workqueue_online_cpu()
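
On CPU online (lines 5106-5151), per-CPU pools rebind their workers while unbound pools only react if the new CPU is in their attrs cpumask, in which case workers are re-pinned to attrs_mask & online_mask. A bitmask sketch of that check, with unsigned long standing in for struct cpumask:

/*
 * Sketch of the restore_unbound_workers_cpumask() check: ignore CPUs the
 * pool does not span, otherwise re-pin to attrs_mask & online_mask.
 */
#include <stdio.h>

static unsigned long restore_unbound_cpumask(unsigned long attrs_mask,
					      unsigned long online_mask,
					      int cpu)
{
	if (!(attrs_mask & (1UL << cpu)))
		return 0;	/* pool doesn't span this CPU, nothing to do */

	return attrs_mask & online_mask;	/* mask workers are moved to */
}

int main(void)
{
	/* pool spans CPUs 0-3, CPUs 0-2 currently online, CPU 2 just came up */
	unsigned long mask = restore_unbound_cpumask(0xfUL, 0x7UL, 2);

	printf("workers re-pinned to mask 0x%lx\n", mask);
	return 0;
}
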
5522 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
5854 struct worker_pool *pool; in wq_watchdog_timer_fn() local
5862 for_each_pool(pool, pi) { in wq_watchdog_timer_fn()
5865 if (list_empty(&pool->worklist)) in wq_watchdog_timer_fn()
5875 if (pool->cpu >= 0) in wq_watchdog_timer_fn()
5876 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); in wq_watchdog_timer_fn()
5879 pool_ts = READ_ONCE(pool->watchdog_ts); in wq_watchdog_timer_fn()
5890 pr_cont_pool_info(pool); in wq_watchdog_timer_fn()
6032 struct worker_pool *pool; in workqueue_init_early() local
6035 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init_early()
6036 BUG_ON(init_worker_pool(pool)); in workqueue_init_early()
6037 pool->cpu = cpu; in workqueue_init_early()
6038 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in workqueue_init_early()
6039 pool->attrs->nice = std_nice[i++]; in workqueue_init_early()
6040 pool->node = cpu_to_node(cpu); in workqueue_init_early()
6044 BUG_ON(worker_pool_assign_id(pool)); in workqueue_init_early()
6098 struct worker_pool *pool; in workqueue_init() local
6115 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
6116 pool->node = cpu_to_node(cpu); in workqueue_init()
6131 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
6132 pool->flags &= ~POOL_DISASSOCIATED; in workqueue_init()
6133 BUG_ON(!create_worker(pool)); in workqueue_init()
6137 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) in workqueue_init()
6138 BUG_ON(!create_worker(pool)); in workqueue_init()