/xen/xen/include/xen/

  cpumask.h
      70  extern unsigned int nr_cpu_ids;
      83  ASSERT(cpu < nr_cpu_ids);   in cpumask_check()
     196  return bitmap_full(srcp->bits, nr_cpu_ids);   in cpumask_full()
     211  return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));   in cpumask_first()
     220  return min_t(int, nr_cpu_ids,   in cpumask_next()
     226  int cpu, pcpu = nr_cpu_ids;   in cpumask_last()
     229  cpu < nr_cpu_ids;   in cpumask_last()
     239  if (nxt == nr_cpu_ids)   in cpumask_cycle()
     257  if ( w > 1 && cpu < nr_cpu_ids )   in cpumask_any()
     262  if ( next >= nr_cpu_ids )   in cpumask_any()
     [all …]
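Every helper above clamps its result to nr_cpu_ids, which is what makes the usual cpumask walk terminate. A minimal sketch of that idiom, assuming the caller iterates cpu_online_map; the printk() body is a placeholder, not code from the listing:

    #include <xen/cpumask.h>
    #include <xen/lib.h>

    static void walk_online_cpus(void)
    {
        unsigned int cpu;

        /* cpumask_first()/cpumask_next() return nr_cpu_ids once the mask is
         * exhausted, so "cpu < nr_cpu_ids" is the canonical termination test. */
        for ( cpu = cpumask_first(&cpu_online_map);
              cpu < nr_cpu_ids;
              cpu = cpumask_next(cpu, &cpu_online_map) )
            printk("CPU%u is online\n", cpu);
    }

Xen's for_each_cpu() style macros wrap exactly this pattern, so open-coded loops like the one sketched are mostly confined to the helpers listed above.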
/xen/xen/arch/x86/guest/xen/

  xen.c
     247  vcpu_info = xzalloc_array(struct vcpu_info, nr_cpu_ids);   in setup()
     253  if ( !vcpu_info && nr_cpu_ids > XEN_LEGACY_MAX_VCPUS )   in setup()
     257  for ( i = XEN_LEGACY_MAX_VCPUS; i < nr_cpu_ids; i++ )   in setup()
     259  nr_cpu_ids = XEN_LEGACY_MAX_VCPUS;   in setup()
     310  if ( map_vcpuinfo() && nr_cpu_ids > XEN_LEGACY_MAX_VCPUS )   in resume()
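These setup() hits show the second major role of nr_cpu_ids: sizing dynamically allocated per-CPU state. A condensed sketch of the shape visible above; the rationale in the comment is an assumption, and the real function also handles the CPUs it drops:

    /* One vcpu_info slot per possible CPU; if the allocation fails, shrink
     * nr_cpu_ids to the legacy limit (presumably the number of slots built
     * into the shared_info page) so later code never indexes past what can
     * actually be backed. */
    vcpu_info = xzalloc_array(struct vcpu_info, nr_cpu_ids);
    if ( !vcpu_info && nr_cpu_ids > XEN_LEGACY_MAX_VCPUS )
        nr_cpu_ids = XEN_LEGACY_MAX_VCPUS;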
/xen/xen/common/

  cpu.c
       9  unsigned int __read_mostly nr_cpu_ids = NR_CPUS;   (definition, variable)
     107  if ( (cpu >= nr_cpu_ids) || (cpu == 0) )   in cpu_down()
     148  if ( (cpu >= nr_cpu_ids) || !cpu_present(cpu) )   in cpu_up()

  trace.c
     117  unsigned int max_cpus = nr_cpu_ids;   in calculate_tbuf_size()
     155  t_info_words = nr_cpu_ids * pages + t_info_first_offset;   in calculate_tbuf_size()
     159  t_info_pages, pages, nr_cpu_ids);   in calculate_tbuf_size()

  kexec.c
     467  BUG_ON( cpu >= nr_cpu_ids || ! crash_notes );   in kexec_init_cpu_notes()
     585  sizeof_cpu_notes(1) * (nr_cpu_ids - 1);   in kexec_init()
     603  crash_notes = xzalloc_array(crash_note_range_t, nr_cpu_ids);   in kexec_init()
     633  if ( nr < 0 || nr >= nr_cpu_ids )   in kexec_get_cpu()

  sysctl.c
     149  nr_cpus = min(op->u.getcpuinfo.max_cpus, nr_cpu_ids);   in do_sysctl()
     266  pi->max_cpu_id = nr_cpu_ids - 1;   in do_sysctl()

  domctl.c
     101  nr_cpu_ids);   in cpumask_to_xenctl_bitmap()
     111  nr_cpu_ids);   in xenctl_bitmap_to_cpumask()

  keyhandler.c
     161  if ( cpu < nr_cpu_ids )   in dump_execstate()
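cpu.c holds the definition, defaulting to the build-time NR_CPUS until architecture code narrows it, and the recurring pattern across cpu.c, sysctl.c, domctl.c and kexec.c is a bounds check against nr_cpu_ids before a caller-supplied CPU number touches per-CPU state. A minimal sketch of that guard; act_on_cpu() is a hypothetical handler and -EINVAL is chosen for illustration:

    #include <xen/cpumask.h>
    #include <xen/errno.h>

    /* Hypothetical handler: refuse CPU numbers outside [0, nr_cpu_ids) or not
     * present before indexing anything sized by nr_cpu_ids. */
    static int act_on_cpu(unsigned int cpu)
    {
        if ( cpu >= nr_cpu_ids || !cpu_present(cpu) )
            return -EINVAL;

        /* ... cpu is now a safe index into nr_cpu_ids-sized arrays ... */
        return 0;
    }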
/xen/xen/arch/x86/

  mpparse.c
      87  nr_cpu_ids = min(tot_cpus, NR_CPUS + 0u);   in set_nr_cpu_ids()
      88  if (park_offline_cpus && nr_cpu_ids < num_processors)   in set_nr_cpu_ids()
      90  num_processors - nr_cpu_ids);   in set_nr_cpu_ids()
      93  nr_cpumask_bits = ROUNDUP(nr_cpu_ids, BITS_PER_LONG);   in set_nr_cpu_ids()
     162  if (num_processors >= nr_cpu_ids) {   in MP_processor_info_x()
     165  nr_cpu_ids);   in MP_processor_info_x()

  platform_hypercall.c
     554  if ( (g_info->xen_cpuid >= nr_cpu_ids) ||   in do_platform_op()
     587  if ( (ver->xen_cpuid >= nr_cpu_ids) || !cpu_online(ver->xen_cpuid) )   in do_platform_op()
     621  if ( cpu >= nr_cpu_ids || !cpu_present(cpu) ||   in do_platform_op()
     653  if ( cpu >= nr_cpu_ids || !cpu_present(cpu) )   in do_platform_op()
     761  if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )   in do_platform_op()

  numa.c
     191  for ( i = 0; i < nr_cpu_ids; i++ )   in numa_init_array()
     275  for ( i = 0; i < nr_cpu_ids; i++ )   in numa_initmem_init()
     338  for ( i = 0; i < nr_cpu_ids; i++ )   in init_cpu_to_node()

  machine_kexec.c
     168  for ( i = 0; i < nr_cpu_ids; i++ )   in machine_kexec()

  hpet.c
     403  for ( i = 0; i < num_chs && num_hpets_used < nr_cpu_ids; i++ )   in hpet_fsb_cap_lookup()
     442  if ( num_hpets_used >= nr_cpu_ids )   in hpet_get_channel()
     519  else if ( (next = cpumask_first(ch->cpumask)) >= nr_cpu_ids )   in hpet_detach_channel()

  setup.c
     334  j < nr_cpu_ids;   in normalise_cpu_order()
     350  if ( min_cpu >= nr_cpu_ids )   in normalise_cpu_order()
     352  BUG_ON(cpumask_next(i, &cpu_present_map) < nr_cpu_ids);   in normalise_cpu_order()
    1717  max_cpus = nr_cpu_ids;   in __start_xen()

  sysctl.c
     259  if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )   in arch_do_sysctl()

  tboot.c
     257  for ( i = 0; i < nr_cpu_ids; i++ )   in mfn_in_guarded_stack()

  smpboot.c
     539  return (cpu < nr_cpu_ids) ? cpu : -ENODEV;   in alloc_cpu_id()
    1040  i < nr_cpu_ids && i <= (cpu | (STUBS_PER_PAGE - 1)); ++i )   in cpu_smpboot_alloc()
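On x86, set_nr_cpu_ids() in mpparse.c is where the runtime value is actually derived: the processor count enumerated at boot is clamped to the build-time NR_CPUS limit, and the cpumask width is rounded up to whole longs so the bitmap helpers in cpumask.h always operate on complete words. A restatement of that arithmetic, with made-up example values in the comments:

    /* From the set_nr_cpu_ids() hits above; the numbers are illustrative only. */
    nr_cpu_ids = min(tot_cpus, NR_CPUS + 0u);              /* e.g. min(48, 256) == 48    */
    nr_cpumask_bits = ROUNDUP(nr_cpu_ids, BITS_PER_LONG);  /* e.g. ROUNDUP(48, 64) == 64 */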
/xen/xen/arch/x86/acpi/

  lib.c
      96  else if (cpu >= nr_cpu_ids || !cpu_online(cpu))   in arch_acpi_set_pdc_bits()
/xen/xen/common/sched/

  cpupool.c
     831  if ( cpu >= nr_cpu_ids )   in cpupool_do_sysctl()
     835  if ( cpu >= nr_cpu_ids )   in cpupool_do_sysctl()
     863  ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;   in cpupool_do_sysctl()

  arinc653.c
     623  || (cpu >= nr_cpu_ids) )   in a653sched_pick_resource()

  core.c
    1581  else if ( cpu < nr_cpu_ids )   in vcpu_temporary_affinity()
    1675  &vcpuaff->cpumap_hard, nr_cpu_ids);   in vcpu_affinity_domctl()
    1691  &vcpuaff->cpumap_soft, nr_cpu_ids);   in vcpu_affinity_domctl()
    2960  BUG_ON(nr_cpu_ids > ARRAY_SIZE(idle_vcpu));   in scheduler_init()
    2962  idle_domain->max_vcpus = nr_cpu_ids;   in scheduler_init()
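The scheduler_init() hits tie nr_cpu_ids to two fixed limits: the idle domain needs one vCPU per possible CPU, and the statically sized idle_vcpu[] table has to hold them all. A brief restatement of that invariant; the comment is an interpretation of the listed lines, not text from the source:

    /* Every CPU id below nr_cpu_ids gets an idle vCPU, so the static table
     * must be at least that large and the idle domain must allow that many
     * vCPUs. */
    BUG_ON(nr_cpu_ids > ARRAY_SIZE(idle_vcpu));
    idle_domain->max_vcpus = nr_cpu_ids;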
/xen/xen/drivers/acpi/

  pmstat.c
      54  if ( !op || (op->cpuid >= nr_cpu_ids) || !cpu_online(op->cpuid) )   in do_get_pm_info()
     434  if ( op->cpuid >= nr_cpu_ids || !cpu_online(op->cpuid) )   in do_pm_op()
/xen/xen/arch/arm/

  smpboot.c
     285  for ( i = 0; i < nr_cpu_ids; i++ )   in smp_get_max_cpus()
/xen/xen/arch/x86/oprofile/

  nmi_int.c
     131  for (i = 0; i < nr_cpu_ids; ++i) {   in free_msrs()
/xen/xen/arch/x86/cpu/mcheck/

  mce.c
    1483  if ( target >= nr_cpu_ids )   in do_mca()
    1559  if ( target >= nr_cpu_ids )   in do_mca()
    1647  if ( nr_cpu_ids > cpumask_next(cpumask_first(cpumap), cpumap) )   in do_mca()
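The check on mce.c line 1647 is a compact idiom worth spelling out: cpumask_next() of the first set bit is still below nr_cpu_ids exactly when the mask holds at least two CPUs. A small sketch making that explicit; the helper name is invented here:

    #include <xen/types.h>
    #include <xen/cpumask.h>

    /* Mirrors the mce.c:1647 test: true iff at least two CPUs are set, since
     * cpumask_first()/cpumask_next() return nr_cpu_ids when they run out of
     * set bits. */
    static bool cpumask_has_two_or_more(const cpumask_t *cpumap)
    {
        return cpumask_next(cpumask_first(cpumap), cpumap) < nr_cpu_ids;
    }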