/linux/sound/soc/intel/skylake/

skl-sst-dsp.c
     40  skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;   in skl_dsp_init_core_state()
     43  skl->cores.state[i] = SKL_DSP_RESET;   in skl_dsp_init_core_state()
     44  skl->cores.usage_count[i] = 0;   in skl_dsp_init_core_state()
    341  if (core_id >= skl->cores.count) {   in skl_dsp_get_core()
    346  skl->cores.usage_count[core_id]++;   in skl_dsp_get_core()
    358  core_id, skl->cores.state[core_id],   in skl_dsp_get_core()
    359  skl->cores.usage_count[core_id]);   in skl_dsp_get_core()
    370  if (core_id >= skl->cores.count) {   in skl_dsp_put_core()
    381  skl->cores.usage_count[core_id]++;   in skl_dsp_put_core()
    386  core_id, skl->cores.state[core_id],   in skl_dsp_put_core()
    [all …]
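The skl_dsp_get_core()/skl_dsp_put_core() hits above follow the usual per-core reference-counting pattern: power a core up for its first user and back down when the last user releases it. A minimal sketch of that idea, with illustrative names and stubbed power hooks rather than the driver's real API:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    /* Illustrative per-core bookkeeping, loosely modelled on skl->cores.
     * The lock must be set up with mutex_init() and usage_count allocated
     * before either helper is called. */
    struct dsp_cores {
            struct mutex lock;
            unsigned int count;     /* number of DSP cores */
            int *usage_count;       /* one reference count per core */
    };

    static int power_up_core(unsigned int core_id) { return 0; }   /* stub */
    static void power_down_core(unsigned int core_id) { }          /* stub */

    static int dsp_get_core(struct dsp_cores *cores, unsigned int core_id)
    {
            int ret = 0;

            if (core_id >= cores->count)
                    return -EINVAL;

            mutex_lock(&cores->lock);
            /* Power the core up only for the first user. */
            if (cores->usage_count[core_id]++ == 0)
                    ret = power_up_core(core_id);
            mutex_unlock(&cores->lock);

            return ret;
    }

    static void dsp_put_core(struct dsp_cores *cores, unsigned int core_id)
    {
            if (core_id >= cores->count)
                    return;

            mutex_lock(&cores->lock);
            /* Power the core down once the last user is gone. */
            if (--cores->usage_count[core_id] == 0)
                    power_down_core(core_id);
            mutex_unlock(&cores->lock);
    }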
|
skl-messages.c
    258  struct skl_dsp_cores *cores;   in skl_init_dsp() local
    287  cores = &skl->cores;   in skl_init_dsp()
    288  cores->count = ops->num_cores;   in skl_init_dsp()
    290  cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);   in skl_init_dsp()
    291  if (!cores->state) {   in skl_init_dsp()
    296  cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),   in skl_init_dsp()
    298  if (!cores->usage_count) {   in skl_init_dsp()
    308  kfree(cores->state);   in skl_init_dsp()
    325  kfree(skl->cores.state);   in skl_free_dsp()
    326  kfree(skl->cores.usage_count);   in skl_free_dsp()
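skl_init_dsp() sizes both per-core arrays with kcalloc() and frees the first one if the second allocation fails, and skl_free_dsp() releases both on teardown. A rough sketch of that allocate-and-unwind pattern (the struct layout here is an assumption inferred from the matches above):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct dsp_cores {
            unsigned int count;     /* number of DSP cores */
            int *state;             /* per-core power state */
            int *usage_count;       /* per-core reference count */
    };

    static int dsp_cores_alloc(struct dsp_cores *cores, unsigned int num_cores)
    {
            cores->count = num_cores;

            cores->state = kcalloc(cores->count, sizeof(*cores->state),
                                   GFP_KERNEL);
            if (!cores->state)
                    return -ENOMEM;

            cores->usage_count = kcalloc(cores->count,
                                         sizeof(*cores->usage_count),
                                         GFP_KERNEL);
            if (!cores->usage_count) {
                    kfree(cores->state);    /* unwind the first allocation */
                    return -ENOMEM;
            }

            return 0;
    }

    static void dsp_cores_free(struct dsp_cores *cores)
    {
            kfree(cores->state);
            kfree(cores->usage_count);
    }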
|
bxt-sst.c
    271  if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING)   in bxt_d0i3_target_state()
    326  skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3;   in bxt_set_dsp_D0i3()
    358  if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3)   in bxt_set_dsp_D0i0()
    381  skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;   in bxt_set_dsp_D0i0()
    410  skl->cores.state[core_id] = SKL_DSP_RUNNING;   in bxt_set_dsp_D0()
    472  skl->cores.state[core_id] = SKL_DSP_RUNNING;   in bxt_set_dsp_D0()
    518  skl->cores.state[core_id] = SKL_DSP_RESET;   in bxt_set_dsp_D3()
|
/linux/Documentation/admin-guide/

lockup-watchdogs.rst
     67  By default, the watchdog runs on all online cores. However, on a
     69  on the housekeeping cores, not the cores specified in the "nohz_full"
     71  the "nohz_full" cores, we would have to run timer ticks to activate
     73  from protecting the user code on those cores from the kernel.
     74  Of course, disabling it by default on the nohz_full cores means that
     75  when those cores do enter the kernel, by default we will not be
     77  to continue to run on the housekeeping (non-tickless) cores means
     78  that we will continue to detect lockups properly on those cores.
     80  In either case, the set of cores excluded from running the watchdog
     82  nohz_full cores, this may be useful for debugging a case where the
    [all …]
|
/linux/drivers/remoteproc/

ti_k3_r5_remoteproc.c
    106  struct list_head cores;   member
    283  list_for_each_entry(core, &cluster->cores, elem) {   in k3_r5_lockstep_reset()
    294  list_for_each_entry(core, &cluster->cores, elem) {   in k3_r5_lockstep_reset()
    328  list_for_each_entry_reverse(core, &cluster->cores, elem) {   in k3_r5_lockstep_release()
    340  list_for_each_entry_reverse(core, &cluster->cores, elem) {   in k3_r5_lockstep_release()
    358  list_for_each_entry_from(core, &cluster->cores, elem) {   in k3_r5_lockstep_release()
    598  list_for_each_entry(core, &cluster->cores, elem) {   in k3_r5_rproc_stop()
    830  list_for_each_entry(temp, &cluster->cores, elem) {   in k3_r5_rproc_configure()
   1015  list_for_each_entry(core, &cluster->cores, elem) {   in k3_r5_cluster_rproc_init()
   1446  list_add_tail(&core->elem, &cluster->cores);   in k3_r5_cluster_of_init()
    [all …]
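The R5F cluster code keeps its cores on a plain list_head and walks them with list_for_each_entry() (or the _reverse/_from variants when start-up and shutdown ordering matters). A condensed sketch of that structure, using hypothetical names rather than the driver's own:

    #include <linux/list.h>

    struct r5_core {
            struct list_head elem;  /* links this core into the cluster list */
            int id;
    };

    struct r5_cluster {
            struct list_head cores; /* head of the per-cluster core list */
    };

    static void cluster_init(struct r5_cluster *cluster)
    {
            INIT_LIST_HEAD(&cluster->cores);
    }

    /* Cores are appended at probe time, so list order matches probe order. */
    static void cluster_add_core(struct r5_cluster *cluster,
                                 struct r5_core *core)
    {
            list_add_tail(&core->elem, &cluster->cores);
    }

    static void cluster_reset_all(struct r5_cluster *cluster)
    {
            struct r5_core *core;

            /* Forward walk for reset/stop; the release path above walks the
             * same list with list_for_each_entry_reverse() instead. */
            list_for_each_entry(core, &cluster->cores, elem) {
                    /* per-core action (assert reset, halt, ...) goes here */
            }
    }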
|
/linux/drivers/gpu/drm/nouveau/dispnv50/

core.c
     44  } cores[] = {   in nv50_core_new() local
     65  cid = nvif_mclass(&disp->disp->object, cores);   in nv50_core_new()
     71  return cores[cid].new(drm, cores[cid].oclass, pcore);   in nv50_core_new()
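nv50_core_new() picks a display-core implementation from a local cores[] table by asking the hardware which class it supports and then calling that entry's constructor. A generic, self-contained stand-in for that table-driven selection (the names and the capability check are placeholders, not the nvif API):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct core_desc {
            u32 oclass;                             /* hardware class id */
            int (*new)(u32 oclass, void **pcore);   /* constructor */
    };

    /* Placeholder for a real "does the device support this class?" query. */
    static bool device_supports(u32 oclass)
    {
            return true;
    }

    /* Entries are ordered newest class first; the first supported one wins. */
    static int core_new(const struct core_desc *cores, int ncores, void **pcore)
    {
            int i;

            for (i = 0; i < ncores; i++) {
                    if (device_supports(cores[i].oclass))
                            return cores[i].new(cores[i].oclass, pcore);
            }

            return -ENODEV;         /* no supported core class found */
    }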
|
/linux/Documentation/devicetree/bindings/timer/

snps,arc-timer.txt
      4  - Two idential copies TIMER0 and TIMER1 exist in ARC cores and historically
      5  TIMER0 used as clockevent provider (true for all ARC cores)
     12  (16 for ARCHS cores, 3 for ARC700 cores)
|
/linux/Documentation/devicetree/bindings/media/xilinx/

video.txt
      1  DT bindings for Xilinx video IP cores
      4  Xilinx video IP cores process video streams by acting as video sinks and/or
     10  cores are represented as defined in ../video-interfaces.txt.
     18  The following properties are common to all Xilinx video IP cores.
     21  AXI bus between video IP cores, using its VF code as defined in "AXI4-Stream
|
xlnx,video.txt
      8  video IP cores. Each video IP core is represented as documented in video.txt
     11  mappings between DMAs and the video IP cores.
|
/linux/Documentation/devicetree/bindings/bus/

brcm,bus-axi.txt
      9  The cores on the AXI bus are automatically detected by bcma with the
     12  BCM47xx/BCM53xx ARM SoCs. To assign IRQ numbers to the cores, provide
     17  The top-level axi bus may contain children representing attached cores
     19  detected (e.g. IRQ numbers). Also some of the cores may be responsible
|
/linux/Documentation/devicetree/bindings/arm/

arm,vexpress-juno.yaml
     45  - description: CoreTile Express A9x4 (V2P-CA9) has 4 Cortex A9 CPU cores
     51  - description: CoreTile Express A5x2 (V2P-CA5s) has 2 Cortex A5 CPU cores
     58  cores in a MPCore configuration in a test chip on the core tile. See
     64  A15 CPU cores in a test chip on the core tile. This is the first test
     71  CPU cores and 3 Cortex A7 cores in a big.LITTLE MPCore configuration
     77  cores in a test chip on the core tile. See ARM DDI 0498D.
     84  AArch64 CPU cores. It has 2 Cortex A57 CPU cores and 4 Cortex A53
     85  cores in a big.LITTLE configuration. It also features the MALI T624
|
/linux/arch/x86/mm/

amdtopology.c
     63  unsigned int bits, cores, apicid_base;   in amd_numa_init() local
    165  cores = 1 << bits;   in amd_numa_init()
    179  for (j = apicid_base; j < cores + apicid_base; j++)   in amd_numa_init()
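amd_numa_init() derives the number of cores behind each node from an APIC-id bit width (cores = 1 << bits) and then walks that node's contiguous APIC-id range. A tiny illustration of the arithmetic, with hypothetical names:

    #include <linux/printk.h>

    static void walk_node_apicids(unsigned int bits, unsigned int apicid_base)
    {
            unsigned int cores = 1 << bits;    /* APIC ids covered by this node */
            unsigned int j;

            for (j = apicid_base; j < apicid_base + cores; j++)
                    pr_debug("APIC id %u belongs to this node\n", j);
    }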
|
/linux/drivers/gpu/drm/v3d/

v3d_irq.c
    213  for (core = 0; core < v3d->cores; core++)   in v3d_irq_init()
    258  for (core = 0; core < v3d->cores; core++) {   in v3d_irq_enable()
    273  for (core = 0; core < v3d->cores; core++)   in v3d_irq_disable()
    278  for (core = 0; core < v3d->cores; core++)   in v3d_irq_disable()
|
v3d_debugfs.c
    102  for (core = 0; core < v3d->cores; core++) {   in v3d_v3d_debugfs_regs()
    132  u32 ident0, ident1, ident2, ident3, cores;   in v3d_v3d_debugfs_ident() local
    143  cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);   in v3d_v3d_debugfs_ident()
    162  for (core = 0; core < cores; core++) {   in v3d_v3d_debugfs_ident()
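The debugfs code reads the core count out of a hub IDENT register field and then loops over the per-core register blocks. A rough sketch of that shape (register names, offsets, and the MMIO helpers are made up for illustration):

    #include <linux/printk.h>
    #include <linux/types.h>

    #define HUB_IDENT1              0x0004      /* hypothetical hub register */
    #define HUB_IDENT1_NCORES_MASK  0x0000000f
    #define HUB_IDENT1_NCORES_SHIFT 4

    /* Stubs standing in for real MMIO accessors. */
    static u32 hub_read(u32 offset) { return 0; }
    static u32 core_read(u32 core, u32 offset) { return 0; }

    static void dump_core_idents(void)
    {
            u32 ident1 = hub_read(HUB_IDENT1);
            u32 ncores = (ident1 >> HUB_IDENT1_NCORES_SHIFT) &
                         HUB_IDENT1_NCORES_MASK;
            u32 core;

            /* The same register block is replicated once per core. */
            for (core = 0; core < ncores; core++)
                    pr_info("core %u ident0: 0x%08x\n",
                            core, core_read(core, 0x0));
    }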
|
/linux/drivers/bcma/

main.c
     91  list_for_each_entry(core, &bus->cores, list) {   in bcma_find_core_unit()
    271  INIT_LIST_HEAD(&bus->cores);   in bcma_init_bus()
    295  list_for_each_entry(core, &bus->cores, list) {   in bcma_register_devices()
    363  list_for_each_entry_safe(core, tmp, &bus->cores, list) {   in bcma_unregister_cores()
    373  list_for_each_entry_safe(core, tmp, &bus->cores, list) {   in bcma_unregister_cores()
    409  list_for_each_entry(core, &bus->cores, list) {   in bcma_bus_register()
    534  list_for_each_entry(core, &bus->cores, list) {   in bcma_bus_suspend()
    555  list_for_each_entry(core, &bus->cores, list) {   in bcma_bus_resume()
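bcma walks bus->cores with list_for_each_entry() for lookup, suspend, and resume, but switches to list_for_each_entry_safe() in bcma_unregister_cores() because entries are removed while iterating. A small sketch of that teardown pattern with illustrative types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct bus_core {
            struct list_head list;  /* links the core into the bus list */
    };

    struct soc_bus {
            struct list_head cores; /* head of the core list */
    };

    static void bus_remove_cores(struct soc_bus *bus)
    {
            struct bus_core *core, *tmp;

            /* The _safe variant caches the next element, so it is legal to
             * unlink and free the current one inside the loop body. */
            list_for_each_entry_safe(core, tmp, &bus->cores, list) {
                    list_del(&core->list);
                    kfree(core);
            }
    }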
|
/linux/arch/arm/boot/dts/

vexpress-v2p-ca15-tc1.dts
    199  volt-cores {
    210  amp-cores {
    211  /* Total current for the two cores */
    224  power-cores {
|
vexpress-v2p-ca15_a7.dts
    360  /* Total current for the two A15 cores */
    367  /* Total current for the three A7 cores */
    381  /* Total power for the two A15 cores */
    388  /* Total power for the three A7 cores */
    395  /* Total energy for the two A15 cores */
    402  /* Total energy for the three A7 cores */
|
/linux/drivers/soc/tegra/

Kconfig
     85  the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
     86  cores in a switched configuration. It features a GPU of the Maxwell
     88  and providing 256 CUDA cores. It supports hardware-accelerated en-
    105  combination of Denver and Cortex-A57 CPU cores and a GPU based on
|
/linux/Documentation/admin-guide/device-mapper/

unstriped.rst
     85  Intel NVMe drives contain two cores on the physical device.
     88  in a 256k stripe across the two cores::
    100  are striped across the two cores. When we unstripe this hardware RAID 0
    113  unstriped ontop of Intel NVMe device that has 2 cores
|
/linux/Documentation/ABI/testing/

sysfs-bus-bcma
     14  There are a few types of BCMA cores, they can be identified by
     22  BCMA cores of the same type can still slightly differ depending
|
/linux/Documentation/devicetree/bindings/

xilinx.txt
      1  d) Xilinx IP cores
      3  The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
     14  device drivers how the IP cores are configured, but it requires the kernel
     20  properties of the device node. In general, device nodes for IP-cores
     89  That covers the general approach to binding xilinx IP cores into the
|
/linux/Documentation/locking/

percpu-rw-semaphore.rst
      9  cores take the lock for reading, the cache line containing the semaphore
     10  is bouncing between L1 caches of the cores, causing performance
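The two matched lines describe why an ordinary rwsem scales poorly when many cores take it for reading: the shared cache line bounces between their L1 caches. The kernel's percpu_rw_semaphore keeps the read side on per-CPU state instead; a minimal usage sketch:

    #include <linux/percpu-rwsem.h>

    static DEFINE_STATIC_PERCPU_RWSEM(example_sem);

    static void reader_path(void)
    {
            percpu_down_read(&example_sem);
            /* Read-side critical section: no shared cache line is written,
             * so many cores can take this concurrently without bouncing. */
            percpu_up_read(&example_sem);
    }

    static void writer_path(void)
    {
            percpu_down_write(&example_sem);
            /* Write-side critical section: rare and comparatively expensive,
             * since it must wait for all readers to drain. */
            percpu_up_write(&example_sem);
    }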
|
/linux/Documentation/x86/

topology.rst
     24  threads, cores, packages, etc.
     36  - cores
     41  Packages contain a number of cores plus shared resources, e.g. DRAM
     52  The number of cores in a package. This information is retrieved via CPUID.
     65  and deduced from the APIC IDs of the cores in the package.
|
/linux/Documentation/devicetree/bindings/power/

renesas,apmu.yaml
     39  Array of phandles pointing to CPU cores, which should match the order of
     40  CPU cores used by the WUPCR and PSTR registers in the Advanced Power
|
/linux/Documentation/admin-guide/perf/

arm_dsu_pmu.rst
      5  ARM DynamIQ Shared Unit integrates one or more cores with an L3 memory system,
     11  cores connected to the same DSU. Like most of the other uncore PMUs, DSU
|