Lines matching the identifier "cpu" in arch/arm64/kernel/smp.c

23 #include <linux/cpu.h>
42 #include <asm/cpu.h>
90 static void ipi_setup(int cpu);
93 static void ipi_teardown(int cpu);
94 static int op_cpu_kill(unsigned int cpu);
96 static inline int op_cpu_kill(unsigned int cpu) in op_cpu_kill()
104 * Boot a secondary CPU, and assign it the specified idle task.
105 * This also gives us the initial stack to use for this CPU.
107 static int boot_secondary(unsigned int cpu, struct task_struct *idle) in boot_secondary()
109 const struct cpu_operations *ops = get_cpu_ops(cpu); in boot_secondary()
112 return ops->cpu_boot(cpu); in boot_secondary()
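
Lines 104-112 show the dispatch pattern for starting a secondary: resolve the per-CPU cpu_operations table and call its cpu_boot() hook (PSCI or spin-table on arm64). A minimal sketch of that shape; the fallback error value is an assumption, since the listing elides that branch:

    static int boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        /* Hand the CPU to its enable method (PSCI, spin-table, ...). */
        if (ops->cpu_boot)
            return ops->cpu_boot(cpu);

        return -EOPNOTSUPP;    /* assumed fallback for a missing hook */
    }
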
119 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up()
131 /* Now bring the CPU into our world */ in __cpu_up()
132 ret = boot_secondary(cpu, idle); in __cpu_up()
135 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); in __cpu_up()
140 * CPU was successfully started, wait for it to come online or in __cpu_up()
145 if (cpu_online(cpu)) in __cpu_up()
148 pr_crit("CPU%u: failed to come online\n", cpu); in __cpu_up()
156 pr_err("CPU%u: failed in unknown state : 0x%lx\n", in __cpu_up()
157 cpu, status); in __cpu_up()
161 if (!op_cpu_kill(cpu)) { in __cpu_up()
162 pr_crit("CPU%u: died during early boot\n", cpu); in __cpu_up()
165 pr_crit("CPU%u: may not have shut down cleanly\n", cpu); in __cpu_up()
168 pr_crit("CPU%u: is stuck in kernel\n", cpu); in __cpu_up()
170 pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); in __cpu_up()
172 pr_crit("CPU%u: does not support %luK granule\n", in __cpu_up()
173 cpu, PAGE_SIZE / SZ_1K); in __cpu_up()
178 panic("CPU%u detected unsupported configuration\n", cpu); in __cpu_up()
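
Lines 119-178 are the caller's side of the handshake: kick the CPU, wait for it to mark itself online, and decode the boot status word on failure. A condensed sketch, assuming a cpu_running completion and 5-second timeout (upstream details not visible in this listing); the failure decode is abbreviated to a comment:

    int __cpu_up(unsigned int cpu, struct task_struct *idle)
    {
        int ret = boot_secondary(cpu, idle);

        if (ret) {
            pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
            return ret;
        }

        /* Started: wait for the secondary to call set_cpu_online(). */
        wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(5000));
        if (cpu_online(cpu))
            return 0;

        pr_crit("CPU%u: failed to come online\n", cpu);
        /*
         * The full body inspects the boot status word here: kill the CPU
         * if it died early, and panic on unsupported configurations
         * (52-bit VAs, page-size granule), as lines 161-178 show.
         */
        return -EIO;    /* assumed; the exact value is elided above */
    }
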
200 * This is the secondary CPU boot entry. We're using this CPU's in secondary_start_kernel()
208 unsigned int cpu = smp_processor_id(); in secondary_start_kernel()
226 rcutree_report_cpu_starting(cpu); in secondary_start_kernel()
231 * this CPU ticks all of those. If it doesn't, the CPU will in secondary_start_kernel()
236 ops = get_cpu_ops(cpu); in secondary_start_kernel()
241 * Log the CPU info before it is marked online and might get read. in secondary_start_kernel()
244 store_cpu_topology(cpu); in secondary_start_kernel()
249 notify_cpu_starting(cpu); in secondary_start_kernel()
251 ipi_setup(cpu); in secondary_start_kernel()
253 numa_add_cpu(cpu); in secondary_start_kernel()
256 * OK, now it's safe to let the boot CPU continue. Wait for in secondary_start_kernel()
257 * the CPU migration code to notice that the CPU is online in secondary_start_kernel()
260 pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n", in secondary_start_kernel()
261 cpu, (unsigned long)mpidr, in secondary_start_kernel()
264 set_cpu_online(cpu, true); in secondary_start_kernel()
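
The secondary entry (lines 200-264) is all about ordering: RCU must see the CPU before notifiers run, and set_cpu_online() must come last so the boot CPU never observes a half-initialised secondary. A compressed sketch of that ordering, with the MMU and exception setup between the steps elided:

    asmlinkage notrace void secondary_start_kernel(void)
    {
        unsigned int cpu = smp_processor_id();
        const struct cpu_operations *ops;

        rcutree_report_cpu_starting(cpu);   /* let RCU watch this CPU */

        ops = get_cpu_ops(cpu);
        if (ops->cpu_postboot)
            ops->cpu_postboot();

        store_cpu_topology(cpu);            /* before anyone can read it */
        notify_cpu_starting(cpu);           /* CPU_STARTING notifiers */
        ipi_setup(cpu);
        numa_add_cpu(cpu);

        /* Only now is it safe for the boot CPU to continue. */
        set_cpu_online(cpu, true);
        /* ... complete the bring-up handshake and enter the idle loop ... */
    }
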
283 static int op_cpu_disable(unsigned int cpu) in op_cpu_disable()
285 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_disable()
299 return ops->cpu_disable(cpu); in op_cpu_disable()
309 unsigned int cpu = smp_processor_id(); in __cpu_disable()
312 ret = op_cpu_disable(cpu); in __cpu_disable()
316 remove_cpu_topology(cpu); in __cpu_disable()
317 numa_remove_cpu(cpu); in __cpu_disable()
320 * Take this CPU offline. Once we clear this, we can't return, in __cpu_disable()
321 * and we must not schedule until we're ready to give up the cpu. in __cpu_disable()
323 set_cpu_online(cpu, false); in __cpu_disable()
324 ipi_teardown(cpu); in __cpu_disable()
327 * OK - migrate IRQs away from this CPU in __cpu_disable()
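
Hot-unplug is the mirror image (lines 309-327): ask the enable method whether the CPU may go away at all, then unwind topology, clear the online bit, and migrate IRQs. A sketch of that sequence; irq_migrate_all_off_this_cpu() is my reading of the elided "migrate IRQs" step, not a quote:

    int __cpu_disable(void)
    {
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);    /* may this CPU be taken offline? */
        if (ret)
            return ret;

        remove_cpu_topology(cpu);
        numa_remove_cpu(cpu);

        /*
         * Past this point we cannot return and must not schedule until
         * the CPU is given up (see the comment at lines 320-321).
         */
        set_cpu_online(cpu, false);
        ipi_teardown(cpu);

        irq_migrate_all_off_this_cpu();    /* assumed: move IRQs elsewhere */

        return 0;
    }
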
334 static int op_cpu_kill(unsigned int cpu) in op_cpu_kill()
336 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_kill()
339 * If we have no means of synchronising with the dying CPU, then assume in op_cpu_kill()
346 return ops->cpu_kill(cpu); in op_cpu_kill()
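
The comment at line 339 is the whole contract: if the enable method has no cpu_kill() hook, there is nothing to synchronise against, so the caller optimistically assumes the CPU died. The visible lines complete to:

    static int op_cpu_kill(unsigned int cpu)
    {
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        /* No way to wait for the CPU to die: assume that it did. */
        if (!ops->cpu_kill)
            return 0;

        return ops->cpu_kill(cpu);
    }
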
350 * Called on the thread which is asking for a CPU to be shutdown after the
353 void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) in arch_cpuhp_cleanup_dead_cpu()
357 pr_debug("CPU%u: shutdown\n", cpu); in arch_cpuhp_cleanup_dead_cpu()
360 * Now that the dying CPU is beyond the point of no return w.r.t. in arch_cpuhp_cleanup_dead_cpu()
365 err = op_cpu_kill(cpu); in arch_cpuhp_cleanup_dead_cpu()
367 pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err); in arch_cpuhp_cleanup_dead_cpu()
371 * Called from the idle thread for the CPU which has been shutdown.
376 unsigned int cpu = smp_processor_id(); in cpu_die()
377 const struct cpu_operations *ops = get_cpu_ops(cpu); in cpu_die()
383 /* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */ in cpu_die()
387 * Actually shutdown the CPU. This must never fail. The specific hotplug in cpu_die()
389 * no dirty lines are lost in the process of shutting down the CPU. in cpu_die()
391 ops->cpu_die(cpu); in cpu_die()
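
Lines 371-391 are the dying CPU's half of the teardown handshake: leave the idle task, mask interrupts, tell the hotplug core this CPU is safe to dispose of, then jump into the enable method's cpu_die() hook. A sketch, with local_daif_mask() assumed for the interrupt-masking step:

    void __noreturn cpu_die(void)
    {
        unsigned int cpu = smp_processor_id();
        const struct cpu_operations *ops = get_cpu_ops(cpu);

        idle_task_exit();
        local_daif_mask();       /* assumed: no interrupts past here */

        /* Tell cpuhp_bp_sync_dead() that this CPU can be disposed of. */
        cpuhp_ap_report_dead();

        ops->cpu_die(cpu);       /* must never fail (see line 387) */
        BUG();
    }
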
397 static void __cpu_try_die(int cpu) in __cpu_try_die()
400 const struct cpu_operations *ops = get_cpu_ops(cpu); in __cpu_try_die()
403 ops->cpu_die(cpu); in __cpu_try_die()
408 * Kill the calling secondary CPU, early in bringup before it is turned
413 int cpu = smp_processor_id(); in cpu_die_early()
415 pr_crit("CPU%d: will not boot\n", cpu); in cpu_die_early()
417 /* Mark this CPU absent */ in cpu_die_early()
418 set_cpu_present(cpu, 0); in cpu_die_early()
423 __cpu_try_die(cpu); in cpu_die_early()
434 pr_info("CPU: All CPU(s) started at EL2\n"); in hyp_mode_check()
437 "CPU: CPUs started in inconsistent modes"); in hyp_mode_check()
439 pr_info("CPU: All CPU(s) started at EL1\n"); in hyp_mode_check()
458 * The runtime per-cpu areas have been allocated by in smp_prepare_boot_cpu()
459 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be in smp_prepare_boot_cpu()
460 * freed shortly, so we must move over to the runtime per-cpu area. in smp_prepare_boot_cpu()
479 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
482 static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) in is_mpidr_duplicate()
486 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) in is_mpidr_duplicate()
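
Line 486 shows the complete duplicate check: scan the logical-map slots already assigned (slot 0, the boot CPU, is checked separately in the parse paths below) and compare MPIDRs. Filled out from the visible loop header, the body being the obvious completion:

    static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
    {
        unsigned int i;

        for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
            if (cpu_logical_map(i) == hwid)
                return true;
        return false;
    }
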
493 * Initialize cpu operations for a logical cpu and
496 static int __init smp_cpu_setup(int cpu) in smp_cpu_setup()
500 if (init_cpu_ops(cpu)) in smp_cpu_setup()
503 ops = get_cpu_ops(cpu); in smp_cpu_setup()
504 if (ops->cpu_init(cpu)) in smp_cpu_setup()
507 set_cpu_possible(cpu, true); in smp_cpu_setup()
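
Lines 496-507 initialise the per-CPU operations and, only on success, mark the CPU possible. A sketch; the -ENODEV error values are assumptions for the elided returns:

    static int __init smp_cpu_setup(int cpu)
    {
        const struct cpu_operations *ops;

        if (init_cpu_ops(cpu))          /* pick the enable method from FW */
            return -ENODEV;             /* assumed error value */

        ops = get_cpu_ops(cpu);
        if (ops->cpu_init(cpu))
            return -ENODEV;             /* assumed error value */

        set_cpu_possible(cpu, true);    /* eligible for later bring-up */
        return 0;
    }
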
515 int arch_register_cpu(int cpu) in arch_register_cpu()
517 acpi_handle acpi_handle = acpi_get_processor_handle(cpu); in arch_register_cpu()
518 struct cpu *c = &per_cpu(cpu_devices, cpu); in arch_register_cpu()
525 /* For now block anything that looks like physical CPU Hotplug */ in arch_register_cpu()
526 if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) { in arch_register_cpu()
527 pr_err_once("Changing CPU present bit is not supported\n"); in arch_register_cpu()
536 c->hotpluggable = arch_cpu_is_hotpluggable(cpu); in arch_register_cpu()
538 return register_cpu(c, cpu); in arch_register_cpu()
542 void arch_unregister_cpu(int cpu) in arch_unregister_cpu()
544 acpi_handle acpi_handle = acpi_get_processor_handle(cpu); in arch_unregister_cpu()
545 struct cpu *c = &per_cpu(cpu_devices, cpu); in arch_unregister_cpu()
550 pr_err_once("Removing a CPU without associated ACPI handle\n"); in arch_unregister_cpu()
558 /* For now do not allow anything that looks like physical CPU HP */ in arch_unregister_cpu()
559 if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) { in arch_unregister_cpu()
560 pr_err_once("Changing CPU present bit is not supported\n"); in arch_unregister_cpu()
571 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) in acpi_cpu_get_madt_gicc()
573 return &cpu_madt_gicc[cpu]; in acpi_cpu_get_madt_gicc()
590 pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid); in acpi_map_gic_cpu_interface()
595 pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid); in acpi_map_gic_cpu_interface()
600 pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid); in acpi_map_gic_cpu_interface()
604 /* Check if GICC structure of boot CPU is available in the MADT */ in acpi_map_gic_cpu_interface()
607 pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n", in acpi_map_gic_cpu_interface()
619 /* map the logical cpu id to cpu MPIDR */ in acpi_map_gic_cpu_interface()
625 * Set-up the ACPI parking protocol cpu entries in acpi_map_gic_cpu_interface()
630 * initialize the cpu if the parking protocol is in acpi_map_gic_cpu_interface()
668 * In ACPI, SMP and CPU NUMA information is provided in separate in acpi_parse_and_init_cpus()
671 * Thus, it is simpler to first create the cpu logical map through in acpi_parse_and_init_cpus()
685 * Enumerate the possible CPU set from the device tree and build the
686 * cpu logical map array containing MPIDR values related to logical
700 pr_err("%pOF: duplicate cpu reg properties in the DT\n", in of_parse_and_init_cpus()
706 * The numbering scheme requires that the boot CPU in of_parse_and_init_cpus()
713 pr_err("%pOF: duplicate boot cpu reg property in DT\n", in of_parse_and_init_cpus()
723 * initialized and the boot cpu doesn't need in of_parse_and_init_cpus()
725 * incrementing cpu. in of_parse_and_init_cpus()
733 pr_debug("cpu logical map 0x%llx\n", hwid); in of_parse_and_init_cpus()
743 * Enumerate the possible CPU set from the device tree or ACPI and build the
744 * cpu logical map array containing MPIDR values related to logical
761 pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); in smp_init_cpus()
767 * the cpus so that cpu processor description entries (DT cpu nodes in smp_init_cpus()
768 * and ACPI MADT entries) can be retrieved by matching the cpu hwid in smp_init_cpus()
770 * If the cpu set-up fails, invalidate the cpu_logical_map entry. in smp_init_cpus()
784 unsigned int cpu; in smp_prepare_cpus()
806 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
808 if (cpu == smp_processor_id()) in smp_prepare_cpus()
811 ops = get_cpu_ops(cpu); in smp_prepare_cpus()
815 err = ops->cpu_prepare(cpu); in smp_prepare_cpus()
819 set_cpu_present(cpu, true); in smp_prepare_cpus()
820 numa_store_cpu_info(cpu); in smp_prepare_cpus()
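
The loop at lines 806-820 decides which possible CPUs become present: the boot CPU is skipped, each remaining CPU's enable method gets a cpu_prepare() call, and only CPUs that prepare cleanly are marked present. A condensed sketch (the NULL-ops guard and the handling of max_cpus are assumptions):

    void __init smp_prepare_cpus(unsigned int max_cpus)
    {
        const struct cpu_operations *ops;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
            if (cpu == smp_processor_id())
                continue;               /* the boot CPU is already up */

            ops = get_cpu_ops(cpu);
            if (!ops)
                continue;               /* assumed guard */

            if (ops->cpu_prepare(cpu))
                continue;               /* stays !present on failure */

            set_cpu_present(cpu, true);
            numa_store_cpu_info(cpu);
        }
    }
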
827 [IPI_CPU_STOP] = "CPU stop interrupts",
828 [IPI_CPU_STOP_NMI] = "CPU stop NMIs",
831 [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
841 unsigned int cpu, i; in arch_show_interrupts()
846 for_each_online_cpu(cpu) in arch_show_interrupts()
847 seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu)); in arch_show_interrupts()
860 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi()
862 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); in arch_send_call_function_single_ipi()
872 static void __noreturn local_cpu_stop(unsigned int cpu) in local_cpu_stop()
874 set_cpu_online(cpu, false); in local_cpu_stop()
891 static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) in ipi_cpu_crash_stop()
904 crash_save_cpu(regs, cpu); in ipi_cpu_crash_stop()
906 set_cpu_online(cpu, false); in ipi_cpu_crash_stop()
911 __cpu_try_die(cpu); in ipi_cpu_crash_stop()
940 int cpu; in kgdb_roundup_cpus()
942 for_each_online_cpu(cpu) { in kgdb_roundup_cpus()
944 if (cpu == this_cpu) in kgdb_roundup_cpus()
947 __ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu); in kgdb_roundup_cpus()
957 unsigned int cpu = smp_processor_id(); in do_handle_IPI()
974 ipi_cpu_crash_stop(cpu, get_irq_regs()); in do_handle_IPI()
977 local_cpu_stop(cpu); in do_handle_IPI()
1002 kgdb_nmicallback(cpu, get_irq_regs()); in do_handle_IPI()
1006 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); in do_handle_IPI()
1041 static void ipi_setup(int cpu) in ipi_setup()
1059 static void ipi_teardown(int cpu) in ipi_teardown()
1105 /* Setup the boot CPU immediately */ in set_smp_ipi_range()
1109 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule()
1111 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); in arch_smp_send_reschedule()
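
Both arch_send_call_function_single_ipi() (line 862) and arch_smp_send_reschedule() (line 1111) funnel through smp_cross_call(), which does not appear in this match list because it takes a cpumask rather than a cpu. A sketch of what it plausibly looks like, assuming the ipi_desc[] descriptors set up in ipi_setup() and the ipi_types[] names at lines 827-831:

    static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
    {
        trace_ipi_raise(target, ipi_types[ipinr]);
        __ipi_send_mask(ipi_desc[ipinr], target);
    }
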
1115 void arch_send_wakeup_ipi(unsigned int cpu) in arch_send_wakeup_ipi()
1118 * We use a scheduler IPI to wake the CPU as this avoids the need for a in arch_send_wakeup_ipi()
1121 smp_send_reschedule(cpu); in arch_send_wakeup_ipi()
1133 * The number of CPUs online, not counting this CPU (which may not be
1150 * If this cpu is the only one alive at this point in time, online or in smp_send_stop()
1156 /* Only proceed if this is the first CPU to reach this code */ in smp_send_stop()
1161 * Send an IPI to all currently online CPUs except the CPU running in smp_send_stop()
1168 * grab the CPU hotplug mutex ourselves. Worst case: in smp_send_stop()
1169 * - If a CPU comes online as we're running, we'll likely notice it in smp_send_stop()
1176 * the fact that there could be cases where a CPU can't be stopped. in smp_send_stop()
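
The comments at lines 1150-1176 describe the stop policy: bail out when this is the only live CPU, let only the first caller proceed, IPI everyone else, then busy-wait with a bounded timeout and accept that some CPUs may never stop. A condensed sketch of that policy; the one-shot flag, the timeout value, and the omitted NMI escalation are all assumptions:

    void smp_send_stop(void)
    {
        static unsigned long stop_in_progress;    /* assumed one-shot flag */
        cpumask_t mask;
        unsigned long timeout = USEC_PER_SEC;

        /* Nobody else alive, online or not: nothing to stop. */
        if (num_online_cpus() <= 1)
            return;

        /* Only the first CPU to reach this code sends the IPIs. */
        if (test_and_set_bit(0, &stop_in_progress))
            return;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        smp_cross_call(&mask, IPI_CPU_STOP);

        /* Bounded wait; some CPUs may legitimately fail to stop. */
        while (num_online_cpus() > 1 && timeout--)
            udelay(1);

        if (num_online_cpus() > 1)
            pr_warn("SMP: failed to stop secondary CPUs\n");
    }
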