Lines matching full:cpu (from arch/s390/kernel/smp.c)

13  * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
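
The split described by this header comment (logical CPU numbers everywhere else, physical CPU addresses only inside smp.c) comes down to a per-CPU table keyed by the logical number whose entries carry the physical address. Below is a minimal userspace sketch of that idea, not kernel code: the array, the illustrative address values and the helper names stand in for the per-cpu pcpu_devices data and the lookups done later in this file (smp_cpu_get_cpu_address(), pcpu_find_address(), smp_find_processor_id()).

    #include <stdio.h>

    #define NR_CPUS 4

    /* Simplified stand-in for the kernel's per-cpu struct pcpu. */
    struct pcpu {
        int address;    /* physical CPU address of this logical CPU */
    };

    /* Indexed by logical CPU number; models per_cpu(pcpu_devices, cpu).
     * The address values are illustrative only. */
    static struct pcpu pcpu_devices[NR_CPUS] = {
        { .address = 0x10 }, { .address = 0x11 },
        { .address = 0x20 }, { .address = 0x21 },
    };

    /* Logical -> physical, the direction of smp_cpu_get_cpu_address(). */
    static int cpu_address(int cpu)
    {
        return pcpu_devices[cpu].address;
    }

    /* Physical -> logical, the idea behind pcpu_find_address() and
     * smp_find_processor_id(). */
    static int find_cpu_by_address(int address)
    {
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (pcpu_devices[cpu].address == address)
                return cpu;
        return -1;
    }

    int main(void)
    {
        printf("logical 2 -> physical %#x\n", cpu_address(2));
        printf("physical 0x11 -> logical %d\n", find_cpu_by_address(0x11));
        return 0;
    }
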
18 #define KMSG_COMPONENT "cpu"
33 #include <linux/cpu.h>
80 * Pointer to the pcpu area of the boot CPU. This is required when a restart
81 * interrupt is triggered on an offline CPU. For that case accessing percpu
164 /* Status stored condition code is equivalent to cpu not running. */ in pcpu_running()
169 * Find struct pcpu by cpu address.
173 int cpu; in pcpu_find_address() local
175 for_each_cpu(cpu, mask) in pcpu_find_address()
176 if (per_cpu(pcpu_devices, cpu).address == address) in pcpu_find_address()
177 return &per_cpu(pcpu_devices, cpu); in pcpu_find_address()
192 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument
208 lc->cpu_nr = cpu; in pcpu_alloc_lowcore()
209 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore()
216 if (abs_lowcore_map(cpu, lc, true)) in pcpu_alloc_lowcore()
218 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore()
232 static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu) in pcpu_free_lowcore() argument
237 lc = lowcore_ptr[cpu]; in pcpu_free_lowcore()
242 lowcore_ptr[cpu] = NULL; in pcpu_free_lowcore()
243 abs_lowcore_unmap(cpu); in pcpu_free_lowcore()
251 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument
255 lc = lowcore_ptr[cpu]; in pcpu_prepare_secondary()
256 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); in pcpu_prepare_secondary()
257 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in pcpu_prepare_secondary()
258 lc->cpu_nr = cpu; in pcpu_prepare_secondary()
261 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_prepare_secondary()
263 lc->percpu_offset = __per_cpu_offset[cpu]; in pcpu_prepare_secondary()
275 arch_spin_lock_setup(cpu); in pcpu_prepare_secondary()
278 static void pcpu_attach_task(int cpu, struct task_struct *tsk) in pcpu_attach_task() argument
282 lc = lowcore_ptr[cpu]; in pcpu_attach_task()
295 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data) in pcpu_start_fn() argument
299 lc = lowcore_ptr[cpu]; in pcpu_start_fn()
304 pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0); in pcpu_start_fn()
310 * Call function via PSW restart on pcpu and stop the current cpu.
317 static void pcpu_delegate(struct pcpu *pcpu, int cpu, in pcpu_delegate() argument
324 lc = lowcore_ptr[cpu]; in pcpu_delegate()
331 /* Stop target cpu (if func returns this stops the current cpu). */ in pcpu_delegate()
334 /* Restart func on the target cpu and stop the current cpu. */ in pcpu_delegate()
349 "0: sigp 0,%0,%2 # sigp restart to target cpu\n" in pcpu_delegate()
351 "1: sigp 0,%1,%3 # sigp stop to current cpu\n" in pcpu_delegate()
380 * Call function on the ipl CPU.
394 int cpu; in smp_find_processor_id() local
396 for_each_present_cpu(cpu) in smp_find_processor_id()
397 if (per_cpu(pcpu_devices, cpu).address == address) in smp_find_processor_id()
398 return cpu; in smp_find_processor_id()
407 bool notrace arch_vcpu_is_preempted(int cpu) in arch_vcpu_is_preempted() argument
409 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) in arch_vcpu_is_preempted()
411 if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu))) in arch_vcpu_is_preempted()
417 void notrace smp_yield_cpu(int cpu) in smp_yield_cpu() argument
423 : : "d" (per_cpu(pcpu_devices, cpu).address)); in smp_yield_cpu()
436 int cpu; in smp_emergency_stop() local
443 for_each_cpu(cpu, &cpumask) { in smp_emergency_stop()
444 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_emergency_stop()
452 for_each_cpu(cpu, &cpumask) in smp_emergency_stop()
453 if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu))) in smp_emergency_stop()
454 cpumask_clear_cpu(cpu, &cpumask); in smp_emergency_stop()
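
The two loops above follow a stop-and-confirm pattern: an emergency-stop order is first sent to every CPU in the mask, and the mask is then polled, clearing each CPU once it reports the stopped state. A minimal userspace model of that pattern follows; the plain bitmask, the stub functions and the fixed retry bound are simplifications standing in for cpumask_t, pcpu_sigp_retry()/pcpu_stopped() and the kernel's timeout handling.

    #include <stdio.h>

    #define NR_CPUS 8

    /* Pretend hardware state: which CPUs have actually come to a stop. */
    static unsigned int stopped_state;

    /* Stand-in for sending the stop order via pcpu_sigp_retry(); in this
     * model the stop takes effect immediately. */
    static void send_stop(int cpu)
    {
        stopped_state |= 1u << cpu;
    }

    /* Stand-in for pcpu_stopped(). */
    static int cpu_stopped(int cpu)
    {
        return (stopped_state >> cpu) & 1;
    }

    int main(void)
    {
        unsigned int cpumask = 0xfe;    /* every CPU except the current one (CPU 0) */
        int cpu, retries;

        /* Phase 1: signal every CPU in the mask. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (cpumask & (1u << cpu))
                send_stop(cpu);

        /* Phase 2: poll, clearing each CPU from the mask once it is stopped. */
        for (retries = 0; cpumask && retries < 1000; retries++)
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                if ((cpumask & (1u << cpu)) && cpu_stopped(cpu))
                    cpumask &= ~(1u << cpu);

        printf("CPUs still running: %#x\n", cpumask);
        return 0;
    }
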
469 int cpu; in smp_send_stop() local
481 for_each_online_cpu(cpu) { in smp_send_stop()
482 if (cpu == smp_processor_id()) in smp_send_stop()
484 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_send_stop()
522 int cpu; in arch_send_call_function_ipi_mask() local
524 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
525 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single); in arch_send_call_function_ipi_mask()
528 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
530 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single); in arch_send_call_function_single_ipi()
534 * this function sends a 'reschedule' IPI to another CPU.
538 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
540 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule); in arch_smp_send_reschedule()
552 int smp_store_status(int cpu) in smp_store_status() argument
558 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_store_status()
559 lc = lowcore_ptr[cpu]; in smp_store_status()
576 * Collect CPU state of the previous, crashed system.
580 * The state for all CPUs except the boot CPU needs to be collected
581 * with sigp stop-and-store-status. The boot CPU state is located in
583 * will copy the boot CPU state from the HSA.
586 * The state for all CPUs except the boot CPU needs to be collected
588 * stored the registers of the boot CPU in the absolute lowcore in the
592 * The state for all CPUs except the boot CPU needs to be collected
594 * stored the registers of the boot CPU in the memory of the old system.
596 * Note that the legacy kdump mode where the old kernel stored the CPU states
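
Across the dump variants sketched in this comment the common step is the same: every CPU except the boot CPU is collected with sigp stop-and-store-status, and only the source of the boot CPU's registers differs (the HSA via zcore, or the absolute lowcore/memory of the old system). The following is a rough userspace sketch of that control flow; the enum, the stub functions and the printed messages are purely illustrative, not the kernel's actual interfaces.

    #include <stdio.h>

    #define NR_CPUS 4
    #define BOOT_CPU 0

    /* Illustrative only; not the kernel's dump-type detection. */
    enum dump_type { DUMP_ZFCP_NVME, DUMP_KDUMP };

    /* Stand-in for sigp stop-and-store-status on one CPU of the crashed system. */
    static void stop_and_store_status(int cpu)
    {
        printf("cpu %d: sigp stop-and-store-status\n", cpu);
    }

    /* Stand-ins for the two sources of the boot CPU registers. */
    static void boot_cpu_state_from_hsa(void)
    {
        printf("boot cpu: state copied from the HSA by zcore\n");
    }

    static void boot_cpu_state_from_oldmem(void)
    {
        printf("boot cpu: state taken from the old system's memory\n");
    }

    static void save_dump_cpus(enum dump_type type)
    {
        int cpu;

        /* Common step: collect every CPU except the boot CPU via SIGP. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (cpu != BOOT_CPU)
                stop_and_store_status(cpu);

        /* Only the source of the boot CPU registers differs between cases. */
        if (type == DUMP_ZFCP_NVME)
            boot_cpu_state_from_hsa();
        else
            boot_cpu_state_from_oldmem();
    }

    int main(void)
    {
        save_dump_cpus(DUMP_KDUMP);
        return 0;
    }
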
660 void smp_cpu_set_polarization(int cpu, int val) in smp_cpu_set_polarization() argument
662 per_cpu(pcpu_devices, cpu).polarization = val; in smp_cpu_set_polarization()
665 int smp_cpu_get_polarization(int cpu) in smp_cpu_get_polarization() argument
667 return per_cpu(pcpu_devices, cpu).polarization; in smp_cpu_get_polarization()
670 void smp_cpu_set_capacity(int cpu, unsigned long val) in smp_cpu_set_capacity() argument
672 per_cpu(pcpu_devices, cpu).capacity = val; in smp_cpu_set_capacity()
675 unsigned long smp_cpu_get_capacity(int cpu) in smp_cpu_get_capacity() argument
677 return per_cpu(pcpu_devices, cpu).capacity; in smp_cpu_get_capacity()
680 void smp_set_core_capacity(int cpu, unsigned long val) in smp_set_core_capacity() argument
684 cpu = smp_get_base_cpu(cpu); in smp_set_core_capacity()
685 for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++) in smp_set_core_capacity()
689 int smp_cpu_get_cpu_address(int cpu) in smp_cpu_get_cpu_address() argument
691 return per_cpu(pcpu_devices, cpu).address; in smp_cpu_get_cpu_address()
719 int cpu, nr, i; in smp_add_core() local
725 cpu = cpumask_first(avail); in smp_add_core()
727 for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { in smp_add_core()
730 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_add_core()
736 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in smp_add_core()
737 smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); in smp_add_core()
738 set_cpu_present(cpu, true); in smp_add_core()
739 if (!early && arch_register_cpu(cpu)) in smp_add_core()
740 set_cpu_present(cpu, false); in smp_add_core()
743 cpumask_clear_cpu(cpu, avail); in smp_add_core()
744 cpu = cpumask_next(cpu, avail); in smp_add_core()
762 * Add IPL core first (which got logical CPU number 0) to make sure in __smp_rescan_cpus()
763 * that all SMT threads get subsequent logical CPU numbers. in __smp_rescan_cpus()
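
Together with the smp_add_core() loop above (which walks i from 0 to smp_cpu_mtid), this ordering means the threads of one core always occupy a consecutive block of logical CPU numbers starting at the core's base CPU, which is also what smp_set_core_capacity() relies on. A small sketch of the resulting numbering, assuming an illustrative thread count per core (mtid + 1 threads, mirroring smp_cpu_mtid):

    #include <stdio.h>

    int main(void)
    {
        int mtid = 1;       /* illustrative: SMT-2, i.e. mtid + 1 threads per core */
        int nr_cores = 3;
        int cpu = 0;        /* the IPL core is added first, so it gets logical CPU 0 */
        int core, i;

        for (core = 0; core < nr_cores; core++)
            for (i = 0; i <= mtid; i++, cpu++)
                printf("core %d, thread %d -> logical cpu %d\n", core, i, cpu);
        return 0;
    }
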
786 unsigned int cpu, mtid, c_cpus, s_cpus; in smp_detect_cpus() local
790 /* Get CPU information */ in smp_detect_cpus()
793 /* Find boot CPU type */ in smp_detect_cpus()
796 for (cpu = 0; cpu < info->combined; cpu++) in smp_detect_cpus()
797 if (info->core[cpu].core_id == address) { in smp_detect_cpus()
798 /* The boot cpu dictates the cpu type. */ in smp_detect_cpus()
799 boot_core_type = info->core[cpu].type; in smp_detect_cpus()
802 if (cpu >= info->combined) in smp_detect_cpus()
803 panic("Could not find boot CPU type"); in smp_detect_cpus()
813 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
815 info->core[cpu].type != boot_core_type) in smp_detect_cpus()
817 if (cpu < info->configured) in smp_detect_cpus()
832 int cpu = raw_smp_processor_id(); in smp_start_secondary() local
842 rcutree_report_cpu_starting(cpu); in smp_start_secondary()
847 cpumask_set_cpu(cpu, &cpu_setup_mask); in smp_start_secondary()
849 notify_cpu_starting(cpu); in smp_start_secondary()
850 if (topology_cpu_dedicated(cpu)) in smp_start_secondary()
854 set_cpu_online(cpu, true); in smp_start_secondary()
861 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
863 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); in __cpu_up()
872 rc = pcpu_alloc_lowcore(pcpu, cpu); in __cpu_up()
877 * until new CPU has initialized control registers. in __cpu_up()
880 pcpu_prepare_secondary(pcpu, cpu); in __cpu_up()
881 pcpu_attach_task(cpu, tidle); in __cpu_up()
882 pcpu_start_fn(cpu, smp_start_secondary, NULL); in __cpu_up()
883 /* Wait until cpu puts itself in the online & active maps */ in __cpu_up()
884 while (!cpu_online(cpu)) in __cpu_up()
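
The bring-up sequence ends with the boot CPU busy-waiting until the new CPU marks itself online in the online/active maps. The same hand-off can be modelled in userspace with a thread and an atomic flag; this is only an analogy for the wait-until-online pattern, not a representation of the kernel code.

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Models the new CPU's bit in the online mask. */
    static atomic_int cpu_online_flag;

    /* Models smp_start_secondary(): finish per-CPU setup, then go online. */
    static void *secondary(void *arg)
    {
        (void)arg;
        /* ... per-CPU setup would happen here ... */
        atomic_store(&cpu_online_flag, 1);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        /* Models pcpu_start_fn(): kick the "secondary CPU" into its start path. */
        pthread_create(&tid, NULL, secondary, NULL);

        /* Models the wait loop: spin until the CPU has put itself online. */
        while (!atomic_load(&cpu_online_flag))
            sched_yield();

        printf("secondary cpu is online\n");
        pthread_join(tid, NULL);
        return 0;
    }

Build with -pthread (e.g. cc -pthread model.c).
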
902 int cpu; in __cpu_disable() local
906 cpu = smp_processor_id(); in __cpu_disable()
907 set_cpu_online(cpu, false); in __cpu_disable()
908 cpumask_clear_cpu(cpu, &cpu_setup_mask); in __cpu_disable()
910 /* Disable pseudo page faults on this cpu. */ in __cpu_disable()
922 void __cpu_die(unsigned int cpu) in __cpu_die() argument
926 /* Wait until target cpu is down */ in __cpu_die()
927 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in __cpu_die()
930 pcpu_free_lowcore(pcpu, cpu); in __cpu_die()
931 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
932 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); in __cpu_die()
945 unsigned int possible, sclp_max, cpu; in smp_fill_possible_mask() local
952 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
953 set_cpu_possible(cpu, true); in smp_fill_possible_mask()
1017 int cpu, val, rc, i; in cpu_configure_store() local
1028 cpu = dev->id; in cpu_configure_store()
1029 cpu = smp_get_base_cpu(cpu); in cpu_configure_store()
1031 if (cpu_online(cpu + i)) in cpu_configure_store()
1033 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in cpu_configure_store()
1043 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1045 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY; in cpu_configure_store()
1046 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1058 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1060 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED; in cpu_configure_store()
1061 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1103 static int smp_cpu_online(unsigned int cpu) in smp_cpu_online() argument
1105 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in smp_cpu_online()
1110 static int smp_cpu_pre_down(unsigned int cpu) in smp_cpu_pre_down() argument
1112 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in smp_cpu_pre_down()
1118 bool arch_cpu_is_hotpluggable(int cpu) in arch_cpu_is_hotpluggable() argument
1120 return !!cpu; in arch_cpu_is_hotpluggable()
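
The !!cpu above means every CPU except logical CPU 0 is reported as hotpluggable; logical CPU 0 is the IPL core (added first, see __smp_rescan_cpus() above), so the boot CPU is excluded from hotplug.
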
1123 int arch_register_cpu(int cpu) in arch_register_cpu() argument
1125 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in arch_register_cpu()
1128 c->hotpluggable = arch_cpu_is_hotpluggable(cpu); in arch_register_cpu()
1129 rc = register_cpu(c, cpu); in arch_register_cpu()