/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/os_info.h>
enum {
        ec_schedule = 0,
        ec_call_function_single,
        ec_stop_cpu,
};
static DEFINE_PER_CPU(struct cpu *, cpu_device);
struct pcpu {
        struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
        signed char state;              /* physical cpu state */
        signed char polarization;       /* physical polarization */
        u16 address;                    /* physical cpu address */
};
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

static unsigned int smp_max_threads __initdata = -1U;
static int __init early_nosmt(char *s)
{
        smp_max_threads = 1;
        return 0;
}
early_param("nosmt", early_nosmt);
static int __init early_smt(char *s)
{
        get_option(&s, &smp_max_threads);
        return 0;
}
early_param("smt", early_smt);
/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
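
/*
 * Illustrative sketch, not part of the original file: a writer of the
 * state or polarization members is expected to follow this pattern.
 * The helper name is hypothetical; real call sites such as
 * cpu_configure_store() below open-code the same sequence.
 */
static inline void pcpu_set_state_protected(int cpu, signed char state)
{
        mutex_lock(&smp_cpu_state_mutex);
        pcpu_devices[cpu].state = state;
        mutex_unlock(&smp_cpu_state_mutex);
}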
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
                                    u32 *status)
{
        int cc;

        while (1) {
                cc = __pcpu_sigp(addr, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        return cc;
                cpu_relax();
        }
}
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
        int cc, retry;

        for (retry = 0; ; retry++) {
                cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        break;
                if (retry >= 3)
                        udelay(10);
        }
        return cc;
}
static inline int pcpu_stopped(struct pcpu *pcpu)
{
        u32 uninitialized_var(status);

        if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
                        0, &status) != SIGP_CC_STATUS_STORED)
                return 0;
        return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
        if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
                        0, NULL) != SIGP_CC_STATUS_STORED)
                return 1;
        /* Status stored condition code is equivalent to cpu not running. */
        return 0;
}
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
        int cpu;

        for_each_cpu(cpu, mask)
                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
        return NULL;
}
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
        int order;

        if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
                return;
        order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
        pcpu_sigp_retry(pcpu, order, 0);
}
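
/*
 * Illustrative sketch, not part of the original file: because
 * pcpu_ec_call() uses test_and_set_bit(), back-to-back requests for
 * the same ec bit collapse into a single SIGP; the receiver clears
 * the whole mask atomically in smp_handle_ext_call() below. The
 * function name is hypothetical.
 */
static inline void example_kick_twice(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule); /* sends one SIGP */
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule); /* bit already set, no SIGP */
}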
#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
        unsigned long async_stack, panic_stack;
        struct _lowcore *lc;

        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
                async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
                panic_stack = __get_free_page(GFP_KERNEL);
                if (!pcpu->lowcore || !panic_stack || !async_stack)
                        goto out;
        } else {
                async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
                panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
        }
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
        lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        if (MACHINE_HAS_VX)
                lc->vector_save_area_addr =
                        (unsigned long) &lc->vector_save_area;
        if (vdso_alloc_per_cpu(lc))
                goto out;
        lowcore_ptr[cpu] = lc;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
        return 0;
out:
        if (pcpu != &pcpu_devices[0]) {
                free_page(panic_stack);
                free_pages(async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
}
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[pcpu - pcpu_devices] = NULL;
        vdso_free_per_cpu(pcpu->lowcore);
        if (pcpu == &pcpu_devices[0])
                return;
        free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
        free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
        free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
        struct _lowcore *lc = pcpu->lowcore;

        if (MACHINE_HAS_TLB_LC)
                cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        atomic_inc(&init_mm.context.attach_count);
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->user_timer = lc->system_timer = lc->steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
               sizeof(lc->stfle_fac_list));
        memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
               sizeof(lc->alt_stfle_fac_list));
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
        struct _lowcore *lc = pcpu->lowcore;
        struct thread_info *ti = task_thread_info(tsk);

        lc->kernel_stack = (unsigned long) task_stack_page(tsk)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->thread_info = (unsigned long) task_thread_info(tsk);
        lc->current_task = (unsigned long) tsk;
        lc->current_pid = tsk->pid;
        lc->user_timer = ti->user_timer;
        lc->system_timer = ti->system_timer;
        lc->steal_timer = 0;
}
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
        struct _lowcore *lc = pcpu->lowcore;

        lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        lc->restart_source = -1UL;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
                          void *data, unsigned long stack)
{
        struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned long source_cpu = stap();

        __load_psw_mask(PSW_KERNEL_BITS);
        if (pcpu->address == source_cpu)
                func(data);     /* should not return */
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
        mem_assign_absolute(lc->restart_stack, stack);
        mem_assign_absolute(lc->restart_fn, (unsigned long) func);
        mem_assign_absolute(lc->restart_data, (unsigned long) data);
        mem_assign_absolute(lc->restart_source, source_cpu);
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,%3 # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
                "2:     j       2b\n"
                : : "d" (pcpu->address), "d" (source_cpu),
                    "K" (SIGP_RESTART), "K" (SIGP_STOP)
                : "0", "1", "cc");
        for (;;) ;
}
/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
        register unsigned long reg1 asm ("1") = (unsigned long) mtid;
        int cc;

        if (smp_cpu_mtid == mtid)
                return 0;
        asm volatile(
                "       sigp    %1,0,%2 # sigp set multi-threading\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
                : "cc");
        if (cc == 0) {
                smp_cpu_mtid = mtid;
                smp_cpu_mt_shift = 0;
                while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
                        smp_cpu_mt_shift++;
                pcpu_devices[0].address = stap();
        }
        return cc;
}
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
        struct pcpu *pcpu;

        /* Use the current cpu if it is online. */
        pcpu = pcpu_find_address(cpu_online_mask, stap());
        if (!pcpu)
                /* Use the first online cpu. */
                pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
        pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}
/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
        pcpu_delegate(&pcpu_devices[0], func, data,
                      pcpu_devices->lowcore->panic_stack -
                      PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (pcpu_devices[cpu].address == address)
                        return cpu;
        return -1;
}

int smp_vcpu_scheduled(int cpu)
{
        return pcpu_running(pcpu_devices + cpu);
}
void smp_yield_cpu(int cpu)
{
        if (MACHINE_HAS_DIAG9C) {
                diag_stat_inc_norecursion(DIAG_STAT_X09C);
                asm volatile("diag %0,0,0x9c"
                             : : "d" (pcpu_devices[cpu].address));
        } else if (MACHINE_HAS_DIAG44) {
                diag_stat_inc_norecursion(DIAG_STAT_X044);
                asm volatile("diag 0,0,0x44");
        }
}
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
        u64 end;
        int cpu;

        /* 1000000 us << 12 is one second in TOD clock format (bit 51 = 1 us) */
        end = get_tod_clock() + (1000000UL << 12);
        for_each_cpu(cpu, cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                set_bit(ec_stop_cpu, &pcpu->ec_mask);
                while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
                                   0, NULL) == SIGP_CC_BUSY &&
                       get_tod_clock() < end)
                        cpu_relax();
        }
        while (get_tod_clock() < end) {
                for_each_cpu(cpu, cpumask)
                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, cpumask);
                if (cpumask_empty(cpumask))
                        break;
                cpu_relax();
        }
}
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
        cpumask_t cpumask;
        int cpu;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        trace_hardirqs_off();

        debug_set_critical();
        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);

        if (oops_in_progress)
                smp_emergency_stop(&cpumask);

        /* stop all processors */
        for_each_cpu(cpu, &cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
                while (!pcpu_stopped(pcpu))
                        cpu_relax();
        }
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
        unsigned long bits;

        /* handle bit signal external calls */
        bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
                                  unsigned int param32, unsigned long param64)
{
        inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
        smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
        unsigned long orval;
        unsigned long andval;
        int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];

        __ctl_store(cregs, 0, 15);
        cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
        __ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
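
/*
 * Illustrative usage sketch, not part of the original file: set and
 * clear a control register bit on every cpu in the system. The CR
 * number and bit position are purely an example; the bit argument
 * matches the "1UL << bit" shift used by the callbacks above.
 */
static inline void example_toggle_cr0_bit(int bit)
{
        smp_ctl_set_bit(0, bit);        /* OR the bit in on all cpus */
        smp_ctl_clear_bit(0, bit);      /* AND the bit out on all cpus */
}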
#ifdef CONFIG_CRASH_DUMP

static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext,
                                         u16 address, int is_boot_cpu)
{
        void *lc = (void *)(unsigned long) store_prefix();
        unsigned long vx_sa;

        if (is_boot_cpu) {
                /* Copy the registers of the boot CPU. */
                copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
                                 SAVE_AREA_BASE - PAGE_SIZE, 0);
                if (MACHINE_HAS_VX)
                        save_vx_regs_safe(sa_ext->vx_regs);
                return;
        }
        /* Get the registers of a non-boot cpu. */
        __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
        memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
        if (!MACHINE_HAS_VX)
                return;
        /* Get the VX registers */
        vx_sa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!vx_sa)
                panic("could not allocate memory for VX save area\n");
        __pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
        memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
        memblock_free(vx_sa, PAGE_SIZE);
}
int smp_store_status(int cpu)
{
        unsigned long vx_sa;
        struct pcpu *pcpu;

        pcpu = pcpu_devices + cpu;
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
                              0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        if (!MACHINE_HAS_VX)
                return 0;
        vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
        __pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
                          vx_sa, NULL);
        return 0;
}

#endif /* CONFIG_CRASH_DUMP */
/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will allocate the save area and copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    The state of all CPUs is stored in ELF sections in the memory of the
 *    old system. The ELF sections are picked up by the crash_dump code
 *    via elfcorehdr_addr.
 */
void __init smp_save_dump_cpus(void)
{
#ifdef CONFIG_CRASH_DUMP
        int addr, cpu, boot_cpu_addr, max_cpu_addr;
        struct save_area_ext *sa_ext;
        bool is_boot_cpu;

        if (is_kdump_kernel())
                /* Previous system stored the CPU states. Nothing to do. */
                return;
        if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
                /* No previous system present, normal boot. */
                return;
        /* Set multi-threading state to the previous system. */
        pcpu_set_smt(sclp.mtid_prev);
        max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
                    SIGP_CC_NOT_OPERATIONAL)
                        continue;
                cpu += 1;
        }
        dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8);
        dump_save_areas.count = cpu;
        boot_cpu_addr = stap();
        for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
                    SIGP_CC_NOT_OPERATIONAL)
                        continue;
                sa_ext = (void *) memblock_alloc(sizeof(*sa_ext), 8);
                dump_save_areas.areas[cpu] = sa_ext;
                if (!sa_ext)
                        panic("could not allocate memory for save area\n");
                is_boot_cpu = (addr == boot_cpu_addr);
                cpu += 1;
                if (is_boot_cpu && !OLDMEM_BASE)
                        /* Skip boot CPU for standard zfcp dump. */
                        continue;
                /* Get state for this CPU. */
                __smp_store_cpu_state(sa_ext, addr, is_boot_cpu);
        }
#endif /* CONFIG_CRASH_DUMP */
}
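
#ifdef CONFIG_CRASH_DUMP
/*
 * Illustrative sketch, not part of the original file: the four cases
 * described in the comment above smp_save_dump_cpus(), expressed as a
 * single classifier. The enum and function names are hypothetical.
 */
enum dump_case { DUMP_ZFCP, DUMP_ZFCP_KDUMP, DUMP_KDUMP_NOSTATE, DUMP_KDUMP_ELF };

static enum dump_case __init example_classify_dump(void)
{
        if (!OLDMEM_BASE && ipl_info.type == IPL_TYPE_FCP_DUMP)
                return DUMP_ZFCP;          /* case 1: standard zfcp dump */
        if (OLDMEM_BASE && ipl_info.type == IPL_TYPE_FCP_DUMP)
                return DUMP_ZFCP_KDUMP;    /* case 2: stand-alone kdump for SCSI */
        if (OLDMEM_BASE && !is_kdump_kernel())
                return DUMP_KDUMP_NOSTATE; /* case 3: old kernel stored no state */
        return DUMP_KDUMP_ELF;             /* case 4: state in ELF sections */
}
#endif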
void smp_cpu_set_polarization(int cpu, int val)
{
        pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
        return pcpu_devices[cpu].polarization;
}
static struct sclp_core_info *smp_get_core_info(void)
{
        static int use_sigp_detection;
        struct sclp_core_info *info;
        int address;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info && (use_sigp_detection || sclp_get_core_info(info))) {
                use_sigp_detection = 1;
                for (address = 0;
                     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
                     address += (1U << smp_cpu_mt_shift)) {
                        if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
                            SIGP_CC_NOT_OPERATIONAL)
                                continue;
                        info->core[info->configured].core_id =
                                address >> smp_cpu_mt_shift;
                        info->configured++;
                }
                info->combined = info->configured;
        }
        return info;
}
static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
        struct pcpu *pcpu;
        cpumask_t avail;
        int cpu, nr, i, j;
        u16 address;

        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
        cpu = cpumask_first(&avail);
        for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
                if (sclp.has_core_type && info->core[i].type != boot_core_type)
                        continue;
                address = info->core[i].core_id << smp_cpu_mt_shift;
                for (j = 0; j <= smp_cpu_mtid; j++) {
                        if (pcpu_find_address(cpu_present_mask, address + j))
                                continue;
                        pcpu = pcpu_devices + cpu;
                        pcpu->address = address + j;
                        pcpu->state =
                                (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
                                CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
                        smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                        set_cpu_present(cpu, true);
                        if (sysfs_add && smp_add_present_cpu(cpu) != 0)
                                set_cpu_present(cpu, false);
                        else
                                nr++;
                        cpu = cpumask_next(cpu, &avail);
                        if (cpu >= nr_cpu_ids)
                                break;
                }
        }
        return nr;
}
static void __init smp_detect_cpus(void)
{
        unsigned int cpu, mtid, c_cpus, s_cpus;
        struct sclp_core_info *info;
        u16 address;

        /* Get CPU information */
        info = smp_get_core_info();
        if (!info)
                panic("smp_detect_cpus failed to allocate memory\n");

        /* Find boot CPU type */
        if (sclp.has_core_type) {
                address = stap();
                for (cpu = 0; cpu < info->combined; cpu++)
                        if (info->core[cpu].core_id == address) {
                                /* The boot cpu dictates the cpu type. */
                                boot_core_type = info->core[cpu].type;
                                break;
                        }
                if (cpu >= info->combined)
                        panic("Could not find boot CPU type");
        }

        /* Set multi-threading state for the current system */
        mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
        mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
        pcpu_set_smt(mtid);

        /* Print number of CPUs */
        c_cpus = s_cpus = 0;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (sclp.has_core_type &&
                    info->core[cpu].type != boot_core_type)
                        continue;
                if (cpu < info->configured)
                        c_cpus += smp_cpu_mtid + 1;
                else
                        s_cpus += smp_cpu_mtid + 1;
        }
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

        /* Add CPUs present at boot */
        get_online_cpus();
        __smp_rescan_cpus(info, 0);
        put_online_cpus();
        kfree(info);
}
/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
        S390_lowcore.last_update_clock = get_tod_clock();
        S390_lowcore.restart_stack = (unsigned long) restart_stack;
        S390_lowcore.restart_fn = (unsigned long) do_restart;
        S390_lowcore.restart_data = 0;
        S390_lowcore.restart_source = -1UL;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
        vtime_init();
        pfault_init();
        notify_cpu_starting(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        inc_irq_stat(CPU_RST);
        local_irq_enable();
        cpu_startup_entry(CPUHP_ONLINE);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        struct pcpu *pcpu;
        int base, i, rc;

        pcpu = pcpu_devices + cpu;
        if (pcpu->state != CPU_STATE_CONFIGURED)
                return -EIO;
        base = cpu - (cpu % (smp_cpu_mtid + 1));
        for (i = 0; i <= smp_cpu_mtid; i++) {
                if (base + i < nr_cpu_ids)
                        if (cpu_online(base + i))
                                break;
        }
        /*
         * If this is the first CPU of the core to get online
         * do an initial CPU reset.
         */
        if (i > smp_cpu_mtid &&
            pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
            SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;

        rc = pcpu_alloc_lowcore(pcpu, cpu);
        if (rc)
                return rc;
        pcpu_prepare_secondary(pcpu, cpu);
        pcpu_attach_task(pcpu, tidle);
        pcpu_start_fn(pcpu, smp_start_secondary, NULL);
        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu) || !cpu_active(cpu))
                cpu_relax();
        return 0;
}
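
/*
 * Worked example, not part of the original file: with SMT-2
 * (smp_cpu_mtid == 1) the core base of a logical cpu in __cpu_up()
 * is base = cpu - (cpu % 2), so cpu 5 belongs to core base 4 and its
 * siblings are cpus 4 and 5; only the first sibling to come online
 * triggers the initial CPU reset. The helper name is hypothetical.
 */
static inline unsigned int example_core_base(unsigned int cpu)
{
        return cpu - (cpu % (smp_cpu_mtid + 1));
}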
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
        get_option(&s, &setup_possible_cpus);
        return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
        unsigned long cregs[16];

        /* Handle possible pending IPIs */
        smp_handle_ext_call();
        set_cpu_online(smp_processor_id(), false);
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
        /* Disable interrupt sources via control register. */
        __ctl_store(cregs, 0, 15);
        cregs[0] &= ~0x0000ee70UL;      /* disable all external interrupts */
        cregs[6] &= ~0xff000000UL;      /* disable all I/O interrupts */
        cregs[14] &= ~0x1f000000UL;     /* disable most machine checks */
        __ctl_load(cregs, 0, 15);
        clear_cpu_flag(CIF_NOHZ_DELAY);
        return 0;
}
void __cpu_die(unsigned int cpu)
{
        struct pcpu *pcpu;

        /* Wait until target cpu is down */
        pcpu = pcpu_devices + cpu;
        while (!pcpu_stopped(pcpu))
                cpu_relax();
        pcpu_free_lowcore(pcpu);
        atomic_dec(&init_mm.context.attach_count);
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
        if (MACHINE_HAS_TLB_LC)
                cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
        idle_task_exit();
        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
        for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_fill_possible_mask(void)
{
        unsigned int possible, sclp_max, cpu;

        sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
        sclp_max = min(smp_max_threads, sclp_max);
        sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
        possible = setup_possible_cpus ?: nr_cpu_ids;
        possible = min(possible, sclp_max);
        for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
}
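
/*
 * Worked example, not part of the original file (all numbers purely
 * illustrative): with sclp.mtid = 0 and sclp.mtid_cp = 1 the machine
 * offers max(0, 1) + 1 = 2 threads per core; an "smt=1" kernel
 * parameter would clamp that to 1. With sclp.max_cores = 64 this
 * gives sclp_max = 64 * 2 = 128, so without a "possible_cpus="
 * override at most min(nr_cpu_ids, 128) cpus become possible.
 */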
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1201");
        /* request the 0x1202 external call external interrupt */
        if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1202");
        smp_detect_cpus();
}
void __init smp_prepare_boot_cpu(void)
{
        struct pcpu *pcpu = pcpu_devices;

        pcpu->state = CPU_STATE_CONFIGURED;
        pcpu->address = stap();
        pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
        set_cpu_present(0, true);
        set_cpu_online(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static ssize_t cpu_configure_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct pcpu *pcpu;
        int cpu, val, rc, i;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
        cpu = dev->id;
        cpu -= cpu % (smp_cpu_mtid + 1);
        if (cpu == 0)
                goto out;
        for (i = 0; i <= smp_cpu_mtid; i++)
                if (cpu_online(cpu + i))
                        goto out;
        pcpu = pcpu_devices + cpu;
        rc = 0;
        switch (val) {
        case 0:
                if (pcpu->state != CPU_STATE_CONFIGURED)
                        break;
                rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_STANDBY;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        case 1:
                if (pcpu->state != CPU_STATE_STANDBY)
                        break;
                rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_CONFIGURED;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t show_cpu_address(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
        &dev_attr_configure.attr,
#endif
        &dev_attr_address.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};
static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct device *s = &per_cpu(cpu_device, cpu)->dev;
        int err = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                break;
        case CPU_DEAD:
                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
                break;
        }
        return notifier_from_errno(err);
}
static int smp_add_present_cpu(int cpu)
{
        struct device *s;
        struct cpu *c;
        int rc;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
        per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        if (cpu_online(cpu)) {
                rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                if (rc)
                        goto out_online;
        }
        rc = topology_cpu_init(c);
        if (rc)
                goto out_topology;
        return 0;

out_topology:
        if (cpu_online(cpu))
                sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu(c);
#endif
out:
        return rc;
}
#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
        struct sclp_core_info *info;
        int nr;

        info = smp_get_core_info();
        if (!info)
                return -ENOMEM;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        nr = __smp_rescan_cpus(info, 1);
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        kfree(info);
        if (nr)
                topology_schedule_update();
        return 0;
}
static ssize_t __ref rescan_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        int rc;

        rc = smp_rescan_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
static int __init s390_smp_init(void)
{
        int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
        rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
        if (rc)
                return rc;
#endif
        cpu_notifier_register_begin();
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        goto out;
        }

        __hotcpu_notifier(smp_cpu_notify, 0);

out:
        cpu_notifier_register_done();
        return rc;
}
subsys_initcall(s390_smp_init);