/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
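/*
 * For reference: PM_TIMER_FREQUENCY is 3579545 Hz, so one PM timer tick
 * is roughly 279ns and US_TO_PM_TIMER_TICKS(100) works out to about
 * 357 ticks.
 */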
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif

#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
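/* e.g. PM_TIMER_TICKS_TO_US(3580) is ~1000us, the inverse of the tick macro */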
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
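/*
 * e.g. at HZ=250 the default mask is (1U << 10) - 1 = 0x3FF, i.e.
 * 10 jiffies = 40ms of bus-master history, consistent with the table
 * above.
 */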
static int acpi_processor_set_power_policy(struct acpi_processor *pr);
#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
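/*
 * latency_factor scales a state's exit latency into its cpuidle
 * target_residency (see acpi_processor_setup_cpuidle() below): with the
 * default factor of 2, a state with 100us exit latency is only worth
 * entering for expected idle periods of at least 200us.
 */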
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION, "1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
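/*
 * The PM timer is 24 or 32 bits wide (ACPI_FADT_32BIT_TIMER); t2 < t1
 * means the counter wrapped.  For a 24-bit timer with t1 = 0x00FFFF00
 * and t2 = 0x00000100, the helpers above yield (0x00FFFFFF - t1) + t2 =
 * 0x1FF ticks, roughly 143us at 3.579545 MHz.
 */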
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old && old->type == ACPI_STATE_C3) {
		/* Disable bus master reload */
		if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/* Prepare to use new state. */
	if (new->type == ACPI_STATE_C3) {
		/* Enable bus master reload */
		if ((!old || old->type != ACPI_STATE_C3) && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
	}

	pr->power.state = new;
}
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}
static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
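/*
 * Note: ACPI_CSTATE_FFH entries come from _CST packages whose register
 * is FIXED_HARDWARE and are typically entered via MWAIT on x86, while
 * ACPI_CSTATE_SYSTEMIO entries are entered by reading the P_LVLx port;
 * the chipset snoops that read and stops the processor clock.
 */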
#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}
/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}
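/*
 * cx points into pr->power.states[], so the pointer difference above is
 * simply the C-state index; broadcast mode is entered only for states
 * at or beyond the shallowest one known to stop the local APIC timer.
 */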
#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();
		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;
		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif
	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C2, so notify users */
		mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C3, so notify users */
		mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;
#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
	} else
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
		    system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;
	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;
	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}
	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
#endif /* !CONFIG_CPU_IDLE */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif
	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
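/*
 * Per the ACPI specification, _CST returns a package of the form
 *
 *   Package { Count, Package { Register, Type, Latency, Power }, ... }
 *
 * where Register is a buffer describing how the state is entered, Type
 * is 1..3 for C1..C3, Latency is the worst-case entry/exit latency in
 * microseconds, and Power is the average power consumption in mW.  The
 * parser below walks exactly that layout.
 */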
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}
	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;
		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}
		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}
	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}
	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}
	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}
	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;
		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);
#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif
	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());
	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);
		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}
		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");
		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};
#endif
#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
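/*
 * Per the ACPI spec, BM_RLD makes bus-master requests pop the CPU out
 * of C3 back to C0, so it only needs to be set while a C3-type state is
 * the target; the two branches above keep the bit in sync with that.
 */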
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	return 0;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}
	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_state_timer_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	mark_tsc_unstable("TSC halts in idle");
#endif

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += ticks_elapsed(t1, t2);
	return ticks_elapsed_in_us(t1, t2);
}
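/*
 * The value returned here (time spent idle, in microseconds) becomes
 * the device's last_residency, which the cpuidle governors use to judge
 * how well their state choice matched the actual idle period.
 */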
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (acpi_idle_bm_check()) {
		cx = pr->power.bm_state;

		acpi_idle_update_bm_rld(pr, cx);

		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		acpi_idle_do_entry(cx);
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	} else {
		acpi_idle_update_bm_rld(pr, cx);
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);

		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		acpi_idle_do_entry(cx);
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

		spin_lock(&c3_lock);
		/* Re-enable bus master arbitration */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	mark_tsc_unstable("TSC halts in idle");
#endif

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += ticks_elapsed(t1, t2);
	return ticks_elapsed_in_us(t1, t2);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;
#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->enter = acpi_idle_enter_c1;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
	}

	dev->state_count = count;
	if (!count)
		return -EINVAL;

	/* find the deepest state that can handle active BM */
	if (pr->flags.bm_check) {
		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
			if (pr->power.states[i].type == ACPI_STATE_C3)
				break;
		pr->power.bm_state = &pr->power.states[i-1];
	}

	return 0;
}
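/*
 * With, say, C1-C3 all valid, the loop above stops at the first C3
 * entry, so bm_state points at the deepest non-C3 state (typically C2);
 * acpi_idle_enter_bm() falls back to it while bus mastering activity is
 * detected.
 */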
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}
#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}
	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command,
				       acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}
	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");
#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}
	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));
#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle). Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}