cpufreq: interactive: set floor for boosted speed
[android-x86/kernel.git] drivers/cpufreq/cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static atomic_t active_count = ATOMIC_INIT(0);

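/*
 * Per-CPU governor state: the sampling timer, the idle-time bookkeeping used
 * to compute load, the current target frequency, and the floor frequency
 * (with the time it was last validated) below which the speed may not drop
 * until min_sample_time has elapsed.
 */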
struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        int timer_idlecancel;
        u64 time_in_idle;
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
        u64 target_set_time;
        u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* A realtime task handles frequency scaling up; a workqueue handles scaling down */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;

/* Hi speed to bump to from lo speed when a load burst occurs (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Boost pulse to hispeed on touchscreen input.
 */

static int input_boost_val;

struct cpufreq_interactive_inputopen {
        struct input_handle *handle;
        struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;

/*
 * Non-zero means longer-term speed boost active.
 */

static int boost_val;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
        .name = "interactive",
        .governor = cpufreq_governor_interactive,
        .max_transition_latency = 10000000,
        .owner = THIS_MODULE,
};

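/*
 * Per-CPU sampling timer.  Computes the load over the last sample window,
 * chooses a new target frequency from the policy table, and hands the change
 * to the realtime up task (for increases) or the down workqueue (for
 * decreases).
 */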
static void cpufreq_interactive_timer(unsigned long data)
{
        unsigned int delta_idle;
        unsigned int delta_time;
        int cpu_load;
        int load_since_change;
        u64 time_in_idle;
        u64 idle_exit_time;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        u64 now_idle;
        unsigned int new_freq;
        unsigned int index;
        unsigned long flags;

        smp_rmb();

        if (!pcpu->governor_enabled)
                goto exit;

        /*
         * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
         * this lets idle exit know the current idle time sample has
         * been processed, and idle exit can generate a new sample and
         * re-arm the timer.  This prevents a concurrent idle
         * exit on that CPU from writing a new set of info at the same time
         * the timer function runs (the timer function can't use that info
         * until more time passes).
         */
        time_in_idle = pcpu->time_in_idle;
        idle_exit_time = pcpu->idle_exit_time;
        now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
        smp_wmb();

        /* If we raced with cancelling a timer, skip. */
        if (!idle_exit_time)
                goto exit;

        delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
                                                  idle_exit_time);

        /*
         * If timer ran less than 1ms after short-term sample started, retry.
         */
        if (delta_time < 1000)
                goto rearm;

        if (delta_idle > delta_time)
                cpu_load = 0;
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

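        /*
         * Illustrative example: with the default 20 ms timer_rate, a window
         * in which the CPU was idle for 5 ms gives
         * cpu_load = 100 * (20000 - 5000) / 20000 = 75.
         */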
        delta_idle = (unsigned int) cputime64_sub(now_idle,
                                                pcpu->target_set_time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
                                                  pcpu->target_set_time);

        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
        else
                load_since_change =
                        100 * (delta_time - delta_idle) / delta_time;

        /*
         * Choose greater of short-term load (since last idle timer
         * started or timer function re-armed itself) or long-term load
         * (since last frequency change).
         */
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;

        if (cpu_load >= go_hispeed_load || boost_val) {
                if (pcpu->target_freq <= pcpu->policy->min) {
                        new_freq = hispeed_freq;
                } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;

                        if (new_freq < hispeed_freq)
                                new_freq = hispeed_freq;

                        if (pcpu->target_freq == hispeed_freq &&
                            new_freq > hispeed_freq &&
                            cputime64_sub(pcpu->timer_run_time,
                                          pcpu->target_set_time)
                            < above_hispeed_delay_val) {
                                trace_cpufreq_interactive_notyet(data, cpu_load,
                                                                 pcpu->target_freq,
                                                                 new_freq);
                                goto rearm;
                        }
                }
        } else {
                new_freq = pcpu->policy->max * cpu_load / 100;
        }

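        /*
         * Illustrative example: a policy->max of 1200000 kHz at 50% load
         * yields a raw target of 600000 kHz; the table lookup below
         * (CPUFREQ_RELATION_H) then picks the highest supported frequency at
         * or below that value.
         */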
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
                pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
                             (int) data);
                goto rearm;
        }

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (cputime64_sub(pcpu->timer_run_time,
                                  pcpu->floor_validate_time)
                    < min_sample_time) {
                        trace_cpufreq_interactive_notyet(data, cpu_load,
                                         pcpu->target_freq, new_freq);
                        goto rearm;
                }
        }

        pcpu->floor_freq = new_freq;
        pcpu->floor_validate_time = pcpu->timer_run_time;

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(data, cpu_load,
                                                  pcpu->target_freq, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         new_freq);
        pcpu->target_set_time_in_idle = now_idle;
        pcpu->target_set_time = pcpu->timer_run_time;

        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
                cpumask_set_cpu(data, &down_cpumask);
                spin_unlock_irqrestore(&down_cpumask_lock, flags);
                queue_work(down_wq, &freq_scale_down_work);
        } else {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&up_cpumask_lock, flags);
                cpumask_set_cpu(data, &up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);
                wake_up_process(up_task);
        }

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer)) {
                /*
                 * If already at min: if that CPU is idle, don't set timer.
                 * Else cancel the timer if that CPU goes idle.  We don't
                 * need to re-evaluate speed until the next idle exit.
                 */
                if (pcpu->target_freq == pcpu->policy->min) {
                        smp_rmb();

                        if (pcpu->idling)
                                goto exit;

                        pcpu->timer_idlecancel = 1;
                }

                pcpu->time_in_idle = get_cpu_idle_time_us(
                        data, &pcpu->idle_exit_time);
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

exit:
        return;
}

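/*
 * Idle notifier hooks: idle_start decides whether the sampling timer needs
 * to keep running while this CPU sleeps, and idle_end (below) restarts the
 * sample window when the CPU becomes busy again.
 */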
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!pcpu->governor_enabled)
                return;

        pcpu->idling = 1;
        smp_wmb();
        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle. Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending) {
                        pcpu->time_in_idle = get_cpu_idle_time_us(
                                smp_processor_id(), &pcpu->idle_exit_time);
                        pcpu->timer_idlecancel = 0;
                        mod_timer(&pcpu->cpu_timer,
                                  jiffies + usecs_to_jiffies(timer_rate));
                }
#endif
        } else {
                /*
                 * If at min speed and entering idle after load has
                 * already been evaluated, and a timer has been set just in
                 * case the CPU suddenly goes busy, cancel that timer.  The
                 * CPU didn't go busy; we'll recheck things upon idle exit.
                 */
                if (pending && pcpu->timer_idlecancel) {
                        del_timer(&pcpu->cpu_timer);
                        /*
                         * Ensure last timer run time is after current idle
                         * sample start time, so next idle exit will always
                         * start a new idle sampling period.
                         */
                        pcpu->idle_exit_time = 0;
                        pcpu->timer_idlecancel = 0;
                }
        }

}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        pcpu->idling = 0;
        smp_wmb();

        /*
         * Arm the timer for 1-2 ticks later if not already, and if the timer
         * function has already processed the previous load sampling
         * interval.  (If the timer is not pending but has not processed
         * the previous interval, it is probably racing with us on another
         * CPU.  Let it compute load based on the previous sample and then
         * re-arm the timer for another interval when it's done, rather
         * than updating the interval start time to be "now", which doesn't
         * give the timer function enough time to make a decision on this
         * run.)
         */
        if (timer_pending(&pcpu->cpu_timer) == 0 &&
            pcpu->timer_run_time >= pcpu->idle_exit_time &&
            pcpu->governor_enabled) {
                pcpu->time_in_idle =
                        get_cpu_idle_time_us(smp_processor_id(),
                                             &pcpu->idle_exit_time);
                pcpu->timer_idlecancel = 0;
                mod_timer(&pcpu->cpu_timer,
                          jiffies + usecs_to_jiffies(timer_rate));
        }

}

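/*
 * Realtime kthread that services up_cpumask: for each flagged CPU it applies
 * the highest target frequency requested by any CPU sharing the policy.
 * Speed increases run in an RT task so they are not delayed behind other
 * work; decreases go through the ordinary workqueue handler below.
 */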
static int cpufreq_interactive_up_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&up_cpumask_lock, flags);

                if (cpumask_empty(&up_cpumask)) {
                        spin_unlock_irqrestore(&up_cpumask_lock, flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&up_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = up_cpumask;
                cpumask_clear(&up_cpumask);
                spin_unlock_irqrestore(&up_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        smp_rmb();

                        if (!pcpu->governor_enabled)
                                continue;

                        mutex_lock(&set_speed_lock);

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        mutex_unlock(&set_speed_lock);
                        trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
                                                     pcpu->policy->cur);
                }
        }

        return 0;
}

static void cpufreq_interactive_freq_down(struct work_struct *work)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        spin_lock_irqsave(&down_cpumask_lock, flags);
        tmp_mask = down_cpumask;
        cpumask_clear(&down_cpumask);
        spin_unlock_irqrestore(&down_cpumask_lock, flags);

        for_each_cpu(cpu, &tmp_mask) {
                unsigned int j;
                unsigned int max_freq = 0;

                pcpu = &per_cpu(cpuinfo, cpu);
                smp_rmb();

                if (!pcpu->governor_enabled)
                        continue;

                mutex_lock(&set_speed_lock);

                for_each_cpu(j, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, j);

                        if (pjcpu->target_freq > max_freq)
                                max_freq = pjcpu->target_freq;
                }

                if (max_freq != pcpu->policy->cur)
                        __cpufreq_driver_target(pcpu->policy, max_freq,
                                                CPUFREQ_RELATION_H);

                mutex_unlock(&set_speed_lock);
                trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
                                               pcpu->policy->cur);
        }
}

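/*
 * Raise every online CPU to at least hispeed_freq and record hispeed_freq as
 * the floor, validated "now", so the speed cannot drop back below it until
 * min_sample_time has passed.
 */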
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        trace_cpufreq_interactive_boost(hispeed_freq);
        spin_lock_irqsave(&up_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);

                if (pcpu->target_freq < hispeed_freq) {
                        pcpu->target_freq = hispeed_freq;
                        cpumask_set_cpu(i, &up_cpumask);
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(i, &pcpu->target_set_time);
                        anyboost = 1;
                }

                /*
                 * Set the floor frequency and restart the window for when it
                 * was last validated.
                 */

                pcpu->floor_freq = hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&up_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(up_task);
}

/*
 * A pulsed boost on an input event raises CPUs to hispeed_freq and lets the
 * usual algorithm, governed by min_sample_time, decide when to allow the
 * speed to drop.
 */

static void cpufreq_interactive_input_event(struct input_handle *handle,
                                            unsigned int type,
                                            unsigned int code, int value)
{
        if (input_boost_val && type == EV_SYN && code == SYN_REPORT)
                cpufreq_interactive_boost();
}

static void cpufreq_interactive_input_open(struct work_struct *w)
{
        struct cpufreq_interactive_inputopen *io =
                container_of(w, struct cpufreq_interactive_inputopen,
                             inputopen_work);
        int error;

        error = input_open_device(io->handle);
        if (error)
                input_unregister_handle(io->handle);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
                                             struct input_dev *dev,
                                             const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        pr_info("%s: connect to %s\n", __func__, dev->name);
        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "cpufreq_interactive";

        error = input_register_handle(handle);
        if (error)
                goto err;

        inputopen.handle = handle;
        queue_work(down_wq, &inputopen.inputopen_work);
        return 0;
err:
        kfree(handle);
        return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

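/* Match multi-touch touchscreens and touchpads so touch input can drive the boost pulse. */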
static const struct input_device_id cpufreq_interactive_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                         INPUT_DEVICE_ID_MATCH_ABSBIT,
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
                            BIT_MASK(ABS_MT_POSITION_X) |
                            BIT_MASK(ABS_MT_POSITION_Y) },
        }, /* multi-touch touchscreen */
        {
                .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
                         INPUT_DEVICE_ID_MATCH_ABSBIT,
                .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
                .absbit = { [BIT_WORD(ABS_X)] =
                            BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
        }, /* touchpad */
        { },
};

static struct input_handler cpufreq_interactive_input_handler = {
        .event          = cpufreq_interactive_input_event,
        .connect        = cpufreq_interactive_input_connect,
        .disconnect     = cpufreq_interactive_input_disconnect,
        .name           = "cpufreq_interactive",
        .id_table       = cpufreq_interactive_ids,
};

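/*
 * Tunables exported via sysfs, grouped under the "interactive" directory of
 * the global cpufreq kobject (typically
 * /sys/devices/system/cpu/cpufreq/interactive/).
 */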
static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
                                  struct attribute *attr, const char *buf,
                                  size_t count)
{
        int ret;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;
        hispeed_freq = val;
        return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
                show_hispeed_freq, store_hispeed_freq);


static ssize_t show_go_hispeed_load(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        go_hispeed_load = val;
        return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
                show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        min_sample_time = val;
        return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
                                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        above_hispeed_delay_val = val;
        return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
                        struct attribute *attr, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        timer_rate = val;
        return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);

static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%u\n", input_boost_val);
}

static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
                                 const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        input_boost_val = val;
        return count;
}

define_one_global_rw(input_boost);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                           const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        boost_val = val;

        if (boost_val)
                cpufreq_interactive_boost();
        else
                trace_cpufreq_interactive_unboost(hispeed_freq);

        return count;
}

define_one_global_rw(boost);
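
/*
 * Usage sketch (path assumes the usual global cpufreq sysfs location):
 * writing 1 to /sys/devices/system/cpu/cpufreq/interactive/boost starts a
 * long-term boost that keeps speeds at or above hispeed_freq; writing 0
 * ends it and lets the speed decay via the normal algorithm.
 */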

static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
        &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
        &input_boost.attr,
        &boost.attr,
        NULL,
};

static struct attribute_group interactive_attr_group = {
        .attrs = interactive_attributes,
        .name = "interactive",
};

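/*
 * Governor entry point: GOV_START wires up per-CPU state and (on first use)
 * registers the sysfs group and input handler, GOV_STOP tears that down,
 * and GOV_LIMITS clamps the current speed into the new policy range.
 */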
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!cpu_online(policy->cpu))
                        return -EINVAL;

                freq_table =
                        cpufreq_frequency_get_table(policy->cpu);

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
                                             &pcpu->target_set_time);
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }

                if (!hispeed_freq)
                        hispeed_freq = policy->max;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(cpufreq_global_kobject,
                                &interactive_attr_group);
                if (rc)
                        return rc;

                rc = input_register_handler(&cpufreq_interactive_input_handler);
                if (rc)
                        pr_warn("%s: failed to register input handler\n",
                                __func__);

                break;

        case CPUFREQ_GOV_STOP:
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->governor_enabled = 0;
                        smp_wmb();
                        del_timer_sync(&pcpu->cpu_timer);

                        /*
                         * Reset idle exit time since we may cancel the timer
                         * before it can run after the last idle exit time,
                         * to avoid tripping the check in idle exit for a timer
                         * that is trying to run.
                         */
                        pcpu->idle_exit_time = 0;
                }

                flush_work(&freq_scale_down_work);
                if (atomic_dec_return(&active_count) > 0)
                        return 0;

                input_unregister_handler(&cpufreq_interactive_input_handler);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);

                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

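/*
 * Module init: set tunable defaults, prepare the per-CPU timers, and create
 * the RT up task and the scale-down workqueue before registering the
 * governor.
 */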
static int __init cpufreq_interactive_init(void)
{
        unsigned int i;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
        above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;

        /* Initialize per-CPU timers */
        for_each_possible_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                init_timer(&pcpu->cpu_timer);
                pcpu->cpu_timer.function = cpufreq_interactive_timer;
                pcpu->cpu_timer.data = i;
        }

        up_task = kthread_create(cpufreq_interactive_up_task, NULL,
                                 "kinteractiveup");
        if (IS_ERR(up_task))
                return PTR_ERR(up_task);

        sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
        get_task_struct(up_task);

        /* No rescuer thread, bind to CPU queuing the work for possibly
           warm cache (probably doesn't matter much). */
        down_wq = alloc_workqueue("knteractive_down", 0, 1);

        if (!down_wq)
                goto err_freeuptask;

        INIT_WORK(&freq_scale_down_work,
                  cpufreq_interactive_freq_down);

        spin_lock_init(&up_cpumask_lock);
        spin_lock_init(&down_cpumask_lock);
        mutex_init(&set_speed_lock);

        idle_notifier_register(&cpufreq_interactive_idle_nb);
        INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
        return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
        put_task_struct(up_task);
        return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_interactive);
        kthread_stop(up_task);
        put_task_struct(up_task);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
        "latency-sensitive workloads");
MODULE_LICENSE("GPL");