blob: c88eb90007f5c815a607833d01df0cc62d225b4b [file] [log] [blame]
yuezonghe824eb0c2024-06-27 02:32:26 -07001/*
2 * drivers/cpufreq/cpufreq_interactive.c
3 *
4 * Copyright (C) 2010 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Author: Mike Chan (mike@android.com)
16 *
17 */
18
19#include <linux/cpu.h>
20#include <linux/cpumask.h>
21#include <linux/cpufreq.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/rwsem.h>
25#include <linux/sched.h>
26#include <linux/tick.h>
27#include <linux/time.h>
28#include <linux/timer.h>
29#include <linux/workqueue.h>
30#include <linux/kthread.h>
31#include <linux/slab.h>
32#include <asm/cputime.h>
33
34#include <linux/cdev.h>
35#include <linux/fs.h>
36#include <linux/serial_reg.h>
37#include <linux/types.h>
38#include <linux/device.h>
39#include <linux/miscdevice.h>
40
41#define CREATE_TRACE_POINTS
42#include <trace/events/cpufreq_interactive.h>
43
44static int active_count;
45
/* Per-CPU governor state. */
struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;		/* periodic load-sampling timer */
	struct timer_list cpu_slack_timer;	/* optional later wakeup to re-evaluate while idle */
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;			/* idle time at start of current sample window */
	u64 time_in_idle_timestamp;		/* wall time at start of current sample window */
	u64 cputime_speedadj;			/* accumulated busy time weighted by the freq it ran at */
	u64 cputime_speedadj_timestamp;		/* start of the speedadj accumulation window */
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;		/* frequency this CPU wants */
	unsigned int floor_freq;		/* don't scale below this until floor expires */
	u64 floor_validate_time;		/* when floor_freq was last (re)validated */
	u64 hispeed_validate_time;		/* when we last confirmed being at/above hispeed */
	struct rw_semaphore enable_sem;		/* serializes governor start/stop vs timers/notifiers */
	int governor_enabled;
};
63
64static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
65
66/* realtime thread handles frequency scaling */
67static struct task_struct *speedchange_task;
68static cpumask_t speedchange_cpumask;
69static spinlock_t speedchange_cpumask_lock;
70static struct mutex gov_lock;
71
72/* Hi speed to bump to from lo speed when load burst (default max) */
73static unsigned int hispeed_freq;
74
75/* Go to hi speed when CPU load at or above this value. */
76#define DEFAULT_GO_HISPEED_LOAD 99
77static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
78
79/* Target load. Lower values result in higher CPU speeds. */
80#define DEFAULT_TARGET_LOAD 90
81static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
82static spinlock_t target_loads_lock;
83static unsigned int *target_loads = default_target_loads;
84static int ntarget_loads = ARRAY_SIZE(default_target_loads);
85
86/*
87 * The minimum amount of time to spend at a frequency before we can ramp down.
88 */
89#define DEFAULT_MIN_SAMPLE_TIME (400 * USEC_PER_MSEC)
90static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
91
92/*
93 * The sample rate of the timer used to increase frequency
94 */
95#define DEFAULT_TIMER_RATE (100 * USEC_PER_MSEC)
96static unsigned long timer_rate = DEFAULT_TIMER_RATE;
97
98/*
99 * Wait this long before raising speed above hispeed, by default a single
100 * timer interval.
101 */
102#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
103static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
104
105/* Non-zero means indefinite speed boost active */
106static int boost_val;
107/* Duration of a boot pulse in usecs */
108static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
109/* End time of boost pulse in ktime converted to usecs */
110static u64 boostpulse_endtime;
111
112/*
113 * Max additional time to wait in idle, beyond timer_rate, at speeds above
114 * minimum before wakeup to reduce speed, or -1 if unnecessary.
115 */
116#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
117static int timer_slack_val = DEFAULT_TIMER_SLACK;
118
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

/* Exported (non-static) only when built as the default governor. */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,	/* 10 ms */
	.owner = THIS_MODULE,
};
131
132/*********************************************************************
133 * CPUFREQ MIN bengin *
134 *********************************************************************/
135
136typedef struct
137{
138 struct list_head node;
139 char *appname;
140 int cpufreq;
141}psm_cpufreq_node;
142
143psm_cpufreq_node psm_cpufreq_list;
static int cpufreq_min_open(struct inode *ip, struct file *fp);
static int cpufreq_min_release(struct inode *ip, struct file *fp);
static ssize_t cpufreq_min_read(struct file *fp, char __user *buf,size_t count, loff_t *pos);
static ssize_t cpufreq_min_write(struct file *fp, const char __user *buf,size_t count, loff_t *pos);
static long cpufreq_mint_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

/* File operations for /dev/cpufreqmin; only the ioctl handler does real work. */
static const struct file_operations cpufreq_min_fops = {
	.read = cpufreq_min_read,
	.write = cpufreq_min_write,
	.open = cpufreq_min_open,
	.unlocked_ioctl = cpufreq_mint_ioctl,
	.release = cpufreq_min_release,
};
157
/* Misc char device through which userspace submits min-frequency requests. */
static struct miscdevice cpufreq_min_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "cpufreqmin",
	.fops = &cpufreq_min_fops,
};
163int cpufreq_min_apply(const char *appname, int cpufreq)
164{
165 unsigned long flags;
166 psm_cpufreq_node *cur_curfreq;
167 psm_cpufreq_node *cpufreq_node;
168 struct list_head *head = &psm_cpufreq_list.node;
169
170 cpufreq_node = kmalloc(sizeof(psm_cpufreq_node), GFP_KERNEL);
171 if(cpufreq_node == NULL)
172 return 0;
173
174 cpufreq_node->appname = appname;
175 cpufreq_node->cpufreq = cpufreq;
176
177 local_irq_save(flags);
178 if(list_empty(head))
179 {
180 list_add(&cpufreq_node->node, head);
181 local_irq_restore(flags);
182 return 0;
183 }
184
185 list_for_each_entry(cur_curfreq, head, node)
186 {
187 if(cpufreq_node->cpufreq <= cur_curfreq->cpufreq)
188 {
189 list_add_tail(&cpufreq_node->node, &cur_curfreq->node);
190 local_irq_restore(flags);
191 return 0;
192 }
193 }
194
195 list_add_tail(&cpufreq_node->node, head);
196 local_irq_restore(flags);
197 return 0;
198}
199
200int cpureq_min_cancel(const char *appname)
201{
202 unsigned long flags;
203 psm_cpufreq_node *cur_curfreq;
204 struct list_head *head = &psm_cpufreq_list.node;
205
206 local_irq_save(flags);
207 if(list_empty(head))
208 {
209 local_irq_restore(flags);
210 return -1;
211 }
212
213 list_for_each_entry(cur_curfreq, head, node)
214 {
215 if(0 == strcmp(cur_curfreq->appname, appname))
216 {
217 list_del(&cur_curfreq->node);
218 local_irq_restore(flags);
219 kfree(cur_curfreq);
220 return 0;
221 }
222 }
223 local_irq_restore(flags);
224
225 return -1;
226}
227
228int cpufreq_min_get(void)
229{
230 unsigned long flags;
231 int cpufreq = 0;
232 psm_cpufreq_node *curfreq_node ;
233 struct list_head *head = &psm_cpufreq_list.node;
234
235 local_irq_save(flags);
236 if(list_empty(head))
237 {
238 local_irq_restore(flags);
239 return 0;
240 }
241
242 curfreq_node = (psm_cpufreq_node *)head->prev;
243 cpufreq = curfreq_node->cpufreq;
244 local_irq_restore(flags);
245
246 return cpufreq ;
247}
248
/* The cpufreqmin device is ioctl-only: read/write/open/release are no-ops. */
static ssize_t cpufreq_min_read(struct file *fp, char __user *buf,size_t count, loff_t *pos)
{
	return 0;
}

static ssize_t cpufreq_min_write(struct file *fp, const char __user *buf,size_t count, loff_t *pos)
{
	return 0;
}

static int cpufreq_min_open(struct inode *ip, struct file *fp)
{
	return 0;
}

static int cpufreq_min_release(struct inode *ip, struct file *fp)
{
	return 0;
}
268
/*
 * ioctl entry point for /dev/cpufreqmin: apply or cancel a minimum-
 * frequency request on behalf of a userspace client.
 *
 * NOTE(review): 'arg' is a userspace pointer but is dereferenced
 * directly; it must be brought into kernel space with copy_from_user()
 * (and the 'name' string copied as well, e.g. strndup_user) before use.
 * As written, a malicious or buggy caller can make the kernel read
 * arbitrary/unmapped addresses.  TODO fix together with the pointer
 * lifetime issue in cpufreq_min_apply().
 */
static long cpufreq_mint_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	cpufreq_min_info *min_info;	/* type declared in a shared header not visible here */

	switch(cmd)
	{
		case CPUFREQ_APPLY:
			/* Register a new minimum-frequency request. */
			min_info = (cpufreq_min_info *)arg;
			cpufreq_min_apply(min_info->name, min_info->cpufreq);
			break;

		case CPUFREQ_CANCLE:	/* sic: "CANCLE" spelling comes from the shared header */
			min_info = (cpufreq_min_info *)arg;
			cpureq_min_cancel(min_info->name);
			break;

		default:
			ret = -1;	/* NOTE(review): -ENOTTY is the convention for unknown ioctls */
			break;
	}

	return ret;
}
293
294void cpufreq_min_init(void)
295{
296 int ret;
297
298 ret = misc_register(&cpufreq_min_device);
299 if (ret)
300 {
301 printk(KERN_ERR "cpufreq min init failed\n");
302 }
303
304 INIT_LIST_HEAD(&psm_cpufreq_list.node);
305}
306
/* Tear down the cpufreqmin misc device (pending requests are not freed here). */
static void cpufreq_min_exit(void)
{
	misc_deregister(&cpufreq_min_device);
}
311
312/*********************************************************************
313 * CPUFREQ MIN end *
314 *********************************************************************/
315
/*
 * Re-arm the sampling timer one timer_rate ahead on the current CPU and
 * start a fresh load-accounting window.  If slack is enabled and we are
 * above the policy minimum, also arm the slack timer a bit later so an
 * idle CPU still gets a chance to drop its speed request.
 */
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	mod_timer_pinned(&pcpu->cpu_timer, expires);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	/* Reset idle/busy accounting so the next sample starts from "now". */
	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time_us(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
336
/*
 * Look up the target load to use at @freq.  target_loads[] alternates
 * load:boundary-freq pairs ending with a final load (odd length, see
 * store_target_loads()), so step by 2 until the next boundary frequency
 * exceeds @freq and return the load at that index.
 */
static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}
352
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 *
 * Converges by binary-search-like narrowing: [freqmin, freqmax] brackets
 * the answer and shrinks every iteration until the chosen frequency
 * stops changing.
 */

static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		cpufreq_frequency_table_target(
			pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			CPUFREQ_RELATION_L, &index);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmax - 1, CPUFREQ_RELATION_H,
					&index);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				cpufreq_frequency_table_target(
					pcpu->policy, pcpu->freq_table,
					freqmin + 1, CPUFREQ_RELATION_L,
					&index);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
440
441static u64 update_load(int cpu)
442{
443 struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
444 u64 now =0;
445 u64 now_idle;
446 unsigned int delta_idle;
447 unsigned int delta_time;
448 u64 active_time;
449
450 now_idle = get_cpu_idle_time_us(cpu, &now);
451 delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
452 delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
453 active_time = delta_time - delta_idle;
454 pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
455
456 pcpu->time_in_idle = now_idle;
457 pcpu->time_in_idle_timestamp = now;
458 return now;
459}
460
/*
 * Per-CPU sampling timer: measure the load since the last sample,
 * choose a new target frequency and hand the change to the RT
 * speedchange thread.  Runs in timer context for CPU @data.
 */
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;

	/* Bail out if the governor is being stopped for this CPU. */
	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	/* loadadjfreq = average (frequency * load%) over the window. */
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	boosted = boost_val || now < boostpulse_endtime;

	if (cpu_load >= go_hispeed_load || boosted) {
		/* Heavy load or boost: jump at least to hispeed_freq. */
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
	}
	/*
	 * Enforce any minimum requested through the cpufreqmin device.
	 * NOTE(review): kernel max() type-checks its arguments; int
	 * (cpufreq_min_get) vs unsigned int (new_freq) may warn —
	 * consider casting the left operand to unsigned int.  TODO confirm.
	 */
	new_freq = max(cpufreq_min_get(), new_freq);
	/* Don't climb above hispeed until above_hispeed_delay has elapsed. */
	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	/* Snap the choice onto a supported table frequency. */
	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < min_sample_time) {
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq && pcpu->target_freq == pcpu->policy->cur) {
		goto rearm_if_notmax;
	}


	/* Queue this CPU for the speedchange thread and wake it. */
	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
575
/*
 * Idle-entry hook: if this CPU is idling while still requesting more
 * than the policy minimum, make sure the sampling timer is armed so the
 * request can be reconsidered.
 */
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending)
			cpufreq_interactive_timer_resched(pcpu);
	}

	up_read(&pcpu->enable_sem);
}
606
/*
 * Idle-exit hook: ensure a sample is taken soon after becoming busy.
 * If the pending timer already expired while we were idle, run the
 * evaluation immediately instead of waiting for the softirq.
 */
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}
630
/*
 * RT kthread that applies pending frequency changes.  Sleeps until a
 * timer queues CPUs in speedchange_cpumask, then for each queued CPU
 * drives the policy to the highest target_freq among its sibling CPUs.
 */
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		/*
		 * State is set before checking the mask so a concurrent
		 * wake_up_process() after the check is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		/* Snapshot and clear the work mask under the lock. */
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			/* The policy must satisfy the fastest sibling. */
			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
689
/*
 * Raise every online CPU to at least hispeed_freq and (re)validate the
 * floor so the boost is not immediately undone.  Wakes the speedchange
 * thread only if some CPU actually needed raising.
 */
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}
724
/*
 * cpufreq transition notifier: after a speed change completes, close
 * the load-accounting window on every CPU of the affected policy so
 * busy time gets weighted at the frequency it actually ran at.
 */
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		/* Skip if the governor is being torn down for this CPU. */
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};
758
/* sysfs: print target_loads as "load freq:load freq:... load". */
static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	/* Odd indices are boundary frequencies: follow them with ':'. */
	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");

	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}
776
/*
 * sysfs: parse a space/colon separated "load freq:load ... load" list
 * into a fresh target_loads table.  The token count must be odd (loads
 * outnumber boundary frequencies by one).
 */
static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	const char *cp;
	unsigned int *new_target_loads = NULL;
	int ntokens = 1;
	int i;
	unsigned long flags;

	/* Count tokens: each separator introduces one more. */
	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err_inval;

	new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!new_target_loads) {
		ret = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
			goto err_inval;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_inval;

	/* Swap in the new table; free the old one unless it is the default. */
	spin_lock_irqsave(&target_loads_lock, flags);
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return count;

err_inval:
	ret = -EINVAL;
err:
	kfree(new_target_loads);	/* kfree(NULL) is a no-op */
	return ret;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);
834
835static ssize_t show_hispeed_freq(struct kobject *kobj,
836 struct attribute *attr, char *buf)
837{
838 return sprintf(buf, "%u\n", hispeed_freq);
839}
840
841static ssize_t store_hispeed_freq(struct kobject *kobj,
842 struct attribute *attr, const char *buf,
843 size_t count)
844{
845 int ret;
846 long unsigned int val;
847
848 ret = strict_strtoul(buf, 0, &val);
849 if (ret < 0)
850 return ret;
851 hispeed_freq = val;
852 return count;
853}
854
855static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
856 show_hispeed_freq, store_hispeed_freq);
857
858
859static ssize_t show_go_hispeed_load(struct kobject *kobj,
860 struct attribute *attr, char *buf)
861{
862 return sprintf(buf, "%lu\n", go_hispeed_load);
863}
864
865static ssize_t store_go_hispeed_load(struct kobject *kobj,
866 struct attribute *attr, const char *buf, size_t count)
867{
868 int ret;
869 unsigned long val;
870
871 ret = strict_strtoul(buf, 0, &val);
872 if (ret < 0)
873 return ret;
874 go_hispeed_load = val;
875 return count;
876}
877
878static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
879 show_go_hispeed_load, store_go_hispeed_load);
880
881static ssize_t show_min_sample_time(struct kobject *kobj,
882 struct attribute *attr, char *buf)
883{
884 return sprintf(buf, "%lu\n", min_sample_time);
885}
886
887static ssize_t store_min_sample_time(struct kobject *kobj,
888 struct attribute *attr, const char *buf, size_t count)
889{
890 int ret;
891 unsigned long val;
892
893 ret = strict_strtoul(buf, 0, &val);
894 if (ret < 0)
895 return ret;
896 min_sample_time = val;
897 return count;
898}
899
900static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
901 show_min_sample_time, store_min_sample_time);
902
903static ssize_t show_above_hispeed_delay(struct kobject *kobj,
904 struct attribute *attr, char *buf)
905{
906 return sprintf(buf, "%lu\n", above_hispeed_delay_val);
907}
908
909static ssize_t store_above_hispeed_delay(struct kobject *kobj,
910 struct attribute *attr,
911 const char *buf, size_t count)
912{
913 int ret;
914 unsigned long val;
915
916 ret = strict_strtoul(buf, 0, &val);
917 if (ret < 0)
918 return ret;
919 above_hispeed_delay_val = val;
920 return count;
921}
922
923define_one_global_rw(above_hispeed_delay);
924
925static ssize_t show_timer_rate(struct kobject *kobj,
926 struct attribute *attr, char *buf)
927{
928 return sprintf(buf, "%lu\n", timer_rate);
929}
930
931static ssize_t store_timer_rate(struct kobject *kobj,
932 struct attribute *attr, const char *buf, size_t count)
933{
934 int ret;
935 unsigned long val;
936
937 ret = strict_strtoul(buf, 0, &val);
938 if (ret < 0)
939 return ret;
940 timer_rate = val;
941 return count;
942}
943
944static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
945 show_timer_rate, store_timer_rate);
946
947static ssize_t show_timer_slack(
948 struct kobject *kobj, struct attribute *attr, char *buf)
949{
950 return sprintf(buf, "%d\n", timer_slack_val);
951}
952
953static ssize_t store_timer_slack(
954 struct kobject *kobj, struct attribute *attr, const char *buf,
955 size_t count)
956{
957 int ret;
958 unsigned long val;
959
960 ret = kstrtol(buf, 10, &val);
961 if (ret < 0)
962 return ret;
963
964 timer_slack_val = val;
965 return count;
966}
967
968define_one_global_rw(timer_slack);
969
970static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
971 char *buf)
972{
973 return sprintf(buf, "%d\n", boost_val);
974}
975
976static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
977 const char *buf, size_t count)
978{
979 int ret;
980 unsigned long val;
981
982 ret = kstrtoul(buf, 0, &val);
983 if (ret < 0)
984 return ret;
985
986 boost_val = val;
987
988 if (boost_val) {
989 trace_cpufreq_interactive_boost("on");
990 cpufreq_interactive_boost();
991 } else {
992 trace_cpufreq_interactive_unboost("off");
993 }
994
995 return count;
996}
997
998define_one_global_rw(boost);
999
1000static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
1001 const char *buf, size_t count)
1002{
1003 int ret;
1004 unsigned long val;
1005
1006 ret = kstrtoul(buf, 0, &val);
1007 if (ret < 0)
1008 return ret;
1009
1010 boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
1011 trace_cpufreq_interactive_boost("pulse");
1012 cpufreq_interactive_boost();
1013 return count;
1014}
1015
1016static struct global_attr boostpulse =
1017 __ATTR(boostpulse, 0200, NULL, store_boostpulse);
1018
1019static ssize_t show_boostpulse_duration(
1020 struct kobject *kobj, struct attribute *attr, char *buf)
1021{
1022 return sprintf(buf, "%d\n", boostpulse_duration_val);
1023}
1024
1025static ssize_t store_boostpulse_duration(
1026 struct kobject *kobj, struct attribute *attr, const char *buf,
1027 size_t count)
1028{
1029 int ret;
1030 unsigned long val;
1031
1032 ret = kstrtoul(buf, 0, &val);
1033 if (ret < 0)
1034 return ret;
1035
1036 boostpulse_duration_val = val;
1037 return count;
1038}
1039
1040define_one_global_rw(boostpulse_duration);
1041
/* All tunables exported under .../cpufreq/interactive in sysfs. */
static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
1060
1061static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
1062 unsigned long val,
1063 void *data)
1064{
1065 switch (val) {
1066 case IDLE_START:
1067 cpufreq_interactive_idle_start();
1068 break;
1069 case IDLE_END:
1070 cpufreq_interactive_idle_end();
1071 break;
1072 }
1073
1074 return 0;
1075}
1076
1077static struct notifier_block cpufreq_interactive_idle_nb = {
1078 .notifier_call = cpufreq_interactive_idle_notifier,
1079};
1080
/*
 * cpufreq governor callback.  START arms the per-CPU timers and, for the
 * first active policy, registers the idle/transition notifiers and sysfs
 * group (active_count, protected by gov_lock, refcounts this).  STOP
 * reverses it; LIMITS clamps the current speed into the new policy range.
 */
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		mutex_lock(&gov_lock);

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			unsigned long expires;

			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			/* enable_sem write-held: timers must not race this setup. */
			down_write(&pcpu->enable_sem);
			expires = jiffies + usecs_to_jiffies(timer_rate);
			pcpu->cpu_timer.expires = expires;
			add_timer_on(&pcpu->cpu_timer, j);
			if (timer_slack_val >= 0) {
				expires += usecs_to_jiffies(timer_slack_val);
				pcpu->cpu_slack_timer.expires = expires;
				add_timer_on(&pcpu->cpu_slack_timer, j);
			}
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			/* Disable first, then sync-cancel timers under the sem. */
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		/* Last user gone: tear down notifiers and sysfs entries. */
		cpufreq_unregister_notifier(
			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
1184
/* Slack-timer callback: waking the CPU is the whole point; nothing to do. */
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
1188
/*
 * Module init: prepare per-CPU timers and locks, start the RT
 * speedchange thread, register the cpufreqmin device, then register
 * the governor with the cpufreq core.
 *
 * NOTE(review): if cpufreq_register_governor() fails, the kthread and
 * misc device are not torn down — TODO consider cleanup on that path.
 */
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		/* Deferrable: don't wake an idle CPU just to sample load. */
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);
	cpufreq_min_init();
	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
1224
1225#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
1226fs_initcall(cpufreq_interactive_init);
1227#else
1228module_init(cpufreq_interactive_init);
1229#endif
1230
/* Module exit: unregister the governor, stop the worker thread and
 * drop its reference, then remove the cpufreqmin device. */
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
	cpufreq_min_exit();
}
1238
1239module_exit(cpufreq_interactive_exit);
1240
1241MODULE_AUTHOR("Mike Chan <mike@android.com>");
1242MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
1243 "Latency sensitive workloads");
1244MODULE_LICENSE("GPL");