/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14/* include <asm/percpu.h> */
15#include <trace/events/sched.h>
16#include <linux/module.h>
17#include <trace/events/irq.h>
18#include <trace/events/power.h>
19
20#include "interface.h"
21#include "met_drv.h"
22#include "cpu_pmu.h"
23#include "cpu_pmu_v2.h"
24#include "switch.h"
25#include "sampler.h"
26#include "met_kernel_symbol.h"
27/* #include "trace.h" */
28
29/*
30 * IRQ_TIRGGER and CPU_IDLE_TRIGGER
31 */
32/* #define IRQ_TRIGGER */
33/* #define CPU_IDLE_TRIGGER */
34
/*
 * Per-CPU "first log" flag: set to 1 by met_switch_start(); the first
 * sched_switch seen on each CPU then emits one unconditional trace record
 * (see mt_switch()) and clears the flag.
 */
static DEFINE_PER_CPU(unsigned int, first_log);
36
37#ifdef __aarch64__
38/* #include <asm/compat.h> */
39#include <linux/compat.h>
40#endif
41
42noinline void mt_switch(struct task_struct *prev, struct task_struct *next)
43{
44 int cpu;
45 int prev_state = 0, next_state = 0;
46
47#ifdef __aarch64__
48 prev_state = !(is_compat_thread(task_thread_info(prev)));
49 next_state = !(is_compat_thread(task_thread_info(next)));
50#endif
51
52 cpu = smp_processor_id();
53 if (per_cpu(first_log, cpu)) {
54 MET_TRACE("%d, %d, %d, %d\n", prev->pid, prev_state, next->pid, next_state);
55 per_cpu(first_log, cpu) = 0;
56 }
57 if (prev_state != next_state)
58 MET_TRACE("%d, %d, %d, %d\n", prev->pid, prev_state, next->pid, next_state);
59}
60
61
#if 0 /* move to kernel space */
/*
 * Dead code, compiled out: the original sched_switch tracepoint probe.
 * The same logic now lives in met_sched_switch() below, invoked through
 * the kernel-side hook registered via met_reg_switch_symbol.
 * Kept for reference only; NOTE(review): consider deleting outright.
 */
MET_DEFINE_PROBE(sched_switch,
		 TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next))
{
	/* speedup sched_switch callback handle */
	if (met_switch.mode == 0)
		return;

	if (met_switch.mode & MT_SWITCH_EVENT_TIMER)
		met_event_timer_notify();

	if (met_switch.mode & MT_SWITCH_64_32BIT)
		mt_switch(prev, next);

	if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
		if (get_pmu_profiling_version() == 1)
			cpupmu_polling(0, smp_processor_id());
#ifdef MET_SUPPORT_CPUPMU_V2
		else if (get_pmu_profiling_version() == 2)
			cpupmu_polling_v2(0, smp_processor_id());
#endif
	}
}
#endif
86
87void met_sched_switch(struct task_struct *prev, struct task_struct *next)
88{
89 /* speedup sched_switch callback handle */
90 if (met_switch.mode == 0)
91 return;
92
93 if (met_switch.mode & MT_SWITCH_EVENT_TIMER)
94 met_event_timer_notify();
95
96 if (met_switch.mode & MT_SWITCH_64_32BIT)
97 mt_switch(prev, next);
98
99 if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
100 if (get_pmu_profiling_version() == 1)
101 cpupmu_polling(0, smp_processor_id());
102#ifdef MET_SUPPORT_CPUPMU_V2
103 else if (get_pmu_profiling_version() == 2)
104 cpupmu_polling_v2(0, smp_processor_id());
105#endif
106 }
107}
108
#ifdef IRQ_TRIGGER
/* irq_handler_entry tracepoint probe: kick the event timer when enabled. */
MET_DEFINE_PROBE(irq_handler_entry, TP_PROTO(int irq, struct irqaction *action))
{
	if (met_switch.mode & MT_SWITCH_EVENT_TIMER)
		met_event_timer_notify();
}
#endif
118
#ifdef CPU_IDLE_TRIGGER
/* cpu_idle tracepoint probe: kick the event timer when enabled. */
MET_DEFINE_PROBE(cpu_idle, TP_PROTO(unsigned int state, unsigned int cpu_id))
{
	if (met_switch.mode & MT_SWITCH_EVENT_TIMER)
		met_event_timer_notify();
}
#endif
128
#ifdef MET_ANYTIME
/*
 * create related subfs file node
 */

/*
 * sysfs show handler for the read-only "default_on" node; always reports
 * "1" so user space can detect that MET default-on is supported.
 */
static ssize_t default_on_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "1\n");
}

/* "default_on" attribute: mode 0664, read-only (no store callback) */
static struct kobj_attribute default_on_attr = __ATTR(default_on, 0664, default_on_show, NULL);
/* parent kobject the attribute was attached to; saved for removal */
static struct kobject *kobj_cpu;
#endif
142
143static int met_switch_create_subfs(struct kobject *parent)
144{
145 int ret = 0;
146
147 /* register tracepoints */
148#if 0
149 if (MET_REGISTER_TRACE(sched_switch)) {
150 pr_debug("can not register callback of sched_switch\n");
151 return -ENODEV;
152 }
153#else
154 if (met_reg_switch_symbol)
155 ret = met_reg_switch_symbol();
156#endif
157#ifdef CPU_IDLE_TRIGGER
158 if (MET_REGISTER_TRACE(cpu_idle)) {
159 pr_debug("can not register callback of irq_handler_entry\n");
160 return -ENODEV;
161 }
162#endif
163#ifdef IRQ_TRIGGER
164 if (MET_REGISTER_TRACE(irq_handler_entry)) {
165 pr_debug("can not register callback of irq_handler_entry\n");
166 return -ENODEV;
167 }
168#endif
169
170#ifdef MET_ANYTIME
171 /*
172 * to create default_on file node
173 * let user space can know we can support MET default on
174 */
175 kobj_cpu = parent;
176 ret = sysfs_create_file(kobj_cpu, &default_on_attr.attr);
177 if (ret != 0) {
178 pr_debug("Failed to create default_on in sysfs\n");
179 return -1;
180 }
181#endif
182
183 return ret;
184}
185
186
187static void met_switch_delete_subfs(void)
188{
189#ifdef MET_ANYTIME
190 if (kobj_cpu != NULL) {
191 sysfs_remove_file(kobj_cpu, &default_on_attr.attr);
192 kobj_cpu = NULL;
193 }
194#endif
195#ifdef IRQ_TRIGGER
196 MET_UNREGISTER_TRACE(irq_handler_entry);
197#endif
198#ifdef CPU_IDLE_TRIGGER
199 MET_UNREGISTER_TRACE(cpu_idle);
200#endif
201#if 0
202 MET_UNREGISTER_TRACE(sched_switch);
203#else
204 if (met_unreg_switch_symbol)
205 met_unreg_switch_symbol();
206#endif
207
208}
209
210
/*
 * Saved copy of met_cpupmu.timed_polling: start() parks the timer-driven
 * PMU polling hook here while per-sched_switch polling is active, and
 * stop() restores it.
 */
static void (*cpu_timed_polling)(unsigned long long stamp, int cpu);
/* static void (*cpu_tagged_polling)(unsigned long long stamp, int cpu); */
213
214static void met_switch_start(void)
215{
216 int cpu;
217
218 if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
219 cpu_timed_polling = met_cpupmu.timed_polling;
220 /* cpu_tagged_polling = met_cpupmu.tagged_polling; */
221 met_cpupmu.timed_polling = NULL;
222 /* met_cpupmu.tagged_polling = NULL; */
223 }
224
225 for_each_possible_cpu(cpu) {
226 per_cpu(first_log, cpu) = 1;
227 }
228
229}
230
231
232static void met_switch_stop(void)
233{
234 int cpu;
235
236 if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
237 met_cpupmu.timed_polling = cpu_timed_polling;
238 /* met_cpupmu.tagged_polling = cpu_tagged_polling; */
239 }
240
241 for_each_possible_cpu(cpu) {
242 per_cpu(first_log, cpu) = 0;
243 }
244
245}
246
247
248static int met_switch_process_argument(const char *arg, int len)
249{
250 unsigned int value;
251 /*ex: mxitem is 0x0005, max value should be (5-1) + (5-2) = 0x100 + 0x11 = 7 */
252 unsigned int max_value = ((MT_SWITCH_MX_ITEM * 2) - 3);
253
254
255 if (met_parse_num(arg, &value, len) < 0)
256 goto arg_switch_exit;
257
258 if ((value < 1) || (value > max_value))
259 goto arg_switch_exit;
260
261 met_switch.mode = value;
262 return 0;
263
264arg_switch_exit:
265 met_switch.mode = 0;
266 return -EINVAL;
267}
268
/* trace header line emitted when MT_SWITCH_64_32BIT logging is enabled */
static const char header[] =
	"met-info [000] 0.0: met_switch_header: prev_pid,prev_state,next_pid,next_state\n";

/* usage text returned by met_switch_print_help() */
static const char help[] =
"  --switch=mode                         mode:0x1 - output CPUPMU whenever sched_switch\n"
"                                        mode:0x2 - output Aarch 32/64 state whenever state changed (no CPUPMU)\n"
"                                        mode:0x4 - force output count at tag_start/tag_end\n"
"                                        mode:0x8 - task switch timer\n"
"                                        mode:0xF - mode 0x1 + 0x2 + 04 + 08\n";
278
279static int met_switch_print_help(char *buf, int len)
280{
281 return snprintf(buf, PAGE_SIZE, help);
282}
283
284static int met_switch_print_header(char *buf, int len)
285{
286 int ret = 0;
287
288 ret =
289 snprintf(buf, PAGE_SIZE, "met-info [000] 0.0: mp_cpu_switch_base: %d\n",
290 met_switch.mode);
291 if (met_switch.mode & MT_SWITCH_64_32BIT)
292 ret += snprintf(buf + ret, PAGE_SIZE, header);
293
294 return ret;
295}
296
297
/*
 * MET device descriptor for the "switch" module: wires the lifecycle and
 * I/O callbacks above into the MET framework.
 */
struct metdevice met_switch = {
	.name = "switch",
	.type = MET_TYPE_PMU,
	.create_subfs = met_switch_create_subfs,
	.delete_subfs = met_switch_delete_subfs,
	.start = met_switch_start,
	.stop = met_switch_stop,
	.process_argument = met_switch_process_argument,
	.print_help = met_switch_print_help,
	.print_header = met_switch_print_header,
};