blob: 9ea3453dac2c07007c770e77ad1cbf9c2f48b9fe [file] [log] [blame]
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13
14/* include <asm/percpu.h> */
15#include <trace/events/sched.h>
16#include <linux/module.h>
17#include <trace/events/irq.h>
18#include <trace/events/power.h>
19
20#include "interface.h"
21#include "met_drv.h"
22#include "cpu_pmu.h"
23#include "switch.h"
24#include "sampler.h"
25#include "met_kernel_symbol.h"
26/* #include "trace.h" */
27
/*
 * IRQ_TRIGGER and CPU_IDLE_TRIGGER
 */
/* #define IRQ_TRIGGER */
/* #define CPU_IDLE_TRIGGER */
33
34static DEFINE_PER_CPU(unsigned int, first_log);
35
36#ifdef __aarch64__
37/* #include <asm/compat.h> */
38#include <linux/compat.h>
39#endif
40
41noinline void mt_switch(struct task_struct *prev, struct task_struct *next)
42{
43 int cpu;
44 int prev_state = 0, next_state = 0;
45
46#ifdef __aarch64__
47 prev_state = !(is_compat_thread(task_thread_info(prev)));
48 next_state = !(is_compat_thread(task_thread_info(next)));
49#endif
50
51 cpu = smp_processor_id();
52 if (per_cpu(first_log, cpu)) {
53 MET_TRACE("%d, %d, %d, %d\n", prev->pid, prev_state, next->pid, next_state);
54 per_cpu(first_log, cpu) = 0;
55 }
56 if (prev_state != next_state)
57 MET_TRACE("%d, %d, %d, %d\n", prev->pid, prev_state, next->pid, next_state);
58}
59
60
#if 0 /* move to kernel space */
/*
 * Legacy in-module sched_switch tracepoint probe, disabled because the
 * hook was moved into kernel space (see met_sched_switch() below, which
 * is invoked via met_reg_switch_symbol()).  Kept for reference only.
 */
MET_DEFINE_PROBE(sched_switch,
	TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next))
{
	/* speedup sched_switch callback handle */
	if (met_switch.mode == 0)
		return;

	if (met_switch.mode & MT_SWITCH_EVENT_TIMER)
		met_event_timer_notify();

	if (met_switch.mode & MT_SWITCH_64_32BIT)
		mt_switch(prev, next);

	if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
		if (get_pmu_profiling_version() == 1)
			cpupmu_polling(0, smp_processor_id());
#ifdef MET_SUPPORT_CPUPMU_V2
		else if (get_pmu_profiling_version() == 2)
			cpupmu_polling_v2(0, smp_processor_id());
#endif
	}
}
#endif
85
86void met_sched_switch(struct task_struct *prev, struct task_struct *next)
87{
88 /* speedup sched_switch callback handle */
89 if (met_switch.mode == 0)
90 return;
91
92 if (met_switch.mode & MT_SWITCH_EVENT_TIMER)
93 met_event_timer_notify();
94
95 if (met_switch.mode & MT_SWITCH_64_32BIT)
96 mt_switch(prev, next);
97
98 /* met_perf_cpupmu_status: 0: stop, others: polling */
99 if ((met_switch.mode & MT_SWITCH_SCHEDSWITCH) && met_perf_cpupmu_status)
100 met_perf_cpupmu_polling(0, smp_processor_id());
101}
102
#ifdef IRQ_TRIGGER
/* irq_handler_entry tracepoint probe: forward IRQ entry as a timer event. */
MET_DEFINE_PROBE(irq_handler_entry, TP_PROTO(int irq, struct irqaction *action))
{
	if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER))
		return;

	met_event_timer_notify();
}
#endif
112
#ifdef CPU_IDLE_TRIGGER
/* cpu_idle tracepoint probe: forward idle-state changes as a timer event. */
MET_DEFINE_PROBE(cpu_idle, TP_PROTO(unsigned int state, unsigned int cpu_id))
{
	if (!(met_switch.mode & MT_SWITCH_EVENT_TIMER))
		return;

	met_event_timer_notify();
}
#endif
122
#ifdef MET_ANYTIME
/*
 * sysfs plumbing for the "default_on" node; its presence tells user
 * space that MET "default on" mode is supported.
 */

/* Always reads as "1": the node is a capability flag, not a tunable. */
static ssize_t default_on_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "1\n");
}

/* 0664 but no store handler, so the attribute is effectively read-only. */
static struct kobj_attribute default_on_attr = __ATTR(default_on, 0664, default_on_show, NULL);
/* parent kobject, remembered so met_switch_delete_subfs() can remove the file */
static struct kobject *kobj_cpu;
#endif
136
137static int met_switch_create_subfs(struct kobject *parent)
138{
139 int ret = 0;
140
141 /* register tracepoints */
142#if 0
143 if (MET_REGISTER_TRACE(sched_switch)) {
144 pr_debug("can not register callback of sched_switch\n");
145 return -ENODEV;
146 }
147#else
148 if (met_reg_switch_symbol)
149 ret = met_reg_switch_symbol();
150#endif
151#ifdef CPU_IDLE_TRIGGER
152 if (MET_REGISTER_TRACE(cpu_idle)) {
153 pr_debug("can not register callback of irq_handler_entry\n");
154 return -ENODEV;
155 }
156#endif
157#ifdef IRQ_TRIGGER
158 if (MET_REGISTER_TRACE(irq_handler_entry)) {
159 pr_debug("can not register callback of irq_handler_entry\n");
160 return -ENODEV;
161 }
162#endif
163
164#ifdef MET_ANYTIME
165 /*
166 * to create default_on file node
167 * let user space can know we can support MET default on
168 */
169 kobj_cpu = parent;
170 ret = sysfs_create_file(kobj_cpu, &default_on_attr.attr);
171 if (ret != 0) {
172 pr_debug("Failed to create default_on in sysfs\n");
173 return -1;
174 }
175#endif
176
177 return ret;
178}
179
180
/*
 * met_switch_delete_subfs - undo met_switch_create_subfs() in reverse
 * order: remove the sysfs node first, then unregister the tracepoint
 * probes and the kernel-side switch hook.
 */
static void met_switch_delete_subfs(void)
{
#ifdef MET_ANYTIME
	if (kobj_cpu != NULL) {
		sysfs_remove_file(kobj_cpu, &default_on_attr.attr);
		kobj_cpu = NULL;	/* guard against double removal */
	}
#endif
#ifdef IRQ_TRIGGER
	MET_UNREGISTER_TRACE(irq_handler_entry);
#endif
#ifdef CPU_IDLE_TRIGGER
	MET_UNREGISTER_TRACE(cpu_idle);
#endif
#if 0
	MET_UNREGISTER_TRACE(sched_switch);
#else
	/* symmetric with met_reg_switch_symbol() in met_switch_create_subfs() */
	if (met_unreg_switch_symbol)
		met_unreg_switch_symbol();
#endif

}
203
204
/*
 * Saved copy of met_cpupmu.timed_polling while per-switch polling is
 * active: met_switch_start() parks the hook here (and NULLs the live
 * one), met_switch_stop() restores it.
 */
static void (*cpu_timed_polling)(unsigned long long stamp, int cpu);
/* static void (*cpu_tagged_polling)(unsigned long long stamp, int cpu); */
207
208static void met_switch_start(void)
209{
210 int cpu;
211
212 if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
213 cpu_timed_polling = met_cpupmu.timed_polling;
214 /* cpu_tagged_polling = met_cpupmu.tagged_polling; */
215 met_cpupmu.timed_polling = NULL;
216 /* met_cpupmu.tagged_polling = NULL; */
217 }
218
219 for_each_possible_cpu(cpu) {
220 per_cpu(first_log, cpu) = 1;
221 }
222
223}
224
225
226static void met_switch_stop(void)
227{
228 int cpu;
229
230 if (met_switch.mode & MT_SWITCH_SCHEDSWITCH) {
231 met_cpupmu.timed_polling = cpu_timed_polling;
232 /* met_cpupmu.tagged_polling = cpu_tagged_polling; */
233 }
234
235 for_each_possible_cpu(cpu) {
236 per_cpu(first_log, cpu) = 0;
237 }
238
239}
240
241
/*
 * Parse and validate the "--switch=<mode>" argument into met_switch.mode.
 * Returns 0 on success; on any parse/range failure the mode is reset to 0
 * and -EINVAL is returned.
 */
static int met_switch_process_argument(const char *arg, int len)
{
	unsigned int value;
	/*
	 * Largest valid bitmask: (MT_SWITCH_MX_ITEM - 1) + (MT_SWITCH_MX_ITEM - 2).
	 * e.g. with MT_SWITCH_MX_ITEM == 5: (5-1) + (5-2) = 0b100 + 0b11 = 7.
	 */
	unsigned int max_value = ((MT_SWITCH_MX_ITEM * 2) - 3);


	if (met_parse_num(arg, &value, len) < 0)
		goto arg_switch_exit;

	if ((value < 1) || (value > max_value))
		goto arg_switch_exit;

	met_switch.mode = value;
	return 0;

arg_switch_exit:
	met_switch.mode = 0;
	return -EINVAL;
}
262
/* trace header describing the columns emitted by mt_switch() records */
static const char header[] =
	"met-info [000] 0.0: met_switch_header: prev_pid,prev_state,next_pid,next_state\n";

/* usage text returned by met_switch_print_help(); mode is a bitmask */
static const char help[] =
" --switch=mode mode:0x1 - output CPUPMU whenever sched_switch\n"
" mode:0x2 - output Aarch 32/64 state whenever state changed (no CPUPMU)\n"
" mode:0x4 - force output count at tag_start/tag_end\n"
" mode:0x8 - task switch timer\n"
" mode:0xF - mode 0x1 + 0x2 + 04 + 08\n";
272
273static int met_switch_print_help(char *buf, int len)
274{
275 return snprintf(buf, PAGE_SIZE, help);
276}
277
278static int met_switch_print_header(char *buf, int len)
279{
280 int ret = 0;
281
282 ret =
283 snprintf(buf, PAGE_SIZE, "met-info [000] 0.0: mp_cpu_switch_base: %d\n",
284 met_switch.mode);
285 if (met_switch.mode & MT_SWITCH_64_32BIT)
286 ret += snprintf(buf + ret, PAGE_SIZE, header);
287
288 return ret;
289}
290
291
/*
 * MET device descriptor for the "switch" module; registers the lifecycle
 * and sysfs callbacks above with the MET framework.
 */
struct metdevice met_switch = {
	.name = "switch",
	.type = MET_TYPE_PMU,
	.create_subfs = met_switch_create_subfs,
	.delete_subfs = met_switch_delete_subfs,
	.start = met_switch_start,
	.stop = met_switch_stop,
	.process_argument = met_switch_process_argument,
	.print_help = met_switch_print_help,
	.print_header = met_switch_print_header,
};