/*
2 * Copyright (C) 2019 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/slab.h>
17#include <linux/list.h>
18#include <linux/debugfs.h>
19#include <linux/mutex.h>
20#include <linux/semaphore.h>
21#include <linux/freezer.h>
22#include <linux/uaccess.h>
23#include <linux/completion.h>
24
25#include "ondiemet_log.h"
26
#define ONDIEMET_LOG_REQ 1
/* TODO: abandon this constant */
#define ONDIEMET_LOG_STOP 2

/* sentinel: no reader currently owns the trace stream */
#define PID_NONE (-1)

/* values of ondiemet_trace_run (set via the ondiemet_log_run device node) */
#define ONDIEMET_LOG_STOP_MODE 0
#define ONDIEMET_LOG_RUN_MODE 1
#define ONDIEMET_LOG_DEBUG_MODE 2

/* current run mode; DEBUG mode additionally enables pr_debug tracing */
static int ondiemet_trace_run;
/* debugfs directory "ondiemet" holding the "trace" node */
static struct dentry *dbgfs_met_dir;

/* serializes readers of the trace seq_file (held from seq start to stop) */
struct mutex lock_tracef;
/* FIFO of pending log requests plus the state needed to close/drain it */
struct ondiemet_log_req_q_t {
	struct list_head listq;		/* pending ondiemet_log_req entries */
	struct mutex lockq;		/* protects listq and closeq_flag */
	/* struct semaphore new_evt_sema; */
	struct completion new_evt_comp;	/* counts queued entries, semaphore-like */
	int closeq_flag;		/* non-zero: queue closed, enqueue refused */
} ondiemet_log_req_q;
48
/* one buffered log-output request; released via __ondiemet_log_req_fini() */
struct ondiemet_log_req {
	struct list_head list;	/* link in ondiemet_log_req_q.listq */
	int cmd_type;		/* ONDIEMET_LOG_REQ or ONDIEMET_LOG_STOP */
	const char *src;	/* bytes to emit */
	size_t num;		/* number of bytes at src */

	/* invoked once when the request finishes; typically frees param */
	void (*on_fini_cb)(const void *p);
	const void *param;	/* opaque argument passed to on_fini_cb */
};
58
/*
 * Initialize a freshly allocated request with the given command, payload
 * and completion callback. Arguments are parenthesized so expression
 * arguments cannot break operator precedence; each is evaluated once.
 */
#define __ondiemet_log_req_init(req, cmd, s, n, pf, p) \
	do { \
		INIT_LIST_HEAD(&(req)->list); \
		(req)->cmd_type = (cmd); \
		(req)->src = (s); \
		(req)->num = (n); \
		(req)->on_fini_cb = (pf); \
		(req)->param = (p); \
	} while (0)
68
/*
 * Finish a request: run its completion callback (if any), then free it.
 * Arguments are parenthesized against precedence surprises.
 * NOTE: (req) is evaluated several times — do not pass an expression
 * with side effects.
 */
#define __ondiemet_log_req_fini(req) \
	do { \
		if ((req)->on_fini_cb) \
			(req)->on_fini_cb((req)->param); \
		kfree(req); \
	} while (0)
75
76static void __ondiemet_log_req_q_init(struct ondiemet_log_req_q_t *q)
77{
78 INIT_LIST_HEAD(&q->listq);
79 mutex_init(&q->lockq);
80 /* sema_init(&q->new_evt_sema, 0); */
81 init_completion(&q->new_evt_comp);
82 q->closeq_flag = 1;
83}
84
85/* undequeue is seen as a roll-back operation, so it can be done even when the queue is closed */
86static void __ondiemet_log_req_undeq(struct ondiemet_log_req *req)
87{
88 mutex_lock(&ondiemet_log_req_q.lockq);
89 list_add(&req->list, &ondiemet_log_req_q.listq);
90 mutex_unlock(&ondiemet_log_req_q.lockq);
91
92 /* up(&ondiemet_log_req_q.new_evt_sema); */
93 complete(&ondiemet_log_req_q.new_evt_comp);
94}
95
96static int __ondiemet_log_req_enq(struct ondiemet_log_req *req)
97{
98 mutex_lock(&ondiemet_log_req_q.lockq);
99 if (ondiemet_log_req_q.closeq_flag) {
100 mutex_unlock(&ondiemet_log_req_q.lockq);
101 return -EBUSY;
102 }
103
104 list_add_tail(&req->list, &ondiemet_log_req_q.listq);
105 if (req->cmd_type == ONDIEMET_LOG_STOP)
106 ondiemet_log_req_q.closeq_flag = 1;
107 mutex_unlock(&ondiemet_log_req_q.lockq);
108
109 /* up(&ondiemet_log_req_q.new_evt_sema); */
110 complete(&ondiemet_log_req_q.new_evt_comp);
111
112 return 0;
113}
114
115int ondiemet_log_req_enq(const char *src, size_t num, void (*on_fini_cb)(const void *p), const void *param)
116{
117 struct ondiemet_log_req *req = kmalloc(sizeof(*req), GFP_KERNEL);
118
119 __ondiemet_log_req_init(req, ONDIEMET_LOG_REQ, src, num, on_fini_cb, param);
120 return __ondiemet_log_req_enq(req);
121}
122
/*int down_freezable_interruptible(struct semaphore *sem) */
/*
 * Wait for a completion, interruptible by signals, while telling the
 * freezer not to count this task (so a system suspend/freeze is not
 * blocked by the wait).
 * Returns 0 when completed, or -ERESTARTSYS if interrupted by a signal.
 */
int down_freezable_interruptible(struct completion *comp)
{

	int ret;

	freezer_do_not_count();
	/* ret = down_interruptible(sem); */
	ret = wait_for_completion_interruptible(comp);
	freezer_count();

	return ret;
}
136
/*
 * Block until a request is available, then detach and return the head of
 * the queue. Returns NULL if the wait was interrupted by a signal.
 * The completion acts as a counting semaphore (one complete() per queued
 * entry), so a successful wait implies the list is non-empty.
 */
struct ondiemet_log_req *__ondiemet_log_req_deq(void)
{
	struct ondiemet_log_req *ret_req;

	/*if (down_freezable_interruptible(&ondiemet_log_req_q.new_evt_sema))*/
	if (down_freezable_interruptible(&ondiemet_log_req_q.new_evt_comp))
		return NULL;

	mutex_lock(&ondiemet_log_req_q.lockq);
	ret_req = list_entry(ondiemet_log_req_q.listq.next, struct ondiemet_log_req, list);
	list_del_init(&ret_req->list);
	mutex_unlock(&ondiemet_log_req_q.lockq);

	return ret_req;
}
152
153void __ondiemet_log_req_open(void)
154{
155 mutex_lock(&ondiemet_log_req_q.lockq);
156 ondiemet_log_req_q.closeq_flag = 0;
157 mutex_unlock(&ondiemet_log_req_q.lockq);
158}
159
160int __ondiemet_log_req_closed(void)
161{
162 int ret;
163
164 mutex_lock(&ondiemet_log_req_q.lockq);
165 ret = ondiemet_log_req_q.closeq_flag && list_empty(&ondiemet_log_req_q.listq);
166 mutex_unlock(&ondiemet_log_req_q.lockq);
167
168 return ret;
169}
170
171int __ondiemet_log_req_working(void)
172{
173 int ret;
174
175 mutex_lock(&ondiemet_log_req_q.lockq);
176 ret = !ondiemet_log_req_q.closeq_flag;
177 mutex_unlock(&ondiemet_log_req_q.lockq);
178
179 return ret;
180}
181
182static void *__ondiemet_trace_seq_next(struct seq_file *seqf, loff_t *offset)
183{
184 struct ondiemet_log_req *next_req;
185
186 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
187 pr_debug("[met] __ondiemet_trace_seq_next: pid: %d\n", current->pid);
188
189 if (__ondiemet_log_req_closed())
190 return NULL;
191
192 next_req = __ondiemet_log_req_deq();
193
194 if (next_req == NULL)
195 return NULL;
196
197 if (next_req->cmd_type == ONDIEMET_LOG_STOP) {
198 __ondiemet_log_req_fini(next_req);
199 return NULL;
200 }
201
202 return (void *) next_req;
203}
204
/* protects trace_owner_pid */
struct mutex lock_trace_owner_pid;
/* pid of the task currently holding lock_tracef, or PID_NONE */
pid_t trace_owner_pid = PID_NONE;
/*
 * seq_file start callback: take exclusive ownership of the trace stream.
 * mutex_trylock is used so a second concurrent reader sees EOF instead of
 * blocking; the owner's pid is recorded so seq_stop only releases the
 * lock for the task that actually acquired it.
 * NOTE(review): PF_NOFREEZE is set here but never cleared on this task —
 * confirm that is intentional.
 */
static void *ondiemet_trace_seq_start(struct seq_file *seqf, loff_t *offset)
{
	void *ret;

	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE) {
		pr_debug("[met] ondiemet_trace_seq_start: locked_pid: %d, pid: %d, offset: %llu\n",
			 trace_owner_pid, current->pid, *offset);
	}

	if (!mutex_trylock(&lock_tracef))
		return NULL;

	mutex_lock(&lock_trace_owner_pid);
	trace_owner_pid = current->pid;
	current->flags |= PF_NOFREEZE;
	mutex_unlock(&lock_trace_owner_pid);

	ret = __ondiemet_trace_seq_next(seqf, offset);

	return ret;
}
228
229static void *ondiemet_trace_seq_next(struct seq_file *seqf, void *p, loff_t *offset)
230{
231 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
232 pr_debug("[met] ondiemet_trace_seq_next: pid: %d\n", current->pid);
233
234 (*offset)++;
235 return __ondiemet_trace_seq_next(seqf, offset);
236}
237
238static int ondiemet_trace_seq_show(struct seq_file *seqf, void *p)
239{
240 struct ondiemet_log_req *req = (struct ondiemet_log_req *) p;
241 size_t l_sz;
242 size_t r_sz;
243 struct ondiemet_log_req *l_req;
244 struct ondiemet_log_req *r_req;
245 int ret;
246
247 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
248 pr_debug("[met] ondiemet_trace_seq_show: pid: %d\n", current->pid);
249
250 if (req->num >= seqf->size) {
251 l_req = kmalloc(sizeof(*req), GFP_KERNEL);
252 r_req = req;
253
254 l_sz = seqf->size >> 1;
255 r_sz = req->num - l_sz;
256 __ondiemet_log_req_init(l_req, ONDIEMET_LOG_REQ, req->src, l_sz, NULL, NULL);
257 __ondiemet_log_req_init(r_req, ONDIEMET_LOG_REQ, req->src + l_sz,
258 r_sz, req->on_fini_cb, req->param);
259
260 __ondiemet_log_req_undeq(r_req);
261 req = l_req;
262
263 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
264 pr_debug("[met] ondiemet_trace_seq_show: split request\n");
265 }
266
267 ret = seq_write(seqf, req->src, req->num);
268
269 if (ret) {
270 /* check if seq_file buffer overflows */
271 if (seqf->count == seqf->size) {
272 __ondiemet_log_req_undeq(req);
273 } else {
274 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
275 pr_debug("[met] ondiemet_trace_seq_show: reading trace record failed, some data may be lost or corrupted\n");
276 __ondiemet_log_req_fini(req);
277 }
278 return 0;
279 }
280
281 __ondiemet_log_req_fini(req);
282 return 0;
283}
284
/*
 * seq_file stop callback: release trace-stream ownership, but only if the
 * current task is the one that acquired it in seq_start (start may have
 * failed the trylock, in which case this task never owned lock_tracef).
 */
static void ondiemet_trace_seq_stop(struct seq_file *seqf, void *p)
{
	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
		pr_debug("[met] ondiemet_trace_seq_stop: pid: %d\n", current->pid);

	mutex_lock(&lock_trace_owner_pid);
	if (current->pid == trace_owner_pid) {
		trace_owner_pid = PID_NONE;
		mutex_unlock(&lock_tracef);
	}
	mutex_unlock(&lock_trace_owner_pid);
}
297
/* seq_file iterator that streams queued log requests to the "trace" node */
static const struct seq_operations ondiemet_trace_seq_ops = {
	.start = ondiemet_trace_seq_start,
	.next = ondiemet_trace_seq_next,
	.stop = ondiemet_trace_seq_stop,
	.show = ondiemet_trace_seq_show
};
304
/* open callback for the debugfs "trace" node: bind the seq_file iterator */
static int ondiemet_trace_open(struct inode *inode, struct file *fp)
{
	return seq_open(fp, &ondiemet_trace_seq_ops);
}
309
/* file operations for the debugfs "trace" node, backed by seq_file */
static const struct file_operations ondiemet_trace_fops = {
	.owner = THIS_MODULE,
	.open = ondiemet_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};
317
/*struct semaphore log_start_sema;*/
/* completed by __log_stop_cb once a previous STOP request has been consumed */
struct completion log_start_comp;
/*
 * (Re)start the log manager by reopening the request queue.
 * Returns -EINVAL if already running. If a previous stop is still
 * draining (queue closed but not yet empty), wait — killably — for the
 * STOP request's callback before reopening; a kill during the wait is
 * propagated as the return value.
 */
int ondiemet_log_manager_start(void)
{
	int ret;

	/* TODO: choose a better return value */
	if (__ondiemet_log_req_working())
		return -EINVAL;

	if (!__ondiemet_log_req_closed()) {
		/*ret = down_killable(&log_start_sema);*/
		ret = wait_for_completion_killable(&log_start_comp);
		if (ret)
			return ret;
	}

	__ondiemet_log_req_open();

	return 0;
}
339
/*struct semaphore log_stop_sema;*/
/* completed by __log_stop_cb when the STOP request reaches the consumer */
struct completion log_stop_comp;
/*
 * Completion callback attached to the STOP request: wakes both a pending
 * ondiemet_log_manager_stop() waiter and any ondiemet_log_manager_start()
 * waiting for the previous stop to finish draining.
 */
static void __log_stop_cb(const void *p)
{
	/* up(&log_start_sema); */
	/* up(&log_stop_sema); */
	complete(&log_start_comp);
	complete(&log_stop_comp);
}
349
350int ondiemet_log_manager_stop(void)
351{
352 int ret;
353 struct ondiemet_log_req *req;
354
355 /* TODO: choose a better return value */
356 if (__ondiemet_log_req_closed())
357 return -EINVAL;
358
359 req = kmalloc(sizeof(*req), GFP_KERNEL);
360
361 __ondiemet_log_req_init(req, ONDIEMET_LOG_STOP, NULL, 0, __log_stop_cb, NULL);
362 /*sema_init(&log_start_sema, 0); */
363 /*sema_init(&log_stop_sema, 0); */
364 init_completion(&log_start_comp);
365 init_completion(&log_stop_comp);
366
367 ret = __ondiemet_log_req_enq(req);
368 if (ret)
369 return ret;
370
371 /* XXX: blocking may be break by SIGKILL */
372 /*return down_killable(&log_stop_sema);*/
373 return wait_for_completion_killable(&log_stop_comp);
374}
375
/*
 * Parse a decimal or 0x/0X-prefixed hexadecimal unsigned integer from str.
 * @len: number of valid characters; must be positive.
 * Returns 0 and stores the result in *value on success, -1 otherwise.
 */
int ondiemet_parse_num(const char *str, unsigned int *value, int len)
{
	unsigned int base = 10;

	if (len <= 0)
		return -1;

	/* a "0x"/"0X" prefix selects hexadecimal */
	if (len > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		base = 16;

	return kstrtouint(str, base, value) ? -1 : 0;
}
396
397/* XXX: seq_file will output only when a page is filled */
398static ssize_t ondiemet_log_write_store(struct device *dev,
399 struct device_attribute *attr,
400 const char *buf,
401 size_t count)
402{
403 char *plog = NULL;
404
405 plog = kmalloc_array(count, sizeof(*plog), GFP_KERNEL);
406 if (!plog) {
407 /* TODO: use a better error code */
408 return -EINVAL;
409 }
410
411 memcpy(plog, buf, count);
412
413 mutex_lock(&dev->mutex);
414 ondiemet_log_req_enq(plog, strnlen(plog, count), kfree, plog);
415 mutex_unlock(&dev->mutex);
416
417 return count;
418}
419
420static DEVICE_ATTR(ondiemet_log_write, 0664, NULL, ondiemet_log_write_store);
421
422static ssize_t ondiemet_log_run_show(struct device *dev, struct device_attribute *attr, char *buf)
423{
424 int sz;
425
426 mutex_lock(&dev->mutex);
427 sz = snprintf(buf, PAGE_SIZE, "%d\n", ondiemet_trace_run);
428 mutex_unlock(&dev->mutex);
429 return sz;
430}
431
432static ssize_t ondiemet_log_run_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
433{
434 int ret;
435 int prev_run_state;
436
437 mutex_lock(&dev->mutex);
438
439 prev_run_state = ondiemet_trace_run;
440
441 if (kstrtoint(buf, 10, &ondiemet_trace_run) != 0)
442 return -EINVAL;
443
444 if (ondiemet_trace_run <= ONDIEMET_LOG_STOP_MODE) {
445 ondiemet_trace_run = ONDIEMET_LOG_STOP_MODE;
446 ondiemet_log_manager_stop();
447
448 if (prev_run_state == ONDIEMET_LOG_DEBUG_MODE)
449 device_remove_file(dev, &dev_attr_ondiemet_log_write);
450 } else if (ondiemet_trace_run == ONDIEMET_LOG_RUN_MODE) {
451 ondiemet_trace_run = ONDIEMET_LOG_RUN_MODE;
452 ondiemet_log_manager_start();
453
454 if (prev_run_state == ONDIEMET_LOG_DEBUG_MODE)
455 device_remove_file(dev, &dev_attr_ondiemet_log_write);
456 } else {
457 ondiemet_trace_run = ONDIEMET_LOG_DEBUG_MODE;
458 ondiemet_log_manager_start();
459
460 if (prev_run_state != ONDIEMET_LOG_DEBUG_MODE) {
461 ret = device_create_file(dev, &dev_attr_ondiemet_log_write);
462 if (ret != 0)
463 pr_debug("[met] can not create device node: ondiemet_log_write\n");
464 }
465 }
466
467 mutex_unlock(&dev->mutex);
468
469 return count;
470}
471
472static DEVICE_ATTR(ondiemet_log_run, 0660, ondiemet_log_run_show, ondiemet_log_run_store);
473
474int ondiemet_log_manager_init(struct device *dev)
475{
476 int ret;
477 struct dentry *d;
478
479 mutex_init(&lock_tracef);
480
481 __ondiemet_log_req_q_init(&ondiemet_log_req_q);
482
483 /*sema_init(&log_start_sema, 0);*/
484 /*sema_init(&log_stop_sema, 0);*/
485 init_completion(&log_start_comp);
486 init_completion(&log_stop_comp);
487
488 dbgfs_met_dir = debugfs_create_dir("ondiemet", NULL);
489 if (!dbgfs_met_dir) {
490 pr_debug("[met] can not create debugfs directory: met\n");
491 return -ENOMEM;
492 }
493
494 mutex_init(&lock_trace_owner_pid);
495
496 d = debugfs_create_file("trace", 0644, dbgfs_met_dir, NULL, &ondiemet_trace_fops);
497 if (!d) {
498 pr_debug("[met] can not create devide node in debugfs: ondiemet_trace\n");
499 return -ENOMEM;
500 }
501
502 ondiemet_trace_run = __ondiemet_log_req_working();
503 ret = device_create_file(dev, &dev_attr_ondiemet_log_run);
504 if (ret != 0) {
505 pr_debug("[met] can not create device node: ondiemet_log_run\n");
506 return ret;
507 }
508
509 return 0;
510}
511
/*
 * Tear down what ondiemet_log_manager_init() created: the run-mode device
 * attribute and the whole "ondiemet" debugfs directory (including "trace").
 * Always returns 0.
 */
int ondiemet_log_manager_uninit(struct device *dev)
{
	device_remove_file(dev, &dev_attr_ondiemet_log_run);
	debugfs_remove_recursive(dbgfs_met_dir);
	return 0;
}