// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 MediaTek Inc.
 */
5
6#include <linux/kernel.h>
7#include <linux/errno.h>
8#include <linux/slab.h>
9#include <linux/list.h>
10#include <linux/debugfs.h>
11#include <linux/proc_fs.h>
12#include <linux/mutex.h>
13#include <linux/semaphore.h>
14#include <linux/freezer.h>
15#include <linux/uaccess.h>
16#include <linux/completion.h>
17
18#include "ondiemet_log.h"
19#include "interface.h"
20
#define ONDIEMET_LOG_REQ 1	/* request carries log data to be emitted */
/* TODO: abandon this constant */
#define ONDIEMET_LOG_STOP 2	/* sentinel request that closes the queue */

#define PID_NONE (-1)		/* no task currently owns the trace stream */

/* valid values of ondiemet_trace_run (run mode of the log manager) */
#define ONDIEMET_LOG_STOP_MODE 0
#define ONDIEMET_LOG_RUN_MODE 1
#define ONDIEMET_LOG_DEBUG_MODE 2
/* current run mode: one of ONDIEMET_LOG_{STOP,RUN,DEBUG}_MODE */
static int ondiemet_trace_run;
#ifdef ONDIEMET_MOUNT_DEBUGFS
/* "ondiemet" directory created under debugfs at init */
static struct dentry *dbgfs_met_dir;
#else
/* "ondiemet" directory created under procfs at init */
static struct proc_dir_entry *procfs_met_dir;
#endif
37
/* Held by the single task currently reading the trace seq_file. */
struct mutex lock_tracef;
/* FIFO of pending log requests, shared between producers and the reader. */
struct ondiemet_log_req_q_t {
	struct list_head listq;		/* pending requests, FIFO order */
	struct mutex lockq;		/* protects listq and closeq_flag */
	struct completion new_evt_comp;	/* signalled once per queued/re-queued request */
	int closeq_flag;		/* nonzero: queue rejects new submissions */
} ondiemet_log_req_q;
46
/* One unit of log data queued for emission through the trace seq_file. */
struct ondiemet_log_req {
	struct list_head list;	/* linkage in ondiemet_log_req_q.listq */
	int cmd_type;		/* ONDIEMET_LOG_REQ or ONDIEMET_LOG_STOP */
	const char *src;	/* log payload (buffer is not owned by the queue) */
	size_t num;		/* payload length in bytes */

	void (*on_fini_cb)(const void *p);	/* run when the request is finished */
	const void *param;			/* argument passed to on_fini_cb */
};
56
/* Initialize a request in place with the given command, payload and callback. */
#define __ondiemet_log_req_init(req, cmd, s, n, pf, p) \
	do { \
		INIT_LIST_HEAD(&req->list); \
		req->cmd_type = cmd; \
		req->src = s; \
		req->num = n; \
		req->on_fini_cb = pf; \
		req->param = p; \
	} while (0)
66
/*
 * Finish a request: run its completion callback (if any), then free the
 * request itself. NOTE(review): the callback receives req->param, which
 * typically owns the payload buffer — the payload is freed there, not here.
 */
#define __ondiemet_log_req_fini(req) \
	do { \
		if (req->on_fini_cb) \
			req->on_fini_cb(req->param); \
		kfree(req); \
	} while (0)
73
74static void __ondiemet_log_req_q_init(struct ondiemet_log_req_q_t *q)
75{
76 INIT_LIST_HEAD(&q->listq);
77 mutex_init(&q->lockq);
78 /* sema_init(&q->new_evt_sema, 0); */
79 init_completion(&q->new_evt_comp);
80 q->closeq_flag = 1;
81}
82
83/* undequeue is seen as a roll-back operation, so it can be done even when the queue is closed */
84static void __ondiemet_log_req_undeq(struct ondiemet_log_req *req)
85{
86 mutex_lock(&ondiemet_log_req_q.lockq);
87 list_add(&req->list, &ondiemet_log_req_q.listq);
88 mutex_unlock(&ondiemet_log_req_q.lockq);
89
90 /* up(&ondiemet_log_req_q.new_evt_sema); */
91 complete(&ondiemet_log_req_q.new_evt_comp);
92}
93
94static int __ondiemet_log_req_enq(struct ondiemet_log_req *req)
95{
96 mutex_lock(&ondiemet_log_req_q.lockq);
97 if (ondiemet_log_req_q.closeq_flag) {
98 mutex_unlock(&ondiemet_log_req_q.lockq);
99 return -EBUSY;
100 }
101
102 list_add_tail(&req->list, &ondiemet_log_req_q.listq);
103 if (req->cmd_type == ONDIEMET_LOG_STOP)
104 ondiemet_log_req_q.closeq_flag = 1;
105 mutex_unlock(&ondiemet_log_req_q.lockq);
106
107 /* up(&ondiemet_log_req_q.new_evt_sema); */
108 complete(&ondiemet_log_req_q.new_evt_comp);
109
110 return 0;
111}
112
113int ondiemet_log_req_enq(const char *src, size_t num, void (*on_fini_cb)(const void *p), const void *param)
114{
115 struct ondiemet_log_req *req = kmalloc(sizeof(*req), GFP_KERNEL);
116
117 __ondiemet_log_req_init(req, ONDIEMET_LOG_REQ, src, num, on_fini_cb, param);
118 return __ondiemet_log_req_enq(req);
119}
120
/*
 * Wait on a completion interruptibly while telling the freezer to treat
 * this task as already frozen, so a blocked wait does not stall suspend.
 * Returns 0 on completion or a negative value when interrupted by a signal.
 */
int down_freezable_interruptible(struct completion *comp)
{
	int ret;

	freezer_do_not_count();
	ret = wait_for_completion_interruptible(comp);
	freezer_count();

	return ret;
}
134
135struct ondiemet_log_req *__ondiemet_log_req_deq(void)
136{
137 struct ondiemet_log_req *ret_req;
138
139 /*if (down_freezable_interruptible(&ondiemet_log_req_q.new_evt_sema))*/
140 if (down_freezable_interruptible(&ondiemet_log_req_q.new_evt_comp))
141 return NULL;
142
143 mutex_lock(&ondiemet_log_req_q.lockq);
144 ret_req = list_entry(ondiemet_log_req_q.listq.next, struct ondiemet_log_req, list);
145 list_del_init(&ret_req->list);
146 mutex_unlock(&ondiemet_log_req_q.lockq);
147
148 return ret_req;
149}
150
151void __ondiemet_log_req_open(void)
152{
153 mutex_lock(&ondiemet_log_req_q.lockq);
154 ondiemet_log_req_q.closeq_flag = 0;
155 mutex_unlock(&ondiemet_log_req_q.lockq);
156}
157
158int __ondiemet_log_req_closed(void)
159{
160 int ret;
161
162 mutex_lock(&ondiemet_log_req_q.lockq);
163 ret = ondiemet_log_req_q.closeq_flag && list_empty(&ondiemet_log_req_q.listq);
164 mutex_unlock(&ondiemet_log_req_q.lockq);
165
166 return ret;
167}
168
169int __ondiemet_log_req_working(void)
170{
171 int ret;
172
173 mutex_lock(&ondiemet_log_req_q.lockq);
174 ret = !ondiemet_log_req_q.closeq_flag;
175 mutex_unlock(&ondiemet_log_req_q.lockq);
176
177 return ret;
178}
179
/*
 * Fetch the next displayable request for the seq_file iterator.
 * Returns NULL at end of stream: when the queue is fully closed, when
 * the blocking dequeue is interrupted by a signal, or when a STOP
 * request is reached (the STOP request itself is finished here).
 */
static void *__ondiemet_trace_seq_next(struct seq_file *seqf, loff_t *offset)
{
	struct ondiemet_log_req *next_req;

	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
		pr_debug("[met] __ondiemet_trace_seq_next: pid: %d\n", current->pid);

	if (__ondiemet_log_req_closed())
		return NULL;

	/* may block until a producer queues a request */
	next_req = __ondiemet_log_req_deq();

	if (next_req == NULL)
		return NULL;

	if (next_req->cmd_type == ONDIEMET_LOG_STOP) {
		__ondiemet_log_req_fini(next_req);
		return NULL;
	}

	return (void *) next_req;
}
202
/* Serializes updates to trace_owner_pid. */
struct mutex lock_trace_owner_pid;
/* PID of the task currently iterating the trace, or PID_NONE. */
pid_t trace_owner_pid = PID_NONE;
/*
 * seq_file start callback: claim exclusive ownership of the trace stream.
 * A second concurrent reader fails the trylock and simply sees EOF.
 */
static void *ondiemet_trace_seq_start(struct seq_file *seqf, loff_t *offset)
{
	void *ret;

	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE) {
		pr_debug("[met] ondiemet_trace_seq_start: locked_pid: %d, pid: %d, offset: %llu\n",
			 trace_owner_pid, current->pid, *offset);
	}

	/* only one reader at a time; losers get EOF instead of blocking */
	if (!mutex_trylock(&lock_tracef))
		return NULL;

	mutex_lock(&lock_trace_owner_pid);
	trace_owner_pid = current->pid;
	/* NOTE(review): PF_NOFREEZE is set here but never cleared in the
	 * stop callback — confirm that is intended
	 */
	current->flags |= PF_NOFREEZE;
	mutex_unlock(&lock_trace_owner_pid);

	ret = __ondiemet_trace_seq_next(seqf, offset);

	return ret;
}
226
227static void *ondiemet_trace_seq_next(struct seq_file *seqf, void *p, loff_t *offset)
228{
229 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
230 pr_debug("[met] ondiemet_trace_seq_next: pid: %d\n", current->pid);
231
232 (*offset)++;
233 return __ondiemet_trace_seq_next(seqf, offset);
234}
235
236static int ondiemet_trace_seq_show(struct seq_file *seqf, void *p)
237{
238 struct ondiemet_log_req *req = (struct ondiemet_log_req *) p;
239 size_t l_sz;
240 size_t r_sz;
241 struct ondiemet_log_req *l_req;
242 struct ondiemet_log_req *r_req;
243 int ret;
244
245 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
246 pr_debug("[met] ondiemet_trace_seq_show: pid: %d\n", current->pid);
247
248 if (req->num >= seqf->size) {
249 l_req = kmalloc(sizeof(*req), GFP_KERNEL);
250 r_req = req;
251
252 l_sz = seqf->size >> 1;
253 r_sz = req->num - l_sz;
254 __ondiemet_log_req_init(l_req, ONDIEMET_LOG_REQ, req->src, l_sz, NULL, NULL);
255 __ondiemet_log_req_init(r_req, ONDIEMET_LOG_REQ, req->src + l_sz,
256 r_sz, req->on_fini_cb, req->param);
257
258 __ondiemet_log_req_undeq(r_req);
259 req = l_req;
260
261 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
262 pr_debug("[met] ondiemet_trace_seq_show: split request\n");
263 }
264
265 ret = seq_write(seqf, req->src, req->num);
266
267 if (ret) {
268 /* check if seq_file buffer overflows */
269 if (seqf->count == seqf->size) {
270 __ondiemet_log_req_undeq(req);
271 } else {
272 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
273 pr_debug("[met] ondiemet_trace_seq_show: reading trace record failed, some data may be lost or corrupted\n");
274 __ondiemet_log_req_fini(req);
275 }
276 return 0;
277 }
278
279 __ondiemet_log_req_fini(req);
280 return 0;
281}
282
/*
 * seq_file stop callback: release trace ownership, but only if the
 * calling task is the one that acquired it in ondiemet_trace_seq_start()
 * (stop is also invoked for readers whose trylock failed, which must not
 * unlock a mutex they never took).
 */
static void ondiemet_trace_seq_stop(struct seq_file *seqf, void *p)
{
	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
		pr_debug("[met] ondiemet_trace_seq_stop: pid: %d\n", current->pid);

	mutex_lock(&lock_trace_owner_pid);
	if (current->pid == trace_owner_pid) {
		trace_owner_pid = PID_NONE;
		mutex_unlock(&lock_tracef);
	}
	mutex_unlock(&lock_trace_owner_pid);
}
295
/* seq_file iterator over queued log requests. */
static const struct seq_operations ondiemet_trace_seq_ops = {
	.start = ondiemet_trace_seq_start,
	.next = ondiemet_trace_seq_next,
	.stop = ondiemet_trace_seq_stop,
	.show = ondiemet_trace_seq_show
};
302
/* open() handler for the "trace" node: attach the seq_file iterator. */
static int ondiemet_trace_open(struct inode *inode, struct file *fp)
{
	return seq_open(fp, &ondiemet_trace_seq_ops);
}
307
/* File operations for the "ondiemet/trace" debugfs/procfs node. */
static const struct file_operations ondiemet_trace_fops = {
	.owner = THIS_MODULE,
	.open = ondiemet_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};
315
/* Completed by __log_stop_cb() once a previous STOP request finishes. */
struct completion log_start_comp;
/*
 * (Re)open the log request queue.
 * Returns -EINVAL if the queue is already open, a negative errno if the
 * wait for a pending stop is killed, 0 on success. If a previous stop is
 * still draining, blocks (killably) until its STOP request completes.
 * NOTE(review): the working/closed checks and the open are not atomic —
 * concurrent start/stop callers could interleave; confirm callers
 * serialize (sysfs store holds dev->mutex).
 */
int ondiemet_log_manager_start(void)
{
	int ret;

	/* TODO: choose a better return value */
	if (__ondiemet_log_req_working())
		return -EINVAL;

	if (!__ondiemet_log_req_closed()) {
		/* a STOP is still in flight; wait for it to drain */
		ret = wait_for_completion_killable(&log_start_comp);
		if (ret)
			return ret;
	}

	__ondiemet_log_req_open();

	return 0;
}
337
/* Completed by __log_stop_cb() when the STOP request is processed. */
struct completion log_stop_comp;
/* Finish callback of the STOP request: wake both start and stop waiters. */
static void __log_stop_cb(const void *p)
{
	complete(&log_start_comp);
	complete(&log_stop_comp);
}
347
348int ondiemet_log_manager_stop(void)
349{
350 int ret;
351 struct ondiemet_log_req *req;
352
353 /* TODO: choose a better return value */
354 if (__ondiemet_log_req_closed())
355 return -EINVAL;
356
357 req = kmalloc(sizeof(*req), GFP_KERNEL);
358
359 __ondiemet_log_req_init(req, ONDIEMET_LOG_STOP, NULL, 0, __log_stop_cb, NULL);
360 /*sema_init(&log_start_sema, 0); */
361 /*sema_init(&log_stop_sema, 0); */
362 init_completion(&log_start_comp);
363 init_completion(&log_stop_comp);
364
365 ret = __ondiemet_log_req_enq(req);
366 if (ret)
367 return ret;
368
369 /* XXX: blocking may be break by SIGKILL */
370 /*return down_killable(&log_stop_sema);*/
371 return wait_for_completion_killable(&log_stop_comp);
372}
373
/*
 * Parse str as an unsigned int. A "0x"/"0X" prefix selects base 16,
 * otherwise base 10 is used. Returns 0 on success and stores the result
 * in *value; returns -1 on len <= 0 or malformed input.
 */
int ondiemet_parse_num(const char *str, unsigned int *value, int len)
{
	int base = 10;

	if (len <= 0)
		return -1;

	if (len > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		base = 16;

	if (kstrtouint(str, base, value) != 0)
		return -1;

	return 0;
}
394
395/* XXX: seq_file will output only when a page is filled */
396static ssize_t ondiemet_log_write_store(struct device *dev,
397 struct device_attribute *attr,
398 const char *buf,
399 size_t count)
400{
401 char *plog = NULL;
402
403 plog = kmalloc_array(count, sizeof(*plog), GFP_KERNEL);
404 if (!plog) {
405 /* TODO: use a better error code */
406 return -EINVAL;
407 }
408
409 memcpy(plog, buf, count);
410
411 mutex_lock(&dev->mutex);
412 ondiemet_log_req_enq(plog, strnlen(plog, count), kfree, plog);
413 mutex_unlock(&dev->mutex);
414
415 return count;
416}
417
/* sysfs node (debug mode only): write log data directly into the queue */
static DEVICE_ATTR(ondiemet_log_write, 0664, NULL, ondiemet_log_write_store);
419
420static ssize_t ondiemet_log_run_show(struct device *dev, struct device_attribute *attr, char *buf)
421{
422 int sz;
423
424 mutex_lock(&dev->mutex);
425 sz = snprintf(buf, PAGE_SIZE, "%d\n", ondiemet_trace_run);
426 mutex_unlock(&dev->mutex);
427 return sz;
428}
429
430static ssize_t ondiemet_log_run_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
431{
432 int ret;
433 int prev_run_state;
434
435 mutex_lock(&dev->mutex);
436
437 prev_run_state = ondiemet_trace_run;
438
439 if (kstrtoint(buf, 10, &ondiemet_trace_run) != 0)
440 return -EINVAL;
441
442 if (ondiemet_trace_run <= ONDIEMET_LOG_STOP_MODE) {
443 ondiemet_trace_run = ONDIEMET_LOG_STOP_MODE;
444 ondiemet_log_manager_stop();
445
446 if (prev_run_state == ONDIEMET_LOG_DEBUG_MODE)
447 device_remove_file(dev, &dev_attr_ondiemet_log_write);
448 } else if (ondiemet_trace_run == ONDIEMET_LOG_RUN_MODE) {
449 ondiemet_trace_run = ONDIEMET_LOG_RUN_MODE;
450 ondiemet_log_manager_start();
451
452 if (prev_run_state == ONDIEMET_LOG_DEBUG_MODE)
453 device_remove_file(dev, &dev_attr_ondiemet_log_write);
454 } else {
455 ondiemet_trace_run = ONDIEMET_LOG_DEBUG_MODE;
456 ondiemet_log_manager_start();
457
458 if (prev_run_state != ONDIEMET_LOG_DEBUG_MODE) {
459 ret = device_create_file(dev, &dev_attr_ondiemet_log_write);
460 if (ret != 0)
461 pr_debug("[met] can not create device node: ondiemet_log_write\n");
462 }
463 }
464
465 mutex_unlock(&dev->mutex);
466
467 return count;
468}
469
/* sysfs node controlling the log manager run mode */
static DEVICE_ATTR(ondiemet_log_run, 0660, ondiemet_log_run_show, ondiemet_log_run_store);
471
472int ondiemet_log_manager_init(struct device *dev)
473{
474 int ret;
475#ifdef ONDIEMET_MOUNT_DEBUGFS
476 struct dentry *d;
477#else
478 struct proc_dir_entry *d;
479#endif
480
481 mutex_init(&lock_tracef);
482
483 __ondiemet_log_req_q_init(&ondiemet_log_req_q);
484
485 /*sema_init(&log_start_sema, 0);*/
486 /*sema_init(&log_stop_sema, 0);*/
487 init_completion(&log_start_comp);
488 init_completion(&log_stop_comp);
489
490#ifdef ONDIEMET_MOUNT_DEBUGFS
491 dbgfs_met_dir = debugfs_create_dir("ondiemet", NULL);
492 if (!dbgfs_met_dir) {
493 PR_BOOTMSG("[met] can not create debugfs directory: ondiemet\n");
494 return -ENOMEM;
495 }
496#else
497 procfs_met_dir = proc_mkdir("ondiemet", NULL);
498 if (!procfs_met_dir) {
499 PR_BOOTMSG("[met] can not create proc directory: ondiemet\n");
500 return -ENOMEM;
501 }
502#endif
503
504 mutex_init(&lock_trace_owner_pid);
505
506#ifdef ONDIEMET_MOUNT_DEBUGFS
507 d = debugfs_create_file("trace", 0600, dbgfs_met_dir, NULL, &ondiemet_trace_fops);
508 if (!d) {
509 PR_BOOTMSG("[met] can not create devide node in debugfs: ondiemet_trace\n");
510 return -ENOMEM;
511 }
512#else
513 d = proc_create("trace", 0600, procfs_met_dir, &ondiemet_trace_fops);
514 if (!d) {
515 PR_BOOTMSG("[met] can not create devide node in procfs: ondiemet_trace\n");
516 return -ENOMEM;
517 }
518#endif
519
520 ondiemet_trace_run = __ondiemet_log_req_working();
521 ret = device_create_file(dev, &dev_attr_ondiemet_log_run);
522 if (ret != 0) {
523 pr_debug("[met] can not create device node: ondiemet_log_run\n");
524 return ret;
525 }
526
527 return 0;
528}
529
530int ondiemet_log_manager_uninit(struct device *dev)
531{
532 device_remove_file(dev, &dev_attr_ondiemet_log_run);
533#ifdef ONDIEMET_MOUNT_DEBUGFS
534 debugfs_remove_recursive(dbgfs_met_dir);
535#else
536 proc_remove(procfs_met_dir);
537#endif
538
539 return 0;
540}