blob: 4f3ad696d04e2d92ca034a6c64b247fe9b25dfeb [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/slab.h>
17#include <linux/list.h>
18#include <linux/debugfs.h>
19#include <linux/mutex.h>
20#include <linux/semaphore.h>
21#include <linux/freezer.h>
22#include <linux/uaccess.h>
23#include <linux/completion.h>
24
25#include "ondiemet_log.h"
26
27#define ONDIEMET_LOG_REQ 1
/* TODO: abandon this constant */
29#define ONDIEMET_LOG_STOP 2
30
31#define PID_NONE (-1)
32
33#define ONDIEMET_LOG_STOP_MODE 0
34#define ONDIEMET_LOG_RUN_MODE 1
35#define ONDIEMET_LOG_DEBUG_MODE 2
36
static int ondiemet_trace_run;		/* one of ONDIEMET_LOG_{STOP,RUN,DEBUG}_MODE */
static struct dentry *dbgfs_met_dir;	/* debugfs "ondiemet" directory root */
39
/* Serializes trace readers: only one process may iterate the trace file. */
struct mutex lock_tracef;
/*
 * FIFO of pending log requests plus the synchronization used to
 * produce/consume them.  A single file-global instance
 * (ondiemet_log_req_q) is shared by all helpers below.
 */
struct ondiemet_log_req_q_t {
	struct list_head listq;		/* pending ondiemet_log_req entries */
	struct mutex lockq;		/* protects listq and closeq_flag */
	/* struct semaphore new_evt_sema; */
	struct completion new_evt_comp;	/* completed once per queued request */
	int closeq_flag;		/* non-zero: queue rejects new requests */
} ondiemet_log_req_q;
48
/* One queued log-output request: a span of bytes to stream to the reader. */
struct ondiemet_log_req {
	struct list_head list;		/* link in ondiemet_log_req_q.listq */
	int cmd_type;			/* ONDIEMET_LOG_REQ or ONDIEMET_LOG_STOP */
	const char *src;		/* data to emit (ownership stays with param) */
	size_t num;			/* number of bytes at src */

	void (*on_fini_cb)(const void *p);	/* invoked when the request is finished */
	const void *param;			/* argument for on_fini_cb (e.g. buffer to free) */
};
58
/*
 * Initialize a freshly allocated request.
 * Function-like multi-statement macro: arguments may be evaluated more
 * than once, so do not pass expressions with side effects.
 */
#define __ondiemet_log_req_init(req, cmd, s, n, pf, p) \
	do { \
		INIT_LIST_HEAD(&req->list); \
		req->cmd_type = cmd; \
		req->src = s; \
		req->num = n; \
		req->on_fini_cb = pf; \
		req->param = p; \
	} while (0)
68
/*
 * Finish a request: run its completion callback (if any) with
 * req->param, then free the request itself.
 */
#define __ondiemet_log_req_fini(req) \
	do { \
		if (req->on_fini_cb) \
			req->on_fini_cb(req->param); \
		kfree(req); \
	} while (0)
75
76static void __ondiemet_log_req_q_init(struct ondiemet_log_req_q_t *q)
77{
78 INIT_LIST_HEAD(&q->listq);
79 mutex_init(&q->lockq);
80 /* sema_init(&q->new_evt_sema, 0); */
81 init_completion(&q->new_evt_comp);
82 q->closeq_flag = 1;
83}
84
85/* undequeue is seen as a roll-back operation, so it can be done even when the queue is closed */
86static void __ondiemet_log_req_undeq(struct ondiemet_log_req *req)
87{
88 mutex_lock(&ondiemet_log_req_q.lockq);
89 list_add(&req->list, &ondiemet_log_req_q.listq);
90 mutex_unlock(&ondiemet_log_req_q.lockq);
91
92 /* up(&ondiemet_log_req_q.new_evt_sema); */
93 complete(&ondiemet_log_req_q.new_evt_comp);
94}
95
96static int __ondiemet_log_req_enq(struct ondiemet_log_req *req)
97{
98 mutex_lock(&ondiemet_log_req_q.lockq);
99 if (ondiemet_log_req_q.closeq_flag) {
100 mutex_unlock(&ondiemet_log_req_q.lockq);
101 return -EBUSY;
102 }
103
104 list_add_tail(&req->list, &ondiemet_log_req_q.listq);
105 if (req->cmd_type == ONDIEMET_LOG_STOP)
106 ondiemet_log_req_q.closeq_flag = 1;
107 mutex_unlock(&ondiemet_log_req_q.lockq);
108
109 /* up(&ondiemet_log_req_q.new_evt_sema); */
110 complete(&ondiemet_log_req_q.new_evt_comp);
111
112 return 0;
113}
114
115int ondiemet_log_req_enq(const char *src, size_t num, void (*on_fini_cb)(const void *p), const void *param)
116{
117 struct ondiemet_log_req *req = kmalloc(sizeof(*req), GFP_KERNEL);
118
119 __ondiemet_log_req_init(req, ONDIEMET_LOG_REQ, src, num, on_fini_cb, param);
120 return __ondiemet_log_req_enq(req);
121}
122
/*int down_freezable_interruptible(struct semaphore *sem) */
/*
 * Wait for a completion interruptibly while telling the freezer not to
 * count this task (freezer_do_not_count/freezer_count), so a blocked
 * log reader does not stall system suspend.
 *
 * Returns 0 when completed, or a negative value when interrupted by a
 * signal.
 *
 * NOTE(review): not static and not namespaced — this global symbol
 * could collide with other modules; consider making it static.
 */
int down_freezable_interruptible(struct completion *comp)
{

	int ret;

	freezer_do_not_count();
	/* ret = down_interruptible(sem); */
	ret = wait_for_completion_interruptible(comp);
	freezer_count();

	return ret;
}
136
/*
 * Block until a request is available, then pop the head of the queue.
 * Returns NULL if the wait was interrupted by a signal.
 *
 * NOTE(review): each complete() is paired with exactly one queued entry
 * (enq and undeq both complete once), so the list is assumed non-empty
 * after a successful wait; list_entry on an empty list would yield a
 * bogus pointer.  Verify the pairing holds for all callers.
 */
struct ondiemet_log_req *__ondiemet_log_req_deq(void)
{
	struct ondiemet_log_req *ret_req;

	/*if (down_freezable_interruptible(&ondiemet_log_req_q.new_evt_sema))*/
	if (down_freezable_interruptible(&ondiemet_log_req_q.new_evt_comp))
		return NULL;

	mutex_lock(&ondiemet_log_req_q.lockq);
	ret_req = list_entry(ondiemet_log_req_q.listq.next, struct ondiemet_log_req, list);
	list_del_init(&ret_req->list);
	mutex_unlock(&ondiemet_log_req_q.lockq);

	return ret_req;
}
152
153void __ondiemet_log_req_open(void)
154{
155 mutex_lock(&ondiemet_log_req_q.lockq);
156 ondiemet_log_req_q.closeq_flag = 0;
157 mutex_unlock(&ondiemet_log_req_q.lockq);
158}
159
160int __ondiemet_log_req_closed(void)
161{
162 int ret;
163
164 mutex_lock(&ondiemet_log_req_q.lockq);
165 ret = ondiemet_log_req_q.closeq_flag && list_empty(&ondiemet_log_req_q.listq);
166 mutex_unlock(&ondiemet_log_req_q.lockq);
167
168 return ret;
169}
170
171int __ondiemet_log_req_working(void)
172{
173 int ret;
174
175 mutex_lock(&ondiemet_log_req_q.lockq);
176 ret = !ondiemet_log_req_q.closeq_flag;
177 mutex_unlock(&ondiemet_log_req_q.lockq);
178
179 return ret;
180}
181
182static void *__ondiemet_trace_seq_next(struct seq_file *seqf, loff_t *offset)
183{
184 struct ondiemet_log_req *next_req;
185
186 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
187 pr_debug("[met] __ondiemet_trace_seq_next: pid: %d\n", current->pid);
188
189 if (__ondiemet_log_req_closed())
190 return NULL;
191
192 next_req = __ondiemet_log_req_deq();
193
194 if (next_req == NULL)
195 return NULL;
196
197 if (next_req->cmd_type == ONDIEMET_LOG_STOP) {
198 __ondiemet_log_req_fini(next_req);
199 return NULL;
200 }
201
202 return (void *) next_req;
203}
204
/* Protects trace_owner_pid. */
struct mutex lock_trace_owner_pid;
/* pid of the process currently iterating the trace file, or PID_NONE. */
pid_t trace_owner_pid = PID_NONE;
/*
 * seq_file .start: acquire exclusive ownership of the trace stream.
 *
 * mutex_trylock is used so a second concurrent reader sees EOF (NULL)
 * instead of blocking; lock_tracef is released in
 * ondiemet_trace_seq_stop, and only by the pid recorded here.
 */
static void *ondiemet_trace_seq_start(struct seq_file *seqf, loff_t *offset)
{
	void *ret;

	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE) {
		pr_debug("[met] ondiemet_trace_seq_start: locked_pid: %d, pid: %d, offset: %llu\n",
			 trace_owner_pid, current->pid, *offset);
	}

	if (!mutex_trylock(&lock_tracef))
		return NULL;

	mutex_lock(&lock_trace_owner_pid);
	trace_owner_pid = current->pid;
	mutex_unlock(&lock_trace_owner_pid);

	ret = __ondiemet_trace_seq_next(seqf, offset);

	return ret;
}
227
228static void *ondiemet_trace_seq_next(struct seq_file *seqf, void *p, loff_t *offset)
229{
230 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
231 pr_debug("[met] ondiemet_trace_seq_next: pid: %d\n", current->pid);
232
233 (*offset)++;
234 return __ondiemet_trace_seq_next(seqf, offset);
235}
236
237static int ondiemet_trace_seq_show(struct seq_file *seqf, void *p)
238{
239 struct ondiemet_log_req *req = (struct ondiemet_log_req *) p;
240 size_t l_sz;
241 size_t r_sz;
242 struct ondiemet_log_req *l_req;
243 struct ondiemet_log_req *r_req;
244 int ret;
245
246 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
247 pr_debug("[met] ondiemet_trace_seq_show: pid: %d\n", current->pid);
248
249 if (req->num >= seqf->size) {
250 l_req = kmalloc(sizeof(*req), GFP_KERNEL);
251 r_req = req;
252
253 l_sz = seqf->size >> 1;
254 r_sz = req->num - l_sz;
255 __ondiemet_log_req_init(l_req, ONDIEMET_LOG_REQ, req->src, l_sz, NULL, NULL);
256 __ondiemet_log_req_init(r_req, ONDIEMET_LOG_REQ, req->src + l_sz,
257 r_sz, req->on_fini_cb, req->param);
258
259 __ondiemet_log_req_undeq(r_req);
260 req = l_req;
261
262 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
263 pr_debug("[met] ondiemet_trace_seq_show: split request\n");
264 }
265
266 ret = seq_write(seqf, req->src, req->num);
267
268 if (ret) {
269 /* check if seq_file buffer overflows */
270 if (seqf->count == seqf->size) {
271 __ondiemet_log_req_undeq(req);
272 } else {
273 if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
274 pr_debug("[met] ondiemet_trace_seq_show: reading trace record failed, some data may be lost or corrupted\n");
275 __ondiemet_log_req_fini(req);
276 }
277 return 0;
278 }
279
280 __ondiemet_log_req_fini(req);
281 return 0;
282}
283
/*
 * seq_file .stop: release trace ownership — but only when the calling
 * process is the one that acquired lock_tracef in seq_start.  A reader
 * that lost the trylock race also reaches here and must not unlock.
 */
static void ondiemet_trace_seq_stop(struct seq_file *seqf, void *p)
{
	if (ondiemet_trace_run == ONDIEMET_LOG_DEBUG_MODE)
		pr_debug("[met] ondiemet_trace_seq_stop: pid: %d\n", current->pid);

	mutex_lock(&lock_trace_owner_pid);
	if (current->pid == trace_owner_pid) {
		trace_owner_pid = PID_NONE;
		mutex_unlock(&lock_tracef);
	}
	mutex_unlock(&lock_trace_owner_pid);
}
296
/* seq_file iterator: streams queued log requests through the "trace" file. */
static const struct seq_operations ondiemet_trace_seq_ops = {
	.start = ondiemet_trace_seq_start,
	.next = ondiemet_trace_seq_next,
	.stop = ondiemet_trace_seq_stop,
	.show = ondiemet_trace_seq_show
};
303
/* debugfs open: bind the trace seq_file iterator to this file handle. */
static int ondiemet_trace_open(struct inode *inode, struct file *fp)
{
	return seq_open(fp, &ondiemet_trace_seq_ops);
}
308
/* fops for <debugfs>/ondiemet/trace; all reads go through seq_file. */
static const struct file_operations ondiemet_trace_fops = {
	.owner = THIS_MODULE,
	.open = ondiemet_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};
316
/*struct semaphore log_start_sema;*/
/* Completed by __log_stop_cb when a previous STOP request has finished. */
struct completion log_start_comp;
/*
 * (Re)open the request queue so new log requests are accepted.
 *
 * Returns -EINVAL if the queue is already open, a negative value if
 * the killable wait for a still-draining stop is interrupted, or 0.
 */
int ondiemet_log_manager_start(void)
{
	int ret;

	/* TODO: choose a better return value */
	if (__ondiemet_log_req_working())
		return -EINVAL;

	/* queue closed but not drained: wait for the pending STOP to finish */
	if (!__ondiemet_log_req_closed()) {
		/*ret = down_killable(&log_start_sema);*/
		ret = wait_for_completion_killable(&log_start_comp);
		if (ret)
			return ret;
	}

	__ondiemet_log_req_open();

	return 0;
}
338
/*struct semaphore log_stop_sema;*/
/* Completed when the STOP request has been consumed by the reader. */
struct completion log_stop_comp;
/*
 * Completion callback attached to the STOP request: wakes both a
 * blocked ondiemet_log_manager_stop() and a future _start().
 */
static void __log_stop_cb(const void *p)
{
	/* up(&log_start_sema); */
	/* up(&log_stop_sema); */
	complete(&log_start_comp);
	complete(&log_stop_comp);
}
348
349int ondiemet_log_manager_stop(void)
350{
351 int ret;
352 struct ondiemet_log_req *req;
353
354 /* TODO: choose a better return value */
355 if (__ondiemet_log_req_closed())
356 return -EINVAL;
357
358 req = kmalloc(sizeof(*req), GFP_KERNEL);
359
360 __ondiemet_log_req_init(req, ONDIEMET_LOG_STOP, NULL, 0, __log_stop_cb, NULL);
361 /*sema_init(&log_start_sema, 0); */
362 /*sema_init(&log_stop_sema, 0); */
363 init_completion(&log_start_comp);
364 init_completion(&log_stop_comp);
365
366 ret = __ondiemet_log_req_enq(req);
367 if (ret)
368 return ret;
369
370 /* XXX: blocking may be break by SIGKILL */
371 /*return down_killable(&log_stop_sema);*/
372 return wait_for_completion_killable(&log_stop_comp);
373}
374
/*
 * Parse a decimal or "0x"/"0X"-prefixed hexadecimal string into *value.
 * Returns 0 on success, -1 on bad input or non-positive len.
 */
int ondiemet_parse_num(const char *str, unsigned int *value, int len)
{
	unsigned int base = 10;

	if (len <= 0)
		return -1;

	if (len > 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		base = 16;

	if (kstrtouint(str, base, value) != 0)
		return -1;

	return 0;
}
395
396/* XXX: seq_file will output only when a page is filled */
397static ssize_t ondiemet_log_write_store(struct device *dev,
398 struct device_attribute *attr,
399 const char *buf,
400 size_t count)
401{
402 char *plog = NULL;
403
404 plog = kmalloc_array(count, sizeof(*plog), GFP_KERNEL);
405 if (!plog) {
406 /* TODO: use a better error code */
407 return -EINVAL;
408 }
409
410 memcpy(plog, buf, count);
411
412 mutex_lock(&dev->mutex);
413 ondiemet_log_req_enq(plog, strnlen(plog, count), kfree, plog);
414 mutex_unlock(&dev->mutex);
415
416 return count;
417}
418
419static DEVICE_ATTR(ondiemet_log_write, 0664, NULL, ondiemet_log_write_store);
420
421static ssize_t ondiemet_log_run_show(struct device *dev, struct device_attribute *attr, char *buf)
422{
423 int sz;
424
425 mutex_lock(&dev->mutex);
426 sz = snprintf(buf, PAGE_SIZE, "%d\n", ondiemet_trace_run);
427 mutex_unlock(&dev->mutex);
428 return sz;
429}
430
431static ssize_t ondiemet_log_run_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
432{
433 int ret;
434 int prev_run_state;
435
436 mutex_lock(&dev->mutex);
437
438 prev_run_state = ondiemet_trace_run;
439
440 if (kstrtoint(buf, 10, &ondiemet_trace_run) != 0)
441 return -EINVAL;
442
443 if (ondiemet_trace_run <= ONDIEMET_LOG_STOP_MODE) {
444 ondiemet_trace_run = ONDIEMET_LOG_STOP_MODE;
445 ondiemet_log_manager_stop();
446
447 if (prev_run_state == ONDIEMET_LOG_DEBUG_MODE)
448 device_remove_file(dev, &dev_attr_ondiemet_log_write);
449 } else if (ondiemet_trace_run == ONDIEMET_LOG_RUN_MODE) {
450 ondiemet_trace_run = ONDIEMET_LOG_RUN_MODE;
451 ondiemet_log_manager_start();
452
453 if (prev_run_state == ONDIEMET_LOG_DEBUG_MODE)
454 device_remove_file(dev, &dev_attr_ondiemet_log_write);
455 } else {
456 ondiemet_trace_run = ONDIEMET_LOG_DEBUG_MODE;
457 ondiemet_log_manager_start();
458
459 if (prev_run_state != ONDIEMET_LOG_DEBUG_MODE) {
460 ret = device_create_file(dev, &dev_attr_ondiemet_log_write);
461 if (ret != 0)
462 pr_debug("[met] can not create device node: ondiemet_log_write\n");
463 }
464 }
465
466 mutex_unlock(&dev->mutex);
467
468 return count;
469}
470
471static DEVICE_ATTR(ondiemet_log_run, 0660, ondiemet_log_run_show, ondiemet_log_run_store);
472
473int ondiemet_log_manager_init(struct device *dev)
474{
475 int ret;
476 struct dentry *d;
477
478 mutex_init(&lock_tracef);
479
480 __ondiemet_log_req_q_init(&ondiemet_log_req_q);
481
482 /*sema_init(&log_start_sema, 0);*/
483 /*sema_init(&log_stop_sema, 0);*/
484 init_completion(&log_start_comp);
485 init_completion(&log_stop_comp);
486
487 dbgfs_met_dir = debugfs_create_dir("ondiemet", NULL);
488 if (!dbgfs_met_dir) {
489 pr_debug("[met] can not create debugfs directory: met\n");
490 return -ENOMEM;
491 }
492
493 mutex_init(&lock_trace_owner_pid);
494
495 d = debugfs_create_file("trace", 0644, dbgfs_met_dir, NULL, &ondiemet_trace_fops);
496 if (!d) {
497 pr_debug("[met] can not create devide node in debugfs: ondiemet_trace\n");
498 return -ENOMEM;
499 }
500
501 ondiemet_trace_run = __ondiemet_log_req_working();
502 ret = device_create_file(dev, &dev_attr_ondiemet_log_run);
503 if (ret != 0) {
504 pr_debug("[met] can not create device node: ondiemet_log_run\n");
505 return ret;
506 }
507
508 return 0;
509}
510
511int ondiemet_log_manager_uninit(struct device *dev)
512{
513 device_remove_file(dev, &dev_attr_ondiemet_log_run);
514 debugfs_remove_recursive(dbgfs_met_dir);
515 return 0;
516}