/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/syscalls.h>
#include "nodelist.h"
24
/* quick GC wait timeout: 60 seconds */
26#define QUICK_GC_TIMEOUT_MS 60000
27
/*
 * Per-partition bookkeeping for the vendor "quick GC" feature: binds a
 * fixed MTD partition name to its mounted superblock and to the task
 * (if any) currently blocked in quick_gc_wait_done().
 */
struct jffs2_quick_gc_manager {
	char * name;			/* MTD partition name this slot manages */
	struct jffs2_sb_info *c;	/* superblock while mounted read-write, else NULL */
	struct completion *waiting;	/* waiter's on-stack completion, or NULL when nobody waits */
	struct semaphore wait_sem;	/* serializes waiters — at most one at a time */
};
34
35static struct jffs2_quick_gc_manager jffs2_quick_gc_mtd[] = {
36 {"imagefs", NULL, NULL},
37 {"resource", NULL, NULL},
38 {"nvrofs", NULL, NULL}
39 };
40
41
42static struct jffs2_quick_gc_manager * jffs2_quick_gc_find_manager(const char *mtd_name)
43{
44 int i;
45 struct jffs2_quick_gc_manager *m = NULL;
46
47 for(i=0; i<sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]); i++) {
48 if(strcmp(mtd_name, jffs2_quick_gc_mtd[i].name) == 0)
49 m = &jffs2_quick_gc_mtd[i];
50 }
51
52 return m;
53}
54
55static int jffs2_quick_gc_clean_manager(struct jffs2_sb_info *c)
56{
57 int i;
58
59 for(i=0; i<sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]); i++) {
60 if(strcmp(c->mtd->name, jffs2_quick_gc_mtd[i].name) == 0)
61 jffs2_quick_gc_mtd[i].c = NULL;
62 }
63
64 return 0;
65}
66
67int quick_gc_wait_done(const char *mtd_name)
68{
69 struct jffs2_sb_info *c = NULL;
70 struct jffs2_quick_gc_manager *m = NULL;
71 struct completion waiting_done;
72
73 m = jffs2_quick_gc_find_manager(mtd_name);
74 BUG_ON(!m);
75
76 c = m->c;
77 if(c == NULL)
78 return 1; /*partition not exist or not rw mode*/
79 if(!c->gc_task)
80 return 1; /*no gc_task, not rw mode*/
81 BUG_ON(m->waiting); /*must no waiting*/
82
83 down(&(m->wait_sem));
84 spin_lock(&c->erase_completion_lock);
85 if(jffs2_thread_should_wake(c)) {
86 m->waiting = &waiting_done;
87 init_completion(m->waiting);
88 printk(KERN_ALERT "[zgp] %s quick gc waiting start\n", c->mtd->name);
89 jffs2_quick_garbage_collect_trigger(c);
90 spin_unlock(&c->erase_completion_lock);
91 wait_for_completion_timeout(m->waiting, msecs_to_jiffies(QUICK_GC_TIMEOUT_MS));
92 m->waiting = NULL;
93 printk(KERN_ALERT "[zgp] %s quick gc waiting return\n", c->mtd->name);
94 up(&(m->wait_sem));
95 return 1;
96 }
97 spin_unlock(&c->erase_completion_lock);
98 up(&(m->wait_sem));
99
100 return 1;
101}
102
103SYSCALL_DEFINE1(jffs2_quick_gc_wait_done, int __user, partition_no)
104{
105 BUG_ON(partition_no >= sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]));
106
107 //printk(KERN_ALERT "[zgp]syscall %s wait done GC\n ", jffs2_quick_gc_mtd[partition_no].name);
xf.liaa4d92f2023-09-13 00:18:58 -0700108#ifdef _USE_VEHICLE_DC
109 return 1;
110#else
lh9ed821d2023-04-07 01:36:19 -0700111 return quick_gc_wait_done(jffs2_quick_gc_mtd[partition_no].name);
xf.liaa4d92f2023-09-13 00:18:58 -0700112#endif
lh9ed821d2023-04-07 01:36:19 -0700113}
114
115void jffs2_quick_gc_done(struct jffs2_sb_info *c)
116{
117 struct jffs2_quick_gc_manager *m = NULL;
118
119 assert_spin_locked(&c->erase_completion_lock);
120
121 m = jffs2_quick_gc_find_manager(c->mtd->name);
122 BUG_ON(!m);
123
124 if(m->waiting) {
125 //printk(KERN_ALERT "[zgp] mtd %s quck gc done\n", c->mtd->name);
126 complete(m->waiting);
127 }
128}
129
130static int jffs2_garbage_collect_thread(void *);
131
132void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
133{
134 assert_spin_locked(&c->erase_completion_lock);
135 if (c->gc_task && jffs2_thread_should_wake(c)) {
136 //c->quick_gcblock_count = 0;
137 send_sig(SIGHUP, c->gc_task, 1);
138 }
139}
140
141/* add quick garbage collect trigger zgp*/
142void jffs2_quick_garbage_collect_trigger(struct jffs2_sb_info *c)
143{
144 assert_spin_locked(&c->erase_completion_lock);
145 if (c->gc_task) {
146 c->quick_wait = 1;
147 c->quick_gcblock_count = 0;
148 send_sig(SIGHUP, c->gc_task, 1);
149 }
150}
151
152/* This must only ever be called when no GC thread is currently running */
153int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
154{
155 struct task_struct *tsk;
156 int ret = 0;
157
158 BUG_ON(c->gc_task);
159
160 init_completion(&c->gc_thread_start);
161 init_completion(&c->gc_thread_exit);
162
163 for(ret = 0; ret < sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]); ret++) {
164 if(strcmp(c->mtd->name, jffs2_quick_gc_mtd[ret].name) == 0) {
165 c->flags |= JFFS2_SB_FLAG_QUICK_GC;
166 c->quick_gcblock_count = 0;
167 c->quick_wait = 0;
168 jffs2_quick_gc_mtd[ret].c = c;
169 sema_init(&(jffs2_quick_gc_mtd[ret].wait_sem), 1);
170 //printk(KERN_ALERT "[zgp]partition %s enable quick GC\n ", c->mtd->name);
171 }
172 }
173
174 tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
175 if (IS_ERR(tsk)) {
176 pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
177 -PTR_ERR(tsk));
178 complete(&c->gc_thread_exit);
179 ret = PTR_ERR(tsk);
180 } else {
181 /* Wait for it... */
182 jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
183 wait_for_completion(&c->gc_thread_start);
184 ret = tsk->pid;
185 }
186
187 return ret;
188}
189
190void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
191{
192 int wait = 0;
193 struct jffs2_quick_gc_manager *m = NULL;
194
195 if(c->flags & JFFS2_SB_FLAG_QUICK_GC) {
196 m = jffs2_quick_gc_find_manager(c->mtd->name);
197 BUG_ON(!m);
198 BUG_ON(m->waiting); /*some one wait for GC*/
199 }
200
201 spin_lock(&c->erase_completion_lock);
202 if (c->gc_task) {
203 jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid);
204 send_sig(SIGKILL, c->gc_task, 1);
205 wait = 1;
206 }
207 spin_unlock(&c->erase_completion_lock);
208 if (wait)
209 wait_for_completion(&c->gc_thread_exit);
210
211 if(c->flags & JFFS2_SB_FLAG_QUICK_GC)
212 jffs2_quick_gc_clean_manager(c);
213
214}
215
/*
 * Background garbage-collection thread, one per mounted JFFS2
 * filesystem.  It sleeps until jffs2_thread_should_wake() reports
 * pending work, is kicked with SIGHUP by the trigger functions above,
 * and is terminated with SIGKILL from
 * jffs2_stop_garbage_collect_thread().
 */
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	/* The thread is driven by signals rather than kthread wakeups;
	 * let through the ones we dispatch on below. */
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	/* Publish ourselves so the triggers can signal us, then release
	 * jffs2_start_garbage_collect_thread() which waits on this. */
	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			/* Mark ourselves sleeping BEFORE dropping the lock so
			 * a trigger's SIGHUP between unlock and schedule()
			 * still wakes us. */
			set_current_state (TASK_INTERRUPTIBLE);
			if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
				printk(KERN_ALERT "[zgp] mtd %s quck gc done and sleep,gcblock_cnt:%d\n", c->mtd->name, c->quick_gcblock_count);
				/* Nothing left to collect: release any task
				 * blocked in quick_gc_wait_done(). */
				jffs2_quick_gc_done(c);
			}
			spin_unlock(&c->erase_completion_lock);
			jffs2_dbg(1, "%s(): sleeping...\n", __func__);
			schedule();
		} else
			spin_unlock(&c->erase_completion_lock);


		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing. Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down. Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		/* Quick-GC partitions throttle less (10ms vs 50ms).
		 * NOTE(review): quick_wait is set to 0/1 in this file; the
		 * value 2, which skips throttling entirely, must be set
		 * elsewhere — confirm its semantics against that code. */
		if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
			if(c->quick_wait != 2)
				schedule_timeout_interruptible(msecs_to_jiffies(10));
		} else
			schedule_timeout_interruptible(msecs_to_jiffies(50));

		if (kthread_should_stop()) {
			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
			goto die;
		}

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			/* try_to_freeze() blocks over suspend; re-check for
			 * work from the top afterwards. */
			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				jffs2_dbg(1, "%s(): SIGSTOP received\n",
					  __func__);
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				jffs2_dbg(1, "%s(): SIGKILL received\n",
					  __func__);
				goto die;

			case SIGHUP:
				/* Just a wakeup from a trigger; fall out and
				 * run a GC pass. */
				jffs2_dbg(1, "%s(): SIGHUP received\n",
					  __func__);
				break;
			default:
				jffs2_dbg(1, "%s(): signal %ld received\n",
					  __func__, signr);
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		jffs2_dbg(1, "%s(): pass\n", __func__);
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			pr_notice("No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	/* Unpublish under the lock so triggers never signal a dead task. */
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}