lh | 9ed821d | 2023-04-07 01:36:19 -0700 | [diff] [blame^] | 1 | /* |
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. |
| 3 | * |
| 4 | * Copyright © 2001-2007 Red Hat, Inc. |
| 5 | * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> |
| 6 | * |
| 7 | * Created by David Woodhouse <dwmw2@infradead.org> |
| 8 | * |
| 9 | * For licensing information, see the file 'LICENCE' in this directory. |
| 10 | * |
| 11 | */ |
| 12 | |
| 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 14 | |
| 15 | #include <linux/kernel.h> |
| 16 | #include <linux/jffs2.h> |
| 17 | #include <linux/mtd/mtd.h> |
| 18 | #include <linux/completion.h> |
| 19 | #include <linux/sched.h> |
| 20 | #include <linux/freezer.h> |
| 21 | #include <linux/kthread.h> |
| 22 | #include <linux/syscalls.h> |
| 23 | #include "nodelist.h" |
| 24 | |
/* Quick-GC wait timeout: 60 seconds. */
| 26 | #define QUICK_GC_TIMEOUT_MS 60000 |
| 27 | |
/*
 * Per-partition bookkeeping for the quick-GC extension.  One entry exists
 * per eligible MTD partition name (see jffs2_quick_gc_mtd[] below).
 */
struct jffs2_quick_gc_manager {
	char * name;			/* MTD partition name this entry manages */
	struct jffs2_sb_info *c;	/* superblock of the mounted partition; NULL if not mounted r/w */
	struct completion *waiting;	/* on-stack completion of a waiter in quick_gc_wait_done(), or NULL */
	struct semaphore wait_sem;	/* serializes concurrent quick_gc_wait_done() callers */
};
| 34 | |
/*
 * Partitions eligible for quick GC.  Only ->name is initialized here; the
 * remaining members (c, waiting, wait_sem) are zero-initialized and filled
 * in by jffs2_start_garbage_collect_thread() when the partition mounts.
 */
static struct jffs2_quick_gc_manager jffs2_quick_gc_mtd[] = {
	{"imagefs", NULL, NULL},
	{"resource", NULL, NULL},
	{"nvrofs", NULL, NULL}
};
| 40 | |
| 41 | |
| 42 | static struct jffs2_quick_gc_manager * jffs2_quick_gc_find_manager(const char *mtd_name) |
| 43 | { |
| 44 | int i; |
| 45 | struct jffs2_quick_gc_manager *m = NULL; |
| 46 | |
| 47 | for(i=0; i<sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]); i++) { |
| 48 | if(strcmp(mtd_name, jffs2_quick_gc_mtd[i].name) == 0) |
| 49 | m = &jffs2_quick_gc_mtd[i]; |
| 50 | } |
| 51 | |
| 52 | return m; |
| 53 | } |
| 54 | |
| 55 | static int jffs2_quick_gc_clean_manager(struct jffs2_sb_info *c) |
| 56 | { |
| 57 | int i; |
| 58 | |
| 59 | for(i=0; i<sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]); i++) { |
| 60 | if(strcmp(c->mtd->name, jffs2_quick_gc_mtd[i].name) == 0) |
| 61 | jffs2_quick_gc_mtd[i].c = NULL; |
| 62 | } |
| 63 | |
| 64 | return 0; |
| 65 | } |
| 66 | |
| 67 | int quick_gc_wait_done(const char *mtd_name) |
| 68 | { |
| 69 | struct jffs2_sb_info *c = NULL; |
| 70 | struct jffs2_quick_gc_manager *m = NULL; |
| 71 | struct completion waiting_done; |
| 72 | |
| 73 | m = jffs2_quick_gc_find_manager(mtd_name); |
| 74 | BUG_ON(!m); |
| 75 | |
| 76 | c = m->c; |
| 77 | if(c == NULL) |
| 78 | return 1; /*partition not exist or not rw mode*/ |
| 79 | if(!c->gc_task) |
| 80 | return 1; /*no gc_task, not rw mode*/ |
| 81 | BUG_ON(m->waiting); /*must no waiting*/ |
| 82 | |
| 83 | down(&(m->wait_sem)); |
| 84 | spin_lock(&c->erase_completion_lock); |
| 85 | if(jffs2_thread_should_wake(c)) { |
| 86 | m->waiting = &waiting_done; |
| 87 | init_completion(m->waiting); |
| 88 | printk(KERN_ALERT "[zgp] %s quick gc waiting start\n", c->mtd->name); |
| 89 | jffs2_quick_garbage_collect_trigger(c); |
| 90 | spin_unlock(&c->erase_completion_lock); |
| 91 | wait_for_completion_timeout(m->waiting, msecs_to_jiffies(QUICK_GC_TIMEOUT_MS)); |
| 92 | m->waiting = NULL; |
| 93 | printk(KERN_ALERT "[zgp] %s quick gc waiting return\n", c->mtd->name); |
| 94 | up(&(m->wait_sem)); |
| 95 | return 1; |
| 96 | } |
| 97 | spin_unlock(&c->erase_completion_lock); |
| 98 | up(&(m->wait_sem)); |
| 99 | |
| 100 | return 1; |
| 101 | } |
| 102 | |
| 103 | SYSCALL_DEFINE1(jffs2_quick_gc_wait_done, int __user, partition_no) |
| 104 | { |
| 105 | BUG_ON(partition_no >= sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0])); |
| 106 | |
| 107 | //printk(KERN_ALERT "[zgp]syscall %s wait done GC\n ", jffs2_quick_gc_mtd[partition_no].name); |
| 108 | |
| 109 | return quick_gc_wait_done(jffs2_quick_gc_mtd[partition_no].name); |
| 110 | } |
| 111 | |
| 112 | void jffs2_quick_gc_done(struct jffs2_sb_info *c) |
| 113 | { |
| 114 | struct jffs2_quick_gc_manager *m = NULL; |
| 115 | |
| 116 | assert_spin_locked(&c->erase_completion_lock); |
| 117 | |
| 118 | m = jffs2_quick_gc_find_manager(c->mtd->name); |
| 119 | BUG_ON(!m); |
| 120 | |
| 121 | if(m->waiting) { |
| 122 | //printk(KERN_ALERT "[zgp] mtd %s quck gc done\n", c->mtd->name); |
| 123 | complete(m->waiting); |
| 124 | } |
| 125 | } |
| 126 | |
| 127 | static int jffs2_garbage_collect_thread(void *); |
| 128 | |
| 129 | void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) |
| 130 | { |
| 131 | assert_spin_locked(&c->erase_completion_lock); |
| 132 | if (c->gc_task && jffs2_thread_should_wake(c)) { |
| 133 | //c->quick_gcblock_count = 0; |
| 134 | send_sig(SIGHUP, c->gc_task, 1); |
| 135 | } |
| 136 | } |
| 137 | |
/* Quick-GC extension: unconditionally arm a quick GC pass and wake the GC thread. */
| 139 | void jffs2_quick_garbage_collect_trigger(struct jffs2_sb_info *c) |
| 140 | { |
| 141 | assert_spin_locked(&c->erase_completion_lock); |
| 142 | if (c->gc_task) { |
| 143 | c->quick_wait = 1; |
| 144 | c->quick_gcblock_count = 0; |
| 145 | send_sig(SIGHUP, c->gc_task, 1); |
| 146 | } |
| 147 | } |
| 148 | |
| 149 | /* This must only ever be called when no GC thread is currently running */ |
| 150 | int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) |
| 151 | { |
| 152 | struct task_struct *tsk; |
| 153 | int ret = 0; |
| 154 | |
| 155 | BUG_ON(c->gc_task); |
| 156 | |
| 157 | init_completion(&c->gc_thread_start); |
| 158 | init_completion(&c->gc_thread_exit); |
| 159 | |
| 160 | for(ret = 0; ret < sizeof(jffs2_quick_gc_mtd)/sizeof(jffs2_quick_gc_mtd[0]); ret++) { |
| 161 | if(strcmp(c->mtd->name, jffs2_quick_gc_mtd[ret].name) == 0) { |
| 162 | c->flags |= JFFS2_SB_FLAG_QUICK_GC; |
| 163 | c->quick_gcblock_count = 0; |
| 164 | c->quick_wait = 0; |
| 165 | jffs2_quick_gc_mtd[ret].c = c; |
| 166 | sema_init(&(jffs2_quick_gc_mtd[ret].wait_sem), 1); |
| 167 | //printk(KERN_ALERT "[zgp]partition %s enable quick GC\n ", c->mtd->name); |
| 168 | } |
| 169 | } |
| 170 | |
| 171 | tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); |
| 172 | if (IS_ERR(tsk)) { |
| 173 | pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", |
| 174 | -PTR_ERR(tsk)); |
| 175 | complete(&c->gc_thread_exit); |
| 176 | ret = PTR_ERR(tsk); |
| 177 | } else { |
| 178 | /* Wait for it... */ |
| 179 | jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); |
| 180 | wait_for_completion(&c->gc_thread_start); |
| 181 | ret = tsk->pid; |
| 182 | } |
| 183 | |
| 184 | return ret; |
| 185 | } |
| 186 | |
| 187 | void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) |
| 188 | { |
| 189 | int wait = 0; |
| 190 | struct jffs2_quick_gc_manager *m = NULL; |
| 191 | |
| 192 | if(c->flags & JFFS2_SB_FLAG_QUICK_GC) { |
| 193 | m = jffs2_quick_gc_find_manager(c->mtd->name); |
| 194 | BUG_ON(!m); |
| 195 | BUG_ON(m->waiting); /*some one wait for GC*/ |
| 196 | } |
| 197 | |
| 198 | spin_lock(&c->erase_completion_lock); |
| 199 | if (c->gc_task) { |
| 200 | jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); |
| 201 | send_sig(SIGKILL, c->gc_task, 1); |
| 202 | wait = 1; |
| 203 | } |
| 204 | spin_unlock(&c->erase_completion_lock); |
| 205 | if (wait) |
| 206 | wait_for_completion(&c->gc_thread_exit); |
| 207 | |
| 208 | if(c->flags & JFFS2_SB_FLAG_QUICK_GC) |
| 209 | jffs2_quick_gc_clean_manager(c); |
| 210 | |
| 211 | } |
| 212 | |
/*
 * Main loop of the per-mount garbage-collection kernel thread.
 *
 * Lifecycle: records itself in c->gc_task, signals gc_thread_start, then
 * loops doing GC passes until SIGKILL or kthread_stop(), at which point it
 * clears c->gc_task under erase_completion_lock and completes
 * gc_thread_exit.  Signals: SIGHUP wakes it for a pass (trigger functions
 * above), SIGSTOP/SIGCONT pause/resume, SIGKILL terminates.
 */
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	/* Publish ourselves so the trigger functions can signal us, then
	 * release the starter blocked in jffs2_start_garbage_collect_thread(). */
	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		/* Re-enable SIGHUP each iteration; it is disallowed below
		 * before the GC pass so the pass itself cannot be interrupted. */
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			/* Quick-GC: nothing left to collect — wake any
			 * quick_gc_wait_done() waiter before sleeping.
			 * jffs2_quick_gc_done() requires the lock we hold. */
			if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
				printk(KERN_ALERT "[zgp] mtd %s quck gc done and sleep,gcblock_cnt:%d\n", c->mtd->name, c->quick_gcblock_count);
				jffs2_quick_gc_done(c);
			}
			spin_unlock(&c->erase_completion_lock);
			jffs2_dbg(1, "%s(): sleeping...\n", __func__);
			schedule();
		} else
			spin_unlock(&c->erase_completion_lock);


		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing. Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down. Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		/* Quick-GC partitions throttle less (10ms vs 50ms).
		 * NOTE(review): quick_wait == 2 appears to mean "no throttle
		 * at all", but nothing in this file ever sets 2 — confirm
		 * against the code that writes c->quick_wait elsewhere. */
		if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
			if(c->quick_wait != 2)
				schedule_timeout_interruptible(msecs_to_jiffies(10));
		} else
			schedule_timeout_interruptible(msecs_to_jiffies(50));

		if (kthread_should_stop()) {
			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
			goto die;
		}

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				jffs2_dbg(1, "%s(): SIGSTOP received\n",
					  __func__);
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				jffs2_dbg(1, "%s(): SIGKILL received\n",
					  __func__);
				goto die;

			case SIGHUP:
				/* SIGHUP just wakes us; fall out and run a pass. */
				jffs2_dbg(1, "%s(): SIGHUP received\n",
					  __func__);
				break;
			default:
				jffs2_dbg(1, "%s(): signal %ld received\n",
					  __func__, signr);
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		jffs2_dbg(1, "%s(): pass\n", __func__);
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			pr_notice("No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	/* Clear gc_task under the lock so the trigger functions never
	 * signal a dead task, then let the stopper proceed. */
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}