| /* | 
 |  * JFFS2 -- Journalling Flash File System, Version 2. | 
 |  * | 
 |  * Copyright © 2001-2007 Red Hat, Inc. | 
 |  * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> | 
 |  * | 
 |  * Created by David Woodhouse <dwmw2@infradead.org> | 
 |  * | 
 |  * For licensing information, see the file 'LICENCE' in this directory. | 
 |  * | 
 |  */ | 
 |  | 
 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 
 |  | 
 | #include <linux/kernel.h> | 
 | #include <linux/jffs2.h> | 
 | #include <linux/mtd/mtd.h> | 
 | #include <linux/completion.h> | 
 | #include <linux/sched.h> | 
 | #include <linux/freezer.h> | 
 | #include <linux/kthread.h> | 
 | #include <linux/syscalls.h> | 
 | #include "nodelist.h" | 
 |  | 
/* quick GC wait timeout: 60 seconds */
 | #define QUICK_GC_TIMEOUT_MS  60000 | 
 |  | 
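/*
 * Per-partition state for the quick GC extension: a pointer back to the
 * mounted filesystem, an optional on-stack completion supplied by a waiter
 * in quick_gc_wait_done(), and a semaphore serialising those waiters.
 */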
struct jffs2_quick_gc_manager {
	char *name;
	struct jffs2_sb_info *c;
	struct completion *waiting;
	struct semaphore wait_sem;
};
 |  | 
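/*
 * Partitions for which quick GC is enabled, matched by MTD partition name.
 * The name list is presumably board/product-specific; entries whose partition
 * is not mounted read-write keep c == NULL.
 */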
static struct jffs2_quick_gc_manager jffs2_quick_gc_mtd[] = {
	{ .name = "imagefs" },
	{ .name = "resource" },
	{ .name = "nvrofs" },
};
 |  | 
 |  | 
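/* Look up the quick GC manager entry for an MTD partition name, or NULL. */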
static struct jffs2_quick_gc_manager *jffs2_quick_gc_find_manager(const char *mtd_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(jffs2_quick_gc_mtd); i++) {
		if (strcmp(mtd_name, jffs2_quick_gc_mtd[i].name) == 0)
			return &jffs2_quick_gc_mtd[i];
	}

	return NULL;
}
 |  | 
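/* Detach a filesystem from its quick GC manager entry when its GC thread stops. */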
static int jffs2_quick_gc_clean_manager(struct jffs2_sb_info *c)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(jffs2_quick_gc_mtd); i++) {
		if (strcmp(c->mtd->name, jffs2_quick_gc_mtd[i].name) == 0)
			jffs2_quick_gc_mtd[i].c = NULL;
	}

	return 0;
}
 |  | 
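/*
 * Wait (up to QUICK_GC_TIMEOUT_MS) until the GC thread of the named partition
 * has no more pending work, triggering a quick GC pass if needed.  Always
 * returns 1; callers only need to know that the wait has finished.
 */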
int quick_gc_wait_done(const char *mtd_name)
{
	struct jffs2_sb_info *c;
	struct jffs2_quick_gc_manager *m;
	struct completion waiting_done;

	m = jffs2_quick_gc_find_manager(mtd_name);
	BUG_ON(!m);

	c = m->c;
	if (c == NULL)
		return 1; /* partition does not exist or is not mounted read-write */
	if (!c->gc_task)
		return 1; /* no GC task, so not mounted read-write */
	BUG_ON(m->waiting); /* only one waiter may be pending at a time */

	down(&m->wait_sem);
	spin_lock(&c->erase_completion_lock);
	if (jffs2_thread_should_wake(c)) {
		m->waiting = &waiting_done;
		init_completion(m->waiting);
		jffs2_dbg(1, "%s(): quick GC wait start on %s\n",
			  __func__, c->mtd->name);
		jffs2_quick_garbage_collect_trigger(c);
		spin_unlock(&c->erase_completion_lock);
		wait_for_completion_timeout(&waiting_done,
					    msecs_to_jiffies(QUICK_GC_TIMEOUT_MS));
		/* Clear the waiter under the lock so the GC thread cannot
		   complete() a stale on-stack completion after we return. */
		spin_lock(&c->erase_completion_lock);
		m->waiting = NULL;
		spin_unlock(&c->erase_completion_lock);
		jffs2_dbg(1, "%s(): quick GC wait done on %s\n",
			  __func__, c->mtd->name);
		up(&m->wait_sem);
		return 1;
	}
	spin_unlock(&c->erase_completion_lock);
	up(&m->wait_sem);

	return 1;
}
 |  | 
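/*
 * Syscall wrapper: partition_no indexes jffs2_quick_gc_mtd[] (0 = "imagefs",
 * 1 = "resource", 2 = "nvrofs").  Wiring the syscall number into the
 * architecture syscall table is assumed to happen elsewhere; a userspace
 * caller would then do something like (hypothetical syscall number):
 *
 *	syscall(__NR_jffs2_quick_gc_wait_done, 0);	// wait for "imagefs" GC
 */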
SYSCALL_DEFINE1(jffs2_quick_gc_wait_done, int, partition_no)
{
	if (partition_no < 0 || partition_no >= (int)ARRAY_SIZE(jffs2_quick_gc_mtd))
		return -EINVAL;

	return quick_gc_wait_done(jffs2_quick_gc_mtd[partition_no].name);
}
 |  | 
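/*
 * Called by the GC thread, with erase_completion_lock held, once it has no
 * more work to do; wakes any waiter parked in quick_gc_wait_done().
 */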
 | void jffs2_quick_gc_done(struct jffs2_sb_info *c) | 
 | { | 
 | 	struct jffs2_quick_gc_manager *m = NULL; | 
 |  | 
 | 	assert_spin_locked(&c->erase_completion_lock); | 
 |  | 
 | 	m = jffs2_quick_gc_find_manager(c->mtd->name); | 
 | 	BUG_ON(!m); | 
 |  | 
	if (m->waiting)
		complete(m->waiting);
 | } | 
 |  | 
 | static int jffs2_garbage_collect_thread(void *); | 
 |  | 
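/* Wake the GC thread if it exists and there is work worth doing. */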
 | void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) | 
 | { | 
 | 	assert_spin_locked(&c->erase_completion_lock); | 
 | 	if (c->gc_task && jffs2_thread_should_wake(c)) { | 
 | 		send_sig(SIGHUP, c->gc_task, 1); | 
 | 	} | 
 | } | 
 |  | 
/* Quick GC trigger: flag the request, reset the per-run block counter and
   wake the GC thread unconditionally. */
 | void jffs2_quick_garbage_collect_trigger(struct jffs2_sb_info *c) | 
 | { | 
 | 	assert_spin_locked(&c->erase_completion_lock); | 
 | 	if (c->gc_task) { | 
 | 		c->quick_wait = 1; | 
 | 		c->quick_gcblock_count = 0; | 
 | 		send_sig(SIGHUP, c->gc_task, 1); | 
 | 	} | 
 | } | 
 |  | 
 | /* This must only ever be called when no GC thread is currently running */ | 
 | int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) | 
 | { | 
 | 	struct task_struct *tsk; | 
	int i, ret = 0;
 |  | 
 | 	BUG_ON(c->gc_task); | 
 |  | 
 | 	init_completion(&c->gc_thread_start); | 
 | 	init_completion(&c->gc_thread_exit); | 
 |  | 
	for (i = 0; i < ARRAY_SIZE(jffs2_quick_gc_mtd); i++) {
		if (strcmp(c->mtd->name, jffs2_quick_gc_mtd[i].name) == 0) {
			c->flags |= JFFS2_SB_FLAG_QUICK_GC;
			c->quick_gcblock_count = 0;
			c->quick_wait = 0;
			jffs2_quick_gc_mtd[i].c = c;
			sema_init(&jffs2_quick_gc_mtd[i].wait_sem, 1);
		}
	}
 |  | 
 | 	tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); | 
 | 	if (IS_ERR(tsk)) { | 
 | 		pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", | 
 | 			-PTR_ERR(tsk)); | 
 | 		complete(&c->gc_thread_exit); | 
 | 		ret = PTR_ERR(tsk); | 
 | 	} else { | 
 | 		/* Wait for it... */ | 
 | 		jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); | 
 | 		wait_for_completion(&c->gc_thread_start); | 
 | 		ret = tsk->pid; | 
 | 	} | 
 |  | 
 | 	return ret; | 
 | } | 
 |  | 
 | void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) | 
 | { | 
 | 	int wait = 0; | 
 | 	struct jffs2_quick_gc_manager *m = NULL; | 
 |  | 
	if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
		m = jffs2_quick_gc_find_manager(c->mtd->name);
		BUG_ON(!m);
		BUG_ON(m->waiting); /* nobody may still be waiting for quick GC */
	}
 |  | 
 | 	spin_lock(&c->erase_completion_lock); | 
 | 	if (c->gc_task) { | 
 | 		jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); | 
 | 		send_sig(SIGKILL, c->gc_task, 1); | 
 | 		wait = 1; | 
 | 	} | 
 | 	spin_unlock(&c->erase_completion_lock); | 
 | 	if (wait) | 
 | 		wait_for_completion(&c->gc_thread_exit); | 
 |  | 
	if (c->flags & JFFS2_SB_FLAG_QUICK_GC)
 | 		jffs2_quick_gc_clean_manager(c); | 
 |  | 
 | } | 
 |  | 
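/*
 * Body of the per-filesystem GC kernel thread: sleep until there is work (or
 * a signal), throttle, handle SIGSTOP/SIGKILL/SIGHUP, then run a single
 * garbage-collect pass per loop iteration.
 */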
 | static int jffs2_garbage_collect_thread(void *_c) | 
 | { | 
 | 	struct jffs2_sb_info *c = _c; | 
 |  | 
 | 	allow_signal(SIGKILL); | 
 | 	allow_signal(SIGSTOP); | 
 | 	allow_signal(SIGCONT); | 
 |  | 
 | 	c->gc_task = current; | 
 | 	complete(&c->gc_thread_start); | 
 |  | 
 | 	set_user_nice(current, 10); | 
 |  | 
 | 	set_freezable(); | 
 | 	for (;;) { | 
 | 		allow_signal(SIGHUP); | 
 | 	again: | 
 | 		spin_lock(&c->erase_completion_lock); | 
 | 		if (!jffs2_thread_should_wake(c)) { | 
 | 			set_current_state (TASK_INTERRUPTIBLE); | 
			if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
				jffs2_dbg(1, "%s(): quick GC done on %s, going to sleep, gcblock_cnt %d\n",
					  __func__, c->mtd->name, c->quick_gcblock_count);
				jffs2_quick_gc_done(c);
			}
 | 			spin_unlock(&c->erase_completion_lock); | 
 | 			jffs2_dbg(1, "%s(): sleeping...\n", __func__); | 
 | 			schedule(); | 
		} else
			spin_unlock(&c->erase_completion_lock);
 |  | 
 | 		/* Problem - immediately after bootup, the GCD spends a lot | 
 | 		 * of time in places like jffs2_kill_fragtree(); so much so | 
 | 		 * that userspace processes (like gdm and X) are starved | 
 | 		 * despite plenty of cond_resched()s and renicing.  Yield() | 
 | 		 * doesn't help, either (presumably because userspace and GCD | 
 | 		 * are generally competing for a higher latency resource - | 
 | 		 * disk). | 
 | 		 * This forces the GCD to slow the hell down.   Pulling an | 
 | 		 * inode in with read_inode() is much preferable to having | 
 | 		 * the GC thread get there first. */ | 
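		/* In quick-GC mode the throttle is much shorter, and is skipped
		   entirely when quick_wait == 2 (a value never set in this
		   file, so presumably managed by the GC pass code elsewhere),
		   which keeps a waiter in quick_gc_wait_done() from being held
		   up by the normal 50ms delay. */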
		if (c->flags & JFFS2_SB_FLAG_QUICK_GC) {
			if (c->quick_wait != 2)
				schedule_timeout_interruptible(msecs_to_jiffies(10));
		} else {
			schedule_timeout_interruptible(msecs_to_jiffies(50));
		}
 |  | 
 | 		if (kthread_should_stop()) { | 
 | 			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__); | 
 | 			goto die; | 
 | 		} | 
 |  | 
 | 		/* Put_super will send a SIGKILL and then wait on the sem. | 
 | 		 */ | 
 | 		while (signal_pending(current) || freezing(current)) { | 
 | 			siginfo_t info; | 
 | 			unsigned long signr; | 
 |  | 
 | 			if (try_to_freeze()) | 
 | 				goto again; | 
 |  | 
			signr = dequeue_signal_lock(current, &current->blocked, &info);
 |  | 
 | 			switch(signr) { | 
 | 			case SIGSTOP: | 
 | 				jffs2_dbg(1, "%s(): SIGSTOP received\n", | 
 | 					  __func__); | 
 | 				set_current_state(TASK_STOPPED); | 
 | 				schedule(); | 
 | 				break; | 
 |  | 
 | 			case SIGKILL: | 
 | 				jffs2_dbg(1, "%s(): SIGKILL received\n", | 
 | 					  __func__); | 
 | 				goto die; | 
 |  | 
 | 			case SIGHUP: | 
 | 				jffs2_dbg(1, "%s(): SIGHUP received\n", | 
 | 					  __func__); | 
 | 				break; | 
 | 			default: | 
 | 				jffs2_dbg(1, "%s(): signal %ld received\n", | 
 | 					  __func__, signr); | 
 | 			} | 
 | 		} | 
 | 		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ | 
 | 		disallow_signal(SIGHUP); | 
 |  | 
 | 		jffs2_dbg(1, "%s(): pass\n", __func__); | 
 | 		if (jffs2_garbage_collect_pass(c) == -ENOSPC) { | 
 | 			pr_notice("No space for garbage collection. Aborting GC thread\n"); | 
 | 			goto die; | 
 | 		} | 
 | 	} | 
 |  die: | 
 | 	spin_lock(&c->erase_completion_lock); | 
 | 	c->gc_task = NULL; | 
 | 	spin_unlock(&c->erase_completion_lock); | 
 | 	complete_and_exit(&c->gc_thread_exit, 0); | 
 | } |