[Feature][ZXW-65] Merge P49 base code

Change-Id: I3e09c0c3d47483bc645f02310380ecb7fc6f4041
diff --git a/ap/os/linux/linux-3.4.x/mm/mmap.c b/ap/os/linux/linux-3.4.x/mm/mmap.c
old mode 100644
new mode 100755
index cb6456d..88d133b
--- a/ap/os/linux/linux-3.4.x/mm/mmap.c
+++ b/ap/os/linux/linux-3.4.x/mm/mmap.c
@@ -39,6 +39,19 @@
 
 #include "internal.h"
 
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+/*
+ * Cross-system SysV shared-memory support: helper routines live in
+ * ipc/shm_ctrl (declared below and in shm_ctrl.h).
+ * NOTE(review): the "<../ipc/shm_ctrl.h>" relative include path is unusual
+ * for kernel code — confirm it resolves for out-of-tree/objdir builds.
+ */
+#include <../ipc/shm_ctrl.h>
+/* Populate @vma's page tables from the remote mapping backing @file. */
+extern void shm_mmap_pagetable(struct vm_area_struct *vma, struct file *file);
+/* Tear down page-table entries for [addr, end) of a remote-shm VMA. */
+extern void shm_unmap_page_range(struct mm_struct *mm, struct vm_area_struct *vma,
+			                      unsigned long addr, unsigned long end);
+/* Trace macros compiled out via no_printk(); same shape as mm/nommu.c. */
+#define kenter(FMT, ...) \
+	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
+
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)	(0)
 #endif
@@ -1331,9 +1344,9 @@
 	/*
 	 * Can we just expand an old mapping?
 	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
-	if (vma)
-		goto out;
+    vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+    if (vma)
+	    goto out;
 
 	/*
 	 * Determine the object being mapped and call the appropriate
@@ -1420,6 +1433,14 @@
 			mm->locked_vm += (len >> PAGE_SHIFT);
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
+	
+#ifdef CONFIG_SYSVIPC_CROSS_SHM 
+	/* Get the real physical pages for the remote shared-memory mapping */
+	if (file && (file->f_flags == SHM_REMOTE_ATTR_YES))
+	{		
+		shm_mmap_pagetable(vma, file);		
+	}
+#endif
 	return addr;
 
 unmap_and_free_vma:
@@ -2125,6 +2146,138 @@
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+/*
+ * delete a VMA from its owning mm_struct and address space
+ *
+ * Unlinks @vma from all of the mm's bookkeeping — map count, mmap_cache,
+ * the backing file's i_mmap prio tree, the VMA rb-tree and the doubly
+ * linked VMA list — without freeing the VMA itself; pairs with
+ * shm_delete_vma().
+ * NOTE(review): no locking is taken here beyond i_mmap_mutex; presumably
+ * callers hold mmap_sem for writing — confirm at every call site.
+ */
+static void shm_delete_vma_from_mm(struct vm_area_struct *vma)
+{
+	struct address_space *mapping;
+	struct mm_struct *mm = vma->vm_mm;
+
+	mm->map_count--;
+	/* Invalidate the find_vma() single-entry cache if it points here. */
+	if (mm->mmap_cache == vma)
+		mm->mmap_cache = NULL;
+
+	/* remove the VMA from the mapping (file-backed VMAs only) */
+	if (vma->vm_file) {
+		mapping = vma->vm_file->f_mapping;
+
+		mutex_lock(&mapping->i_mmap_mutex);
+		flush_dcache_mmap_lock(mapping);
+		vma_prio_tree_remove(vma, &mapping->i_mmap);
+		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
+	}
+
+	/* remove from the MM's tree and list */
+	rb_erase(&vma->vm_rb, &mm->mm_rb);
+
+	if (vma->vm_prev)
+		vma->vm_prev->vm_next = vma->vm_next;
+	else
+		mm->mmap = vma->vm_next;	/* vma was the list head */
+
+	if (vma->vm_next)
+		vma->vm_next->vm_prev = vma->vm_prev;
+}
+
+/*
+ * destroy a VMA record
+ *
+ * Releases everything attached to @vma — the driver's ->close hook, the
+ * reference on the backing file taken at mmap time, and the NUMA policy —
+ * then returns the VMA to the slab cache.  Counterpart of
+ * shm_delete_vma_from_mm(), which must have unlinked @vma first.
+ */
+static void shm_delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+	const struct vm_operations_struct *ops = vma->vm_ops;
+	struct file *file = vma->vm_file;
+
+	/* Give the driver a chance to clean up before we tear down. */
+	if (ops && ops->close)
+		ops->close(vma);
+
+	/* Drop the mmap-time file reference. */
+	if (file) {
+		fput(file);
+		if (vma->vm_flags & VM_EXECUTABLE)
+			removed_exe_file_vma(mm);
+	}
+
+	mpol_put(vma_policy(vma));
+	kmem_cache_free(vm_area_cachep, vma);
+}
+
+/*
+ * release a mapping
+ * - the chunk to be unmapped must be backed by a single
+ *   VMA, though it need not cover the whole VMA
+ *
+ * Returns 0 on success or a negative errno.  The structure closely
+ * follows do_munmap() in mm/nommu.c — presumably adapted from it; keep
+ * the two in sync when backporting fixes.
+ */
+int shm_ctrl_do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+	int ret = 0;
+	struct vm_area_struct *vma;
+	unsigned long end;
+
+	len = PAGE_ALIGN(len);
+	if (len == 0)
+		return -EINVAL;
+
+	end = start + len;
+
+	/* find the first potentially overlapping VMA */
+	vma = find_vma(mm, start);
+	if (!vma) {
+		/* rate-limit the diagnostic to the first few offenders */
+		static int limit = 0;
+		if (limit < 5) {
+			printk(KERN_WARNING
+			       "munmap of memory not mmapped by process %d"
+			       " (%s): 0x%lx-0x%lx\n",
+			       current->pid, current->comm,
+			       start, start + len - 1);
+			limit++;
+		}
+		return -EINVAL;
+	}
+
+	/* we're allowed to split an anonymous VMA but not a file-backed one */
+	if (vma->vm_file) {
+		/*
+		 * Walk forward until we find a VMA whose extent exactly ends
+		 * at @end; any request starting inside a VMA is rejected.
+		 * NOTE(review): this loop only compares vm_start/vm_end and
+		 * may walk past unrelated VMAs between start and end —
+		 * confirm this matches the intended single-VMA contract.
+		 */
+		do {
+			if (start > vma->vm_start) {
+				kleave(" = -EINVAL [miss]");
+				return -EINVAL;
+			}
+			if (end == vma->vm_end)
+				goto erase_whole_vma;
+			vma = vma->vm_next;
+		} while (vma);
+		kleave(" = -EINVAL [split file]");
+		return -EINVAL;
+	} else {
+		/* the chunk must be a subset of the VMA found */
+		if (start == vma->vm_start && end == vma->vm_end)
+			goto erase_whole_vma;
+		if (start < vma->vm_start || end > vma->vm_end) {
+			kleave(" = -EINVAL [superset]");
+			return -EINVAL;
+		}
+		if (start & ~PAGE_MASK) {
+			kleave(" = -EINVAL [unaligned start]");
+			return -EINVAL;
+		}
+		if (end != vma->vm_end && end & ~PAGE_MASK) {
+			kleave(" = -EINVAL [unaligned split]");
+			return -EINVAL;
+		}
+		if (start != vma->vm_start && end != vma->vm_end) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret < 0) {
+				kleave(" = %d [split]", ret);
+				return ret;
+			}
+		}
+		/*
+		 * NOTE(review): on this partial-unmap path the VMA is (at
+		 * most) split, but the [start, end) region is never actually
+		 * unmapped or freed before returning 0.  In the mm/nommu.c
+		 * original, the split is followed by a shrink of the VMA —
+		 * confirm whether that step was dropped intentionally here.
+		 */
+		return ret;
+	}
+
+erase_whole_vma:
+	/* clear the PTEs first, then unlink and free the VMA record */
+	shm_unmap_page_range(mm, vma, start, end);
+	shm_delete_vma_from_mm(vma);
+	shm_delete_vma(mm, vma);
+	return 0;
+}
+EXPORT_SYMBOL(shm_ctrl_do_munmap);
+#endif
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2185,6 +2338,13 @@
 			return error;
 	}
 	vma = prev? prev->vm_next: mm->mmap;
+		
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+	if (vma->vm_file && (vma->vm_file->f_flags == SHM_REMOTE_ATTR_YES)) {
+		shm_ctrl_do_munmap(mm, start, len);
+		return 0;
+	}
+#endif
 
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
@@ -2361,6 +2521,22 @@
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
 
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+	struct vm_area_struct *vma_shm;
+
+	vma_shm = mm->mmap;
+	while (vma_shm) {		
+		if ((vma_shm->vm_file) && 
+		   (vma_shm->vm_file->f_flags == SHM_REMOTE_ATTR_YES)) {
+			vma = vma_shm->vm_next;
+			shm_ctrl_do_munmap(mm, vma_shm->vm_start, (vma_shm->vm_end - vma_shm->vm_start));
+			vma_shm = vma;
+			continue;
+		}
+		else
+		   	vma_shm = vma_shm->vm_next;
+	}
+#endif
 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);