/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/crc32.h>
#include <linux/jffs2.h>
#include "nodelist.h"

static int jffs2_write_end(struct file *filp, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *pg, void *fsdata);
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata);
static int jffs2_readpage(struct file *filp, struct page *pg);

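/*
 * Flush any dirty pagecache pages in [start, end] to the medium, then take
 * i_mutex and run a GC pass to flush the write-behind buffer for this inode,
 * so that data still sitting in the wbuf also reaches flash before fsync()
 * returns.
 */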
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	/* Trigger GC to flush any pending writes for this inode */
	jffs2_flush_wbuf_gc(c, inode->i_ino);
	mutex_unlock(&inode->i_mutex);

	return 0;
}

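/*
 * The generic read/write paths are used unchanged; only ioctl and fsync are
 * JFFS2-specific. There is no ->writepage, so writable shared mappings are
 * not supported and the read-only mmap helper is used.
 */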
const struct file_operations jffs2_file_operations =
{
	.llseek =	generic_file_llseek,
	.open =		generic_file_open,
	.read =		do_sync_read,
	.aio_read =	generic_file_aio_read,
	.write =	do_sync_write,
	.aio_write =	generic_file_aio_write,
	.unlocked_ioctl=jffs2_ioctl,
	.mmap =		generic_file_readonly_mmap,
	.fsync =	jffs2_fsync,
	.splice_read =	generic_file_splice_read,
};

/* jffs2_file_inode_operations */

const struct inode_operations jffs2_file_inode_operations =
{
	.get_acl =	jffs2_get_acl,
	.setattr =	jffs2_setattr,
	.setxattr =	jffs2_setxattr,
	.getxattr =	jffs2_getxattr,
	.listxattr =	jffs2_listxattr,
	.removexattr =	jffs2_removexattr
};

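/*
 * Address space operations: note the absence of ->writepage. Data is written
 * out synchronously in write_begin/write_end, so pages are never left dirty
 * in the page cache for later writeback.
 */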
const struct address_space_operations jffs2_file_address_operations =
{
	.readpage =	jffs2_readpage,
	.write_begin =	jffs2_write_begin,
	.write_end =	jffs2_write_end,
};

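/*
 * Fill a locked pagecache page from the inode's data nodes via
 * jffs2_read_inode_range(). The page remains locked on return; it is the
 * caller's job to unlock it.
 */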
static int jffs2_do_readpage_nolock(struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);

	BUG_ON(!PageLocked(pg));

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT,
				     PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	jffs2_dbg(2, "readpage finished\n");
	return ret;
}

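/* As above, but also unlock the page once the read has completed. */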
int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
{
	int ret = jffs2_do_readpage_nolock(inode, pg);
	unlock_page(pg);
	return ret;
}

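/*
 * ->readpage() for the page cache. Take f->sem to serialise against writers
 * modifying the inode's node lists while the page is read in.
 */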
static int jffs2_readpage(struct file *filp, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
	int ret;

	mutex_lock(&f->sem);
	ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
	mutex_unlock(&f->sem);
	return ret;
}

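/*
 * ->write_begin(): if the write starts beyond the current EOF, first write a
 * hole node covering the gap, then grab the pagecache page and read it in
 * (unless it is already uptodate) ready for the copy from userspace.
 */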
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct page *pg;
	struct inode *inode = mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
	int ret = 0;

	jffs2_dbg(1, "%s()\n", __func__);

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t alloc_len;

		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs);

		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
		if (ret)
			goto out_err;

		mutex_lock(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);
		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);

		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			jffs2_complete_reservation(c);
			mutex_unlock(&f->sem);
			goto out_err;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n",
				  ret);
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			jffs2_complete_reservation(c);
			mutex_unlock(&f->sem);
			goto out_err;
		}
		jffs2_complete_reservation(c);
		inode->i_size = pageofs;
		mutex_unlock(&f->sem);
	}

	/*
	 * While getting a page and reading data in, lock c->alloc_sem until
	 * the page is Uptodate. Otherwise GC task may attempt to read the same
	 * page in read_cache_page(), which causes a deadlock.
	 */
	mutex_lock(&c->alloc_sem);
	pg = grab_cache_page_write_begin(mapping, index, flags);
	if (!pg) {
		ret = -ENOMEM;
		goto release_sem;
	}

	*pagep = pg;

	/*
	 * Read in the page if it wasn't already present. Cannot optimize away
	 * the whole page write case until jffs2_write_end can handle the
	 * case of a short-copy.
	 */
	if (!PageUptodate(pg)) {
		mutex_lock(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		mutex_unlock(&f->sem);
		if (ret) {
			unlock_page(pg);
			page_cache_release(pg);
			goto release_sem;
		}
	}
	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);

release_sem:
	mutex_unlock(&c->alloc_sem);
out_err:
	return ret;
}

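/*
 * ->write_end(): write the data that was copied into the page out to flash.
 * Returns the number of bytes actually committed, which may be less than
 * @copied if the flash write failed part way through.
 */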
static int jffs2_write_end(struct file *filp, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *pg, void *fsdata)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * The data is written out from a 32-bit-aligned start offset; when the
	 * write reaches the end of the page, the whole page is written out to
	 * keep the node count down.
	 */
	struct inode *inode = mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned end = start + copied;
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
		  start, end, pg->flags);

	/* We need to avoid deadlock with page_cache_read() in
	   jffs2_garbage_collect_pass(). So the page must be
	   up to date to prevent page_cache_read() from trying
	   to re-lock it. */
	BUG_ON(!PageUptodate(pg));

	if (end == PAGE_CACHE_SIZE) {
		/* When writing out the end of a page, write out the
		   _whole_ page. This helps to reduce the number of
		   nodes in files which have many short writes, like
		   syslog files. */
		aligned_start = 0;
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
			  __func__);
		unlock_page(pg);
		page_cache_release(pg);
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}

	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	writtenlen -= min(writtenlen, (start - aligned_start));

	if (writtenlen) {
		if (inode->i_size < pos + writtenlen) {
			inode->i_size = pos + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;

			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}

	jffs2_free_raw_inode(ri);

	if (start + writtenlen < end) {
		/* generic_file_write has written more to the page cache than we've
		   actually written to the medium. Mark the page !Uptodate so that
		   it gets reread */
		jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
			  __func__);
		SetPageError(pg);
		ClearPageUptodate(pg);
	}

	jffs2_dbg(1, "%s() returning %d\n",
		  __func__, writtenlen > 0 ? writtenlen : ret);
	unlock_page(pg);
	page_cache_release(pg);
	return writtenlen > 0 ? writtenlen : ret;
}