blob: 40a5b46cf6caff782ed5bddd68430d944ba3f81f [file] [log] [blame]
xjb04a4022021-11-25 15:01:52 +08001/*
2 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
3 *
4 * Copyright (C) 2002-2018 Aleph One Ltd.
5 *
6 * Created by Charles Manning <charles@aleph1.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include "yportenv.h"
14#include "yaffs_trace.h"
15
16#include "yaffs_guts.h"
17#include "yaffs_endian.h"
18#include "yaffs_getblockinfo.h"
19#include "yaffs_tagscompat.h"
20#include "yaffs_tagsmarshall.h"
21#include "yaffs_nand.h"
22#include "yaffs_yaffs1.h"
23#include "yaffs_yaffs2.h"
24#include "yaffs_bitmap.h"
25#include "yaffs_verify.h"
26#include "yaffs_nand.h"
27#include "yaffs_packedtags2.h"
28#include "yaffs_nameval.h"
29#include "yaffs_allocator.h"
30#include "yaffs_attribs.h"
31#include "yaffs_summary.h"
32
33/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
34#define YAFFS_GC_GOOD_ENOUGH 2
35#define YAFFS_GC_PASSIVE_THRESHOLD 4
36
37#include "yaffs_ecc.h"
38
39/* Forward declarations */
40
41static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
42 const u8 *buffer, int n_bytes, int use_reserve);
43
44static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
45 int buffer_size);
46
47/* Function to calculate chunk and offset */
48
49void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
50 int *chunk_out, u32 *offset_out)
51{
52 int chunk;
53 u32 offset;
54
55 chunk = (u32) (addr >> dev->chunk_shift);
56
57 if (dev->chunk_div == 1) {
58 /* easy power of 2 case */
59 offset = (u32) (addr & dev->chunk_mask);
60 } else {
61 /* Non power-of-2 case */
62
63 loff_t chunk_base;
64
65 chunk /= dev->chunk_div;
66
67 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
68 offset = (u32) (addr - chunk_base);
69 }
70
71 *chunk_out = chunk;
72 *offset_out = offset;
73}
74
75/* Function to return the number of shifts for a power of 2 greater than or
76 * equal to the given number
77 * Note we don't try to cater for all possible numbers and this does not have to
78 * be hellishly efficient.
79 */
80
81static inline u32 calc_shifts_ceiling(u32 x)
82{
83 int extra_bits;
84 int shifts;
85
86 shifts = extra_bits = 0;
87
88 while (x > 1) {
89 if (x & 1)
90 extra_bits++;
91 x >>= 1;
92 shifts++;
93 }
94
95 if (extra_bits)
96 shifts++;
97
98 return shifts;
99}
100
101/* Function to return the number of shifts to get a 1 in bit 0
102 */
103
104static inline u32 calc_shifts(u32 x)
105{
106 u32 shifts;
107
108 shifts = 0;
109
110 if (!x)
111 return 0;
112
113 while (!(x & 1)) {
114 x >>= 1;
115 shifts++;
116 }
117
118 return shifts;
119}
120
121/*
122 * Temporary buffer manipulations.
123 */
124
125static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
126{
127 int i;
128 u8 *buf = (u8 *) 1;
129
130 memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
131
132 for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
133 dev->temp_buffer[i].in_use = 0;
134 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
135 dev->temp_buffer[i].buffer = buf;
136 }
137
138 return buf ? YAFFS_OK : YAFFS_FAIL;
139}
140
141u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
142{
143 int i;
144
145 dev->temp_in_use++;
146 if (dev->temp_in_use > dev->max_temp)
147 dev->max_temp = dev->temp_in_use;
148
149 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
150 if (dev->temp_buffer[i].in_use == 0) {
151 dev->temp_buffer[i].in_use = 1;
152 return dev->temp_buffer[i].buffer;
153 }
154 }
155
156 yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
157 /*
158 * If we got here then we have to allocate an unmanaged one
159 * This is not good.
160 */
161
162 dev->unmanaged_buffer_allocs++;
163 return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
164
165}
166
167void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
168{
169 int i;
170
171 dev->temp_in_use--;
172
173 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
174 if (dev->temp_buffer[i].buffer == buffer) {
175 dev->temp_buffer[i].in_use = 0;
176 return;
177 }
178 }
179
180 if (buffer) {
181 /* assume it is an unmanaged one. */
182 yaffs_trace(YAFFS_TRACE_BUFFERS,
183 "Releasing unmanaged temp buffer");
184 kfree(buffer);
185 dev->unmanaged_buffer_deallocs++;
186 }
187
188}
189
190/*
191 * Functions for robustisizing TODO
192 *
193 */
194
195static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
196 const u8 *data,
197 const struct yaffs_ext_tags *tags)
198{
199 (void) dev;
200 (void) nand_chunk;
201 (void) data;
202 (void) tags;
203}
204
205static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
206 const struct yaffs_ext_tags *tags)
207{
208 (void) dev;
209 (void) nand_chunk;
210 (void) tags;
211}
212
213void yaffs_handle_chunk_error(struct yaffs_dev *dev,
214 struct yaffs_block_info *bi)
215{
216 if (!bi->gc_prioritise) {
217 bi->gc_prioritise = 1;
218 dev->has_pending_prioritised_gc = 1;
219 bi->chunk_error_strikes++;
220
221 if (bi->chunk_error_strikes > 3) {
222 bi->needs_retiring = 1; /* Too many stikes, so retire */
223 yaffs_trace(YAFFS_TRACE_ALWAYS,
224 "yaffs: Block struck out");
225
226 }
227 }
228}
229
230static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
231 int erased_ok)
232{
233 int flash_block = nand_chunk / dev->param.chunks_per_block;
234 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
235
236 yaffs_handle_chunk_error(dev, bi);
237
238 if (erased_ok) {
239 /* Was an actual write failure,
240 * so mark the block for retirement.*/
241 bi->needs_retiring = 1;
242 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
243 "**>> Block %d needs retiring", flash_block);
244 }
245
246 /* Delete the chunk */
247 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
248 yaffs_skip_rest_of_block(dev);
249}
250
251/*
252 * Verification code
253 */
254
255/*
256 * Simple hash function. Needs to have a reasonable spread
257 */
258
259static inline int yaffs_hash_fn(int n)
260{
261 if (n < 0)
262 n = -n;
263 return n % YAFFS_NOBJECT_BUCKETS;
264}
265
266/*
267 * Access functions to useful fake objects.
268 * Note that root might have a presence in NAND if permissions are set.
269 */
270
271struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
272{
273 return dev->root_dir;
274}
275
276struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
277{
278 return dev->lost_n_found;
279}
280
281/*
282 * Erased NAND checking functions
283 */
284
285int yaffs_check_ff(u8 *buffer, int n_bytes)
286{
287 /* Horrible, slow implementation */
288 while (n_bytes--) {
289 if (*buffer != 0xff)
290 return 0;
291 buffer++;
292 }
293 return 1;
294}
295
296static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
297{
298 int retval = YAFFS_OK;
299 u8 *data = yaffs_get_temp_buffer(dev);
300 struct yaffs_ext_tags tags;
301 int result;
302
303 result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
304
305 if (result == YAFFS_FAIL ||
306 tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
307 retval = YAFFS_FAIL;
308
309 if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
310 tags.chunk_used) {
311 yaffs_trace(YAFFS_TRACE_NANDACCESS,
312 "Chunk %d not erased", nand_chunk);
313 retval = YAFFS_FAIL;
314 }
315
316 yaffs_release_temp_buffer(dev, data);
317
318 return retval;
319
320}
321
322static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
323 int nand_chunk,
324 const u8 *data,
325 struct yaffs_ext_tags *tags)
326{
327 int retval = YAFFS_OK;
328 struct yaffs_ext_tags temp_tags;
329 u8 *buffer = yaffs_get_temp_buffer(dev);
330 int result;
331
332 result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
333 if (result == YAFFS_FAIL ||
334 memcmp(buffer, data, dev->data_bytes_per_chunk) ||
335 temp_tags.obj_id != tags->obj_id ||
336 temp_tags.chunk_id != tags->chunk_id ||
337 temp_tags.n_bytes != tags->n_bytes)
338 retval = YAFFS_FAIL;
339
340 yaffs_release_temp_buffer(dev, buffer);
341
342 return retval;
343}
344
345
346int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
347{
348 int reserved_chunks;
349 int reserved_blocks = dev->param.n_reserved_blocks;
350 int checkpt_blocks;
351
352 checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
353
354 reserved_chunks =
355 (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
356
357 return (dev->n_free_chunks > (reserved_chunks + n_chunks));
358}
359
360static int yaffs_find_alloc_block(struct yaffs_dev *dev)
361{
362 u32 i;
363 struct yaffs_block_info *bi;
364
365 if (dev->n_erased_blocks < 1) {
366 /* Hoosterman we've got a problem.
367 * Can't get space to gc
368 */
369 yaffs_trace(YAFFS_TRACE_ERROR,
370 "yaffs tragedy: no more erased blocks");
371
372 return -1;
373 }
374
375 /* Find an empty block. */
376
377 for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
378 dev->alloc_block_finder++;
379 if (dev->alloc_block_finder < (int)dev->internal_start_block
380 || dev->alloc_block_finder > (int)dev->internal_end_block) {
381 dev->alloc_block_finder = dev->internal_start_block;
382 }
383
384 bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
385
386 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
387 bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
388 dev->seq_number++;
389 bi->seq_number = dev->seq_number;
390 dev->n_erased_blocks--;
391 yaffs_trace(YAFFS_TRACE_ALLOCATE,
392 "Allocated block %d, seq %d, %d left" ,
393 dev->alloc_block_finder, dev->seq_number,
394 dev->n_erased_blocks);
395 return dev->alloc_block_finder;
396 }
397 }
398
399 yaffs_trace(YAFFS_TRACE_ALWAYS,
400 "yaffs tragedy: no more erased blocks, but there should have been %d",
401 dev->n_erased_blocks);
402
403 return -1;
404}
405
406static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
407 struct yaffs_block_info **block_ptr)
408{
409 int ret_val;
410 struct yaffs_block_info *bi;
411
412 if (dev->alloc_block < 0) {
413 /* Get next block to allocate off */
414 dev->alloc_block = yaffs_find_alloc_block(dev);
415 dev->alloc_page = 0;
416 }
417
418 if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
419 /* No space unless we're allowed to use the reserve. */
420 return -1;
421 }
422
423 if (dev->n_erased_blocks < (int)dev->param.n_reserved_blocks
424 && dev->alloc_page == 0)
425 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
426
427 /* Next page please.... */
428 if (dev->alloc_block >= 0) {
429 bi = yaffs_get_block_info(dev, dev->alloc_block);
430
431 ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
432 dev->alloc_page;
433 bi->pages_in_use++;
434 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
435
436 dev->alloc_page++;
437
438 dev->n_free_chunks--;
439
440 /* If the block is full set the state to full */
441 if (dev->alloc_page >= dev->param.chunks_per_block) {
442 bi->block_state = YAFFS_BLOCK_STATE_FULL;
443 dev->alloc_block = -1;
444 }
445
446 if (block_ptr)
447 *block_ptr = bi;
448
449 return ret_val;
450 }
451
452 yaffs_trace(YAFFS_TRACE_ERROR,
453 "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
454
455 return -1;
456}
457
458static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
459{
460 int n;
461
462 n = dev->n_erased_blocks * dev->param.chunks_per_block;
463
464 if (dev->alloc_block > 0)
465 n += (dev->param.chunks_per_block - dev->alloc_page);
466
467 return n;
468
469}
470
471/*
472 * yaffs_skip_rest_of_block() skips over the rest of the allocation block
473 * if we don't want to write to it.
474 */
475void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
476{
477 struct yaffs_block_info *bi;
478
479 if (dev->alloc_block > 0) {
480 bi = yaffs_get_block_info(dev, dev->alloc_block);
481 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
482 bi->block_state = YAFFS_BLOCK_STATE_FULL;
483 dev->alloc_block = -1;
484 }
485 }
486}
487
488static int yaffs_write_new_chunk(struct yaffs_dev *dev,
489 const u8 *data,
490 struct yaffs_ext_tags *tags, int use_reserver)
491{
492 u32 attempts = 0;
493 int write_ok = 0;
494 int chunk;
495
496 yaffs2_checkpt_invalidate(dev);
497
498 do {
499 struct yaffs_block_info *bi = 0;
500 int erased_ok = 0;
501
502 chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
503 if (chunk < 0) {
504 /* no space */
505 break;
506 }
507
508 /* First check this chunk is erased, if it needs
509 * checking. The checking policy (unless forced
510 * always on) is as follows:
511 *
512 * Check the first page we try to write in a block.
513 * If the check passes then we don't need to check any
514 * more. If the check fails, we check again...
515 * If the block has been erased, we don't need to check.
516 *
517 * However, if the block has been prioritised for gc,
518 * then we think there might be something odd about
519 * this block and stop using it.
520 *
521 * Rationale: We should only ever see chunks that have
522 * not been erased if there was a partially written
523 * chunk due to power loss. This checking policy should
524 * catch that case with very few checks and thus save a
525 * lot of checks that are most likely not needed.
526 *
527 * Mods to the above
528 * If an erase check fails or the write fails we skip the
529 * rest of the block.
530 */
531
532 /* let's give it a try */
533 attempts++;
534
535 if (dev->param.always_check_erased)
536 bi->skip_erased_check = 0;
537
538 if (!bi->skip_erased_check) {
539 erased_ok = yaffs_check_chunk_erased(dev, chunk);
540 if (erased_ok != YAFFS_OK) {
541 yaffs_trace(YAFFS_TRACE_ERROR,
542 "**>> yaffs chunk %d was not erased",
543 chunk);
544
545 /* If not erased, delete this one,
546 * skip rest of block and
547 * try another chunk */
548 yaffs_chunk_del(dev, chunk, 1, __LINE__);
549 yaffs_skip_rest_of_block(dev);
550 continue;
551 }
552 }
553
554 write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
555
556 if (!bi->skip_erased_check)
557 write_ok =
558 yaffs_verify_chunk_written(dev, chunk, data, tags);
559
560 if (write_ok != YAFFS_OK) {
561 /* Clean up aborted write, skip to next block and
562 * try another chunk */
563 yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
564 continue;
565 }
566
567 bi->skip_erased_check = 1;
568
569 /* Copy the data into the robustification buffer */
570 yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
571
572 } while (write_ok != YAFFS_OK &&
573 (yaffs_wr_attempts == 0 || attempts <= yaffs_wr_attempts));
574
575 if (!write_ok)
576 chunk = -1;
577
578 if (attempts > 1) {
579 yaffs_trace(YAFFS_TRACE_ERROR,
580 "**>> yaffs write required %d attempts",
581 attempts);
582 dev->n_retried_writes += (attempts - 1);
583 }
584
585 return chunk;
586}
587
588/*
589 * Block retiring for handling a broken block.
590 */
591
592static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
593{
594 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
595
596 yaffs2_checkpt_invalidate(dev);
597
598 yaffs2_clear_oldest_dirty_seq(dev, bi);
599
600 if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
601 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
602 yaffs_trace(YAFFS_TRACE_ALWAYS,
603 "yaffs: Failed to mark bad and erase block %d",
604 flash_block);
605 } else {
606 struct yaffs_ext_tags tags;
607 int chunk_id =
608 flash_block * dev->param.chunks_per_block;
609
610 u8 *buffer = yaffs_get_temp_buffer(dev);
611
612 memset(buffer, 0xff, dev->data_bytes_per_chunk);
613 memset(&tags, 0, sizeof(tags));
614 tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
615 if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
616 dev->chunk_offset,
617 buffer,
618 &tags) != YAFFS_OK)
619 yaffs_trace(YAFFS_TRACE_ALWAYS,
620 "yaffs: Failed to write bad block marker to block %d",
621 flash_block);
622
623 yaffs_release_temp_buffer(dev, buffer);
624 }
625 }
626
627 bi->block_state = YAFFS_BLOCK_STATE_DEAD;
628 bi->gc_prioritise = 0;
629 bi->needs_retiring = 0;
630
631 dev->n_retired_blocks++;
632}
633
634/*---------------- Name handling functions ------------*/
635
636static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
637 const YCHAR *oh_name, int buff_size)
638{
639#ifdef CONFIG_YAFFS_AUTO_UNICODE
640 if (dev->param.auto_unicode) {
641 if (*oh_name) {
642 /* It is an ASCII name, do an ASCII to
643 * unicode conversion */
644 const char *ascii_oh_name = (const char *)oh_name;
645 int n = buff_size - 1;
646 while (n > 0 && *ascii_oh_name) {
647 *name = *ascii_oh_name;
648 name++;
649 ascii_oh_name++;
650 n--;
651 }
652 } else {
653 strncpy(name, oh_name + 1, buff_size - 1);
654 }
655 } else {
656#else
657 (void) dev;
658 {
659#endif
660 strncpy(name, oh_name, buff_size - 1);
661 }
662}
663
664static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
665 const YCHAR *name)
666{
667#ifdef CONFIG_YAFFS_AUTO_UNICODE
668
669 int is_ascii;
670 const YCHAR *w;
671
672 if (dev->param.auto_unicode) {
673
674 is_ascii = 1;
675 w = name;
676
677 /* Figure out if the name will fit in ascii character set */
678 while (is_ascii && *w) {
679 if ((*w) & 0xff00)
680 is_ascii = 0;
681 w++;
682 }
683
684 if (is_ascii) {
685 /* It is an ASCII name, so convert unicode to ascii */
686 char *ascii_oh_name = (char *)oh_name;
687 int n = YAFFS_MAX_NAME_LENGTH - 1;
688 while (n > 0 && *name) {
689 *ascii_oh_name = *name;
690 name++;
691 ascii_oh_name++;
692 n--;
693 }
694 } else {
695 /* Unicode name, so save starting at the second YCHAR */
696 *oh_name = 0;
697 strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
698 }
699 } else {
700#else
701 dev = dev;
702 {
703#endif
704 strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
705 }
706}
707
708static u16 yaffs_calc_name_sum(const YCHAR *name)
709{
710 u16 sum = 0;
711 u16 i = 1;
712
713 if (!name)
714 return 0;
715
716 while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
717
718 /* 0x1f mask is case insensitive */
719 sum += ((*name) & 0x1f) * i;
720 i++;
721 name++;
722 }
723 return sum;
724}
725
726
727void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
728{
729 memset(obj->short_name, 0, sizeof(obj->short_name));
730
731 if (name && !name[0]) {
732 yaffs_fix_null_name(obj, obj->short_name,
733 YAFFS_SHORT_NAME_LENGTH);
734 name = obj->short_name;
735 } else if (name &&
736 strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
737 YAFFS_SHORT_NAME_LENGTH) {
738 strcpy(obj->short_name, name);
739 }
740
741 obj->sum = yaffs_calc_name_sum(name);
742}
743
744void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
745 const struct yaffs_obj_hdr *oh)
746{
747#ifdef CONFIG_YAFFS_AUTO_UNICODE
748 YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
749 memset(tmp_name, 0, sizeof(tmp_name));
750 yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
751 YAFFS_MAX_NAME_LENGTH + 1);
752 yaffs_set_obj_name(obj, tmp_name);
753#else
754 yaffs_set_obj_name(obj, oh->name);
755#endif
756}
757
758loff_t yaffs_max_file_size(struct yaffs_dev *dev)
759{
760 if (sizeof(loff_t) < 8)
761 return YAFFS_MAX_FILE_SIZE_32;
762 else
763 return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
764}
765
766/*-------------------- TNODES -------------------
767
768 * List of spare tnodes
769 * The list is hooked together using the first pointer
770 * in the tnode.
771 */
772
773struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
774{
775 struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
776
777 if (tn) {
778 memset(tn, 0, dev->tnode_size);
779 dev->n_tnodes++;
780 }
781
782 dev->checkpoint_blocks_required = 0; /* force recalculation */
783
784 return tn;
785}
786
787/* FreeTnode frees up a tnode and puts it back on the free list */
788static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
789{
790 yaffs_free_raw_tnode(dev, tn);
791 dev->n_tnodes--;
792 dev->checkpoint_blocks_required = 0; /* force recalculation */
793}
794
795static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
796{
797 yaffs_deinit_raw_tnodes_and_objs(dev);
798 dev->n_obj = 0;
799 dev->n_tnodes = 0;
800}
801
802static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
803 unsigned pos, unsigned val)
804{
805 u32 *map = (u32 *) tn;
806 u32 bit_in_map;
807 u32 bit_in_word;
808 u32 word_in_map;
809 u32 mask;
810
811 pos &= YAFFS_TNODES_LEVEL0_MASK;
812 val >>= dev->chunk_grp_bits;
813
814 bit_in_map = pos * dev->tnode_width;
815 word_in_map = bit_in_map / 32;
816 bit_in_word = bit_in_map & (32 - 1);
817
818 mask = dev->tnode_mask << bit_in_word;
819
820 map[word_in_map] &= ~mask;
821 map[word_in_map] |= (mask & (val << bit_in_word));
822
823 if (dev->tnode_width > (32 - bit_in_word)) {
824 bit_in_word = (32 - bit_in_word);
825 word_in_map++;
826 mask =
827 dev->tnode_mask >> bit_in_word;
828 map[word_in_map] &= ~mask;
829 map[word_in_map] |= (mask & (val >> bit_in_word));
830 }
831}
832
833u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
834 unsigned pos)
835{
836 u32 *map = (u32 *) tn;
837 u32 bit_in_map;
838 u32 bit_in_word;
839 u32 word_in_map;
840 u32 val;
841
842 pos &= YAFFS_TNODES_LEVEL0_MASK;
843
844 bit_in_map = pos * dev->tnode_width;
845 word_in_map = bit_in_map / 32;
846 bit_in_word = bit_in_map & (32 - 1);
847
848 val = map[word_in_map] >> bit_in_word;
849
850 if (dev->tnode_width > (32 - bit_in_word)) {
851 bit_in_word = (32 - bit_in_word);
852 word_in_map++;
853 val |= (map[word_in_map] << bit_in_word);
854 }
855
856 val &= dev->tnode_mask;
857 val <<= dev->chunk_grp_bits;
858
859 return val;
860}
861
862/* ------------------- End of individual tnode manipulation -----------------*/
863
864/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
865 * The look up tree is represented by the top tnode and the number of top_level
866 * in the tree. 0 means only the level 0 tnode is in the tree.
867 */
868
869/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
870struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
871 struct yaffs_file_var *file_struct,
872 u32 chunk_id)
873{
874 struct yaffs_tnode *tn = file_struct->top;
875 u32 i;
876 int required_depth;
877 int level = file_struct->top_level;
878
879 (void) dev;
880
881 /* Check sane level and chunk Id */
882 if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
883 return NULL;
884
885 if (chunk_id > YAFFS_MAX_CHUNK_ID)
886 return NULL;
887
888 /* First check we're tall enough (ie enough top_level) */
889
890 i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
891 required_depth = 0;
892 while (i) {
893 i >>= YAFFS_TNODES_INTERNAL_BITS;
894 required_depth++;
895 }
896
897 if (required_depth > file_struct->top_level)
898 return NULL; /* Not tall enough, so we can't find it */
899
900 /* Traverse down to level 0 */
901 while (level > 0 && tn) {
902 tn = tn->internal[(chunk_id >>
903 (YAFFS_TNODES_LEVEL0_BITS +
904 (level - 1) *
905 YAFFS_TNODES_INTERNAL_BITS)) &
906 YAFFS_TNODES_INTERNAL_MASK];
907 level--;
908 }
909
910 return tn;
911}
912
913/* add_find_tnode_0 finds the level 0 tnode if it exists,
914 * otherwise first expands the tree.
915 * This happens in two steps:
916 * 1. If the tree isn't tall enough, then make it taller.
917 * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
918 *
919 * Used when modifying the tree.
920 *
921 * If the tn argument is NULL, then a fresh tnode will be added otherwise the
922 * specified tn will be plugged into the ttree.
923 */
924
925struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
926 struct yaffs_file_var *file_struct,
927 u32 chunk_id,
928 struct yaffs_tnode *passed_tn)
929{
930 int required_depth;
931 int i;
932 int l;
933 struct yaffs_tnode *tn;
934 u32 x;
935
936 /* Check sane level and page Id */
937 if (file_struct->top_level < 0 ||
938 file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
939 return NULL;
940
941 if (chunk_id > YAFFS_MAX_CHUNK_ID)
942 return NULL;
943
944 /* First check we're tall enough (ie enough top_level) */
945
946 x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
947 required_depth = 0;
948 while (x) {
949 x >>= YAFFS_TNODES_INTERNAL_BITS;
950 required_depth++;
951 }
952
953 if (required_depth > file_struct->top_level) {
954 /* Not tall enough, gotta make the tree taller */
955 for (i = file_struct->top_level; i < required_depth; i++) {
956
957 tn = yaffs_get_tnode(dev);
958
959 if (tn) {
960 tn->internal[0] = file_struct->top;
961 file_struct->top = tn;
962 file_struct->top_level++;
963 } else {
964 yaffs_trace(YAFFS_TRACE_ERROR,
965 "yaffs: no more tnodes");
966 return NULL;
967 }
968 }
969 }
970
971 /* Traverse down to level 0, adding anything we need */
972
973 l = file_struct->top_level;
974 tn = file_struct->top;
975
976 if (l > 0) {
977 while (l > 0 && tn) {
978 x = (chunk_id >>
979 (YAFFS_TNODES_LEVEL0_BITS +
980 (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
981 YAFFS_TNODES_INTERNAL_MASK;
982
983 if ((l > 1) && !tn->internal[x]) {
984 /* Add missing non-level-zero tnode */
985 tn->internal[x] = yaffs_get_tnode(dev);
986 if (!tn->internal[x])
987 return NULL;
988 } else if (l == 1) {
989 /* Looking from level 1 at level 0 */
990 if (passed_tn) {
991 /* If we already have one, release it */
992 if (tn->internal[x])
993 yaffs_free_tnode(dev,
994 tn->internal[x]);
995 tn->internal[x] = passed_tn;
996
997 } else if (!tn->internal[x]) {
998 /* Don't have one, none passed in */
999 tn->internal[x] = yaffs_get_tnode(dev);
1000 if (!tn->internal[x])
1001 return NULL;
1002 }
1003 }
1004
1005 tn = tn->internal[x];
1006 l--;
1007 }
1008 } else {
1009 /* top is level 0 */
1010 if (passed_tn) {
1011 memcpy(tn, passed_tn,
1012 (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
1013 yaffs_free_tnode(dev, passed_tn);
1014 }
1015 }
1016
1017 return tn;
1018}
1019
1020static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
1021 int chunk_obj)
1022{
1023 return (tags->chunk_id == (u32)chunk_obj &&
1024 tags->obj_id == (u32)obj_id &&
1025 !tags->is_deleted) ? 1 : 0;
1026
1027}
1028
1029static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
1030 struct yaffs_ext_tags *tags, int obj_id,
1031 int inode_chunk)
1032{
1033 int j;
1034
1035 for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
1036 if (yaffs_check_chunk_bit
1037 (dev, the_chunk / dev->param.chunks_per_block,
1038 the_chunk % dev->param.chunks_per_block)) {
1039
1040 if (dev->chunk_grp_size == 1)
1041 return the_chunk;
1042 else {
1043 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
1044 tags);
1045 if (yaffs_tags_match(tags,
1046 obj_id, inode_chunk)) {
1047 /* found it; */
1048 return the_chunk;
1049 }
1050 }
1051 }
1052 the_chunk++;
1053 }
1054 return -1;
1055}
1056
1057int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1058 struct yaffs_ext_tags *tags)
1059{
1060 /*Get the Tnode, then get the level 0 offset chunk offset */
1061 struct yaffs_tnode *tn;
1062 int the_chunk = -1;
1063 struct yaffs_ext_tags local_tags;
1064 int ret_val = -1;
1065 struct yaffs_dev *dev = in->my_dev;
1066
1067 if (!tags) {
1068 /* Passed a NULL, so use our own tags space */
1069 tags = &local_tags;
1070 }
1071
1072 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1073
1074 if (!tn)
1075 return ret_val;
1076
1077 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1078
1079 ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1080 inode_chunk);
1081 return ret_val;
1082}
1083
1084static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
1085 struct yaffs_ext_tags *tags)
1086{
1087 /* Get the Tnode, then get the level 0 offset chunk offset */
1088 struct yaffs_tnode *tn;
1089 int the_chunk = -1;
1090 struct yaffs_ext_tags local_tags;
1091 struct yaffs_dev *dev = in->my_dev;
1092 int ret_val = -1;
1093
1094 if (!tags) {
1095 /* Passed a NULL, so use our own tags space */
1096 tags = &local_tags;
1097 }
1098
1099 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
1100
1101 if (!tn)
1102 return ret_val;
1103
1104 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
1105
1106 ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
1107 inode_chunk);
1108
1109 /* Delete the entry in the filestructure (if found) */
1110 if (ret_val != -1)
1111 yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
1112
1113 return ret_val;
1114}
1115
1116int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
1117 int nand_chunk, int in_scan)
1118{
1119 /* NB in_scan is zero unless scanning.
1120 * For forward scanning, in_scan is > 0;
1121 * for backward scanning in_scan is < 0
1122 *
1123 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
1124 */
1125
1126 struct yaffs_tnode *tn;
1127 struct yaffs_dev *dev = in->my_dev;
1128 int existing_cunk;
1129 struct yaffs_ext_tags existing_tags;
1130 struct yaffs_ext_tags new_tags;
1131 unsigned existing_serial, new_serial;
1132
1133 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
1134 /* Just ignore an attempt at putting a chunk into a non-file
1135 * during scanning.
1136 * If it is not during Scanning then something went wrong!
1137 */
1138 if (!in_scan) {
1139 yaffs_trace(YAFFS_TRACE_ERROR,
1140 "yaffs tragedy:attempt to put data chunk into a non-file"
1141 );
1142 BUG();
1143 }
1144
1145 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1146 return YAFFS_OK;
1147 }
1148
1149 tn = yaffs_add_find_tnode_0(dev,
1150 &in->variant.file_variant,
1151 inode_chunk, NULL);
1152 if (!tn)
1153 return YAFFS_FAIL;
1154
1155 if (!nand_chunk)
1156 /* Dummy insert, bail now */
1157 return YAFFS_OK;
1158
1159 existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
1160
1161 if (in_scan != 0) {
1162 /* If we're scanning then we need to test for duplicates
1163 * NB This does not need to be efficient since it should only
1164 * happen when the power fails during a write, then only one
1165 * chunk should ever be affected.
1166 *
1167 * Correction for YAFFS2: This could happen quite a lot and we
1168 * need to think about efficiency! TODO
1169 * Update: For backward scanning we don't need to re-read tags
1170 * so this is quite cheap.
1171 */
1172
1173 if (existing_cunk > 0) {
1174 /* NB Right now existing chunk will not be real
1175 * chunk_id if the chunk group size > 1
1176 * thus we have to do a FindChunkInFile to get the
1177 * real chunk id.
1178 *
1179 * We have a duplicate now we need to decide which
1180 * one to use:
1181 *
1182 * Backwards scanning YAFFS2: The old one is what
1183 * we use, dump the new one.
1184 * YAFFS1: Get both sets of tags and compare serial
1185 * numbers.
1186 */
1187
1188 if (in_scan > 0) {
1189 /* Only do this for forward scanning */
1190 yaffs_rd_chunk_tags_nand(dev,
1191 nand_chunk,
1192 NULL, &new_tags);
1193
1194 /* Do a proper find */
1195 existing_cunk =
1196 yaffs_find_chunk_in_file(in, inode_chunk,
1197 &existing_tags);
1198 }
1199
1200 if (existing_cunk <= 0) {
1201 /*Hoosterman - how did this happen? */
1202
1203 yaffs_trace(YAFFS_TRACE_ERROR,
1204 "yaffs tragedy: existing chunk < 0 in scan"
1205 );
1206
1207 }
1208
1209 /* NB The deleted flags should be false, otherwise
1210 * the chunks will not be loaded during a scan
1211 */
1212
1213 if (in_scan > 0) {
1214 new_serial = new_tags.serial_number;
1215 existing_serial = existing_tags.serial_number;
1216 }
1217
1218 if ((in_scan > 0) &&
1219 (existing_cunk <= 0 ||
1220 ((existing_serial + 1) & 3) == new_serial)) {
1221 /* Forward scanning.
1222 * Use new
1223 * Delete the old one and drop through to
1224 * update the tnode
1225 */
1226 yaffs_chunk_del(dev, existing_cunk, 1,
1227 __LINE__);
1228 } else {
1229 /* Backward scanning or we want to use the
1230 * existing one
1231 * Delete the new one and return early so that
1232 * the tnode isn't changed
1233 */
1234 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
1235 return YAFFS_OK;
1236 }
1237 }
1238
1239 }
1240
1241 if (existing_cunk == 0)
1242 in->n_data_chunks++;
1243
1244 yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
1245
1246 return YAFFS_OK;
1247}
1248
1249static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
1250{
1251 struct yaffs_block_info *the_block;
1252 unsigned block_no;
1253
1254 yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
1255
1256 block_no = chunk / dev->param.chunks_per_block;
1257 the_block = yaffs_get_block_info(dev, block_no);
1258 if (the_block) {
1259 the_block->soft_del_pages++;
1260 dev->n_free_chunks++;
1261 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
1262 }
1263}
1264
1265/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
1266 * the chunks in the file.
1267 * All soft deleting does is increment the block's softdelete count and pulls
1268 * the chunk out of the tnode.
1269 * Thus, essentially this is the same as DeleteWorker except that the chunks
1270 * are soft deleted.
1271 */
1272
1273static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
1274 u32 level, int chunk_offset)
1275{
1276 int i;
1277 int the_chunk;
1278 int all_done = 1;
1279 struct yaffs_dev *dev = in->my_dev;
1280
1281 if (!tn)
1282 return 1;
1283
1284 if (level > 0) {
1285 for (i = YAFFS_NTNODES_INTERNAL - 1;
1286 all_done && i >= 0;
1287 i--) {
1288 if (tn->internal[i]) {
1289 all_done =
1290 yaffs_soft_del_worker(in,
1291 tn->internal[i],
1292 level - 1,
1293 (chunk_offset <<
1294 YAFFS_TNODES_INTERNAL_BITS)
1295 + i);
1296 if (all_done) {
1297 yaffs_free_tnode(dev,
1298 tn->internal[i]);
1299 tn->internal[i] = NULL;
1300 } else {
1301 /* Can this happen? */
1302 }
1303 }
1304 }
1305 return (all_done) ? 1 : 0;
1306 }
1307
1308 /* level 0 */
1309 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
1310 the_chunk = yaffs_get_group_base(dev, tn, i);
1311 if (the_chunk) {
1312 yaffs_soft_del_chunk(dev, the_chunk);
1313 yaffs_load_tnode_0(dev, tn, i, 0);
1314 }
1315 }
1316 return 1;
1317}
1318
1319static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
1320{
1321 struct yaffs_dev *dev = obj->my_dev;
1322 struct yaffs_obj *parent;
1323
1324 yaffs_verify_obj_in_dir(obj);
1325 parent = obj->parent;
1326
1327 yaffs_verify_dir(parent);
1328
1329 if (dev && dev->param.remove_obj_fn)
1330 dev->param.remove_obj_fn(obj);
1331
1332 list_del_init(&obj->siblings);
1333 obj->parent = NULL;
1334
1335 yaffs_verify_dir(parent);
1336}
1337
1338void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
1339{
1340 if (!directory) {
1341 yaffs_trace(YAFFS_TRACE_ALWAYS,
1342 "tragedy: Trying to add an object to a null pointer directory"
1343 );
1344 BUG();
1345 return;
1346 }
1347 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1348 yaffs_trace(YAFFS_TRACE_ALWAYS,
1349 "tragedy: Trying to add an object to a non-directory"
1350 );
1351 BUG();
1352 }
1353
1354 if (obj->siblings.prev == NULL) {
1355 /* Not initialised */
1356 BUG();
1357 }
1358
1359 yaffs_verify_dir(directory);
1360
1361 yaffs_remove_obj_from_dir(obj);
1362
1363 /* Now add it */
1364 list_add(&obj->siblings, &directory->variant.dir_variant.children);
1365 obj->parent = directory;
1366
1367 if (directory == obj->my_dev->unlinked_dir
1368 || directory == obj->my_dev->del_dir) {
1369 obj->unlinked = 1;
1370 obj->my_dev->n_unlinked_files++;
1371 obj->rename_allowed = 0;
1372 }
1373
1374 yaffs_verify_dir(directory);
1375 yaffs_verify_obj_in_dir(obj);
1376}
1377
1378static int yaffs_change_obj_name(struct yaffs_obj *obj,
1379 struct yaffs_obj *new_dir,
1380 const YCHAR *new_name, int force, int shadows)
1381{
1382 int unlink_op;
1383 int del_op;
1384 struct yaffs_obj *existing_target;
1385
1386 if (new_dir == NULL)
1387 new_dir = obj->parent; /* use the old directory */
1388
1389 if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
1390 yaffs_trace(YAFFS_TRACE_ALWAYS,
1391 "tragedy: yaffs_change_obj_name: new_dir is not a directory"
1392 );
1393 BUG();
1394 }
1395
1396 unlink_op = (new_dir == obj->my_dev->unlinked_dir);
1397 del_op = (new_dir == obj->my_dev->del_dir);
1398
1399 existing_target = yaffs_find_by_name(new_dir, new_name);
1400
1401 /* If the object is a file going into the unlinked directory,
1402 * then it is OK to just stuff it in since duplicate names are OK.
1403 * else only proceed if the new name does not exist and we're putting
1404 * it into a directory.
1405 */
1406 if (!(unlink_op || del_op || force ||
1407 shadows > 0 || !existing_target) ||
1408 new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
1409 return YAFFS_FAIL;
1410
1411 yaffs_set_obj_name(obj, new_name);
1412 obj->dirty = 1;
1413 yaffs_add_obj_to_dir(new_dir, obj);
1414
1415 if (unlink_op)
1416 obj->unlinked = 1;
1417
1418 /* If it is a deletion then we mark it as a shrink for gc */
1419 if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
1420 return YAFFS_OK;
1421
1422 return YAFFS_FAIL;
1423}
1424
1425/*------------------------ Short Operations Cache ------------------------------
1426 * In many situations where there is no high level buffering a lot of
1427 * reads might be short sequential reads, and a lot of writes may be short
1428 * sequential writes. eg. scanning/writing a jpeg file.
1429 * In these cases, a short read/write cache can provide a huge perfomance
1430 * benefit with dumb-as-a-rock code.
1431 * In Linux, the page cache provides read buffering and the short op cache
1432 * provides write buffering.
1433 *
1434 * There are a small number (~10) of cache chunks per device so that we don't
1435 * need a very intelligent search.
1436 */
1437
1438static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
1439{
1440 struct yaffs_dev *dev = obj->my_dev;
1441 int i;
1442 struct yaffs_cache *cache;
1443 int n_caches = obj->my_dev->param.n_caches;
1444
1445 for (i = 0; i < n_caches; i++) {
1446 cache = &dev->cache[i];
1447 if (cache->object == obj && cache->dirty)
1448 return 1;
1449 }
1450
1451 return 0;
1452}
1453
1454static void yaffs_flush_single_cache(struct yaffs_cache *cache, int discard)
1455{
1456
1457 if (!cache || cache->locked)
1458 return;
1459
1460 /* Write it out and free it up if need be.*/
1461 if (cache->dirty) {
1462 yaffs_wr_data_obj(cache->object,
1463 cache->chunk_id,
1464 cache->data,
1465 cache->n_bytes,
1466 1);
1467
1468 cache->dirty = 0;
1469 }
1470
1471 if (discard)
1472 cache->object = NULL;
1473}
1474
1475static void yaffs_flush_file_cache(struct yaffs_obj *obj, int discard)
1476{
1477 struct yaffs_dev *dev = obj->my_dev;
1478 int i;
1479 struct yaffs_cache *cache;
1480 int n_caches = obj->my_dev->param.n_caches;
1481
1482 if (n_caches < 1)
1483 return;
1484
1485
1486 /* Find the chunks for this object and flush them. */
1487 for (i = 0; i < n_caches; i++) {
1488 cache = &dev->cache[i];
1489 if (cache->object == obj)
1490 yaffs_flush_single_cache(cache, discard);
1491 }
1492
1493}
1494
1495
1496void yaffs_flush_whole_cache(struct yaffs_dev *dev, int discard)
1497{
1498 struct yaffs_obj *obj;
1499 int n_caches = dev->param.n_caches;
1500 int i;
1501
1502 /* Find a dirty object in the cache and flush it...
1503 * until there are no further dirty objects.
1504 */
1505 do {
1506 obj = NULL;
1507 for (i = 0; i < n_caches && !obj; i++) {
1508 if (dev->cache[i].object && dev->cache[i].dirty)
1509 obj = dev->cache[i].object;
1510 }
1511 if (obj)
1512 yaffs_flush_file_cache(obj, discard);
1513 } while (obj);
1514
1515}
1516
1517/* Grab us an unused cache chunk for use.
1518 * First look for an empty one.
1519 * Then look for the least recently used non-dirty one.
1520 * Then look for the least recently used dirty one...., flush and look again.
1521 */
1522static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
1523{
1524 u32 i;
1525
1526 if (dev->param.n_caches > 0) {
1527 for (i = 0; i < dev->param.n_caches; i++) {
1528 if (!dev->cache[i].object)
1529 return &dev->cache[i];
1530 }
1531 }
1532
1533 return NULL;
1534}
1535
1536static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
1537{
1538 struct yaffs_cache *cache;
1539 int usage;
1540 u32 i;
1541
1542 if (dev->param.n_caches < 1)
1543 return NULL;
1544
1545 /* First look for an unused cache */
1546
1547 cache = yaffs_grab_chunk_worker(dev);
1548
1549 if (cache)
1550 return cache;
1551
1552 /*
1553 * Thery were all in use.
1554 * Find the LRU cache and flush it if it is dirty.
1555 */
1556
1557 usage = -1;
1558 cache = NULL;
1559
1560 for (i = 0; i < dev->param.n_caches; i++) {
1561 if (dev->cache[i].object &&
1562 !dev->cache[i].locked &&
1563 (dev->cache[i].last_use < usage || !cache)) {
1564 usage = dev->cache[i].last_use;
1565 cache = &dev->cache[i];
1566 }
1567 }
1568
1569#if 1
1570 yaffs_flush_single_cache(cache, 1);
1571#else
1572 yaffs_flush_file_cache(cache->object, 1);
1573 cache = yaffs_grab_chunk_worker(dev);
1574#endif
1575
1576 return cache;
1577}
1578
1579/* Find a cached chunk */
1580static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
1581 int chunk_id)
1582{
1583 struct yaffs_dev *dev = obj->my_dev;
1584 u32 i;
1585
1586 if (dev->param.n_caches < 1)
1587 return NULL;
1588
1589 for (i = 0; i < dev->param.n_caches; i++) {
1590 if (dev->cache[i].object == obj &&
1591 dev->cache[i].chunk_id == chunk_id) {
1592 dev->cache_hits++;
1593
1594 return &dev->cache[i];
1595 }
1596 }
1597 return NULL;
1598}
1599
1600/* Mark the chunk for the least recently used algorithym */
1601static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
1602 int is_write)
1603{
1604 u32 i;
1605
1606 if (dev->param.n_caches < 1)
1607 return;
1608
1609 if (dev->cache_last_use < 0 ||
1610 dev->cache_last_use > 100000000) {
1611 /* Reset the cache usages */
1612 for (i = 1; i < dev->param.n_caches; i++)
1613 dev->cache[i].last_use = 0;
1614
1615 dev->cache_last_use = 0;
1616 }
1617 dev->cache_last_use++;
1618 cache->last_use = dev->cache_last_use;
1619
1620 if (is_write)
1621 cache->dirty = 1;
1622}
1623
1624/* Invalidate a single cache page.
1625 * Do this when a whole page gets written,
1626 * ie the short cache for this page is no longer valid.
1627 */
1628static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
1629{
1630 struct yaffs_cache *cache;
1631
1632 if (object->my_dev->param.n_caches > 0) {
1633 cache = yaffs_find_chunk_cache(object, chunk_id);
1634
1635 if (cache)
1636 cache->object = NULL;
1637 }
1638}
1639
1640/* Invalidate all the cache pages associated with this object
1641 * Do this whenever ther file is deleted or resized.
1642 */
1643static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
1644{
1645 u32 i;
1646 struct yaffs_dev *dev = in->my_dev;
1647
1648 if (dev->param.n_caches > 0) {
1649 /* Invalidate it. */
1650 for (i = 0; i < dev->param.n_caches; i++) {
1651 if (dev->cache[i].object == in)
1652 dev->cache[i].object = NULL;
1653 }
1654 }
1655}
1656
1657static void yaffs_unhash_obj(struct yaffs_obj *obj)
1658{
1659 int bucket;
1660 struct yaffs_dev *dev = obj->my_dev;
1661
1662 /* If it is still linked into the bucket list, free from the list */
1663 if (!list_empty(&obj->hash_link)) {
1664 list_del_init(&obj->hash_link);
1665 bucket = yaffs_hash_fn(obj->obj_id);
1666 dev->obj_bucket[bucket].count--;
1667 }
1668}
1669
1670/* FreeObject frees up a Object and puts it back on the free list */
1671static void yaffs_free_obj(struct yaffs_obj *obj)
1672{
1673 struct yaffs_dev *dev;
1674
1675 if (!obj) {
1676 BUG();
1677 return;
1678 }
1679 dev = obj->my_dev;
1680 yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
1681 obj, obj->my_inode);
1682 if (obj->parent)
1683 BUG();
1684 if (!list_empty(&obj->siblings))
1685 BUG();
1686
1687 if (obj->my_inode) {
1688 /* We're still hooked up to a cached inode.
1689 * Don't delete now, but mark for later deletion
1690 */
1691 obj->defered_free = 1;
1692 return;
1693 }
1694
1695 yaffs_unhash_obj(obj);
1696
1697 yaffs_free_raw_obj(dev, obj);
1698 dev->n_obj--;
1699 dev->checkpoint_blocks_required = 0; /* force recalculation */
1700}
1701
1702void yaffs_handle_defered_free(struct yaffs_obj *obj)
1703{
1704 if (obj->defered_free)
1705 yaffs_free_obj(obj);
1706}
1707
1708static int yaffs_generic_obj_del(struct yaffs_obj *in)
1709{
1710 /* Iinvalidate the file's data in the cache, without flushing. */
1711 yaffs_invalidate_whole_cache(in);
1712
1713 if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
1714 /* Move to unlinked directory so we have a deletion record */
1715 yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
1716 0);
1717 }
1718
1719 yaffs_remove_obj_from_dir(in);
1720 yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
1721 in->hdr_chunk = 0;
1722
1723 yaffs_free_obj(in);
1724 return YAFFS_OK;
1725
1726}
1727
1728static void yaffs_soft_del_file(struct yaffs_obj *obj)
1729{
1730 if (!obj->deleted ||
1731 obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
1732 obj->soft_del)
1733 return;
1734
1735 if (obj->n_data_chunks <= 0) {
1736 /* Empty file with no duplicate object headers,
1737 * just delete it immediately */
1738 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
1739 obj->variant.file_variant.top = NULL;
1740 yaffs_trace(YAFFS_TRACE_TRACING,
1741 "yaffs: Deleting empty file %d",
1742 obj->obj_id);
1743 yaffs_generic_obj_del(obj);
1744 } else {
1745 yaffs_soft_del_worker(obj,
1746 obj->variant.file_variant.top,
1747 obj->variant.
1748 file_variant.top_level, 0);
1749 obj->soft_del = 1;
1750 }
1751}
1752
1753/* Pruning removes any part of the file structure tree that is beyond the
1754 * bounds of the file (ie that does not point to chunks).
1755 *
1756 * A file should only get pruned when its size is reduced.
1757 *
1758 * Before pruning, the chunks must be pulled from the tree and the
1759 * level 0 tnode entries must be zeroed out.
1760 * Could also use this for file deletion, but that's probably better handled
1761 * by a special case.
1762 *
1763 * This function is recursive. For levels > 0 the function is called again on
1764 * any sub-tree. For level == 0 we just check if the sub-tree has data.
1765 * If there is no data in a subtree then it is pruned.
1766 */
1767
1768static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
1769 struct yaffs_tnode *tn, u32 level,
1770 int del0)
1771{
1772 int i;
1773 int has_data;
1774
1775 if (!tn)
1776 return tn;
1777
1778 has_data = 0;
1779
1780 if (level > 0) {
1781 for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
1782 if (tn->internal[i]) {
1783 tn->internal[i] =
1784 yaffs_prune_worker(dev,
1785 tn->internal[i],
1786 level - 1,
1787 (i == 0) ? del0 : 1);
1788 }
1789
1790 if (tn->internal[i])
1791 has_data++;
1792 }
1793 } else {
1794 int tnode_size_u32 = dev->tnode_size / sizeof(u32);
1795 u32 *map = (u32 *) tn;
1796
1797 for (i = 0; !has_data && i < tnode_size_u32; i++) {
1798 if (map[i])
1799 has_data++;
1800 }
1801 }
1802
1803 if (has_data == 0 && del0) {
1804 /* Free and return NULL */
1805 yaffs_free_tnode(dev, tn);
1806 tn = NULL;
1807 }
1808 return tn;
1809}
1810
1811static int yaffs_prune_tree(struct yaffs_dev *dev,
1812 struct yaffs_file_var *file_struct)
1813{
1814 int i;
1815 int has_data;
1816 int done = 0;
1817 struct yaffs_tnode *tn;
1818
1819 if (file_struct->top_level < 1)
1820 return YAFFS_OK;
1821
1822 file_struct->top =
1823 yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
1824
1825 /* Now we have a tree with all the non-zero branches NULL but
1826 * the height is the same as it was.
1827 * Let's see if we can trim internal tnodes to shorten the tree.
1828 * We can do this if only the 0th element in the tnode is in use
1829 * (ie all the non-zero are NULL)
1830 */
1831
1832 while (file_struct->top_level && !done) {
1833 tn = file_struct->top;
1834
1835 has_data = 0;
1836 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
1837 if (tn->internal[i])
1838 has_data++;
1839 }
1840
1841 if (!has_data) {
1842 file_struct->top = tn->internal[0];
1843 file_struct->top_level--;
1844 yaffs_free_tnode(dev, tn);
1845 } else {
1846 done = 1;
1847 }
1848 }
1849
1850 return YAFFS_OK;
1851}
1852
1853/*-------------------- End of File Structure functions.-------------------*/
1854
1855/* alloc_empty_obj gets us a clean Object.*/
1856static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
1857{
1858 struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
1859
1860 if (!obj)
1861 return obj;
1862
1863 dev->n_obj++;
1864
1865 /* Now sweeten it up... */
1866
1867 memset(obj, 0, sizeof(struct yaffs_obj));
1868 obj->being_created = 1;
1869
1870 obj->my_dev = dev;
1871 obj->hdr_chunk = 0;
1872 obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
1873 INIT_LIST_HEAD(&(obj->hard_links));
1874 INIT_LIST_HEAD(&(obj->hash_link));
1875 INIT_LIST_HEAD(&obj->siblings);
1876
1877 /* Now make the directory sane */
1878 if (dev->root_dir) {
1879 obj->parent = dev->root_dir;
1880 list_add(&(obj->siblings),
1881 &dev->root_dir->variant.dir_variant.children);
1882 }
1883
1884 /* Add it to the lost and found directory.
1885 * NB Can't put root or lost-n-found in lost-n-found so
1886 * check if lost-n-found exists first
1887 */
1888 if (dev->lost_n_found)
1889 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
1890
1891 obj->being_created = 0;
1892
1893 dev->checkpoint_blocks_required = 0; /* force recalculation */
1894
1895 return obj;
1896}
1897
1898static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
1899{
1900 int i;
1901 int l = 999;
1902 int lowest = 999999;
1903
1904 /* Search for the shortest list or one that
1905 * isn't too long.
1906 */
1907
1908 for (i = 0; i < 10 && lowest > 4; i++) {
1909 dev->bucket_finder++;
1910 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
1911 if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
1912 lowest = dev->obj_bucket[dev->bucket_finder].count;
1913 l = dev->bucket_finder;
1914 }
1915 }
1916
1917 return l;
1918}
1919
1920static int yaffs_new_obj_id(struct yaffs_dev *dev)
1921{
1922 int bucket = yaffs_find_nice_bucket(dev);
1923 int found = 0;
1924 struct list_head *i;
1925 u32 n = (u32) bucket;
1926
1927 /*
1928 * Now find an object value that has not already been taken
1929 * by scanning the list, incrementing each time by number of buckets.
1930 */
1931 while (!found) {
1932 found = 1;
1933 n += YAFFS_NOBJECT_BUCKETS;
1934 list_for_each(i, &dev->obj_bucket[bucket].list) {
1935 /* Check if this value is already taken. */
1936 if (i && list_entry(i, struct yaffs_obj,
1937 hash_link)->obj_id == n)
1938 found = 0;
1939 }
1940 }
1941 return n;
1942}
1943
1944static void yaffs_hash_obj(struct yaffs_obj *in)
1945{
1946 int bucket = yaffs_hash_fn(in->obj_id);
1947 struct yaffs_dev *dev = in->my_dev;
1948
1949 list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
1950 dev->obj_bucket[bucket].count++;
1951}
1952
1953struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
1954{
1955 int bucket = yaffs_hash_fn(number);
1956 struct list_head *i;
1957 struct yaffs_obj *in;
1958
1959 list_for_each(i, &dev->obj_bucket[bucket].list) {
1960 /* Look if it is in the list */
1961 in = list_entry(i, struct yaffs_obj, hash_link);
1962 if (in->obj_id == number) {
1963 /* Don't show if it is defered free */
1964 if (in->defered_free)
1965 return NULL;
1966 return in;
1967 }
1968 }
1969
1970 return NULL;
1971}
1972
1973static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
1974 enum yaffs_obj_type type)
1975{
1976 struct yaffs_obj *the_obj = NULL;
1977 struct yaffs_tnode *tn = NULL;
1978
1979 if (number < 0)
1980 number = yaffs_new_obj_id(dev);
1981
1982 if (type == YAFFS_OBJECT_TYPE_FILE) {
1983 tn = yaffs_get_tnode(dev);
1984 if (!tn)
1985 return NULL;
1986 }
1987
1988 the_obj = yaffs_alloc_empty_obj(dev);
1989 if (!the_obj) {
1990 if (tn)
1991 yaffs_free_tnode(dev, tn);
1992 return NULL;
1993 }
1994
1995 the_obj->fake = 0;
1996 the_obj->rename_allowed = 1;
1997 the_obj->unlink_allowed = 1;
1998 the_obj->obj_id = number;
1999 yaffs_hash_obj(the_obj);
2000 the_obj->variant_type = type;
2001 yaffs_load_current_time(the_obj, 1, 1);
2002
2003 switch (type) {
2004 case YAFFS_OBJECT_TYPE_FILE:
2005 the_obj->variant.file_variant.file_size = 0;
2006 the_obj->variant.file_variant.stored_size = 0;
2007 the_obj->variant.file_variant.shrink_size =
2008 yaffs_max_file_size(dev);
2009 the_obj->variant.file_variant.top_level = 0;
2010 the_obj->variant.file_variant.top = tn;
2011 break;
2012 case YAFFS_OBJECT_TYPE_DIRECTORY:
2013 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
2014 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
2015 break;
2016 case YAFFS_OBJECT_TYPE_SYMLINK:
2017 case YAFFS_OBJECT_TYPE_HARDLINK:
2018 case YAFFS_OBJECT_TYPE_SPECIAL:
2019 /* No action required */
2020 break;
2021 case YAFFS_OBJECT_TYPE_UNKNOWN:
2022 /* todo this should not happen */
2023 break;
2024 }
2025 return the_obj;
2026}
2027
2028static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
2029 int number, u32 mode)
2030{
2031
2032 struct yaffs_obj *obj =
2033 yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
2034
2035 if (!obj)
2036 return NULL;
2037
2038 obj->fake = 1; /* it is fake so it might not use NAND */
2039 obj->rename_allowed = 0;
2040 obj->unlink_allowed = 0;
2041 obj->deleted = 0;
2042 obj->unlinked = 0;
2043 obj->yst_mode = mode;
2044 obj->my_dev = dev;
2045 obj->hdr_chunk = 0; /* Not a valid chunk. */
2046 return obj;
2047
2048}
2049
2050
2051static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
2052{
2053 int i;
2054
2055 dev->n_obj = 0;
2056 dev->n_tnodes = 0;
2057 yaffs_init_raw_tnodes_and_objs(dev);
2058
2059 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
2060 INIT_LIST_HEAD(&dev->obj_bucket[i].list);
2061 dev->obj_bucket[i].count = 0;
2062 }
2063}
2064
2065struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
2066 int number,
2067 enum yaffs_obj_type type)
2068{
2069 struct yaffs_obj *the_obj = NULL;
2070
2071 if (number > 0)
2072 the_obj = yaffs_find_by_number(dev, number);
2073
2074 if (!the_obj)
2075 the_obj = yaffs_new_obj(dev, number, type);
2076
2077 return the_obj;
2078
2079}
2080
2081YCHAR *yaffs_clone_str(const YCHAR *str)
2082{
2083 YCHAR *new_str = NULL;
2084 int len;
2085
2086 if (!str)
2087 str = _Y("");
2088
2089 len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
2090 new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
2091 if (new_str) {
2092 strncpy(new_str, str, len);
2093 new_str[len] = 0;
2094 }
2095 return new_str;
2096
2097}
2098/*
2099 *yaffs_update_parent() handles fixing a directories mtime and ctime when a new
2100 * link (ie. name) is created or deleted in the directory.
2101 *
2102 * ie.
2103 * create dir/a : update dir's mtime/ctime
2104 * rm dir/a: update dir's mtime/ctime
2105 * modify dir/a: don't update dir's mtimme/ctime
2106 *
2107 * This can be handled immediately or defered. Defering helps reduce the number
2108 * of updates when many files in a directory are changed within a brief period.
2109 *
2110 * If the directory updating is defered then yaffs_update_dirty_dirs must be
2111 * called periodically.
2112 */
2113
2114static void yaffs_update_parent(struct yaffs_obj *obj)
2115{
2116 struct yaffs_dev *dev;
2117
2118 if (!obj)
2119 return;
2120 dev = obj->my_dev;
2121 obj->dirty = 1;
2122 yaffs_load_current_time(obj, 0, 1);
2123 if (dev->param.defered_dir_update) {
2124 struct list_head *link = &obj->variant.dir_variant.dirty;
2125
2126 if (list_empty(link)) {
2127 list_add(link, &dev->dirty_dirs);
2128 yaffs_trace(YAFFS_TRACE_BACKGROUND,
2129 "Added object %d to dirty directories",
2130 obj->obj_id);
2131 }
2132
2133 } else {
2134 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2135 }
2136}
2137
2138void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
2139{
2140 struct list_head *link;
2141 struct yaffs_obj *obj;
2142 struct yaffs_dir_var *d_s;
2143 union yaffs_obj_var *o_v;
2144
2145 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
2146
2147 while (!list_empty(&dev->dirty_dirs)) {
2148 link = dev->dirty_dirs.next;
2149 list_del_init(link);
2150
2151 d_s = list_entry(link, struct yaffs_dir_var, dirty);
2152 o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
2153 obj = list_entry(o_v, struct yaffs_obj, variant);
2154
2155 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
2156 obj->obj_id);
2157
2158 if (obj->dirty)
2159 yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
2160 }
2161}
2162
2163/*
2164 * Mknod (create) a new object.
2165 * equiv_obj only has meaning for a hard link;
2166 * alias_str only has meaning for a symlink.
2167 * rdev only has meaning for devices (a subset of special objects)
2168 */
2169
2170static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
2171 struct yaffs_obj *parent,
2172 const YCHAR *name,
2173 u32 mode,
2174 u32 uid,
2175 u32 gid,
2176 struct yaffs_obj *equiv_obj,
2177 const YCHAR *alias_str, u32 rdev)
2178{
2179 struct yaffs_obj *in;
2180 YCHAR *str = NULL;
2181 struct yaffs_dev *dev = parent->my_dev;
2182
2183 /* Check if the entry exists.
2184 * If it does then fail the call since we don't want a dup. */
2185 if (yaffs_find_by_name(parent, name))
2186 return NULL;
2187
2188 if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
2189 str = yaffs_clone_str(alias_str);
2190 if (!str)
2191 return NULL;
2192 }
2193
2194 in = yaffs_new_obj(dev, -1, type);
2195
2196 if (!in) {
2197 kfree(str);
2198 return NULL;
2199 }
2200
2201 in->hdr_chunk = 0;
2202 in->valid = 1;
2203 in->variant_type = type;
2204
2205 in->yst_mode = mode;
2206
2207 yaffs_attribs_init(in, gid, uid, rdev);
2208
2209 in->n_data_chunks = 0;
2210
2211 yaffs_set_obj_name(in, name);
2212 in->dirty = 1;
2213
2214 yaffs_add_obj_to_dir(parent, in);
2215
2216 in->my_dev = parent->my_dev;
2217
2218 switch (type) {
2219 case YAFFS_OBJECT_TYPE_SYMLINK:
2220 in->variant.symlink_variant.alias = str;
2221 break;
2222 case YAFFS_OBJECT_TYPE_HARDLINK:
2223 in->variant.hardlink_variant.equiv_obj = equiv_obj;
2224 in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
2225 list_add(&in->hard_links, &equiv_obj->hard_links);
2226 break;
2227 case YAFFS_OBJECT_TYPE_FILE:
2228 case YAFFS_OBJECT_TYPE_DIRECTORY:
2229 case YAFFS_OBJECT_TYPE_SPECIAL:
2230 case YAFFS_OBJECT_TYPE_UNKNOWN:
2231 /* do nothing */
2232 break;
2233 }
2234
2235 if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
2236 /* Could not create the object header, fail */
2237 yaffs_del_obj(in);
2238 in = NULL;
2239 }
2240
2241 if (in)
2242 yaffs_update_parent(parent);
2243
2244 return in;
2245}
2246
2247struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
2248 const YCHAR *name, u32 mode, u32 uid,
2249 u32 gid)
2250{
2251 return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
2252 uid, gid, NULL, NULL, 0);
2253}
2254
2255struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
2256 u32 mode, u32 uid, u32 gid)
2257{
2258 return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
2259 mode, uid, gid, NULL, NULL, 0);
2260}
2261
2262struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
2263 const YCHAR *name, u32 mode, u32 uid,
2264 u32 gid, u32 rdev)
2265{
2266 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
2267 uid, gid, NULL, NULL, rdev);
2268}
2269
2270struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
2271 const YCHAR *name, u32 mode, u32 uid,
2272 u32 gid, const YCHAR *alias)
2273{
2274 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
2275 uid, gid, NULL, alias, 0);
2276}
2277
2278/* yaffs_link_obj returns the object id of the equivalent object.*/
2279struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
2280 struct yaffs_obj *equiv_obj)
2281{
2282 /* Get the real object in case we were fed a hard link obj */
2283 equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
2284
2285 if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
2286 parent, name, 0, 0, 0,
2287 equiv_obj, NULL, 0))
2288 return equiv_obj;
2289
2290 return NULL;
2291
2292}
2293
2294
2295
2296/*---------------------- Block Management and Page Allocation -------------*/
2297
2298static void yaffs_deinit_blocks(struct yaffs_dev *dev)
2299{
2300 if (dev->block_info_alt && dev->block_info)
2301 vfree(dev->block_info);
2302 else
2303 kfree(dev->block_info);
2304
2305 dev->block_info_alt = 0;
2306
2307 dev->block_info = NULL;
2308
2309 if (dev->chunk_bits_alt && dev->chunk_bits)
2310 vfree(dev->chunk_bits);
2311 else
2312 kfree(dev->chunk_bits);
2313 dev->chunk_bits_alt = 0;
2314 dev->chunk_bits = NULL;
2315}
2316
2317static int yaffs_init_blocks(struct yaffs_dev *dev)
2318{
2319 int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
2320
2321 dev->block_info = NULL;
2322 dev->chunk_bits = NULL;
2323 dev->alloc_block = -1; /* force it to get a new one */
2324
2325 /* If the first allocation strategy fails, thry the alternate one */
2326 dev->block_info =
2327 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
2328 if (!dev->block_info) {
2329 dev->block_info =
2330 vmalloc(n_blocks * sizeof(struct yaffs_block_info));
2331 dev->block_info_alt = 1;
2332 } else {
2333 dev->block_info_alt = 0;
2334 }
2335
2336 if (!dev->block_info)
2337 goto alloc_error;
2338
2339 /* Set up dynamic blockinfo stuff. Round up bytes. */
2340 dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
2341 dev->chunk_bits =
2342 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
2343 if (!dev->chunk_bits) {
2344 dev->chunk_bits =
2345 vmalloc(dev->chunk_bit_stride * n_blocks);
2346 dev->chunk_bits_alt = 1;
2347 } else {
2348 dev->chunk_bits_alt = 0;
2349 }
2350 if (!dev->chunk_bits)
2351 goto alloc_error;
2352
2353
2354 memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
2355 memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
2356 return YAFFS_OK;
2357
2358alloc_error:
2359 yaffs_deinit_blocks(dev);
2360 return YAFFS_FAIL;
2361}
2362
2363
2364void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
2365{
2366 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
2367 int erased_ok = 0;
2368 u32 i;
2369
2370 /* If the block is still healthy erase it and mark as clean.
2371 * If the block has had a data failure, then retire it.
2372 */
2373
2374 yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
2375 "yaffs_block_became_dirty block %d state %d %s",
2376 block_no, bi->block_state,
2377 (bi->needs_retiring) ? "needs retiring" : "");
2378
2379 yaffs2_clear_oldest_dirty_seq(dev, bi);
2380
2381 bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
2382
2383 /* If this is the block being garbage collected then stop gc'ing */
2384 if (block_no == (int)dev->gc_block)
2385 dev->gc_block = 0;
2386
2387 /* If this block is currently the best candidate for gc
2388 * then drop as a candidate */
2389 if (block_no == (int)dev->gc_dirtiest) {
2390 dev->gc_dirtiest = 0;
2391 dev->gc_pages_in_use = 0;
2392 }
2393
2394 if (!bi->needs_retiring) {
2395 yaffs2_checkpt_invalidate(dev);
2396 erased_ok = yaffs_erase_block(dev, block_no);
2397 if (!erased_ok) {
2398 dev->n_erase_failures++;
2399 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2400 "**>> Erasure failed %d", block_no);
2401 }
2402 }
2403
2404 /* Verify erasure if needed */
2405 if (erased_ok &&
2406 ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
2407 !yaffs_skip_verification(dev))) {
2408 for (i = 0; i < dev->param.chunks_per_block; i++) {
2409 if (!yaffs_check_chunk_erased(dev,
2410 block_no * dev->param.chunks_per_block + i)) {
2411 yaffs_trace(YAFFS_TRACE_ERROR,
2412 ">>Block %d erasure supposedly OK, but chunk %d not erased",
2413 block_no, i);
2414 }
2415 }
2416 }
2417
2418 if (!erased_ok) {
2419 /* We lost a block of free space */
2420 dev->n_free_chunks -= dev->param.chunks_per_block;
2421 yaffs_retire_block(dev, block_no);
2422 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
2423 "**>> Block %d retired", block_no);
2424 return;
2425 }
2426
2427 /* Clean it up... */
2428 bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
2429 bi->seq_number = 0;
2430 dev->n_erased_blocks++;
2431 bi->pages_in_use = 0;
2432 bi->soft_del_pages = 0;
2433 bi->has_shrink_hdr = 0;
2434 bi->skip_erased_check = 1; /* Clean, so no need to check */
2435 bi->gc_prioritise = 0;
2436 bi->has_summary = 0;
2437
2438 yaffs_clear_chunk_bits(dev, block_no);
2439
2440 yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
2441}
2442
2443static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
2444 struct yaffs_block_info *bi,
2445 int old_chunk, u8 *buffer)
2446{
2447 int new_chunk;
2448 int mark_flash = 1;
2449 struct yaffs_ext_tags tags;
2450 struct yaffs_obj *object;
2451 int matching_chunk;
2452 int ret_val = YAFFS_OK;
2453
2454 memset(&tags, 0, sizeof(tags));
2455 yaffs_rd_chunk_tags_nand(dev, old_chunk,
2456 buffer, &tags);
2457 object = yaffs_find_by_number(dev, tags.obj_id);
2458
2459 yaffs_trace(YAFFS_TRACE_GC_DETAIL,
2460 "Collecting chunk in block %d, %d %d %d ",
2461 dev->gc_chunk, tags.obj_id,
2462 tags.chunk_id, tags.n_bytes);
2463
2464 if (object && !yaffs_skip_verification(dev)) {
2465 if (tags.chunk_id == 0)
2466 matching_chunk =
2467 object->hdr_chunk;
2468 else if (object->soft_del)
2469 /* Defeat the test */
2470 matching_chunk = old_chunk;
2471 else
2472 matching_chunk =
2473 yaffs_find_chunk_in_file
2474 (object, tags.chunk_id,
2475 NULL);
2476
2477 if (old_chunk != matching_chunk)
2478 yaffs_trace(YAFFS_TRACE_ERROR,
2479 "gc: page in gc mismatch: %d %d %d %d",
2480 old_chunk,
2481 matching_chunk,
2482 tags.obj_id,
2483 tags.chunk_id);
2484 }
2485
2486 if (!object) {
2487 yaffs_trace(YAFFS_TRACE_ERROR,
2488 "page %d in gc has no object: %d %d %d ",
2489 old_chunk,
2490 tags.obj_id, tags.chunk_id,
2491 tags.n_bytes);
2492 }
2493
2494 if (object &&
2495 object->deleted &&
2496 object->soft_del && tags.chunk_id != 0) {
2497 /* Data chunk in a soft deleted file,
2498 * throw it away.
2499 * It's a soft deleted data chunk,
2500 * No need to copy this, just forget
2501 * about it and fix up the object.
2502 */
2503
2504 /* Free chunks already includes
2505 * softdeleted chunks, how ever this
2506 * chunk is going to soon be really
2507 * deleted which will increment free
2508 * chunks. We have to decrement free
2509 * chunks so this works out properly.
2510 */
2511 dev->n_free_chunks--;
2512 bi->soft_del_pages--;
2513
2514 object->n_data_chunks--;
2515 if (object->n_data_chunks <= 0) {
2516 /* remeber to clean up obj */
2517 dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
2518 dev->n_clean_ups++;
2519 }
2520 mark_flash = 0;
2521 } else if (object) {
2522 /* It's either a data chunk in a live
2523 * file or an ObjectHeader, so we're
2524 * interested in it.
2525 * NB Need to keep the ObjectHeaders of
2526 * deleted files until the whole file
2527 * has been deleted off
2528 */
2529 tags.serial_number++;
2530 dev->n_gc_copies++;
2531
2532 if (tags.chunk_id == 0) {
2533 /* It is an object Id,
2534 * We need to nuke the shrinkheader flags since its
2535 * work is done.
2536 * Also need to clean up shadowing.
2537 * NB We don't want to do all the work of translating
2538 * object header endianism back and forth so we leave
2539 * the oh endian in its stored order.
2540 */
2541
2542 struct yaffs_obj_hdr *oh;
2543 oh = (struct yaffs_obj_hdr *) buffer;
2544
2545 oh->is_shrink = 0;
2546 tags.extra_is_shrink = 0;
2547 oh->shadows_obj = 0;
2548 oh->inband_shadowed_obj_id = 0;
2549 tags.extra_shadows = 0;
2550
2551 /* Update file size */
2552 if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
2553 yaffs_oh_size_load(dev, oh,
2554 object->variant.file_variant.stored_size, 1);
2555 tags.extra_file_size =
2556 object->variant.file_variant.stored_size;
2557 }
2558
2559 yaffs_verify_oh(object, oh, &tags, 1);
2560 new_chunk =
2561 yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
2562 } else {
2563 new_chunk =
2564 yaffs_write_new_chunk(dev, buffer, &tags, 1);
2565 }
2566
2567 if (new_chunk < 0) {
2568 ret_val = YAFFS_FAIL;
2569 } else {
2570
2571 /* Now fix up the Tnodes etc. */
2572
2573 if (tags.chunk_id == 0) {
2574 /* It's a header */
2575 object->hdr_chunk = new_chunk;
2576 object->serial = tags.serial_number;
2577 } else {
2578 /* It's a data chunk */
2579 yaffs_put_chunk_in_file(object, tags.chunk_id,
2580 new_chunk, 0);
2581 }
2582 }
2583 }
2584 if (ret_val == YAFFS_OK)
2585 yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
2586 return ret_val;
2587}
2588
2589static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
2590{
2591 int old_chunk;
2592 int ret_val = YAFFS_OK;
2593 u32 i;
2594 int is_checkpt_block;
2595 int max_copies;
2596 int chunks_before = yaffs_get_erased_chunks(dev);
2597 int chunks_after;
2598 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
2599
2600 is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
2601
2602 yaffs_trace(YAFFS_TRACE_TRACING,
2603 "Collecting block %d, in use %d, shrink %d, whole_block %d",
2604 block, bi->pages_in_use, bi->has_shrink_hdr,
2605 whole_block);
2606
2607 /*yaffs_verify_free_chunks(dev); */
2608
2609 if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
2610 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
2611
2612 bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
2613
2614 dev->gc_disable = 1;
2615
2616 yaffs_summary_gc(dev, block);
2617
2618 if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
2619 yaffs_trace(YAFFS_TRACE_TRACING,
2620 "Collecting block %d that has no chunks in use",
2621 block);
2622 yaffs_block_became_dirty(dev, block);
2623 } else {
2624
2625 u8 *buffer = yaffs_get_temp_buffer(dev);
2626
2627 yaffs_verify_blk(dev, bi, block);
2628
2629 max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
2630 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
2631
2632 for (/* init already done */ ;
2633 ret_val == YAFFS_OK &&
2634 dev->gc_chunk < dev->param.chunks_per_block &&
2635 (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
2636 max_copies > 0;
2637 dev->gc_chunk++, old_chunk++) {
2638 if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
2639 /* Page is in use and might need to be copied */
2640 max_copies--;
2641 ret_val = yaffs_gc_process_chunk(dev, bi,
2642 old_chunk, buffer);
2643 }
2644 }
2645 yaffs_release_temp_buffer(dev, buffer);
2646 }
2647
2648 yaffs_verify_collected_blk(dev, bi, block);
2649
2650 if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
2651 /*
2652 * The gc did not complete. Set block state back to FULL
2653 * because checkpointing does not restore gc.
2654 */
2655 bi->block_state = YAFFS_BLOCK_STATE_FULL;
2656 } else {
2657 /* The gc completed. */
2658 /* Do any required cleanups */
2659 for (i = 0; i < dev->n_clean_ups; i++) {
2660 /* Time to delete the file too */
2661 struct yaffs_obj *object =
2662 yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
2663 if (object) {
2664 yaffs_free_tnode(dev,
2665 object->variant.file_variant.top);
2666 object->variant.file_variant.top = NULL;
2667 yaffs_trace(YAFFS_TRACE_GC,
2668 "yaffs: About to finally delete object %d",
2669 object->obj_id);
2670 yaffs_generic_obj_del(object);
2671 object->my_dev->n_deleted_files--;
2672 }
2673
2674 }
2675 chunks_after = yaffs_get_erased_chunks(dev);
2676 if (chunks_before >= chunks_after)
2677 yaffs_trace(YAFFS_TRACE_GC,
2678 "gc did not increase free chunks before %d after %d",
2679 chunks_before, chunks_after);
2680 dev->gc_block = 0;
2681 dev->gc_chunk = 0;
2682 dev->n_clean_ups = 0;
2683 }
2684
2685 dev->gc_disable = 0;
2686
2687 return ret_val;
2688}
2689
2690/*
2691 * find_gc_block() selects the dirtiest block (or close enough)
2692 * for garbage collection.
2693 */
2694
2695static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
2696 int aggressive, int background)
2697{
2698 u32 i;
2699 u32 iterations;
2700 u32 selected = 0;
2701 int prioritised = 0;
2702 int prioritised_exist = 0;
2703 struct yaffs_block_info *bi;
2704 u32 threshold;
2705
2706 /* First let's see if we need to grab a prioritised block */
2707 if (dev->has_pending_prioritised_gc && !aggressive) {
2708 dev->gc_dirtiest = 0;
2709 bi = dev->block_info;
2710 for (i = dev->internal_start_block;
2711 i <= dev->internal_end_block && !selected; i++) {
2712
2713 if (bi->gc_prioritise) {
2714 prioritised_exist = 1;
2715 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2716 yaffs_block_ok_for_gc(dev, bi)) {
2717 selected = i;
2718 prioritised = 1;
2719 }
2720 }
2721 bi++;
2722 }
2723
2724 /*
2725 * If there is a prioritised block and none was selected then
2726 * this happened because there is at least one old dirty block
2727 * gumming up the works. Let's gc the oldest dirty block.
2728 */
2729
2730 if (prioritised_exist &&
2731 !selected && dev->oldest_dirty_block > 0)
2732 selected = dev->oldest_dirty_block;
2733
2734 if (!prioritised_exist) /* None found, so we can clear this */
2735 dev->has_pending_prioritised_gc = 0;
2736 }
2737
2738 /* If we're doing aggressive GC then we are happy to take a less-dirty
2739 * block, and search harder.
2740 * else (leasurely gc), then we only bother to do this if the
2741 * block has only a few pages in use.
2742 */
2743
2744 if (!selected) {
2745 u32 pages_used;
2746 int n_blocks =
2747 dev->internal_end_block - dev->internal_start_block + 1;
2748 if (aggressive) {
2749 threshold = dev->param.chunks_per_block;
2750 iterations = n_blocks;
2751 } else {
2752 u32 max_threshold;
2753
2754 if (background)
2755 max_threshold = dev->param.chunks_per_block / 2;
2756 else
2757 max_threshold = dev->param.chunks_per_block / 8;
2758
2759 if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2760 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2761
2762 threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
2763 if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
2764 threshold = YAFFS_GC_PASSIVE_THRESHOLD;
2765 if (threshold > max_threshold)
2766 threshold = max_threshold;
2767
2768 iterations = n_blocks / 16 + 1;
2769 if (iterations > 100)
2770 iterations = 100;
2771 }
2772
2773 for (i = 0;
2774 i < iterations &&
2775 (dev->gc_dirtiest < 1 ||
2776 dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
2777 i++) {
2778 dev->gc_block_finder++;
2779 if (dev->gc_block_finder < dev->internal_start_block ||
2780 dev->gc_block_finder > dev->internal_end_block)
2781 dev->gc_block_finder =
2782 dev->internal_start_block;
2783
2784 bi = yaffs_get_block_info(dev, dev->gc_block_finder);
2785
2786 pages_used = bi->pages_in_use - bi->soft_del_pages;
2787
2788 if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
2789 pages_used < dev->param.chunks_per_block &&
2790 (dev->gc_dirtiest < 1 ||
2791 pages_used < dev->gc_pages_in_use) &&
2792 yaffs_block_ok_for_gc(dev, bi)) {
2793 dev->gc_dirtiest = dev->gc_block_finder;
2794 dev->gc_pages_in_use = pages_used;
2795 }
2796 }
2797
2798 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
2799 selected = dev->gc_dirtiest;
2800 }
2801
2802 /*
2803 * If nothing has been selected for a while, try the oldest dirty
2804 * because that's gumming up the works.
2805 */
2806
2807 if (!selected && dev->param.is_yaffs2 &&
2808 dev->gc_not_done >= (background ? 10 : 20)) {
2809 yaffs2_find_oldest_dirty_seq(dev);
2810 if (dev->oldest_dirty_block > 0) {
2811 selected = dev->oldest_dirty_block;
2812 dev->gc_dirtiest = selected;
2813 dev->oldest_dirty_gc_count++;
2814 bi = yaffs_get_block_info(dev, selected);
2815 dev->gc_pages_in_use =
2816 bi->pages_in_use - bi->soft_del_pages;
2817 } else {
2818 dev->gc_not_done = 0;
2819 }
2820 }
2821
2822 if (selected) {
2823 yaffs_trace(YAFFS_TRACE_GC,
2824 "GC Selected block %d with %d free, prioritised:%d",
2825 selected,
2826 dev->param.chunks_per_block - dev->gc_pages_in_use,
2827 prioritised);
2828
2829 dev->n_gc_blocks++;
2830 if (background)
2831 dev->bg_gcs++;
2832
2833 dev->gc_dirtiest = 0;
2834 dev->gc_pages_in_use = 0;
2835 dev->gc_not_done = 0;
2836 if (dev->refresh_skip > 0)
2837 dev->refresh_skip--;
2838 } else {
2839 dev->gc_not_done++;
2840 yaffs_trace(YAFFS_TRACE_GC,
2841 "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
2842 dev->gc_block_finder, dev->gc_not_done, threshold,
2843 dev->gc_dirtiest, dev->gc_pages_in_use,
2844 dev->oldest_dirty_block, background ? " bg" : "");
2845 }
2846
2847 return selected;
2848}
2849
2850/* New garbage collector
2851 * If we're very low on erased blocks then we do aggressive garbage collection
2852 * otherwise we do "leasurely" garbage collection.
2853 * Aggressive gc looks further (whole array) and will accept less dirty blocks.
2854 * Passive gc only inspects smaller areas and only accepts more dirty blocks.
2855 *
2856 * The idea is to help clear out space in a more spread-out manner.
2857 * Dunno if it really does anything useful.
2858 */
2859static int yaffs_check_gc(struct yaffs_dev *dev, int background)
2860{
2861 int aggressive = 0;
2862 int gc_ok = YAFFS_OK;
2863 int max_tries = 0;
2864 int min_erased;
2865 int erased_chunks;
2866 int checkpt_block_adjust;
2867
2868 if (dev->param.gc_control_fn &&
2869 (dev->param.gc_control_fn(dev) & 1) == 0)
2870 return YAFFS_OK;
2871
2872 if (dev->gc_disable)
2873 /* Bail out so we don't get recursive gc */
2874 return YAFFS_OK;
2875
2876 /* This loop should pass the first time.
2877 * Only loops here if the collection does not increase space.
2878 */
2879
2880 do {
2881 max_tries++;
2882
2883 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
2884
2885 min_erased =
2886 dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
2887 erased_chunks =
2888 dev->n_erased_blocks * dev->param.chunks_per_block;
2889
2890 /* If we need a block soon then do aggressive gc. */
2891 if (dev->n_erased_blocks < min_erased)
2892 aggressive = 1;
2893 else {
2894 if (!background
2895 && erased_chunks > (dev->n_free_chunks / 4))
2896 break;
2897
2898 if (dev->gc_skip > 20)
2899 dev->gc_skip = 20;
2900 if (erased_chunks < dev->n_free_chunks / 2 ||
2901 dev->gc_skip < 1 || background)
2902 aggressive = 0;
2903 else {
2904 dev->gc_skip--;
2905 break;
2906 }
2907 }
2908
2909 dev->gc_skip = 5;
2910
2911 /* If we don't already have a block being gc'd then see if we
2912 * should start another */
2913
2914 if (dev->gc_block < 1 && !aggressive) {
2915 dev->gc_block = yaffs2_find_refresh_block(dev);
2916 dev->gc_chunk = 0;
2917 dev->n_clean_ups = 0;
2918 }
2919 if (dev->gc_block < 1) {
2920 dev->gc_block =
2921 yaffs_find_gc_block(dev, aggressive, background);
2922 dev->gc_chunk = 0;
2923 dev->n_clean_ups = 0;
2924 }
2925
2926 if (dev->gc_block > 0) {
2927 dev->all_gcs++;
2928 if (!aggressive)
2929 dev->passive_gc_count++;
2930
2931 yaffs_trace(YAFFS_TRACE_GC,
2932 "yaffs: GC n_erased_blocks %d aggressive %d",
2933 dev->n_erased_blocks, aggressive);
2934
2935 gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
2936 }
2937
2938 if (dev->n_erased_blocks < (int)dev->param.n_reserved_blocks &&
2939 dev->gc_block > 0) {
2940 yaffs_trace(YAFFS_TRACE_GC,
2941 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
2942 dev->n_erased_blocks, max_tries,
2943 dev->gc_block);
2944 }
2945 } while ((dev->n_erased_blocks < (int)dev->param.n_reserved_blocks) &&
2946 (dev->gc_block > 0) && (max_tries < 2));
2947
2948 return aggressive ? gc_ok : YAFFS_OK;
2949}
2950
2951/*
2952 * yaffs_bg_gc()
2953 * Garbage collects. Intended to be called from a background thread.
2954 * Returns non-zero if at least half the free chunks are erased.
2955 */
2956int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
2957{
2958 int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
2959
2960 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
2961
2962 yaffs_check_gc(dev, 1);
2963 return erased_chunks > dev->n_free_chunks / 2;
2964}
2965
2966/*-------------------- Data file manipulation -----------------*/
2967
2968static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
2969{
2970 int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
2971
2972 if (nand_chunk >= 0)
2973 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
2974 buffer, NULL);
2975 else {
2976 yaffs_trace(YAFFS_TRACE_NANDACCESS,
2977 "Chunk %d not found zero instead",
2978 nand_chunk);
2979 /* get sane (zero) data if you read a hole */
2980 memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
2981 return 0;
2982 }
2983
2984}
2985
2986void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
2987 int lyn)
2988{
2989 int block;
2990 int page;
2991 struct yaffs_ext_tags tags;
2992 struct yaffs_block_info *bi;
2993
2994 if (chunk_id <= 0)
2995 return;
2996
2997 dev->n_deletions++;
2998 block = chunk_id / dev->param.chunks_per_block;
2999 page = chunk_id % dev->param.chunks_per_block;
3000
3001 if (!yaffs_check_chunk_bit(dev, block, page))
3002 yaffs_trace(YAFFS_TRACE_VERIFY,
3003 "Deleting invalid chunk %d", chunk_id);
3004
3005 bi = yaffs_get_block_info(dev, block);
3006
3007 yaffs2_update_oldest_dirty_seq(dev, block, bi);
3008
3009 yaffs_trace(YAFFS_TRACE_DELETION,
3010 "line %d delete of chunk %d",
3011 lyn, chunk_id);
3012
3013 if (!dev->param.is_yaffs2 && mark_flash &&
3014 bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
3015
3016 memset(&tags, 0, sizeof(tags));
3017 tags.is_deleted = 1;
3018 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
3019 yaffs_handle_chunk_update(dev, chunk_id, &tags);
3020 } else {
3021 dev->n_unmarked_deletions++;
3022 }
3023
3024 /* Pull out of the management area.
3025 * If the whole block became dirty, this will kick off an erasure.
3026 */
3027 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
3028 bi->block_state == YAFFS_BLOCK_STATE_FULL ||
3029 bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
3030 bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
3031 dev->n_free_chunks++;
3032 yaffs_clear_chunk_bit(dev, block, page);
3033 bi->pages_in_use--;
3034
3035 if (bi->pages_in_use == 0 &&
3036 !bi->has_shrink_hdr &&
3037 bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
3038 bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
3039 yaffs_block_became_dirty(dev, block);
3040 }
3041 }
3042}
3043
3044static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
3045 const u8 *buffer, int n_bytes, int use_reserve)
3046{
3047 /* Find old chunk Need to do this to get serial number
3048 * Write new one and patch into tree.
3049 * Invalidate old tags.
3050 */
3051
3052 int prev_chunk_id;
3053 struct yaffs_ext_tags prev_tags;
3054 int new_chunk_id;
3055 struct yaffs_ext_tags new_tags;
3056 struct yaffs_dev *dev = in->my_dev;
3057 loff_t endpos;
3058
3059 yaffs_check_gc(dev, 0);
3060
3061 /* Get the previous chunk at this location in the file if it exists.
3062 * If it does not exist then put a zero into the tree. This creates
3063 * the tnode now, rather than later when it is harder to clean up.
3064 */
3065 prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
3066 if (prev_chunk_id < 1 &&
3067 !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
3068 return 0;
3069
3070 /* Set up new tags */
3071 memset(&new_tags, 0, sizeof(new_tags));
3072
3073 new_tags.chunk_id = inode_chunk;
3074 new_tags.obj_id = in->obj_id;
3075 new_tags.serial_number =
3076 (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
3077 new_tags.n_bytes = n_bytes;
3078
3079 if (n_bytes < 1 || n_bytes > (int)dev->data_bytes_per_chunk) {
3080 yaffs_trace(YAFFS_TRACE_ERROR,
3081 "Writing %d bytes to chunk!!!!!!!!!",
3082 n_bytes);
3083 BUG();
3084 }
3085
3086 /*
3087 * If this is a data chunk and the write goes past the end of the stored
3088 * size then update the stored_size.
3089 */
3090 if (inode_chunk > 0) {
3091 endpos = (inode_chunk - 1) * dev->data_bytes_per_chunk +
3092 n_bytes;
3093 if (in->variant.file_variant.stored_size < endpos)
3094 in->variant.file_variant.stored_size = endpos;
3095 }
3096
3097 new_chunk_id =
3098 yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
3099
3100 if (new_chunk_id > 0) {
3101 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
3102
3103 if (prev_chunk_id > 0)
3104 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3105
3106 yaffs_verify_file_sane(in);
3107 }
3108 return new_chunk_id;
3109}
3110
3111
3112
3113static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
3114 const YCHAR *name, const void *value, int size,
3115 int flags)
3116{
3117 struct yaffs_xattr_mod xmod;
3118 int result;
3119
3120 xmod.set = set;
3121 xmod.name = name;
3122 xmod.data = value;
3123 xmod.size = size;
3124 xmod.flags = flags;
3125 xmod.result = -ENOSPC;
3126
3127 result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
3128
3129 if (result > 0)
3130 return xmod.result;
3131 else
3132 return -ENOSPC;
3133}
3134
3135static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
3136 struct yaffs_xattr_mod *xmod)
3137{
3138 int retval = 0;
3139 int x_offs = sizeof(struct yaffs_obj_hdr);
3140 struct yaffs_dev *dev = obj->my_dev;
3141 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3142 char *x_buffer = buffer + x_offs;
3143
3144 if (xmod->set)
3145 retval =
3146 nval_set(dev, x_buffer, x_size, xmod->name, xmod->data,
3147 xmod->size, xmod->flags);
3148 else
3149 retval = nval_del(dev, x_buffer, x_size, xmod->name);
3150
3151 obj->has_xattr = nval_hasvalues(dev, x_buffer, x_size);
3152 obj->xattr_known = 1;
3153 xmod->result = retval;
3154
3155 return retval;
3156}
3157
3158static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
3159 void *value, int size)
3160{
3161 char *buffer = NULL;
3162 int result;
3163 struct yaffs_ext_tags tags;
3164 struct yaffs_dev *dev = obj->my_dev;
3165 int x_offs = sizeof(struct yaffs_obj_hdr);
3166 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
3167 char *x_buffer;
3168 int retval = 0;
3169
3170 if (obj->hdr_chunk < 1)
3171 return -ENODATA;
3172
3173 /* If we know that the object has no xattribs then don't do all the
3174 * reading and parsing.
3175 */
3176 if (obj->xattr_known && !obj->has_xattr) {
3177 if (name)
3178 return -ENODATA;
3179 else
3180 return 0;
3181 }
3182
3183 buffer = (char *)yaffs_get_temp_buffer(dev);
3184 if (!buffer)
3185 return -ENOMEM;
3186
3187 result =
3188 yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
3189
3190 if (result != YAFFS_OK)
3191 retval = -ENOENT;
3192 else {
3193 x_buffer = buffer + x_offs;
3194
3195 if (!obj->xattr_known) {
3196 obj->has_xattr = nval_hasvalues(dev, x_buffer, x_size);
3197 obj->xattr_known = 1;
3198 }
3199
3200 if (name)
3201 retval = nval_get(dev, x_buffer, x_size,
3202 name, value, size);
3203 else
3204 retval = nval_list(dev, x_buffer, x_size, value, size);
3205 }
3206 yaffs_release_temp_buffer(dev, (u8 *) buffer);
3207 return retval;
3208}
3209
3210int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
3211 const void *value, int size, int flags)
3212{
3213 return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
3214}
3215
3216int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
3217{
3218 return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
3219}
3220
3221int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
3222 int size)
3223{
3224 return yaffs_do_xattrib_fetch(obj, name, value, size);
3225}
3226
3227int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
3228{
3229 return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
3230}
3231
3232static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
3233{
3234 u8 *buf;
3235 struct yaffs_obj_hdr *oh;
3236 struct yaffs_dev *dev;
3237 struct yaffs_ext_tags tags;
3238 int result;
3239
3240 if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
3241 return;
3242
3243 dev = in->my_dev;
3244 buf = yaffs_get_temp_buffer(dev);
3245
3246 result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
3247
3248 if (result == YAFFS_FAIL)
3249 return;
3250
3251 oh = (struct yaffs_obj_hdr *)buf;
3252
3253 yaffs_do_endian_oh(dev, oh);
3254
3255 in->lazy_loaded = 0;
3256 in->yst_mode = oh->yst_mode;
3257 yaffs_load_attribs(in, oh);
3258 yaffs_set_obj_name_from_oh(in, oh);
3259
3260 if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
3261 in->variant.symlink_variant.alias =
3262 yaffs_clone_str(oh->alias);
3263 yaffs_release_temp_buffer(dev, buf);
3264}
3265
3266/* UpdateObjectHeader updates the header on NAND for an object.
3267 * If name is not NULL, then that new name is used.
3268 *
3269 * We're always creating the obj header from scratch (except reading
3270 * the old name) so first set up in cpu endianness then run it through
3271 * endian fixing at the end.
3272 *
3273 * However, a twist: If there are xattribs we leave them as they were.
3274 *
3275 * Careful! The buffer holds the whole chunk. Part of the chunk holds the
3276 * object header and the rest holds the xattribs, therefore we use a buffer
3277 * pointer and an oh pointer to point to the same memory.
3278 */
3279
3280int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
3281 int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
3282{
3283
3284 struct yaffs_block_info *bi;
3285 struct yaffs_dev *dev = in->my_dev;
3286 int prev_chunk_id;
3287 int ret_val = 0;
3288 int result = 0;
3289 int new_chunk_id;
3290 struct yaffs_ext_tags new_tags;
3291 struct yaffs_ext_tags old_tags;
3292 const YCHAR *alias = NULL;
3293 u8 *buffer = NULL;
3294 YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
3295 struct yaffs_obj_hdr *oh = NULL;
3296 loff_t file_size = 0;
3297
3298 strcpy(old_name, _Y("silly old name"));
3299
3300 if (in->fake && in != dev->root_dir && !force && !xmod)
3301 return ret_val;
3302
3303 yaffs_check_gc(dev, 0);
3304 yaffs_check_obj_details_loaded(in);
3305
3306 buffer = yaffs_get_temp_buffer(in->my_dev);
3307 oh = (struct yaffs_obj_hdr *)buffer;
3308
3309 prev_chunk_id = in->hdr_chunk;
3310
3311 if (prev_chunk_id > 0) {
3312 /* Access the old obj header just to read the name. */
3313 result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
3314 buffer, &old_tags);
3315 if (result == YAFFS_OK) {
3316 yaffs_verify_oh(in, oh, &old_tags, 0);
3317 memcpy(old_name, oh->name, sizeof(oh->name));
3318
3319 /*
3320 * NB We only wipe the object header area because the rest of
3321 * the buffer might contain xattribs.
3322 */
3323 memset(oh, 0xff, sizeof(*oh));
3324 }
3325 } else {
3326 memset(buffer, 0xff, dev->data_bytes_per_chunk);
3327 }
3328
3329 oh->type = in->variant_type;
3330 oh->yst_mode = in->yst_mode;
3331 oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
3332
3333 yaffs_load_attribs_oh(oh, in);
3334
3335 if (in->parent)
3336 oh->parent_obj_id = in->parent->obj_id;
3337 else
3338 oh->parent_obj_id = 0;
3339
3340 if (name && *name) {
3341 memset(oh->name, 0, sizeof(oh->name));
3342 yaffs_load_oh_from_name(dev, oh->name, name);
3343 } else if (prev_chunk_id > 0) {
3344 memcpy(oh->name, old_name, sizeof(oh->name));
3345 } else {
3346 memset(oh->name, 0, sizeof(oh->name));
3347 }
3348
3349 oh->is_shrink = is_shrink;
3350
3351 switch (in->variant_type) {
3352 case YAFFS_OBJECT_TYPE_UNKNOWN:
3353 /* Should not happen */
3354 break;
3355 case YAFFS_OBJECT_TYPE_FILE:
3356 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
3357 oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
3358 file_size = in->variant.file_variant.stored_size;
3359 yaffs_oh_size_load(dev, oh, file_size, 0);
3360 break;
3361 case YAFFS_OBJECT_TYPE_HARDLINK:
3362 oh->equiv_id = in->variant.hardlink_variant.equiv_id;
3363 break;
3364 case YAFFS_OBJECT_TYPE_SPECIAL:
3365 /* Do nothing */
3366 break;
3367 case YAFFS_OBJECT_TYPE_DIRECTORY:
3368 /* Do nothing */
3369 break;
3370 case YAFFS_OBJECT_TYPE_SYMLINK:
3371 alias = in->variant.symlink_variant.alias;
3372 if (!alias)
3373 alias = _Y("no alias");
3374 strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
3375 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
3376 break;
3377 }
3378
3379 /* process any xattrib modifications */
3380 if (xmod)
3381 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
3382
3383 /* Tags */
3384 memset(&new_tags, 0, sizeof(new_tags));
3385 in->serial++;
3386 new_tags.chunk_id = 0;
3387 new_tags.obj_id = in->obj_id;
3388 new_tags.serial_number = in->serial;
3389
3390 /* Add extra info for file header */
3391 new_tags.extra_available = 1;
3392 new_tags.extra_parent_id = oh->parent_obj_id;
3393 new_tags.extra_file_size = file_size;
3394 new_tags.extra_is_shrink = oh->is_shrink;
3395 new_tags.extra_equiv_id = oh->equiv_id;
3396 new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
3397 new_tags.extra_obj_type = in->variant_type;
3398
3399 /* Now endian swizzle the oh if needed. */
3400 yaffs_do_endian_oh(dev, oh);
3401
3402 yaffs_verify_oh(in, oh, &new_tags, 1);
3403
3404 /* Create new chunk in NAND */
3405 new_chunk_id =
3406 yaffs_write_new_chunk(dev, buffer, &new_tags,
3407 (prev_chunk_id > 0) ? 1 : 0);
3408
3409 if (buffer)
3410 yaffs_release_temp_buffer(dev, buffer);
3411
3412 if (new_chunk_id < 0)
3413 return new_chunk_id;
3414
3415 in->hdr_chunk = new_chunk_id;
3416
3417 if (prev_chunk_id > 0)
3418 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
3419
3420 if (!yaffs_obj_cache_dirty(in))
3421 in->dirty = 0;
3422
3423 /* If this was a shrink, then mark the block
3424 * that the chunk lives on */
3425 if (is_shrink) {
3426 bi = yaffs_get_block_info(in->my_dev,
3427 new_chunk_id /
3428 in->my_dev->param.chunks_per_block);
3429 bi->has_shrink_hdr = 1;
3430 }
3431
3432
3433 return new_chunk_id;
3434}
3435
3436/*--------------------- File read/write ------------------------
3437 * Read and write have very similar structures.
3438 * In general the read/write has three parts to it
3439 * An incomplete chunk to start with (if the read/write is not chunk-aligned)
3440 * Some complete chunks
3441 * An incomplete chunk to end off with
3442 *
3443 * Curve-balls: the first chunk might also be the last chunk.
3444 */
3445
3446int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
3447{
3448 int chunk;
3449 u32 start;
3450 int n_copy;
3451 int n = n_bytes;
3452 int n_done = 0;
3453 struct yaffs_cache *cache;
3454 struct yaffs_dev *dev;
3455
3456 dev = in->my_dev;
3457
3458 while (n > 0) {
3459 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3460 chunk++;
3461
3462 /* OK now check for the curveball where the start and end are in
3463 * the same chunk.
3464 */
3465 if ((start + n) < dev->data_bytes_per_chunk)
3466 n_copy = n;
3467 else
3468 n_copy = dev->data_bytes_per_chunk - start;
3469
3470 cache = yaffs_find_chunk_cache(in, chunk);
3471
3472 /* If the chunk is already in the cache or it is less than
3473 * a whole chunk or we're using inband tags then use the cache
3474 * (if there is caching) else bypass the cache.
3475 */
3476 if (cache || n_copy != (int)dev->data_bytes_per_chunk ||
3477 dev->param.inband_tags) {
3478 if (dev->param.n_caches > 0) {
3479
3480 /* If we can't find the data in the cache,
3481 * then load it up. */
3482
3483 if (!cache) {
3484 cache =
3485 yaffs_grab_chunk_cache(in->my_dev);
3486 cache->object = in;
3487 cache->chunk_id = chunk;
3488 cache->dirty = 0;
3489 cache->locked = 0;
3490 yaffs_rd_data_obj(in, chunk,
3491 cache->data);
3492 cache->n_bytes = 0;
3493 }
3494
3495 yaffs_use_cache(dev, cache, 0);
3496
3497 cache->locked = 1;
3498
3499 memcpy(buffer, &cache->data[start], n_copy);
3500
3501 cache->locked = 0;
3502 } else {
3503 /* Read into the local buffer then copy.. */
3504
3505 u8 *local_buffer =
3506 yaffs_get_temp_buffer(dev);
3507 yaffs_rd_data_obj(in, chunk, local_buffer);
3508
3509 memcpy(buffer, &local_buffer[start], n_copy);
3510
3511 yaffs_release_temp_buffer(dev, local_buffer);
3512 }
3513 } else {
3514 /* A full chunk. Read directly into the buffer. */
3515 yaffs_rd_data_obj(in, chunk, buffer);
3516 }
3517 n -= n_copy;
3518 offset += n_copy;
3519 buffer += n_copy;
3520 n_done += n_copy;
3521 }
3522 return n_done;
3523}
3524
3525int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
3526 int n_bytes, int write_through)
3527{
3528
3529 int chunk;
3530 u32 start;
3531 int n_copy;
3532 int n = n_bytes;
3533 int n_done = 0;
3534 int n_writeback;
3535 loff_t start_write = offset;
3536 int chunk_written = 0;
3537 u32 n_bytes_read;
3538 loff_t chunk_start;
3539 struct yaffs_dev *dev;
3540
3541 dev = in->my_dev;
3542
3543 while (n > 0 && chunk_written >= 0) {
3544 yaffs_addr_to_chunk(dev, offset, &chunk, &start);
3545
3546 if (((loff_t)chunk) *
3547 dev->data_bytes_per_chunk + start != offset ||
3548 start >= dev->data_bytes_per_chunk) {
3549 yaffs_trace(YAFFS_TRACE_ERROR,
3550 "AddrToChunk of offset %lld gives chunk %d start %d",
3551 (long long)offset, chunk, start);
3552 }
3553 chunk++; /* File pos to chunk in file offset */
3554
3555 /* OK now check for the curveball where the start and end are in
3556 * the same chunk.
3557 */
3558
3559 if ((start + n) < dev->data_bytes_per_chunk) {
3560 n_copy = n;
3561
3562 /* Now calculate how many bytes to write back....
3563 * If we're overwriting and not writing to then end of
3564 * file then we need to write back as much as was there
3565 * before.
3566 */
3567
3568 chunk_start = (((loff_t)(chunk - 1)) *
3569 dev->data_bytes_per_chunk);
3570
3571 if (chunk_start > in->variant.file_variant.file_size)
3572 n_bytes_read = 0; /* Past end of file */
3573 else
3574 n_bytes_read =
3575 in->variant.file_variant.file_size -
3576 chunk_start;
3577
3578 if (n_bytes_read > dev->data_bytes_per_chunk)
3579 n_bytes_read = dev->data_bytes_per_chunk;
3580
3581 n_writeback =
3582 (n_bytes_read >
3583 (start + n)) ? n_bytes_read : (start + n);
3584
3585 if (n_writeback < 0 ||
3586 n_writeback > (int)dev->data_bytes_per_chunk)
3587 BUG();
3588
3589 } else {
3590 n_copy = dev->data_bytes_per_chunk - start;
3591 n_writeback = dev->data_bytes_per_chunk;
3592 }
3593
3594 if (n_copy != (int)dev->data_bytes_per_chunk ||
3595 !dev->param.cache_bypass_aligned ||
3596 dev->param.inband_tags) {
3597 /* An incomplete start or end chunk (or maybe both
3598 * start and end chunk), or we're using inband tags,
3599 * or we're forcing writes through the cache,
3600 * so we want to use the cache buffers.
3601 */
3602 if (dev->param.n_caches > 0) {
3603 struct yaffs_cache *cache;
3604
3605 /* If we can't find the data in the cache, then
3606 * load the cache */
3607 cache = yaffs_find_chunk_cache(in, chunk);
3608
3609 if (!cache &&
3610 yaffs_check_alloc_available(dev, 1)) {
3611 cache = yaffs_grab_chunk_cache(dev);
3612 cache->object = in;
3613 cache->chunk_id = chunk;
3614 cache->dirty = 0;
3615 cache->locked = 0;
3616 yaffs_rd_data_obj(in, chunk,
3617 cache->data);
3618 } else if (cache &&
3619 !cache->dirty &&
3620 !yaffs_check_alloc_available(dev,
3621 1)) {
3622 /* Drop the cache if it was a read cache
3623 * item and no space check has been made
3624 * for it.
3625 */
3626 cache = NULL;
3627 }
3628
3629 if (cache) {
3630 yaffs_use_cache(dev, cache, 1);
3631 cache->locked = 1;
3632
3633 memcpy(&cache->data[start], buffer,
3634 n_copy);
3635
3636 cache->locked = 0;
3637 cache->n_bytes = n_writeback;
3638
3639 if (write_through) {
3640 chunk_written =
3641 yaffs_wr_data_obj
3642 (cache->object,
3643 cache->chunk_id,
3644 cache->data,
3645 cache->n_bytes, 1);
3646 cache->dirty = 0;
3647 }
3648 } else {
3649 chunk_written = -1; /* fail write */
3650 }
3651 } else {
3652 /* An incomplete start or end chunk (or maybe
3653 * both start and end chunk). Read into the
3654 * local buffer then copy over and write back.
3655 */
3656
3657 u8 *local_buffer = yaffs_get_temp_buffer(dev);
3658
3659 yaffs_rd_data_obj(in, chunk, local_buffer);
3660 memcpy(&local_buffer[start], buffer, n_copy);
3661
3662 chunk_written =
3663 yaffs_wr_data_obj(in, chunk,
3664 local_buffer,
3665 n_writeback, 0);
3666
3667 yaffs_release_temp_buffer(dev, local_buffer);
3668 }
3669 } else {
3670 /* A full chunk. Write directly from the buffer. */
3671
3672 chunk_written =
3673 yaffs_wr_data_obj(in, chunk, buffer,
3674 dev->data_bytes_per_chunk, 0);
3675
3676 /* Since we've overwritten the cached data,
3677 * we better invalidate it. */
3678 yaffs_invalidate_chunk_cache(in, chunk);
3679 }
3680
3681 if (chunk_written >= 0) {
3682 n -= n_copy;
3683 offset += n_copy;
3684 buffer += n_copy;
3685 n_done += n_copy;
3686 }
3687 }
3688
3689 /* Update file object */
3690
3691 if ((start_write + n_done) > in->variant.file_variant.file_size)
3692 in->variant.file_variant.file_size = (start_write + n_done);
3693
3694 in->dirty = 1;
3695 return n_done;
3696}
3697
3698int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
3699 int n_bytes, int write_through)
3700{
3701 yaffs2_handle_hole(in, offset);
3702 return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
3703}
3704
3705/* ---------------------- File resizing stuff ------------------ */
3706
3707static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
3708{
3709
3710 struct yaffs_dev *dev = in->my_dev;
3711 loff_t old_size = in->variant.file_variant.file_size;
3712 int i;
3713 int chunk_id;
3714 u32 dummy;
3715 int last_del;
3716 int start_del;
3717
3718 if (old_size > 0)
3719 yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
3720 else
3721 last_del = 0;
3722
3723 yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
3724 &start_del, &dummy);
3725 last_del++;
3726 start_del++;
3727
3728 /* Delete backwards so that we don't end up with holes if
3729 * power is lost part-way through the operation.
3730 */
3731 for (i = last_del; i >= start_del; i--) {
3732 /* NB this could be optimised somewhat,
3733 * eg. could retrieve the tags and write them without
3734 * using yaffs_chunk_del
3735 */
3736
3737 chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
3738
3739 if (chunk_id < 1)
3740 continue;
3741
3742 if ((u32)chunk_id <
3743 (dev->internal_start_block * dev->param.chunks_per_block) ||
3744 (u32)chunk_id >=
3745 ((dev->internal_end_block + 1) *
3746 dev->param.chunks_per_block)) {
3747 yaffs_trace(YAFFS_TRACE_ALWAYS,
3748 "Found daft chunk_id %d for %d",
3749 chunk_id, i);
3750 } else {
3751 in->n_data_chunks--;
3752 yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
3753 }
3754 }
3755}
3756
3757void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
3758{
3759 int new_full;
3760 u32 new_partial;
3761 struct yaffs_dev *dev = obj->my_dev;
3762
3763 yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
3764
3765 yaffs_prune_chunks(obj, new_size);
3766
3767 if (new_partial != 0) {
3768 int last_chunk = 1 + new_full;
3769 u8 *local_buffer = yaffs_get_temp_buffer(dev);
3770
3771 /* Rewrite the last chunk with its new size and zero pad */
3772 yaffs_rd_data_obj(obj, last_chunk, local_buffer);
3773 memset(local_buffer + new_partial, 0,
3774 dev->data_bytes_per_chunk - new_partial);
3775
3776 yaffs_wr_data_obj(obj, last_chunk, local_buffer,
3777 new_partial, 1);
3778
3779 yaffs_release_temp_buffer(dev, local_buffer);
3780 }
3781
3782 obj->variant.file_variant.file_size = new_size;
3783 obj->variant.file_variant.stored_size = new_size;
3784
3785 yaffs_prune_tree(dev, &obj->variant.file_variant);
3786}
3787
3788int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
3789{
3790 struct yaffs_dev *dev = in->my_dev;
3791 loff_t old_size = in->variant.file_variant.file_size;
3792
3793 yaffs_flush_file_cache(in, 1);
3794 yaffs_invalidate_whole_cache(in);
3795
3796 yaffs_check_gc(dev, 0);
3797
3798 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
3799 return YAFFS_FAIL;
3800
3801 if (new_size == old_size)
3802 return YAFFS_OK;
3803
3804 if (new_size > old_size) {
3805 yaffs2_handle_hole(in, new_size);
3806 in->variant.file_variant.file_size = new_size;
3807 } else {
3808 /* new_size < old_size */
3809 yaffs_resize_file_down(in, new_size);
3810 }
3811
3812 /* Write a new object header to reflect the resize.
3813 * show we've shrunk the file, if need be
3814 * Do this only if the file is not in the deleted directories
3815 * and is not shadowed.
3816 */
3817 if (in->parent &&
3818 !in->is_shadowed &&
3819 in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
3820 in->parent->obj_id != YAFFS_OBJECTID_DELETED)
3821 yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
3822
3823 return YAFFS_OK;
3824}
3825
3826int yaffs_flush_file(struct yaffs_obj *in,
3827 int update_time,
3828 int data_sync,
3829 int discard_cache)
3830{
3831 if (!in->dirty)
3832 return YAFFS_OK;
3833
3834 yaffs_flush_file_cache(in, discard_cache);
3835
3836 if (data_sync)
3837 return YAFFS_OK;
3838
3839 if (update_time)
3840 yaffs_load_current_time(in, 0, 0);
3841
3842 return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
3843 YAFFS_OK : YAFFS_FAIL;
3844}
3845
3846
3847/* yaffs_del_file deletes the whole file data
3848 * and the inode associated with the file.
3849 * It does not delete the links associated with the file.
3850 */
3851static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
3852{
3853 int ret_val;
3854 int del_now = 0;
3855 struct yaffs_dev *dev = in->my_dev;
3856
3857 if (!in->my_inode)
3858 del_now = 1;
3859
3860 if (del_now) {
3861 ret_val =
3862 yaffs_change_obj_name(in, in->my_dev->del_dir,
3863 _Y("deleted"), 0, 0);
3864 yaffs_trace(YAFFS_TRACE_TRACING,
3865 "yaffs: immediate deletion of file %d",
3866 in->obj_id);
3867 in->deleted = 1;
3868 in->my_dev->n_deleted_files++;
3869 if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3870 yaffs_resize_file(in, 0);
3871 yaffs_soft_del_file(in);
3872 } else {
3873 ret_val =
3874 yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
3875 _Y("unlinked"), 0, 0);
3876 }
3877 return ret_val;
3878}
3879
3880static int yaffs_del_file(struct yaffs_obj *in)
3881{
3882 int ret_val = YAFFS_OK;
3883 int deleted; /* Need to cache value on stack if in is freed */
3884 struct yaffs_dev *dev = in->my_dev;
3885
3886 if (dev->param.disable_soft_del || dev->param.is_yaffs2)
3887 yaffs_resize_file(in, 0);
3888
3889 if (in->n_data_chunks > 0) {
3890 /* Use soft deletion if there is data in the file.
3891 * That won't be the case if it has been resized to zero.
3892 */
3893 if (!in->unlinked)
3894 ret_val = yaffs_unlink_file_if_needed(in);
3895
3896 deleted = in->deleted;
3897
3898 if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
3899 in->deleted = 1;
3900 deleted = 1;
3901 in->my_dev->n_deleted_files++;
3902 yaffs_soft_del_file(in);
3903 }
3904 return deleted ? YAFFS_OK : YAFFS_FAIL;
3905 } else {
3906 /* The file has no data chunks so we toss it immediately */
3907 yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
3908 in->variant.file_variant.top = NULL;
3909 yaffs_generic_obj_del(in);
3910
3911 return YAFFS_OK;
3912 }
3913}
3914
3915int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
3916{
3917 return (obj &&
3918 obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
3919 !(list_empty(&obj->variant.dir_variant.children));
3920}
3921
3922static int yaffs_del_dir(struct yaffs_obj *obj)
3923{
3924 /* First check that the directory is empty. */
3925 if (yaffs_is_non_empty_dir(obj))
3926 return YAFFS_FAIL;
3927
3928 return yaffs_generic_obj_del(obj);
3929}
3930
3931static int yaffs_del_symlink(struct yaffs_obj *in)
3932{
3933 kfree(in->variant.symlink_variant.alias);
3934 in->variant.symlink_variant.alias = NULL;
3935
3936 return yaffs_generic_obj_del(in);
3937}
3938
3939static int yaffs_del_link(struct yaffs_obj *in)
3940{
3941 /* remove this hardlink from the list associated with the equivalent
3942 * object
3943 */
3944 list_del_init(&in->hard_links);
3945 return yaffs_generic_obj_del(in);
3946}
3947
3948int yaffs_del_obj(struct yaffs_obj *obj)
3949{
3950 int ret_val = -1;
3951
3952 switch (obj->variant_type) {
3953 case YAFFS_OBJECT_TYPE_FILE:
3954 ret_val = yaffs_del_file(obj);
3955 break;
3956 case YAFFS_OBJECT_TYPE_DIRECTORY:
3957 if (!list_empty(&obj->variant.dir_variant.dirty)) {
3958 yaffs_trace(YAFFS_TRACE_BACKGROUND,
3959 "Remove object %d from dirty directories",
3960 obj->obj_id);
3961 list_del_init(&obj->variant.dir_variant.dirty);
3962 }
3963 return yaffs_del_dir(obj);
3964 break;
3965 case YAFFS_OBJECT_TYPE_SYMLINK:
3966 ret_val = yaffs_del_symlink(obj);
3967 break;
3968 case YAFFS_OBJECT_TYPE_HARDLINK:
3969 ret_val = yaffs_del_link(obj);
3970 break;
3971 case YAFFS_OBJECT_TYPE_SPECIAL:
3972 ret_val = yaffs_generic_obj_del(obj);
3973 break;
3974 case YAFFS_OBJECT_TYPE_UNKNOWN:
3975 ret_val = 0;
3976 break; /* should not happen. */
3977 }
3978 return ret_val;
3979}
3980
3981
3982static void yaffs_empty_dir_to_dir(struct yaffs_obj *from_dir,
3983 struct yaffs_obj *to_dir)
3984{
3985 struct yaffs_obj *obj;
3986 struct list_head *lh;
3987 struct list_head *n;
3988
3989 list_for_each_safe(lh, n, &from_dir->variant.dir_variant.children) {
3990 obj = list_entry(lh, struct yaffs_obj, siblings);
3991 yaffs_add_obj_to_dir(to_dir, obj);
3992 }
3993}
3994
3995struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
3996 enum yaffs_obj_type type)
3997{
3998 /* Tear down the old variant */
3999 switch (obj->variant_type) {
4000 case YAFFS_OBJECT_TYPE_FILE:
4001 /* Nuke file data */
4002 yaffs_resize_file(obj, 0);
4003 yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
4004 obj->variant.file_variant.top = NULL;
4005 break;
4006 case YAFFS_OBJECT_TYPE_DIRECTORY:
4007 /* Put the children in lost and found. */
4008 yaffs_empty_dir_to_dir(obj, obj->my_dev->lost_n_found);
4009 if (!list_empty(&obj->variant.dir_variant.dirty))
4010 list_del_init(&obj->variant.dir_variant.dirty);
4011 break;
4012 case YAFFS_OBJECT_TYPE_SYMLINK:
4013 /* Nuke symplink data */
4014 kfree(obj->variant.symlink_variant.alias);
4015 obj->variant.symlink_variant.alias = NULL;
4016 break;
4017 case YAFFS_OBJECT_TYPE_HARDLINK:
4018 list_del_init(&obj->hard_links);
4019 break;
4020 default:
4021 break;
4022 }
4023
4024 memset(&obj->variant, 0, sizeof(obj->variant));
4025
4026 /*Set up new variant if the memset is not enough. */
4027 switch (type) {
4028 case YAFFS_OBJECT_TYPE_DIRECTORY:
4029 INIT_LIST_HEAD(&obj->variant.dir_variant.children);
4030 INIT_LIST_HEAD(&obj->variant.dir_variant.dirty);
4031 break;
4032 case YAFFS_OBJECT_TYPE_FILE:
4033 case YAFFS_OBJECT_TYPE_SYMLINK:
4034 case YAFFS_OBJECT_TYPE_HARDLINK:
4035 default:
4036 break;
4037 }
4038
4039 obj->variant_type = type;
4040
4041 return obj;
4042
4043}
4044
4045static int yaffs_unlink_worker(struct yaffs_obj *obj)
4046{
4047 int del_now = 0;
4048
4049 if (!obj)
4050 return YAFFS_FAIL;
4051
4052 if (!obj->my_inode)
4053 del_now = 1;
4054
4055 yaffs_update_parent(obj->parent);
4056
4057 if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
4058 return yaffs_del_link(obj);
4059 } else if (!list_empty(&obj->hard_links)) {
4060 /* Curve ball: We're unlinking an object that has a hardlink.
4061 *
4062 * This problem arises because we are not strictly following
4063 * The Linux link/inode model.
4064 *
4065 * We can't really delete the object.
4066 * Instead, we do the following:
4067 * - Select a hardlink.
4068 * - Unhook it from the hard links
4069 * - Move it from its parent directory so that the rename works.
4070 * - Rename the object to the hardlink's name.
4071 * - Delete the hardlink
4072 */
4073
4074 struct yaffs_obj *hl;
4075 struct yaffs_obj *parent;
4076 int ret_val;
4077 YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
4078
4079 hl = list_entry(obj->hard_links.next, struct yaffs_obj,
4080 hard_links);
4081
4082 yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
4083 parent = hl->parent;
4084
4085 list_del_init(&hl->hard_links);
4086
4087 yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
4088
4089 ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
4090
4091 if (ret_val == YAFFS_OK)
4092 ret_val = yaffs_generic_obj_del(hl);
4093
4094 return ret_val;
4095
4096 } else if (del_now) {
4097 switch (obj->variant_type) {
4098 case YAFFS_OBJECT_TYPE_FILE:
4099 return yaffs_del_file(obj);
4100 break;
4101 case YAFFS_OBJECT_TYPE_DIRECTORY:
4102 list_del_init(&obj->variant.dir_variant.dirty);
4103 return yaffs_del_dir(obj);
4104 break;
4105 case YAFFS_OBJECT_TYPE_SYMLINK:
4106 return yaffs_del_symlink(obj);
4107 break;
4108 case YAFFS_OBJECT_TYPE_SPECIAL:
4109 return yaffs_generic_obj_del(obj);
4110 break;
4111 case YAFFS_OBJECT_TYPE_HARDLINK:
4112 case YAFFS_OBJECT_TYPE_UNKNOWN:
4113 default:
4114 return YAFFS_FAIL;
4115 }
4116 } else if (yaffs_is_non_empty_dir(obj)) {
4117 return YAFFS_FAIL;
4118 } else {
4119 return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
4120 _Y("unlinked"), 0, 0);
4121 }
4122}
4123
4124int yaffs_unlink_obj(struct yaffs_obj *obj)
4125{
4126 if (obj && obj->unlink_allowed)
4127 return yaffs_unlink_worker(obj);
4128
4129 return YAFFS_FAIL;
4130}
4131
4132int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
4133{
4134 struct yaffs_obj *obj;
4135
4136 obj = yaffs_find_by_name(dir, name);
4137 return yaffs_unlink_obj(obj);
4138}
4139
4140/* Note:
4141 * If old_name is NULL then we take old_dir as the object to be renamed.
4142 */
4143int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
4144 struct yaffs_obj *new_dir, const YCHAR *new_name)
4145{
4146 struct yaffs_obj *obj = NULL;
4147 struct yaffs_obj *existing_target = NULL;
4148 int force = 0;
4149 int result;
4150 struct yaffs_dev *dev;
4151
4152 if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4153 BUG();
4154 return YAFFS_FAIL;
4155 }
4156 if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4157 BUG();
4158 return YAFFS_FAIL;
4159 }
4160
4161 dev = old_dir->my_dev;
4162
4163#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
4164 /* Special case for case insemsitive systems.
4165 * While look-up is case insensitive, the name isn't.
4166 * Therefore we might want to change x.txt to X.txt
4167 */
4168 if (old_dir == new_dir &&
4169 old_name && new_name &&
4170 strcmp(old_name, new_name) == 0)
4171 force = 1;
4172#endif
4173
4174 if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
4175 YAFFS_MAX_NAME_LENGTH)
4176 /* ENAMETOOLONG */
4177 return YAFFS_FAIL;
4178
4179 if (old_name)
4180 obj = yaffs_find_by_name(old_dir, old_name);
4181 else{
4182 obj = old_dir;
4183 old_dir = obj->parent;
4184 }
4185
4186 if (obj && obj->rename_allowed) {
4187 /* Now handle an existing target, if there is one */
4188 existing_target = yaffs_find_by_name(new_dir, new_name);
4189 if (yaffs_is_non_empty_dir(existing_target)) {
4190 return YAFFS_FAIL; /* ENOTEMPTY */
4191 } else if (existing_target && existing_target != obj) {
4192 /* Nuke the target first, using shadowing,
4193 * but only if it isn't the same object.
4194 *
4195 * Note we must disable gc here otherwise it can mess
4196 * up the shadowing.
4197 *
4198 */
4199 dev->gc_disable = 1;
4200 yaffs_change_obj_name(obj, new_dir, new_name, force,
4201 existing_target->obj_id);
4202 existing_target->is_shadowed = 1;
4203 yaffs_unlink_obj(existing_target);
4204 dev->gc_disable = 0;
4205 }
4206
4207 result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
4208
4209 yaffs_update_parent(old_dir);
4210 if (new_dir != old_dir)
4211 yaffs_update_parent(new_dir);
4212
4213 return result;
4214 }
4215 return YAFFS_FAIL;
4216}
4217
4218/*----------------------- Initialisation Scanning ---------------------- */
4219
4220void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
4221 int backward_scanning)
4222{
4223 struct yaffs_obj *obj;
4224
4225 if (backward_scanning) {
4226 /* Handle YAFFS2 case (backward scanning)
4227 * If the shadowed object exists then ignore.
4228 */
4229 obj = yaffs_find_by_number(dev, obj_id);
4230 if (obj)
4231 return;
4232 }
4233
4234 /* Let's create it (if it does not exist) assuming it is a file so that
4235 * it can do shrinking etc.
4236 * We put it in unlinked dir to be cleaned up after the scanning
4237 */
4238 obj =
4239 yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
4240 if (!obj)
4241 return;
4242 obj->is_shadowed = 1;
4243 yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
4244 obj->variant.file_variant.shrink_size = 0;
4245 obj->valid = 1; /* So that we don't read any other info. */
4246}
4247
4248void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
4249{
4250 struct list_head *lh;
4251 struct list_head *save;
4252 struct yaffs_obj *hl;
4253 struct yaffs_obj *in;
4254
4255 list_for_each_safe(lh, save, hard_list) {
4256 hl = list_entry(lh, struct yaffs_obj, hard_links);
4257 in = yaffs_find_by_number(dev,
4258 hl->variant.hardlink_variant.equiv_id);
4259
4260 if (in) {
4261 /* Add the hardlink pointers */
4262 hl->variant.hardlink_variant.equiv_obj = in;
4263 list_add(&hl->hard_links, &in->hard_links);
4264 } else {
4265 /* Todo Need to report/handle this better.
4266 * Got a problem... hardlink to a non-existant object
4267 */
4268 hl->variant.hardlink_variant.equiv_obj = NULL;
4269 INIT_LIST_HEAD(&hl->hard_links);
4270 }
4271 }
4272}
4273
4274static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
4275{
4276 /*
4277 * Sort out state of unlinked and deleted objects after scanning.
4278 */
4279 struct list_head *i;
4280 struct list_head *n;
4281 struct yaffs_obj *l;
4282
4283 if (dev->read_only)
4284 return;
4285
4286 /* Soft delete all the unlinked files */
4287 list_for_each_safe(i, n,
4288 &dev->unlinked_dir->variant.dir_variant.children) {
4289 l = list_entry(i, struct yaffs_obj, siblings);
4290 yaffs_del_obj(l);
4291 }
4292
4293 list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
4294 l = list_entry(i, struct yaffs_obj, siblings);
4295 yaffs_del_obj(l);
4296 }
4297}
4298
4299/*
4300 * This code iterates through all the objects making sure that they are rooted.
4301 * Any unrooted objects are re-rooted in lost+found.
4302 * An object needs to be in one of:
4303 * - Directly under deleted, unlinked
4304 * - Directly or indirectly under root.
4305 *
4306 * Note:
4307 * This code assumes that we don't ever change the current relationships
4308 * between directories:
4309 * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
4310 * lost-n-found->parent == root_dir
4311 *
4312 * This fixes the problem where directories might have inadvertently been
4313 * deleted leaving the object "hanging" without being rooted in the
4314 * directory tree.
4315 */
4316
4317static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
4318{
4319 return (obj == dev->del_dir ||
4320 obj == dev->unlinked_dir || obj == dev->root_dir);
4321}
4322
4323static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
4324{
4325 struct yaffs_obj *obj;
4326 struct yaffs_obj *parent;
4327 int i;
4328 struct list_head *lh;
4329 struct list_head *n;
4330 int depth_limit;
4331 int hanging;
4332
4333 if (dev->read_only)
4334 return;
4335
4336 /* Iterate through the objects in each hash entry,
4337 * looking at each object.
4338 * Make sure it is rooted.
4339 */
4340
4341 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
4342 list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
4343 obj = list_entry(lh, struct yaffs_obj, hash_link);
4344 parent = obj->parent;
4345
4346 if (yaffs_has_null_parent(dev, obj)) {
4347 /* These directories are not hanging */
4348 hanging = 0;
4349 } else if (!parent ||
4350 parent->variant_type !=
4351 YAFFS_OBJECT_TYPE_DIRECTORY) {
4352 hanging = 1;
4353 } else if (yaffs_has_null_parent(dev, parent)) {
4354 hanging = 0;
4355 } else {
4356 /*
4357 * Need to follow the parent chain to
4358 * see if it is hanging.
4359 */
4360 hanging = 0;
4361 depth_limit = 100;
4362
4363 while (parent != dev->root_dir &&
4364 parent->parent &&
4365 parent->parent->variant_type ==
4366 YAFFS_OBJECT_TYPE_DIRECTORY &&
4367 depth_limit > 0) {
4368 parent = parent->parent;
4369 depth_limit--;
4370 }
4371 if (parent != dev->root_dir)
4372 hanging = 1;
4373 }
4374 if (hanging) {
4375 yaffs_trace(YAFFS_TRACE_SCAN,
4376 "Hanging object %d moved to lost and found",
4377 obj->obj_id);
4378 yaffs_add_obj_to_dir(dev->lost_n_found, obj);
4379 }
4380 }
4381 }
4382}
4383
4384/*
4385 * Delete directory contents for cleaning up lost and found.
4386 */
4387static void yaffs_del_dir_contents(struct yaffs_obj *dir)
4388{
4389 struct yaffs_obj *obj;
4390 struct list_head *lh;
4391 struct list_head *n;
4392
4393 if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
4394 BUG();
4395
4396 list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
4397 obj = list_entry(lh, struct yaffs_obj, siblings);
4398 if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
4399 yaffs_del_dir_contents(obj);
4400 yaffs_trace(YAFFS_TRACE_SCAN,
4401 "Deleting lost_found object %d",
4402 obj->obj_id);
4403 yaffs_unlink_obj(obj);
4404 }
4405}
4406
4407static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
4408{
4409 yaffs_del_dir_contents(dev->lost_n_found);
4410}
4411
4412
4413struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
4414 const YCHAR *name)
4415{
4416 int sum;
4417 struct list_head *i;
4418 YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
4419 struct yaffs_obj *l;
4420
4421 if (!name)
4422 return NULL;
4423
4424 if (!directory) {
4425 yaffs_trace(YAFFS_TRACE_ALWAYS,
4426 "tragedy: yaffs_find_by_name: null pointer directory"
4427 );
4428 BUG();
4429 return NULL;
4430 }
4431 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
4432 yaffs_trace(YAFFS_TRACE_ALWAYS,
4433 "tragedy: yaffs_find_by_name: non-directory"
4434 );
4435 BUG();
4436 }
4437
4438 sum = yaffs_calc_name_sum(name);
4439
4440 list_for_each(i, &directory->variant.dir_variant.children) {
4441 l = list_entry(i, struct yaffs_obj, siblings);
4442
4443 if (l->parent != directory)
4444 BUG();
4445
4446 yaffs_check_obj_details_loaded(l);
4447
4448 /* Special case for lost-n-found */
4449 if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
4450 if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
4451 return l;
4452 } else if (l->sum == sum || l->hdr_chunk <= 0) {
4453 /* LostnFound chunk called Objxxx
4454 * Do a real check
4455 */
4456 yaffs_get_obj_name(l, buffer,
4457 YAFFS_MAX_NAME_LENGTH + 1);
4458 if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
4459 return l;
4460 }
4461 }
4462 return NULL;
4463}
4464
4465/* GetEquivalentObject dereferences any hard links to get to the
4466 * actual object.
4467 */
4468
4469struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
4470{
4471 if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
4472 obj = obj->variant.hardlink_variant.equiv_obj;
4473 yaffs_check_obj_details_loaded(obj);
4474 }
4475 return obj;
4476}
4477
4478/*
4479 * A note or two on object names.
4480 * * If the object name is missing, we then make one up in the form objnnn
4481 *
4482 * * ASCII names are stored in the object header's name field from byte zero
4483 * * Unicode names are historically stored starting from byte zero.
4484 *
4485 * Then there are automatic Unicode names...
4486 * The purpose of these is to save names in a way that can be read as
4487 * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
4488 * system to share files.
4489 *
4490 * These automatic unicode are stored slightly differently...
4491 * - If the name can fit in the ASCII character space then they are saved as
4492 * ascii names as per above.
4493 * - If the name needs Unicode then the name is saved in Unicode
4494 * starting at oh->name[1].
4495
4496 */
4497static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
4498 int buffer_size)
4499{
4500 /* Create an object name if we could not find one. */
4501 if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
4502 YCHAR local_name[20];
4503 YCHAR num_string[20];
4504 YCHAR *x = &num_string[19];
4505 unsigned v = obj->obj_id;
4506 num_string[19] = 0;
4507 while (v > 0) {
4508 x--;
4509 *x = '0' + (v % 10);
4510 v /= 10;
4511 }
4512 /* make up a name */
4513 strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
4514 strcat(local_name, x);
4515 strncpy(name, local_name, buffer_size - 1);
4516 }
4517}
4518
4519int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
4520{
4521 memset(name, 0, buffer_size * sizeof(YCHAR));
4522 yaffs_check_obj_details_loaded(obj);
4523 if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
4524 strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
4525 } else if (obj->short_name[0]) {
4526 strcpy(name, obj->short_name);
4527 } else if (obj->hdr_chunk > 0) {
4528 int result;
4529 u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
4530
4531 struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
4532
4533 memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
4534
4535 if (obj->hdr_chunk > 0) {
4536 result = yaffs_rd_chunk_tags_nand(obj->my_dev,
4537 obj->hdr_chunk, buffer, NULL);
4538 if (result == YAFFS_OK)
4539 yaffs_load_name_from_oh(obj->my_dev, name,
4540 oh->name, buffer_size);
4541 }
4542 yaffs_release_temp_buffer(obj->my_dev, buffer);
4543 }
4544
4545 yaffs_fix_null_name(obj, name, buffer_size);
4546
4547 return strnlen(name, YAFFS_MAX_NAME_LENGTH);
4548}
4549
4550loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
4551{
4552 /* Dereference any hard linking */
4553 obj = yaffs_get_equivalent_obj(obj);
4554
4555 if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
4556 return obj->variant.file_variant.file_size;
4557 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
4558 if (!obj->variant.symlink_variant.alias)
4559 return 0;
4560 return strnlen(obj->variant.symlink_variant.alias,
4561 YAFFS_MAX_ALIAS_LENGTH);
4562 } else {
4563 /* Only a directory should drop through to here */
4564 return obj->my_dev->data_bytes_per_chunk;
4565 }
4566}
4567
4568int yaffs_get_obj_link_count(struct yaffs_obj *obj)
4569{
4570 int count = 0;
4571 struct list_head *i;
4572
4573 if (!obj->unlinked)
4574 count++; /* the object itself */
4575
4576 list_for_each(i, &obj->hard_links)
4577 count++; /* add the hard links; */
4578
4579 return count;
4580}
4581
4582int yaffs_get_obj_inode(struct yaffs_obj *obj)
4583{
4584 obj = yaffs_get_equivalent_obj(obj);
4585
4586 return obj->obj_id;
4587}
4588
4589unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
4590{
4591 obj = yaffs_get_equivalent_obj(obj);
4592
4593 switch (obj->variant_type) {
4594 case YAFFS_OBJECT_TYPE_FILE:
4595 return DT_REG;
4596 break;
4597 case YAFFS_OBJECT_TYPE_DIRECTORY:
4598 return DT_DIR;
4599 break;
4600 case YAFFS_OBJECT_TYPE_SYMLINK:
4601 return DT_LNK;
4602 break;
4603 case YAFFS_OBJECT_TYPE_HARDLINK:
4604 return DT_REG;
4605 break;
4606 case YAFFS_OBJECT_TYPE_SPECIAL:
4607 if (S_ISFIFO(obj->yst_mode))
4608 return DT_FIFO;
4609 if (S_ISCHR(obj->yst_mode))
4610 return DT_CHR;
4611 if (S_ISBLK(obj->yst_mode))
4612 return DT_BLK;
4613 if (S_ISSOCK(obj->yst_mode))
4614 return DT_SOCK;
4615 return DT_REG;
4616 break;
4617 default:
4618 return DT_REG;
4619 break;
4620 }
4621}
4622
4623YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
4624{
4625 obj = yaffs_get_equivalent_obj(obj);
4626 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
4627 return yaffs_clone_str(obj->variant.symlink_variant.alias);
4628 else
4629 return yaffs_clone_str(_Y(""));
4630}
4631
4632/*--------------------------- Initialisation code -------------------------- */
4633
4634static int yaffs_check_dev_fns(struct yaffs_dev *dev)
4635{
4636 struct yaffs_driver *drv = &dev->drv;
4637 struct yaffs_tags_handler *tagger = &dev->tagger;
4638
4639 /* Common functions, gotta have */
4640 if (!drv->drv_read_chunk_fn ||
4641 !drv->drv_write_chunk_fn ||
4642 !drv->drv_erase_fn)
4643 return 0;
4644
4645 if (dev->param.is_yaffs2 &&
4646 (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn))
4647 return 0;
4648
4649 /* Install the default tags marshalling functions if needed. */
4650 yaffs_tags_compat_install(dev);
4651 yaffs_tags_marshall_install(dev);
4652
4653 /* Check we now have the marshalling functions required. */
4654 if (!tagger->write_chunk_tags_fn ||
4655 !tagger->read_chunk_tags_fn ||
4656 !tagger->query_block_fn ||
4657 !tagger->mark_bad_fn)
4658 return 0;
4659
4660 return 1;
4661}
4662
4663static int yaffs_create_initial_dir(struct yaffs_dev *dev)
4664{
4665 /* Initialise the unlinked, deleted, root and lost+found directories */
4666 dev->lost_n_found = NULL;
4667 dev->root_dir = NULL;
4668 dev->unlinked_dir = NULL;
4669 dev->del_dir = NULL;
4670
4671 dev->unlinked_dir =
4672 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
4673 dev->del_dir =
4674 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
4675 dev->root_dir =
4676 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
4677 YAFFS_ROOT_MODE | S_IFDIR);
4678 dev->lost_n_found =
4679 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
4680 YAFFS_LOSTNFOUND_MODE | S_IFDIR);
4681
4682 if (dev->lost_n_found &&
4683 dev->root_dir &&
4684 dev->unlinked_dir &&
4685 dev->del_dir) {
4686 /* If lost-n-found is hidden then yank it out of the directory tree. */
4687 if (dev->param.hide_lost_n_found)
4688 list_del_init(&dev->lost_n_found->siblings);
4689 else
4690 yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
4691 return YAFFS_OK;
4692 }
4693 return YAFFS_FAIL;
4694}
4695
4696/* Low level init.
4697 * Typically only used by yaffs_guts_initialise, but also used by the
4698 * Low level yaffs driver tests.
4699 */
4700
4701int yaffs_guts_ll_init(struct yaffs_dev *dev)
4702{
4703
4704
4705 yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()");
4706
4707 if (!dev) {
4708 yaffs_trace(YAFFS_TRACE_ALWAYS,
4709 "yaffs: Need a device"
4710 );
4711 return YAFFS_FAIL;
4712 }
4713
4714 if (dev->ll_init)
4715 return YAFFS_OK;
4716
4717 dev->internal_start_block = dev->param.start_block;
4718 dev->internal_end_block = dev->param.end_block;
4719 dev->block_offset = 0;
4720 dev->chunk_offset = 0;
4721 dev->n_free_chunks = 0;
4722
4723 dev->gc_block = 0;
4724
4725 if (dev->param.start_block == 0) {
4726 dev->internal_start_block = dev->param.start_block + 1;
4727 dev->internal_end_block = dev->param.end_block + 1;
4728 dev->block_offset = 1;
4729 dev->chunk_offset = dev->param.chunks_per_block;
4730 }
4731
4732 /* Check geometry parameters. */
4733
4734 if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
4735 dev->param.total_bytes_per_chunk < 1024) ||
4736 (!dev->param.is_yaffs2 &&
4737 dev->param.total_bytes_per_chunk < 512) ||
4738 (dev->param.inband_tags && !dev->param.is_yaffs2) ||
4739 dev->param.chunks_per_block < 2 ||
4740 dev->param.n_reserved_blocks < 2 ||
4741 dev->internal_start_block <= 0 ||
4742 dev->internal_end_block <= 0 ||
4743 dev->internal_end_block <=
4744 (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
4745 ) {
4746 /* otherwise it is too small */
4747 yaffs_trace(YAFFS_TRACE_ALWAYS,
4748 "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
4749 dev->param.total_bytes_per_chunk,
4750 dev->param.is_yaffs2 ? "2" : "",
4751 dev->param.inband_tags);
4752 return YAFFS_FAIL;
4753 }
4754
4755 /* Sort out space for inband tags, if required */
4756 if (dev->param.inband_tags)
4757 dev->data_bytes_per_chunk =
4758 dev->param.total_bytes_per_chunk -
4759 sizeof(struct yaffs_packed_tags2_tags_only);
4760 else
4761 dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
4762
4763 /* Got the right mix of functions? */
4764 if (!yaffs_check_dev_fns(dev)) {
4765 /* Function missing */
4766 yaffs_trace(YAFFS_TRACE_ALWAYS,
4767 "device function(s) missing or wrong");
4768
4769 return YAFFS_FAIL;
4770 }
4771
4772 if (yaffs_init_nand(dev) != YAFFS_OK) {
4773 yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
4774 return YAFFS_FAIL;
4775 }
4776
4777 return YAFFS_OK;
4778}
4779
4780
4781int yaffs_guts_format_dev(struct yaffs_dev *dev)
4782{
4783 u32 i;
4784 enum yaffs_block_state state;
4785 u32 dummy;
4786
4787 if(yaffs_guts_ll_init(dev) != YAFFS_OK)
4788 return YAFFS_FAIL;
4789
4790 if(dev->is_mounted)
4791 return YAFFS_FAIL;
4792
4793 for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
4794 yaffs_query_init_block_state(dev, i, &state, &dummy);
4795 if (state != YAFFS_BLOCK_STATE_DEAD)
4796 yaffs_erase_block(dev, i);
4797 }
4798
4799 return YAFFS_OK;
4800}
4801
4802
4803int yaffs_guts_initialise(struct yaffs_dev *dev)
4804{
4805 int init_failed = 0;
4806 u32 x;
4807 u32 bits;
4808
4809 if(yaffs_guts_ll_init(dev) != YAFFS_OK)
4810 return YAFFS_FAIL;
4811
4812 if (dev->is_mounted) {
4813 yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
4814 return YAFFS_FAIL;
4815 }
4816
4817 dev->is_mounted = 1;
4818
4819 /* OK now calculate a few things for the device */
4820
4821 /*
4822 * Calculate all the chunk size manipulation numbers:
4823 */
4824 x = dev->data_bytes_per_chunk;
4825 /* We always use dev->chunk_shift and dev->chunk_div */
4826 dev->chunk_shift = calc_shifts(x);
4827 x >>= dev->chunk_shift;
4828 dev->chunk_div = x;
4829 /* We only use chunk mask if chunk_div is 1 */
4830 dev->chunk_mask = (1 << dev->chunk_shift) - 1;
4831
4832 /*
4833 * Calculate chunk_grp_bits.
4834 * We need to find the next power of 2 > than internal_end_block
4835 */
4836
4837 x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
4838
4839 bits = calc_shifts_ceiling(x);
4840
4841 /* Set up tnode width if wide tnodes are enabled. */
4842 if (!dev->param.wide_tnodes_disabled) {
4843 /* bits must be even so that we end up with 32-bit words */
4844 if (bits & 1)
4845 bits++;
4846 if (bits < 16)
4847 dev->tnode_width = 16;
4848 else
4849 dev->tnode_width = bits;
4850 } else {
4851 dev->tnode_width = 16;
4852 }
4853
4854 dev->tnode_mask = (1 << dev->tnode_width) - 1;
4855
4856 /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
4857 * so if the bitwidth of the
4858 * chunk range we're using is greater than 16 we need
4859 * to figure out chunk shift and chunk_grp_size
4860 */
4861
4862 if (bits <= dev->tnode_width)
4863 dev->chunk_grp_bits = 0;
4864 else
4865 dev->chunk_grp_bits = bits - dev->tnode_width;
4866
4867 dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
4868 if (dev->tnode_size < sizeof(struct yaffs_tnode))
4869 dev->tnode_size = sizeof(struct yaffs_tnode);
4870
4871 dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
4872
4873 if (dev->param.chunks_per_block < dev->chunk_grp_size) {
4874 /* We have a problem because the soft delete won't work if
4875 * the chunk group size > chunks per block.
4876 * This can be remedied by using larger "virtual blocks".
4877 */
4878 yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
4879
4880 return YAFFS_FAIL;
4881 }
4882
4883 /* Finished verifying the device, continue with initialisation */
4884
4885 /* More device initialisation */
4886 dev->all_gcs = 0;
4887 dev->passive_gc_count = 0;
4888 dev->oldest_dirty_gc_count = 0;
4889 dev->bg_gcs = 0;
4890 dev->gc_block_finder = 0;
4891 dev->buffered_block = -1;
4892 dev->doing_buffered_block_rewrite = 0;
4893 dev->n_deleted_files = 0;
4894 dev->n_bg_deletions = 0;
4895 dev->n_unlinked_files = 0;
4896 dev->n_ecc_fixed = 0;
4897 dev->n_ecc_unfixed = 0;
4898 dev->n_tags_ecc_fixed = 0;
4899 dev->n_tags_ecc_unfixed = 0;
4900 dev->n_erase_failures = 0;
4901 dev->n_erased_blocks = 0;
4902 dev->gc_disable = 0;
4903 dev->has_pending_prioritised_gc = 1; /* Assume the worst for now,
4904 * will get fixed on first GC */
4905 INIT_LIST_HEAD(&dev->dirty_dirs);
4906 dev->oldest_dirty_seq = 0;
4907 dev->oldest_dirty_block = 0;
4908
4909 yaffs_endian_config(dev);
4910
4911 /* Initialise temporary buffers and caches. */
4912 if (!yaffs_init_tmp_buffers(dev))
4913 init_failed = 1;
4914
4915 dev->cache = NULL;
4916 dev->gc_cleanup_list = NULL;
4917
4918 if (!init_failed && dev->param.n_caches > 0) {
4919 u32 i;
4920 void *buf;
4921 u32 cache_bytes =
4922 dev->param.n_caches * sizeof(struct yaffs_cache);
4923
4924 if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
4925 dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
4926
4927 dev->cache = kmalloc(cache_bytes, GFP_NOFS);
4928
4929 buf = (u8 *) dev->cache;
4930
4931 if (dev->cache)
4932 memset(dev->cache, 0, cache_bytes);
4933
4934 for (i = 0; i < dev->param.n_caches && buf; i++) {
4935 dev->cache[i].object = NULL;
4936 dev->cache[i].last_use = 0;
4937 dev->cache[i].dirty = 0;
4938 dev->cache[i].data = buf =
4939 kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
4940 }
4941 if (!buf)
4942 init_failed = 1;
4943
4944 dev->cache_last_use = 0;
4945 }
4946
4947 dev->cache_hits = 0;
4948
4949 if (!init_failed) {
4950 dev->gc_cleanup_list =
4951 kmalloc(dev->param.chunks_per_block * sizeof(u32),
4952 GFP_NOFS);
4953 if (!dev->gc_cleanup_list)
4954 init_failed = 1;
4955 }
4956
4957 if (dev->param.is_yaffs2)
4958 dev->param.use_header_file_size = 1;
4959
4960 if (!init_failed && !yaffs_init_blocks(dev))
4961 init_failed = 1;
4962
4963 yaffs_init_tnodes_and_objs(dev);
4964
4965 if (!init_failed && !yaffs_create_initial_dir(dev))
4966 init_failed = 1;
4967
4968 if (!init_failed && dev->param.is_yaffs2 &&
4969 !dev->param.disable_summary &&
4970 !yaffs_summary_init(dev))
4971 init_failed = 1;
4972
4973 if (!init_failed) {
4974 /* Now scan the flash. */
4975 if (dev->param.is_yaffs2) {
4976 if (yaffs2_checkpt_restore(dev)) {
4977 yaffs_check_obj_details_loaded(dev->root_dir);
4978 yaffs_trace(YAFFS_TRACE_CHECKPOINT |
4979 YAFFS_TRACE_MOUNT,
4980 "yaffs: restored from checkpoint"
4981 );
4982 } else {
4983
4984 /* Clean up the mess caused by an aborted
4985 * checkpoint load then scan backwards.
4986 */
4987 yaffs_deinit_blocks(dev);
4988
4989 yaffs_deinit_tnodes_and_objs(dev);
4990
4991 dev->n_erased_blocks = 0;
4992 dev->n_free_chunks = 0;
4993 dev->alloc_block = -1;
4994 dev->alloc_page = -1;
4995 dev->n_deleted_files = 0;
4996 dev->n_unlinked_files = 0;
4997 dev->n_bg_deletions = 0;
4998
4999 if (!init_failed && !yaffs_init_blocks(dev))
5000 init_failed = 1;
5001
5002 yaffs_init_tnodes_and_objs(dev);
5003
5004 if (!init_failed
5005 && !yaffs_create_initial_dir(dev))
5006 init_failed = 1;
5007
5008 if (!init_failed && !yaffs2_scan_backwards(dev))
5009 init_failed = 1;
5010 }
5011 } else if (!yaffs1_scan(dev)) {
5012 init_failed = 1;
5013 }
5014
5015 yaffs_strip_deleted_objs(dev);
5016 yaffs_fix_hanging_objs(dev);
5017 if (dev->param.empty_lost_n_found)
5018 yaffs_empty_l_n_f(dev);
5019 }
5020
5021 if (init_failed) {
5022 /* Clean up the mess */
5023 yaffs_trace(YAFFS_TRACE_TRACING,
5024 "yaffs: yaffs_guts_initialise() aborted.");
5025
5026 yaffs_deinitialise(dev);
5027 return YAFFS_FAIL;
5028 }
5029
5030 /* Zero out stats */
5031 dev->n_page_reads = 0;
5032 dev->n_page_writes = 0;
5033 dev->n_erasures = 0;
5034 dev->n_gc_copies = 0;
5035 dev->n_retried_writes = 0;
5036
5037 dev->n_retired_blocks = 0;
5038
5039 yaffs_verify_free_chunks(dev);
5040 yaffs_verify_blocks(dev);
5041
5042 /* Clean up any aborted checkpoint data */
5043 if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
5044 yaffs2_checkpt_invalidate(dev);
5045
5046 yaffs_trace(YAFFS_TRACE_TRACING,
5047 "yaffs: yaffs_guts_initialise() done.");
5048 return YAFFS_OK;
5049}
5050
5051void yaffs_deinitialise(struct yaffs_dev *dev)
5052{
5053 if (dev->is_mounted) {
5054 u32 i;
5055
5056 yaffs_deinit_blocks(dev);
5057 yaffs_deinit_tnodes_and_objs(dev);
5058 yaffs_summary_deinit(dev);
5059
5060 if (dev->param.n_caches > 0 && dev->cache) {
5061
5062 for (i = 0; i < dev->param.n_caches; i++) {
5063 kfree(dev->cache[i].data);
5064 dev->cache[i].data = NULL;
5065 }
5066
5067 kfree(dev->cache);
5068 dev->cache = NULL;
5069 }
5070
5071 kfree(dev->gc_cleanup_list);
5072
5073 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
5074 kfree(dev->temp_buffer[i].buffer);
5075 dev->temp_buffer[i].buffer = NULL;
5076 }
5077
5078 kfree(dev->checkpt_buffer);
5079 dev->checkpt_buffer = NULL;
5080 kfree(dev->checkpt_block_list);
5081 dev->checkpt_block_list = NULL;
5082
5083 dev->is_mounted = 0;
5084
5085 yaffs_deinit_nand(dev);
5086 }
5087}
5088
5089int yaffs_count_free_chunks(struct yaffs_dev *dev)
5090{
5091 int n_free = 0;
5092 u32 b;
5093 struct yaffs_block_info *blk;
5094
5095 blk = dev->block_info;
5096 for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
5097 switch (blk->block_state) {
5098 case YAFFS_BLOCK_STATE_EMPTY:
5099 case YAFFS_BLOCK_STATE_ALLOCATING:
5100 case YAFFS_BLOCK_STATE_COLLECTING:
5101 case YAFFS_BLOCK_STATE_FULL:
5102 n_free +=
5103 (dev->param.chunks_per_block - blk->pages_in_use +
5104 blk->soft_del_pages);
5105 break;
5106 default:
5107 break;
5108 }
5109 blk++;
5110 }
5111 return n_free;
5112}
5113
5114int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
5115{
5116 /* This is what we report to the outside world */
5117 int n_free;
5118 int n_dirty_caches;
5119 int blocks_for_checkpt;
5120 u32 i;
5121
5122 n_free = dev->n_free_chunks;
5123 n_free += dev->n_deleted_files;
5124
5125 /* Now count and subtract the number of dirty chunks in the cache. */
5126
5127 for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
5128 if (dev->cache[i].dirty)
5129 n_dirty_caches++;
5130 }
5131
5132 n_free -= n_dirty_caches;
5133
5134 n_free -=
5135 ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
5136
5137 /* Now figure checkpoint space and report that... */
5138 blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
5139
5140 n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
5141
5142 if (n_free < 0)
5143 n_free = 0;
5144
5145 return n_free;
5146}
5147
5148
5149/*
5150 * Marshalling functions to get loff_t file sizes into and out of
5151 * object headers.
5152 */
5153void yaffs_oh_size_load(struct yaffs_dev *dev,
5154 struct yaffs_obj_hdr *oh,
5155 loff_t fsize,
5156 int do_endian)
5157{
5158 oh->file_size_low = FSIZE_LOW(fsize);
5159
5160 oh->file_size_high = FSIZE_HIGH(fsize);
5161
5162 if (do_endian) {
5163 yaffs_do_endian_u32(dev, &oh->file_size_low);
5164 yaffs_do_endian_u32(dev, &oh->file_size_high);
5165 }
5166}
5167
5168loff_t yaffs_oh_to_size(struct yaffs_dev *dev, struct yaffs_obj_hdr *oh,
5169 int do_endian)
5170{
5171 loff_t retval;
5172
5173
5174 if (sizeof(loff_t) >= 8 && ~(oh->file_size_high)) {
5175 u32 low = oh->file_size_low;
5176 u32 high = oh->file_size_high;
5177
5178 if (do_endian) {
5179 yaffs_do_endian_u32 (dev, &low);
5180 yaffs_do_endian_u32 (dev, &high);
5181 }
5182 retval = FSIZE_COMBINE(high, low);
5183 } else {
5184 u32 low = oh->file_size_low;
5185
5186 if (do_endian)
5187 yaffs_do_endian_u32(dev, &low);
5188 retval = (loff_t)low;
5189 }
5190
5191 return retval;
5192}
5193
5194
5195void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
5196{
5197 u32 i;
5198 struct yaffs_block_info *bi;
5199 int s;
5200
5201 for(i = 0; i < 10; i++)
5202 bs[i] = 0;
5203
5204 for(i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
5205 bi = yaffs_get_block_info(dev, i);
5206 s = bi->block_state;
5207 if(s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN)
5208 bs[0]++;
5209 else
5210 bs[s]++;
5211 }
5212}