/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                                  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                        uint32_t *len, int prio, uint32_t sumsize)
{
        int ret = -EAGAIN;
        int blocksneeded = c->resv_blocks_write;
        /* align it */
        minsize = PAD(minsize);

        D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
        mutex_lock(&c->alloc_sem);

        D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

        spin_lock(&c->erase_completion_lock);

        /* this needs a little more thought (true <tglx> :)) */
        while(ret == -EAGAIN) {
                while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
                        uint32_t dirty, avail;

                        /* calculate real dirty size
                         * dirty_size contains blocks on erase_pending_list
                         * those blocks are counted in c->nr_erasing_blocks.
                         * If one block is actually erased, it is no longer counted as dirty_space
                         * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
                         * with c->nr_erasing_blocks * c->sector_size again.
                         * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
                         * This helps us to force gc and eventually pick a clean block to spread the load.
                         * We add unchecked_size here, as we hopefully will find some space to use.
                         * This will affect the sum only once, as gc first finishes checking
                         * of nodes.
                         */
                        dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
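                        /* A numeric illustration of the line above (figures are
                         * hypothetical): with sector_size 0x10000, dirty_size
                         * 0x30000, erasing_size 0x20000, nr_erasing_blocks 2 and
                         * unchecked_size 0x8000, we get
                         * dirty = 0x30000 + 0x20000 - 0x20000 + 0x8000 = 0x38000;
                         * the blocks already counted via nr_erasing_blocks are
                         * backed out of the sum again. */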
                        if (dirty < c->nospc_dirty_size) {
                                if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
                                        D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
                                        break;
                                }
                                D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
                                          dirty, c->unchecked_size, c->nospc_dirty_size));

                                spin_unlock(&c->erase_completion_lock);
                                mutex_unlock(&c->alloc_sem);
                                return -ENOSPC;
                        }

                        /* Calc possibly available space. Possibly available means that we
                         * don't know whether unchecked size contains obsoleted nodes, which could give us some
                         * more usable space. This will affect the sum only once, as gc first finishes checking
                         * of nodes.
                         * Return -ENOSPC if the maximum possibly available space is less than or equal to
                         * blocksneeded * sector_size.
                         * This blocks endless gc looping on a filesystem, which is nearly full, even if
                         * the check above passes.
                         */
                        avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
                        if ((avail / c->sector_size) <= blocksneeded) {
                                if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
                                        D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
                                        break;
                                }

                                D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
                                          avail, blocksneeded * c->sector_size));
                                spin_unlock(&c->erase_completion_lock);
                                mutex_unlock(&c->alloc_sem);
                                return -ENOSPC;
                        }
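                        /* Likewise, a hypothetical example for the check above:
                         * with free_size 0x40000, dirty_size 0x30000, erasing_size
                         * 0x20000, unchecked_size 0x8000 and sector_size 0x10000,
                         * avail is 0x98000, i.e. 9 whole blocks; any blocksneeded
                         * of 9 or more makes this return -ENOSPC. */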

                        mutex_unlock(&c->alloc_sem);

                        D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
                                  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
                                  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
                        spin_unlock(&c->erase_completion_lock);

                        ret = jffs2_garbage_collect_pass(c);

                        if (ret == -EAGAIN) {
                                spin_lock(&c->erase_completion_lock);
                                if (c->nr_erasing_blocks &&
                                    list_empty(&c->erase_pending_list) &&
                                    list_empty(&c->erase_complete_list)) {
                                        DECLARE_WAITQUEUE(wait, current);
                                        set_current_state(TASK_UNINTERRUPTIBLE);
                                        add_wait_queue(&c->erase_wait, &wait);
                                        D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__));
                                        spin_unlock(&c->erase_completion_lock);

                                        schedule();
                                        remove_wait_queue(&c->erase_wait, &wait);
                                } else
                                        spin_unlock(&c->erase_completion_lock);
                        } else if (ret)
                                return ret;

                        cond_resched();

                        if (signal_pending(current))
                                return -EINTR;

                        mutex_lock(&c->alloc_sem);
                        spin_lock(&c->erase_completion_lock);
                }

                ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
                if (ret) {
                        D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
                }
        }
        spin_unlock(&c->erase_completion_lock);
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
        if (ret)
                mutex_unlock(&c->alloc_sem);
        return ret;
}
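
/*
 * An illustrative sketch (not compiled) of the calling convention described
 * above, pieced together from this file's own rules: reserve, write, report
 * the node with jffs2_add_physical_node_ref(), then release the alloc_sem
 * with jffs2_complete_reservation(). 'totlen', 'buf', 'ic' and write_ofs()
 * are hypothetical stand-ins for the caller's node data.
 *
 *      uint32_t len, ofs;
 *      size_t retlen;
 *      int ret;
 *
 *      ret = jffs2_reserve_space(c, PAD(totlen), &len, ALLOC_NORMAL,
 *                                JFFS2_SUMMARY_INODE_SIZE);
 *      if (ret)
 *              return ret;
 *      ofs = write_ofs(c);  // i.e. jeb->offset + (c->sector_size - jeb->free_size)
 *      ret = jffs2_flash_write(c, ofs, totlen, &retlen, buf);
 *      ...
 *      jffs2_add_physical_node_ref(c, ofs | REF_NORMAL, PAD(totlen), ic);
 *      jffs2_complete_reservation(c);
 */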

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
                           uint32_t *len, uint32_t sumsize)
{
        int ret = -EAGAIN;
        minsize = PAD(minsize);

        D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

        spin_lock(&c->erase_completion_lock);
        while(ret == -EAGAIN) {
                ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
                if (ret) {
                        D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
                }
        }
        spin_unlock(&c->erase_completion_lock);
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

        return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
        if (c->nextblock == NULL) {
                D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
                          jeb->offset));
                return;
        }
        /* Check if we have a dirty block now, or if it was already dirty */
        if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
                c->dirty_size += jeb->wasted_size;
                c->wasted_size -= jeb->wasted_size;
                jeb->dirty_size += jeb->wasted_size;
                jeb->wasted_size = 0;
                if (VERYDIRTY(c, jeb->dirty_size)) {
                        D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                                  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                        list_add_tail(&jeb->list, &c->very_dirty_list);
                } else {
                        D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                                  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                        list_add_tail(&jeb->list, &c->dirty_list);
                }
        } else {
                D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                list_add_tail(&jeb->list, &c->clean_list);
        }
        c->nextblock = NULL;

}
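
/* For reference: the thresholds used above are, at the time of writing,
 * defined in nodelist.h roughly as
 *
 *      ISDIRTY(size):      size exceeds a minimal node, i.e.
 *                          sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN
 *      VERYDIRTY(c, size): size >= c->sector_size / 2
 *
 * so a block moves to very_dirty_list once at least half of it is dirty.
 * (Check nodelist.h in your tree; treat the exact values as assumptions.)
 */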

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
        struct list_head *next;

        /* Take the next block off the 'free' list */

        if (list_empty(&c->free_list)) {

                if (!c->nr_erasing_blocks &&
                    !list_empty(&c->erasable_list)) {
                        struct jffs2_eraseblock *ejeb;

                        ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
                        list_move_tail(&ejeb->list, &c->erase_pending_list);
                        c->nr_erasing_blocks++;
                        jffs2_garbage_collect_trigger(c);
                        D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
                                  ejeb->offset));
                }

                if (!c->nr_erasing_blocks &&
                    !list_empty(&c->erasable_pending_wbuf_list)) {
                        D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
                        /* c->nextblock is NULL, no update to c->nextblock allowed */
                        spin_unlock(&c->erase_completion_lock);
                        jffs2_flush_wbuf_pad(c);
                        spin_lock(&c->erase_completion_lock);
                        /* Have another go. It'll be on the erasable_list now */
                        return -EAGAIN;
                }

                if (!c->nr_erasing_blocks) {
                        /* Ouch. We're in GC, or we wouldn't have got here.
                           And there's no space left. At all. */
                        printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
                               c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
                               list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
                        return -ENOSPC;
                }

                spin_unlock(&c->erase_completion_lock);
                /* Don't wait for it; just erase one right now */
                jffs2_erase_pending_blocks(c, 1);
                spin_lock(&c->erase_completion_lock);

                /* An erase may have failed, decreasing the
                   amount of free space available. So we must
                   restart from the beginning */
                return -EAGAIN;
        }

        next = c->free_list.next;
        list_del(next);
        c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
        c->nr_free_blocks--;

        jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
        /* adjust write buffer offset, else we get a non-contiguous write bug */
        if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
                c->wbuf_ofs = 0xffffffff;
#endif

        D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

        return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                                  uint32_t *len, uint32_t sumsize)
{
        struct jffs2_eraseblock *jeb = c->nextblock;
        uint32_t reserved_size;   /* for summary information at the end of the jeb */
        int ret;

 restart:
        reserved_size = 0;

        if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
                /* NOSUM_SIZE means not to generate summary */

                if (jeb) {
                        reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
                        dbg_summary("minsize=%d , jeb->free=%d ,"
                                    "summary->size=%d , sumsize=%d\n",
                                    minsize, jeb->free_size,
                                    c->summary->sum_size, sumsize);
                }
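
                /* A hypothetical sizing example for the reservation above:
                 * with sumsize 0x20 and c->summary->sum_size 0x100,
                 * reserved_size = PAD(0x120 + JFFS2_SUMMARY_FRAME_SIZE),
                 * where PAD() rounds up to a multiple of 4 and the frame
                 * size constant lives in summary.h. */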

                /* Is there enough space to write the current node, or do we have to
                   write out the summary information now, close this jeb and select a new nextblock? */
                if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
                                               JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

                        /* Has summary been disabled for this jeb? */
                        if (jffs2_sum_is_disabled(c->summary)) {
                                sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
                                goto restart;
                        }

                        /* Writing out the collected summary information */
                        dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
                        ret = jffs2_sum_write_sumnode(c);

                        if (ret)
                                return ret;

                        if (jffs2_sum_is_disabled(c->summary)) {
                                /* jffs2_sum_write_sumnode() couldn't write out the summary information.
                                   Disable summary for this jeb and free the collected information.
                                 */
                                sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
                                goto restart;
                        }

                        jffs2_close_nextblock(c, jeb);
                        jeb = NULL;
                        /* always keep a valid value in reserved_size */
                        reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
                }
        } else {
                if (jeb && minsize > jeb->free_size) {
                        uint32_t waste;

                        /* Skip the end of this block and file it as having some dirty space */
                        /* If there's a pending write to it, flush now */

                        if (jffs2_wbuf_dirty(c)) {
                                spin_unlock(&c->erase_completion_lock);
                                D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
                                jffs2_flush_wbuf_pad(c);
                                spin_lock(&c->erase_completion_lock);
                                jeb = c->nextblock;
                                goto restart;
                        }

                        spin_unlock(&c->erase_completion_lock);

                        ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
                        if (ret)
                                return ret;
                        /* Just lock it again and continue. Nothing much can change because
                           we hold c->alloc_sem anyway. In fact, it's not entirely clear why
                           we hold c->erase_completion_lock in the majority of this function...
                           but that's a question for another (more caffeine-rich) day. */
                        spin_lock(&c->erase_completion_lock);

                        waste = jeb->free_size;
                        jffs2_link_node_ref(c, jeb,
                                            (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
                                            waste, NULL);
                        /* FIXME: that made it count as dirty. Convert to wasted */
                        jeb->dirty_size -= waste;
                        c->dirty_size -= waste;
                        jeb->wasted_size += waste;
                        c->wasted_size += waste;

                        jffs2_close_nextblock(c, jeb);
                        jeb = NULL;
                }
        }
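
        /* Worked example of the waste filing above: if jeb->free_size was 0x40
         * when the block was abandoned, a 0x40-byte REF_OBSOLETE ref is linked
         * at the end of the block; jffs2_link_node_ref() first accounts those
         * bytes as dirty, and the four lines after the FIXME move them from
         * (jeb|c)->dirty_size to (jeb|c)->wasted_size. */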

        if (!jeb) {

                ret = jffs2_find_nextblock(c);
                if (ret)
                        return ret;

                jeb = c->nextblock;

                if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
                        printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
                        goto restart;
                }
        }
        /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
           enough space */
        *len = jeb->free_size - reserved_size;

        if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
            !jeb->first_node->next_in_ino) {
                /* Only node in it beforehand was a CLEANMARKER node (we think).
                   So mark it obsolete now that there's going to be another node
                   in the block. This will reduce used_size to zero, but we've
                   already set c->nextblock so that jffs2_mark_node_obsolete()
                   won't try to refile it to the dirty_list.
                 */
                spin_unlock(&c->erase_completion_lock);
                jffs2_mark_node_obsolete(c, jeb->first_node);
                spin_lock(&c->erase_completion_lock);
        }

        D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
                  *len, jeb->offset + (c->sector_size - jeb->free_size)));
        return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical offset of the node, with the REF_* status in the low two bits
 * @len: length of this physical node
 * @ic: inode cache the node belongs to, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space().
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
                                                       uint32_t ofs, uint32_t len,
                                                       struct jffs2_inode_cache *ic)
{
        struct jffs2_eraseblock *jeb;
        struct jffs2_raw_node_ref *new;

        jeb = &c->blocks[ofs / c->sector_size];

        D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
                  ofs & ~3, ofs & 3, len));
#if 1
        /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
           if c->nextblock is set. Note that wbuf.c will file obsolete nodes
           even after refiling c->nextblock */
        if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
            && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
                printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
                if (c->nextblock)
                        printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
                else
                        printk(KERN_WARNING "No nextblock");
                printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
                return ERR_PTR(-EINVAL);
        }
#endif
        spin_lock(&c->erase_completion_lock);

        new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

        if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
                /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
                D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                if (jffs2_wbuf_dirty(c)) {
                        /* Flush the last write in the block if it's outstanding */
                        spin_unlock(&c->erase_completion_lock);
                        jffs2_flush_wbuf_pad(c);
                        spin_lock(&c->erase_completion_lock);
                }

                list_add_tail(&jeb->list, &c->clean_list);
                c->nextblock = NULL;
        }
        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        spin_unlock(&c->erase_completion_lock);

        return new;
}
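
/* Note on 'ofs' as used above: its low two bits carry the node's REF_* state
 * (see nodelist.h) and the remaining bits are the physical address, which is
 * why the sanity check masks with '& 3' and '& ~3'. A caller reporting a
 * freshly written node at 0x00042000 would, for example, pass
 * 0x00042000 | REF_NORMAL. */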

void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
        D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
        spin_lock(&c->erase_completion_lock);
        jffs2_garbage_collect_trigger(c);
        spin_unlock(&c->erase_completion_lock);
        mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
        struct list_head *this;

        list_for_each(this, head) {
                if (this == obj) {
                        D1(printk("%p is on list at %p\n", obj, head));
                        return 1;
                }
        }
        return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
        struct jffs2_eraseblock *jeb;
        int blocknr;
        struct jffs2_unknown_node n;
        int ret, addedsize;
        size_t retlen;
        uint32_t freed_len;

        if(unlikely(!ref)) {
                printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
                return;
        }
        if (ref_obsolete(ref)) {
                D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
                return;
        }
        blocknr = ref->flash_offset / c->sector_size;
        if (blocknr >= c->nr_blocks) {
                printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
                BUG();
        }
        jeb = &c->blocks[blocknr];

        if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
            !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
                /* Hm. This may confuse static lock analysis. If any of the above
                   three conditions is false, we're going to return from this
                   function without actually obliterating any nodes or freeing
                   any jffs2_raw_node_refs. So we don't need to stop erases from
                   happening, or protect against people holding an obsolete
                   jffs2_raw_node_ref without the erase_completion_lock. */
                mutex_lock(&c->erase_free_sem);
        }

        spin_lock(&c->erase_completion_lock);

        freed_len = ref_totlen(c, jeb, ref);

        if (ref_flags(ref) == REF_UNCHECKED) {
                D1(if (unlikely(jeb->unchecked_size < freed_len)) {
                        printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
                               freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
                        BUG();
                })
                D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
                jeb->unchecked_size -= freed_len;
                c->unchecked_size -= freed_len;
        } else {
                D1(if (unlikely(jeb->used_size < freed_len)) {
                        printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
                               freed_len, blocknr, ref->flash_offset, jeb->used_size);
                        BUG();
                })
                D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
                jeb->used_size -= freed_len;
                c->used_size -= freed_len;
        }

        /* Take care that wasted size is taken into account */
        if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
                D1(printk("Dirtying\n"));
                addedsize = freed_len;
                jeb->dirty_size += freed_len;
                c->dirty_size += freed_len;

                /* Convert wasted space to dirty, if not a bad block */
                if (jeb->wasted_size) {
                        if (on_list(&jeb->list, &c->bad_used_list)) {
                                D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
                                          jeb->offset));
                                addedsize = 0; /* To fool the refiling code later */
                        } else {
                                D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
                                          jeb->wasted_size, jeb->offset));
                                addedsize += jeb->wasted_size;
                                jeb->dirty_size += jeb->wasted_size;
                                c->dirty_size += jeb->wasted_size;
                                c->wasted_size -= jeb->wasted_size;
                                jeb->wasted_size = 0;
                        }
                }
        } else {
                D1(printk("Wasting\n"));
                addedsize = 0;
                jeb->wasted_size += freed_len;
                c->wasted_size += freed_len;
        }
        ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        if (c->flags & JFFS2_SB_FLAG_SCANNING) {
                /* Flash scanning is in progress. Don't muck about with the block
                   lists because they're not ready yet, and don't actually
                   obliterate nodes that look obsolete. If they weren't
                   marked obsolete on the flash at the time they _became_
                   obsolete, there was probably a reason for that. */
                spin_unlock(&c->erase_completion_lock);
                /* We didn't lock the erase_free_sem */
                return;
        }

        if (jeb == c->nextblock) {
                D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
        } else if (!jeb->used_size && !jeb->unchecked_size) {
                if (jeb == c->gcblock) {
                        D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
                        c->gcblock = NULL;
                } else {
                        D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
                        list_del(&jeb->list);
                }
                if (jffs2_wbuf_dirty(c)) {
                        D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
                        list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
                } else {
                        if (jiffies & 127) {
                                /* Most of the time, we just erase it immediately. Otherwise we
                                   spend ages scanning it on mount, etc. */
                                D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
                                list_add_tail(&jeb->list, &c->erase_pending_list);
                                c->nr_erasing_blocks++;
                                jffs2_garbage_collect_trigger(c);
                        } else {
                                /* Sometimes, however, we leave it elsewhere so it doesn't get
                                   immediately reused, and we spread the load a bit. */
                                D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
                                list_add_tail(&jeb->list, &c->erasable_list);
                        }
                }
                D1(printk(KERN_DEBUG "Done OK\n"));
        } else if (jeb == c->gcblock) {
                D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
        } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
                D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
                list_del(&jeb->list);
                D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
                list_add_tail(&jeb->list, &c->dirty_list);
        } else if (VERYDIRTY(c, jeb->dirty_size) &&
                   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
                D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
                list_del(&jeb->list);
                D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
                list_add_tail(&jeb->list, &c->very_dirty_list);
        } else {
                D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
        }

        spin_unlock(&c->erase_completion_lock);

        if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
            (c->flags & JFFS2_SB_FLAG_BUILDING)) {
                /* We didn't lock the erase_free_sem */
                return;
        }

        /* The erase_free_sem is locked, and has been since before we marked the node obsolete
           and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
           the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
           by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

        D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
        ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
        if (ret) {
                printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
                goto out_erase_sem;
        }
        if (retlen != sizeof(n)) {
                printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
                goto out_erase_sem;
        }
        if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
                printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
                goto out_erase_sem;
        }
        if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
                D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
                goto out_erase_sem;
        }
        /* XXX FIXME: This is ugly now */
        n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
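        /* For example, assuming JFFS2_NODE_ACCURATE is the 0x2000 bit as defined
         * in jffs2.h: an inode node with nodetype 0xe002 is rewritten as 0xc002,
         * which later scans as "not accurate", i.e. obsolete. */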
        ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
        if (ret) {
                printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
                goto out_erase_sem;
        }
        if (retlen != sizeof(n)) {
                printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
                goto out_erase_sem;
        }

        /* Nodes which have been marked obsolete no longer need to be
           associated with any inode. Remove them from the per-inode list.

           Note we can't do this for NAND at the moment because we need
           obsolete dirent nodes to stay on the lists, because of the
           horridness in jffs2_garbage_collect_deletion_dirent(). Also
           because we delete the inocache, and on NAND we need that to
           stay around until all the nodes are actually erased, in order
           to stop us from giving the same inode number to another newly
           created inode. */
        if (ref->next_in_ino) {
                struct jffs2_inode_cache *ic;
                struct jffs2_raw_node_ref **p;

                spin_lock(&c->erase_completion_lock);

                ic = jffs2_raw_ref_to_ic(ref);
                for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
                        ;

                *p = ref->next_in_ino;
                ref->next_in_ino = NULL;

                switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
                case RAWNODE_CLASS_XATTR_DATUM:
                        jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
                        break;
                case RAWNODE_CLASS_XATTR_REF:
                        jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
                        break;
#endif
                default:
                        if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
                                jffs2_del_ino_cache(c, ic);
                        break;
                }
                spin_unlock(&c->erase_completion_lock);
        }

 out_erase_sem:
        mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
        int ret = 0;
        uint32_t dirty;
        int nr_very_dirty = 0;
        struct jffs2_eraseblock *jeb;

        if (!list_empty(&c->erase_complete_list) ||
            !list_empty(&c->erase_pending_list))
                return 1;

        if (c->unchecked_size) {
                D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
                          c->unchecked_size, c->checked_ino));
                return 1;
        }

        /* dirty_size contains blocks on erase_pending_list
         * those blocks are counted in c->nr_erasing_blocks.
         * If one block is actually erased, it is no longer counted as dirty_space
         * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
         * with c->nr_erasing_blocks * c->sector_size again.
         * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
         * This helps us to force gc and eventually pick a clean block to spread the load.
         */
        dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

        if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
            (dirty > c->nospc_dirty_size))
                ret = 1;

        list_for_each_entry(jeb, &c->very_dirty_list, list) {
                nr_very_dirty++;
                if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
                        ret = 1;
                        /* In debug mode, actually go through and count them all */
                        D1(continue);
                        break;
                }
        }

        D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
                  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

        return ret;
}