/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/* Our partition node structure */
struct mtd_part {
        struct mtd_info mtd;
        struct mtd_info *master;
        uint64_t offset;
        struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x) ((struct mtd_part *)(x))
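
/*
 * A minimal sketch of the same lookup written explicitly: the cast above
 * works only because 'mtd' is the first member of struct mtd_part, so it
 * is equivalent to
 *
 *	struct mtd_part *part = container_of(mtd, struct mtd_part, mtd);
 */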


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
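
/*
 * Worked example with illustrative numbers: for a partition that starts at
 * 0x20000 on the master, a 4 KiB read at partition offset 0x1000 becomes a
 * master read at 0x21000:
 *
 *	mtd_read(part_mtd, 0x1000, 0x1000, &retlen, buf);
 *		-> master->_read(master, 0x20000 + 0x1000, 0x1000, &retlen, buf);
 *
 * 'part_mtd' here is a hypothetical pointer to a partition's mtd_info.
 */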

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        struct mtd_ecc_stats stats;
        int res;

        stats = part->master->ecc_stats;
        res = part->master->_read(part->master, from + part->offset, len,
                                  retlen, buf);
        if (unlikely(mtd_is_eccerr(res)))
                mtd->ecc_stats.failed +=
                        part->master->ecc_stats.failed - stats.failed;
        else
                mtd->ecc_stats.corrected +=
                        part->master->ecc_stats.corrected - stats.corrected;
        return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, void **virt, resource_size_t *phys)
{
        struct mtd_part *part = PART(mtd);

        return part->master->_point(part->master, from + part->offset, len,
                                    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
        struct mtd_part *part = PART(mtd);

        return part->master->_unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
                                            unsigned long len,
                                            unsigned long offset,
                                            unsigned long flags)
{
        struct mtd_part *part = PART(mtd);

        offset += part->offset;
        return part->master->_get_unmapped_area(part->master, len, offset,
                                                flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
                struct mtd_oob_ops *ops)
{
        struct mtd_part *part = PART(mtd);
        int res;

        if (from >= mtd->size)
                return -EINVAL;
        if (ops->datbuf && from + ops->len > mtd->size)
                return -EINVAL;

        /*
         * If OOB is also requested, make sure that we do not read past the end
         * of this partition.
         */
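        /*
         * Worked example (illustrative numbers): on a NAND with 2048-byte
         * pages and 64 bytes of OOB per page, reading from offset 0 of a
         * 1 MiB partition leaves 512 pages, so in MTD_OPS_PLACE_OOB mode at
         * most 512 * 64 bytes of OOB can be asked for; a request with
         * ooboffs + ooblen beyond that is rejected with -EINVAL below.
         */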
        if (ops->oobbuf) {
                size_t len, pages;

                if (ops->mode == MTD_OPS_AUTO_OOB)
                        len = mtd->oobavail;
                else
                        len = mtd->oobsize;
                pages = mtd_div_by_ws(mtd->size, mtd);
                pages -= mtd_div_by_ws(from, mtd);
                if (ops->ooboffs + ops->ooblen > pages * len)
                        return -EINVAL;
        }

        res = part->master->_read_oob(part->master, from + part->offset, ops);
        if (unlikely(res)) {
                if (mtd_is_bitflip(res))
                        mtd->ecc_stats.corrected++;
                if (mtd_is_eccerr(res))
                        mtd->ecc_stats.failed++;
        }
        return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_read_user_prot_reg(part->master, from, len,
                                                 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
                struct otp_info *buf, size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_read_fact_prot_reg(part->master, from, len,
                                                 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
                size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_write(part->master, to + part->offset, len,
                                    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                size_t *retlen, const u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_panic_write(part->master, to + part->offset, len,
                                          retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
                struct mtd_oob_ops *ops)
{
        struct mtd_part *part = PART(mtd);

        if (to >= mtd->size)
                return -EINVAL;
        if (ops->datbuf && to + ops->len > mtd->size)
                return -EINVAL;
        return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len, size_t *retlen, u_char *buf)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_write_user_prot_reg(part->master, from, len,
                                                  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
                size_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
                unsigned long count, loff_t to, size_t *retlen)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_writev(part->master, vecs, count,
                                     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_part *part = PART(mtd);
        int ret;

        instr->addr += part->offset;
        ret = part->master->_erase(part->master, instr);
        if (ret) {
                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
        if (instr->mtd->_erase == part_erase) {
                struct mtd_part *part = PART(instr->mtd);

                if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                        instr->fail_addr -= part->offset;
                instr->addr -= part->offset;
        }
        if (instr->callback)
                instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
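
/*
 * Example (sketch, assuming a registered partition MTD): because of the
 * re-translation above, an erase caller always sees partition-relative
 * addresses in its callback, even though part_erase() shifted them for the
 * master. 'part_mtd' and 'erase_done' are hypothetical names:
 *
 *	struct erase_info ei = {
 *		.mtd      = part_mtd,
 *		.addr     = 0,
 *		.len      = part_mtd->erasesize,
 *		.callback = erase_done,
 *	};
 *
 *	ret = mtd_erase(part_mtd, &ei);
 */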

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->_sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
        struct mtd_part *part = PART(mtd);
        part->master->_resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        ofs += part->offset;
        return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_part *part = PART(mtd);
        int res;

        ofs += part->offset;
        res = part->master->_block_markbad(part->master, ofs);
        if (!res)
                mtd->ecc_stats.badblocks++;
        return res;
}

static inline void free_partition(struct mtd_part *p)
{
        kfree(p->mtd.name);
        kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
        struct mtd_part *slave, *next;
        int ret, err = 0;

        mutex_lock(&mtd_partitions_mutex);
        list_for_each_entry_safe(slave, next, &mtd_partitions, list)
                if (slave->master == master) {
                        ret = del_mtd_device(&slave->mtd);
                        if (ret < 0) {
                                err = ret;
                                continue;
                        }
                        list_del(&slave->list);
                        free_partition(slave);
                }
        mutex_unlock(&mtd_partitions_mutex);

        return err;
}
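
/*
 * Sketch of the usual teardown path (function and driver names are
 * illustrative): drivers do not normally call del_mtd_partitions()
 * directly; mtd_device_unregister() on the master tears down the slaves:
 *
 *	static int flash_remove(struct platform_device *pdev)
 *	{
 *		struct mtd_info *master = platform_get_drvdata(pdev);
 *
 *		return mtd_device_unregister(master);
 *	}
 */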

static struct mtd_part *allocate_partition(struct mtd_info *master,
                        const struct mtd_partition *part, int partno,
                        uint64_t cur_offset)
{
        struct mtd_part *slave;
        char *name;

        /* allocate the partition structure */
        slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        name = kstrdup(part->name, GFP_KERNEL);
        if (!name || !slave) {
                printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
                       master->name);
                kfree(name);
                kfree(slave);
                return ERR_PTR(-ENOMEM);
        }

        /* set up the MTD object for this partition */
        slave->mtd.type = master->type;
        slave->mtd.flags = master->flags & ~part->mask_flags;
        slave->mtd.size = part->size;
        slave->mtd.writesize = master->writesize;
        slave->mtd.writebufsize = master->writebufsize;
        slave->mtd.oobsize = master->oobsize;
        slave->mtd.oobavail = master->oobavail;
        slave->mtd.subpage_sft = master->subpage_sft;

        slave->mtd.name = name;
        slave->mtd.owner = master->owner;
        slave->mtd.backing_dev_info = master->backing_dev_info;

        /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
         * to have the same data be in two different partitions.
         */
        slave->mtd.dev.parent = master->dev.parent;

        slave->mtd._read = part_read;
        slave->mtd._write = part_write;

        if (master->_panic_write)
                slave->mtd._panic_write = part_panic_write;

        if (master->_point && master->_unpoint) {
                slave->mtd._point = part_point;
                slave->mtd._unpoint = part_unpoint;
        }

        if (master->_get_unmapped_area)
                slave->mtd._get_unmapped_area = part_get_unmapped_area;
        if (master->_read_oob)
                slave->mtd._read_oob = part_read_oob;
        if (master->_write_oob)
                slave->mtd._write_oob = part_write_oob;
        if (master->_read_user_prot_reg)
                slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
        if (master->_read_fact_prot_reg)
                slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
        if (master->_write_user_prot_reg)
                slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
        if (master->_lock_user_prot_reg)
                slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
        if (master->_get_user_prot_info)
                slave->mtd._get_user_prot_info = part_get_user_prot_info;
        if (master->_get_fact_prot_info)
                slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
        if (master->_sync)
                slave->mtd._sync = part_sync;
        if (!partno && !master->dev.class && master->_suspend &&
            master->_resume) {
                slave->mtd._suspend = part_suspend;
                slave->mtd._resume = part_resume;
        }
        if (master->_writev)
                slave->mtd._writev = part_writev;
        if (master->_lock)
                slave->mtd._lock = part_lock;
        if (master->_unlock)
                slave->mtd._unlock = part_unlock;
        if (master->_is_locked)
                slave->mtd._is_locked = part_is_locked;
        if (master->_block_isbad)
                slave->mtd._block_isbad = part_block_isbad;
        if (master->_block_markbad)
                slave->mtd._block_markbad = part_block_markbad;
        slave->mtd._erase = part_erase;
        slave->master = master;
        slave->offset = part->offset;

        if (slave->offset == MTDPART_OFS_APPEND)
                slave->offset = cur_offset;
        if (slave->offset == MTDPART_OFS_NXTBLK) {
                slave->offset = cur_offset;
                if (mtd_mod_by_eb(cur_offset, master) != 0) {
                        /* Round up to next erasesize */
                        slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
                        printk(KERN_NOTICE "Moving partition %d: "
                               "0x%012llx -> 0x%012llx\n", partno,
                               (unsigned long long)cur_offset, (unsigned long long)slave->offset);
                }
        }
        if (slave->offset == MTDPART_OFS_RETAIN) {
                slave->offset = cur_offset;
                if (master->size - slave->offset >= slave->mtd.size) {
                        slave->mtd.size = master->size - slave->offset
                                                - slave->mtd.size;
                } else {
                        printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
                                part->name, master->size - slave->offset,
                                slave->mtd.size);
                        /* register to preserve ordering */
                        goto out_register;
                }
        }
        if (slave->mtd.size == MTDPART_SIZ_FULL)
                slave->mtd.size = master->size - slave->offset;

        printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
                (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

        /* let's do some sanity checks */
        if (slave->offset >= master->size) {
                /* let's register it anyway to preserve ordering */
                slave->offset = 0;
                slave->mtd.size = 0;
                printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
                        part->name);
                goto out_register;
        }
        if (slave->offset + slave->mtd.size > master->size) {
                slave->mtd.size = master->size - slave->offset;
                printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
                        part->name, master->name, (unsigned long long)slave->mtd.size);
        }
        if (master->numeraseregions > 1) {
                /* Deal with variable erase size stuff */
                int i, max = master->numeraseregions;
                u64 end = slave->offset + slave->mtd.size;
                struct mtd_erase_region_info *regions = master->eraseregions;

                /* Find the first erase region which is part of this
                 * partition. */
                for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
                        ;
                /* The loop searched for the region _behind_ the first one */
                if (i > 0)
                        i--;

                /* Pick biggest erasesize */
                for (; i < max && regions[i].offset < end; i++) {
                        if (slave->mtd.erasesize < regions[i].erasesize) {
                                slave->mtd.erasesize = regions[i].erasesize;
                        }
                }
                BUG_ON(slave->mtd.erasesize == 0);
        } else {
                /* Single erase size */
                slave->mtd.erasesize = master->erasesize;
        }

        if ((slave->mtd.flags & MTD_WRITEABLE) &&
            mtd_mod_by_eb(slave->offset, &slave->mtd)) {
                /* Doesn't start on a boundary of major erase size */
                /* FIXME: Let it be writable if it is on a boundary of
                 * _minor_ erase size though */
                slave->mtd.flags &= ~MTD_WRITEABLE;
                printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
                        part->name);
        }
        if ((slave->mtd.flags & MTD_WRITEABLE) &&
            mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
                slave->mtd.flags &= ~MTD_WRITEABLE;
                printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
                        part->name);
        }

        slave->mtd.ecclayout = master->ecclayout;
        slave->mtd.ecc_strength = master->ecc_strength;
        slave->mtd.bitflip_threshold = master->bitflip_threshold;

        if (master->_block_isbad) {
                uint64_t offs = 0;

                while (offs < slave->mtd.size) {
                        if (mtd_block_isbad(master, offs + slave->offset))
                                slave->mtd.ecc_stats.badblocks++;
                        offs += slave->mtd.erasesize;
                }
        }

out_register:
        return slave;
}

int mtd_add_partition(struct mtd_info *master, char *name,
                      long long offset, long long length)
{
        struct mtd_partition part;
        struct mtd_part *p, *new;
        uint64_t start, end;
        int ret = 0;

        /* the direct offset is expected */
        if (offset == MTDPART_OFS_APPEND ||
            offset == MTDPART_OFS_NXTBLK)
                return -EINVAL;

        if (length == MTDPART_SIZ_FULL)
                length = master->size - offset;

        if (length <= 0)
                return -EINVAL;

        part.name = name;
        part.size = length;
        part.offset = offset;
        part.mask_flags = 0;
        part.ecclayout = NULL;

        new = allocate_partition(master, &part, -1, offset);
        if (IS_ERR(new))
                return PTR_ERR(new);

        start = offset;
        end = offset + length;

        mutex_lock(&mtd_partitions_mutex);
        list_for_each_entry(p, &mtd_partitions, list)
                if (p->master == master) {
                        if ((start >= p->offset) &&
                            (start < (p->offset + p->mtd.size)))
                                goto err_inv;

                        if ((end >= p->offset) &&
                            (end < (p->offset + p->mtd.size)))
                                goto err_inv;
                }

        list_add(&new->list, &mtd_partitions);
        mutex_unlock(&mtd_partitions_mutex);

        add_mtd_device(&new->mtd);

        return ret;
err_inv:
        mutex_unlock(&mtd_partitions_mutex);
        free_partition(new);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
        struct mtd_part *slave, *next;
        int ret = -EINVAL;

        mutex_lock(&mtd_partitions_mutex);
        list_for_each_entry_safe(slave, next, &mtd_partitions, list)
                if ((slave->master == master) &&
                    (slave->mtd.index == partno)) {
                        ret = del_mtd_device(&slave->mtd);
                        if (ret < 0)
                                break;

                        list_del(&slave->list);
                        free_partition(slave);
                        break;
                }
        mutex_unlock(&mtd_partitions_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
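
/*
 * Example (sketch): these two exported helpers support run-time
 * repartitioning (e.g. the BLKPG ioctls in mtdchar.c) and can be used the
 * same way from kernel code; the name, offset and length below are
 * illustrative:
 *
 *	err = mtd_add_partition(master, "runtime", 0x100000, 0x40000);
 *	...
 *	err = mtd_del_partition(master, partno);
 */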

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
                       const struct mtd_partition *parts,
                       int nbparts)
{
        struct mtd_part *slave;
        uint64_t cur_offset = 0;
        int i;

        printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

        for (i = 0; i < nbparts; i++) {
                slave = allocate_partition(master, parts + i, i, cur_offset);
                if (IS_ERR(slave))
                        return PTR_ERR(slave);

                mutex_lock(&mtd_partitions_mutex);
                list_add(&slave->list, &mtd_partitions);
                mutex_unlock(&mtd_partitions_mutex);

                add_mtd_device(&slave->mtd);

                cur_offset = slave->offset + slave->mtd.size;
        }

        return 0;
}
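
/*
 * Example (sketch): a flash driver normally provides a static table and
 * lets mtd_device_parse_register() (see mtdcore.c) end up here; the
 * MTDPART_OFS_APPEND and MTDPART_SIZ_FULL placeholders are resolved by
 * allocate_partition() above. Names and sizes are illustrative:
 *
 *	static const struct mtd_partition flash_parts[] = {
 *		{ .name = "boot",   .offset = 0,                  .size = 0x40000 },
 *		{ .name = "env",    .offset = MTDPART_OFS_APPEND, .size = 0x20000 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = mtd_device_parse_register(master, NULL, NULL, flash_parts,
 *					ARRAY_SIZE(flash_parts));
 */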

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
        struct mtd_part_parser *p, *ret = NULL;

        spin_lock(&part_parser_lock);

        list_for_each_entry(p, &part_parsers, list)
                if (!strcmp(p->name, name) && try_module_get(p->owner)) {
                        ret = p;
                        break;
                }

        spin_unlock(&part_parser_lock);

        return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

int register_mtd_parser(struct mtd_part_parser *p)
{
        spin_lock(&part_parser_lock);
        list_add(&p->list, &part_parsers);
        spin_unlock(&part_parser_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

int deregister_mtd_parser(struct mtd_part_parser *p)
{
        spin_lock(&part_parser_lock);
        list_del(&p->list);
        spin_unlock(&part_parser_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
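
/*
 * Example (hypothetical skeleton): a parser module allocates an array of
 * mtd_partition, stores it through @pparts and returns the number of
 * entries found. All "example_*" names are illustrative:
 *
 *	static int example_parse_fn(struct mtd_info *master,
 *				    struct mtd_partition **pparts,
 *				    struct mtd_part_parser_data *data)
 *	{
 *		struct mtd_partition *parts;
 *
 *		parts = kzalloc(2 * sizeof(*parts), GFP_KERNEL);
 *		if (!parts)
 *			return -ENOMEM;
 *		(fill in parts[0] and parts[1] from on-flash metadata)
 *		*pparts = parts;
 *		return 2;
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.owner    = THIS_MODULE,
 *		.parse_fn = example_parse_fn,
 *		.name     = "example",
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */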

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char *default_mtd_part_types[] = {
        "cmdlinepart",
        "ofpart",
        NULL
};
/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char **types,
                         struct mtd_partition **pparts,
                         struct mtd_part_parser_data *data)
{
        struct mtd_part_parser *parser;
        int ret = 0;

        if (!types)
                types = default_mtd_part_types;

        for ( ; ret <= 0 && *types; types++) {
                parser = get_partition_parser(*types);
                if (!parser && !request_module("%s", *types))
                        parser = get_partition_parser(*types);
                if (!parser)
                        continue;
                ret = (*parser->parse_fn)(master, pparts, data);
                if (ret > 0) {
                        printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
                               ret, parser->name, master->name);
                }
                put_partition_parser(parser);
        }
        return ret;
}
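
/*
 * Example (sketch of the caller side, as done by mtd_device_parse_register()
 * in mtdcore.c):
 *
 *	struct mtd_partition *parts;
 *	int nr_parts = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr_parts > 0)
 *		err = add_mtd_partitions(master, parts, nr_parts);
 */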

int mtd_is_partition(struct mtd_info *mtd)
{
        struct mtd_part *part;
        int ispart = 0;

        mutex_lock(&mtd_partitions_mutex);
        list_for_each_entry(part, &mtd_partitions, list)
                if (&part->mtd == mtd) {
                        ispart = 1;
                        break;
                }
        mutex_unlock(&mtd_partitions_mutex);

        return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);