drivers/mtd/nand/nandsim.c

Source at commit f42d987a04d6f9366c47edf794f66796151867b9.
1/*
2 * NAND flash simulator.
3 *
4 * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
5 *
6 * Copyright (C) 2004 Nokia Corporation
7 *
8 * Note: NS means "NAND Simulator".
9 * Note: Input means input TO flash chip, output means output FROM chip.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2, or (at your option) any later
14 * version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
19 * Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
24 */
25
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/vmalloc.h>
31#include <asm/div64.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/string.h>
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/nand.h>
37#include <linux/mtd/partitions.h>
38#include <linux/delay.h>
39#include <linux/list.h>
40#include <linux/random.h>
41#include <linux/sched.h>
42#include <linux/fs.h>
43#include <linux/pagemap.h>
44
45/* Default simulator parameters values */
46#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
47    !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
48    !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
49    !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
50#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
51#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
52#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
53#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
54#endif
55
56#ifndef CONFIG_NANDSIM_ACCESS_DELAY
57#define CONFIG_NANDSIM_ACCESS_DELAY 25
58#endif
59#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
60#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
61#endif
62#ifndef CONFIG_NANDSIM_ERASE_DELAY
63#define CONFIG_NANDSIM_ERASE_DELAY 2
64#endif
65#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
66#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
67#endif
68#ifndef CONFIG_NANDSIM_INPUT_CYCLE
69#define CONFIG_NANDSIM_INPUT_CYCLE 50
70#endif
71#ifndef CONFIG_NANDSIM_BUS_WIDTH
72#define CONFIG_NANDSIM_BUS_WIDTH 8
73#endif
74#ifndef CONFIG_NANDSIM_DO_DELAYS
75#define CONFIG_NANDSIM_DO_DELAYS 0
76#endif
77#ifndef CONFIG_NANDSIM_LOG
78#define CONFIG_NANDSIM_LOG 0
79#endif
80#ifndef CONFIG_NANDSIM_DBG
81#define CONFIG_NANDSIM_DBG 0
82#endif
83
84static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
85static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
86static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
87static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
88static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
89static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
90static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
91static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
92static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
93static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
94static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
95static uint log = CONFIG_NANDSIM_LOG;
96static uint dbg = CONFIG_NANDSIM_DBG;
97static unsigned long parts[MAX_MTD_DEVICES];
98static unsigned int parts_num;
99static char *badblocks = NULL;
100static char *weakblocks = NULL;
101static char *weakpages = NULL;
102static unsigned int bitflips = 0;
103static char *gravepages = NULL;
104static unsigned int rptwear = 0;
105static unsigned int overridesize = 0;
106static char *cache_file = NULL;
107
108module_param(first_id_byte, uint, 0400);
109module_param(second_id_byte, uint, 0400);
110module_param(third_id_byte, uint, 0400);
111module_param(fourth_id_byte, uint, 0400);
112module_param(access_delay, uint, 0400);
113module_param(programm_delay, uint, 0400);
114module_param(erase_delay, uint, 0400);
115module_param(output_cycle, uint, 0400);
116module_param(input_cycle, uint, 0400);
117module_param(bus_width, uint, 0400);
118module_param(do_delays, uint, 0400);
119module_param(log, uint, 0400);
120module_param(dbg, uint, 0400);
121module_param_array(parts, ulong, &parts_num, 0400);
122module_param(badblocks, charp, 0400);
123module_param(weakblocks, charp, 0400);
124module_param(weakpages, charp, 0400);
125module_param(bitflips, uint, 0400);
126module_param(gravepages, charp, 0400);
127module_param(rptwear, uint, 0400);
128module_param(overridesize, uint, 0400);
129module_param(cache_file, charp, 0400);
130
131MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
132MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
133MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
134MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
135MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
136MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
137MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
138MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
139MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
140MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
141MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
142MODULE_PARM_DESC(log, "Perform logging if not zero");
143MODULE_PARM_DESC(dbg, "Output debug information if not zero");
144MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
145/* Page and erase block positions for the following parameters are independent of any partitions */
146MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
147MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
148                 " separated by commas e.g. 113:2 means eb 113"
149                 " can be erased only twice before failing");
150MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
151                 " separated by commas e.g. 1401:2 means page 1401"
152                 " can be written only twice before failing");
153MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
154MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
155                 " separated by commas e.g. 1401:2 means page 1401"
156                 " can be read only twice before failing");
157MODULE_PARM_DESC(rptwear, "Number of erases between wear reports, if not zero");
158MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
159                 "The size is specified in erase blocks and as the exponent of a power of two"
160                 " e.g. 5 means a size of 32 erase blocks");
161MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
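/*
 * Example invocation (all parameter values below are illustrative only):
 *
 *   modprobe nandsim first_id_byte=0x20 second_id_byte=0xaa \
 *            third_id_byte=0x00 fourth_id_byte=0x15 \
 *            parts=128,256 badblocks=100,200 weakblocks=300:2 rptwear=1000
 *
 * This reports the given four ID bytes to the NAND layer, creates a
 * 128-eraseblock and a 256-eraseblock partition (plus one more covering the
 * remainder, if any), marks eraseblocks 100 and 200 bad, lets eraseblock 300
 * survive only two erases, and prints a wear report every 1000 erases.
 */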
162
163/* The largest possible page size */
164#define NS_LARGEST_PAGE_SIZE 4096
165
166/* The prefix for simulator output */
167#define NS_OUTPUT_PREFIX "[nandsim]"
168
169/* Simulator's output macros (logging, debugging, warning, error) */
170#define NS_LOG(args...) \
171    do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
172#define NS_DBG(args...) \
173    do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
174#define NS_WARN(args...) \
175    do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
176#define NS_ERR(args...) \
177    do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
178#define NS_INFO(args...) \
179    do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
180
181/* Busy-wait delay macros (microseconds, milliseconds) */
182#define NS_UDELAY(us) \
183        do { if (do_delays) udelay(us); } while(0)
184#define NS_MDELAY(us) \
185        do { if (do_delays) mdelay(us); } while(0)
186
187/* Is the nandsim structure initialized? */
188#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
189
190/* Good operation completion status */
191#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
192
193/* Operation failed completion status */
194#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
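/*
 * For illustration, using the status bits from <linux/mtd/nand.h>
 * (NAND_STATUS_READY is 0x40, NAND_STATUS_WP is 0x80, NAND_STATUS_FAIL is
 * 0x01): with write protect de-asserted (lines.wp == 0) NS_STATUS_OK is 0xC0
 * and NS_STATUS_FAILED is 0xC1; with write protect asserted they become 0x40
 * and 0x41.
 */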
195
196/* Calculate the page offset in flash RAM image by (row, column) address */
197#define NS_RAW_OFFSET(ns) \
198    (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
199
200/* Calculate the OOB offset in flash RAM image by (row, column) address */
201#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
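/*
 * Worked example: for a hypothetical chip with 512-byte pages (pgshift == 9)
 * and a 16-byte OOB area, row 3 and column 10 give
 * NS_RAW_OFFSET == 3 * 512 + 3 * 16 + 10 == 1594, and
 * NS_RAW_OFFSET_OOB == 1594 + 512 == 2106, i.e. byte 10 of page 3's OOB area.
 */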
202
203/* After a command is input, the simulator goes to one of the following states */
204#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
205#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
206#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
207#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
208#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
209#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
210#define STATE_CMD_STATUS 0x00000007 /* read status */
211#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
212#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
213#define STATE_CMD_READID 0x0000000A /* read ID */
214#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
215#define STATE_CMD_RESET 0x0000000C /* reset */
216#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
217#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
218#define STATE_CMD_MASK 0x0000000F /* command states mask */
219
220/* After an address is input, the simulator goes to one of these states */
221#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
222#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
223#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
224#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
225#define STATE_ADDR_MASK 0x00000070 /* address states mask */
226
227/* During data input/output the simulator is in these states */
228#define STATE_DATAIN 0x00000100 /* waiting for data input */
229#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
230
231#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
232#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
233#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
234#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
235#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
236
237/* Previous operation is done, ready to accept new requests */
238#define STATE_READY 0x00000000
239
240/* This state is used to mark that the next state isn't known yet */
241#define STATE_UNKNOWN 0x10000000
242
243/* Simulator's actions bit masks */
244#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
245#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
246#define ACTION_SECERASE 0x00300000 /* erase sector */
247#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
248#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
249#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
250#define ACTION_MASK 0x00700000 /* action mask */
251
252#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
253#define NS_OPER_STATES 6 /* Maximum number of states in operation */
254
255#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
256#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
257#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
258#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
259#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
260#define OPT_AUTOINCR 0x00000020 /* page number auto increment is possible */
261#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
262#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
263#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
264#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
265
266/* Remove action bits from state */
267#define NS_STATE(x) ((x) & ~ACTION_MASK)
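/*
 * Each state word combines one of the command/address/data states above with
 * an optional action in bits 20-22. For example, STATE_CMD_READ0 |
 * ACTION_ZEROOFF is 0x00400001, and NS_STATE() strips the action bits to give
 * back STATE_CMD_READ0 (0x00000001).
 */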
268
269/*
270 * Maximum number of previous states which need to be saved. Currently saving is
271 * only needed for the page program operation preceded by a read command
272 * (which is only valid for 512-byte pages).
273 */
274#define NS_MAX_PREVSTATES 1
275
276/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
277#define NS_MAX_HELD_PAGES 16
278
279/*
280 * A union to represent flash memory contents and flash buffer.
281 */
282union ns_mem {
283    u_char *byte; /* for byte access */
284    uint16_t *word; /* for 16-bit word access */
285};
286
287/*
288 * The structure which describes all the internal simulator data.
289 */
290struct nandsim {
291    struct mtd_partition partitions[MAX_MTD_DEVICES];
292    unsigned int nbparts;
293
294    uint busw; /* flash chip bus width (8 or 16) */
295    u_char ids[4]; /* chip's ID bytes */
296    uint32_t options; /* chip's characteristic bits */
297    uint32_t state; /* current chip state */
298    uint32_t nxstate; /* next expected state */
299
300    uint32_t *op; /* current operation, NULL if the operation isn't known yet */
301    uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
302    uint16_t npstates; /* number of previous states saved */
303    uint16_t stateidx; /* current state index */
304
305    /* The simulated NAND flash pages array */
306    union ns_mem *pages;
307
308    /* Slab allocator for nand pages */
309    struct kmem_cache *nand_pages_slab;
310
311    /* Internal buffer of page + OOB size bytes */
312    union ns_mem buf;
313
314    /* NAND flash "geometry" */
315    struct nandsim_geometry {
316        uint64_t totsz; /* total flash size, bytes */
317        uint32_t secsz; /* flash sector (erase block) size, bytes */
318        uint pgsz; /* NAND flash page size, bytes */
319        uint oobsz; /* page OOB area size, bytes */
320        uint64_t totszoob; /* total flash size including OOB, bytes */
321        uint pgszoob; /* page size including OOB, bytes */
322        uint secszoob; /* sector size including OOB, bytes */
323        uint pgnum; /* total number of pages */
324        uint pgsec; /* number of pages per sector */
325        uint secshift; /* number of bits in sector size */
326        uint pgshift; /* number of bits in page size */
327        uint oobshift; /* number of bits in OOB size */
328        uint pgaddrbytes; /* bytes per page address */
329        uint secaddrbytes; /* bytes per sector address */
330        uint idbytes; /* the number of ID bytes that this chip outputs */
331    } geom;
332
333    /* NAND flash internal registers */
334    struct nandsim_regs {
335        unsigned command; /* the command register */
336        u_char status; /* the status register */
337        uint row; /* the page number */
338        uint column; /* the offset within page */
339        uint count; /* internal counter */
340        uint num; /* number of bytes which must be processed */
341        uint off; /* fixed page offset */
342    } regs;
343
344    /* NAND flash lines state */
345    struct ns_lines_status {
346        int ce; /* chip Enable */
347        int cle; /* command Latch Enable */
348        int ale; /* address Latch Enable */
349        int wp; /* write Protect */
350    } lines;
351
352    /* Fields needed when using a cache file */
353    struct file *cfile; /* Open file */
354    unsigned char *pages_written; /* Which pages have been written */
355    void *file_buf;
356    struct page *held_pages[NS_MAX_HELD_PAGES];
357    int held_cnt;
358};
359
360/*
361 * Operations array. To perform any operation the simulator must pass
362 * through the corresponding chain of states.
363 */
364static struct nandsim_operations {
365    uint32_t reqopts; /* options which are required to perform the operation */
366    uint32_t states[NS_OPER_STATES]; /* operation's states */
367} ops[NS_OPER_NUM] = {
368    /* Read page + OOB from the beginning */
369    {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
370            STATE_DATAOUT, STATE_READY}},
371    /* Read page + OOB from the second half */
372    {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
373            STATE_DATAOUT, STATE_READY}},
374    /* Read OOB */
375    {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
376            STATE_DATAOUT, STATE_READY}},
377    /* Program page starting from the beginning */
378    {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
379            STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
380    /* Program page starting from the beginning */
381    {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
382                  STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
383    /* Program page starting from the second half */
384    {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
385                  STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
386    /* Program OOB */
387    {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
388                  STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
389    /* Erase sector */
390    {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
391    /* Read status */
392    {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
393    /* Read multi-plane status */
394    {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
395    /* Read ID */
396    {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
397    /* Large page devices read page */
398    {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
399                   STATE_DATAOUT, STATE_READY}},
400    /* Large page devices random page read */
401    {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
402                   STATE_DATAOUT, STATE_READY}},
403};
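/*
 * Reading the table: the first entry, for example, is the classic small-page
 * read. The simulator expects the NAND_CMD_READ0 command byte (0x00), then a
 * full (row, column) page address whose ACTION_CPY bit copies the page plus
 * OOB into the internal buffer; the data is clocked out in STATE_DATAOUT and
 * the chip returns to STATE_READY.
 */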
404
405struct weak_block {
406    struct list_head list;
407    unsigned int erase_block_no;
408    unsigned int max_erases;
409    unsigned int erases_done;
410};
411
412static LIST_HEAD(weak_blocks);
413
414struct weak_page {
415    struct list_head list;
416    unsigned int page_no;
417    unsigned int max_writes;
418    unsigned int writes_done;
419};
420
421static LIST_HEAD(weak_pages);
422
423struct grave_page {
424    struct list_head list;
425    unsigned int page_no;
426    unsigned int max_reads;
427    unsigned int reads_done;
428};
429
430static LIST_HEAD(grave_pages);
431
432static unsigned long *erase_block_wear = NULL;
433static unsigned int wear_eb_count = 0;
434static unsigned long total_wear = 0;
435static unsigned int rptwear_cnt = 0;
436
437/* MTD structure for NAND controller */
438static struct mtd_info *nsmtd;
439
440static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
441
442/*
443 * Allocate the array of page pointers, create a slab cache for the pages,
444 * and initialize the array with NULL pointers.
445 *
446 * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
447 */
448static int alloc_device(struct nandsim *ns)
449{
450    struct file *cfile;
451    int i, err;
452
453    if (cache_file) {
454        cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
455        if (IS_ERR(cfile))
456            return PTR_ERR(cfile);
457        if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
458            NS_ERR("alloc_device: cache file not readable\n");
459            err = -EINVAL;
460            goto err_close;
461        }
462        if (!cfile->f_op->write && !cfile->f_op->aio_write) {
463            NS_ERR("alloc_device: cache file not writeable\n");
464            err = -EINVAL;
465            goto err_close;
466        }
467        ns->pages_written = vmalloc(ns->geom.pgnum);
468        if (!ns->pages_written) {
469            NS_ERR("alloc_device: unable to allocate pages written array\n");
470            err = -ENOMEM;
471            goto err_close;
472        }
473        ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
474        if (!ns->file_buf) {
475            NS_ERR("alloc_device: unable to allocate file buf\n");
476            err = -ENOMEM;
477            goto err_free;
478        }
479        ns->cfile = cfile;
480        memset(ns->pages_written, 0, ns->geom.pgnum);
481        return 0;
482    }
483
484    ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
485    if (!ns->pages) {
486        NS_ERR("alloc_device: unable to allocate page array\n");
487        return -ENOMEM;
488    }
489    for (i = 0; i < ns->geom.pgnum; i++) {
490        ns->pages[i].byte = NULL;
491    }
492    ns->nand_pages_slab = kmem_cache_create("nandsim",
493                        ns->geom.pgszoob, 0, 0, NULL);
494    if (!ns->nand_pages_slab) {
495        NS_ERR("cache_create: unable to create kmem_cache\n");
496        return -ENOMEM;
497    }
498
499    return 0;
500
501err_free:
502    vfree(ns->pages_written);
503err_close:
504    filp_close(cfile, NULL);
505    return err;
506}
507
508/*
509 * Free any allocated pages, and free the array of page pointers.
510 */
511static void free_device(struct nandsim *ns)
512{
513    int i;
514
515    if (ns->cfile) {
516        kfree(ns->file_buf);
517        vfree(ns->pages_written);
518        filp_close(ns->cfile, NULL);
519        return;
520    }
521
522    if (ns->pages) {
523        for (i = 0; i < ns->geom.pgnum; i++) {
524            if (ns->pages[i].byte)
525                kmem_cache_free(ns->nand_pages_slab,
526                        ns->pages[i].byte);
527        }
528        kmem_cache_destroy(ns->nand_pages_slab);
529        vfree(ns->pages);
530    }
531}
532
533static char *get_partition_name(int i)
534{
535    char buf[64];
536    sprintf(buf, "NAND simulator partition %d", i);
537    return kstrdup(buf, GFP_KERNEL);
538}
539
540static uint64_t divide(uint64_t n, uint32_t d)
541{
542    do_div(n, d);
543    return n;
544}
545
546/*
547 * Initialize the nandsim structure.
548 *
549 * RETURNS: 0 if success, -ERRNO if failure.
550 */
551static int init_nandsim(struct mtd_info *mtd)
552{
553    struct nand_chip *chip = (struct nand_chip *)mtd->priv;
554    struct nandsim *ns = (struct nandsim *)(chip->priv);
555    int i, ret = 0;
556    uint64_t remains;
557    uint64_t next_offset;
558
559    if (NS_IS_INITIALIZED(ns)) {
560        NS_ERR("init_nandsim: nandsim is already initialized\n");
561        return -EIO;
562    }
563
564    /* Force mtd to not do delays */
565    chip->chip_delay = 0;
566
567    /* Initialize the NAND flash parameters */
568    ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
569    ns->geom.totsz = mtd->size;
570    ns->geom.pgsz = mtd->writesize;
571    ns->geom.oobsz = mtd->oobsize;
572    ns->geom.secsz = mtd->erasesize;
573    ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
574    ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
575    ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
576    ns->geom.secshift = ffs(ns->geom.secsz) - 1;
577    ns->geom.pgshift = chip->page_shift;
578    ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
579    ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
580    ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
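    /*
     * For example, a hypothetical chip with 2048-byte pages, a 64-byte OOB
     * area and 128 KiB erase blocks gives pgszoob = 2112, pgsec = 64 and
     * secszoob = 131072 + 64 * 64 = 135168 bytes.
     */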
581    ns->options = 0;
582
583    if (ns->geom.pgsz == 256) {
584        ns->options |= OPT_PAGE256;
585    }
586    else if (ns->geom.pgsz == 512) {
587        ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
588        if (ns->busw == 8)
589            ns->options |= OPT_PAGE512_8BIT;
590    } else if (ns->geom.pgsz == 2048) {
591        ns->options |= OPT_PAGE2048;
592    } else if (ns->geom.pgsz == 4096) {
593        ns->options |= OPT_PAGE4096;
594    } else {
595        NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
596        return -EIO;
597    }
598
599    if (ns->options & OPT_SMALLPAGE) {
600        if (ns->geom.totsz <= (32 << 20)) {
601            ns->geom.pgaddrbytes = 3;
602            ns->geom.secaddrbytes = 2;
603        } else {
604            ns->geom.pgaddrbytes = 4;
605            ns->geom.secaddrbytes = 3;
606        }
607    } else {
608        if (ns->geom.totsz <= (128 << 20)) {
609            ns->geom.pgaddrbytes = 4;
610            ns->geom.secaddrbytes = 2;
611        } else {
612            ns->geom.pgaddrbytes = 5;
613            ns->geom.secaddrbytes = 3;
614        }
615    }
616
617    /* Fill the partition_info structure */
618    if (parts_num > ARRAY_SIZE(ns->partitions)) {
619        NS_ERR("too many partitions.\n");
620        ret = -EINVAL;
621        goto error;
622    }
623    remains = ns->geom.totsz;
624    next_offset = 0;
625    for (i = 0; i < parts_num; ++i) {
626        uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
627
628        if (!part_sz || part_sz > remains) {
629            NS_ERR("bad partition size.\n");
630            ret = -EINVAL;
631            goto error;
632        }
633        ns->partitions[i].name = get_partition_name(i);
634        ns->partitions[i].offset = next_offset;
635        ns->partitions[i].size = part_sz;
636        next_offset += ns->partitions[i].size;
637        remains -= ns->partitions[i].size;
638    }
639    ns->nbparts = parts_num;
640    if (remains) {
641        if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
642            NS_ERR("too many partitions.\n");
643            ret = -EINVAL;
644            goto error;
645        }
646        ns->partitions[i].name = get_partition_name(i);
647        ns->partitions[i].offset = next_offset;
648        ns->partitions[i].size = remains;
649        ns->nbparts += 1;
650    }
651
652    /* Detect how many ID bytes the NAND chip outputs */
653    for (i = 0; nand_flash_ids[i].name != NULL; i++) {
654        if (second_id_byte != nand_flash_ids[i].id)
655            continue;
656        if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
657            ns->options |= OPT_AUTOINCR;
658    }
659
660    if (ns->busw == 16)
661        NS_WARN("16-bit flash support hasn't been tested\n");
662
663    printk("flash size: %llu MiB\n",
664            (unsigned long long)ns->geom.totsz >> 20);
665    printk("page size: %u bytes\n", ns->geom.pgsz);
666    printk("OOB area size: %u bytes\n", ns->geom.oobsz);
667    printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
668    printk("pages number: %u\n", ns->geom.pgnum);
669    printk("pages per sector: %u\n", ns->geom.pgsec);
670    printk("bus width: %u\n", ns->busw);
671    printk("bits in sector size: %u\n", ns->geom.secshift);
672    printk("bits in page size: %u\n", ns->geom.pgshift);
673    printk("bits in OOB size: %u\n", ns->geom.oobshift);
674    printk("flash size with OOB: %llu KiB\n",
675            (unsigned long long)ns->geom.totszoob >> 10);
676    printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
677    printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
678    printk("options: %#x\n", ns->options);
679
680    if ((ret = alloc_device(ns)) != 0)
681        goto error;
682
683    /* Allocate / initialize the internal buffer */
684    ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
685    if (!ns->buf.byte) {
686        NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
687            ns->geom.pgszoob);
688        ret = -ENOMEM;
689        goto error;
690    }
691    memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
692
693    return 0;
694
695error:
696    free_device(ns);
697
698    return ret;
699}
700
701/*
702 * Free the nandsim structure.
703 */
704static void free_nandsim(struct nandsim *ns)
705{
706    kfree(ns->buf.byte);
707    free_device(ns);
708
709    return;
710}
711
712static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
713{
714    char *w;
715    int zero_ok;
716    unsigned int erase_block_no;
717    loff_t offset;
718
719    if (!badblocks)
720        return 0;
721    w = badblocks;
722    do {
723        zero_ok = (*w == '0' ? 1 : 0);
724        erase_block_no = simple_strtoul(w, &w, 0);
725        if (!zero_ok && !erase_block_no) {
726            NS_ERR("invalid badblocks.\n");
727            return -EINVAL;
728        }
729        offset = erase_block_no * ns->geom.secsz;
730        if (mtd->block_markbad(mtd, offset)) {
731            NS_ERR("invalid badblocks.\n");
732            return -EINVAL;
733        }
734        if (*w == ',')
735            w += 1;
736    } while (*w);
737    return 0;
738}
739
740static int parse_weakblocks(void)
741{
742    char *w;
743    int zero_ok;
744    unsigned int erase_block_no;
745    unsigned int max_erases;
746    struct weak_block *wb;
747
748    if (!weakblocks)
749        return 0;
750    w = weakblocks;
751    do {
752        zero_ok = (*w == '0' ? 1 : 0);
753        erase_block_no = simple_strtoul(w, &w, 0);
754        if (!zero_ok && !erase_block_no) {
755            NS_ERR("invalid weakblocks.\n");
756            return -EINVAL;
757        }
758        max_erases = 3;
759        if (*w == ':') {
760            w += 1;
761            max_erases = simple_strtoul(w, &w, 0);
762        }
763        if (*w == ',')
764            w += 1;
765        wb = kzalloc(sizeof(*wb), GFP_KERNEL);
766        if (!wb) {
767            NS_ERR("unable to allocate memory.\n");
768            return -ENOMEM;
769        }
770        wb->erase_block_no = erase_block_no;
771        wb->max_erases = max_erases;
772        list_add(&wb->list, &weak_blocks);
773    } while (*w);
774    return 0;
775}
776
777static int erase_error(unsigned int erase_block_no)
778{
779    struct weak_block *wb;
780
781    list_for_each_entry(wb, &weak_blocks, list)
782        if (wb->erase_block_no == erase_block_no) {
783            if (wb->erases_done >= wb->max_erases)
784                return 1;
785            wb->erases_done += 1;
786            return 0;
787        }
788    return 0;
789}
790
791static int parse_weakpages(void)
792{
793    char *w;
794    int zero_ok;
795    unsigned int page_no;
796    unsigned int max_writes;
797    struct weak_page *wp;
798
799    if (!weakpages)
800        return 0;
801    w = weakpages;
802    do {
803        zero_ok = (*w == '0' ? 1 : 0);
804        page_no = simple_strtoul(w, &w, 0);
805        if (!zero_ok && !page_no) {
806            NS_ERR("invalid weakpages.\n");
807            return -EINVAL;
808        }
809        max_writes = 3;
810        if (*w == ':') {
811            w += 1;
812            max_writes = simple_strtoul(w, &w, 0);
813        }
814        if (*w == ',')
815            w += 1;
816        wp = kzalloc(sizeof(*wp), GFP_KERNEL);
817        if (!wp) {
818            NS_ERR("unable to allocate memory.\n");
819            return -ENOMEM;
820        }
821        wp->page_no = page_no;
822        wp->max_writes = max_writes;
823        list_add(&wp->list, &weak_pages);
824    } while (*w);
825    return 0;
826}
827
828static int write_error(unsigned int page_no)
829{
830    struct weak_page *wp;
831
832    list_for_each_entry(wp, &weak_pages, list)
833        if (wp->page_no == page_no) {
834            if (wp->writes_done >= wp->max_writes)
835                return 1;
836            wp->writes_done += 1;
837            return 0;
838        }
839    return 0;
840}
841
842static int parse_gravepages(void)
843{
844    char *g;
845    int zero_ok;
846    unsigned int page_no;
847    unsigned int max_reads;
848    struct grave_page *gp;
849
850    if (!gravepages)
851        return 0;
852    g = gravepages;
853    do {
854        zero_ok = (*g == '0' ? 1 : 0);
855        page_no = simple_strtoul(g, &g, 0);
856        if (!zero_ok && !page_no) {
857            NS_ERR("invalid gravepages.\n");
858            return -EINVAL;
859        }
860        max_reads = 3;
861        if (*g == ':') {
862            g += 1;
863            max_reads = simple_strtoul(g, &g, 0);
864        }
865        if (*g == ',')
866            g += 1;
867        gp = kzalloc(sizeof(*gp), GFP_KERNEL);
868        if (!gp) {
869            NS_ERR("unable to allocate memory.\n");
870            return -ENOMEM;
871        }
872        gp->page_no = page_no;
873        gp->max_reads = max_reads;
874        list_add(&gp->list, &grave_pages);
875    } while (*g);
876    return 0;
877}
878
879static int read_error(unsigned int page_no)
880{
881    struct grave_page *gp;
882
883    list_for_each_entry(gp, &grave_pages, list)
884        if (gp->page_no == page_no) {
885            if (gp->reads_done >= gp->max_reads)
886                return 1;
887            gp->reads_done += 1;
888            return 0;
889        }
890    return 0;
891}
892
893static void free_lists(void)
894{
895    struct list_head *pos, *n;
896    list_for_each_safe(pos, n, &weak_blocks) {
897        list_del(pos);
898        kfree(list_entry(pos, struct weak_block, list));
899    }
900    list_for_each_safe(pos, n, &weak_pages) {
901        list_del(pos);
902        kfree(list_entry(pos, struct weak_page, list));
903    }
904    list_for_each_safe(pos, n, &grave_pages) {
905        list_del(pos);
906        kfree(list_entry(pos, struct grave_page, list));
907    }
908    kfree(erase_block_wear);
909}
910
911static int setup_wear_reporting(struct mtd_info *mtd)
912{
913    size_t mem;
914
915    if (!rptwear)
916        return 0;
917    wear_eb_count = divide(mtd->size, mtd->erasesize);
918    mem = wear_eb_count * sizeof(unsigned long);
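    /* If the multiplication above overflowed, dividing back will not give wear_eb_count */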
919    if (mem / sizeof(unsigned long) != wear_eb_count) {
920        NS_ERR("Too many erase blocks for wear reporting\n");
921        return -ENOMEM;
922    }
923    erase_block_wear = kzalloc(mem, GFP_KERNEL);
924    if (!erase_block_wear) {
925        NS_ERR("Unable to allocate memory for wear reporting\n");
926        return -ENOMEM;
927    }
928    return 0;
929}
930
931static void update_wear(unsigned int erase_block_no)
932{
933    unsigned long wmin = -1, wmax = 0, avg;
934    unsigned long deciles[10], decile_max[10], tot = 0;
935    unsigned int i;
936
937    if (!erase_block_wear)
938        return;
939    total_wear += 1;
940    if (total_wear == 0)
941        NS_ERR("Erase counter total overflow\n");
942    erase_block_wear[erase_block_no] += 1;
943    if (erase_block_wear[erase_block_no] == 0)
944        NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
945    rptwear_cnt += 1;
946    if (rptwear_cnt < rptwear)
947        return;
948    rptwear_cnt = 0;
949    /* Calc wear stats */
950    for (i = 0; i < wear_eb_count; ++i) {
951        unsigned long wear = erase_block_wear[i];
952        if (wear < wmin)
953            wmin = wear;
954        if (wear > wmax)
955            wmax = wear;
956        tot += wear;
957    }
958    for (i = 0; i < 9; ++i) {
959        deciles[i] = 0;
960        decile_max[i] = (wmax * (i + 1) + 5) / 10;
961    }
962    deciles[9] = 0;
963    decile_max[9] = wmax;
964    for (i = 0; i < wear_eb_count; ++i) {
965        int d;
966        unsigned long wear = erase_block_wear[i];
967        for (d = 0; d < 10; ++d)
968            if (wear <= decile_max[d]) {
969                deciles[d] += 1;
970                break;
971            }
972    }
973    avg = tot / wear_eb_count;
974    /* Output wear report */
975    NS_INFO("*** Wear Report ***\n");
976    NS_INFO("Total number of erases: %lu\n", tot);
977    NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
978    NS_INFO("Average number of erases: %lu\n", avg);
979    NS_INFO("Maximum number of erases: %lu\n", wmax);
980    NS_INFO("Minimum number of erases: %lu\n", wmin);
981    for (i = 0; i < 10; ++i) {
982        unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
983        if (from > decile_max[i])
984            continue;
985        NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
986            from,
987            decile_max[i],
988            deciles[i]);
989    }
990    NS_INFO("*** End of Wear Report ***\n");
991}
992
993/*
994 * Returns the string representation of the given state.
995 */
996static char *get_state_name(uint32_t state)
997{
998    switch (NS_STATE(state)) {
999        case STATE_CMD_READ0:
1000            return "STATE_CMD_READ0";
1001        case STATE_CMD_READ1:
1002            return "STATE_CMD_READ1";
1003        case STATE_CMD_PAGEPROG:
1004            return "STATE_CMD_PAGEPROG";
1005        case STATE_CMD_READOOB:
1006            return "STATE_CMD_READOOB";
1007        case STATE_CMD_READSTART:
1008            return "STATE_CMD_READSTART";
1009        case STATE_CMD_ERASE1:
1010            return "STATE_CMD_ERASE1";
1011        case STATE_CMD_STATUS:
1012            return "STATE_CMD_STATUS";
1013        case STATE_CMD_STATUS_M:
1014            return "STATE_CMD_STATUS_M";
1015        case STATE_CMD_SEQIN:
1016            return "STATE_CMD_SEQIN";
1017        case STATE_CMD_READID:
1018            return "STATE_CMD_READID";
1019        case STATE_CMD_ERASE2:
1020            return "STATE_CMD_ERASE2";
1021        case STATE_CMD_RESET:
1022            return "STATE_CMD_RESET";
1023        case STATE_CMD_RNDOUT:
1024            return "STATE_CMD_RNDOUT";
1025        case STATE_CMD_RNDOUTSTART:
1026            return "STATE_CMD_RNDOUTSTART";
1027        case STATE_ADDR_PAGE:
1028            return "STATE_ADDR_PAGE";
1029        case STATE_ADDR_SEC:
1030            return "STATE_ADDR_SEC";
1031        case STATE_ADDR_ZERO:
1032            return "STATE_ADDR_ZERO";
1033        case STATE_ADDR_COLUMN:
1034            return "STATE_ADDR_COLUMN";
1035        case STATE_DATAIN:
1036            return "STATE_DATAIN";
1037        case STATE_DATAOUT:
1038            return "STATE_DATAOUT";
1039        case STATE_DATAOUT_ID:
1040            return "STATE_DATAOUT_ID";
1041        case STATE_DATAOUT_STATUS:
1042            return "STATE_DATAOUT_STATUS";
1043        case STATE_DATAOUT_STATUS_M:
1044            return "STATE_DATAOUT_STATUS_M";
1045        case STATE_READY:
1046            return "STATE_READY";
1047        case STATE_UNKNOWN:
1048            return "STATE_UNKNOWN";
1049    }
1050
1051    NS_ERR("get_state_name: unknown state, BUG\n");
1052    return NULL;
1053}
1054
1055/*
1056 * Check if command is valid.
1057 *
1058 * RETURNS: 1 if the command is invalid, 0 if it is valid.
1059 */
1060static int check_command(int cmd)
1061{
1062    switch (cmd) {
1063
1064    case NAND_CMD_READ0:
1065    case NAND_CMD_READ1:
1066    case NAND_CMD_READSTART:
1067    case NAND_CMD_PAGEPROG:
1068    case NAND_CMD_READOOB:
1069    case NAND_CMD_ERASE1:
1070    case NAND_CMD_STATUS:
1071    case NAND_CMD_SEQIN:
1072    case NAND_CMD_READID:
1073    case NAND_CMD_ERASE2:
1074    case NAND_CMD_RESET:
1075    case NAND_CMD_RNDOUT:
1076    case NAND_CMD_RNDOUTSTART:
1077        return 0;
1078
1079    case NAND_CMD_STATUS_MULTI:
1080    default:
1081        return 1;
1082    }
1083}
1084
1085/*
1086 * Returns the state the simulator enters after the given command is accepted.
1087 */
1088static uint32_t get_state_by_command(unsigned command)
1089{
1090    switch (command) {
1091        case NAND_CMD_READ0:
1092            return STATE_CMD_READ0;
1093        case NAND_CMD_READ1:
1094            return STATE_CMD_READ1;
1095        case NAND_CMD_PAGEPROG:
1096            return STATE_CMD_PAGEPROG;
1097        case NAND_CMD_READSTART:
1098            return STATE_CMD_READSTART;
1099        case NAND_CMD_READOOB:
1100            return STATE_CMD_READOOB;
1101        case NAND_CMD_ERASE1:
1102            return STATE_CMD_ERASE1;
1103        case NAND_CMD_STATUS:
1104            return STATE_CMD_STATUS;
1105        case NAND_CMD_STATUS_MULTI:
1106            return STATE_CMD_STATUS_M;
1107        case NAND_CMD_SEQIN:
1108            return STATE_CMD_SEQIN;
1109        case NAND_CMD_READID:
1110            return STATE_CMD_READID;
1111        case NAND_CMD_ERASE2:
1112            return STATE_CMD_ERASE2;
1113        case NAND_CMD_RESET:
1114            return STATE_CMD_RESET;
1115        case NAND_CMD_RNDOUT:
1116            return STATE_CMD_RNDOUT;
1117        case NAND_CMD_RNDOUTSTART:
1118            return STATE_CMD_RNDOUTSTART;
1119    }
1120
1121    NS_ERR("get_state_by_command: unknown command, BUG\n");
1122    return 0;
1123}
1124
1125/*
1126 * Move an address byte to the corresponding internal register.
1127 */
1128static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
1129{
1130    uint byte = (uint)bt;
1131
1132    if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
1133        ns->regs.column |= (byte << 8 * ns->regs.count);
1134    else {
1135        ns->regs.row |= (byte << 8 * (ns->regs.count -
1136                        ns->geom.pgaddrbytes +
1137                        ns->geom.secaddrbytes));
1138    }
1139
1140    return;
1141}
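/*
 * For example, on a small-page chip of up to 32 MiB (pgaddrbytes == 3,
 * secaddrbytes == 2, see init_nandsim()), the first address byte fills the
 * column register and the next two bytes form the row (page number), least
 * significant byte first.
 */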
1142
1143/*
1144 * Switch to STATE_READY state.
1145 */
1146static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1147{
1148    NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
1149
1150    ns->state = STATE_READY;
1151    ns->nxstate = STATE_UNKNOWN;
1152    ns->op = NULL;
1153    ns->npstates = 0;
1154    ns->stateidx = 0;
1155    ns->regs.num = 0;
1156    ns->regs.count = 0;
1157    ns->regs.off = 0;
1158    ns->regs.row = 0;
1159    ns->regs.column = 0;
1160    ns->regs.status = status;
1161}
1162
1163/*
1164 * If the operation isn't known yet, try to find it in the global array
1165 * of supported operations.
1166 *
1167 * The operation can be unknown for the following reasons.
1168 * 1. A new command was accepted and this is the first call to find the
1169 * corresponding states chain. In this case ns->npstates = 0;
1170 * 2. There are several operations which begin with the same command(s)
1171 * (for example program from the second half and read from the
1172 * second half operations both begin with the READ1 command). In this
1173 * case the ns->pstates[] array contains previous states.
1174 *
1175 * Thus, the function tries to find an operation containing the following
1176 * states (if the 'flag' parameter is 0):
1177 * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
1178 *
1179 * If one (and only one) matching operation is found, it is accepted
1180 * (ns->op, ns->state, ns->nxstate are initialized, ns->npstates is
1181 * zeroed).
1182 *
1183 * If there are several matches, the current state is pushed to the
1184 * ns->pstates.
1185 *
1186 * The operation can be unknown only while commands are input to the chip.
1187 * As soon as an address is accepted, the operation must be known.
1188 * In such situation the function is called with 'flag' != 0, and the
1189 * operation is searched using the following pattern:
1190 * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
1191 *
1192 * It is assumed that this pattern must match either one operation or
1193 * none. There can't be any ambiguity in that case.
1194 *
1195 * If no matches are found, the function does the following:
1196 * 1. if there are saved states present, try to ignore them and search
1197 * again only using the last command. If nothing was found, switch
1198 * to the STATE_READY state.
1199 * 2. if there are no saved states, switch to the STATE_READY state.
1200 *
1201 * RETURNS: -2 - no matched operations found.
1202 * -1 - several matches.
1203 * 0 - operation is found.
1204 */
1205static int find_operation(struct nandsim *ns, uint32_t flag)
1206{
1207    int opsfound = 0;
1208    int i, j, idx = 0;
1209
1210    for (i = 0; i < NS_OPER_NUM; i++) {
1211
1212        int found = 1;
1213
1214        if (!(ns->options & ops[i].reqopts))
1215            /* Ignore operations we can't perform */
1216            continue;
1217
1218        if (flag) {
1219            if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
1220                continue;
1221        } else {
1222            if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
1223                continue;
1224        }
1225
1226        for (j = 0; j < ns->npstates; j++)
1227            if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
1228                && (ns->options & ops[idx].reqopts)) {
1229                found = 0;
1230                break;
1231            }
1232
1233        if (found) {
1234            idx = i;
1235            opsfound += 1;
1236        }
1237    }
1238
1239    if (opsfound == 1) {
1240        /* Exact match */
1241        ns->op = &ops[idx].states[0];
1242        if (flag) {
1243            /*
1244             * In this case the find_operation function was
1245             * called when the address input has just begun. But it isn't
1246             * yet fully input and the current state must
1247             * not be one of STATE_ADDR_*, but the STATE_ADDR_*
1248             * state must be the next state (ns->nxstate).
1249             */
1250            ns->stateidx = ns->npstates - 1;
1251        } else {
1252            ns->stateidx = ns->npstates;
1253        }
1254        ns->npstates = 0;
1255        ns->state = ns->op[ns->stateidx];
1256        ns->nxstate = ns->op[ns->stateidx + 1];
1257        NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
1258                idx, get_state_name(ns->state), get_state_name(ns->nxstate));
1259        return 0;
1260    }
1261
1262    if (opsfound == 0) {
1263        /* Nothing was found. Try to ignore previous commands (if any) and search again */
1264        if (ns->npstates != 0) {
1265            NS_DBG("find_operation: no operation found, try again with state %s\n",
1266                    get_state_name(ns->state));
1267            ns->npstates = 0;
1268            return find_operation(ns, 0);
1269
1270        }
1271        NS_DBG("find_operation: no operations found\n");
1272        switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1273        return -2;
1274    }
1275
1276    if (flag) {
1277        /* This shouldn't happen */
1278        NS_DBG("find_operation: BUG, operation must be known if address is input\n");
1279        return -2;
1280    }
1281
1282    NS_DBG("find_operation: there is still ambiguity\n");
1283
1284    ns->pstates[ns->npstates++] = ns->state;
1285
1286    return -1;
1287}
1288
1289static void put_pages(struct nandsim *ns)
1290{
1291    int i;
1292
1293    for (i = 0; i < ns->held_cnt; i++)
1294        page_cache_release(ns->held_pages[i]);
1295}
1296
1297/* Get page cache pages in advance to provide NOFS memory allocation */
1298static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
1299{
1300    pgoff_t index, start_index, end_index;
1301    struct page *page;
1302    struct address_space *mapping = file->f_mapping;
1303
1304    start_index = pos >> PAGE_CACHE_SHIFT;
1305    end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
1306    if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1307        return -EINVAL;
1308    ns->held_cnt = 0;
1309    for (index = start_index; index <= end_index; index++) {
1310        page = find_get_page(mapping, index);
1311        if (page == NULL) {
1312            page = find_or_create_page(mapping, index, GFP_NOFS);
1313            if (page == NULL) {
1314                write_inode_now(mapping->host, 1);
1315                page = find_or_create_page(mapping, index, GFP_NOFS);
1316            }
1317            if (page == NULL) {
1318                put_pages(ns);
1319                return -ENOMEM;
1320            }
1321            unlock_page(page);
1322        }
1323        ns->held_pages[ns->held_cnt++] = page;
1324    }
1325    return 0;
1326}
1327
1328static int set_memalloc(void)
1329{
1330    if (current->flags & PF_MEMALLOC)
1331        return 0;
1332    current->flags |= PF_MEMALLOC;
1333    return 1;
1334}
1335
1336static void clear_memalloc(int memalloc)
1337{
1338    if (memalloc)
1339        current->flags &= ~PF_MEMALLOC;
1340}
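/*
 * Note: while the cache file is accessed the task is temporarily flagged
 * PF_MEMALLOC, so its allocations may dip into the emergency reserves rather
 * than recurse into memory reclaim, which could otherwise come back to a
 * flash file system sitting on top of nandsim.
 */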
1341
1342static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
1343{
1344    mm_segment_t old_fs;
1345    ssize_t tx;
1346    int err, memalloc;
1347
1348    err = get_pages(ns, file, count, *pos);
1349    if (err)
1350        return err;
1351    old_fs = get_fs();
1352    set_fs(get_ds());
1353    memalloc = set_memalloc();
1354    tx = vfs_read(file, (char __user *)buf, count, pos);
1355    clear_memalloc(memalloc);
1356    set_fs(old_fs);
1357    put_pages(ns);
1358    return tx;
1359}
1360
1361static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
1362{
1363    mm_segment_t old_fs;
1364    ssize_t tx;
1365    int err, memalloc;
1366
1367    err = get_pages(ns, file, count, *pos);
1368    if (err)
1369        return err;
1370    old_fs = get_fs();
1371    set_fs(get_ds());
1372    memalloc = set_memalloc();
1373    tx = vfs_write(file, (char __user *)buf, count, pos);
1374    clear_memalloc(memalloc);
1375    set_fs(old_fs);
1376    put_pages(ns);
1377    return tx;
1378}
1379
1380/*
1381 * Returns a pointer to the current page.
1382 */
1383static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
1384{
1385    return &(ns->pages[ns->regs.row]);
1386}
1387
1388/*
1389 * Returns a pointer to the current byte within the current page.
1390 */
1391static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
1392{
1393    return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
1394}
1395
1396static int do_read_error(struct nandsim *ns, int num)
1397{
1398    unsigned int page_no = ns->regs.row;
1399
1400    if (read_error(page_no)) {
1401        int i;
1402        memset(ns->buf.byte, 0xFF, num);
1403        for (i = 0; i < num; ++i)
1404            ns->buf.byte[i] = random32();
1405        NS_WARN("simulating read error in page %u\n", page_no);
1406        return 1;
1407    }
1408    return 0;
1409}
1410
1411static void do_bit_flips(struct nandsim *ns, int num)
1412{
1413    if (bitflips && random32() < (1 << 22)) {
1414        int flips = 1;
1415        if (bitflips > 1)
1416            flips = (random32() % (int) bitflips) + 1;
1417        while (flips--) {
1418            int pos = random32() % (num * 8);
1419            ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1420            NS_WARN("read_page: flipping bit %d in page %d "
1421                "reading from %d ecc: corrected=%u failed=%u\n",
1422                pos, ns->regs.row, ns->regs.column + ns->regs.off,
1423                nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1424        }
1425    }
1426}
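/*
 * In do_bit_flips() above, random32() returns a uniform 32-bit value, so the
 * (1 << 22) threshold triggers a bit-flip event on roughly one read in 1024
 * (2^22 / 2^32) whenever bitflips is non-zero.
 */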
1427
1428/*
1429 * Fill the NAND buffer with data read from the specified page.
1430 */
1431static void read_page(struct nandsim *ns, int num)
1432{
1433    union ns_mem *mypage;
1434
1435    if (ns->cfile) {
1436        if (!ns->pages_written[ns->regs.row]) {
1437            NS_DBG("read_page: page %d not written\n", ns->regs.row);
1438            memset(ns->buf.byte, 0xFF, num);
1439        } else {
1440            loff_t pos;
1441            ssize_t tx;
1442
1443            NS_DBG("read_page: page %d written, reading from %d\n",
1444                ns->regs.row, ns->regs.column + ns->regs.off);
1445            if (do_read_error(ns, num))
1446                return;
1447            pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
1448            tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
1449            if (tx != num) {
1450                NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1451                return;
1452            }
1453            do_bit_flips(ns, num);
1454        }
1455        return;
1456    }
1457
1458    mypage = NS_GET_PAGE(ns);
1459    if (mypage->byte == NULL) {
1460        NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
1461        memset(ns->buf.byte, 0xFF, num);
1462    } else {
1463        NS_DBG("read_page: page %d allocated, reading from %d\n",
1464            ns->regs.row, ns->regs.column + ns->regs.off);
1465        if (do_read_error(ns, num))
1466            return;
1467        memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
1468        do_bit_flips(ns, num);
1469    }
1470}
1471
1472/*
1473 * Erase all pages in the specified sector.
1474 */
1475static void erase_sector(struct nandsim *ns)
1476{
1477    union ns_mem *mypage;
1478    int i;
1479
1480    if (ns->cfile) {
1481        for (i = 0; i < ns->geom.pgsec; i++)
1482            if (ns->pages_written[ns->regs.row + i]) {
1483                NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
1484                ns->pages_written[ns->regs.row + i] = 0;
1485            }
1486        return;
1487    }
1488
1489    mypage = NS_GET_PAGE(ns);
1490    for (i = 0; i < ns->geom.pgsec; i++) {
1491        if (mypage->byte != NULL) {
1492            NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
1493            kmem_cache_free(ns->nand_pages_slab, mypage->byte);
1494            mypage->byte = NULL;
1495        }
1496        mypage++;
1497    }
1498}
1499
1500/*
1501 * Program the specified page with the contents from the NAND buffer.
1502 */
1503static int prog_page(struct nandsim *ns, int num)
1504{
1505    int i;
1506    union ns_mem *mypage;
1507    u_char *pg_off;
1508
1509    if (ns->cfile) {
1510        loff_t off, pos;
1511        ssize_t tx;
1512        int all;
1513
1514        NS_DBG("prog_page: writing page %d\n", ns->regs.row);
1515        pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
1516        off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
1517        if (!ns->pages_written[ns->regs.row]) {
1518            all = 1;
1519            memset(ns->file_buf, 0xff, ns->geom.pgszoob);
1520        } else {
1521            all = 0;
1522            pos = off;
1523            tx = read_file(ns, ns->cfile, pg_off, num, &pos);
1524            if (tx != num) {
1525                NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1526                return -1;
1527            }
1528        }
1529        for (i = 0; i < num; i++)
1530            pg_off[i] &= ns->buf.byte[i];
1531        if (all) {
1532            pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
1533            tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
1534            if (tx != ns->geom.pgszoob) {
1535                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1536                return -1;
1537            }
1538            ns->pages_written[ns->regs.row] = 1;
1539        } else {
1540            pos = off;
1541            tx = write_file(ns, ns->cfile, pg_off, num, &pos);
1542            if (tx != num) {
1543                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1544                return -1;
1545            }
1546        }
1547        return 0;
1548    }
1549
1550    mypage = NS_GET_PAGE(ns);
1551    if (mypage->byte == NULL) {
1552        NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1553        /*
1554         * We allocate memory with GFP_NOFS because a flash FS may
1555         * utilize this. If it is holding an FS lock, then gets here,
1556         * then kernel memory alloc runs writeback which goes to the FS
1557         * again and deadlocks. This was seen in practice.
1558         */
1559        mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
1560        if (mypage->byte == NULL) {
1561            NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1562            return -1;
1563        }
1564        memset(mypage->byte, 0xFF, ns->geom.pgszoob);
1565    }
1566
1567    pg_off = NS_PAGE_BYTE_OFF(ns);
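    /* NAND programming can only clear bits (1 -> 0), hence the AND with the existing data */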
1568    for (i = 0; i < num; i++)
1569        pg_off[i] &= ns->buf.byte[i];
1570
1571    return 0;
1572}
1573
1574/*
1575 * If state has any action bit, perform this action.
1576 *
1577 * RETURNS: 0 if success, -1 if error.
1578 */
1579static int do_state_action(struct nandsim *ns, uint32_t action)
1580{
1581    int num;
1582    int busdiv = ns->busw == 8 ? 1 : 2;
1583    unsigned int erase_block_no, page_no;
1584
1585    action &= ACTION_MASK;
1586
1587    /* Check that page address input is correct */
1588    if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
1589        NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
1590        return -1;
1591    }
1592
1593    switch (action) {
1594
1595    case ACTION_CPY:
1596        /*
1597         * Copy page data to the internal buffer.
1598         */
1599
1600        /* Column plus the current offset must stay within the page and OOB area */
1601        if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
1602            NS_ERR("do_state_action: column number is too large\n");
1603            break;
1604        }
1605        num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1606        read_page(ns, num);
1607
1608        NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
1609            num, NS_RAW_OFFSET(ns) + ns->regs.off);
1610
1611        if (ns->regs.off == 0)
1612            NS_LOG("read page %d\n", ns->regs.row);
1613        else if (ns->regs.off < ns->geom.pgsz)
1614            NS_LOG("read page %d (second half)\n", ns->regs.row);
1615        else
1616            NS_LOG("read OOB of page %d\n", ns->regs.row);
1617
1618        NS_UDELAY(access_delay);
1619        NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
1620
1621        break;
1622
1623    case ACTION_SECERASE:
1624        /*
1625         * Erase sector.
1626         */
1627
1628        if (ns->lines.wp) {
1629            NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
1630            return -1;
1631        }
1632
1633        if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
1634            || (ns->regs.row & ~(ns->geom.secsz - 1))) {
1635            NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
1636            return -1;
1637        }
1638
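        /*
         * The erase address bytes were accumulated into the column and row
         * registers as if they formed a full page address; fold them back
         * into a single row (page) address before erasing.
         */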
1639        ns->regs.row = (ns->regs.row <<
1640                8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
1641        ns->regs.column = 0;
1642
1643        erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
1644
1645        NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
1646                ns->regs.row, NS_RAW_OFFSET(ns));
1647        NS_LOG("erase sector %u\n", erase_block_no);
1648
1649        erase_sector(ns);
1650
1651        NS_MDELAY(erase_delay);
1652
1653        if (erase_block_wear)
1654            update_wear(erase_block_no);
1655
1656        if (erase_error(erase_block_no)) {
1657            NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
1658            return -1;
1659        }
1660
1661        break;
1662
1663    case ACTION_PRGPAGE:
1664        /*
1665         * Program page - move internal buffer data to the page.
1666         */
1667
1668        if (ns->lines.wp) {
1669            NS_WARN("do_state_action: device is write-protected, ignore program page\n");
1670            return -1;
1671        }
1672
1673        num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1674        if (num != ns->regs.count) {
1675            NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
1676                    ns->regs.count, num);
1677            return -1;
1678        }
1679
1680        if (prog_page(ns, num) == -1)
1681            return -1;
1682
1683        page_no = ns->regs.row;
1684
1685        NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
1686            num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
1687        NS_LOG("program page %d\n", ns->regs.row);
1688
1689        NS_UDELAY(programm_delay);
1690        NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
1691
1692        if (write_error(page_no)) {
1693            NS_WARN("simulating write failure in page %u\n", page_no);
1694            return -1;
1695        }
1696
1697        break;
1698
1699    case ACTION_ZEROOFF:
1700        NS_DBG("do_state_action: set internal offset to 0\n");
1701        ns->regs.off = 0;
1702        break;
1703
1704    case ACTION_HALFOFF:
1705        if (!(ns->options & OPT_PAGE512_8BIT)) {
1706            NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
1707                " byte page size 8x chips\n");
1708            return -1;
1709        }
1710        NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
1711        ns->regs.off = ns->geom.pgsz/2;
1712        break;
1713
1714    case ACTION_OOBOFF:
1715        NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
1716        ns->regs.off = ns->geom.pgsz;
1717        break;
1718
1719    default:
1720        NS_DBG("do_state_action: BUG! unknown action\n");
1721    }
1722
1723    return 0;
1724}
1725
1726/*
1727 * Switch simulator's state.
1728 */
1729static void switch_state(struct nandsim *ns)
1730{
1731    if (ns->op) {
1732        /*
1733         * The current operation has already been identified.
1734         * Just follow the states chain.
1735         */
1736
1737        ns->stateidx += 1;
1738        ns->state = ns->nxstate;
1739        ns->nxstate = ns->op[ns->stateidx + 1];
1740
1741        NS_DBG("switch_state: operation is known, switch to the next state, "
1742            "state: %s, nxstate: %s\n",
1743            get_state_name(ns->state), get_state_name(ns->nxstate));
1744
1745        /* See whether we need to do some action */
1746        if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1747            switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1748            return;
1749        }
1750
1751    } else {
1752        /*
1753         * We don't yet know which operation we perform.
1754         * Try to identify it.
1755         */
1756
1757        /*
1758         * The only event that causes the switch_state function to be
1759         * called with a yet unknown operation is a new command.
1760         */
1761        ns->state = get_state_by_command(ns->regs.command);
1762
1763        NS_DBG("switch_state: operation is unknown, try to find it\n");
1764
1765        if (find_operation(ns, 0) != 0)
1766            return;
1767
1768        if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1769            switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1770            return;
1771        }
1772    }
1773
1774    /* For 16x devices column means the page offset in words */
1775    if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
1776        NS_DBG("switch_state: double the column number for 16x device\n");
1777        ns->regs.column <<= 1;
1778    }
1779
1780    if (NS_STATE(ns->nxstate) == STATE_READY) {
1781        /*
1782         * The current state is the last. Return to STATE_READY
1783         */
1784
1785        u_char status = NS_STATUS_OK(ns);
1786
1787        /* In case of data states, see if all bytes were input/output */
1788        if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
1789            && ns->regs.count != ns->regs.num) {
1790            NS_WARN("switch_state: not all bytes were processed, %d left\n",
1791                    ns->regs.num - ns->regs.count);
1792            status = NS_STATUS_FAILED(ns);
1793        }
1794
1795        NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
1796
1797        switch_to_ready_state(ns, status);
1798
1799        return;
1800    } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
1801        /*
1802         * If the next state is data input/output, switch to it now
1803         */
1804
1805        ns->state = ns->nxstate;
1806        ns->nxstate = ns->op[++ns->stateidx + 1];
1807        ns->regs.num = ns->regs.count = 0;
1808
1809        NS_DBG("switch_state: the next state is data I/O, switch, "
1810            "state: %s, nxstate: %s\n",
1811            get_state_name(ns->state), get_state_name(ns->nxstate));
1812
1813        /*
1814         * Set the internal register to the count of bytes which
1815         * are expected to be input or output
1816         */
1817        switch (NS_STATE(ns->state)) {
1818            case STATE_DATAIN:
1819            case STATE_DATAOUT:
1820                ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1821                break;
1822
1823            case STATE_DATAOUT_ID:
1824                ns->regs.num = ns->geom.idbytes;
1825                break;
1826
1827            case STATE_DATAOUT_STATUS:
1828            case STATE_DATAOUT_STATUS_M:
1829                ns->regs.count = ns->regs.num = 0;
1830                break;
1831
1832            default:
1833                NS_ERR("switch_state: BUG! unknown data state\n");
1834        }
1835
1836    } else if (ns->nxstate & STATE_ADDR_MASK) {
1837        /*
1838         * If the next state is address input, set the internal
1839         * register to the number of expected address bytes
1840         */
1841
1842        ns->regs.count = 0;
1843
1844        switch (NS_STATE(ns->nxstate)) {
1845            case STATE_ADDR_PAGE:
1846                ns->regs.num = ns->geom.pgaddrbytes;
1847
1848                break;
1849            case STATE_ADDR_SEC:
1850                ns->regs.num = ns->geom.secaddrbytes;
1851                break;
1852
1853            case STATE_ADDR_ZERO:
1854                ns->regs.num = 1;
1855                break;
1856
1857            case STATE_ADDR_COLUMN:
1858                /* Column address is always 2 bytes */
1859                ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
1860                break;
1861
1862            default:
1863                NS_ERR("switch_state: BUG! unknown address state\n");
1864        }
1865    } else {
1866        /*
1867         * Just reset internal counters.
1868         */
1869
1870        ns->regs.num = 0;
1871        ns->regs.count = 0;
1872    }
1873}
1874
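/*
 * MTD read_byte callback. After the usual sanity checks on the control
 * lines, this returns the status register, an ID byte or the next byte of
 * the internal buffer, depending on the current data-output state.
 */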
1875static u_char ns_nand_read_byte(struct mtd_info *mtd)
1876{
1877    struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1878    u_char outb = 0x00;
1879
1880    /* Sanity and correctness checks */
1881    if (!ns->lines.ce) {
1882        NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
1883        return outb;
1884    }
1885    if (ns->lines.ale || ns->lines.cle) {
1886        NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
1887        return outb;
1888    }
1889    if (!(ns->state & STATE_DATAOUT_MASK)) {
1890        NS_WARN("read_byte: unexpected data output cycle, state is %s, "
1891            "return %#x\n", get_state_name(ns->state), (uint)outb);
1892        return outb;
1893    }
1894
1895    /* The status register may be read as many times as needed */
1896    if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
1897        NS_DBG("read_byte: return %#x status\n", ns->regs.status);
1898        return ns->regs.status;
1899    }
1900
1901    /* Check if there is any data in the internal buffer which may be read */
1902    if (ns->regs.count == ns->regs.num) {
1903        NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
1904        return outb;
1905    }
1906
1907    switch (NS_STATE(ns->state)) {
1908        case STATE_DATAOUT:
1909            if (ns->busw == 8) {
1910                outb = ns->buf.byte[ns->regs.count];
1911                ns->regs.count += 1;
1912            } else {
1913                outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
1914                ns->regs.count += 2;
1915            }
1916            break;
1917        case STATE_DATAOUT_ID:
1918            NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
1919            outb = ns->ids[ns->regs.count];
1920            ns->regs.count += 1;
1921            break;
1922        default:
1923            BUG();
1924    }
1925
1926    if (ns->regs.count == ns->regs.num) {
1927        NS_DBG("read_byte: all bytes were read\n");
1928
1929        /*
1930         * The OPT_AUTOINCR option allows reading the next consecutive pages
1931         * without a new read operation cycle.
1932         */
1933        if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1934            ns->regs.count = 0;
1935            if (ns->regs.row + 1 < ns->geom.pgnum)
1936                ns->regs.row += 1;
1937            NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
1938            do_state_action(ns, ACTION_CPY);
1939        }
1940        else if (NS_STATE(ns->nxstate) == STATE_READY)
1941            switch_state(ns);
1942
1943    }
1944
1945    return outb;
1946}
1947
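/*
 * MTD write_byte callback. Depending on the CLE/ALE lines, the written byte
 * is interpreted as a command (CLE high), an address byte (ALE high) or
 * input data, and the simulator state machine is advanced accordingly.
 */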
1948static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1949{
1950    struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1951
1952    /* Sanity and correctness checks */
1953    if (!ns->lines.ce) {
1954        NS_ERR("write_byte: chip is disabled, ignore write\n");
1955        return;
1956    }
1957    if (ns->lines.ale && ns->lines.cle) {
1958        NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
1959        return;
1960    }
1961
1962    if (ns->lines.cle == 1) {
1963        /*
1964         * The byte written is a command.
1965         */
1966
1967        if (byte == NAND_CMD_RESET) {
1968            NS_LOG("reset chip\n");
1969            switch_to_ready_state(ns, NS_STATUS_OK(ns));
1970            return;
1971        }
1972
1973        /* Check that the command byte is correct */
1974        if (check_command(byte)) {
1975            NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1976            return;
1977        }
1978
1979        if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
1980            || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
1981            || NS_STATE(ns->state) == STATE_DATAOUT) {
1982            int row = ns->regs.row;
1983
1984            switch_state(ns);
1985            if (byte == NAND_CMD_RNDOUT)
1986                ns->regs.row = row;
1987        }
1988
1989        /* Check if chip is expecting command */
1990        if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
1991            /* Do not warn if only 2 id bytes are read */
1992            if (!(ns->regs.command == NAND_CMD_READID &&
1993                NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
1994                /*
1995                 * We are in a situation where something other than a command
1996                 * was expected but a command was input. In this case ignore the
1997                 * previous command(s)/state(s) and accept the last one.
1998                 */
1999                NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
2000                    "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
2001            }
2002            switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2003        }
2004
2005        NS_DBG("command byte corresponding to %s state accepted\n",
2006            get_state_name(get_state_by_command(byte)));
2007        ns->regs.command = byte;
2008        switch_state(ns);
2009
2010    } else if (ns->lines.ale == 1) {
2011        /*
2012         * The byte written is an address.
2013         */
2014
2015        if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
2016
2017            NS_DBG("write_byte: operation isn't known yet, identify it\n");
2018
2019            if (find_operation(ns, 1) < 0)
2020                return;
2021
2022            if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
2023                switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2024                return;
2025            }
2026
2027            ns->regs.count = 0;
2028            switch (NS_STATE(ns->nxstate)) {
2029                case STATE_ADDR_PAGE:
2030                    ns->regs.num = ns->geom.pgaddrbytes;
2031                    break;
2032                case STATE_ADDR_SEC:
2033                    ns->regs.num = ns->geom.secaddrbytes;
2034                    break;
2035                case STATE_ADDR_ZERO:
2036                    ns->regs.num = 1;
2037                    break;
2038                default:
2039                    BUG();
2040            }
2041        }
2042
2043        /* Check that chip is expecting address */
2044        if (!(ns->nxstate & STATE_ADDR_MASK)) {
2045            NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
2046                "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
2047            switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2048            return;
2049        }
2050
2051        /* Check if this is an expected byte */
2052        if (ns->regs.count == ns->regs.num) {
2053            NS_ERR("write_byte: no more address bytes expected\n");
2054            switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2055            return;
2056        }
2057
2058        accept_addr_byte(ns, byte);
2059
2060        ns->regs.count += 1;
2061
2062        NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
2063                (uint)byte, ns->regs.count, ns->regs.num);
2064
2065        if (ns->regs.count == ns->regs.num) {
2066            NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
2067            switch_state(ns);
2068        }
2069
2070    } else {
2071        /*
2072         * The byte written is input data.
2073         */
2074
2075        /* Check that chip is expecting data input */
2076        if (!(ns->state & STATE_DATAIN_MASK)) {
2077            NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
2078                "switch to %s\n", (uint)byte,
2079                get_state_name(ns->state), get_state_name(STATE_READY));
2080            switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2081            return;
2082        }
2083
2084        /* Check if this is an expected byte */
2085        if (ns->regs.count == ns->regs.num) {
2086            NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
2087                    ns->regs.num);
2088            return;
2089        }
2090
2091        if (ns->busw == 8) {
2092            ns->buf.byte[ns->regs.count] = byte;
2093            ns->regs.count += 1;
2094        } else {
2095            ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
2096            ns->regs.count += 2;
2097        }
2098    }
2099
2100    return;
2101}
2102
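/*
 * MTD cmd_ctrl callback: latch the CLE/ALE/nCE control lines from the
 * bitmask and, if a command byte is supplied, feed it to the write path.
 */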
2103static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
2104{
2105    struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
2106
2107    ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
2108    ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
2109    ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
2110
2111    if (cmd != NAND_CMD_NONE)
2112        ns_nand_write_byte(mtd, cmd);
2113}
2114
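/* The simulated chip never becomes busy, so always report it as ready. */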
2115static int ns_device_ready(struct mtd_info *mtd)
2116{
2117    NS_DBG("device_ready\n");
2118    return 1;
2119}
2120
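/* Assemble a 16-bit word from two consecutive byte reads (low byte first). */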
2121static uint16_t ns_nand_read_word(struct mtd_info *mtd)
2122{
2123    struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2124
2125    NS_DBG("read_word\n");
2126
2127    return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
2128}
2129
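/*
 * MTD write_buf callback: copy an input buffer into the internal page
 * buffer, failing the operation if more bytes arrive than are expected.
 */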
2130static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
2131{
2132    struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
2133
2134    /* Check that chip is expecting data input */
2135    if (!(ns->state & STATE_DATAIN_MASK)) {
2136        NS_ERR("write_buf: data input isn't expected, state is %s, "
2137            "switch to STATE_READY\n", get_state_name(ns->state));
2138        switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2139        return;
2140    }
2141
2142    /* Check if these are expected bytes */
2143    if (ns->regs.count + len > ns->regs.num) {
2144        NS_ERR("write_buf: too many input bytes\n");
2145        switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2146        return;
2147    }
2148
2149    memcpy(ns->buf.byte + ns->regs.count, buf, len);
2150    ns->regs.count += len;
2151
2152    if (ns->regs.count == ns->regs.num) {
2153        NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
2154    }
2155}
2156
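/*
 * MTD read_buf callback: copy data out of the internal buffer, or fall back
 * to byte-by-byte reads for non-page data such as ID and status output.
 */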
2157static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2158{
2159    struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
2160
2161    /* Sanity and correctness checks */
2162    if (!ns->lines.ce) {
2163        NS_ERR("read_buf: chip is disabled\n");
2164        return;
2165    }
2166    if (ns->lines.ale || ns->lines.cle) {
2167        NS_ERR("read_buf: ALE or CLE pin is high\n");
2168        return;
2169    }
2170    if (!(ns->state & STATE_DATAOUT_MASK)) {
2171        NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
2172            get_state_name(ns->state));
2173        return;
2174    }
2175
2176    if (NS_STATE(ns->state) != STATE_DATAOUT) {
2177        int i;
2178
2179        for (i = 0; i < len; i++)
2180            buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
2181
2182        return;
2183    }
2184
2185    /* Check if these are expected bytes */
2186    if (ns->regs.count + len > ns->regs.num) {
2187        NS_ERR("read_buf: too many bytes to read\n");
2188        switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2189        return;
2190    }
2191
2192    memcpy(buf, ns->buf.byte + ns->regs.count, len);
2193    ns->regs.count += len;
2194
2195    if (ns->regs.count == ns->regs.num) {
2196        if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
2197            ns->regs.count = 0;
2198            if (ns->regs.row + 1 < ns->geom.pgnum)
2199                ns->regs.row += 1;
2200            NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
2201            do_state_action(ns, ACTION_CPY);
2202        }
2203        else if (NS_STATE(ns->nxstate) == STATE_READY)
2204            switch_state(ns);
2205    }
2206
2207    return;
2208}
2209
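/*
 * MTD verify_buf callback: read the data back and compare it with the
 * buffer that was just written.
 */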
2210static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
2211{
2212    ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
2213
2214    if (!memcmp(buf, &ns_verify_buf[0], len)) {
2215        NS_DBG("verify_buf: the buffer is OK\n");
2216        return 0;
2217    } else {
2218        NS_DBG("verify_buf: the buffer is wrong\n");
2219        return -EFAULT;
2220    }
2221}
2222
2223/*
2224 * Module initialization function
2225 */
2226static int __init ns_init_module(void)
2227{
2228    struct nand_chip *chip;
2229    struct nandsim *nand;
2230    int retval = -ENOMEM, i;
2231
2232    if (bus_width != 8 && bus_width != 16) {
2233        NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
2234        return -EINVAL;
2235    }
2236
2237    /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
2238    nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
2239                + sizeof(struct nandsim), GFP_KERNEL);
2240    if (!nsmtd) {
2241        NS_ERR("unable to allocate core structures.\n");
2242        return -ENOMEM;
2243    }
2244    chip = (struct nand_chip *)(nsmtd + 1);
2245    nsmtd->priv = (void *)chip;
2246    nand = (struct nandsim *)(chip + 1);
2247    chip->priv = (void *)nand;
2248
2249    /*
2250     * Register simulator's callbacks.
2251     */
2252    chip->cmd_ctrl = ns_hwcontrol;
2253    chip->read_byte = ns_nand_read_byte;
2254    chip->dev_ready = ns_device_ready;
2255    chip->write_buf = ns_nand_write_buf;
2256    chip->read_buf = ns_nand_read_buf;
2257    chip->verify_buf = ns_nand_verify_buf;
2258    chip->read_word = ns_nand_read_word;
2259    chip->ecc.mode = NAND_ECC_SOFT;
2260    /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
2261    /* and 'badblocks' parameters to work */
2262    chip->options |= NAND_SKIP_BBTSCAN;
2263
2264    /*
2265     * Perform minimum nandsim structure initialization to handle
2266     * the initial ID read command correctly
2267     */
2268    if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
2269        nand->geom.idbytes = 4;
2270    else
2271        nand->geom.idbytes = 2;
2272    nand->regs.status = NS_STATUS_OK(nand);
2273    nand->nxstate = STATE_UNKNOWN;
2274    nand->options |= OPT_PAGE256; /* temporary value */
2275    nand->ids[0] = first_id_byte;
2276    nand->ids[1] = second_id_byte;
2277    nand->ids[2] = third_id_byte;
2278    nand->ids[3] = fourth_id_byte;
2279    if (bus_width == 16) {
2280        nand->busw = 16;
2281        chip->options |= NAND_BUSWIDTH_16;
2282    }
2283
2284    nsmtd->owner = THIS_MODULE;
2285
2286    if ((retval = parse_weakblocks()) != 0)
2287        goto error;
2288
2289    if ((retval = parse_weakpages()) != 0)
2290        goto error;
2291
2292    if ((retval = parse_gravepages()) != 0)
2293        goto error;
2294
2295    if ((retval = nand_scan(nsmtd, 1)) != 0) {
2296        NS_ERR("can't register NAND Simulator\n");
2297        if (retval > 0)
2298            retval = -ENXIO;
2299        goto error;
2300    }
2301
2302    if (overridesize) {
2303        uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2304        if (new_size >> overridesize != nsmtd->erasesize) {
2305            NS_ERR("overridesize is too big\n");
2306            goto err_exit;
2307        }
2308        /* N.B. This relies on nand_scan not doing anything with the size before we change it */
2309        nsmtd->size = new_size;
2310        chip->chipsize = new_size;
2311        chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
2312        chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
2313    }
2314
2315    if ((retval = setup_wear_reporting(nsmtd)) != 0)
2316        goto err_exit;
2317
2318    if ((retval = init_nandsim(nsmtd)) != 0)
2319        goto err_exit;
2320
2321    if ((retval = parse_badblocks(nand, nsmtd)) != 0)
2322        goto err_exit;
2323
2324    if ((retval = nand_default_bbt(nsmtd)) != 0)
2325        goto err_exit;
2326
2327    /* Register NAND partitions */
2328    if ((retval = add_mtd_partitions(nsmtd, &nand->partitions[0], nand->nbparts)) != 0)
2329        goto err_exit;
2330
2331    return 0;
2332
2333err_exit:
2334    free_nandsim(nand);
2335    nand_release(nsmtd);
2336    for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
2337        kfree(nand->partitions[i].name);
2338error:
2339    kfree(nsmtd);
2340    free_lists();
2341
2342    return retval;
2343}
2344
2345module_init(ns_init_module);
2346
2347/*
2348 * Module clean-up function
2349 */
2350static void __exit ns_cleanup_module(void)
2351{
2352    struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv);
2353    int i;
2354
2355    free_nandsim(ns); /* Free nandsim private resources */
2356    nand_release(nsmtd); /* Unregister driver */
2357    for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
2358        kfree(ns->partitions[i].name);
2359    kfree(nsmtd); /* Free other structures */
2360    free_lists();
2361}
2362
2363module_exit(ns_cleanup_module);
2364
2365MODULE_LICENSE ("GPL");
2366MODULE_AUTHOR ("Artem B. Bityuckiy");
2367MODULE_DESCRIPTION ("The NAND flash simulator");
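/*
 * Usage sketch (illustrative, not part of this file): with the module
 * parameters wired to the variables above, the simulator is typically
 * loaded with ID bytes selecting the chip to emulate, for example:
 *
 *   modprobe nandsim first_id_byte=0x20 second_id_byte=0xa2 \
 *            third_id_byte=0x00 fourth_id_byte=0x15
 *
 * The reported geometry follows from the ID bytes; the values shown here
 * are only an example and should be checked against the nandsim
 * documentation for the kernel version in use.
 */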
2368
