1 Index: drivers/mtd/brcmnand/bcm7xxx-nand.c
2 ===================================================================
3 --- drivers/mtd/brcmnand/bcm7xxx-nand.c (revision 1)
4 +++ drivers/mtd/brcmnand/bcm7xxx-nand.c (working copy)
6 * start of flash 1f7f_ffff flashSize-8MB rootfs Linux File System
8 #define SMALLEST_FLASH_SIZE (16<<20)
9 -#define DEFAULT_RESERVED_SIZE (8<<20)
11 +#define DEFAULT_RESERVED_SIZE (14<<20)
13 +#define DEFAULT_RESERVED_SIZE (12<<20)
15 #define DEFAULT_SPLASH_SIZE (1<<20)
16 #define DEFAULT_BBT0_SIZE_MB (1)
17 #define DEFAULT_BBT1_SIZE_MB (4)
19 #define ROOTFS_PART (0)
21 -#if defined( CONFIG_MTD_BRCMNAND_DISABLE_XOR )
22 -/* Implies new partition scheme, starting with 7420
23 - cfe: 0-4MB (not mapped)
24 - mtd0: rootfs: Starts at 4MB offset
25 - mtd1: all flash less BBT0 (1MB) for flash <= 512MB
27 - mtd3: Data, for flash>512MB, from 512MB up to flash - BBT1 (4MB)
31 -#define KERNEL_PART (2)
32 -#define DATA_PART (3)
33 -#define AVAIL1_PART (-1)
35 -#define DEFAULT_ECM_SIZE (0)
36 -#define DEFAULT_AVAIL1_SIZE (0)
38 -#elif defined( CONFIG_MTD_NEW_PARTITION )
39 +#ifdef CONFIG_MTD_NEW_PARTITION
40 /* New partition scheme, starting with 7420
42 mtd1: all flash less BBT0 (1MB) for flash <= 512MB
44 #define DEFAULT_ECM_SIZE (0)
45 #define DEFAULT_AVAIL1_SIZE (0)
48 - #if defined( CONFIG_MTD_ECM_PARTITION )
49 +#elif defined( CONFIG_MTD_ECM_PARTITION )
50 #define DEFAULT_OCAP_SIZE (6<<20)
51 #define DEFAULT_AVAIL1_SIZE (32<<20)
52 #define DEFAULT_ECM_SIZE (DEFAULT_OCAP_SIZE+DEFAULT_AVAIL1_SIZE)
53 #define AVAIL1_PART (1)
57 #define DEFAULT_ECM_SIZE (0)
58 #define DEFAULT_OCAP_SIZE (0)
59 #define DEFAULT_AVAIL1_SIZE (0)
60 #define AVAIL1_PART (-1)
61 #define OCAP_PART (-1)
64 - /* Definitions for NOR+NAND */
66 -#define KERNEL_PART (2)
67 -#define DATA_PART (3)
69 #define DEFAULT_ROOTFS_SIZE (SMALLEST_FLASH_SIZE - DEFAULT_RESERVED_SIZE - DEFAULT_ECM_SIZE)
75 -static struct mtd_partition bcm7XXX_no_xor_partition[] =
76 +static struct mtd_partition bcm7XXX_nand_parts[] =
77 +#ifdef CONFIG_MTD_NEW_PARTITION
79 - /* XOR disabled: Everything is shifted down 4MB */
80 - { name: N_ROOTFS, offset: 0x00400000, size: DEFAULT_ROOTFS_SIZE - (DEFAULT_BBT0_SIZE_MB <<20) }, // Less 1MB for BBT
81 - { name: N_ALL, offset: 0, size: DEFAULT_ROOTFS_SIZE - (DEFAULT_BBT0_SIZE_MB <<20) },
82 - { name: N_KERNEL, offset: 0x00b00000, size: 4<<20 },
83 - /* BBT0 1MB not mountable by anyone */
85 - /* Following partitions only present on flash with size > 512MB */
86 - { name: N_DATA, offset: 0x20000000, size: 0 },
87 - /* BBT1 4MB not mountable by anyone */
88 - {name: NULL, offset: 0, size: 0} /* End marker */
91 -static struct mtd_partition bcm7XXX_new_partition[] =
93 { name: N_ROOTFS, offset: 0, size: DEFAULT_ROOTFS_SIZE },
94 { name: N_ALL, offset: 0x0, size: DEFAULT_ROOTFS_SIZE - (DEFAULT_BBT0_SIZE_MB <<20) },
95 { name: N_KERNEL, offset: 0x00800000, size: 4<<20 },
97 {name: NULL, offset: 0, size: 0} /* End marker */
100 -static struct mtd_partition bcm7XXX_old_partition[] =
103 { name: N_ROOTFS, offset: 0, size: DEFAULT_ROOTFS_SIZE },
104 #ifdef CONFIG_MTD_ECM_PARTITION
105 { name: N_AVAIL1, offset: DEFAULT_ROOTFS_SIZE, size: DEFAULT_AVAIL1_SIZE },
106 { name: N_OCAP, offset: DEFAULT_ROOTFS_SIZE+DEFAULT_AVAIL1_SIZE, size: DEFAULT_OCAP_SIZE },
108 - { name: N_KERNEL, offset: 0x00800000, size: 4<<20 },
109 - { name: N_CFE, offset: 0x00C00000, size: 2<<20 },
111 + { name: N_KERNEL, offset: 0x00200000, size: 4<<20 },
112 + { name: "boot", offset: 0x00600000, size: 4<<20 },
113 + { name: "bootimg", offset: 0x00A00000, size: 2<<20 },
115 + { name: N_KERNEL, offset: 0x00400000, size: 4<<20 },
116 + { name: "boot", offset: 0x00800000, size: 4<<20 },
118 + { name: N_CFE, offset: 0x00C00000, size: 1<<20 },
119 + { name: "mac", offset: 0x00D00000, size: 1<<19 },
120 + { name: "env", offset: 0x00D80000, size: 1<<19 },
121 { name: N_NVM, offset: 0x00E00000, size: 1<<20 },
122 /* BBT 1MB not mountable by anyone */
123 { name: N_DATA, offset: 0x20000000, size: 0 },
125 {name: NULL, offset: 0, size: 0},
126 {name: NULL, offset: 0, size: 0}
129 -#if defined( CONFIG_MTD_BRCMNAND_DISABLE_XOR )
130 -static struct mtd_partition* bcm7XXX_nand_parts = bcm7XXX_no_xor_partition;
132 -#elif defined( CONFIG_MTD_NEW_PARTITION )
133 -static struct mtd_partition* bcm7XXX_nand_parts = bcm7XXX_new_partition;
136 -static struct mtd_partition* bcm7XXX_nand_parts = bcm7XXX_old_partition;
139 struct brcmnand_info {
140 @@ -253,41 +220,17 @@
141 unsigned int ocap_size = DEFAULT_OCAP_SIZE;
143 unsigned int avail1_size = DEFAULT_AVAIL1_SIZE;
144 - int oldNumParts = ARRAY_SIZE(bcm7XXX_old_partition);
146 -//printk("========================> %s\n", __FUNCTION__);
150 - * Is XOR disabled? if so use the new partition.
152 - if (nandinfo->brcmnand.xor_disable) {
153 - bcm7XXX_nand_parts = bcm7XXX_no_xor_partition;
155 - if (device_size(mtd) <= (512ULL <<20)) {
156 - bcm7XXX_nand_parts[ALL_PART].size =
157 - device_size(mtd) - (uint64_t) (DEFAULT_BBT0_SIZE_MB<<20);
161 - bcm7XXX_nand_parts[ALL_PART].size = ((512-DEFAULT_BBT1_SIZE_MB)<<20);
164 - for (i=0; i<*numParts;i++) {
165 - bcm7XXX_nand_parts[i].ecclayout = mtd->ecclayout;
168 - // Kernel partition will be initialized by Env Vars.
169 - //printk("<-- %s, device_size=%0llx\n", __FUNCTION__, device_size(mtd));
170 - //print_partition(*numParts);
172 - nandinfo->parts = bcm7XXX_nand_parts;
175 + if (device_size(mtd) <= (512ULL <<20)) {
176 + size = (unsigned long) device_size(mtd); // mtd->size may be different than nandinfo->size
177 + *numParts = ARRAY_SIZE(bcm7XXX_nand_parts) - 3; /* take into account the extra 2 parts
178 + and the data partition */
181 + *numParts = ARRAY_SIZE(bcm7XXX_nand_parts) - 2; // take into account the extra 2 parts
185 -#if defined( CONFIG_MTD_NEW_PARTITION )
186 +#ifdef CONFIG_MTD_NEW_PARTITION
187 if (device_size(mtd) <= (512ULL <<20)) {
188 bcm7XXX_nand_parts[ALL_PART].size =
189 device_size(mtd) - (uint64_t) (DEFAULT_BBT0_SIZE_MB<<20);
191 nandinfo->parts = bcm7XXX_nand_parts;
196 - /* NAND on CS1, same partition as that of CONFIG_MTD_NEW_PARTITION */
197 -PRINTK("nandinfo->brcmnand.CS[0] = %d\n", nandinfo->brcmnand.CS[0]);
198 -PRINTK("bcm7XXX_nand_parts=%p, bcm7XXX_new_partition=%p, bcm7XXX_old_partition=%p\n",
199 - bcm7XXX_nand_parts, &bcm7XXX_new_partition[0], &bcm7XXX_old_partition[0]);
200 - if (nandinfo->brcmnand.CS[0] != 0) {
201 - bcm7XXX_nand_parts = bcm7XXX_new_partition;
203 - if (device_size(mtd) <= (512ULL <<20)) {
204 - bcm7XXX_nand_parts[0].size = device_size(mtd) - DEFAULT_RESERVED_SIZE - ecm_size;
205 - bcm7XXX_nand_parts[ALL_PART].size =
206 - device_size(mtd) - ((uint64_t) (DEFAULT_BBT0_SIZE_MB) <<20);
210 - bcm7XXX_nand_parts[0].size = (512ULL <<20) - DEFAULT_RESERVED_SIZE - ecm_size;
211 - bcm7XXX_nand_parts[ALL_PART].size =
212 - device_size(mtd) - ((uint64_t) (DEFAULT_BBT1_SIZE_MB)<<20);
215 - for (i=0; i<*numParts;i++) {
216 - bcm7XXX_nand_parts[i].ecclayout = mtd->ecclayout;
218 +#elif defined( CONFIG_MTD_ECM_PARTITION )
220 - nandinfo->parts = bcm7XXX_nand_parts;
223 -PRINTK("%s: NAND on CS1: numparts=%d\n", __FUNCTION__, *numParts);
224 -print_partition(*numParts);
230 - /* From now on, we are only dealing with old partition table */
231 - if (device_size(mtd) <= (512ULL <<20)) {
232 - size = (unsigned long) device_size(mtd); // mtd->size may be different than nandinfo->size
233 - *numParts = oldNumParts - 3; /* take into account the extra 2 parts
234 - and the data partition */
237 - *numParts = oldNumParts - 2; // take into account the extra 2 parts
240 - #if defined( CONFIG_MTD_ECM_PARTITION )
242 /* Do not generate AVAIL1 partition if usable flash size is less than 64MB */
244 if (size < (64<<20)) {
245 @@ -370,12 +268,11 @@
246 ecm_size = ocap_size + avail1_size;
252 nandinfo->parts = bcm7XXX_nand_parts;
253 bcm7XXX_nand_parts[0].size = size - DEFAULT_RESERVED_SIZE - ecm_size;
254 bcm7XXX_nand_parts[0].ecclayout = mtd->ecclayout;
255 -PRINTK("numParts=%d\n", numParts);
256 PRINTK("Part[%d] name=%s, size=%llx, offset=%llx\n", i, bcm7XXX_nand_parts[0].name,
257 bcm7XXX_nand_parts[0].size, bcm7XXX_nand_parts[0].offset);
260 int e; // Index into Env vars
261 int i; // Index into mtd partition
264 // Not configured for Splash, but does CFE define it?
266 for (i=0; i < gCfePartitions.numParts; i++) {
274 * Remove OCAP partitions if Env Vars are defined
276 //unsigned long size = res->end - res->start + 1;
279 - struct brcmnand_chip* chip;
282 info = kmalloc(sizeof(struct brcmnand_info), GFP_KERNEL);
284 //info->brcmnand.mmcontrol = NULL; // THT: Sync Burst Read TBD. pdata->mmcontrol;
286 info->mtd.name = pdev->dev.bus_id;
287 - chip = info->mtd.priv = &info->brcmnand;
288 + info->mtd.priv = &info->brcmnand;
289 info->mtd.owner = THIS_MODULE;
291 /* Enable the following for a flash based bad block table */
292 @@ -690,19 +588,12 @@
294 //print_partition(numParts);
296 - // Nand not on CS0, set it up to allow 1 partition, as in the new partition scheme
297 - if (chip->CS[0] != 0) {
298 - bcm7XXX_nand_parts = bcm7XXX_new_partition;
301 if (gCfePartitions.numParts == 0) {
302 brcmnanddrv_setup_mtd_partitions(info, &numParts);
305 brcmnanddrv_setup_mtdpart_cfe_env(info, &numParts);
310 //print_partition(numParts);
313 //printk(" dev_set_drvdata\n");
314 dev_set_drvdata(&pdev->dev, info);
315 //printk("<-- brcmnanddrv_probe\n");
317 -/* NOR+NAND configuration */
318 -#ifdef CONFIG_MTD_BRCMNAND_NOR_ACCESS
319 - /* Append NOR partition to the end */
321 - extern void (*gInitialize_Nor_Partition)(void);
323 - if (gInitialize_Nor_Partition) {
324 - (*gInitialize_Nor_Partition) ();
326 - // Else NAND is loaded first, NOR will append when it is started.
333 Index: drivers/mtd/brcmnand/brcmnand_base.c
334 ===================================================================
335 --- drivers/mtd/brcmnand/brcmnand_base.c (revision 1)
336 +++ drivers/mtd/brcmnand/brcmnand_base.c (working copy)
338 #include <linux/byteorder/generic.h>
339 #include <linux/reboot.h>
340 #include <linux/vmalloc.h>
341 -#include <linux/dma-mapping.h>
342 -#include <linux/interrupt.h>
348 //#define DEBUG_HW_ECC
350 -//#define BRCMNAND_READ_VERIFY
351 -#undef BRCMNAND_READ_VERIFY
353 -//#ifdef CONFIG_MTD_BRCMNAND_VERIFY_WRITE
354 -//#define BRCMNAND_WRITE_VERIFY
356 -#undef BRCMNAND_WRITE_VERIFY
360 -#if defined( DEBUG_ISR ) || defined(BRCMNAND_READ_VERIFY) \
361 - || defined(BRCMNAND_WRITE_VERIFY)
362 -#if defined(DEBUG_ISR ) || defined(BRCMNAND_READ_VERIFY)
365 -#if defined(DEBUG_ISR ) || defined(BRCMNAND_WRITE_VERIFY)
370 #define my_be32_to_cpu(x) be32_to_cpu(x)
372 #if defined( CONFIG_MTI_R24K ) || defined( CONFIG_MTI_R34K ) || defined( CONFIG_MTD_BRCMNAND_EDU )
375 #define HW_AUTOOOB_LAYOUT_SIZE 32 /* should be enough */
377 +#define BRCMNAND_CORRECTABLE_ECC_ERROR (1)
378 +#define BRCMNAND_SUCCESS (0)
379 +#define BRCMNAND_UNCORRECTABLE_ECC_ERROR (-1)
380 +#define BRCMNAND_FLASH_STATUS_ERROR (-2)
381 +#define BRCMNAND_TIMED_OUT (-3)
383 +#ifdef CONFIG_MTD_BRCMNAND_EDU
384 +#define BRCMEDU_CORRECTABLE_ECC_ERROR (4)
385 +#define BRCMEDU_UNCORRECTABLE_ECC_ERROR (-4)
387 +#define BRCMEDU_MEM_BUS_ERROR (-5)
390 +#endif // #ifdef CONFIG_MTD_BRCMNAND_EDU
392 #ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
393 /* Avoid infinite recursion between brcmnand_refresh_blk() and brcmnand_read_ecc() */
394 static atomic_t inrefresh = ATOMIC_INIT(0);
397 uint32_t idOptions; // Whether chip has all 5 ID bytes
398 uint32 timing1, timing2; // Specify a non-zero value to override the default timings.
399 - int nop; // Number of partial writes per page
400 unsigned int ctrlVersion; // Required controller version if different than 0
404 //| NAND_COMPLEX_OOB_WRITE /* Write data together with OOB for write_oob */
405 .timing1 = 0, //00070000,
408 .ctrlVersion = 0, /* THT Verified on data-sheet 7/10/08: Allows 4 on main and 4 on OOB */
413 .timing1 = 0, //0x6474555f,
414 .timing2 = 0, //0x00000fc7,
421 .timing1 = 0, //0x6474555f,
422 .timing2 = 0, //0x00000fc7,
428 .options = NAND_USE_FLASH_BBT,
430 .timing1 = 0, .timing2 = 0,
436 .options = NAND_USE_FLASH_BBT,
438 .timing1 = 0, .timing2 = 0,
442 /* This is just the 16 bit version of the above?
444 .options = NAND_USE_FLASH_BBT,
446 .timing1 = 0, .timing2 = 0,
448 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
449 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
454 .options = NAND_USE_FLASH_BBT,
456 .timing1 = 0, .timing2 = 0,
458 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
459 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
464 .options = NAND_USE_FLASH_BBT,
466 .timing1 = 0, .timing2 = 0,
472 .options = NAND_USE_FLASH_BBT,
474 .timing1 = 0, .timing2 = 0,
480 .options = NAND_USE_FLASH_BBT,
482 .timing1 = 0, .timing2 = 0,
488 .options = NAND_USE_FLASH_BBT,
490 .timing1 = 0, .timing2 = 0,
496 .options = NAND_USE_FLASH_BBT,
498 .timing1 = 0, .timing2 = 0,
504 .options = NAND_USE_FLASH_BBT,
506 .timing1 = 0, .timing2 = 0,
512 .options = NAND_USE_FLASH_BBT,
514 .timing1 = 0, .timing2 = 0,
520 .options = NAND_USE_FLASH_BBT,
522 .timing1 = 0, .timing2 = 0,
528 .options = NAND_USE_FLASH_BBT,
530 .timing1 = 0, .timing2 = 0,
536 .options = NAND_USE_FLASH_BBT,
538 .timing1 = 0, .timing2 = 0,
544 .options = NAND_USE_FLASH_BBT,
546 .timing1 = 0, .timing2 = 0,
552 .options = NAND_USE_FLASH_BBT,
554 .timing1 = 0, .timing2 = 0,
560 .options = NAND_USE_FLASH_BBT,
562 .timing1 = 0, .timing2 = 0,
568 .options = NAND_USE_FLASH_BBT,
570 .timing1 = 0, .timing2 = 0,
575 @@ -438,11 +408,10 @@
576 .options = NAND_USE_FLASH_BBT,
578 .timing1 = 0, .timing2 = 0,
583 - /* The following 6 ST chips only allow 4 writes per page, and requires version2.1 (4) of the controller or later */
584 + /* The following 6 ST chips only allow 4 writes per page, and requires version2.2 (5) of the controller or later */
586 .chipId = ST_NAND01GW3B,
587 .mafId = FLASHTYPE_ST,
589 .options = NAND_USE_FLASH_BBT,
591 .timing1 = 0, .timing2 = 0,
593 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
594 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
599 .options = NAND_USE_FLASH_BBT,
601 .timing1 = 0, .timing2 = 0,
603 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
604 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
609 .options = NAND_USE_FLASH_BBT,
611 .timing1 = 0, .timing2 = 0,
613 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
614 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
617 .chipId = ST_NAND02GW3B,
619 .options = NAND_USE_FLASH_BBT,
621 .timing1 = 0, .timing2 = 0,
623 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
624 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
629 .options = NAND_USE_FLASH_BBT,
631 .timing1 = 0, .timing2 = 0,
633 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
634 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
637 .chipId = ST_NAND08GW3B,
639 .options = NAND_USE_FLASH_BBT,
641 .timing1 = 0, .timing2 = 0,
643 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
644 + .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
649 .options = NAND_USE_FLASH_BBT, /* Use BBT on flash */
650 //| NAND_COMPLEX_OOB_WRITE /* Write data together with OOB for write_oob */
651 .idOptions = BRCMNAND_ID_EXT_BYTES,
653 + .timing1 = 0, //00070000,
656 .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0,
660 .options = NAND_USE_FLASH_BBT, /* Use BBT on flash */
661 //| NAND_COMPLEX_OOB_WRITE /* Write data together with OOB for write_oob */
662 .idOptions = BRCMNAND_ID_EXT_BYTES_TYPE2,
664 + .timing1 = 0, //00070000,
667 .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0,
670 @@ -540,24 +501,10 @@
671 .options = NAND_USE_FLASH_BBT, /* Use BBT on flash */
672 //| NAND_COMPLEX_OOB_WRITE /* Write data together with OOB for write_oob */
673 .idOptions = BRCMNAND_ID_EXT_BYTES,
675 + .timing1 = 0, //00070000,
678 .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0,
682 - .chipId = HYNIX_HY27UAG8T2M,
683 - .mafId = FLASHTYPE_HYNIX,
684 - .chipIdStr = "HYNIX_HY27UAG8T2M",
685 - .options = NAND_USE_FLASH_BBT, /* Use BBT on flash */
686 - //| NAND_COMPLEX_OOB_WRITE /* Write data together with OOB for write_oob */
687 - .idOptions = BRCMNAND_ID_EXT_BYTES,
691 - .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0,
694 { /* LAST DUMMY ENTRY */
698 if (nandCtrlReg < BCHP_NAND_REVISION || nandCtrlReg > BCHP_NAND_BLK_WR_PROTECT ||
699 (nandCtrlReg & 0x3) != 0) {
700 - printk("brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
701 + printk(KERN_ERR "brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
703 if (gdebug > 3) printk("%s: CMDREG=%08x val=%08x\n", __FUNCTION__, (unsigned int) nandCtrlReg, (unsigned int)*pReg);
704 return (uint32_t) (*pReg);
707 if (nandCtrlReg < BCHP_NAND_REVISION || nandCtrlReg > BCHP_NAND_BLK_WR_PROTECT ||
708 (nandCtrlReg & 0x3) != 0) {
709 - printk( "brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
710 + printk(KERN_ERR "brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
712 *pReg = (volatile unsigned long) (val);
713 if (gdebug > 3) printk("%s: CMDREG=%08x val=%08x\n", __FUNCTION__, nandCtrlReg, val);
714 @@ -670,14 +617,13 @@
717 if (gdebug) printk("CS=%d, chip->CS[cs]=%d\n", cs, chip->CS[cs]);
718 - // ldw is lower 32 bit of chipOffset, need to add pbase when on CS0 and XOR is ON.
719 - if (!chip->xor_disable[cs]) {
720 + // ldw is lower 32 bit of chipOffset, need to add pbase when on CS0
721 + if (chip->CS[cs] == 0) {
722 ldw = chipOffset.s.low + chip->pbase;
726 ldw = chipOffset.s.low;
730 udw = chipOffset.s.high | (chip->CS[cs] << 16);
732 if (gdebug > 3) printk("%s: offset=%0llx cs=%d ldw = %08x, udw = %08x\n", __FUNCTION__, offset, cs, ldw, udw);
735 /* Dont delete, may be useful for debugging */
737 -static void print_diagnostics(struct brcmnand_chip* chip)
738 +static void print_diagnostics(void)
740 uint32_t nand_acc_control = brcmnand_ctrl_read(BCHP_NAND_ACC_CONTROL);
741 uint32_t nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
743 uint32_t pageAddrExt = brcmnand_ctrl_read(BCHP_NAND_PROGRAM_PAGE_EXT_ADDR);
747 + uint32_t ebiCSBase0 = * ((volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_BASE_0));
748 //unsigned long nand_timing1 = brcmnand_ctrl_read(BCHP_NAND_TIMING_1);
749 //unsigned long nand_timing2 = brcmnand_ctrl_read(BCHP_NAND_TIMING_2);
752 #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
753 printk("PAGE_EXT_ADDR=%08x\n", pageAddrExt);
755 - if (chip->CS[0] == 0) {
756 - uint32_t ebiCSBase0 = * ((volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_BASE_0));
757 - printk("PAGE_ADDR=%08x, \tCS0_BASE=%08x\n", pageAddr, ebiCSBase0);
760 - //uint32_t ebiCSBaseN = * ((volatile unsigned long*) (0xb0000000|(BCHP_EBI_CS_BASE_0));
761 - uint32_t csNandBaseN = *(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_BASE_0 + 8*chip->CS[0]);
763 - printk("PAGE_ADDR=%08x, \tCS%-d_BASE=%08x\n", pageAddr, chip->CS[0], csNandBaseN);
764 - printk("pbase=%08lx, vbase=%p\n", chip->pbase, chip->vbase);
766 + printk("PAGE_ADDR=%08x, \tCS0_BASE=%08x\n", pageAddr, ebiCSBase0);
771 nand_acc_control, nand_config, flash_id, nand_timing1, nand_timing2);
774 -#define NUM_NAND_REGS (1+((BCHP_NAND_BLK_WR_PROTECT-BCHP_NAND_REVISION)/4))
776 -static void print_nand_ctrl_regs(void)
780 - for (i=0; i<NUM_NAND_REGS; i++) {
781 - uint32_t reg = (uint32_t) (BCHP_NAND_REVISION+(i*4));
783 - uint32_t regoff = reg - BCHP_NAND_REVISION; // i*4
785 - if ((i % 4) == 0) {
786 - printk("\n%08x:", reg);
789 -#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
790 - // V0.0, V0.1 7401Cx
791 - if (regoff == 0x14 || regoff == 0x18 || regoff == 0x1c ) { // No NAND register at 0x281c
794 -#elif CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_2_0
796 - if (regoff == 0x18 || regoff == 0x1c ) { // No NAND register at 0x281c
799 -#elif CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_0
800 - // V2.x 7325, 7335, 7405bx
801 - if (regoff == 0x1c) { // No NAND register at 0x281c
804 -#else // if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
805 - // V3.x 3548, 7420a0, 7420b0
806 - if (regoff == 0x1c || regoff == 0x44 || regoff == 0x4c || regoff == 0x5c
807 - || regoff == 0x88 || regoff == 0x8c
808 - || regoff == 0xb8 || regoff == 0xbc) {
813 - regval = (uint32_t) brcmnand_ctrl_read(reg);
815 - printk(" %08x", regval);
819 void print_NandCtrl_Status(void)
821 #ifdef CONFIG_MTD_BRCMNAND_EDU
822 @@ -1021,8 +912,11 @@
826 - rd_data = ISR_cache_is_valid();
827 + rd_data = ISR_cache_is_valid(intr);
834 printk("%s: rd_data=0 TIMEOUT\n", __FUNCTION__);
835 @@ -1158,7 +1052,7 @@
839 - if (state != FL_READING && (!wr_preempt_en) && !in_interrupt())
840 + if (state != FL_READING && (!wr_preempt_en))
842 //touch_softlockup_watchdog();
844 @@ -1192,10 +1086,6 @@
846 if (ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK &&
847 (ready & BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_MASK)) {
851 -// THT 6/15/09: Reading OOB would not affect ECC
855 @@ -1205,10 +1095,9 @@
862 - if (state != FL_READING && !wr_preempt_en && !in_interrupt())
863 + if (state != FL_READING && !wr_preempt_en)
867 @@ -1261,7 +1150,7 @@
869 //return BRCMNAND_SUCCESS;
871 - if (state != FL_READING && (!wr_preempt_en) && !in_interrupt())
872 + if (state != FL_READING && (!wr_preempt_en))
876 @@ -1292,7 +1181,7 @@
880 -printk("%s: intr_status = %08x\n", __FUNCTION__, intr_status); }
881 +printk("%s: intr_status = %08x\n", intr_status); }
883 if (intr_status == 0) {
884 /* EDU_read timed out */
885 @@ -1319,7 +1208,7 @@
888 if (!(intr_status & HIF_INTR2_CTRL_READY)) {
889 - (void) ISR_cache_is_valid();
890 + (void) ISR_cache_is_valid(0);
894 @@ -1356,12 +1245,9 @@
899 - * Returns 1 on success,
905 static int brcmnand_ctrl_write_is_complete(struct mtd_info *mtd, int* outp_needBBT)
908 @@ -1384,188 +1270,8 @@
914 -//#define EDU_DEBUG_2
917 -// EDU_DEBUG_4: Verify on Read
918 -//#define EDU_DEBUG_4
919 -//#undef EDU_DEBUG_4
921 -// EDU_DEBUG_5: Verify on Write
922 -//#define EDU_DEBUG_5
923 -//#undef EDU_DEBUG_5
925 -#if defined( EDU_DEBUG_2 ) || defined( EDU_DEBUG_4 ) || defined( EDU_DEBUG_5 )
926 -/* 3548 internal buffer is 4K in size */
927 -//static uint32_t edu_lbuf[2048];
928 -static uint32_t* edu_buf32;
929 -static uint8_t* edu_buf; // Used by EDU in Debug2
930 -static uint8_t* ctrl_buf; // Used by Ctrl in Debug4
931 -static uint32_t ctrl_oob32[4];
932 -static uint8_t* ctrl_oob = (uint8_t*) ctrl_oob32;
934 -#define PATTERN 0xa55a0000
936 -#define EDU_BUFSIZE_B (512)
937 -// One before and one after
938 -#define EDU_BUF32_SIZE_B (EDU_BUFSIZE_B*3)
940 -// Same as above in DW instead
941 -#define EDU_BUFSIZE_DW (EDU_BUFSIZE_B/4)
942 -#define EDU_BUF32_SIZE_DW (EDU_BUF32_SIZE_B/4)
944 -// Real buffer starts at 1/3
945 -#define EDU_BUF_START_DW (EDU_BUF32_SIZE_DW/3)
948 -static void init_edu_buf(void)
950 - /* Write pattern */
954 - edu_buf32 = (uint32_t*) kmalloc(EDU_BUF32_SIZE_B, GFP_KERNEL);
956 - printk("%s: Out of memory\n", __FUNCTION__);
960 - edu_buf = ctrl_buf = (uint8_t*) &edu_buf32[EDU_BUF_START_DW];
961 - printk("%s: Buffer allocated at %p, %d bytes\n", __FUNCTION__, edu_buf32, EDU_BUF32_SIZE_B);
962 - printk("Real buffer starts at %p\n", ctrl_buf);
965 - for (i=0; i<EDU_BUF32_SIZE_DW; i++) {
966 - edu_buf32[i] = PATTERN | i;
970 -static int verify_edu_buf(void)
975 - for (i=0; i<EDU_BUF_START_DW; i++) {
976 - if (edu_buf32[i] != (PATTERN | i)) {
977 - printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n",
978 - __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
982 - for (i=EDU_BUF_START_DW+EDU_BUFSIZE_DW; i<EDU_BUF32_SIZE_DW; i++) {
983 - if (edu_buf32[i] != (PATTERN | i)) {
984 - printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n",
985 - __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
989 -if (ret) printk("+++++++++++++++ %s: %d DW overwritten by EDU\n", __FUNCTION__, ret);
994 -static uint8_t edu_write_buf[512];
998 #ifdef CONFIG_MTD_BRCMNAND_EDU
999 -#define NUM_EDU_REGS (1+((BCHP_EDU_ERR_STATUS-BCHP_EDU_CONFIG)/4))
1001 -#define NUM_EDU_REGS 1
1004 -#define MAX_DUMPS 20
1006 -typedef struct nand_dump {
1008 - uint32_t physAddr;
1009 - struct brcmnand_chip* chip;
1010 - struct register_dump_t {
1011 - unsigned long timestamp;
1012 - uint32_t nand_regs[NUM_NAND_REGS]; // NAND register dump
1013 - uint32_t edu_regs[NUM_EDU_REGS]; // EDU register
1014 - uint32_t hif_intr2; // HIF_INTR2 Interrupt status
1015 - uint8_t data[512]; // NAND controller cache
1016 - } dump[MAX_DUMPS];
1017 - //uint8_t udata[512]; // Uncached
1018 -} nand_dump_t; // Before and after
1019 -nand_dump_t nandDump;
1023 -#ifdef CONFIG_MTD_BRCMNAND_EDU
1024 -static void print_dump_nand_regs(int which)
1028 - printk("NAND registers snapshot #%d: TS=%0lx, offset=%0llx, PA=%08x\n",
1029 - 1+which, nandDump.dump[which].timestamp, nandDump.offset, nandDump.physAddr);
1030 - for (i=0; i<NUM_NAND_REGS; i++) {
1031 - if ((i % 4) == 0) {
1032 - printk("\n%08x:", BCHP_NAND_REVISION+(i*4));
1034 - printk(" %08x", nandDump.dump[which].nand_regs[i]);
1036 - printk("\nEDU registers:\n");
1037 - for (i=0; i<NUM_EDU_REGS; i++) {
1038 - if ((i % 4) == 0) {
1039 - printk("\n%08x:", BCHP_EDU_CONFIG+(i*4));
1041 - printk(" %08x", nandDump.dump[which].edu_regs[i]);
1043 - printk("\n HIF_INTR2_STATUS=%08x\n", nandDump.dump[which].hif_intr2);
1044 - printk("\nNAND controller Internal cache:\n");
1045 - print_databuf(nandDump.dump[which].data, 512);
1048 -void dump_nand_regs(struct brcmnand_chip* chip, loff_t offset, uint32_t pa, int which)
1052 - /* We don't have the value of offset during snapshot #2 */
1053 - if (which == 0) {nandDump.offset = offset; nandDump.physAddr = pa;nandDump.chip = chip;}
1055 - nandDump.dump[which].timestamp = jiffies;
1057 - for (i=0; i<NUM_NAND_REGS; i++) {
1058 - uint32_t reg = BCHP_NAND_REVISION+(i*4);
1061 - if (reg == 0x281c) { // No NAND register at 0x281c
1065 - regval = brcmnand_ctrl_read(reg);
1067 - nandDump.dump[which].nand_regs[i] = regval;
1069 - for (i=0; i<NUM_EDU_REGS; i++) {
1070 - nandDump.dump[which].edu_regs[i] = EDU_volatileRead(EDU_BASE_ADDRESS + BCHP_EDU_CONFIG + ( i*4));
1072 - nandDump.dump[which].hif_intr2 = EDU_volatileRead(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS);
1073 - brcmnand_from_flash_memcpy32(nandDump.chip, &nandDump.dump[which].data[0], nandDump.offset, 512);
1078 -#define print_dump_nand_regs(...)
1080 -#define dump_nand_regs(...)
1082 -#endif // EDU_DEBUG_2,4,5
1086 -#ifdef CONFIG_MTD_BRCMNAND_EDU
1090 - * Returns 1 on success,
1094 static int brcmnand_EDU_write_is_complete(struct mtd_info *mtd, int* outp_needBBT)
1096 uint32_t hif_err, edu_err;
1097 @@ -1581,45 +1287,37 @@
1100 #ifdef CONFIG_MTD_BRCMNAND_USE_ISR
1101 - #if 0 // No need in Batch mode
1102 // Unlike the Read case where we retry on everything, we either complete the write or die trying.
1103 - // Here we use retry only for ERESTARTSYS, relying on the fact that we write the same data
1104 - // over the flash.
1105 - // Caution: Since this can be called from an interrupt context, we cannot call the regular brcmnand_wait()
1106 - // call, since those call schedule()
1107 + // Here we use retry only for ERESTARTSYS, relying on the fact that we write the same data over the flash.
1108 hif_err = ISR_wait_for_completion();
1109 if ((hif_err == ERESTARTSYS) || (hif_err & HIF_INTR2_EBI_TIMEOUT))
1111 - #endif // Batch mode
1114 hif_err = EDU_poll(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS,
1115 - HIF_INTR2_EDU_DONE|HIF_INTR2_CTRL_READY,
1116 + HIF_INTR2_EDU_DONE,
1118 - HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY);
1120 + HIF_INTR2_EDU_DONE_MASK);
1124 if (hif_err != 0) // No timeout
1126 - uint32_t flashStatus; // = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1127 + int flashStatus; // = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1130 -if (!(hif_err & HIF_INTR2_EDU_DONE))
1131 -printk("hif_err=%08x\n", hif_err);
1136 /******************* BUG BUG BUG *****************
1137 * THT 01/06/09: What if EDU returns bus error? We should not mark the block bad then.
1139 //Get status: should we check HIF_INTR2_ERR?
1140 - if (hif_err & HIF_INTR2_EDU_ERR)
1141 - edu_err = EDU_get_error_status_register();
1144 + edu_err = EDU_get_error_status_register();
1147 //EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_DONE, 0x00000000);
1149 + EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_ERR_STATUS, 0x00000000);
1150 + EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
1152 flashStatus = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1154 @@ -1627,56 +1325,39 @@
1155 if (!(flashStatus & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
1156 ret = brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);
1157 // No need to check on the EDU side, already done inside ctrl_write_is_complete
1159 - //dump_nand_regs(chip, 0, 0, numDumps++);
1167 -// 2nd dump after CTRL_READY is asserted
1169 -//dump_nand_regs(chip, 0, 0, numDumps++);
1173 if ((edu_err & EDU_ERR_STATUS_NandWrite) || (flashStatus & 0x01)) {
1174 /* Write did not complete, flash error, will mark block bad */
1176 printk("EDU_write_is_complete(): error 0x%08X\n", edu_err);
1182 /* Write did not complete, bus error, will NOT mark block bad */
1184 printk("EDU_write_is_complete(): error 0x%08X\n", edu_err);
1190 - ret = 1; // Success brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);
1192 + return 1; // Success brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);
1194 else { // Write timeout
1195 printk("%s: Write has timed out\n", __FUNCTION__);
1196 //*outp_needBBT = 1;
1200 + EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_ERR_STATUS, 0x00000000);
1201 + EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
1209 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_ERR_STATUS, 0x00000000);
1210 - EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
1211 + printk("EDU_write_is_complete(): error 2 hif_err: %08x\n", hif_err);
1214 - //printk("EDU_write_is_complete(): error 2 hif_err: %08x\n", hif_err);
1216 //Poll time out or did not return HIF_INTR2_EDU_DONE:
1222 @@ -1689,7 +1370,7 @@
1226 - * brcmnand_transfer_oob - [Internal] Transfer oob from chip->oob_poi to client buffer
1227 + * brcmnand_transfer_oob - [Internal] Transfer oob to client buffer
1228 * @chip: nand chip structure
1229 * @oob: oob destination address
1230 * @ops: oob ops structure
1231 @@ -1727,10 +1408,6 @@
1232 bytes = min_t(size_t, len, free->length);
1233 boffs = free->offset;
1236 -printk("%s: AUTO: oob=%p, chip->oob_poi=%p, ooboffs=%d, len=%d, bytes=%d, boffs=%d\n",
1237 - __FUNCTION__, oob, chip->oob_poi, ops->ooboffs, len, bytes, boffs);
1239 memcpy(oob, chip->oob_poi + boffs, bytes);
1242 @@ -1752,7 +1429,7 @@
1243 void* buffer, u_char* oobarea, loff_t offset)
1245 struct brcmnand_chip* chip = mtd->priv;
1246 - //int retries = 2;
1247 + int retries = 2, done = 0;
1248 static uint32_t oobbuf[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1249 uint32_t* p32 = (oobarea ? (uint32_t*) oobarea : (uint32_t*) &oobbuf[0]);
1250 u_char* p8 = (u_char*) p32;
1251 @@ -1769,31 +1446,7 @@
1252 //u_char oobbuf[16];
1255 - uint32_t acc, acc0;
1259 - * First disable Read ECC then re-try read OOB, because some times, the controller
1260 - * just drop the op on ECC errors.
1263 -#if 1 /* Testing 1 2 3 */
1265 - acc = brcmnand_ctrl_read(BCHP_NAND_ACC_CONTROL);
1266 - acc0 = acc & ~(BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK | BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK);
1267 - brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc0);
1269 - chip->ctrl_writeAddr(chip, offset, 0);
1270 - PLATFORM_IOFLUSH_WAR();
1271 - chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
1273 - // Wait until cache is filled up, disabling ECC checking
1274 - (void) brcmnand_spare_is_valid(mtd, FL_READING, 1);
1277 - brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc);
1280 for (i = 0; i < 4; i++) {
1281 p32[i] = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1283 @@ -1801,25 +1454,19 @@
1284 erased = (p8[6] == 0xff && p8[7] == 0xff && p8[8] == 0xff);
1285 allFF = (p8[6] == 0x00 && p8[7] == 0x00 && p8[8] == 0x00);
1287 -{printk("%s: offset=%0llx, erased=%d, allFF=%d\n",
1288 -__FUNCTION__, offset, erased, allFF);
1289 +{printk("%s: erased=%d, allFF=%d\n", __FUNCTION__, erased, allFF);
1290 print_oobbuf(p8, 16);
1293 else if (chip->ecclevel >= BRCMNAND_ECC_BCH_1 && chip->ecclevel <= BRCMNAND_ECC_BCH_12) {
1295 - allFF = 0; // Not sure for BCH.
1296 + erased = allFF = 1;
1297 // For BCH-n, the ECC bytes are at the end of the OOB area
1298 - for (i=chip->eccOobSize-chip->eccbytes; i<min(16,chip->eccOobSize); i++) {
1299 + for (i=chip->eccOobSize-chip->eccbytes; i<chip->eccOobSize; i++) {
1300 erased = erased && (p8[i] == 0xff);
1302 - printk("p8[%d]=%02x\n", i, p8[i]);
1304 + allFF = allFF && (p8[i] == 0x00);
1308 -{printk("%s: offset=%0llx, i=%d from %d to %d, eccOobSize=%d, eccbytes=%d, erased=%d, allFF=%d\n",
1309 -__FUNCTION__, offset, i, chip->eccOobSize-chip->eccbytes, chip->eccOobSize,
1311 +{printk("%s: eccOobSize=%d, eccbytes=%d, erased=%d, allFF=%d\n", __FUNCTION__,
1312 chip->eccOobSize, chip->eccbytes, erased, allFF);}
1315 @@ -2134,7 +1781,7 @@
1316 static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1317 uint32_t* p32 = (oobarea ? (uint32_t*) oobarea : (uint32_t*) &oob0[0]);
1318 u_char* p8 = (u_char*) p32;
1319 - //unsigned long irqflags;
1320 + unsigned long irqflags;
1321 int retries = 5, done=0;
1324 @@ -2257,24 +1904,17 @@
1325 print_databuf(buffer, 32);
1328 -#if defined( EDU_DEBUG ) || defined (BRCMNAND_READ_VERIFY )
1329 -//if (in_verify <=0)
1330 -if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
1332 +if (in_verify <=0) {
1333 u_char edu_sw_ecc[4];
1335 brcmnand_Hamming_ecc(buffer, edu_sw_ecc);
1337 -if ((p8[6] != edu_sw_ecc[0] || p8[7] != edu_sw_ecc[1] || p8[8] != edu_sw_ecc[2])
1338 - && !(p8[6]==0xff && p8[7]==0xff && p8[8]==0xff &&
1339 - edu_sw_ecc[0]==0x0 && edu_sw_ecc[1]==0x0 && edu_sw_ecc[2]==0x0)
1341 printk("!!!!!!!!! %s: offset=%0llx ECC=%02x%02x%02x, OOB:",
1342 in_verify < 0 ? "WR" : "RD",
1343 offset, edu_sw_ecc[0], edu_sw_ecc[1], edu_sw_ecc[2]);
1344 - print_oobbuf(p8, 16);
1346 + print_oobbuf(oobarea, 16);
1352 @@ -2282,14 +1922,24 @@
1357 - * Clear the controller cache by reading at a location we don't normally read
1361 +#ifdef CONFIG_MTD_BRCMNAND_EDU
1364 +extern int EDU_buffer_OK(volatile void* addr);
1368 +static uint32_t debug_buf32[512];
1369 +static u_char* ver_buf = (u_char*) &debug_buf32[0];
1370 +static u_char ver_oob[16];
1372 static void debug_clear_ctrl_cache(struct mtd_info* mtd)
1374 /* clear the internal cache by writing a new address */
1375 struct brcmnand_chip* chip = mtd->priv;
1376 - loff_t offset = chip->chipSize-chip->blockSize; // Start of BBT region
1377 + loff_t offset = chip->chipSize-0x100000; // Start of BBT region
1379 chip->ctrl_writeAddr(chip, offset, 0);
1380 PLATFORM_IOFLUSH_WAR();
1381 @@ -2299,20 +1949,6 @@
1382 (void) brcmnand_cache_is_valid(mtd, FL_READING, offset);
1385 -#ifdef CONFIG_MTD_BRCMNAND_EDU
1388 -extern int EDU_buffer_OK(volatile void* addr, int command);
1392 -static uint32_t debug_buf32[512];
1393 -static u_char* ver_buf = (u_char*) &debug_buf32[0];
1394 -static u_char ver_oob[16];
1399 static void debug_EDU_read(struct mtd_info* mtd,
1400 void* edu_buffer, u_char* edu_oob, loff_t offset, uint32_t intr_status,
1401 uint32_t edu_status, u_char* edu_sw_ecc)
1402 @@ -2373,126 +2009,213 @@
1407 + * brcmnand_posted_read_cache - [BrcmNAND Interface] Read the 512B cache area
1408 + * Assuming brcmnand_get_device() has been called to obtain exclusive lock
1409 + * @param mtd MTD data structure
1410 + * @param oobarea Spare area, pass NULL if not interested
1411 + * @param buffer the databuffer to put/get data, pass NULL if only spare area is wanted.
1412 + * @param offset offset to read from or write to, must be 512B aligned.
1413 + * @param raw: Ignore BBT bytes when raw = 1
1415 + * Caller is responsible to pass a buffer that is
1416 + * (1) large enough for 512B for data and optionally an oobarea large enough for 16B.
1417 + * (2) 4-byte aligned.
1419 + * Read the cache area into buffer. The size of the cache is mtd-->eccsize and is always 512B.
1421 +//#define EDU_DEBUG_2
1425 -int edu_read_verify(struct mtd_info *mtd, char* buffer, char* oobarea, loff_t offset)
1427 - struct brcmnand_chip* chip = mtd->priv;
1428 - static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1429 - uint32_t* p32 = (oobarea ? (uint32_t*) oobarea : (uint32_t*) &oob0[0]);
1431 +// EDU_DEBUG_4: Verify on Read
1432 +//#define EDU_DEBUG_4
1435 -PRINTK("%s: buffer=%08x, ctrlbuf=%08x, oobarea=%08x, ctrl_oob=%08x, offset=%08llx\n", __FUNCTION__,
1436 - buffer, ctrl_buf, oobarea, ctrl_oob, offset);
1437 +// EDU_DEBUG_5: Verify on Write
1438 +//#define EDU_DEBUG_5
1441 +#if defined( EDU_DEBUG_2 ) || defined( EDU_DEBUG_4 )
1442 +/* 3548 internal buffer is 4K in size */
1443 +//static uint32_t edu_lbuf[2048];
1444 +static uint32_t* edu_buf32;
1445 +static uint8_t* edu_buf; // Used by EDU in Debug2
1446 +static uint8_t* ctrl_buf; // Used by Ctrl in Debug4
1447 +static uint32_t ctrl_oob32[4];
1448 +static uint8_t* ctrl_oob = (uint8_t*) ctrl_oob32;
1450 +#define PATTERN 0xa55a0000
1452 - ctrlret = brcmnand_ctrl_posted_read_cache(mtd, ctrl_buf, ctrl_oob, offset);
1453 - //verify_edu_buf();
1454 - // Compare buffer returned from EDU and Ctrl reads:
1455 - if (0 != memcmp(ctrl_buf, buffer, 512)) {
1456 -printk("$$$$$$$$$$$$ EDU Read: offset=%08llx\n", offset);
1457 -print_databuf(buffer, 512);
1458 -printk("------------ Ctrl Read: \n");
1459 -print_databuf(ctrl_buf, 512);
1464 - if (0 != memcmp(p32, ctrl_oob, 16)) {
1465 -printk("########## Ctrl OOB:\n");
1466 -print_oobbuf(ctrl_oob, 16);
1467 -printk("------------ EDU OOB: \n");
1468 -print_oobbuf(p32, 16);
1469 -/* Which one is correct? Since the data buffers agree, use Hamming codes */
1470 - if (chip->ecclevel == BRCMNAND_ECC_HAMMING)
1472 - unsigned char ecc1[3]; // SW ECC, manually calculated
1473 - brcmnand_Hamming_WAR(mtd, offset, buffer, &ctrl_oob[6], &ecc1[0]);
1474 - printk("Hamming ECC=%02x%02x%02x\n", ecc1[0], ecc1[1], ecc1[2]);
1476 +#define EDU_BUFSIZE_B (512)
1477 +// One before and one after
1478 +#define EDU_BUF32_SIZE_B (EDU_BUFSIZE_B*3)
1480 +// Same as above in DW instead
1481 +#define EDU_BUFSIZE_DW (EDU_BUFSIZE_B/4)
1482 +#define EDU_BUF32_SIZE_DW (EDU_BUF32_SIZE_B/4)
1484 +// Real buffer starts at 1/3
1485 +#define EDU_BUF_START_DW (EDU_BUF32_SIZE_DW/3)
1488 +static void init_edu_buf(void)
1490 + /* Write pattern */
1494 + edu_buf32 = (uint32_t*) kmalloc(EDU_BUF32_SIZE_B, GFP_KERNEL);
1496 + printk("%s: Out of memory\n", __FUNCTION__);
1500 + edu_buf = ctrl_buf = (uint8_t*) &edu_buf32[EDU_BUF_START_DW];
1501 + printk("%s: Buffer allocated at %p, %d bytes\n", __FUNCTION__, edu_buf32, EDU_BUF32_SIZE_B);
1502 + printk("Real buffer starts at %p\n", ctrl_buf);
1506 + for (i=0; i<EDU_BUF32_SIZE_DW; i++) {
1507 + edu_buf32[i] = PATTERN | i;
1510 -#endif // Verify EDU on Read
1514 - * Read completion after EDU_Read is called.
1515 - * In ISR mode, this routine is run in interrupt context
1518 -brcmnand_edu_read_comp_intr(struct mtd_info* mtd,
1519 - void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
1520 +static int verify_edu_buf(void)
1522 - struct brcmnand_chip* chip = mtd->priv;
1523 - uint32_t intfc_status;
1525 - static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1526 - uint32_t* p32 = (oobarea ? (uint32_t*) oobarea : (uint32_t*) &oob0[0]);
1529 - if (intr_status & HIF_INTR2_EDU_ERR) {
1530 - printk("%s: Should not call me with EDU ERR\n", __FUNCTION__);
1532 + for (i=0; i<EDU_BUF_START_DW; i++) {
1533 + if (edu_buf32[i] != (PATTERN | i)) {
1534 + printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n",
1535 + __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
1539 - intfc_status = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1540 - if (!(intfc_status & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
1541 - printk("%s: Impossible, HIF_INTR2_CTRL_READY already asserted\n", __FUNCTION__);
1545 - // Remember last good sector read. Needed for HIF_INTR2 workaround.
1546 - gLastKnownGoodEcc = offset;
1549 - PLATFORM_IOFLUSH_WAR();
1550 - for (i = 0; i < 4; i++) {
1551 - p32[i] = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1552 + for (i=EDU_BUF_START_DW+EDU_BUFSIZE_DW; i<EDU_BUF32_SIZE_DW; i++) {
1553 + if (edu_buf32[i] != (PATTERN | i)) {
1554 + printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n",
1555 + __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
1558 -if (gdebug > 3) {printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf((u_char*) &p32[0], 16);}
1563 +if (ret) printk("+++++++++++++++ %s: %d DW overwritten by EDU\n", __FUNCTION__, ret);
1568 - * Read WAR after EDU_Read is called, and EDU returns errors.
1569 - * This routine can only be called in process context
1572 -brcmnand_edu_read_completion(struct mtd_info* mtd,
1573 - void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
1576 +static int brcmnand_EDU_posted_read_cache(struct mtd_info* mtd,
1577 + void* buffer, u_char* oobarea, loff_t offset)
1582 struct brcmnand_chip* chip = mtd->priv;
1583 - uint32_t edu_err_status;
1584 + loff_t sliceOffset = offset & (~ (mtd->eccsize - 1));
1586 static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1587 uint32_t* p32 = (oobarea ? (uint32_t*) oobarea : (uint32_t*) &oob0[0]);
1588 u_char* p8 = (u_char*) p32;
1592 + uint32_t intr_status;
1593 + unsigned long irqflags;
1597 +uint32_t edu_status;
1599 - if (in_interrupt()) {
1600 - printk(KERN_ERR "%s cannot be run in interrupt context\n", __FUNCTION__);
1603 +u_char* save_buf = buffer;
1606 +//if((offset >= (0x3a8148 & ~(0x1FF))) && (offset < ((0x3a8298+0x1F) & ~(0x1FF)))) gdebug=4;
1609 +printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__, offset, buffer, oobarea);}
1611 +#if 0 //def EDU_DEBUG_4
1612 +printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__, offset, buffer, oobarea);
1616 + if (unlikely(offset - sliceOffset)) {
1617 + printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n",
1618 + __FUNCTION__, offset, sliceOffset, mtd->eccsize);
1622 - if (intr_status & HIF_INTR2_EDU_ERR) {
1624 +//#if 0 // Testing 1 2 3
1625 + if (unlikely(!EDU_buffer_OK(buffer)))
1628 +if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
1629 + /* EDU does not work on non-aligned buffers */
1630 + ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1634 + if (wr_preempt_en) {
1635 + // local_irq_save(irqflags);
1638 +#if defined( EDU_DEBUG_2 )
1643 +#elif defined( EDU_DEBUG_4 )
1651 + EDU_ldw = chip->ctrl_writeAddr(chip, sliceOffset, 0);
1652 + PLATFORM_IOFLUSH_WAR();
1654 + if (intr_status & HIF_INTR2_EBI_TIMEOUT) {
1655 + EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
1657 + intr_status = EDU_read(buffer, EDU_ldw);
1660 +if ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ) {
1661 +uint32_t rd_data = ISR_volatileRead(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS);
1662 +printk("%s: EDU_read returns error %08x , intr=%08x at offset %0llx\n", __FUNCTION__, intr_status, rd_data, offset);
1665 + } while (retries-- > 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ));
1667 + if (retries <= 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT))) { // EBI Timeout
1668 + // Use controller read
1669 + printk("%s: EBI timeout, use controller read at offset %0llx\n", __FUNCTION__, offset);
1670 + ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1674 + else if (intr_status & HIF_INTR2_EDU_ERR) {
1675 if (wr_preempt_en) {
1676 //local_irq_restore(irqflags);
1678 - edu_err_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS);
1679 + edu_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS);
1680 +//if (edu_status == 0)
1681 +// printk("+++++++++++ %s:offset=%0llx Intr=%08x but EDU_status=%08x, LKG=%0llx\n", __FUNCTION__,
1682 +// offset, intr_status, edu_status, gLastKnownGoodEcc);
1685 /**** WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR */
1686 /* Do a dummy read on a known good ECC sector to clear error */
1687 - if (edu_err_status) {
1688 - static uint8_t myBuf2[512+31];
1690 - uint8_t* tmpBuf = (uint8_t*) ((((unsigned int) &myBuf2[0]) + 31) & (~31));
1693 + static uint32_t tmpBuf[128];
1694 // We start from the BBT, since these would (hopefully) always be good sectors.
1695 loff_t tmpOffset = chip->chipSize - 512;
1697 +//printk("Handle HIF_INTR2_UNC_ERR: Step 1: @offset %0llx\n", offset);
1698 +//print_oobreg(chip);
1700 // First make sure that there is a last known good sector
1701 while (gLastKnownGoodEcc == 0 && tmpOffset >= 0) {
1702 ret = brcmnand_ctrl_posted_read_cache(mtd, tmpBuf, NULL, tmpOffset);
1703 @@ -2502,21 +2225,22 @@
1705 // Clear the error condition
1706 //(void) brcmnand_EDU_posted_read_cache(mtd, tmpBuf, NULL, gLastKnownGoodEcc);
1707 + lkgs = chip->ctrl_writeAddr(chip, gLastKnownGoodEcc, 0);
1708 + PLATFORM_IOFLUSH_WAR();
1711 // Use Register Array
1712 // EDU_ldw = BCHP_PHYSICAL_OFFSET + BCHP_NAND_FLASH_CACHEi_ARRAY_BASE;
1713 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
1715 - ISR_push_request(mtd, tmpBuf, NULL, tmpOffset);
1717 - lkgs = chip->ctrl_writeAddr(chip, gLastKnownGoodEcc, 0);
1718 - PLATFORM_IOFLUSH_WAR();
1719 intr_status = EDU_read(buffer, lkgs);
1722 +//printk("intr_status returns from dummy read at offset %0llx: %08x\n", gLastKnownGoodEcc, intr_status);
1723 +//printk("Handle HIF_INTR2_UNC_ERR: Step 2:\n");
1724 +//print_oobreg(chip);
1725 ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, p8, offset);
1727 +//printk("Handle HIF_INTR2_UNC_ERR: Step 3:\n");
1728 +//print_oobreg(chip);
1731 +// printk("Unc Error WAR OOB="); print_oobbuf(p8, 16);
1735 // else there can be no workaround possible, use controller read
1736 @@ -2525,8 +2249,16 @@
1739 /**** ENDWAR ENDWAR ENDWAR ENDWAR */
1741 + // If error was not due to UNC or COR errors, or poll timeout, try the old-fashioned way
1742 + //ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1747 +//if (intr_status & HIF_INTR2_EDU_ERR)
1748 +// printk("%s: EDU_read returns error at offset=%0llx, intr_status=%08x\n", __FUNCTION__, offset, intr_status);
1751 * Wait for Controller ready, which indicates the OOB and buffer are ready to be read.
1753 @@ -2563,7 +2295,7 @@
1754 for (i = 0; i < 4; i++) {
1755 p32[i] = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1757 -if (gdebug > 3) {printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf((u_char*) &p32[0], 16);}
1758 +if (gdebug > 3) {printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, sliceOffset); print_oobbuf((u_char*) &p32[0], 16);}
1760 ret = 0; // Success!
1762 @@ -2571,7 +2303,9 @@
1763 case BRCMEDU_CORRECTABLE_ECC_ERROR:
1765 case BRCMNAND_CORRECTABLE_ECC_ERROR:
1767 +{save_debug = gdebug;
1770 printk("+++++++++++++++ CORRECTABLE_ECC: offset=%0llx ++++++++++++++++++++\n", offset);
1771 // Have to manually copy. EDU drops the buffer on error - even correctable errors
1773 @@ -2584,7 +2318,7 @@
1774 for (i = 0; i < 4; i++) {
1775 p32[i] = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1777 -if (gdebug > 3) {printk("CORRECTABLE: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf(oobarea, 16);}
1778 +if (gdebug > 3) {printk("CORRECTABLE: %s: offset=%0llx, oob=\n", __FUNCTION__, sliceOffset); print_oobbuf(oobarea, 16);}
1781 #ifndef DEBUG_HW_ECC // Comment out for debugging
1782 @@ -2604,7 +2338,7 @@
1787 +gdebug = edu_debug = save_debug;}
1790 case BRCMEDU_UNCORRECTABLE_ECC_ERROR:
1791 @@ -2612,13 +2346,16 @@
1796 +save_debug = gdebug;
1800 PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) ********************\n", offset);
1802 * THT: Since EDU does not handle OOB area, unlike the UNC ERR case of the ctrl read,
1803 * we have to explicitly read the OOB, before calling the WAR routine.
1805 - chip->ctrl_writeAddr(chip, offset, 0);
1806 + chip->ctrl_writeAddr(chip, sliceOffset, 0);
1807 chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
1809 // Wait until spare area is filled up
1810 @@ -2635,6 +2372,8 @@
1811 printk("************* UNCORRECTABLE_ECC (offset=%0llx) valid!=0 ********************\n", offset);
1815 +{gdebug = edu_debug = save_debug;}
1819 @@ -2661,126 +2400,73 @@
1829 - #ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
1831 - * brcmnand_posted_read_cache - [BrcmNAND Interface] Read the 512B cache area
1832 - * Assuming brcmnand_get_device() has been called to obtain exclusive lock
1833 - * @param mtd MTD data structure
1834 - * @param oobarea Spare area, pass NULL if not interested
1835 - * @param buffer the databuffer to put/get data, pass NULL if only spare area is wanted.
1836 - * @param offset offset to read from or write to, must be 512B aligned.
1837 - * @param raw: Ignore BBT bytes when raw = 1
1839 - * Caller is responsible to pass a buffer that is
1840 - * (1) large enough for 512B for data and optionally an oobarea large enough for 16B.
1841 - * (2) 4-byte aligned.
1843 - * Read the cache area into buffer. The size of the cache is mtd-->eccsize and is always 512B.
1847 -static int brcmnand_EDU_posted_read_cache(struct mtd_info* mtd,
1848 - void* buffer, u_char* oobarea, loff_t offset)
1852 + u_char edu_sw_ecc[4];
1855 + debug_EDU_read(mtd, buffer, oobarea, offset, intr_status, edu_status, edu_sw_ecc);
1857 - struct brcmnand_chip* chip = mtd->priv;
1858 - loff_t sliceOffset = offset & (~ (mtd->eccsize - 1));
1860 - //static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1861 - //uint32_t* p32 = (oobarea ? (uint32_t*) oobarea : (uint32_t*) &oob0[0]);
1862 - //u_char* p8 = (u_char*) p32;
1864 - uint32_t intr_status;
1865 - unsigned long irqflags;
1869 -uint32_t edu_status;
1872 -u_char* save_buf = buffer;
1873 + printk("!!!!!!!!! RD: offset=%0llx ECC=%02x%02x%02x, OOB:",
1874 +offset, edu_sw_ecc[0], edu_sw_ecc[1], edu_sw_ecc[2]);
1875 + print_oobbuf(oobarea, 16);
1879 -//if((offset >= (0x3a8148 & ~(0x1FF))) && (offset < ((0x3a8298+0x1F) & ~(0x1FF)))) gdebug=4;
1882 -printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__, offset, buffer, oobarea);}
1884 -#if 0 //def EDU_DEBUG_4
1885 -printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__, offset, buffer, oobarea);
1887 +if (offset <= 0x3a3600 && (offset+512) > 0x3a3600) {
1888 +printk("@@@@@@@@@ Dump EDU Read around 0x3a3600:\n");
1889 +print_databuf(buffer, 512);print_oobbuf(p32, 16);
1897 - if (unlikely(offset - sliceOffset)) {
1898 - printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n",
1899 - __FUNCTION__, offset, sliceOffset, mtd->eccsize);
1902 + ctrlret = brcmnand_ctrl_posted_read_cache(mtd, ctrl_buf, ctrl_oob, offset);
1903 + //verify_edu_buf();
1904 + // Compare buffer returned from EDU and Ctrl reads:
1905 + if (0 != memcmp(ctrl_buf, buffer, 512)) {
1906 +printk("$$$$$$$$$$$$ Read buffer from Ctrl & EDU read-ops differ at offset %0llx, intr_status=%08x, ecc=%d\n",
1907 + offset, intr_status, ecc);
1908 +printk("$$$$$$$$$$$$ EDU Read:\n");
1909 +print_databuf(buffer, 512);
1910 +printk("------------ Ctrl Read: \n");
1911 +print_databuf(edu_buf, 512);
1915 -//#if 0 // Testing 1 2 3
1916 - if (unlikely(!EDU_buffer_OK(buffer, EDU_READ)))
1920 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
1921 - /* EDU does not work on non-aligned buffers */
1922 - ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1924 + if (0 != memcmp(p32, ctrl_oob, 16)) {
1925 +printk("########## Read OOB from Ctrl & EDU read-ops differ at offset %0llx, intr_status=%08x, ecc=%d\n",
1926 + offset, intr_status, ecc);
1927 +printk("########## Ctrl OOB:\n");
1928 +print_oobbuf(ctrl_oob, 16);
1929 +printk("------------ EDU OOB: \n");
1930 +print_oobbuf(p32, 16);
1931 +/* Which one is correct? Since the data buffers agree, use Hamming codes */
1932 + if (chip->ecclevel == BRCMNAND_ECC_HAMMING)
1934 + unsigned char ecc1[3]; // SW ECC, manually calculated
1935 + brcmnand_Hamming_WAR(mtd, offset, buffer, &ctrl_oob[6], &ecc1[0]);
1936 + printk("Hamming ECC=%02x%02x%02x\n", ecc1[0], ecc1[1], ecc1[2]);
1942 +#endif // Verify EDU on Read
1944 - if (wr_preempt_en) {
1945 - // local_irq_save(irqflags);
1948 -#if defined( EDU_DEBUG_2 )
1953 -#elif defined( EDU_DEBUG_4 )
1961 - EDU_ldw = chip->ctrl_writeAddr(chip, sliceOffset, 0);
1962 - PLATFORM_IOFLUSH_WAR();
1964 - if (intr_status & HIF_INTR2_EBI_TIMEOUT) {
1965 - EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
1967 - intr_status = EDU_read(buffer, EDU_ldw);
1969 - } while (retries-- > 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ));
1972 - ret = brcmnand_edu_read_completion(mtd, buffer, oobarea, offset, intr_status);
1981 static int (*brcmnand_posted_read_cache)(struct mtd_info*,
1982 void*, u_char*, loff_t) = brcmnand_EDU_posted_read_cache;
1984 - #else /* Queue Mode */
1985 -static int (*brcmnand_posted_read_cache)(struct mtd_info*,
1986 - void*, u_char*, loff_t) = brcmnand_ctrl_posted_read_cache;
1991 static int (*brcmnand_posted_read_cache)(struct mtd_info*,
1992 void*, u_char*, loff_t) = brcmnand_ctrl_posted_read_cache;
1994 @@ -2805,33 +2491,16 @@
1995 loff_t sliceOffset = offset & (~(mtd->eccsize - 1));
1996 int i, ret = 0, valid, done = 0;
1998 - //unsigned long irqflags;
1999 + unsigned long irqflags;
2003 -#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
2004 - static uint8_t myBuf2[512+31]; // Place holder only.
2005 - static uint8_t* myBuf = NULL;
2008 - * Force alignment on 32B boundary
2011 - myBuf = (uint8_t*) ((((unsigned int) &myBuf2[0]) + 31) & (~31));
2014 - #if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_3_0
2016 - // PR2516. Not a very good WAR, but the affected chips (3548A0,7443A0) have been EOL'ed
2017 - return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
2020 - #else /* 3.1 or later */
2021 - // If BCH codes, force full page read to activate ECC correction on OOB bytes.
2022 - if (chip->ecclevel != BRCMNAND_ECC_HAMMING && chip->ecclevel != BRCMNAND_ECC_DISABLE) {
2023 - return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
2026 +#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_3_0
2028 + // PR2516. Not a very good WAR, but the affected chips (3548A0,7443A0) have been EOL'ed
2029 + static uint32_t myBuf[128]; // Place holder only.
2030 + return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
2034 if (gdebug > 3 ) PRINTK("->%s: offset=%0llx\n", __FUNCTION__, offset);
2035 @@ -2921,151 +2590,6 @@
2040 -//#ifdef CONFIG_MTD_BRCMNAND_EDU
2042 -//#define EDU_DEBUG_3
2045 -#if 0 //defined( EDU_DEBUG_3 ) || defined( EDU_DEBUG_5 ) || defined(BRCMNAND_WRITE_VERIFY )
2049 - * Returns 0 on no errors.
2050 - * THis should never be called, because partial writes may screw up the verify-read.
2052 -static int edu_write_verify(struct mtd_info *mtd,
2053 - const void* buffer, const u_char* oobarea, loff_t offset)
2055 - struct brcmnand_chip* chip = mtd->priv;
2056 - static uint8_t sw_ecc[4];
2057 - static uint32_t read_oob[4];
2058 - static uint8_t write_oob[16];
2059 - uint8_t* oobpoi = (uint8_t*) &read_oob[0];
2062 - // Dump the register, done immediately after EDU_Write returns
2063 - // dump_nand_regs(chip, offset);
2065 - if ( chip->ecclevel != BRCMNAND_ECC_HAMMING) {
2066 - // Read back the data, but first clear the internal cache first.
2067 - debug_clear_ctrl_cache(mtd);
2069 - ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2071 - printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2074 - if (0 != memcmp(buffer, edu_write_buf, 512)) {
2075 - printk("+++++++++++++++++++++++ %s: WRITE buffer differ with READ-Back buffer\n",
2080 - if (oobarea) { /* For BCH, the ECC is at the end */
2081 - // Number of bytes to compare (with ECC bytes taken out)
2082 - int numFree = min(16, chip->eccOobSize - chip->eccbytes);
2084 - if (memcmp(oobarea, oobpoi, numFree)) {
2085 - printk("+++++++++++++++++++++++ %s: BCH-%-d OOB comp failed, numFree=%d\n",
2086 - __FUNCTION__, chip->ecclevel, numFree);
2087 - printk("In OOB:\n"); print_oobbuf(oobarea, 16);
2088 - printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2096 - // Calculate the ECC
2097 - // brcmnand_Hamming_ecc(buffer, sw_ecc);
2099 - // Read back the data, but first clear the internal cache first.
2100 - debug_clear_ctrl_cache(mtd);
2103 - ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2107 - printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2112 - if (sw_ecc[0] != oobpoi[6] || sw_ecc[1] != oobpoi[7] || sw_ecc[2] != oobpoi[8]) {
2113 -printk("+++++++++++++++++++++++ %s: SWECC=%02x%02x%02x ReadOOB=%02x%02x%02x, buffer=%p, offset=%0llx\n",
2115 - sw_ecc[0], sw_ecc[1], sw_ecc[2], oobpoi[6], oobpoi[7], oobpoi[8], buffer, offset);
2122 - // Verify the OOB if not NULL
2124 - //memcpy(write_oob, oobarea, 16);
2125 - //write_oob[6] = sw_ecc[0];
2126 - //write_oob[7] = sw_ecc[1];
2127 - //write_oob[8] = sw_ecc[2];
2128 - if (memcmp(oobarea, oobpoi, 6) || memcmp(&oobarea[9], &oobpoi[9],7)) {
2129 - printk("+++++++++++++++++++++++ %s: OOB comp Hamming failed\n", __FUNCTION__);
2130 - printk("In OOB:\n"); print_oobbuf(oobarea, 16);
2131 - printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2140 - uint8_t* writeBuf = (uint8_t*) buffer;
2141 -//for (i=0; i<2; i++)
2143 -// Let user land completes its run to avoid garbled printout
2145 -for (j=0; j<512; j++) {
2146 - if (writeBuf[j] != edu_write_buf[j]) {
2147 - printk("Buffers differ at offset %04x\n", j);
2151 -printk("$$$$$$$$$$$$$$$$$ Register dump:\n");
2156 -for (k=0; k<numDumps; k++) {
2159 -printk("$$$$$$$$$$$$$$$$$ Register dump snapshot #%d:\n", k+1);
2160 -print_dump_nand_regs(k);
2165 -printk("EDU_write 99, ret=%d, offset=%0llx, buffer=%p\n", ret, offset, buffer);
2166 -printk("Write buffer:\n"); print_databuf(buffer, 512);
2167 -if (oobarea) { printk("Write OOB: "); print_oobbuf(oobarea, 512); }
2168 -printk("Read back buffer:\n"); print_databuf(edu_write_buf, 512);
2169 -if (oobarea) { printk("Read OOB: "); print_oobbuf(write_oob, 512); }
2171 -//printk("$$$$$$$$$$$$$$$$$ Register dump:\n");
2172 -//print_dump_nand_regs();
2180 -#define edu_write_verify(...) (0)
2185 * brcmnand_posted_write - [BrcmNAND Interface] Write a buffer to the flash cache
2186 * Assuming brcmnand_get_device() has been called to obtain exclusive lock
2187 @@ -3160,136 +2684,88 @@
2192 #ifdef CONFIG_MTD_BRCMNAND_EDU
2193 - #ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
2196 - * Performs WAR for queue-write. Currently, it is always called with needBBT=1
2197 - * Runs in process context.
2198 - * Return 0 on success, error codes on errors.
2201 -brcmnand_edu_write_war(struct mtd_info *mtd,
2202 - const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status,
2205 - struct brcmnand_chip* chip = mtd->priv;
2207 +//#define EDU_DEBUG_3
2212 - if (!(intr_status & HIF_INTR2_CTRL_READY)) {
2213 - printk("%s: Impossible, ctrl-ready asserted in interrupt handler\n", __FUNCTION__);
2216 +static uint8_t edu_write_buf[512];
2224 -#if 1 //defined (ECC_CORRECTABLE_SIMULATION) || defined(ECC_UNCORRECTABLE_SIMULATION) || defined(WR_BADBLOCK_SIMULATION)
2225 - printk("%s: Marking bad block @%0llx\n", __FUNCTION__, offset);
2227 - ret = chip->block_markbad(mtd, offset);
2231 -#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
2234 - if (edu_write_verify(mtd, buffer, oobarea, offset)) {
2243 -// When buffer is nor aligned as per EDU requirement, use controller-write
2244 -static int (*brcmnand_posted_write_cache)(struct mtd_info*,
2245 - const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache;
2247 - #else //#ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
2250 - * Write completion after EDU_Read is called.
2254 -brcmnand_edu_write_completion(struct mtd_info *mtd,
2255 - const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, uint32_t physAddr)
2256 +static int edu_write_verify(struct mtd_info *mtd,
2257 + const void* buffer, const u_char* oobarea, loff_t offset)
2259 struct brcmnand_chip* chip = mtd->priv;
2263 + static uint8_t sw_ecc[4];
2264 + static uint32_t read_oob[4];
2265 + static uint8_t write_oob[16];
2266 + uint8_t* oobpoi = (uint8_t*) &read_oob[0];
2270 -#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
2271 - if (!(intr_status & HIF_INTR2_CTRL_READY)) {
2272 - printk("%s: Impossible, ctrl-ready asserted in interrupt handler\n", __FUNCTION__);
2276 - // Wait until flash is ready.
2277 - // Becareful here. Since this can be called in interrupt context,
2278 - // we cannot call sleep or schedule()
2279 - comp = brcmnand_EDU_write_is_complete(mtd, &needBBT);
2281 - // Already done in interrupt handler
2282 - (void) dma_unmap_single(NULL, physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
2291 + if (chip->ecclevel != BRCMNAND_ECC_HAMMING) {
2292 + ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2294 + printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2299 -#if 1 //defined (ECC_CORRECTABLE_SIMULATION) || defined(ECC_UNCORRECTABLE_SIMULATION) || defined(WR_BADBLOCK_SIMULATION)
2300 - printk("%s: Marking bad block @%0llx\n", __FUNCTION__, offset);
2302 - ret = chip->block_markbad(mtd, offset);
2306 + if (0 != memcmp(buffer, edu_write_buf, 512)) {
2307 + printk("+++++++++++++++++++++++ %s: WRITE buffer differ with READ-Back buffer\n",
2312 + if (memcmp(oobarea, oobpoi, 16)) {
2313 + printk("+++++++++++++++++++++++ %s: OOB comp failed\n", __FUNCTION__);
2314 + printk("In OOB:\n"); print_oobbuf(oobarea, 16);
2315 + printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2321 + // Calculate the ECC
2322 + brcmnand_Hamming_ecc(buffer, sw_ecc);
2324 - //Write has timed out or read found bad block. TBD: Find out which is which
2325 - printk(KERN_INFO "%s: Timeout at offset %0llx\n", __FUNCTION__, offset);
2326 - // Marking bad block
2328 - printk("%s: Marking bad block @%0llx\n", __FUNCTION__, offset);
2330 - ret = chip->block_markbad(mtd, offset);
2336 + // Read back the data, but first clear the internal cache first.
2337 + debug_clear_ctrl_cache(mtd);
2341 + ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2344 -#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
2347 - if (edu_write_verify(mtd, buffer, oobarea, offset)) {
2352 + printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2357 + if (sw_ecc[0] != oobpoi[6] || sw_ecc[1] != oobpoi[7] || sw_ecc[2] != oobpoi[8]) {
2358 + printk("+++++++++++++++++++++++ %s: SWECC=%02x%02x%02x ReadOOB=%02x%02x%02x\n",
2360 + sw_ecc[0], sw_ecc[1], sw_ecc[2], oobpoi[6], oobpoi[7], oobpoi[8]);
2364 + // Verify the OOB if not NULL
2366 + memcpy(write_oob, oobarea, 16);
2367 + write_oob[6] = sw_ecc[0];
2368 + write_oob[7] = sw_ecc[1];
2369 + write_oob[8] = sw_ecc[2];
2370 + if (memcmp(write_oob, oobpoi, 16)) {
2371 + printk("+++++++++++++++++++++++ %s: OOB comp failed\n", __FUNCTION__);
2372 + printk("In OOB:\n"); print_oobbuf(write_oob, 16);
2373 + printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2381 +#define edu_write_verify(...) (0)
2385 * brcmnand_posted_write - [BrcmNAND Interface] Write a buffer to the flash cache
2386 * Assuming brcmnand_get_device() has been called to obtain exclusive lock
2387 @@ -3307,14 +2783,12 @@
2393 struct brcmnand_chip* chip = mtd->priv;
2395 loff_t sliceOffset = offset & (~ (mtd->eccsize - 1));
2398 - uint32_t physAddr;
2400 #ifdef WR_BADBLOCK_SIMULATION
2401 unsigned long tmp = (unsigned long) offset;
2402 @@ -3333,7 +2807,7 @@
2406 - if (unlikely(!EDU_buffer_OK(buffer, EDU_WRITE))) {
2407 + if (unlikely(!EDU_buffer_OK(buffer))) {
2408 // EDU requires the buffer to be DW-aligned
2409 PRINTK("%s: Buffer %p not suitable for EDU at %0llx, trying ctrl read op\n", __FUNCTION__, buffer, offset);
2410 ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
2411 @@ -3362,26 +2836,23 @@
2413 PLATFORM_IOFLUSH_WAR(); // Check if this line may be taken-out
2415 + //chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_PAGE);
2417 if (ret & HIF_INTR2_EBI_TIMEOUT) {
2418 EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
2420 - ret = EDU_write(buffer, EDU_ldw, &physAddr);
2422 + ret = EDU_write(buffer, EDU_ldw);
2424 // Nothing we can do, because, unlike read op, where we can just call the traditional read,
2425 // here we may need to erase the flash first before we can write again.
2426 -//printk("EDU_write returns %d, trying ctrl write \n", ret);
2427 -// ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
2428 + ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
2432 -//printk("EDU50\n");
2433 +// printk("EDU50\n");
2435 // Wait until flash is ready
2436 - comp = brcmnand_EDU_write_is_complete(mtd, &needBBT);
2438 - (void) dma_unmap_single(NULL, physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
2439 + ret = brcmnand_EDU_write_is_complete(mtd, &needBBT);
2440 }while (retries-- > 0 && ((ret == ERESTARTSYS) || (ret & HIF_INTR2_EBI_TIMEOUT)));
2442 if (retries <= 0 && ((ret == ERESTARTSYS) || (ret & HIF_INTR2_EBI_TIMEOUT))) {
2443 @@ -3390,9 +2861,18 @@
2447 +#ifdef WR_BADBLOCK_SIMULATION
2448 + if((tmp == wrBadBlockFailLocation) && (bScanBypass_badBlock == 0))
2450 + wrFailLocationOffset.s.high = 0;
2451 + wrFailLocationOffset.s.low = wrBadBlockFailLocation;
2452 + printk("Creating new bad block @ %0llx\n", EDU_sprintf(brcmNandMsg, wrFailLocationOffset.ll, this->xor_invert_val));
2464 @@ -3425,10 +2905,10 @@
2468 +// printk("EDU99\n");
2472 #if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
2475 if (edu_write_verify(mtd, buffer, oobarea, offset)) {
2477 @@ -3440,11 +2920,17 @@
2483 static int (*brcmnand_posted_write_cache)(struct mtd_info*,
2484 const void*, const u_char*, loff_t) = brcmnand_EDU_posted_write_cache;
2487 +/* Testing 1 2 3, use controller write */
2488 +static int (*brcmnand_posted_write_cache)(struct mtd_info*,
2489 + const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache;
2494 static int (*brcmnand_posted_write_cache)(struct mtd_info*,
2495 const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache;
2497 @@ -3564,7 +3050,7 @@
2498 set_current_state(TASK_UNINTERRUPTIBLE);
2499 add_wait_queue(&chip->wq, &wait);
2500 spin_unlock(&chip->chip_lock);
2501 - if (!wr_preempt_en && !in_interrupt())
2502 + if (!wr_preempt_en)
2504 remove_wait_queue(&chip->wq, &wait);
2506 @@ -3616,7 +3102,6 @@
2512 * brcmnand_read_page - {REPLACEABLE] hardware ecc based page read function
2513 * @mtd: mtd info structure
2514 @@ -3722,7 +3207,7 @@
2515 #ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
2516 static int brcmnand_refresh_blk(struct mtd_info *mtd, loff_t from)
2518 - struct brcmnand_chip *chip = mtd->priv;
2519 + struct brcmnand_chip *this = mtd->priv;
2520 int i, j, k, numpages, ret, count = 0, nonecccount = 0;
2521 uint8_t *blk_buf; /* Store one block of data (including OOB) */
2522 unsigned int pg_idx, oob_idx;
2523 @@ -3737,9 +3222,9 @@
2526 #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
2527 - chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
2528 + this->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
2530 - chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
2531 + this->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
2533 DEBUG(MTD_DEBUG_LEVEL3, "Inside %s: from=%0llx\n", __FUNCTION__, from);
2534 printk(KERN_INFO "%s: Performing block refresh for correctable ECC error at %0llx\n",
2535 @@ -3747,9 +3232,9 @@
2537 oob_idx = mtd->writesize;
2538 numpages = mtd->erasesize/mtd->writesize;
2539 - block_size = (1 << chip->erase_shift);
2540 + block_size = (1 << this->erase_shift);
2541 blkbegin = (from & (~(mtd->erasesize-1)));
2542 - realpage = blkbegin >> chip->page_shift;
2543 + realpage = blkbegin >> this->page_shift;
2545 #ifdef CONFIG_MTD_BRCMNAND_EDU
2547 @@ -3777,7 +3262,7 @@
2548 /* Read an entire block */
2549 brcmnand_get_device(mtd, FL_READING);
2550 for (i = 0; i < numpages; i++) {
2551 - ret = chip->read_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2552 + ret = brcmnand_read_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2554 #ifndef CONFIG_MTD_BRCMNAND_EDU
2555 BRCMNAND_free(blk_buf);
2556 @@ -3795,7 +3280,7 @@
2557 if (unlikely(gdebug > 0)) {
2558 printk("---> %s: Read -> erase\n", __FUNCTION__);
2560 - chip->state = FL_ERASING;
2561 + this->state = FL_ERASING;
2563 /* Erase the block */
2564 instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
2565 @@ -3813,7 +3298,7 @@
2566 instr->addr = blkbegin;
2567 instr->len = mtd->erasesize;
2568 if (unlikely(gdebug > 0)) {
2569 - printk("DEBUG -> erasing %0llx, %x %d\n",instr->addr, instr->len, chip->state);
2570 + printk("DEBUG -> erasing %0llx, %x %d\n",instr->addr, instr->len, this->state);
2572 ret = brcmnand_erase_nolock(mtd, instr, 0);
2574 @@ -3831,12 +3316,12 @@
2575 /* Write the entire block */
2577 oob_idx = mtd->writesize;
2578 - realpage = blkbegin >> chip->page_shift;
2579 + realpage = blkbegin >> this->page_shift;
2580 if (unlikely(gdebug > 0)) {
2581 - printk("---> %s: Erase -> write ... %d\n", __FUNCTION__, chip->state);
2582 + printk("---> %s: Erase -> write ... %d\n", __FUNCTION__, this->state);
2584 - oobinfo = chip->ecclayout;
2585 - chip->state = FL_WRITING;
2586 + oobinfo = this->ecclayout;
2587 + this->state = FL_WRITING;
2588 for (i = 0; i < numpages; i++) {
2589 /* Avoid writing empty pages */
2591 @@ -3858,7 +3343,7 @@
2593 /* Skip this page, but write the OOB */
2594 if (count == j && nonecccount != k) {
2595 - ret = chip->write_page_oob(mtd, blk_buf + oob_idx, realpage);
2596 + ret = this->write_page_oob(mtd, blk_buf + oob_idx, realpage);
2598 #ifndef CONFIG_MTD_BRCMNAND_EDU
2599 BRCMNAND_free(blk_buf);
2600 @@ -3875,7 +3360,7 @@
2601 for (j = 0; j < oobinfo->eccbytes; j++) {
2602 oobptr[oobinfo->eccpos[j]] = 0xff;
2604 - ret = chip->write_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2605 + ret = this->write_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2607 #ifndef CONFIG_MTD_BRCMNAND_EDU
2608 BRCMNAND_free(blk_buf);
2609 @@ -3900,463 +3385,7 @@
2613 -#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
2615 - * EDU ISR Implementation
2620 - * Submit the read op, then return immediately, without waiting for completion.
2621 - * Assuming queue lock held (with interrupt disable).
2624 -EDU_submit_read(eduIsrNode_t* req)
2626 - struct brcmnand_chip* chip = (struct brcmnand_chip*) req->mtd->priv;
2627 - uint32_t edu_status;
2629 - // THT: TBD: Need to adjust for cache line size here, especially on 7420.
2630 - req->physAddr = dma_map_single(NULL, req->buffer, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
2632 -if (edu_debug) PRINTK("%s: vBuff: %p physDev: %08x, PA=%08x\n", __FUNCTION__,
2633 -req->buffer, external_physical_device_address, phys_mem);
2635 - spin_lock(&req->lock);
2637 - req->edu_ldw = chip->ctrl_writeAddr(chip, req->offset, 0);
2638 - PLATFORM_IOFLUSH_WAR();
2640 - //req->cmd = EDU_READ;
2641 - req->opComplete = ISR_OP_SUBMITTED;
2644 - // We must also wait for Ctlr_Ready, otherwise the OOB is not correct, since we read the OOB bytes off the controller
2646 - req->mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
2647 - req->expect = HIF_INTR2_EDU_DONE;
2648 - // On error we also want Ctrlr-Ready because for COR ERR, the Hamming WAR depends on the OOB bytes.
2649 - req->error = HIF_INTR2_EDU_ERR;
2650 - req->intr = HIF_INTR2_EDU_DONE_MASK;
2651 - req->expired = jiffies + 3*HZ;
2653 - edu_status = EDU_volatileRead(EDU_BASE_ADDRESS+EDU_STATUS);
2654 - // Enable HIF_INTR2 only when we submit the first job in double buffering scheme
2655 - if (0 == (edu_status & BCHP_EDU_STATUS_Active_MASK)) {
2656 - ISR_enable_irq(req);
2659 - //EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_DONE, 0x00000000);
2662 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_ERR_STATUS, 0x00000000);
2664 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_LENGTH, EDU_LENGTH_VALUE);
2666 - EDU_waitForNoPendingAndActiveBit();
2668 - EDU_issue_command(req->physAddr , req->edu_ldw, EDU_READ);
2670 - spin_unlock(&req->lock);
2675 -int EDU_submit_write(eduIsrNode_t* req)
2677 - struct brcmnand_chip* chip = (struct brcmnand_chip*) req->mtd->priv;
2678 - uint32_t edu_status;
2682 - spin_lock(&req->lock);
2683 - // EDU is not a PCI device
2684 - // THT: TBD: Need to adjust for cache line size here, especially on 7420.
2685 - req->physAddr = dma_map_single(NULL, req->buffer, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
2687 - if (!(req->physAddr)) {
2688 - spin_unlock(&req->lock);
2693 - req->edu_ldw = chip->ctrl_writeAddr(chip, req->offset, 0);
2696 - if (req->oobarea) {
2697 - p32 = (uint32_t*) req->oobarea;
2698 -if (gdebug) {printk("%s: oob=\n", __FUNCTION__); print_oobbuf(req->oobarea, 16);}
2701 - // Fill with 0xFF if don't want to change OOB
2702 - p32 = (uint32_t*) &ffchars[0];
2705 -// printk("EDU40\n");
2706 - for (i = 0; i < 4; i++) {
2707 - chip->ctrl_write(BCHP_NAND_SPARE_AREA_WRITE_OFS_0 + i*4, cpu_to_be32(p32[i]));
2710 - PLATFORM_IOFLUSH_WAR(); // Check if this line may be taken-out
2713 - * Enable L2 Interrupt
2715 - //req->cmd = EDU_WRITE;
2716 - req->opComplete = ISR_OP_SUBMITTED;
2719 - /* On write we wait for both DMA done|error and Flash Status */
2720 - req->mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
2721 - req->expect = HIF_INTR2_EDU_DONE;
2722 - req->error = HIF_INTR2_EDU_ERR;
2723 - req->intr = HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY;
2726 - ISR_enable_irq(req);
2728 - //EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_DONE, 0x00000000);
2730 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_ERR_STATUS, 0x00000000);
2732 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_LENGTH, EDU_LENGTH_VALUE);
2734 - EDU_issue_command(req->physAddr, req->edu_ldw, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
2735 - spin_unlock(&req->lock);
2741 - * Submit the first entry that is in queued state,
2742 - * assuming queue lock has been held by caller.
2744 - * @doubleBuffering indicates whether we need to submit just 1 job or until EDU is full (double buffering)
2745 - * Return the number of job submitted (either 1 or zero), as we don't support doublebuffering yet.
2747 - * In current version (v3.3 controller), since EDU only have 1 register for EDU_ERR_STATUS,
2748 - * we can't really do double-buffering without losing the returned status of the previous read-op.
2751 -brcmnand_isr_submit_job(void)
2753 - uint32_t edu_pending;
2754 - eduIsrNode_t* req;
2755 - //struct list_head* node;
2758 -//printk("-->%s\n", __FUNCTION__);
2759 -//ISR_print_queue();
2761 - list_for_each_entry(req, &gJobQ.jobQ, list) {
2762 - //req = container_of(node, eduIsrNode_t, list);
2763 - switch (req->opComplete) {
2764 - case ISR_OP_QUEUED:
2765 - edu_pending = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_STATUS);
2766 - if (!(BCHP_EDU_STATUS_Pending_MASK & edu_pending)) {
2767 - if (gJobQ.cmd == EDU_READ) {
2768 - EDU_submit_read(req);
2770 - else if (gJobQ.cmd == EDU_WRITE) {
2771 - EDU_submit_write(req);
2774 - printk("%s: Invalid op\n", __FUNCTION__);
2778 -#ifdef EDU_DOUBLE_BUFFER_READ
2779 - if (/*doubleBuffering &&*/ numReq < 2) {
2784 -PRINTK("<-- %s: numReq=%d\n", __FUNCTION__, numReq);
2787 - case ISR_OP_COMPLETED:
2788 - case ISR_OP_SUBMITTED:
2789 - case ISR_OP_NEED_WAR:
2790 - case ISR_OP_TIMEDOUT:
2795 -PRINTK("<-- %s: numReq=%d\n", __FUNCTION__, numReq);
2800 - * Queue the entire page, then wait for completion
2803 -brcmnand_isr_read_page(struct mtd_info *mtd,
2804 - uint8_t *outp_buf, uint8_t* outp_oob, uint64_t page)
2806 - struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
2811 - uint64_t offset = ((uint64_t) page) << chip->page_shift;
2812 - uint32_t edu_pending;
2813 - int submitted = 0;
2814 - unsigned long flags;
2816 -//if (1/* (int) offset <= 0x2000 /*gdebug > 3 */) {
2817 -//printk("-->%s, offset=%08x\n", __FUNCTION__, (uint32_t) offset);}
2819 -printk("-->%s, page=%0llx, buffer=%p\n", __FUNCTION__, page, outp_buf);}
2822 -#if 0 // No need to check, we are aligned on a page
2823 - if (unlikely(offset - sliceOffset)) {
2824 - printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n",
2825 - __FUNCTION__, offset, sliceOffset, mtd->eccsize);
2832 - if (unlikely(!EDU_buffer_OK(outp_buf, EDU_READ)))
2834 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
2835 - /* EDU does not work on non-aligned buffers */
2836 - ret = brcmnand_read_page(mtd, outp_buf, outp_oob, page);
2840 - chip->pagebuf = page;
2842 - spin_lock_irqsave(&gJobQ.lock, flags);
2843 - if (!list_empty(&gJobQ.jobQ)) {
2844 - printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
2845 -//ISR_print_queue();
2848 - gJobQ.cmd = EDU_READ;
2849 - gJobQ.needWakeUp = 0;
2851 - for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
2852 - eduIsrNode_t* req;
2854 - * Queue the 512B sector read, then read the EDU pending bit,
2855 - * and issue read command, if EDU is available for read.
2857 - req = ISR_queue_read_request(mtd, &outp_buf[dataRead],
2858 - outp_oob ? &outp_oob[oobRead] : NULL,
2859 - offset + dataRead);
2861 - dataRead += chip->eccsize;
2862 - oobRead += chip->eccOobSize;
2864 - //BUG_ON(submitted != 1);
2868 - /* Kick start it. The ISR will submit the next job */
2870 - submitted = brcmnand_isr_submit_job();
2873 - while (!list_empty(&gJobQ.jobQ)) {
2874 - spin_unlock_irqrestore(&gJobQ.lock, flags);
2875 - ret = ISR_wait_for_queue_completion();
2876 - spin_lock_irqsave(&gJobQ.lock, flags);
2878 - spin_unlock_irqrestore(&gJobQ.lock, flags);
2884 - * Queue several pages for small page SLC, then wait for completion,
2886 - * (1) offset is aligned on a 512B boundary
2887 - * (2) that outp_buf is aligned on a 32B boundary.
2888 - * (3) Not in raw mode
2889 - * This routine only works when ECC-size = Page-Size (Small SLC flashes), and relies on the fact
2890 - * that the internal buffer can hold several data+OOB buffers for several small pages at once.
2892 - * The OOB are read into chip->buffers->OOB.
2893 - * The Queue Size and chip->buffers->oob are chosen such that the OOB
2894 - * will all fit inside the buffers.
2895 - * After a batch of jobs is completed, the OOB is then copied to the output OOB parameter.
2896 - * To keep it simple stupid, this routine cannot handle Raw mode Read.
2899 - * @mtd: MTD handle
2900 - * @outp_buf Data buffer, passed from file system driver
2901 - * @inoutpp_oob Address of OOB buffer, passed INOUT from file system driver
2902 - * @startPage page 0 of batch
2903 - * @numPages nbr of pages in batch
2904 - * @ops MTD ops from file system driver. We only look at the OOB mode (raw vs auto vs inplace)
2907 -brcmnand_isr_read_pages(struct mtd_info *mtd,
2908 - uint8_t *outp_buf, uint8_t** inoutpp_oob, uint64_t startPage, int numPages,
2909 - struct mtd_oob_ops *ops)
2911 - struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
2916 - uint64_t offset = ((uint64_t) startPage) << chip->page_shift;
2917 - uint32_t edu_pending;
2918 - int submitted = 0;
2919 - unsigned long flags;
2921 - u_char* oob = inoutpp_oob ? *inoutpp_oob : NULL;
2922 - u_char* oobpoi = NULL;
2923 - u_char* buf = outp_buf;
2927 - if (chip->pageSize != chip->eccsize) {
2928 - printk("%s: Can only be called on small page flash\n", __FUNCTION__);
2932 - if (ops->mode == MTD_OOB_RAW) {
2933 - printk("%s: Can only be called when not in RAW mode\n", __FUNCTION__);
2937 -printk("-->%s: mtd=%p, buf=%p, &oob=%p, oob=%p\n", __FUNCTION__,
2938 -mtd, outp_buf, inoutpp_oob, inoutpp_oob? *inoutpp_oob: NULL);
2941 - spin_lock_irqsave(&gJobQ.lock, flags);
2942 - if (!list_empty(&gJobQ.jobQ)) {
2943 - printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
2944 -//ISR_print_queue();
2947 - gJobQ.cmd = EDU_READ;
2948 - gJobQ.needWakeUp = 0;
2950 - if (inoutpp_oob && *inoutpp_oob) {
2951 - // In batch mode, read OOB into internal OOB buffer first.
2952 - // This pointer will be advanced because oob_transfer depends on it.
2953 - chip->oob_poi= BRCMNAND_OOBBUF(chip->buffers);
2954 - oobpoi = chip->oob_poi; // This pointer remains fixed
2957 - for (page = 0; page < numPages && ret == 0; page++) {
2958 - eduIsrNode_t* req;
2960 - req = ISR_queue_read_request(mtd, buf,
2961 - (inoutpp_oob && *inoutpp_oob) ? &oobpoi[oobRead] : NULL,
2962 - offset + dataRead);
2964 - dataRead += chip->eccsize;
2965 - oobRead += chip->eccOobSize;
2966 - buf += chip->eccsize;
2969 - //BUG_ON(submitted != 1);
2971 - /* Kick start it. The ISR will submit the next job */
2973 - submitted = brcmnand_isr_submit_job();
2976 - while (!list_empty(&gJobQ.jobQ)) {
2977 - spin_unlock_irqrestore(&gJobQ.lock, flags);
2978 - ret = ISR_wait_for_queue_completion();
2979 - spin_lock_irqsave(&gJobQ.lock, flags);
2981 - spin_unlock_irqrestore(&gJobQ.lock, flags);
2984 - /* Abort, and return error to file system */
2989 - /* Format OOB, from chip->OOB buffers */
2992 - oob = (inoutpp_oob && *inoutpp_oob) ? *inoutpp_oob : NULL;
2995 -PRINTK("%s: B4 transfer OOB: buf=%08x, chip->buffers=%08x, offset=%08llx\n",
2996 -__FUNCTION__, (uint32_t) buf, chip->buffers, offset + dataRead);
2998 - // Reset oob_poi to beginning of OOB buffer.
2999 - // This will get advanced, cuz brcmnand_transfer_oob depends on it.
3000 - chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3001 - // oobpoi pointer does not change in for loop
3002 - oobpoi = chip->oob_poi;
3004 - for (page=0; page < numPages && ret == 0; page++) {
3005 - u_char* newoob = NULL;
3007 -#ifdef EDU_DEBUG_4 /* Read verify */
3008 - ret = edu_read_verify(mtd, buf,
3009 - (inoutpp_oob && *inoutpp_oob) ? &oobpoi[oobRead] : NULL,
3010 - offset + dataRead);
3015 - if (unlikely(inoutpp_oob && *inoutpp_oob)) {
3016 - newoob = brcmnand_transfer_oob(chip, oob, ops);
3017 - chip->oob_poi += chip->eccOobSize;
3019 - // oobpoi stays the same
3022 - dataRead += chip->eccsize;
3023 - oobRead += chip->eccOobSize;
3024 - buf += chip->eccsize;
3028 - if (unlikely(inoutpp_oob && *inoutpp_oob)) {
3029 - *inoutpp_oob = oob;
3032 -PRINTK("<-- %s\n", __FUNCTION__);
3039 - * brcmnand_isr_read_page_oob - {REPLACABLE] hardware ecc based page read function
3040 - * @mtd: mtd info structure
3041 - * @chip: nand chip info structure. The OOB buf is stored in the oob_poi ptr on return
3043 - * Not for syndrome calculating ecc controllers which need a special oob layout
3046 -brcmnand_isr_read_page_oob(struct mtd_info *mtd,
3047 - uint8_t* outp_oob, uint64_t page)
3049 - struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3052 - * if BCH codes, use full page read to activate ECC on OOB area
3054 - if (chip->ecclevel != BRCMNAND_ECC_HAMMING && chip->ecclevel != BRCMNAND_ECC_DISABLE) {
3055 - return brcmnand_isr_read_page(mtd, chip->buffers->databuf, outp_oob, page);
3059 - return brcmnand_read_page_oob(mtd, outp_oob, page);
3070 * brcmnand_do_read_ops - [Internal] Read data with ECC
3072 * @mtd: MTD device structure
3073 @@ -4390,13 +3419,17 @@
3076 uint32_t readlen = ops->len;
3077 - uint32_t oobread = 0;
3078 uint8_t *bufpoi, *oob, *buf;
3080 - int buffer_aligned = 0;
3081 -//int nonBatch = 0;
3086 +printk("-->%s, buf=%p, oob=%p, offset=%0llx, len=%d, end=%0llx\n", __FUNCTION__,
3087 + ops->datbuf, ops->oobbuf, from, readlen, from+readlen);
3088 +printk("chip->buffers=%p, chip->oob=%p\n",
3089 + chip->buffers, BRCMNAND_OOBBUF(chip->buffers));
3092 stats = mtd->ecc_stats;
3094 // THT: BrcmNAND controller treats multiple chip as one logical chip.
3095 @@ -4407,7 +3440,6 @@
3096 //page = realpage & chip->pagemask;
3098 col = mtd64_ll_low(from & (mtd->writesize - 1));
3101 /* Debugging 12/27/08 */
3102 chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3103 @@ -4419,91 +3451,38 @@
3107 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
3109 - * Group several pages for submission for small page NAND
3111 - if (chip->pageSize == chip->eccsize && ops->mode != MTD_OOB_RAW) {
3114 - bytes = min(mtd->writesize - col, readlen);
3115 - // (1) Writing partial or full page
3116 - aligned = (bytes == mtd->writesize);
3118 + bytes = min(mtd->writesize - col, readlen);
3119 + aligned = (bytes == mtd->writesize);
3121 - // If writing full page, use user buffer, otherwise, internal buffer
3122 + /* Is the current page in the buffer ? */
3123 + if ( 1 /* (int64_t) realpage != chip->pagebuf */ || oob) {
3124 +#ifndef EDU_DEBUG_1
3125 bufpoi = aligned ? buf : chip->buffers->databuf;
3127 - // (2) Buffer satisfies 32B alignment required by EDU?
3128 - buffer_aligned = EDU_buffer_OK(bufpoi, EDU_READ);
3130 - // (3) Batch mode if writing more than 1 pages.
3131 - numPages = min(MAX_JOB_QUEUE_SIZE, readlen>>chip->page_shift);
3133 - // Only do Batch mode if all 3 conditions are satisfied.
3134 - if (!aligned || !buffer_aligned || numPages <= 1) {
3135 - /* Submit 1 page at a time */
3137 - numPages = 1; // We count partial page read
3138 - ret = chip->read_page(mtd, bufpoi, chip->oob_poi, realpage);
3143 - /* Transfer not aligned data */
3145 - chip->pagebuf = realpage;
3146 - memcpy(buf, &bufpoi[col], bytes);
3150 - if (unlikely(oob)) {
3151 - /* if (ops->mode != MTD_OOB_RAW) */
3152 - oob = brcmnand_transfer_oob(chip, oob, ops);
3159 - * Batch job possible, all 3 conditions are met
3160 - * bufpoi = Data buffer from FS driver
3161 - * oob = OOB buffer from FS driver
3163 - bytes = numPages*mtd->writesize;
3165 - ret = brcmnand_isr_read_pages(mtd, bufpoi, oob? &oob : NULL, realpage, numPages, ops);
3170 - buf += bytes; /* Advance Read pointer */
3180 - /* For subsequent reads align to page boundary. */
3182 - /* Increment page address */
3183 - realpage += numPages;
3191 + bufpoi = &debug_dbuf.databuf;
3192 + // rely on size of buffer to be 4096
3193 + memcpy(&bufpoi[mtd->writesize], debug_sig, 1+strlen(debug_sig));
3197 - bytes = min(mtd->writesize - col, readlen);
3198 - aligned = (bytes == mtd->writesize);
3200 - bufpoi = aligned ? buf : chip->buffers->databuf;
3202 + printk("%s: aligned=%d, buf=%p, bufpoi=%p, oob_poi=%p, bytes=%d, readlen=%d\n",
3203 + __FUNCTION__, aligned, buf, bufpoi, chip->oob_poi, bytes, readlen);
3206 ret = chip->read_page(mtd, bufpoi, chip->oob_poi, realpage);
3209 + if (0 != strcmp(&bufpoi[mtd->writesize], debug_sig)) {
3210 + printk("$$$$$$$$$$$$$$ Memory smash at end of buffer at %0llx, expect=%s\n",
3212 + printk(".... found\n"); print_oobbuf(&bufpoi[mtd->writesize], 1+strlen(debug_sig));
3214 + if (buf) memcpy(buf, &bufpoi[col], bytes);
3215 + if (oob) memcpy(oob, chip->oob_poi, mtd->oobsize);
3221 @@ -4524,25 +3503,45 @@
3226 + if (!(chip->options & NAND_NO_READRDY)) {
3228 + * Apply delay or wait for ready/busy pin. Do
3229 + * this before the AUTOINCR check, so no
3230 + * problems arise if a chip which does auto
3231 + * increment is marked as NOAUTOINCR by the
3234 + if (!chip->dev_ready)
3235 + udelay(chip->chip_delay);
3237 + nand_wait_ready(mtd);
3241 +printk("%s: Should never get here\n", __FUNCTION__);
3243 + memcpy(buf, chip->buffers->databuf + col, bytes);
3255 - /* For subsequent reads align to page boundary. */
3257 - /* Increment page address */
3259 + /* For subsequent reads align to page boundary. */
3261 + /* Increment page address */
3270 ops->retlen = ops->len - (size_t) readlen;
3272 +//#ifndef EDU_DEBUG_1
3273 +if (gdebug > 3 ) printk("<-- %s, ret=%d\n", __FUNCTION__, ret);
3278 @@ -4577,7 +3576,7 @@
3279 DEBUG(MTD_DEBUG_LEVEL3, "%s: from=%0llx\n", __FUNCTION__, from);
3282 -printk("-->%s, offset=%0llx, len=%08x\n", __FUNCTION__, from, len);}
3283 +printk("-->%s, offset=%0llx\n", __FUNCTION__, from);}
3286 /* Do not allow reads past end of device */
3287 @@ -4610,20 +3609,11 @@
3288 if (likely(chip->cet)) {
3289 if (likely(chip->cet->flags != BRCMNAND_CET_DISABLED)) {
3290 if (brcmnand_cet_update(mtd, from, &status) == 0) {
3293 - * PR57272: Provide workaround for BCH-n ECC HW bug when # error bits >= 4
3294 - * We will not mark a block bad when the a correctable error already happened on the same page
3296 -#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_3_4
3307 printk(KERN_INFO "DEBUG -> %s ret = %d, status = %d\n", __FUNCTION__, ret, status);
3308 @@ -4879,7 +3869,7 @@
3309 //struct nand_oobinfo noauto_oobsel;
3311 printk("Comparison Failed\n");
3312 - print_diagnostics(chip);
3313 + print_diagnostics();
3315 //noauto_oobsel = *oobsel;
3316 //noauto_oobsel.useecc = MTD_NANDECC_PLACEONLY;
3317 @@ -4917,7 +3907,7 @@
3319 struct brcmnand_chip * chip = mtd->priv;
3321 - int ret = 0; // Matched
3323 //int ooblen=0, datalen=0;
3325 u_char* oobbuf = v_oob_buf;
3326 @@ -4929,12 +3919,7 @@
3328 if (gdebug > 3) printk("-->%s: addr=%0llx\n", __FUNCTION__, addr);
3331 - * Only do it for Hamming codes because
3332 - * (1) We can't do it for BCH until we can read the full OOB area for BCH-8
3333 - * (2) OOB area is included in ECC calculation for BCH, so no need to check it
3336 + /* Only do it for Hamming codes */
3337 if (chip->ecclevel != BRCMNAND_ECC_HAMMING) {
3340 @@ -4942,7 +3927,7 @@
3342 page = ((uint64_t) addr) >> chip->page_shift;
3343 // Must read entire page
3344 - ret = chip->read_page(mtd, vbuf, oobbuf, page);
3345 + ret = brcmnand_read_page(mtd, vbuf, oobbuf, page);
3347 printk(KERN_ERR "%s: brcmnand_read_page at %08x failed ret=%d\n",
3348 __FUNCTION__, (unsigned int) addr, ret);
3349 @@ -4967,28 +3952,12 @@
3350 brcmnand_Hamming_ecc(&dbuf[pageOffset], sw_ecc);
3352 if (sw_ecc[0] != oobp[6] || sw_ecc[1] != oobp[7] || sw_ecc[2] != oobp[8]) {
3353 - if (oobp[6] == 0xff && oobp[7] == 0xff && oobp[8] == 0xff
3354 - && sw_ecc[0] == 0 && sw_ecc[1] == 0 && sw_ecc[2] == 0)
3357 - printk("%s: Verification failed at %0llx. HW ECC=%02x%02x%02x, SW ECC=%02x%02x%02x\n",
3358 - __FUNCTION__, addr,
3359 - oobp[6], oobp[7], oobp[8], sw_ecc[0], sw_ecc[1], sw_ecc[2]);
3363 + printk("%s: Verification failed at %0llx. HW ECC=%02x%02x%02x, SW ECC=%02x%02x%02x\n",
3364 + __FUNCTION__, addr,
3365 + oobp[6], oobp[7], oobp[8], sw_ecc[0], sw_ecc[1], sw_ecc[2]);
3370 - // Verify the OOB if not NULL
3372 - if (memcmp(&inp_oob[oobOffset], oobp, 6) || memcmp(&inp_oob[oobOffset+9], &oobp[9],7)) {
3373 - printk("+++++++++++++++++++++++ %s: OOB comp Hamming failed\n", __FUNCTION__);
3374 - printk("In OOB:\n"); print_oobbuf(&inp_oob[oobOffset], 16);
3375 - printk("\nVerify OOB:\n"); print_oobbuf(oobp, 16);
3383 @@ -5062,8 +4031,7 @@
3384 * @page: page number to write
3385 * @cached: cached programming [removed]
3388 -brcmnand_write_page(struct mtd_info *mtd,
3389 +static int brcmnand_write_page(struct mtd_info *mtd,
3390 const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page)
3392 struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3393 @@ -5094,206 +4062,17 @@
3397 -#ifdef BRCMNAND_WRITE_VERIFY
3401 +int save_debug = gdebug;
3403 - vret = brcmnand_verify_page(mtd, offset, inp_buf, mtd->writesize, inp_oob, chip->eccOobSize);
3404 + ret = brcmnand_verify_page(mtd, offset, inp_buf, mtd->writesize, inp_oob, chip->eccOobSize);
3405 //gdebug=save_debug;
3414 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
3417 - * Queue the entire page, then wait for completion
3420 -brcmnand_isr_write_page(struct mtd_info *mtd,
3421 - const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page)
3423 - struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3425 - int dataWritten = 0;
3426 - int oobWritten = 0;
3428 - uint64_t offset = page << chip->page_shift;
3430 - uint32_t edu_pending;
3431 - int submitted = 0;
3432 - unsigned long flags;
3435 -printk("-->%s, page=%0llx\n", __FUNCTION__, page);}
3438 -#if 0 // No need to check, we are aligned on a page
3439 - if (unlikely(offset - sliceOffset)) {
3440 - printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n",
3441 - __FUNCTION__, offset, sliceOffset, mtd->eccsize);
3448 - if (unlikely(!EDU_buffer_OK(inp_buf, EDU_WRITE)))
3450 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
3451 - /* EDU does not work on non-aligned buffers */
3452 - ret = brcmnand_write_page(mtd, inp_buf, inp_oob, page);
3456 - chip->pagebuf = page;
3458 - spin_lock_irqsave(&gJobQ.lock, flags);
3459 - if (!list_empty(&gJobQ.jobQ)) {
3460 - printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
3463 - gJobQ.cmd = EDU_WRITE;
3464 - gJobQ.needWakeUp = 0;
3467 - for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
3468 - eduIsrNode_t* req;
3470 - * Queue the 512B sector read, then read the EDU pending bit,
3471 - * and issue read command, if EDU is available for read.
3473 - req = ISR_queue_write_request(mtd, &inp_buf[dataWritten],
3474 - inp_oob ? &inp_oob[oobWritten] : NULL,
3475 - offset + dataWritten);
3477 - dataWritten += chip->eccsize;
3478 - oobWritten += chip->eccOobSize;
3483 - * Kick start it. The ISR will submit the next job
3486 - submitted = brcmnand_isr_submit_job();
3489 - while (!list_empty(&gJobQ.jobQ)) {
3490 - spin_unlock_irqrestore(&gJobQ.lock, flags);
3491 - ret = ISR_wait_for_queue_completion();
3492 - spin_lock_irqsave(&gJobQ.lock, flags);
3494 - spin_unlock_irqrestore(&gJobQ.lock, flags);
3500 - * Queue the several pages, then wait for completion
3501 - * For 512B page sizes only.
3504 -brcmnand_isr_write_pages(struct mtd_info *mtd,
3505 - const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t startPage, int numPages)
3507 - struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3509 - int dataWritten = 0;
3510 - int oobWritten = 0;
3512 - uint64_t offset = startPage << chip->page_shift;
3515 - uint32_t edu_pending;
3516 - int submitted = 0;
3517 - unsigned long flags;
3520 - /* Already checked by caller */
3521 - if (unlikely(!EDU_buffer_OK(inp_buf, EDU_WRITE)))
3523 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
3524 - /* EDU does not work on non-aligned buffers */
3525 - ret = brcmnand_write_page(mtd, inp_buf, inp_oob, startPage);
3530 - if (chip->pageSize != chip->eccsize) {
3531 - printk("%s: Can only be called on small page flash\n", __FUNCTION__);
3535 - spin_lock_irqsave(&gJobQ.lock, flags);
3536 - if (!list_empty(&gJobQ.jobQ)) {
3537 - printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
3540 - gJobQ.cmd = EDU_WRITE;
3541 - gJobQ.needWakeUp = 0;
3544 - for (page = 0; page < numPages && ret == 0; page++) {
3545 - eduIsrNode_t* req;
3547 - * Queue the 512B sector read, then read the EDU pending bit,
3548 - * and issue read command, if EDU is available for read.
3551 - req = ISR_queue_write_request(mtd, &inp_buf[dataWritten],
3552 - inp_oob ? &inp_oob[oobWritten] : NULL,
3553 - offset + dataWritten);
3555 - dataWritten += chip->eccsize;
3556 - oobWritten += chip->eccOobSize;
3562 - * Kick start it. The ISR will submit the next job
3563 - * We do it here, in order to avoid having to obtain the queue lock
3564 - * inside the ISR, in preparation for an RCU implementation.
3567 - submitted = brcmnand_isr_submit_job();
3570 - while (!list_empty(&gJobQ.jobQ)) {
3571 - spin_unlock_irqrestore(&gJobQ.lock, flags);
3572 - ret = ISR_wait_for_queue_completion();
3573 - spin_lock_irqsave(&gJobQ.lock, flags);
3575 - spin_unlock_irqrestore(&gJobQ.lock, flags);
3582 - for (page = 0; page < numPages && ret == 0; page++) {
3583 - ret = edu_write_verify(mtd, &inp_buf[dataWritten],
3584 - inp_oob ? &inp_oob[oobWritten] : NULL,
3585 - offset + dataWritten);
3587 - dataWritten += chip->eccsize;
3588 - oobWritten += chip->eccOobSize;
3601 * brcmnand_fill_oob - [Internal] Transfer client buffer to oob
3602 * @chip: nand chip structure
3603 @@ -5307,7 +4086,6 @@
3605 size_t len = ops->ooblen;
3611 @@ -5320,8 +4098,6 @@
3612 uint32_t boffs = 0, woffs = ops->ooboffs;
3615 - memset(chip->oob_poi + ops->ooboffs, 0xff, chip->eccOobSize-ops->ooboffs);
3617 for(; free->length && len; free++, len -= bytes) {
3618 /* Write request not from offset 0 ? */
3619 if (unlikely(woffs)) {
3620 @@ -5370,8 +4146,6 @@
3621 uint8_t *buf = ops->datbuf;
3622 int bytes = mtd->writesize;
3625 - int buffer_aligned = 0;
3627 DEBUG(MTD_DEBUG_LEVEL3, "-->%s, offset=%0llx\n", __FUNCTION__, to);
3629 @@ -5392,8 +4166,13 @@
3630 chip->select_chip(mtd, chipnr);
3635 + /* Check, if it is write protected */
3636 + if (nand_check_wp(mtd))
3641 realpage = to >> chip->page_shift;
3642 //page = realpage & chip->pagemask;
3643 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
3644 @@ -5414,70 +4193,15 @@
3645 chip->oob_poi = NULL;
3648 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
3649 - /* Buffer must be aligned for EDU */
3650 - buffer_aligned = EDU_buffer_OK(buf, EDU_WRITE);
3652 -#else /* Dont care */
3653 - buffer_aligned = 0;
3658 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
3660 - * Group several pages for submission for small page NAND
3662 - numPages = min(MAX_JOB_QUEUE_SIZE, writelen>>chip->page_shift);
3665 - if (buffer_aligned && numPages > 1 && chip->pageSize == chip->eccsize) {
3668 - /* Submit min(queueSize, len/512B) at a time */
3669 - //numPages = min(MAX_JOB_QUEUE_SIZE, writelen>>chip->page_shift);
3670 - bytes = chip->eccsize*numPages;
3672 - if (unlikely(oob)) {
3674 - for (j=0; j<numPages; j++) {
3675 - oob = brcmnand_fill_oob(chip, oob, ops);
3676 - /* THT: oob now points to where to read next,
3677 - * chip->oob_poi contains the OOB to be written
3679 - /* In batch mode, we advance the OOB pointer to the next OOB slot
3680 - * using chip->oob_poi
3682 - chip->oob_poi += chip->eccOobSize;
3684 - // Reset chip->oob_poi to beginning of OOB buffer for submission.
3685 - chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3688 - ret = brcmnand_isr_write_pages(mtd, buf, chip->oob_poi, realpage, numPages);
3690 + if (unlikely(oob)) {
3691 + oob = brcmnand_fill_oob(chip, oob, ops);
3692 + /* THT: oob now points to where to read next,
3693 + * chip->oob_poi contains the OOB to be written
3697 - else /* Else submit one page at a time */
3700 - /* Submit one page at a time */
3703 - bytes = mtd->writesize;
3705 - if (unlikely(oob)) {
3706 - chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3707 - oob = brcmnand_fill_oob(chip, oob, ops);
3708 - /* THT: oob now points to where to read next,
3709 - * chip->oob_poi contains the OOB to be written
3713 - ret = chip->write_page(mtd, buf, chip->oob_poi, realpage);
3717 + ret = chip->write_page(mtd, buf, chip->oob_poi, realpage);
3721 @@ -5486,9 +4210,21 @@
3725 - realpage += numPages;
3729 + page = realpage & chip->pagemask;
3730 + /* Check, if we cross a chip boundary */
3733 + chip->select_chip(mtd, -1);
3734 + chip->select_chip(mtd, chipnr);
3739 + if (unlikely(oob))
3740 + memset(chip->oob_poi, 0xff, mtd->oobsize);
3742 ops->retlen = ops->len - writelen;
3743 DEBUG(MTD_DEBUG_LEVEL3, "<-- %s\n", __FUNCTION__);
3744 @@ -6593,29 +5329,12 @@
3746 static void brcmnand_adjust_timings(struct brcmnand_chip *this, brcmnand_chip_Id* chip)
3748 - unsigned long nand_timing1 = this->ctrl_read(BCHP_NAND_TIMING_1);
3749 - unsigned long nand_timing1_b4;
3750 - unsigned long nand_timing2 = this->ctrl_read(BCHP_NAND_TIMING_2);
3751 - unsigned long nand_timing2_b4;
3752 - extern uint32_t gNandTiming1;
3753 - extern uint32_t gNandTiming2;
3756 - * Override database values with kernel command line values
3758 - if (0 != gNandTiming1 || 0 != gNandTiming2) {
3759 - if (0 != gNandTiming1) {
3760 - chip->timing1 = gNandTiming1;
3761 - //this->ctrl_write(BCHP_NAND_TIMING_1, gNandTiming1);
3763 - if (0 != gNandTiming2) {
3764 - chip->timing2 = gNandTiming2;
3765 - //this->ctrl_write(BCHP_NAND_TIMING_2, gNandTiming2);
3770 - // Adjust NAND timings from database or command line
3771 + unsigned long nand_timing1 = this->ctrl_read(BCHP_NAND_TIMING_1);
3772 + unsigned long nand_timing1_b4;
3773 + unsigned long nand_timing2 = this->ctrl_read(BCHP_NAND_TIMING_2);
3774 + unsigned long nand_timing2_b4;
3776 + // Adjust NAND timings:
3777 if (chip->timing1) {
3778 nand_timing1_b4 = nand_timing1;
3780 @@ -6688,61 +5407,20 @@
3781 brcmnand_read_id(struct mtd_info *mtd, unsigned int chipSelect, unsigned long* dev_id)
3783 struct brcmnand_chip * chip = mtd->priv;
3785 - uint32_t nandConfig = chip->ctrl_read(BCHP_NAND_CONFIG);
3786 - uint32_t csNandSelect = 0;
3787 - uint32_t nandSelect = 0;
3789 - if (chipSelect > 0) { // Do not re-initialize when on CS0, Bootloader already done that
3791 -#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
3792 - nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3794 -printk("B4: NandSelect=%08x, nandConfig=%08x, chipSelect=%d\n", nandSelect, nandConfig, chipSelect);
3797 - #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
3798 - /* Older version do not have EXT_ADDR registers */
3799 - chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
3800 - chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
3801 - #endif // Set EXT address if version >= 1.0
3802 +#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
3803 + /* Set correct chip Select */
3804 + chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, BCHP_NAND_CMD_START_OPCODE_DEVICE_ID_READ);
3805 + chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << 16);
3808 - // Has CFE initialized the register?
3809 - if (0 == (nandSelect & BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK)) {
3811 - #if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_0_1
3812 - csNandSelect = 1<<(BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT + chipSelect);
3813 +PRINTK("-->%s: this=%p, chip->ctrl_read=%p\n", __FUNCTION__, chip, chip->ctrl_read);
3815 - // v1.0 does not define it
3816 - #elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
3817 - csNandSelect = 1<<(BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT + chipSelect);
3819 - #endif // If brcmNAND Version >= 1.0
3821 - nandSelect = BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK | csNandSelect;
3822 - chip->ctrl_write(BCHP_NAND_CS_NAND_SELECT, nandSelect);
3825 - /* Send the command for reading device ID from controller */
3826 - chip->ctrl_write(BCHP_NAND_CMD_START, OP_DEVICE_ID_READ);
3828 - /* Wait for CTRL_Ready */
3829 - brcmnand_wait(mtd, FL_READY, &status);
3831 -#endif // if BrcmNAND Version >= 0.1
3834 + /* Send the command for reading device ID from controller */
3835 *dev_id = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID);
3837 printk(KERN_INFO "brcmnand_probe: CS%1d: dev_id=%08x\n", chipSelect, (unsigned int) *dev_id);
3839 -#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
3840 - nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3843 - nandConfig = chip->ctrl_read(BCHP_NAND_CONFIG);
3845 -printk("After: NandSelect=%08x, nandConfig=%08x\n", nandSelect, nandConfig);
3849 @@ -6764,8 +5442,6 @@
3856 /* Read manufacturer and device IDs from Controller */
3857 brcmnand_read_id(mtd, chipSelect, &chip->device_id);
3858 @@ -7169,10 +5845,9 @@
3860 version_id = chip->ctrl_read(BCHP_NAND_REVISION);
3862 - printk(KERN_INFO "BrcmNAND version = 0x%04x %dMB @%08lx\n",
3863 - version_id, mtd64_ll_low(chip->chipSize>>20), chip->pbase);
3864 + printk(KERN_INFO "BrcmNAND version = 0x%04x %dMB @%p\n",
3865 + version_id, mtd64_ll_low(chip->chipSize>>20), chip->vbase);
3871 @@ -7615,92 +6290,51 @@
3874 #elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
3877 - uint32_t nand_xor;
3879 + * Starting with version 2.0 (bcm7325 and later),
3880 + * we can use EBI_CS_USES_NAND Registers to find out where the NAND
3881 + * chips are (which CS)
3883 + if (gNumNand > 0) { /* Kernel argument nandcs=<comma-sep-list> override CFE settings */
3884 + if (brcmnand_sort_chipSelects(mtd, maxchips, gNandCS, chip->CS))
3886 + cs = chip->CS[chip->numchips - 1];
3887 +PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3892 - * Starting with version 2.0 (bcm7325 and later),
3893 - * we can use EBI_CS_USES_NAND Registers to find out where the NAND
3894 - * chips are (which CS)
3896 + /* Load the gNandCS_priv[] array from EBI_CS_USES_NAND values,
3897 + * same way that get_options() does, i.e. first entry is gNumNand
3899 + int nandCsShift, i;
3901 + int nandCS[MAX_NAND_CS];
3904 - if (gNumNand > 0) { /* Kernel argument nandcs=<comma-sep-list> override CFE settings */
3905 - if (brcmnand_sort_chipSelects(mtd, maxchips, gNandCS, chip->CS))
3907 - cs = chip->CS[chip->numchips - 1];
3908 - PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3912 - /* Load the gNandCS_priv[] array from EBI_CS_USES_NAND values,
3913 - * same way that get_options() does, i.e. first entry is gNumNand
3916 - int numNand = 0; // Number of NAND chips
3917 - int nandCS[MAX_NAND_CS];
3919 - for (i = 0; i< MAX_NAND_CS; i++) {
3923 - nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3924 - // Be careful here, the last bound depends on chips. Some chips allow 8 CS'es (3548a0) some only 2 (3548b0)
3925 - // Here we rely on BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT being the next bit.
3926 - for (i=0, nandCsShift = BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3927 - nandCsShift < BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT;
3930 - if (nand_select & (1 << nandCsShift)) {
3931 - nandCS[i] = nandCsShift - BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3932 - PRINTK("Found NAND on CS%1d\n", nandCS[i]);
3937 - if (brcmnand_sort_chipSelects(mtd, maxchips, nandCS, chip->CS))
3939 - cs = chip->CS[chip->numchips - 1];
3940 - PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3946 + for (i = 0; i< MAX_NAND_CS; i++) {
3951 - * 2618-7.3: For v2.0 or later, set xor_disable according to NAND_CS_NAND_XOR:00 bit
3954 - nand_xor = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_XOR);
3955 - printk("NAND_CS_NAND_XOR=%08x\n", nand_xor);
3957 -#ifdef CONFIG_MTD_BRCMNAND_DISABLE_XOR
3958 - /* Testing 1,2,3: Force XOR disable on CS0, if not done by CFE */
3959 - if (chip->CS[0] == 0) {
3960 - printk("Disabling XOR: Before: SEL=%08x, XOR=%08x\n", nand_select, nand_xor);
3962 - nand_select &= ~BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK;
3963 - nand_xor &= ~BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK;
3965 - brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, nand_select);
3966 - brcmnand_ctrl_write(BCHP_NAND_CS_NAND_XOR, nand_xor);
3968 - printk("Disabling XOR: After: SEL=%08x, XOR=%08x\n", nand_select, nand_xor);
3971 - /* Translate nand_xor into our internal flag, for brcmnand_writeAddr */
3972 - for (i=0; i<chip->numchips; i++) {
3974 - /* Set xor_disable, 1 for each NAND chip */
3975 - if (!(nand_xor & (BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK<<i))) {
3976 -printk("Disabling XOR on CS#%1d\n", chip->CS[i]);
3977 - chip->xor_disable[i] = 1;
3979 + nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3980 + // Be careful here, the last bound depends on chips. Some chips allow 8 CS'es (3548a0) some only 2 (3548b0)
3981 + // Here we rely on BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT being the next bit.
3982 + for (i=0, nandCsShift = BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3983 + nandCsShift < BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT;
3986 + if (nand_select & (1 << nandCsShift)) {
3987 + nandCS[i] = nandCsShift - BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3988 + PRINTK("Found NAND on CS%1d\n", nandCS[i]);
3993 + if (brcmnand_sort_chipSelects(mtd, maxchips, nandCS, chip->CS))
3995 + cs = chip->CS[chip->numchips - 1];
3996 +PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
4001 #error "Unknown Broadcom NAND controller version"
4002 #endif /* Versions >= 1.0 */
4003 @@ -7728,15 +6362,24 @@
4004 volatile unsigned long acc_control;
4007 + if (chip->chipSize >= (128 << 20)) {
4008 + chip->pbase = 0x11000000; /* Skip 16MB EBI Registers */
4010 - /* Set up base, based on flash size */
4011 - if (chip->chipSize >= (256 << 20)) {
4012 - chip->pbase = 0x12000000;
4013 - mtd->size = 0x20000000 - chip->pbase; // THT: This is different than chip->chipSize
4015 - /* We know that flash endAddr is 0x2000_0000 */
4016 - chip->pbase = 0x20000000 - chip->chipSize;
4017 + mtd->num_eraseblocks = (chip->chipSize - (16<<20)) >> chip->erase_shift; // Maximum size on a 128MB/256MB flash
4018 + chip->mtdSize = device_size(mtd);
4021 + else if (chip->chipSize == (256 << 20)) {
4022 + chip->pbase = 0x11000000; // Skip 16MB EBI Registers
4023 + mtd->size = 240<<20; // Maximum size on a 256MB flash, provided CS0/NOR is disabled
4027 + chip->pbase = 0x18000000 - chip->chipSize;
4028 mtd->size = chip->chipSize;
4029 + chip->mtdSize = mtd->size;
4031 + //mtd->size_hi = 0;
4034 printk("Found NAND chip on Chip Select %d, chipSize=%dMB, usable size=%dMB, base=%08x\n",
4035 @@ -7926,7 +6569,7 @@
4036 printk("ACC: %d OOB bytes per 512B ECC step; from ID probe: %d\n", eccOobSize, chip->eccOobSize);
4037 // We have recorded chip->eccOobSize during probe, let's compare it against value from ACC
4038 if (chip->eccOobSize < eccOobSize) {
4039 - printk("Flash says it has %d OOB bytes, but ECC level %lu need %d bytes\n",
4040 + printk("Flash says it has %d OOB bytes, but ECC level %d need %d bytes\n",
4041 chip->eccOobSize, eccLevel, eccOobSize);
4042 printk(KERN_INFO "Please fix your board straps. Aborting to avoid file system damage\n");
4044 @@ -7941,7 +6584,7 @@
4048 - printk(KERN_ERR "Unsupported ECC level %lu\n", eccLevel);
4049 + printk(KERN_ERR "Unsupported ECC level %d\n", eccLevel);
4053 @@ -7963,11 +6606,11 @@
4054 brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc_control );
4055 printk("Corrected PARTIAL_PAGE_EN: ACC_CONTROL = %08lx\n", acc_control);
4057 -#ifdef CONFIG_MIPS_BCM3548
4058 - /* THT PR50928: Disable WR_PREEMPT for 3548L and 3556 */
4059 - acc_control &= ~(BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK);
4061 + /* THT Disable Optimization for 2K page */
4062 + acc_control &= ~(BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK|BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_MASK);
4063 brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc_control );
4064 - printk("Disable WR_PREEMPT: ACC_CONTROL = %08lx\n", acc_control);
4065 + printk("Disable WR_PREEMPT and PAGE_HIT_EN: ACC_CONTROL = %08lx\n", acc_control);
4067 printk("ACC_CONTROL for MLC NAND: %08lx\n", acc_control);
4069 @@ -8010,58 +6653,7 @@
4070 printk("SLC flash: Corrected ACC_CONTROL = %08lx from %08lx\n", acc_control, org_acc_control);
4075 -#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_3_4
4077 - * PR57272: Workaround for BCH-n error,
4078 - * reporting correctable errors with 4 or more bits as uncorrectable:
4080 - if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
4081 - int corr_threshold;
4083 - if ( chip->ecclevel >= BRCMNAND_ECC_BCH_4) {
4084 - corr_threshold = 2;
4087 - corr_threshold = 1; // 1 , default for Hamming
4090 - printk(KERN_INFO "%s: CORR ERR threshold set to %d bits\n", __FUNCTION__, corr_threshold);
4091 - corr_threshold <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
4092 - brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, corr_threshold);
4097 - * If ECC level is BCH, set CORR Threshold according to # bits corrected
4099 - if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
4100 - int corr_threshold;
4102 - if (chip->ecclevel >= BRCMNAND_ECC_BCH_8) {
4103 - corr_threshold = 6; // 6 out of 8
4105 - else if ( chip->ecclevel >= BRCMNAND_ECC_BCH_4) {
4106 - corr_threshold = 3; // 3 out of 4
4109 - corr_threshold = 1; // 1 , default for Hamming
4111 - printk(KERN_INFO "%s: CORR ERR threshold set to %d bits\n", __FUNCTION__, corr_threshold);
4112 - corr_threshold <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
4113 - brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, corr_threshold);
4120 - /* Version 2.x, Hamming codes only */
4121 - /* If chip Select is not zero, the CFE may not have initialized the NAND flash */
4122 - if (chip->CS[0]) {
4123 - /* Nothing for now */
4125 #endif // Version 3.0+
4126 #endif // Version 1.0+
4128 @@ -8112,17 +6704,12 @@
4130 printk("++++++++++++ EDU_DEBUG_3 enabled\n");
4132 -#if defined( EDU_DEBUG_4 ) || defined( EDU_DEBUG_5 )
4135 - #ifdef EDU_DEBUG_4
4136 - printk("++++++++++++ EDU_DEBUG_4 (read verify) enabled\n");
4139 - #ifdef EDU_DEBUG_5
4140 - printk("++++++++++++ EDU_DEBUG_5 (write verify) enabled\n");
4143 +printk("++++++++++++ EDU_DEBUG_4 (read verify) enabled\n");
4146 +printk("++++++++++++ EDU_DEBUG_5 (write verify) enabled\n");
4149 PRINTK("%s 30\n", __FUNCTION__);
4151 @@ -8200,22 +6787,8 @@
4155 - switch (mtd->writesize) {
4157 - if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
4158 - printk(KERN_WARNING "This SLC-4K-page flash may not be suitable for Hamming codes\n");
4159 - chip->ecclayout = &brcmnand_oob_128;
4162 - chip->ecclayout = &brcmnand_oob_bch4_4k;
4167 - printk(KERN_ERR "Unsupported page size of %d\n", mtd->writesize);
4171 + printk(KERN_ERR "Unsupported SLC NAND with page size of %d\n", mtd->writesize);
4176 @@ -8239,18 +6812,7 @@
4177 //chip->eccOobSize = (mtd->oobsize*512) /mtd->writesize;
4178 printk(KERN_INFO "mtd->oobsize=%d, mtd->eccOobSize=%d\n", mtd->oobsize, chip->eccOobSize);
4180 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
4181 if (!chip->read_page)
4182 - chip->read_page = brcmnand_isr_read_page;
4183 - if (!chip->write_page)
4184 - chip->write_page = brcmnand_isr_write_page;
4185 - if (!chip->read_page_oob)
4186 - chip->read_page_oob = brcmnand_isr_read_page_oob;
4187 - /* There is no brcmnand_isr_write_page_oob */
4188 - if (!chip->write_page_oob)
4189 - chip->write_page_oob = brcmnand_write_page_oob;
4191 - if (!chip->read_page)
4192 chip->read_page = brcmnand_read_page;
4193 if (!chip->write_page)
4194 chip->write_page = brcmnand_write_page;
4195 @@ -8258,7 +6820,6 @@
4196 chip->read_page_oob = brcmnand_read_page_oob;
4197 if (!chip->write_page_oob)
4198 chip->write_page_oob = brcmnand_write_page_oob;
4200 if (!chip->read_oob)
4201 chip->read_oob = brcmnand_do_read_ops;
4202 if (!chip->write_oob)
4203 @@ -8387,17 +6948,21 @@
4211 + printk("------------------> Dry-run\n");
4212 + brcmnand_posted_read_oob(mtd, oob, device_size(mtd) - mtd->erasesize, 1);
4213 + print_oobbuf(oob, 16);
4214 + printk("<------------------ End Dry-run\n");
4217 +if (gdebug > 3) printk("%s 60 Calling scan_bbt\n", __FUNCTION__);
4219 -#ifdef CONFIG_MTD_BRCMNAND_DISABLE_XOR
4221 - printk("-----------------------------------------------------\n");
4222 - print_nand_ctrl_regs();
4223 - printk("-----------------------------------------------------\n");
4227 err = chip->scan_bbt(mtd);
4228 +if (gdebug > 3) printk("%s 80 Done scan_bbt\n", __FUNCTION__);
4232 #ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
4233 @@ -8411,9 +6976,9 @@
4238 PRINTK("%s 99\n", __FUNCTION__);
4240 +if (gdebug) print_diagnostics();
4244 Index: drivers/mtd/brcmnand/brcmnand_cet.c
4245 ===================================================================
4246 --- drivers/mtd/brcmnand/brcmnand_cet.c (revision 1)
4247 +++ drivers/mtd/brcmnand/brcmnand_cet.c (working copy)
4250 #define CET_SYNC_FREQ (10*60*HZ)
4253 static char cet_pattern[] = {'C', 'E', 'T', 0};
4254 static struct brcmnand_cet_descr cet_descr = {
4257 .pattern = cet_pattern
4261 - * This also applies to Large Page SLC flashes with BCH-4 ECC.
4262 - * We don't support BCH-4 on Small Page SLCs because there are not
4263 - * enough free bytes for the OOB, but we don't enforce it,
4264 - * in order to allow page aggregation like in YAFFS2 on small page SLCs.
4266 static struct brcmnand_cet_descr cet_descr_mlc = {
4269 @@ -685,18 +677,10 @@
4270 if (unlikely(gdebug)) {
4271 printk(KERN_INFO "brcmnandCET: Creating correctable error table ...\n");
4274 - if (NAND_IS_MLC(this) || /* MLC flashes */
4275 - /* SLC w/ BCH-n; We don't check for pageSize, and let it be */
4276 - (this->ecclevel >= BRCMNAND_ECC_BCH_1 && this->ecclevel <= BRCMNAND_ECC_BCH_12))
4278 + if (NAND_IS_MLC(this)) {
4279 this->cet = cet = &cet_descr_mlc;
4280 -if (gdebug) printk("%s: CET = cet_desc_mlc\n", __FUNCTION__);
4285 this->cet = cet = &cet_descr;
4286 -if (gdebug) printk("%s: CET = cet_descr\n", __FUNCTION__);
4289 /* Check that BBT table and mirror exist */
4290 Index: drivers/mtd/brcmnand/brcmnand_isr.c
4291 ===================================================================
4292 --- drivers/mtd/brcmnand/brcmnand_isr.c (revision 1)
4293 +++ drivers/mtd/brcmnand/brcmnand_isr.c (working copy)
4294 @@ -22,705 +22,189 @@
4295 * 20090318 tht Original coding
4298 -//#define ISR_DEBUG_SMP
4299 -#undef ISR_DEBUG_SMP
4301 -#ifdef ISR_DEBUG_SMP
4302 -#include <asm/atomic.h>
4306 #include "brcmnand_priv.h"
4309 -#include <linux/dma-mapping.h>
4312 -//#define PRINTK printk
4314 -#ifdef ISR_DEBUG_SMP
4315 -static atomic_t v = ATOMIC_INIT(1);
4316 -#define PRINTK1(...) if (!atomic_dec_and_test(&v)) printk("<")
4317 -#define PRINTK2(...) atomic_inc(&v) //, printk(">"))
4318 -#define PRINTK5(...) if (!atomic_dec_and_test(&v)) printk("+");
4319 -#define PRINTK6(...) atomic_inc(&v) // printk("-");
4320 -#define PRINTK3(...) if (!atomic_dec_and_test(&v)) printk("[");
4321 -#define PRINTK4(...) atomic_inc(&v) // printk("]");
4324 -#define PRINTK1(...)
4325 -#define PRINTK2(...)
4326 -#define PRINTK3(...)
4327 -#define PRINTK4(...)
4328 -#define PRINTK5(...)
4329 -#define PRINTK6(...)
4331 +//define PRINTK printk
4334 // Wakes up the sleeping calling thread.
4335 static DECLARE_WAIT_QUEUE_HEAD(gEduWaitQ);
4337 -//eduIsrNode_t gEduIsrData;
4338 -eduIsrNode_t gEduIsrPool[MAX_JOB_QUEUE_SIZE+2]; /* ReadOp Pool, add 2 for Pushed WAR jobs */
4339 +eduIsrData_t gEduIsrData;
4341 -isrJobQ_t gJobQ; /* Job Queue */
4347 - * Queue next sector for read/write, assuming caller holds queue lock
4350 -ISR_queue_read_request(struct mtd_info *mtd,
4351 - void* buffer, u_char* oobarea, loff_t offset)
4352 +static irqreturn_t ISR_isr(int irq, void *devid, struct pt_regs *regs)
4354 - eduIsrNode_t* entry;
4355 - struct list_head* node;
4357 - // Grab one request from avail list
4358 - if (list_empty(&gJobQ.availList)) {
4359 - printk("%s: Empty avail list\n", __FUNCTION__);
4362 - node = gJobQ.availList.next;
4364 - printk("%s: Empty avail list\n", __FUNCTION__);
4367 - entry = list_entry(node, eduIsrNode_t, list);
4371 - list_add_tail(node, &gJobQ.jobQ);
4372 - spin_lock_init(&entry->lock);
4374 - entry->buffer = buffer;
4375 - entry->oobarea = oobarea;
4376 - entry->offset = offset;
4378 - entry->refCount = 1;
4379 - entry->opComplete = ISR_OP_QUEUED;
4385 -ISR_queue_write_request(struct mtd_info *mtd,
4386 - const void* buffer, const u_char* oobarea, loff_t offset)
4388 - eduIsrNode_t* entry;
4389 - struct list_head* node;
4391 - // Grab one request from avail list
4392 - if (list_empty(&gJobQ.availList)) {
4393 - printk("%s: Empty avail list\n", __FUNCTION__);
4396 - node = gJobQ.availList.next;
4398 - printk("%s: Empty avail list\n", __FUNCTION__);
4401 - entry = list_entry(node, eduIsrNode_t, list);
4405 - list_add_tail(node, &gJobQ.jobQ);
4406 - spin_lock_init(&entry->lock);
4408 - entry->buffer = buffer;
4409 - entry->oobarea = oobarea;
4410 - entry->offset = offset;
4412 - entry->refCount = 1;
4413 - entry->opComplete = ISR_OP_QUEUED;
4420 - * Push next sector for dummy read to head of queue, assuming caller holds queue lock
4421 - * Job will be next to be executed
4424 -ISR_push_request(struct mtd_info *mtd,
4425 - void* buffer, u_char* oobarea, loff_t offset)
4427 - eduIsrNode_t* entry;
4428 - struct list_head* node;
4430 - // Grab one request from avail list
4431 - if (list_empty(&gJobQ.availList)) {
4432 - printk("%s: Empty avail list\n", __FUNCTION__);
4435 - node = gJobQ.availList.next;
4437 - printk("%s: Empty avail list\n", __FUNCTION__);
4440 - entry = list_entry(node, eduIsrNode_t, list);
4443 - // Push to head of queue
4444 - list_add(node, &gJobQ.jobQ);
4445 - spin_lock_init(&entry->lock);
4447 - entry->buffer = buffer;
4448 - entry->oobarea = oobarea;
4449 - entry->offset = offset;
4451 - entry->refCount = 1;
4452 - entry->opComplete = ISR_OP_QUEUED;
4459 - * Called with ReqdQ Read lock held
4460 - * Returns pointer to node that satisfies opStatus,
4461 - * with spin lock held (spin_lock()'ed assuming queue lock has been held))
4464 -ISR_find_request( isrOpStatus_t opStatus)
4466 - eduIsrNode_t* req;
4468 - list_for_each_entry(req, &gJobQ.jobQ, list) {
4470 - // We called this with spin_lock_irqsave on queue lock, so no need for the irq variant
4471 - spin_lock(&req->lock);
4472 - if (req->opComplete == opStatus) {
4475 - spin_unlock(&req->lock);
4477 - return (eduIsrNode_t*) 0;;
4482 -ISR_print_queue(void)
4484 - eduIsrNode_t* req;
4487 - list_for_each_entry(req, &gJobQ.jobQ, list) {
4489 - // We called this with spin_lock_irqsave on queue lock, so no need for the irq variant
4490 - printk("I=%d req=%p, offset=%0llx, opComp=%d, list=%p, next=%p, prev=%p\n",
4491 - i, req, req->offset, req->opComplete, &req->list, req->list.next, req->list.prev);
4494 - return (eduIsrNode_t*) 0;;
4500 - * We've got interrupted, and verified that job is complete.
4501 - * Job lock has been held by caller.
4502 - * Do Read completion routines
4503 - * runs in interrupt context.
4504 - * Return returned value of read-op.
4509 -#if 0 //def EDU_DOUBLE_BUFFER_READ
4511 -/* Save this to be revived when we are sure that EDU's double buffering works */
4513 -ISR_read_completion(eduIsrNode_t* req)
4515 - /* Make sure that the current request does not cause an UNC ERR, as
4516 - * that would require a read from the LKGS to reset EDU
4518 - if (req->status & HIF_INTR2_EDU_ERR) {
4519 - uint32_t edu_err_status;
4521 - edu_err_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS);
4522 - if (edu_err_status && edu_err_status != EDU_ERR_STATUS_NandECCcor) {
4524 - /* If error, we must stop the on-going EDU op, because it will be dropped by EDU.
4525 - * This is VLSI PR2389
4527 - edu_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_STATUS);
4528 - if (edu_status & BCHP_EDU_STATUS_Active_MASK) {
4529 - uint32_t edu_done = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_DONE);
4532 - // Abort current command
4533 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_STOP, BCHP_EDU_STOP_Stop_MASK);
4535 - // Wait for Done to increment
4536 - while (edu_done == EDU_volatileRead(EDU_BASE_ADDRESS + EDU_DONE))
4538 - // Wait for Pending and Active to Clear
4539 - while (0 != (edu_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_STATUS)))
4542 - EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_STOP, 0);
4543 - // Let the process context thread handle the WAR,
4544 - // But we need to requeue the current op (req2)
4545 - req2 = req->list.next;
4546 - down(&req2->lock);
4547 - if (req2 && req2->opComplete == ISR_OP_SUBMITTED) {
4548 - req2->opComplete = ISR_OP_QUEUED;
4555 - // ReadOp completes with no errors, queue next requests until Pending is set
4563 - * The requests are queued, some with ISR_OP_SUBMITTED status, some with ISR_OP_QUEUED
4564 - * When an interrupt comes in, we just look for the one that are in submitted status, and mark them
4565 - * as ISR_OP_COMPLETE, and wake up the wait queue.
4566 - * However, if (1) there is an error that requires a workaround, or (2) that the operation is not yet completed,
4567 - * we need to take appropriate action depending on the case.
4568 - * In (1), we have a false uncorrectable error, that need a read from the last known good sector,
4569 - * so if double buffering is in effect, we need to abort the current EDU job, in order to do the workaround.
4570 - * In (2) we just update the current job, and let the HW interrupt us again.
4572 - * Runs in interrupt context.
4575 -ISR_isr(int irq, void *devid, struct pt_regs *regs)
4577 uint32_t status, rd_data;
4579 - eduIsrNode_t* req;
4580 - //struct list_head* node;
4581 - uint32_t flashAddr;
4582 unsigned long flags;
4587 - if (devid != (void*) &gJobQ) {
4588 + if (devid != (void*) &gEduIsrData) {
4592 - spin_lock_irqsave(&gJobQ.lock, flags);
4593 - /* TBD: How to tell Read Request from Write Request */
4594 - if (list_empty(&gJobQ.jobQ)) {
4595 - printk("%s: Impossible no job to process\n", __FUNCTION__);
4597 - // CLear interrupt and return
4598 - intrMask = ISR_volatileRead(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_STATUS);
4599 - ISR_disable_irq(intrMask);
4600 - spin_unlock_irqrestore(&gJobQ.lock, flags);
4601 - return IRQ_HANDLED;
4604 - flashAddr = EDU_volatileRead(EDU_BASE_ADDRESS+EDU_EXT_ADDR) - (EDU_LENGTH_VALUE-1);
4606 - flashAddr &= ~(EDU_LENGTH_VALUE-1);
4608 - req = ISR_find_request(ISR_OP_SUBMITTED);
4612 - printk("%s: Impossible failed to find queued job\n", __FUNCTION__);
4616 - // req->lock held here.
4619 - * Remember the status, as there can be several L1 interrupts before completion.
4620 - * Grab the lock first, we don't want any race condition.
4622 - // spin_lock(&req->lock); Already locked by ISR_find_request
4623 intrMask = ISR_volatileRead(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_STATUS);
4624 rd_data = ISR_volatileRead(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS);
4626 -PRINTK("==> %s: Awaken rd_data=%08x, intrMask=%08x, cmd=%d, flashAddr=%08x\n", __FUNCTION__,
4627 - rd_data, intrMask, gJobQ.cmd, req->edu_ldw);
4628 +PRINTK("%s: Awaken rd_data=%08x, intrMask=%08x, cmd=%d, flashAddr=%08x\n", __FUNCTION__,
4629 + rd_data, intrMask, gEduIsrData.cmd, gEduIsrData.flashAddr);
4631 - req->status |= rd_data;
4632 - status = req->status & req->mask;
4635 - * Evaluate exit/completion condition.
4636 + * Remember the status, as there can be several L1 interrupts before completion
4638 - switch (gJobQ.cmd) {
4639 + spin_lock_irqsave(&gEduIsrData.lock, flags);
4640 + gEduIsrData.status |= rd_data;
4641 + status = gEduIsrData.status & gEduIsrData.mask;
4643 + // Evaluate exit/completion condition
4644 + switch (gEduIsrData.cmd) {
4646 case NAND_CTRL_READY:
4647 - if ((req->expect == (req->status & req->expect)) ||
4648 - (req->status & req->error))
4650 - req->opComplete = ISR_OP_COMPLETED;
4652 + gEduIsrData.opComplete = ((gEduIsrData.expect == (gEduIsrData.status & gEduIsrData.expect)) ||
4653 + (gEduIsrData.status & gEduIsrData.error));
4658 * We wait for both DONE|ERR +CTRL_READY
4660 - if ((req->expect == (req->status & req->expect) ||
4661 - (req->status & req->error))
4662 + gEduIsrData.opComplete = ((gEduIsrData.expect == (gEduIsrData.status & gEduIsrData.expect) ||
4663 + (gEduIsrData.status & gEduIsrData.error))
4665 - (req->status & HIF_INTR2_CTRL_READY))
4667 - req->opComplete = ISR_OP_COMPLETED;
4668 - (void) dma_unmap_single(NULL, req->physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
4673 - printk("%s: Invalid command %08x\n", __FUNCTION__, gJobQ.cmd);
4675 + (gEduIsrData.status & HIF_INTR2_CTRL_READY));
4678 - if (ISR_OP_COMPLETED == req->opComplete) {
4681 - /* ACK interrupt */
4682 - ISR_disable_irq(req->intr);
4684 - // Do we need to do WAR for EDU, since EDU stop dead in its track regardless of the kind of errors. Bummer!
4685 - if (req->status & HIF_INTR2_EDU_ERR) {
4686 - uint32_t edu_err_status;
4689 - * We need to do WAR for EDU, which just stops dead on its tracks if there is any error, correctable or not.
4690 - * Problem is, the WAR needs to be done in process context,
4691 - * so we wake up the process context thread, and handle the WAR there.
4693 -PRINTK("%s: Awaken process context thread for EDU WAR, flashAddr=%08x, status=%08x, hif_intr2=%08x\n",
4694 -__FUNCTION__, req->edu_ldw, req->status, HIF_INTR2_EDU_ERR);
4695 - gJobQ.needWakeUp= 1;
4696 - req->opComplete = ISR_OP_NEED_WAR;
4697 - wake_up(&gEduWaitQ);
4698 - spin_unlock(&req->lock);
4699 - spin_unlock_irqrestore(&gJobQ.lock, flags);
4700 - return IRQ_HANDLED;
4704 - * Get here only if there are no errors, call job completion routine.
4706 - switch (gJobQ.cmd) {
4708 - /* All is left to do is to handle the OOB read */
4709 - req->ret = brcmnand_edu_read_comp_intr(req->mtd, req->buffer, req->oobarea, req->offset,
4716 - * Even if there are no HIF_INTR2_ERR, we still need to check
4717 - * the flash status. If it is set, we need to update the BBT
4718 - * which requires process context WAR
4720 - struct brcmnand_chip *chip = req->mtd->priv;
4721 - uint32_t flashStatus = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
4724 - /* Just to be dead sure */
4725 - if (!(flashStatus & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
4726 - printk("%s: Impossible, CTRL-READY already asserted\n", __FUNCTION__);
4729 - /* Check for flash write error, in which case tell process context thread to handle it */
4730 - if (flashStatus & 0x1) {
4732 - gJobQ.needWakeUp= 1;
4733 - req->opComplete = ISR_OP_NEED_WAR;
4734 - wake_up(&gEduWaitQ);
4735 - spin_unlock(&req->lock);
4736 - spin_unlock_irqrestore(&gJobQ.lock, flags);
4737 - return IRQ_HANDLED;
4739 - /* Nothing to be done when everything is OK
4741 - * req->ret = brcmnand_edu_write_completion(req->mtd, req->buffer, req->oobarea, req->offset,
4742 - * req->status, req->physAddr, rq->needBBT);
4748 - // Jop completes with no errors, queue next requests until Pending is set
4749 - list_del(&req->list);
4751 - list_add_tail(&req->list, &gJobQ.availList);
4752 - spin_unlock(&req->lock);
4754 - submitted = brcmnand_isr_submit_job();
4756 - if (!submitted) { /* No more job to submit, we are done, wake up process context thread */
4757 - wake_up(&gEduWaitQ);
4760 + if (gEduIsrData.opComplete) {
4761 + ISR_disable_irq(gEduIsrData.intr);
4762 + wake_up_interruptible(&gEduWaitQ);
4766 /* Ack only the ones that show */
4767 - uint32_t ack = req->status & req->intr;
4768 + uint32_t ack = gEduIsrData.status & gEduIsrData.intr;
4770 -PRINTK("%s: opComp=0, intr=%08x, mask=%08x, expect=%08x, err=%08x, status=%08x, rd_data=%08x, intrMask=%08x, flashAddr=%08x, DRAM=%08x\n", __FUNCTION__,
4771 -req->intr, req->mask, req->expect, req->error, req->status, rd_data, intrMask, req->flashAddr, req->dramAddr);
4772 +printk("%s: opComp=0, intr=%08x, mask=%08x, expect=%08x, err=%08x, status=%08x, rd_data=%08x, intrMask=%08x, flashAddr=%08x, DRAM=%08x\n", __FUNCTION__,
4773 +gEduIsrData.intr, gEduIsrData.mask, gEduIsrData.expect, gEduIsrData.error, gEduIsrData.status, rd_data, intrMask, gEduIsrData.flashAddr, gEduIsrData.dramAddr);
4775 // Just disable the ones that are triggered
4776 ISR_disable_irq(ack);
4777 - req->intr &= ~ack;
4778 + gEduIsrData.intr &= ~ack;
4781 + if (gEduIsrData.intr) {
4783 - ISR_enable_irq(req);
4787 printk(KERN_ERR "%s: Lost interrupt\n", __FUNCTION__);
4790 - spin_unlock(&req->lock);
4793 - spin_unlock_irqrestore(&gJobQ.lock, flags);
4795 -PRINTK2("<== %s: \n", __FUNCTION__);
4796 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4803 - * Called with no lock
4804 - * Wait until the Read Queue is empty
4805 - * Run in process context.
4806 - * Return 0 if all jobs complete successfully
4807 - * Return error codes and abort if any job returned un-correctable errors.
4810 -ISR_wait_for_queue_completion(void)
4811 +uint32_t ISR_wait_for_completion(void)
4814 -//volatile unsigned int c = 0xfedeadad;
4815 - int ret = -ERESTARTSYS;
4818 unsigned long to_jiffies = 3*HZ; /* 3 secs */
4819 - //unsigned long cur_jiffies = jiffies;
4820 - unsigned long expired = jiffies + to_jiffies;
4822 - eduIsrNode_t* req;
4823 - eduIsrNode_t saveReq;
4825 unsigned long flags;
4827 - /* Loop is for wait_event_interruptible_timeout */
4829 - waitret = wait_event_timeout(gEduWaitQ, list_empty(&gJobQ.jobQ) || gJobQ.needWakeUp, to_jiffies);
4830 - if (waitret == 0) { /* TimeOut */
4831 - ret = BRCMNAND_TIMED_OUT;
4834 - spin_lock_irqsave(&gJobQ.lock, flags);
4835 - if (gJobQ.needWakeUp) { /* Need to do process context WAR */
4836 - req = ISR_find_request(ISR_OP_NEED_WAR);
4837 + ret = wait_event_interruptible_timeout(gEduWaitQ, gEduIsrData.opComplete, to_jiffies);
4840 - printk("%s: Cannot find job that need WAR\n", __FUNCTION__);
4843 + spin_lock_irqsave(&gEduIsrData.lock, flags);
4847 + cmd = gEduIsrData.cmd;
4848 + gEduIsrData.cmd = -1;
4850 - /* Mark the job as complete and free it */
4851 - req->opComplete = ISR_OP_COMPLETED;
4852 - gJobQ.needWakeUp = 0;
4854 - // Job, with error, is now complete, remove it from queue, and submit next request
4855 - list_del(&req->list);
4857 - list_add_tail(&req->list, &gJobQ.availList);
4859 - spin_unlock(&req->lock);
4861 - // req lock held inside ISR_find_request
4862 - switch (gJobQ.cmd) {
4864 - ret = brcmnand_edu_read_completion(
4865 - saveReq.mtd, saveReq.buffer, saveReq.oobarea, saveReq.offset,
4869 - ret = brcmnand_edu_write_war(
4870 - saveReq.mtd, saveReq.buffer, saveReq.oobarea, saveReq.offset,
4871 - saveReq.status, saveReq.needBBT);
4874 - printk("%s: Unknown command %d\n", __FUNCTION__, gJobQ.cmd);
4877 - if (ret == 0) { /* WAR worked */
4878 - // Submit next job (which is our dummy job in WAR)
4879 - submitted = brcmnand_isr_submit_job();
4882 - eduIsrNode_t* tmp;
4884 - // Abort queue, TBD
4885 - list_for_each_entry_safe(req, tmp, &gJobQ.jobQ, list) {
4886 - list_del(&req->list);
4888 - list_add_tail(&req->list, &gJobQ.availList);
4891 + if (!gEduIsrData.opComplete && ret <= 0) {
4892 + ISR_disable_irq(gEduIsrData.intr);
4893 + if (ret == -ERESTARTSYS) {
4894 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4895 + return (uint32_t) (ERESTARTSYS); // Retry on Read
4897 + else if (ret == 0) {
4898 + //gEduIsrData.opComplete = 1;
4899 + printk("%s: DMA timedout\n", __FUNCTION__);
4900 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4901 + return 0; // Timed Out
4903 - else { // List is empty
4904 - ret = 0; // Loop exit condition
4906 - spin_unlock_irqrestore(&gJobQ.lock, flags);
4907 - } while ((ret == -ERESTARTSYS) && time_before(jiffies, expired));
4910 + // DMA completes on Done or Error.
4911 + //rd_data = ISR_volatileRead(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS);
4913 + printk("%s: EDU completes but Status is %08x\n", __FUNCTION__, gEduIsrData.status);
4914 + //rd_data = 0; // Treat as a timeout
4916 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4917 + return gEduIsrData.status;
4921 -#if 0 //ndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
4924 - * Wait for completion when not using queue
4926 -uint32_t ISR_wait_for_completion(void)
4927 +uint32_t ISR_cache_is_valid(uint32_t clearMask)
4929 - //uint32_t rd_data;
4930 -//volatile unsigned int c = 0xfedeadad;
4931 - int ret = -ERESTARTSYS;
4932 - unsigned long to_jiffies = 3*HZ; /* 3 secs */
4933 - //unsigned long cur_jiffies = jiffies;
4934 - unsigned long expired = jiffies + to_jiffies;
4937 - //unsigned long flags;
4938 -//volatile unsigned int counter = 0xAABBCCDD;
4939 -//static int erestartsys = 0;
4940 + uint32_t rd_data = ISR_volatileRead(BCM_BASE_ADDRESS+BCHP_HIF_INTR2_CPU_STATUS);
4941 + unsigned long flags;
4944 - while (ret == -ERESTARTSYS ) {
4945 -//printk("%s: jiffies=%08lx, expired=%08lx\n", __FUNCTION__, jiffies, expired);
4946 - if (((retries--) < 0) || time_after(jiffies, expired)) {
4947 - ret = 0; // Timed out
4948 - return ERESTARTSYS;
4951 - // Recalculate TO, for retries
4952 - to_jiffies = expired - jiffies;
4953 - //ret = wait_event_interruptible_timeout(gEduWaitQ, gEduIsrData.opComplete, to_jiffies);
4954 - ret = wait_event_timeout(gEduWaitQ, gEduIsrData.opComplete, to_jiffies);
4957 + * Already there, no need to wait
4959 + if (rd_data & HIF_INTR2_CTRL_READY)
4962 -PRINTK3("==>%s\n", __FUNCTION__);
4963 - down(&gEduIsrData.lock);
4965 - cmd = gEduIsrData.cmd;
4966 - gEduIsrData.cmd = -1;
4968 - if (!gEduIsrData.opComplete && ret <= 0) {
4969 - ISR_disable_irq(gEduIsrData.intr);
4971 - if (ret == -ERESTARTSYS) {
4972 - up(&gEduIsrData.lock);
4974 -//if (5 >= erestartsys++)
4975 -//printk("Pending signals: %08lx-%08lx-%08lx-%08lx\n",
4976 -//current->pending.signal.sig[0], current->pending.signal.sig[1],current->pending.signal.sig[2], current->pending.signal.sig[3]);
4979 - else if (ret == 0) {
4980 - //gEduIsrData.opComplete = 1;
4981 - PRINTK("%s: DMA timedout\n", __FUNCTION__);
4983 - up(&gEduIsrData.lock);
4984 -//printk("<==%s, ret=0 TimeOut\n", __FUNCTION__);
4985 -PRINTK4("<==%s, ret=0 TimeOut\n", __FUNCTION__);
4987 - return 0; // Timed Out
4992 - // DMA completes on Done or Error.
4993 - //rd_data = ISR_volatileRead(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS);
4994 + // Clear existing interrupt
4995 + ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_SET, clearMask);
4998 + spin_lock_irqsave(&gEduIsrData.lock, flags);
4999 + gEduIsrData.flashAddr = 0;
5000 + gEduIsrData.dramAddr = 0;
5002 -PRINTK("%s: EDU completes but Status is %08x\n", __FUNCTION__, gEduIsrData.status);
5003 - //rd_data = 0; // Treat as a timeout
5006 + * Enable L2 Interrupt
5008 + gEduIsrData.cmd = NAND_CTRL_READY;
5009 + gEduIsrData.opComplete = 0;
5010 + gEduIsrData.status = 0;
5012 + gEduIsrData.mask = HIF_INTR2_CTRL_READY;
5013 + gEduIsrData.expect = HIF_INTR2_CTRL_READY;
5014 + gEduIsrData.error = 0;
5015 + gEduIsrData.intr = HIF_INTR2_CTRL_READY;
5017 - up(&gEduIsrData.lock);
5019 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5021 - return gEduIsrData.status;
5026 + rd_data = ISR_wait_for_completion();
5027 + } while (rd_data != 0 && !(rd_data & HIF_INTR2_CTRL_READY));
5031 - * Since we cannot use the interrupt, or call schedule, we will have to busy-wait for controller ready.
5032 - * Executes in interrupt context
5035 -ISR_cache_is_valid(void)
5038 - unsigned long expired = jiffies + HZ/10000; /* 100 usec, enough for any flash op to complete */
5041 - rd_data = ISR_volatileRead(BCM_BASE_ADDRESS+BCHP_HIF_INTR2_CPU_STATUS);
5043 - } while (!(rd_data & HIF_INTR2_CTRL_READY) && time_before(jiffies, expired));
5044 - return (0 != (rd_data & HIF_INTR2_CTRL_READY)) ;
5052 - unsigned long flags;
5054 - //init_MUTEX(&gEduIsrData.lock); // Write lock
5055 - spin_lock_init(&gJobQ.lock); // Read queue lock
5056 + spin_lock_init(&gEduIsrData.lock);
5058 - INIT_LIST_HEAD(&gJobQ.jobQ);
5059 - INIT_LIST_HEAD(&gJobQ.availList);
5060 - /* Add all nodes from pool to avail list */
5062 - spin_lock_irqsave(&gJobQ.lock, flags);
5063 -PRINTK("%s: B4\n", __FUNCTION__);
5064 -ISR_print_avail_list();
5065 - for (i=0; i<MAX_JOB_QUEUE_SIZE;i++) {
5066 - eduIsrNode_t* e = &gEduIsrPool[i];
5068 - //init_MUTEX(&e->lock);
5069 - list_add_tail(&e->list, &gJobQ.availList);
5071 - spin_unlock_irqrestore(&gJobQ.lock, flags);
5072 -PRINTK("%s: After\n", __FUNCTION__);
5073 -ISR_print_avail_list();
5076 // Mask all L2 interrupts
5077 intrMask = ISR_volatileRead(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_STATUS);
5078 ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_SET, ~intrMask);
5081 - ret = request_irq(BCM_LINUX_CPU_INTR1_IRQ, ISR_isr, SA_SHIRQ, "brcmnand EDU", &gJobQ);
5082 + ret = request_irq(BCM_LINUX_CPU_INTR1_IRQ, ISR_isr, SA_SHIRQ, "brcmnand EDU", &gEduIsrData);
5084 printk(KERN_INFO "%s: request_irq(BCM_LINUX_CPU_INTR1_IRQ) failed ret=%d. Someone not sharing?\n",
5091 Index: drivers/mtd/brcmnand/eduproto.h
5092 ===================================================================
5093 --- drivers/mtd/brcmnand/eduproto.h (revision 1)
5094 +++ drivers/mtd/brcmnand/eduproto.h (working copy)
5098 extern void EDU_init(void);
5099 -extern int EDU_write(volatile const void*, uint32_t, uint32_t*);
5100 +extern int EDU_write(volatile const void*, uint32_t);
5101 extern int EDU_read(volatile void*, uint32_t);
5103 extern uint32_t EDU_get_error_status_register(void);
5104 Index: drivers/mtd/brcmnand/brcmnand_priv.h
5105 ===================================================================
5106 --- drivers/mtd/brcmnand/brcmnand_priv.h (revision 1)
5107 +++ drivers/mtd/brcmnand/brcmnand_priv.h (working copy)
5109 #include <linux/wait.h>
5110 #include <linux/spinlock.h>
5111 #include <linux/interrupt.h>
5112 -#include <linux/list.h>
5117 -#define BRCMNAND_CORRECTABLE_ECC_ERROR (1)
5118 -#define BRCMNAND_SUCCESS (0)
5119 -#define BRCMNAND_UNCORRECTABLE_ECC_ERROR (-1)
5120 -#define BRCMNAND_FLASH_STATUS_ERROR (-2)
5121 -#define BRCMNAND_TIMED_OUT (-3)
5123 -#ifdef CONFIG_MTD_BRCMNAND_EDU
5124 -#define BRCMEDU_CORRECTABLE_ECC_ERROR (4)
5125 -#define BRCMEDU_UNCORRECTABLE_ECC_ERROR (-4)
5127 -#define BRCMEDU_MEM_BUS_ERROR (-5)
5130 +#if defined( CONFIG_MTD_BRCMNAND_EDU )
5131 #define BRCMNAND_malloc(size) kmalloc(size, GFP_DMA)
5132 #define BRCMNAND_free(addr) kfree(addr)
5135 #define BRCMNAND_malloc(size) vmalloc(size)
5136 #define BRCMNAND_free(addr) vfree(addr)
5137 @@ -77,125 +63,31 @@
5138 "nop; nop; nop; nop; nop; nop;\n\t" \
5142 - * Right now we submit a full page Read for queueing, so with a 8KB page,
5143 - * and an ECC step of 512B, the queue depth is 16. Add 2 for dummy elements
5146 -#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_3_3
5147 -#define MAX_NAND_PAGE_SIZE (4<<10)
5150 -#define MAX_NAND_PAGE_SIZE (8<<10)
5152 +typedef struct eduIsrData {
5153 + spinlock_t lock; // For SMP and future double buffering on Read.
5154 + int cmd; // 1 == Read, 0 == Write
5156 -/* Max queue size is (PageSize/512B_ECCSize)+2 spare for WAR */
5157 -#define MAX_JOB_QUEUE_SIZE ((MAX_NAND_PAGE_SIZE>>9))
5160 - ISR_OP_QUEUED = 0,
5161 - ISR_OP_SUBMITTED = 1,
5162 - ISR_OP_NEED_WAR = 2,
5163 - ISR_OP_COMPLETED = 3,
5164 - ISR_OP_TIMEDOUT = 4
5167 -typedef struct eduIsrNode {
5168 - struct list_head list;
5169 - spinlock_t lock; // per Node update lock
5170 - // int cmd; // 1 == Read, 0 == Write
5173 uint32_t mask; /* Clear status mask */
5174 uint32_t expect; /* Status on success */
5175 uint32_t error; /* Status on error */
5176 uint32_t intr; /* Interrupt bits */
5177 uint32_t status; /* Status read during ISR. There may be several interrupts before completion */
5178 - isrOpStatus_t opComplete; /* Completion status */
5179 + int opComplete; /* Completion criterium */
5181 - /* Controller Level params (for queueing) */
5182 - struct mtd_info* mtd;
5188 + /* For debugging only */
5189 + uint32_t flashAddr;
5190 + uint32_t dramAddr;
5193 - /* EDU level params (for ISR) */
5195 - uint32_t physAddr;
5196 - uint32_t hif_intr2;
5197 - uint32_t edu_status;
5198 +extern eduIsrData_t gEduIsrData;
5200 - int refCount; /* Marked for re-use when refCount=0 */
5201 - unsigned long expired; /* Time stamp for expiration, 3 secs from submission */
5205 - * Read/Write Job Q.
5206 - * Process one page at a time, and queue 512B sector Read or Write EDU jobs.
5207 - * ISR will wake up the process context thread iff
5208 - * 1-EDU reports an error, in which case the process context thread need to be awaken
5209 - * in order to do WAR
5210 - * 2-Q is empty, in which case the page read/write op is complete.
5212 -typedef struct jobQ_t {
5213 - struct list_head jobQ; /* Nodes queued for EDU jobs */
5214 - struct list_head availList; /* Free Nodes */
5215 - spinlock_t lock; /* Queues guarding spin lock */
5216 - int needWakeUp; /* Wake up Process context thread to do EDU WAR */
5217 - int cmd; /* 1 == Read, 0 == Write */
5220 -extern isrJobQ_t gJobQ;
5222 void ISR_init(void);
5225 - * Submit the first entry that is in queued state,
5226 - * assuming queue lock has been held by caller.
5228 - * @doubleBuffering indicates whether we need to submit just 1 job or until EDU is full (double buffering)
5229 - * Return the number of job submitted for read.
5231 - * In current version (v3.3 controller), since EDU only have 1 register for EDU_ERR_STATUS,
5232 - * we can't really do double-buffering without losing the returned status of the previous read-op.
5234 -#undef EDU_DOUBLE_BUFFER_READ
5236 -int brcmnand_isr_submit_job(void);
5238 -eduIsrNode_t* ISR_queue_read_request(struct mtd_info *mtd,
5239 - void* buffer, u_char* oobarea, loff_t offset);
5240 -eduIsrNode_t* ISR_queue_write_request(struct mtd_info *mtd,
5241 - const void* buffer, const u_char* oobarea, loff_t offset);
5242 -eduIsrNode_t* ISR_push_request(struct mtd_info *mtd,
5243 - void* buffer, u_char* oobarea, loff_t offset);
5246 -int brcmnand_edu_read_completion(struct mtd_info* mtd,
5247 - void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status);
5249 -int brcmnand_edu_read_comp_intr(struct mtd_info* mtd,
5250 - void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status);
5252 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
5253 -int brcmnand_edu_write_completion(struct mtd_info *mtd,
5254 - const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status,
5257 -eduIsrNode_t* ISR_find_request( isrOpStatus_t opStatus);
5259 uint32_t ISR_wait_for_completion(void);
5260 +uint32_t ISR_cache_is_valid(uint32_t clearMask);
5263 - * wait for completion with read/write Queue
5265 -int ISR_wait_for_queue_completion(void);
5267 -int ISR_cache_is_valid(void);
5269 -static __inline__ uint32_t ISR_volatileRead(uint32_t addr)
5270 +static inline uint32_t ISR_volatileRead(uint32_t addr)
5272 volatile uint32_t* pAddr;
5275 return *(uint32_t *)pAddr;
5278 -static __inline__ void ISR_volatileWrite(uint32_t addr, uint32_t data)
5279 +static inline void ISR_volatileWrite(uint32_t addr, uint32_t data)
5281 volatile uint32_t* pAddr;
5284 *pAddr = (volatile uint32_t)data;
5287 -static __inline__ void ISR_enable_irq(eduIsrNode_t* req)
5288 +static inline void ISR_enable_irq(void)
5291 //unsigned long flags;
5292 @@ -220,68 +112,42 @@
5293 //spin_lock_irqsave(&gEduIsrData.lock, flags);
5295 // Clear status bits
5296 - ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, req->mask);
5297 + ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, gEduIsrData.mask);
5300 + // Disable everything that may screw us up
5301 + intrMask = EDU_volatileRead(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_STATUS);
5302 + EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_SET, ~intrMask);
5303 +PRINTK("%s-1: intrMask=%08x\n", __FUNCTION__, intrMask);
5309 - ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_CLEAR, req->intr);
5310 + ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_CLEAR, gEduIsrData.intr);
5313 +intrMask = EDU_volatileRead(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_STATUS);
5314 +PRINTK("%s-2: intrMask=%08x\n", __FUNCTION__, intrMask);
5316 //spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5319 -static __inline__ void ISR_disable_irq(uint32_t mask)
5320 +static inline void ISR_disable_irq(uint32_t mask)
5323 /* Disable L2 interrupts */
5324 ISR_volatileWrite(BCM_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_MASK_SET, mask);
5326 + /* Clear L2 interrupts */
5327 + //EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, mask);
5338 -static void __inline__
5339 -ISR_print_queue(void)
5341 - eduIsrNode_t* req;
5342 - //struct list_head* node;
5345 - list_for_each_entry(req, &gJobQ.jobQ, list) {
5347 - printk("i=%d, cmd=%d, offset=%08llx, flashAddr=%08x, opComp=%d, status=%08x\n",
5348 - i, gJobQ.cmd, req->offset, req->edu_ldw,req->opComplete, req->status);
5353 -static void __inline__
5354 -ISR_print_avail_list(void)
5356 - eduIsrNode_t* req;
5357 - //struct list_head* node;
5360 - printk("AvailList=%p, next=%p\n", &gJobQ.availList, gJobQ.availList.next);
5361 - list_for_each_entry(req, &gJobQ.availList, list) {
5362 - printk("i=%d, req=%p, list=%p\n", i, req, &req->list);
5367 -#define IS_print_queue()
5368 -#define ISR_print_avail_list()
5369 -#endif // DEBUG_ISR
5372 -#endif // CONFIG_MTD_BRCMNAND_USE_ISR
5378 * brcmnand_scan - [BrcmNAND Interface] Scan for the BrcmNAND device
5379 * @param mtd MTD device structure
5380 Index: drivers/mtd/brcmnand/edu.c
5381 ===================================================================
5382 --- drivers/mtd/brcmnand/edu.c (revision 1)
5383 +++ drivers/mtd/brcmnand/edu.c (working copy)
5387 #include <linux/mm.h>
5388 -#include <linux/dma-mapping.h>
5389 #include <asm/page.h>
5392 @@ -134,11 +133,11 @@
5396 -int EDU_buffer_OK(volatile void* vaddr, int command)
5397 +int EDU_buffer_OK(volatile void* vaddr)
5399 unsigned long addr = (unsigned long) vaddr;
5401 -#if !defined(CONFIG_MIPS_BCM7440) && !defined(CONFIG_MIPS_BCM7601) && !defined(CONFIG_MIPS_BCM7635)
5402 +#if !defined(CONFIG_MIPS_BCM7440) && !defined(CONFIG_MIPS_BCM7601)
5403 // Requires 32byte alignment only of platforms other than 7440 and 7601 (and Dune)
5405 // Must be 32-byte-aligned
5406 @@ -155,14 +154,11 @@
5411 else if (!(addr & KSEG0)) {
5418 // TBD: Since we only enable block for MEM0, we should make sure that the physical
5419 // address falls in MEM0.
5421 @@ -170,13 +166,6 @@
5426 -#if 0 //def CONFIG_MIPS_BCM7420
5427 - else if (command == EDU_WRITE && (addr & 0xff)) { // Write must be aligned on 256B
5428 -printk("Write must be aligned on 128B (addr=%08x)\n", addr);
5435 @@ -518,10 +507,6 @@
5436 * Read data on success or error.
5440 -dump_nand_regs(struct brcmnand_chip* chip, loff_t offset, uint32_t pa, int which);
5441 -#define MAX_DUMPS 10
5442 -extern int numDumps;
5444 uint32_t EDU_poll(uint32_t address, uint32_t expect, uint32_t error, uint32_t mask)
5446 @@ -535,11 +520,6 @@
5447 address, expect, mask, error);
5449 rd_data = EDU_volatileRead(address);
5450 -if (numDumps < MAX_DUMPS)
5452 - dump_nand_regs(NULL, 0, 0, numDumps++);
5457 timeout = jiffies + msecs_to_jiffies(1000); // 3 sec timeout for now (testing)
5458 @@ -548,23 +528,18 @@
5459 // while ((rd_data & mask) != (expect & mask)) /* && (i<cnt) */
5460 while (((rd_data & mask) != (expect & mask)) && !((rd_data & mask) & error))
5463 if ( 0 /*(i %1000000) == 1 */)
5464 {PRINTK("Polling addr=%08x, expect=%08x, mask=%08x!\n", address, expect, mask);
5465 PRINTK("EDU_poll read: %08x\n", rd_data);}
5467 //__sync(); //PLATFORM_IOFLUSH_WAR();
5468 rd_data = EDU_volatileRead(address);
5470 - // JR+ 2008-02-01 Allow other tasks to run while waiting
5472 + // JR+ 2008-02-01 Allow other tasks to run while waiting
5475 // JR- 2008-02-01 Allow other tasks to run while waiting
5476 -if (numDumps < MAX_DUMPS)
5478 - dump_nand_regs(NULL, 0, 0, numDumps++);
5483 if(!time_before(jiffies, timeout))
5486 // SUN_GISB_ARB_TIMER = 0x10000
5487 EDU_volatileWrite(0xb040600c, 0x00010000);
5489 -#elif defined( CONFIG_MIPS_BCM7601 ) || defined( CONFIG_MIPS_BCM7635 )
5490 +#elif defined( CONFIG_MIPS_BCM7601 )
5492 #define ENABLE_256MB_GISB_WINDOW 0x1
5493 volatile unsigned long* PCI_GEN_GISB_WINDOW_SIZE =
5497 #elif defined( CONFIG_MIPS_BCM7420 )
5498 - // Make sure that RTS grants some cycle to EDU, or we have to steal some from RR
5499 + // Make sure that RTS grants some cycle to EDU, or we have to steal some
5501 #define BLOCKED_OUT 0x001fff00
5502 #define RR_ENABLED 0x80 /* Bit 7 */
5503 @@ -708,29 +683,6 @@
5504 volatile unsigned long* PCI_GEN_PCI_CTRL =
5505 (volatile unsigned long*) KSEG1ADDR(0x10440104);
5506 volatile unsigned long pci_gen_pci_ctrl;
5508 -#if 0 // Block out MoCA
5509 - volatile unsigned long* MEMC_0_1_CLIENT_INFO_59=
5510 - (volatile unsigned long*) KSEG1ADDR(0x103b10f0);
5511 - volatile unsigned long memc_client_59;
5512 - volatile unsigned long* MEMC_0_1_CLIENT_INFO_62=
5513 - (volatile unsigned long*) KSEG1ADDR(0x103b10fc);
5514 - volatile unsigned long memc_client_62;
5516 - /* Bits 08-20 are all 1 == Blocked */
5517 - memc_client_59 = *MEMC_0_1_CLIENT_INFO_59;
5518 - printk("MEMC_0_1_CLIENT_INFO_59 Before=%08lx\n", memc_client_59);
5519 - *MEMC_0_1_CLIENT_INFO_59 = memc_client_59|0x001fff00;
5520 - *MEMC_0_1_CLIENT_INFO_59 &= ~RR_ENABLED;
5521 - printk("MEMC_0_1_CLIENT_INFO_59 After blocked out=%08lx\n", *MEMC_0_1_CLIENT_INFO_59);
5523 - memc_client_62 = *MEMC_0_1_CLIENT_INFO_62;
5524 - printk("MEMC_0_1_CLIENT_INFO_62 Before=%08lx\n", memc_client_62);
5525 - *MEMC_0_1_CLIENT_INFO_62 = memc_client_62|0x001fff00;
5526 - *MEMC_0_1_CLIENT_INFO_62 &= ~RR_ENABLED;
5527 - printk("MEMC_0_1_CLIENT_INFO_62 After blocked out=%08lx\n", *MEMC_0_1_CLIENT_INFO_62);
5531 /* Bits 08-20 are all 1 == Blocked */
5532 memc_client_17 = *MEMC_0_1_CLIENT_INFO_17;
5534 pci_gen_pci_ctrl = *PCI_GEN_PCI_CTRL;
5535 pci_gen_pci_ctrl &= ~PARK_ON_MASK;
5536 pci_gen_pci_ctrl |= PARK_ON_EBI;
5537 - EDU_volatileWrite(PCI_GEN_PCI_CTRL, pci_gen_pci_ctrl);
5541 @@ -771,50 +722,37 @@
5545 -#ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE // batch mode
5548 * THT: 07/31/08: This does not work. One has to write the 512B Array from the NAND controller into
5549 * the EXT registers for it to work. Will fix it when I come back.
5551 -int EDU_write(volatile const void* virtual_addr_buffer,
5552 - uint32_t external_physical_device_address,
5553 - uint32_t* physAddr)
5554 +int EDU_write(volatile const void* virtual_addr_buffer, uint32_t external_physical_device_address)
5556 - //uint32_t phys_mem;
5557 + uint32_t phys_mem;
5558 // uint32_t rd_data;
5559 - //unsigned long flags;
5560 + unsigned long flags;
5565 phys_mem = EDU_virt_to_phys((void *)virtual_addr_buffer);
5568 - // EDU is not a PCI device
5569 - // THT: TBD: Need to adjust for cache line size here, especially on 7420.
5570 - *physAddr = dma_map_single(NULL, virtual_addr_buffer, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
5573 - if (!(*physAddr)) {
5580 -//printk("EDU_write: vBuff: %p physDev: %08x, PA=%08x\n",
5581 -// virtual_addr_buffer, external_physical_device_address, phys_mem);
5582 +//PRINTK("EDU_write: vBuff: %p physDev: %08x, PA=%08x\n",
5583 +//virtual_addr_buffer, external_physical_device_address, phys_mem);
5585 #ifdef CONFIG_MTD_BRCMNAND_USE_ISR
5586 - down(&gEduIsrData.lock);
5587 - gEduIsrData.edu_ldw = external_physical_device_address;
5588 - gEduIsrData.physAddr = *physAddr;
5589 + spin_lock_irqsave(&gEduIsrData.lock, flags);
5590 + gEduIsrData.flashAddr = external_physical_device_address;
5591 + gEduIsrData.dramAddr = phys_mem;
5594 * Enable L2 Interrupt
5596 gEduIsrData.cmd = EDU_WRITE;
5597 - gEduIsrData.opComplete = ISR_OP_SUBMITTED;
5598 + gEduIsrData.opComplete = 0;
5599 gEduIsrData.status = 0;
5601 /* On write we wait for both DMA done|error and Flash Status */
5603 gEduIsrData.error = HIF_INTR2_EDU_ERR;
5604 gEduIsrData.intr = HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY;
5606 - up(&gEduIsrData.lock);
5607 - ISR_enable_irq(&gEduIsrData);
5608 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5612 EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EDU_CLEAR_MASK);
5613 @@ -838,17 +776,15 @@
5615 //EDU_waitForNoPendingAndActiveBit();
5617 -// Already covered by dma_map_single()
5618 -// dma_cache_wback((unsigned long) virtual_addr_buffer, EDU_LENGTH_VALUE);
5620 - EDU_issue_command(*physAddr, external_physical_device_address, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
5621 + dma_cache_wback((unsigned long) virtual_addr_buffer, EDU_LENGTH_VALUE);
5623 + EDU_issue_command(phys_mem, external_physical_device_address, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
5625 // rd_data = EDU_poll(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_DONE, HIF_INTR2_EDU_DONE);
5626 // EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_DONE, 0x00000000);
5629 -//printk("<-- %s\n", __FUNCTION__);
5635 // uint32_t rd_data;
5638 - //unsigned long flags;
5639 + unsigned long flags;
5643 @@ -877,31 +813,33 @@
5646 //PRINTK("--> %s: vAddr=%p, ext=%08x\n", __FUNCTION__, virtual_addr_buffer, external_physical_device_address);
5648 phys_mem = EDU_virt_to_phys((void *)virtual_addr_buffer);
5653 - // THT: TBD: Need to adjust for cache line size here, especially on 7420.
5654 - phys_mem = dma_map_single(NULL, virtual_addr_buffer, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
5657 if (edu_debug) PRINTK("EDU_read: vBuff: %p physDev: %08x, PA=%08x\n",
5658 virtual_addr_buffer, external_physical_device_address, phys_mem);
5660 #ifdef CONFIG_MTD_BRCMNAND_USE_ISR
5661 - down(&gEduIsrData.lock);
5662 - gEduIsrData.edu_ldw = external_physical_device_address;
5663 - gEduIsrData.physAddr = phys_mem;
5664 + spin_lock_irqsave(&gEduIsrData.lock, flags);
5665 + gEduIsrData.flashAddr = external_physical_device_address;
5666 + gEduIsrData.dramAddr = phys_mem;
5669 * Enable L2 Interrupt
5671 gEduIsrData.cmd = EDU_READ;
5672 - gEduIsrData.opComplete = ISR_OP_SUBMITTED;
5673 + gEduIsrData.opComplete = 0;
5674 gEduIsrData.status = 0;
5677 + /* On Read we only wait for DMA completion or Error */
5678 + gEduIsrData.mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
5679 + gEduIsrData.expect = HIF_INTR2_EDU_DONE;
5680 + gEduIsrData.error = HIF_INTR2_EDU_ERR;
5681 + gEduIsrData.intr = HIF_INTR2_EDU_DONE_MASK;
5684 // We must also wait for Ctlr_Ready, otherwise the OOB is not correct, since we read the OOB bytes off the controller
5687 // On error we also want Ctrlr-Ready because for COR ERR, the Hamming WAR depends on the OOB bytes.
5688 gEduIsrData.error = HIF_INTR2_EDU_ERR;
5689 gEduIsrData.intr = HIF_INTR2_EDU_DONE_MASK;
5690 - up(&gEduIsrData.lock);
5691 + spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5693 - ISR_enable_irq(&gEduIsrData);
5697 EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EDU_CLEAR_MASK);
5698 @@ -928,7 +866,29 @@
5699 //EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_DONE, 0x00000000);
4703 + if( (EDU_volatileRead(EDU_BASE_ADDRESS + EDU_DONE) & 0x00000003) != 0)
5705 + PRINTK("EDU_DONE != 0!!!\n");
5708 EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_ERR_STATUS, 0x00000000);
5710 + if( EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS) != 0)
5712 + PRINTK("EDU_ERR_STATUS != 0!!!\n");
5716 +#if 1 //ndef CONFIG_BMIPS4380
5717 + dma_cache_inv((unsigned long) virtual_addr_buffer, EDU_LENGTH_VALUE);
5720 + extern void (*flush_cache_all)(void);
5722 + flush_cache_all();
5726 EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_LENGTH, EDU_LENGTH_VALUE);
5728 @@ -956,13 +916,8 @@
5729 HIF_INTR2_EDU_DONE_MASK);
5732 - (void) dma_unmap_single(NULL, phys_mem, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
5734 if (edu_debug) PRINTK("<-- %s ret=%08x\n", __FUNCTION__, ret);
5736 if (edu_debug > 3 && ret) {show_stack(current,NULL);dump_stack();}
5740 -#endif // Batch mode