Merge commit 'opendreambox/opendreambox-1.6' into vuplus-1.6
recipes/linux/linux-vusolo/linux-vusolo_nand.patch (vuplus_openembedded)
1 Index: drivers/mtd/brcmnand/bcm7xxx-nand.c
2 ===================================================================
3 --- drivers/mtd/brcmnand/bcm7xxx-nand.c (revision 1)
4 +++ drivers/mtd/brcmnand/bcm7xxx-nand.c (working copy)
5 @@ -74,31 +74,18 @@
6   *     start of flash  1f7f_ffff               flashSize-8MB   rootfs          Linux File System
7   */
8  #define SMALLEST_FLASH_SIZE    (16<<20)
9 -#define DEFAULT_RESERVED_SIZE  (8<<20) 
10 +#ifdef USE_SPLASH
11 +#define DEFAULT_RESERVED_SIZE  (14<<20) 
12 +#else
13 +#define DEFAULT_RESERVED_SIZE  (12<<20) 
14 +#endif
15  #define DEFAULT_SPLASH_SIZE    (1<<20)
16  #define DEFAULT_BBT0_SIZE_MB   (1)
17  #define DEFAULT_BBT1_SIZE_MB   (4)
18  
19  #define ROOTFS_PART    (0)
20  
21 -#if defined( CONFIG_MTD_BRCMNAND_DISABLE_XOR )
22 -/* Implies new partition scheme, starting with 7420
23 -       cfe: 0-4MB (not mapped)
24 -       mtd0: rootfs: Starts at 4MB offset
25 -       mtd1: all flash less BBT0 (1MB) for flash <= 512MB
26 -       mtd2: Kernel (4MB)
27 -       mtd3: Data, for flash>512MB, from 512MB up to flash - BBT1 (4MB)
28 - */
29 -
30 -#define ALL_PART                               (1)
31 -#define KERNEL_PART                    (2)
32 -#define DATA_PART                      (3)
33 -#define AVAIL1_PART                    (-1)
34 -
35 -#define DEFAULT_ECM_SIZE       (0)
36 -#define DEFAULT_AVAIL1_SIZE    (0)
37 -
38 -#elif defined( CONFIG_MTD_NEW_PARTITION )
39 +#ifdef CONFIG_MTD_NEW_PARTITION
40  /* New partition scheme, starting with 7420
41         mtd0: rootfs
42         mtd1: all flash less BBT0 (1MB) for flash <= 512MB
43 @@ -114,25 +101,18 @@
44  #define DEFAULT_ECM_SIZE       (0)
45  #define DEFAULT_AVAIL1_SIZE    (0)
46  
47 -#else
48 -  #if defined( CONFIG_MTD_ECM_PARTITION )
49 +#elif defined( CONFIG_MTD_ECM_PARTITION )
50  #define DEFAULT_OCAP_SIZE      (6<<20)
51  #define DEFAULT_AVAIL1_SIZE (32<<20)
52  #define DEFAULT_ECM_SIZE (DEFAULT_OCAP_SIZE+DEFAULT_AVAIL1_SIZE)
53  #define AVAIL1_PART    (1)
54  #define OCAP_PART      (2)
55 -  #else
56 +#else
57  #define DEFAULT_ECM_SIZE       (0)
58  #define DEFAULT_OCAP_SIZE      (0)
59  #define DEFAULT_AVAIL1_SIZE    (0)
60  #define AVAIL1_PART    (-1)
61  #define OCAP_PART      (-1)
62 -  #endif // if ECM
63 -
64 -  /* Definitions for NOR+NAND */
65 -#define ALL_PART                               (1)
66 -#define KERNEL_PART                    (2)
67 -#define DATA_PART                      (3)
68  #endif
69  #define DEFAULT_ROOTFS_SIZE (SMALLEST_FLASH_SIZE - DEFAULT_RESERVED_SIZE - DEFAULT_ECM_SIZE)
70  
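
The reserved-size change above is easiest to sanity-check numerically. The following is only an illustrative sketch, not part of the patch; it copies the macros from the hunk above and assumes the non-ECM defaults (DEFAULT_ECM_SIZE of 0).

#include <stdio.h>

/* Macro values copied from the hunk above; ECM size assumed 0 (non-ECM build). */
#define SMALLEST_FLASH_SIZE    (16 << 20)
#ifdef USE_SPLASH
#define DEFAULT_RESERVED_SIZE  (14 << 20)
#else
#define DEFAULT_RESERVED_SIZE  (12 << 20)
#endif
#define DEFAULT_ECM_SIZE       (0)
#define DEFAULT_ROOTFS_SIZE    (SMALLEST_FLASH_SIZE - DEFAULT_RESERVED_SIZE - DEFAULT_ECM_SIZE)

int main(void)
{
        /* USE_SPLASH: 16MB - 14MB = 2MB; otherwise 16MB - 12MB = 4MB on the smallest device. */
        printf("rootfs on a %d MB device: %d MB\n",
               SMALLEST_FLASH_SIZE >> 20, DEFAULT_ROOTFS_SIZE >> 20);
        return 0;
}
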
71 @@ -147,22 +127,9 @@
72  #define N_ALL          "all"
73  
74  
75 -static struct mtd_partition bcm7XXX_no_xor_partition[] = 
76 +static struct mtd_partition bcm7XXX_nand_parts[] = 
77 +#ifdef CONFIG_MTD_NEW_PARTITION
78  {
79 -       /* XOR disabled: Everything is shifted down 4MB */
80 -       { name: N_ROOTFS,       offset: 0x00400000,                     size: DEFAULT_ROOTFS_SIZE - (DEFAULT_BBT0_SIZE_MB <<20) },      // Less 1MB for BBT
81 -       { name: N_ALL,          offset: 0,                                      size: DEFAULT_ROOTFS_SIZE - (DEFAULT_BBT0_SIZE_MB <<20) },
82 -       { name: N_KERNEL,       offset: 0x00b00000,                     size: 4<<20 }, 
83 -       /* BBT0 1MB not mountable by anyone */
84 -
85 -       /* Following partitions only present on flash with size > 512MB */
86 -       { name: N_DATA,         offset: 0x20000000,                     size: 0 },
87 -       /* BBT1 4MB not mountable by anyone */
88 -       {name: NULL,            offset: 0,                                      size: 0}        /* End marker */
89 -};
90 -
91 -static struct mtd_partition bcm7XXX_new_partition[] = 
92 -{
93         { name: N_ROOTFS,       offset: 0,                                      size: DEFAULT_ROOTFS_SIZE },    
94         { name: N_ALL,          offset: 0x0,                                    size: DEFAULT_ROOTFS_SIZE - (DEFAULT_BBT0_SIZE_MB <<20) },
95         { name: N_KERNEL,       offset: 0x00800000,                     size: 4<<20 },
96 @@ -174,15 +141,24 @@
97         {name: NULL,            offset: 0,                                      size: 0}        /* End marker */
98  };
99  
100 -static struct mtd_partition bcm7XXX_old_partition[] = 
101 +#else
102  {
103         { name: N_ROOTFS,       offset: 0,                                      size: DEFAULT_ROOTFS_SIZE },    
104  #ifdef CONFIG_MTD_ECM_PARTITION
105         { name: N_AVAIL1,       offset: DEFAULT_ROOTFS_SIZE,    size: DEFAULT_AVAIL1_SIZE },
106         { name: N_OCAP,         offset: DEFAULT_ROOTFS_SIZE+DEFAULT_AVAIL1_SIZE,        size: DEFAULT_OCAP_SIZE },
107  #endif
108 -       { name: N_KERNEL,       offset: 0x00800000,                     size: 4<<20 },
109 -       { name: N_CFE,          offset: 0x00C00000,                     size: 2<<20 },
110 +#ifdef USE_SPLASH
111 +       { name: N_KERNEL,       offset: 0x00200000,                     size: 4<<20 },
112 +       { name: "boot",         offset: 0x00600000,                     size: 4<<20 },
113 +       { name: "bootimg",      offset: 0x00A00000,                     size: 2<<20 },
114 +#else
115 +       { name: N_KERNEL,       offset: 0x00400000,                     size: 4<<20 },
116 +       { name: "boot",         offset: 0x00800000,                     size: 4<<20 },
117 +#endif
118 +       { name: N_CFE,          offset: 0x00C00000,                     size: 1<<20 },
119 +       { name: "mac",          offset: 0x00D00000,                     size: 1<<19 },
120 +       { name: "env",          offset: 0x00D80000,                     size: 1<<19 },
121         { name: N_NVM,          offset: 0x00E00000,                     size: 1<<20 },
122         /* BBT 1MB not mountable by anyone */
123         { name: N_DATA,         offset: 0x20000000,             size: 0 },
124 @@ -190,15 +166,6 @@
125         {name: NULL, offset: 0, size: 0},
126         {name: NULL, offset: 0, size: 0}
127  };
128 -
129 -#if defined( CONFIG_MTD_BRCMNAND_DISABLE_XOR )
130 -static struct mtd_partition* bcm7XXX_nand_parts = bcm7XXX_no_xor_partition;
131 -
132 -#elif defined( CONFIG_MTD_NEW_PARTITION )
133 -static struct mtd_partition* bcm7XXX_nand_parts = bcm7XXX_new_partition;
134 -
135 -#else
136 -static struct mtd_partition* bcm7XXX_nand_parts = bcm7XXX_old_partition;
137  #endif
138  
139  struct brcmnand_info {
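
The USE_SPLASH branch of the partition table above packs the first 16 MB tightly; the layout it implies can be checked with a short standalone sketch. This is not driver code: the string labels are illustrative stand-ins for the N_* name macros, and the rootfs size is the 2 MB that DEFAULT_ROOTFS_SIZE evaluates to on the smallest 16 MB device.

#include <stdio.h>

/* Offsets and sizes copied from the USE_SPLASH branch above. The last 1 MB,
 * up to 0x01000000, is left for the flash-based bad-block table. */
struct part { const char *name; unsigned long offset; unsigned long size; };

static const struct part splash_layout[] = {
        { "rootfs",  0x00000000, 2 << 20 },
        { "kernel",  0x00200000, 4 << 20 },
        { "boot",    0x00600000, 4 << 20 },
        { "bootimg", 0x00A00000, 2 << 20 },
        { "cfe",     0x00C00000, 1 << 20 },
        { "mac",     0x00D00000, 1 << 19 },
        { "env",     0x00D80000, 1 << 19 },
        { "nvm",     0x00E00000, 1 << 20 },
};

int main(void)
{
        unsigned long expected = 0;
        for (unsigned i = 0; i < sizeof(splash_layout) / sizeof(splash_layout[0]); i++) {
                const struct part *p = &splash_layout[i];
                printf("%-8s 0x%08lx - 0x%08lx\n", p->name, p->offset, p->offset + p->size);
                if (p->offset != expected)
                        printf("  gap/overlap before %s\n", p->name);
                expected = p->offset + p->size;
        }
        return 0;
}
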
140 @@ -253,41 +220,17 @@
141         unsigned int ocap_size = DEFAULT_OCAP_SIZE;
142  #endif
143         unsigned int avail1_size = DEFAULT_AVAIL1_SIZE;
144 -       int oldNumParts = ARRAY_SIZE(bcm7XXX_old_partition);
145  
146 -//printk("========================> %s\n", __FUNCTION__);
147 -
148 -
149 -       /* 
150 -        * Is XOR disabled? if so use the new partition.
151 -        */
152 -       if (nandinfo->brcmnand.xor_disable) {
153 -               bcm7XXX_nand_parts = bcm7XXX_no_xor_partition;
154 -
155 -               if (device_size(mtd) <= (512ULL <<20)) {
156 -                       bcm7XXX_nand_parts[ALL_PART].size = 
157 -                               device_size(mtd) - (uint64_t) (DEFAULT_BBT0_SIZE_MB<<20);
158 -                       *numParts = 3;
159 -               } 
160 -               else {
161 -                       bcm7XXX_nand_parts[ALL_PART].size = ((512-DEFAULT_BBT1_SIZE_MB)<<20);
162 -                       *numParts = 4;
163 -               }
164 -               for (i=0; i<*numParts;i++) {
165 -                       bcm7XXX_nand_parts[i].ecclayout = mtd->ecclayout;
166 -               }
167 -       
168 -               // Kernel partition will be initialized by Env Vars.
169 -       //printk("<-- %s, device_size=%0llx\n", __FUNCTION__, device_size(mtd));
170 -       //print_partition(*numParts);
171 -
172 -               nandinfo->parts = bcm7XXX_nand_parts;
173 -               
174 -               return;
175 +       if (device_size(mtd) <= (512ULL <<20)) {
176 +               size = (unsigned long) device_size(mtd);        // mtd->size may be different than nandinfo->size
177 +               *numParts = ARRAY_SIZE(bcm7XXX_nand_parts) - 3; /* take into account the extra 2 parts
178 +                                                                  and the data partition */
179 +       } else {
180 +               size = 512 << 20;
181 +               *numParts = ARRAY_SIZE(bcm7XXX_nand_parts) - 2; // take into account the extra 2 parts
182         }
183  
184 -
185 -#if defined( CONFIG_MTD_NEW_PARTITION ) 
186 +#ifdef CONFIG_MTD_NEW_PARTITION
187         if (device_size(mtd) <= (512ULL <<20)) {
188                 bcm7XXX_nand_parts[ALL_PART].size = 
189                         device_size(mtd) - (uint64_t) (DEFAULT_BBT0_SIZE_MB<<20);
190 @@ -308,53 +251,8 @@
191         nandinfo->parts = bcm7XXX_nand_parts;
192         
193         return;
194 -#else
195 -                                                                  
196 -       /* NAND on CS1, same partition as that of CONFIG_MTD_NEW_PARTITION */
197 -PRINTK("nandinfo->brcmnand.CS[0] = %d\n", nandinfo->brcmnand.CS[0]);
198 -PRINTK("bcm7XXX_nand_parts=%p, bcm7XXX_new_partition=%p, bcm7XXX_old_partition=%p\n",
199 -       bcm7XXX_nand_parts, &bcm7XXX_new_partition[0], &bcm7XXX_old_partition[0]);
200 -       if (nandinfo->brcmnand.CS[0] != 0) {
201 -               bcm7XXX_nand_parts = bcm7XXX_new_partition;
202 -               
203 -               if (device_size(mtd) <= (512ULL <<20)) {
204 -                       bcm7XXX_nand_parts[0].size = device_size(mtd) - DEFAULT_RESERVED_SIZE - ecm_size;
205 -                       bcm7XXX_nand_parts[ALL_PART].size = 
206 -                               device_size(mtd) - ((uint64_t) (DEFAULT_BBT0_SIZE_MB) <<20);
207 -                       *numParts = 3;
208 -               } 
209 -               else {
210 -                       bcm7XXX_nand_parts[0].size = (512ULL <<20) - DEFAULT_RESERVED_SIZE - ecm_size;
211 -                       bcm7XXX_nand_parts[ALL_PART].size = 
212 -                               device_size(mtd) - ((uint64_t) (DEFAULT_BBT1_SIZE_MB)<<20);
213 -                       *numParts = 4;
214 -               }
215 -               for (i=0; i<*numParts;i++) {
216 -                       bcm7XXX_nand_parts[i].ecclayout = mtd->ecclayout;
217 -               }
218 +#elif defined( CONFIG_MTD_ECM_PARTITION )
219  
220 -               nandinfo->parts = bcm7XXX_nand_parts;
221 -
222 -#if 1
223 -PRINTK("%s: NAND on CS1: numparts=%d\n", __FUNCTION__, *numParts);
224 -print_partition(*numParts);
225 -#endif
226 -
227 -               return;
228 -         }
229 -
230 -       /* From now on, we are only dealing with old partition table */
231 -       if (device_size(mtd) <= (512ULL <<20)) {
232 -               size = (unsigned long) device_size(mtd);        // mtd->size may be different than nandinfo->size
233 -               *numParts =  oldNumParts - 3; /* take into account the extra 2 parts
234 -                                                                  and the data partition */
235 -       } else {
236 -               size = 512 << 20;
237 -               *numParts =  oldNumParts - 2; // take into account the extra 2 parts
238 -       }
239 -  
240 -  #if defined( CONFIG_MTD_ECM_PARTITION )
241 -
242         /* Do not generate AVAIL1 partition if usable flash size is less than 64MB */
243         
244         if (size < (64<<20)) {
245 @@ -370,12 +268,11 @@
246                 ecm_size = ocap_size + avail1_size;
247         }
248         
249 -  #endif
250 +
251  #endif
252         nandinfo->parts = bcm7XXX_nand_parts;
253         bcm7XXX_nand_parts[0].size = size - DEFAULT_RESERVED_SIZE - ecm_size;
254         bcm7XXX_nand_parts[0].ecclayout = mtd->ecclayout;
255 -PRINTK("numParts=%d\n", numParts);
256  PRINTK("Part[%d] name=%s, size=%llx, offset=%llx\n", i, bcm7XXX_nand_parts[0].name, 
257  bcm7XXX_nand_parts[0].size, bcm7XXX_nand_parts[0].offset);
258  
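
The two hunks above reduce the partition sizing to a single clamp-and-count step. Below is a simplified, standalone restatement of that logic; device_size and the table length are stand-in parameters, not the driver's own interfaces.

#include <stdint.h>
#include <stdio.h>

/* Simplified restatement of the sizing logic above: usable size is clamped to
 * 512 MB, and the two trailing end-marker slots (plus the "data" partition on
 * small devices) are dropped from the partition count. */
static void size_parts(uint64_t device_size, size_t table_entries,
                       unsigned long *size, int *num_parts)
{
        if (device_size <= (512ULL << 20)) {
                *size = (unsigned long) device_size;
                *num_parts = (int) table_entries - 3;   /* no "data" partition */
        } else {
                *size = 512UL << 20;
                *num_parts = (int) table_entries - 2;
        }
}

int main(void)
{
        unsigned long size;
        int num_parts;

        /* hypothetical 128 MB device with an 11-entry table, for illustration */
        size_parts(128ULL << 20, 11, &size, &num_parts);
        printf("size=%lu MB, parts=%d\n", size >> 20, num_parts);
        return 0;
}
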
259 @@ -549,6 +446,7 @@
260         int e; // Index into Env vars
261         int i; // Index into mtd partition
262  
263 +#ifndef USE_SPLASH
264         // Not configured for Splash, but does CFE define it?
265         if (!gBcmSplash) { 
266                 for (i=0; i < gCfePartitions.numParts; i++) {
267 @@ -558,6 +456,7 @@
268                         }
269                 }
270         }
271 +#endif
272  
273         /*
274          * Remove OCAP partitions if Env Vars are defined
275 @@ -640,7 +539,6 @@
276         //unsigned long size = res->end - res->start + 1;
277         int err = 0;
278         int numParts = 0;
279 -       struct brcmnand_chip* chip;
280  
281         gPageBuffer = NULL;
282         info = kmalloc(sizeof(struct brcmnand_info), GFP_KERNEL);
283 @@ -672,7 +570,7 @@
284         //info->brcmnand.mmcontrol = NULL;  // THT: Sync Burst Read TBD.  pdata->mmcontrol;
285  
286         info->mtd.name = pdev->dev.bus_id;
287 -       chip = info->mtd.priv = &info->brcmnand;
288 +       info->mtd.priv = &info->brcmnand;
289         info->mtd.owner = THIS_MODULE;
290  
291         /* Enable the following for a flash based bad block table */
292 @@ -690,19 +588,12 @@
293  
294  //print_partition(numParts);
295  
296 -       // Nand not on CS0, set it up to allow 1 partition, as in the new partition scheme
297 -       if (chip->CS[0] != 0) { 
298 -               bcm7XXX_nand_parts = bcm7XXX_new_partition;
299 -       }
300 -       
301         if (gCfePartitions.numParts == 0) {
302                 brcmnanddrv_setup_mtd_partitions(info, &numParts);
303         }
304         else {
305                 brcmnanddrv_setup_mtdpart_cfe_env(info, &numParts);
306         }
307 -       
308 -       
309  
310  //print_partition(numParts);
311                 
312 @@ -711,20 +602,6 @@
313  //printk("     dev_set_drvdata\n");    
314         dev_set_drvdata(&pdev->dev, info);
315  //printk("<-- brcmnanddrv_probe\n");
316 -
317 -/* NOR+NAND configuration */
318 -#ifdef CONFIG_MTD_BRCMNAND_NOR_ACCESS
319 -       /* Append NOR partition to the end */
320 -       {
321 -               extern void (*gInitialize_Nor_Partition)(void);
322 -
323 -               if (gInitialize_Nor_Partition) {
324 -                       (*gInitialize_Nor_Partition) ();
325 -               }
326 -               // Else NAND is loaded first, NOR will append when it is started.
327 -       }
328 -
329 -#endif
330         return 0;
331  
332  
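
With the NOR-append block removed, the probe path above is left with one decision: use the built-in table unless CFE supplied a partition layout. A condensed restatement follows; the two setup helpers are stub stand-ins for brcmnanddrv_setup_mtd_partitions() and brcmnanddrv_setup_mtdpart_cfe_env().

#include <stdio.h>

/* Condensed restatement of the probe-time selection above; stub values only. */
struct cfe_parts { int numParts; };

static void setup_mtd_partitions(int *numParts)  { *numParts = 8; /* stub */ }
static void setup_mtdpart_cfe_env(int *numParts) { *numParts = 4; /* stub */ }

static int probe_partitions(const struct cfe_parts *cfe)
{
        int numParts = 0;

        if (cfe->numParts == 0)
                setup_mtd_partitions(&numParts);     /* no CFE layout: use built-in table */
        else
                setup_mtdpart_cfe_env(&numParts);    /* CFE supplied a layout: honour it */

        return numParts;
}

int main(void)
{
        struct cfe_parts none = { 0 }, some = { 4 };
        printf("built-in: %d parts, from CFE: %d parts\n",
               probe_partitions(&none), probe_partitions(&some));
        return 0;
}
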
333 Index: drivers/mtd/brcmnand/brcmnand_base.c
334 ===================================================================
335 --- drivers/mtd/brcmnand/brcmnand_base.c        (revision 1)
336 +++ drivers/mtd/brcmnand/brcmnand_base.c        (working copy)
337 @@ -39,8 +39,6 @@
338  #include <linux/byteorder/generic.h>
339  #include <linux/reboot.h>
340  #include <linux/vmalloc.h>
341 -#include <linux/dma-mapping.h>
342 -#include <linux/interrupt.h>
343  
344  #include <asm/io.h>
345  #include <asm/bug.h>
346 @@ -60,26 +58,6 @@
347  
348  //#define DEBUG_HW_ECC
349  
350 -//#define BRCMNAND_READ_VERIFY
351 -#undef BRCMNAND_READ_VERIFY
352 -
353 -//#ifdef CONFIG_MTD_BRCMNAND_VERIFY_WRITE
354 -//#define BRCMNAND_WRITE_VERIFY
355 -//#endif
356 -#undef BRCMNAND_WRITE_VERIFY
357 -
358 -//#define DEBUG_ISR
359 -#undef DEBUG_ISR
360 -#if defined( DEBUG_ISR )  || defined(BRCMNAND_READ_VERIFY) \
361 -       || defined(BRCMNAND_WRITE_VERIFY)
362 -#if defined(DEBUG_ISR )  || defined(BRCMNAND_READ_VERIFY)
363 -#define EDU_DEBUG_4
364 -#endif
365 -#if defined(DEBUG_ISR )  || defined(BRCMNAND_WRITE_VERIFY)
366 -#define EDU_DEBUG_5
367 -#endif
368 -#endif
369 -
370  #define my_be32_to_cpu(x) be32_to_cpu(x)
371  
372  #if defined( CONFIG_MTI_R24K ) || defined( CONFIG_MTI_R34K ) || defined( CONFIG_MTD_BRCMNAND_EDU )
373 @@ -131,7 +109,21 @@
374  
375  #define HW_AUTOOOB_LAYOUT_SIZE         32 /* should be enough */
376  
377 +#define BRCMNAND_CORRECTABLE_ECC_ERROR         (1)
378 +#define BRCMNAND_SUCCESS                                               (0)
379 +#define BRCMNAND_UNCORRECTABLE_ECC_ERROR       (-1)
380 +#define BRCMNAND_FLASH_STATUS_ERROR                    (-2)
381 +#define BRCMNAND_TIMED_OUT                                     (-3)
382  
383 +#ifdef CONFIG_MTD_BRCMNAND_EDU
384 +#define BRCMEDU_CORRECTABLE_ECC_ERROR          (4)
385 +#define BRCMEDU_UNCORRECTABLE_ECC_ERROR      (-4)
386 +
387 +#define  BRCMEDU_MEM_BUS_ERROR                         (-5)
388 +
389 +//uint32_t EDU_ldw;
390 +#endif // #ifdef CONFIG_MTD_BRCMNAND_EDU
391 +
392  #ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
393  /* Avoid infinite recursion between brcmnand_refresh_blk() and brcmnand_read_ecc() */
394  static atomic_t inrefresh = ATOMIC_INIT(0); 
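
The status codes reintroduced above follow a simple sign convention: zero or positive means the data is usable (possibly after ECC correction), negative means the operation failed. The classifier below is a hypothetical illustration of that convention only, not the driver's actual error handling.

#include <stdio.h>

/* Values copied from the hunk above (non-EDU set). */
#define BRCMNAND_CORRECTABLE_ECC_ERROR     (1)
#define BRCMNAND_SUCCESS                   (0)
#define BRCMNAND_UNCORRECTABLE_ECC_ERROR  (-1)
#define BRCMNAND_FLASH_STATUS_ERROR       (-2)
#define BRCMNAND_TIMED_OUT                (-3)

static const char *classify(int status)
{
        if (status == BRCMNAND_SUCCESS)
                return "clean read";
        if (status == BRCMNAND_CORRECTABLE_ECC_ERROR)
                return "read ok, ECC corrected (candidate for block refresh)";
        return "failed (uncorrectable ECC, flash status error, or timeout)";
}

int main(void)
{
        int codes[] = { 0, 1, -1, -2, -3 };
        for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
                printf("%2d: %s\n", codes[i], classify(codes[i]));
        return 0;
}
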
395 @@ -166,7 +158,6 @@
396         uint32 options;
397         uint32_t idOptions;     // Whether chip has all 5 ID bytes
398         uint32 timing1, timing2; // Specify a non-zero value to override the default timings.
399 -       int nop;                                // Number of partial writes per page
400         unsigned int ctrlVersion; // Required controller version if different than 0
401  } brcmnand_chip_Id;
402  
403 @@ -183,7 +174,6 @@
404                                 //| NAND_COMPLEX_OOB_WRITE      /* Write data together with OOB for write_oob */
405                 .timing1 = 0, //00070000,
406                 .timing2 = 0,
407 -               .nop=8,
408                 .ctrlVersion = 0, /* THT Verified on data-sheet 7/10/08: Allows 4 on main and 4 on OOB */
409         },
410  
411 @@ -195,7 +185,6 @@
412                 .idOptions = 0,
413                 .timing1 = 0, //0x6474555f, 
414                 .timing2 = 0, //0x00000fc7,
415 -               .nop=8,
416                 .ctrlVersion = 0,
417         },
418         {       /* 2 */
419 @@ -206,7 +195,6 @@
420                 .idOptions = 0,
421                 .timing1 = 0, //0x6474555f, 
422                 .timing2 = 0, //0x00000fc7,
423 -               .nop=8,
424                 .ctrlVersion = 0,
425         },
426  #if 0 // EOL
427 @@ -228,7 +216,6 @@
428                 .options = NAND_USE_FLASH_BBT,
429                 .idOptions = 0,
430                 .timing1 = 0, .timing2 = 0,
431 -               .nop=8,
432                 .ctrlVersion = 0,
433         },
434  
435 @@ -239,7 +226,6 @@
436                 .options = NAND_USE_FLASH_BBT,
437                 .idOptions = 0,
438                 .timing1 = 0, .timing2 = 0,
439 -               .nop=8,
440                 .ctrlVersion = 0,
441         },
442  /* This is just the 16 bit version of the above?
443 @@ -259,8 +245,7 @@
444                 .options = NAND_USE_FLASH_BBT,
445                 .idOptions = 0,
446                 .timing1 = 0, .timing2 = 0,
447 -               .nop=4,
448 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
449 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
450         },
451  
452         {       /* 6 */
453 @@ -270,8 +255,7 @@
454                 .options = NAND_USE_FLASH_BBT,
455                 .idOptions = 0,
456                 .timing1 = 0, .timing2 = 0,
457 -               .nop=4,
458 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
459 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
460         },
461  
462  
463 @@ -282,7 +266,6 @@
464                 .options = NAND_USE_FLASH_BBT,
465                 .idOptions = 0,
466                 .timing1 = 0, .timing2 = 0,
467 -               .nop=8,
468                 .ctrlVersion = 0,
469         },
470  
471 @@ -295,7 +278,6 @@
472                 .options = NAND_USE_FLASH_BBT,
473                 .idOptions = 0,
474                 .timing1 = 0, .timing2 = 0,
475 -               .nop=8,
476                 .ctrlVersion = 0,
477         },
478  
479 @@ -306,7 +288,6 @@
480                 .options = NAND_USE_FLASH_BBT,
481                 .idOptions = 0,
482                 .timing1 = 0, .timing2 = 0,
483 -               .nop=8,
484                 .ctrlVersion = 0,
485         },
486  
487 @@ -317,7 +298,6 @@
488                 .options = NAND_USE_FLASH_BBT,
489                 .idOptions = 0,
490                 .timing1 = 0, .timing2 = 0,
491 -               .nop=8,
492                 .ctrlVersion = 0,
493         },
494  
495 @@ -328,7 +308,6 @@
496                 .options = NAND_USE_FLASH_BBT,
497                 .idOptions = 0,
498                 .timing1 = 0, .timing2 = 0,
499 -               .nop=8,
500                 .ctrlVersion = 0,
501         },
502  
503 @@ -339,7 +318,6 @@
504                 .options = NAND_USE_FLASH_BBT,
505                 .idOptions = 0,
506                 .timing1 = 0, .timing2 = 0,
507 -               .nop=8,
508                 .ctrlVersion = 0,
509         },
510  
511 @@ -350,7 +328,6 @@
512                 .options = NAND_USE_FLASH_BBT,
513                 .idOptions = 0,
514                 .timing1 = 0, .timing2 = 0,
515 -               .nop=8,
516                 .ctrlVersion = 0,
517         },
518  
519 @@ -361,7 +338,6 @@
520                 .options = NAND_USE_FLASH_BBT,
521                 .idOptions = 0,
522                 .timing1 = 0, .timing2 = 0,
523 -               .nop=8,
524                 .ctrlVersion = 0,
525         },
526  
527 @@ -372,7 +348,6 @@
528                 .options = NAND_USE_FLASH_BBT,
529                 .idOptions = 0,
530                 .timing1 = 0, .timing2 = 0,
531 -               .nop=8,
532                 .ctrlVersion = 0,
533         },
534  
535 @@ -383,7 +358,6 @@
536                 .options = NAND_USE_FLASH_BBT,
537                 .idOptions = 0,
538                 .timing1 = 0, .timing2 = 0,
539 -               .nop=8,
540                 .ctrlVersion = 0,
541         },
542  
543 @@ -394,7 +368,6 @@
544                 .options = NAND_USE_FLASH_BBT,
545                 .idOptions = 0,
546                 .timing1 = 0, .timing2 = 0,
547 -               .nop=8,
548                 .ctrlVersion = 0,
549         },
550  
551 @@ -405,7 +378,6 @@
552                 .options = NAND_USE_FLASH_BBT,
553                 .idOptions = 0,
554                 .timing1 = 0, .timing2 = 0,
555 -               .nop=8,
556                 .ctrlVersion = 0,
557         },
558  
559 @@ -416,7 +388,6 @@
560                 .options = NAND_USE_FLASH_BBT,
561                 .idOptions = 0,
562                 .timing1 = 0, .timing2 = 0,
563 -               .nop=8,
564                 .ctrlVersion = 0,
565         },
566  
567 @@ -427,7 +398,6 @@
568                 .options = NAND_USE_FLASH_BBT,
569                 .idOptions = 0,
570                 .timing1 = 0, .timing2 = 0,
571 -               .nop=8,
572                 .ctrlVersion = 0,
573         },
574  
575 @@ -438,11 +408,10 @@
576                 .options = NAND_USE_FLASH_BBT,
577                 .idOptions = 0,
578                 .timing1 = 0, .timing2 = 0,
579 -               .nop=8,
580                 .ctrlVersion = 0,
581         },
582  
583 -       /* The following 6 ST chips only allow 4 writes per page, and requires version2.1 (4) of the controller or later */
584 +       /* The following 6 ST chips only allow 4 writes per page, and requires version2.2 (5) of the controller or later */
585         {       /* 22 */
586                 .chipId = ST_NAND01GW3B,
587                 .mafId = FLASHTYPE_ST,
588 @@ -450,8 +419,7 @@
589                 .options = NAND_USE_FLASH_BBT,
590                 .idOptions = 0,
591                 .timing1 = 0, .timing2 = 0,
592 -               .nop=4,
593 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
594 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
595         },
596  
597         {       /* 23 */ 
598 @@ -461,8 +429,7 @@
599                 .options = NAND_USE_FLASH_BBT,
600                 .idOptions = 0,
601                 .timing1 = 0, .timing2 = 0,
602 -               .nop=4,
603 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
604 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
605         },
606  
607         {       /* 24 */ 
608 @@ -472,8 +439,7 @@
609                 .options = NAND_USE_FLASH_BBT,
610                 .idOptions = 0,
611                 .timing1 = 0, .timing2 = 0,
612 -               .nop=4,
613 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
614 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
615         },
616         {       /* 25 */ 
617                 .chipId = ST_NAND02GW3B,
618 @@ -482,8 +448,7 @@
619                 .options = NAND_USE_FLASH_BBT,
620                 .idOptions = 0,
621                 .timing1 = 0, .timing2 = 0,
622 -               .nop=4,
623 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
624 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
625         },
626         
627         {       /* 26 */ 
628 @@ -493,8 +458,7 @@
629                 .options = NAND_USE_FLASH_BBT,
630                 .idOptions = 0,
631                 .timing1 = 0, .timing2 = 0,
632 -               .nop=4,
633 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
634 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
635         },
636         {       /* 27 */ 
637                 .chipId = ST_NAND08GW3B,
638 @@ -503,8 +467,7 @@
639                 .options = NAND_USE_FLASH_BBT,
640                 .idOptions = 0,
641                 .timing1 = 0, .timing2 = 0,
642 -               .nop=4,
643 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_1,
644 +               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_2_0,
645         },
646                 
647         {       /* 28 */
648 @@ -514,9 +477,8 @@
649                 .options = NAND_USE_FLASH_BBT,          /* Use BBT on flash */
650                                 //| NAND_COMPLEX_OOB_WRITE      /* Write data together with OOB for write_oob */
651                 .idOptions = BRCMNAND_ID_EXT_BYTES,
652 -               .timing1 = 0, 
653 +               .timing1 = 0, //00070000,
654                 .timing2 = 0,
655 -               .nop=1,
656                 .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
657         },
658  
659 @@ -527,9 +489,8 @@
660                 .options = NAND_USE_FLASH_BBT,          /* Use BBT on flash */
661                                 //| NAND_COMPLEX_OOB_WRITE      /* Write data together with OOB for write_oob */
662                 .idOptions = BRCMNAND_ID_EXT_BYTES_TYPE2,
663 -               .timing1 = 0, 
664 +               .timing1 = 0, //00070000,
665                 .timing2 = 0,
666 -               .nop=1,
667                 .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
668         },
669  
670 @@ -540,24 +501,10 @@
671                 .options = NAND_USE_FLASH_BBT,          /* Use BBT on flash */
672                                 //| NAND_COMPLEX_OOB_WRITE      /* Write data together with OOB for write_oob */
673                 .idOptions = BRCMNAND_ID_EXT_BYTES,
674 -               .timing1 = 0, 
675 +               .timing1 = 0, //00070000,
676                 .timing2 = 0,
677 -               .nop=1,
678                 .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
679         },
680 -
681 -       {       /* 31 */  
682 -               .chipId = HYNIX_HY27UAG8T2M,
683 -               .mafId = FLASHTYPE_HYNIX,
684 -               .chipIdStr = "HYNIX_HY27UAG8T2M",
685 -               .options = NAND_USE_FLASH_BBT,          /* Use BBT on flash */
686 -                               //| NAND_COMPLEX_OOB_WRITE      /* Write data together with OOB for write_oob */
687 -               .idOptions = BRCMNAND_ID_EXT_BYTES,
688 -               .timing1 = 0, 
689 -               .timing2 = 0,
690 -               .nop=1,
691 -               .ctrlVersion = CONFIG_MTD_BRCMNAND_VERS_3_0, 
692 -       },
693                 
694         {       /* LAST DUMMY ENTRY */
695                 .chipId = 0,
696 @@ -613,7 +560,7 @@
697  
698         if (nandCtrlReg < BCHP_NAND_REVISION || nandCtrlReg > BCHP_NAND_BLK_WR_PROTECT ||
699                 (nandCtrlReg & 0x3) != 0) {
700 -               printk("brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
701 +               printk(KERN_ERR "brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
702         }
703  if (gdebug > 3) printk("%s: CMDREG=%08x val=%08x\n", __FUNCTION__, (unsigned int) nandCtrlReg, (unsigned int)*pReg);
704         return (uint32_t) (*pReg);
705 @@ -627,7 +574,7 @@
706  
707         if (nandCtrlReg < BCHP_NAND_REVISION || nandCtrlReg > BCHP_NAND_BLK_WR_PROTECT ||
708                 (nandCtrlReg & 0x3) != 0) {
709 -               printk( "brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
710 +               printk(KERN_ERR "brcmnand_ctrl_read: Invalid register value %08x\n", nandCtrlReg);
711         }
712         *pReg = (volatile unsigned long) (val);
713  if (gdebug > 3) printk("%s: CMDREG=%08x val=%08x\n", __FUNCTION__, nandCtrlReg, val);
714 @@ -670,14 +617,13 @@
715         }
716  
717  if (gdebug) printk("CS=%d, chip->CS[cs]=%d\n", cs, chip->CS[cs]);
718 -       // ldw is lower 32 bit of chipOffset, need to add pbase when on CS0 and XOR is ON.
719 -       if (!chip->xor_disable[cs]) {
720 +       // ldw is lower 32 bit of chipOffset, need to add pbase when on CS0
721 +       if (chip->CS[cs] == 0) {
722                 ldw = chipOffset.s.low + chip->pbase;
723 -       } 
724 +       }
725         else {
726                 ldw = chipOffset.s.low;
727 -       } 
728 -       
729 +       }
730         udw = chipOffset.s.high | (chip->CS[cs] << 16);
731  
732  if (gdebug > 3) printk("%s: offset=%0llx  cs=%d ldw = %08x, udw = %08x\n", __FUNCTION__, offset, cs,  ldw, udw);
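
The change above only affects when pbase is folded into the lower address word; the word layout itself is unchanged. Below is a standalone restatement of how the two command-address words are composed, with types and the chip-select handling simplified for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified restatement of the address composition above: the 64-bit chip
 * offset is split into lower and upper 32-bit words, the flash physical base
 * is added to the lower word only for a chip on CS0, and the chip-select
 * number is packed into bits 16+ of the upper word. */
static void compose_addr(uint64_t offset, uint32_t pbase, int cs,
                         uint32_t *ldw, uint32_t *udw)
{
        uint32_t low  = (uint32_t) offset;
        uint32_t high = (uint32_t) (offset >> 32);

        *ldw = (cs == 0) ? low + pbase : low;
        *udw = high | ((uint32_t) cs << 16);
}

int main(void)
{
        uint32_t ldw, udw;

        /* hypothetical offset and base, just to show the packing */
        compose_addr(0x00800000ULL, 0x10000000, 0, &ldw, &udw);
        printf("CS0: ldw=%08x udw=%08x\n", ldw, udw);
        compose_addr(0x00800000ULL, 0x10000000, 1, &ldw, &udw);
        printf("CS1: ldw=%08x udw=%08x\n", ldw, udw);
        return 0;
}
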
733 @@ -692,7 +638,7 @@
734  #if 1
735  /* Dont delete, may be useful for debugging */
736  
737 -static void print_diagnostics(struct brcmnand_chip* chip)
738 +static void print_diagnostics(void)
739  {
740         uint32_t nand_acc_control = brcmnand_ctrl_read(BCHP_NAND_ACC_CONTROL);
741         uint32_t nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
742 @@ -703,7 +649,7 @@
743         uint32_t pageAddrExt = brcmnand_ctrl_read(BCHP_NAND_PROGRAM_PAGE_EXT_ADDR);
744  #endif
745  
746 -       
747 +       uint32_t ebiCSBase0 = * ((volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_BASE_0));
748         //unsigned long nand_timing1 = brcmnand_ctrl_read(BCHP_NAND_TIMING_1);
749         //unsigned long nand_timing2 = brcmnand_ctrl_read(BCHP_NAND_TIMING_2);
750  
751 @@ -712,17 +658,7 @@
752  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
753         printk("PAGE_EXT_ADDR=%08x\n", pageAddrExt);
754  #endif
755 -       if (chip->CS[0] == 0) {
756 -               uint32_t ebiCSBase0 = * ((volatile unsigned long*) (0xb0000000|BCHP_EBI_CS_BASE_0));
757 -               printk("PAGE_ADDR=%08x, \tCS0_BASE=%08x\n", pageAddr, ebiCSBase0);
758 -       }
759 -       else {
760 -               //uint32_t ebiCSBaseN = * ((volatile unsigned long*) (0xb0000000|(BCHP_EBI_CS_BASE_0));
761 -               uint32_t csNandBaseN = *(volatile unsigned long*) (0xb0000000 + BCHP_EBI_CS_BASE_0 + 8*chip->CS[0]);
762 -
763 -               printk("PAGE_ADDR=%08x, \tCS%-d_BASE=%08x\n", pageAddr, chip->CS[0], csNandBaseN);
764 -               printk("pbase=%08lx, vbase=%p\n", chip->pbase, chip->vbase);
765 -       }
766 +       printk("PAGE_ADDR=%08x, \tCS0_BASE=%08x\n", pageAddr, ebiCSBase0);
767  }      
768  #endif
769  
770 @@ -739,51 +675,6 @@
771                 nand_acc_control, nand_config, flash_id, nand_timing1, nand_timing2);   
772  }
773  
774 -#define NUM_NAND_REGS  (1+((BCHP_NAND_BLK_WR_PROTECT-BCHP_NAND_REVISION)/4))
775 -
776 -static void print_nand_ctrl_regs(void)
777 -{
778 -       int i;
779 -
780 -       for (i=0; i<NUM_NAND_REGS; i++) {
781 -               uint32_t reg = (uint32_t) (BCHP_NAND_REVISION+(i*4));
782 -               uint32_t regval; 
783 -               uint32_t regoff = reg - BCHP_NAND_REVISION; // i*4
784 -               
785 -               if ((i % 4) == 0) {
786 -                       printk("\n%08x:", reg);
787 -               }
788 -
789 -#if CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_1_0
790 -               // V0.0, V0.1 7401Cx
791 -               if (regoff == 0x14 || regoff == 0x18 || regoff == 0x1c ) { // No NAND register at 0x281c
792 -                       regval = 0;
793 -               }               
794 -#elif CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_2_0
795 -               // V1.0 7440Bx
796 -               if (regoff == 0x18 || regoff == 0x1c ) { // No NAND register at 0x281c
797 -                       regval = 0;
798 -               }
799 -#elif CONFIG_MTD_BRCMNAND_VERSION < CONFIG_MTD_BRCMNAND_VERS_3_0
800 -               // V2.x 7325, 7335, 7405bx
801 -               if (regoff == 0x1c) { // No NAND register at 0x281c
802 -                       regval = 0;
803 -               }
804 -#else // if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
805 -               // V3.x 3548, 7420a0, 7420b0
806 -               if (regoff == 0x1c || regoff == 0x44 || regoff == 0x4c || regoff == 0x5c 
807 -                       || regoff == 0x88 || regoff == 0x8c
808 -                       || regoff == 0xb8 || regoff == 0xbc) {
809 -                       regval = 0;
810 -               }
811 -#endif
812 -               else {
813 -                       regval = (uint32_t) brcmnand_ctrl_read(reg);
814 -               }
815 -               printk("  %08x", regval);
816 -       }
817 -}
818 -
819  void print_NandCtrl_Status(void)
820  {
821  #ifdef CONFIG_MTD_BRCMNAND_EDU
822 @@ -1021,8 +912,11 @@
823                 uint32_t rd_data;
824  
825         
826 -               rd_data = ISR_cache_is_valid();
827 +               rd_data = ISR_cache_is_valid(intr);
828  
829 +
830 +
831 +
832                 if (rd_data == 0) {
833                 /* timed out */
834  printk("%s: rd_data=0 TIMEOUT\n", __FUNCTION__);
835 @@ -1158,7 +1052,7 @@
836                         return 0;
837                 }
838  
839 -               if (state != FL_READING && (!wr_preempt_en) && !in_interrupt())
840 +               if (state != FL_READING && (!wr_preempt_en))
841                         cond_resched();
842                 //touch_softlockup_watchdog();
843         }
844 @@ -1192,10 +1086,6 @@
845  
846                 if (ready & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK && 
847                    (ready & BCHP_NAND_INTFC_STATUS_SPARE_AREA_VALID_MASK)) {
848 -
849 -
850 -#if 0
851 -// THT 6/15/09: Reading OOB would not affect ECC
852                         int ecc;
853  
854                         if (!raw) {
855 @@ -1205,10 +1095,9 @@
856                                         return -1;
857                                 }
858                         }
859 -#endif
860                         return 1;
861                 }
862 -               if (state != FL_READING && !wr_preempt_en && !in_interrupt())
863 +               if (state != FL_READING && !wr_preempt_en)
864                         cond_resched();
865         }
866  
867 @@ -1261,7 +1150,7 @@
868                         //}
869                         //return BRCMNAND_SUCCESS;
870                 }
871 -               if (state != FL_READING && (!wr_preempt_en) && !in_interrupt())
872 +               if (state != FL_READING && (!wr_preempt_en))
873                         cond_resched();
874  
875         }
876 @@ -1292,7 +1181,7 @@
877         uint32_t rd_data;
878  
879  if (gdebug > 3 ) {
880 -printk("%s: intr_status = %08x\n", __FUNCTION__, intr_status); }        
881 +printk("%s: intr_status = %08x\n", intr_status); }      
882  
883           if (intr_status == 0) {
884                 /* EDU_read timed out */
885 @@ -1319,7 +1208,7 @@
886                  */
887  
888                 if (!(intr_status & HIF_INTR2_CTRL_READY)) {
889 -                       (void) ISR_cache_is_valid(); 
890 +                       (void) ISR_cache_is_valid(0); 
891                 }
892  #endif
893                 /*
894 @@ -1356,12 +1245,9 @@
895  #endif
896  
897  
898 -/*
899 - * Returns 1 on success,
900 - *               0 on error
901 - */
902  
903  
904 +
905  static int brcmnand_ctrl_write_is_complete(struct mtd_info *mtd, int* outp_needBBT)
906  {
907         int err;
908 @@ -1384,188 +1270,8 @@
909  }
910  
911  
912 -
913 -
914 -//#define EDU_DEBUG_2
915 -#undef EDU_DEBUG_2
916 -
917 -// EDU_DEBUG_4: Verify on Read
918 -//#define EDU_DEBUG_4
919 -//#undef EDU_DEBUG_4
920 -
921 -// EDU_DEBUG_5: Verify on Write
922 -//#define EDU_DEBUG_5
923 -//#undef EDU_DEBUG_5
924 -
925 -#if defined( EDU_DEBUG_2 ) || defined( EDU_DEBUG_4 ) || defined( EDU_DEBUG_5 )
926 -/* 3548 internal buffer is 4K in size */
927 -//static uint32_t edu_lbuf[2048];
928 -static uint32_t* edu_buf32;
929 -static uint8_t* edu_buf;       // Used by EDU in Debug2
930 -static uint8_t* ctrl_buf;      // Used by Ctrl in Debug4
931 -static uint32_t ctrl_oob32[4];
932 -static uint8_t* ctrl_oob = (uint8_t*) ctrl_oob32;
933 -
934 -#define PATTERN 0xa55a0000
935 -
936 -#define EDU_BUFSIZE_B (512)
937 -// One before and one after
938 -#define EDU_BUF32_SIZE_B (EDU_BUFSIZE_B*3)
939 -
940 -// Same as above in DW instead
941 -#define EDU_BUFSIZE_DW (EDU_BUFSIZE_B/4)
942 -#define EDU_BUF32_SIZE_DW (EDU_BUF32_SIZE_B/4)
943 -
944 -// Real buffer starts at 1/3 
945 -#define EDU_BUF_START_DW (EDU_BUF32_SIZE_DW/3)
946 -
947 -
948 -static void init_edu_buf(void)
949 -{
950 -       /* Write pattern */
951 -       int i;
952 -
953 -       if (!edu_buf32) {
954 -               edu_buf32 = (uint32_t*) kmalloc(EDU_BUF32_SIZE_B, GFP_KERNEL);
955 -               if (!edu_buf32) {
956 -                       printk("%s: Out of memory\n", __FUNCTION__);
957 -                       BUG();
958 -               }
959 -                       
960 -               edu_buf = ctrl_buf = (uint8_t*)  &edu_buf32[EDU_BUF_START_DW];
961 -               printk("%s: Buffer allocated at %p, %d bytes\n", __FUNCTION__, edu_buf32, EDU_BUF32_SIZE_B);
962 -               printk("Real buffer starts at %p\n", ctrl_buf);
963 -       }
964 -
965 -       for (i=0; i<EDU_BUF32_SIZE_DW; i++) {
966 -               edu_buf32[i] = PATTERN | i;
967 -       }       
968 -}
969 -
970 -static int verify_edu_buf(void) 
971 -{
972 -       int i;
973 -       int ret = 0;
974 -       
975 -       for (i=0; i<EDU_BUF_START_DW; i++) {
976 -               if (edu_buf32[i] != (PATTERN | i)) {
977 -                       printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n", 
978 -                               __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
979 -                       ret++;
980 -               }
981 -       }
982 -       for (i=EDU_BUF_START_DW+EDU_BUFSIZE_DW; i<EDU_BUF32_SIZE_DW; i++) {
983 -               if (edu_buf32[i] != (PATTERN | i)) {
984 -                       printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n", 
985 -                               __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
986 -                       ret++;
987 -               }
988 -       }
989 -if (ret) printk("+++++++++++++++ %s: %d DW overwritten by EDU\n", __FUNCTION__, ret);
990 -       return ret;
991 -}
992 -
993 -
994 -static uint8_t edu_write_buf[512];
995 -
996 -
997 -
998  #ifdef CONFIG_MTD_BRCMNAND_EDU
999 -#define NUM_EDU_REGS   (1+((BCHP_EDU_ERR_STATUS-BCHP_EDU_CONFIG)/4))
1000 -#else
1001 -#define NUM_EDU_REGS   1
1002 -#endif
1003  
1004 -#define MAX_DUMPS              20
1005 -
1006 -typedef struct nand_dump {
1007 -       loff_t offset;
1008 -       uint32_t physAddr;
1009 -       struct brcmnand_chip* chip;
1010 -       struct register_dump_t {
1011 -               unsigned long timestamp;
1012 -               uint32_t nand_regs[NUM_NAND_REGS]; // NAND register dump
1013 -               uint32_t edu_regs[NUM_EDU_REGS];        // EDU register
1014 -               uint32_t hif_intr2;             // HIF_INTR2 Interrupt status
1015 -               uint8_t data[512];              // NAND controller cache
1016 -       } dump[MAX_DUMPS];
1017 -       //uint8_t udata[512];   // Uncached
1018 -} nand_dump_t; // Before and after
1019 -nand_dump_t nandDump; 
1020 -int numDumps = 0;
1021 -
1022 -
1023 -#ifdef CONFIG_MTD_BRCMNAND_EDU
1024 -static void print_dump_nand_regs(int which)
1025 -{
1026 -       int i;
1027 -
1028 -       printk("NAND registers snapshot #%d: TS=%0lx, offset=%0llx, PA=%08x\n", 
1029 -               1+which, nandDump.dump[which].timestamp, nandDump.offset, nandDump.physAddr);
1030 -       for (i=0; i<NUM_NAND_REGS; i++) {
1031 -               if ((i % 4) == 0) {
1032 -                       printk("\n%08x:", BCHP_NAND_REVISION+(i*4));
1033 -               }
1034 -               printk("  %08x", nandDump.dump[which].nand_regs[i]);
1035 -       }
1036 -       printk("\nEDU registers:\n");
1037 -       for (i=0; i<NUM_EDU_REGS; i++) {
1038 -               if ((i % 4) == 0) {
1039 -                       printk("\n%08x:", BCHP_EDU_CONFIG+(i*4));
1040 -               }
1041 -               printk("  %08x", nandDump.dump[which].edu_regs[i]);
1042 -       }
1043 -       printk("\n HIF_INTR2_STATUS=%08x\n", nandDump.dump[which].hif_intr2);
1044 -       printk("\nNAND controller Internal cache:\n");
1045 -       print_databuf(nandDump.dump[which].data, 512);
1046 -}
1047 -
1048 -void dump_nand_regs(struct brcmnand_chip* chip, loff_t offset, uint32_t pa, int which)
1049 -{
1050 -       int i;
1051 -
1052 -       /* We don't have the value of offset during snapshot #2 */
1053 -       if (which == 0) {nandDump.offset = offset; nandDump.physAddr = pa;nandDump.chip = chip;}
1054 -
1055 -       nandDump.dump[which].timestamp = jiffies;
1056 -       
1057 -       for (i=0; i<NUM_NAND_REGS; i++) {
1058 -               uint32_t reg = BCHP_NAND_REVISION+(i*4);
1059 -               uint32_t regval;
1060 -
1061 -               if (reg == 0x281c) { // No NAND register at 0x281c
1062 -                       regval = 0;
1063 -               }
1064 -               else {
1065 -                       regval = brcmnand_ctrl_read(reg);
1066 -               }
1067 -               nandDump.dump[which].nand_regs[i] = regval;
1068 -       }
1069 -       for (i=0; i<NUM_EDU_REGS; i++) {
1070 -               nandDump.dump[which].edu_regs[i] = EDU_volatileRead(EDU_BASE_ADDRESS  + BCHP_EDU_CONFIG + ( i*4));
1071 -       }
1072 -       nandDump.dump[which].hif_intr2 = EDU_volatileRead(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS);
1073 -       brcmnand_from_flash_memcpy32(nandDump.chip, &nandDump.dump[which].data[0], nandDump.offset, 512);
1074 -}
1075 -
1076 -#else
1077 -
1078 -#define print_dump_nand_regs(...)
1079 -
1080 -#define dump_nand_regs(...)
1081 -
1082 -#endif // EDU_DEBUG_2,4,5
1083 -#endif
1084 -
1085 -
1086 -#ifdef CONFIG_MTD_BRCMNAND_EDU
1087 -
1088 -
1089 -/*
1090 - * Returns 1 on success,
1091 - *               0 on error
1092 - */
1093 -
1094  static int brcmnand_EDU_write_is_complete(struct mtd_info *mtd, int* outp_needBBT)
1095  {
1096         uint32_t hif_err, edu_err;
1097 @@ -1581,45 +1287,37 @@
1098  
1099  
1100  #ifdef CONFIG_MTD_BRCMNAND_USE_ISR
1101 -  #if 0 // No need in Batch mode
1102         // Unlike the Read case where we retry on everything, we either complete the write or die trying.
1103 -       // Here we use retry only for ERESTARTSYS, relying on the fact that we write the same data 
1104 -       // over the flash.
1105 -       // Caution: Since this can be called from an interrupt context, we cannot call the regular brcmnand_wait()
1106 -       // call, since those call schedule()
1107 +       // Here we use retry only for ERESTARTSYS, relying on the fact that we write the same data over the flash.
1108         hif_err = ISR_wait_for_completion();
1109         if ((hif_err == ERESTARTSYS) || (hif_err & HIF_INTR2_EBI_TIMEOUT))
1110                 return hif_err;
1111 -  #endif // Batch mode
1112 +
1113  #else
1114         hif_err = EDU_poll(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS, 
1115 -               HIF_INTR2_EDU_DONE|HIF_INTR2_CTRL_READY, 
1116 +               HIF_INTR2_EDU_DONE, 
1117                 HIF_INTR2_EDU_ERR, 
1118 -               HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY);
1119 -
1120 +               HIF_INTR2_EDU_DONE_MASK);
1121  #endif
1122  
1123 -
1124         if (hif_err != 0) // No timeout
1125         {
1126 -               uint32_t flashStatus; // = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1127 +               int flashStatus; // = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1128  
1129 -#if 0
1130 -if (!(hif_err & HIF_INTR2_EDU_DONE))
1131 -printk("hif_err=%08x\n", hif_err);
1132 -#endif                 
1133                 
1134 +                       
1135 +               
1136                 /******************* BUG BUG BUG *****************
1137                  * THT 01/06/09: What if EDU returns bus error?  We should not mark the block bad then.
1138                  */
1139                  //Get status:  should we check HIF_INTR2_ERR?
1140 -               if (hif_err & HIF_INTR2_EDU_ERR)
1141 -                       edu_err = EDU_get_error_status_register();
1142 -               else
1143 -                       edu_err = 0;
1144 +               edu_err = EDU_get_error_status_register();
1145  
1146                 //Clear interrupt:
1147                 //EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
1148 +               EDU_reset_done();
1149 +               EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_ERR_STATUS, 0x00000000);
1150 +               EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
1151  
1152                 flashStatus = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1153  
1154 @@ -1627,56 +1325,39 @@
1155                 if (!(flashStatus & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
1156                         ret = brcmnand_ctrl_write_is_complete(mtd, outp_needBBT); 
1157                         // No need to check on the EDU side, already done inside ctrl_write_is_complete
1158 -                       udelay(1000);
1159 -                       //dump_nand_regs(chip, 0, 0, numDumps++);
1160 -                       goto out;
1161 +                       return ret;
1162                 }
1163 -
1164 -#ifdef EDU_DEBUG_5
1165 -/* else */ {
1166 -
1167 -// 2nd dump after CTRL_READY is asserted
1168 -//udelay(1000);
1169 -//dump_nand_regs(chip, 0, 0, numDumps++);
1170 -}
1171 -#endif
1172                         
1173                 if ((edu_err & EDU_ERR_STATUS_NandWrite) || (flashStatus & 0x01)) {
1174                         /* Write did not complete, flash error, will mark block bad */
1175                         *outp_needBBT = 1;
1176                         printk("EDU_write_is_complete(): error 0x%08X\n", edu_err);
1177 -                       ret = 0;
1178 -                       goto out;
1179 +                       return 0;
1180                 }
1181                 else if (edu_err) {
1182                         /* Write did not complete, bus error, will NOT mark block bad */
1183                         *outp_needBBT = 0;
1184                         printk("EDU_write_is_complete(): error 0x%08X\n", edu_err);
1185 -                       ret = 0;
1186 -                       goto out;
1187 +                       return 0;
1188                 }
1189  
1190 -               ret = 1; // Success    brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);  
1191 -               goto out;
1192 +               return 1; // Success    brcmnand_ctrl_write_is_complete(mtd, outp_needBBT);  
1193         }
1194         else { // Write timeout
1195                 printk("%s: Write has timed out\n", __FUNCTION__);
1196                 //*outp_needBBT = 1;
1197 -               ret = 0;
1198 -               goto out;
1199 +               EDU_reset_done();
1200 +               EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_ERR_STATUS, 0x00000000);
1201 +               EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
1202 +               
1203 +               return 0;
1204         }
1205  
1206 -out:
1207  
1208 -       EDU_reset_done();
1209 -       EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_ERR_STATUS, 0x00000000);
1210 -       EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_CLEAR_MASK);
1211 +       printk("EDU_write_is_complete(): error 2 hif_err: %08x\n", hif_err);
1212  
1213 -
1214 -       //printk("EDU_write_is_complete(): error 2 hif_err: %08x\n", hif_err);
1215 -
1216         //Poll time out or did not return HIF_INTR2_EDU_DONE:
1217 -       return ret;
1218 +       return 0;
1219  }
1220  
1221  
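
The restructured completion check above boils down to one classification decision: a NAND-side write failure should mark the block bad, while an EDU/bus-side error should not. The sketch below restates only that decision; the bit masks are stand-ins for the driver's EDU/HIF register definitions (EDU_ERR_STATUS_NandWrite, the FAIL bit in the interface status), and register access is omitted.

#include <stdio.h>

#define ERR_NAND_WRITE  0x1   /* stand-in for EDU_ERR_STATUS_NandWrite */
#define FLASH_FAIL_BIT  0x1   /* stand-in for the FAIL bit in NAND_INTFC_STATUS */

/* Returns 1 on success, 0 on error; *need_bbt is set when the block should be
 * marked bad, mirroring the outp_needBBT convention above. */
static int classify_write_result(unsigned edu_err, unsigned flash_status,
                                 int timed_out, int *need_bbt)
{
        *need_bbt = 0;

        if (timed_out)
                return 0;               /* write timed out: failure, block not marked bad */

        if ((edu_err & ERR_NAND_WRITE) || (flash_status & FLASH_FAIL_BIT)) {
                *need_bbt = 1;          /* flash reported a write failure: mark block bad */
                return 0;
        }
        if (edu_err)
                return 0;               /* bus-side error: fail, but keep the block */

        return 1;                       /* success */
}

int main(void)
{
        int bbt;
        printf("ok=%d bbt=%d\n", classify_write_result(0, 0, 0, &bbt), bbt);
        printf("ok=%d bbt=%d\n", classify_write_result(ERR_NAND_WRITE, 0, 0, &bbt), bbt);
        return 0;
}
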
1222 @@ -1689,7 +1370,7 @@
1223  
1224  
1225  /**
1226 - * brcmnand_transfer_oob - [Internal] Transfer oob from chip->oob_poi to client buffer
1227 + * brcmnand_transfer_oob - [Internal] Transfer oob to client buffer
1228   * @chip:      nand chip structure
1229   * @oob:       oob destination address
1230   * @ops:       oob ops structure
1231 @@ -1727,10 +1408,6 @@
1232                                 bytes = min_t(size_t, len, free->length);
1233                                 boffs = free->offset;
1234                         }
1235 -#ifdef DEBUG_ISR
1236 -printk("%s: AUTO: oob=%p, chip->oob_poi=%p, ooboffs=%d, len=%d, bytes=%d, boffs=%d\n",
1237 -       __FUNCTION__, oob, chip->oob_poi, ops->ooboffs, len, bytes, boffs);
1238 -#endif
1239                         memcpy(oob, chip->oob_poi + boffs, bytes);
1240                         oob += bytes;
1241                 }
1242 @@ -1752,7 +1429,7 @@
1243                 void* buffer, u_char* oobarea, loff_t offset)
1244  {
1245         struct brcmnand_chip* chip = mtd->priv;
1246 -       //int retries = 2;
1247 +       int retries = 2, done = 0;
1248         static uint32_t oobbuf[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1249         uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oobbuf[0]);
1250         u_char* p8 = (u_char*) p32;
1251 @@ -1769,31 +1446,7 @@
1252         //u_char oobbuf[16];
1253         int erased, allFF;
1254         int i;
1255 -       uint32_t acc, acc0;
1256 -       //int valid;
1257  
1258 -       /*
1259 -        * First disable Read ECC then re-try read OOB, because some times, the controller
1260 -        * just drop the op on ECC errors.
1261 -        */
1262 -
1263 -#if 1 /* Testing 1 2 3 */
1264 -       /* Disable ECC */
1265 -       acc = brcmnand_ctrl_read(BCHP_NAND_ACC_CONTROL);
1266 -       acc0 = acc & ~(BCHP_NAND_ACC_CONTROL_RD_ECC_EN_MASK | BCHP_NAND_ACC_CONTROL_RD_ECC_BLK0_EN_MASK);
1267 -       brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc0);
1268 -
1269 -       chip->ctrl_writeAddr(chip, offset, 0);
1270 -       PLATFORM_IOFLUSH_WAR();
1271 -       chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
1272 -
1273 -       // Wait until cache is filled up, disabling ECC checking
1274 -       (void) brcmnand_spare_is_valid(mtd, FL_READING, 1);
1275 -       
1276 -       // Restore acc
1277 -       brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc);
1278 -#endif
1279 -
1280         for (i = 0; i < 4; i++) {
1281                 p32[i] = be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1282         }
1283 @@ -1801,25 +1454,19 @@
1284                 erased = (p8[6] == 0xff && p8[7] == 0xff && p8[8] == 0xff);
1285                 allFF = (p8[6] == 0x00 && p8[7] == 0x00 && p8[8] == 0x00);
1286  if (gdebug > 3 ) 
1287 -{printk("%s: offset=%0llx, erased=%d, allFF=%d\n", 
1288 -__FUNCTION__, offset, erased, allFF);
1289 +{printk("%s: erased=%d, allFF=%d\n", __FUNCTION__, erased, allFF);
1290  print_oobbuf(p8, 16);
1291  }
1292         }
1293         else if (chip->ecclevel >= BRCMNAND_ECC_BCH_1 && chip->ecclevel <= BRCMNAND_ECC_BCH_12) {
1294 -               erased = 1;
1295 -               allFF = 0; // Not sure for BCH.
1296 +               erased = allFF = 1;
1297                 // For BCH-n, the ECC bytes are at the end of the OOB area
1298 -               for (i=chip->eccOobSize-chip->eccbytes; i<min(16,chip->eccOobSize); i++) {
1299 +               for (i=chip->eccOobSize-chip->eccbytes; i<chip->eccOobSize; i++) {
1300                         erased = erased && (p8[i] == 0xff);
1301 -                       if (!erased) {
1302 -                               printk("p8[%d]=%02x\n", i, p8[i]); 
1303 -                               break;
1304 +                       allFF = allFF && (p8[i] == 0x00);
1305                 }
1306 -               }
1307 -if (gdebug > 3 ) 
1308 -{printk("%s: offset=%0llx, i=%d from %d to %d, eccOobSize=%d, eccbytes=%d, erased=%d, allFF=%d\n",
1309 -__FUNCTION__, offset, i, chip->eccOobSize-chip->eccbytes, chip->eccOobSize,
1310 +//if (gdebug > 3 ) 
1311 +{printk("%s: eccOobSize=%d, eccbytes=%d, erased=%d, allFF=%d\n", __FUNCTION__, 
1312  chip->eccOobSize, chip->eccbytes, erased, allFF);}
1313         }
1314         else {
1315 @@ -2134,7 +1781,7 @@
1316         static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1317         uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
1318         u_char* p8 = (u_char*) p32;
1319 -       //unsigned long irqflags;       
1320 +       unsigned long irqflags; 
1321         int retries = 5, done=0;
1322         int valid = 0;
1323  
1324 @@ -2257,24 +1904,17 @@
1325  print_databuf(buffer, 32);
1326  }
1327  
1328 -#if defined( EDU_DEBUG ) || defined (BRCMNAND_READ_VERIFY )
1329 -//if (in_verify <=0) 
1330 -if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
1331 +#ifdef EDU_DEBUG
1332 +if (in_verify <=0) {
1333  u_char edu_sw_ecc[4];
1334  
1335         brcmnand_Hamming_ecc(buffer, edu_sw_ecc);
1336  
1337 -if ((p8[6] != edu_sw_ecc[0] || p8[7] != edu_sw_ecc[1] || p8[8] != edu_sw_ecc[2])
1338 -       && !(p8[6]==0xff && p8[7]==0xff && p8[8]==0xff &&
1339 -               edu_sw_ecc[0]==0x0 && edu_sw_ecc[1]==0x0 && edu_sw_ecc[2]==0x0)
1340 -) {
1341          printk("!!!!!!!!! %s: offset=%0llx ECC=%02x%02x%02x, OOB:",
1342  in_verify < 0 ? "WR" : "RD",
1343  offset, edu_sw_ecc[0], edu_sw_ecc[1], edu_sw_ecc[2]);
1344 -        print_oobbuf(p8, 16);
1345 -        BUG();
1346 +        print_oobbuf(oobarea, 16);
1347  }
1348 -}
1349  #endif
1350  
1351  
1352 @@ -2282,14 +1922,24 @@
1353  }
1354  
1355  
1356 -/*
1357 - * Clear the controller cache by reading at a location we don't normally read
1358 - */
1359 +
1360 +
1361 +#ifdef CONFIG_MTD_BRCMNAND_EDU
1362 +
1363 +
1364 +extern int EDU_buffer_OK(volatile void* addr);
1365 +
1366 +
1367 +#if 1
1368 +static uint32_t debug_buf32[512];
1369 +static u_char* ver_buf = (u_char*) &debug_buf32[0];
1370 +static u_char ver_oob[16];
1371 +
1372  static void debug_clear_ctrl_cache(struct mtd_info* mtd)
1373  {
1374         /* clear the internal cache by writing a new address */
1375         struct brcmnand_chip* chip = mtd->priv;
1376 -       loff_t offset = chip->chipSize-chip->blockSize; // Start of BBT region
1377 +       loff_t offset = chip->chipSize-0x100000; // Start of BBT region
1378         
1379         chip->ctrl_writeAddr(chip, offset, 0); 
1380         PLATFORM_IOFLUSH_WAR();
1381 @@ -2299,20 +1949,6 @@
1382         (void) brcmnand_cache_is_valid(mtd, FL_READING, offset);
1383  }
1384         
1385 -#ifdef CONFIG_MTD_BRCMNAND_EDU
1386 -
1387 -
1388 -extern int EDU_buffer_OK(volatile void* addr, int command);
1389 -
1390 -
1391 -#if 1
1392 -static uint32_t debug_buf32[512];
1393 -static u_char* ver_buf = (u_char*) &debug_buf32[0];
1394 -static u_char ver_oob[16];
1395 -
1396 -
1397 -
1398 -       
1399  static void debug_EDU_read(struct mtd_info* mtd, 
1400          void* edu_buffer, u_char* edu_oob, loff_t offset, uint32_t intr_status, 
1401          uint32_t edu_status, u_char* edu_sw_ecc)
1402 @@ -2373,126 +2009,213 @@
1403  }
1404  #endif
1405  
1406 +/**
1407 + * brcmnand_posted_read_cache - [BrcmNAND Interface] Read the 512B cache area
1408 + * Assuming brcmnand_get_device() has been called to obtain exclusive lock
1409 + * @param mtd        MTD data structure
1410 + * @param oobarea    Spare area, pass NULL if not interested
1411 + * @param buffer    the databuffer to put/get data, pass NULL if only spare area is wanted.
1412 + * @param offset    offset to read from or write to, must be 512B aligned.
1413 + * @param raw: Ignore BBT bytes when raw = 1
1414 + *
1415 + * Caller is responsible to pass a buffer that is
1416 + * (1) large enough for 512B for data and optionally an oobarea large enough for 16B.
1417 + * (2) 4-byte aligned.
1418 + *
1419 + * Read the cache area into buffer.  The size of the cache is mtd-->eccsize and is always 512B.
1420 + */
1421 +//#define EDU_DEBUG_2
1422 +#undef EDU_DEBUG_2
1423  
1424 -#ifdef EDU_DEBUG_4
1425 -int edu_read_verify(struct mtd_info *mtd, char* buffer, char* oobarea, loff_t offset)
1426 -{
1427 -       struct brcmnand_chip* chip = mtd->priv;
1428 -       static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1429 -       uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
1430 -int ctrlret;
1431 +// EDU_DEBUG_4: Verify on Read
1432 +//#define EDU_DEBUG_4
1433 +#undef EDU_DEBUG_4
1434  
1435 -PRINTK("%s: buffer=%08x, ctrlbuf=%08x, oobarea=%08x, ctrl_oob=%08x, offset=%08llx\n", __FUNCTION__, 
1436 -       buffer, ctrl_buf, oobarea, ctrl_oob, offset);
1437 +// EDU_DEBUG_5: Verify on Write
1438 +//#define EDU_DEBUG_5
1439 +#undef EDU_DEBUG_5
1440  
1441 +#if defined( EDU_DEBUG_2 ) || defined( EDU_DEBUG_4 ) 
1442 +/* 3548 internal buffer is 4K in size */
1443 +//static uint32_t edu_lbuf[2048];
1444 +static uint32_t* edu_buf32;
1445 +static uint8_t* edu_buf;       // Used by EDU in Debug2
1446 +static uint8_t* ctrl_buf;      // Used by Ctrl in Debug4
1447 +static uint32_t ctrl_oob32[4];
1448 +static uint8_t* ctrl_oob = (uint8_t*) ctrl_oob32;
1449  
1450 +#define PATTERN 0xa55a0000
1451  
1452 -       ctrlret = brcmnand_ctrl_posted_read_cache(mtd, ctrl_buf, ctrl_oob, offset);
1453 -       //verify_edu_buf();
1454 -       // Compare buffer returned from EDU and Ctrl reads:
1455 -       if (0 != memcmp(ctrl_buf, buffer, 512)) {
1456 -printk("$$$$$$$$$$$$ EDU Read: offset=%08llx\n", offset);
1457 -print_databuf(buffer, 512);
1458 -printk("------------ Ctrl Read: \n");
1459 -print_databuf(ctrl_buf, 512);
1460 -               BUG();
1461 -       }
1462 -       if (oobarea) 
1463 -       {
1464 -               if (0 != memcmp(p32, ctrl_oob, 16)) {
1465 -printk("########## Ctrl OOB:\n");
1466 -print_oobbuf(ctrl_oob, 16);
1467 -printk("------------ EDU OOB: \n");
1468 -print_oobbuf(p32, 16);
1469 -/* Which one is correct?  Since the data buffers agree, use Hamming codes */
1470 -                       if (chip->ecclevel == BRCMNAND_ECC_HAMMING) 
1471 -                       {
1472 -                               unsigned char ecc1[3]; // SW ECC, manually calculated
1473 -                               brcmnand_Hamming_WAR(mtd, offset, buffer, &ctrl_oob[6], &ecc1[0]);
1474 -                               printk("Hamming ECC=%02x%02x%02x\n", ecc1[0], ecc1[1], ecc1[2]);
1475 -                       }
1476 +#define EDU_BUFSIZE_B (512)
1477 +// One before and one after
1478 +#define EDU_BUF32_SIZE_B (EDU_BUFSIZE_B*3)
1479 +
1480 +// Same as above in DW instead
1481 +#define EDU_BUFSIZE_DW (EDU_BUFSIZE_B/4)
1482 +#define EDU_BUF32_SIZE_DW (EDU_BUF32_SIZE_B/4)
1483 +
1484 +// Real buffer starts at 1/3 
1485 +#define EDU_BUF_START_DW (EDU_BUF32_SIZE_DW/3)
1486 +
1487 +
1488 +static void init_edu_buf(void)
1489 +{
1490 +       /* Write pattern */
1491 +       int i;
1492 +
1493 +       if (!edu_buf32) {
1494 +               edu_buf32 = (uint32_t*) kmalloc(EDU_BUF32_SIZE_B, GFP_KERNEL);
1495 +               if (!edu_buf32) {
1496 +                       printk("%s: Out of memory\n", __FUNCTION__);
1497                         BUG();
1498                 }
1499 +                       
1500 +               edu_buf = ctrl_buf = (uint8_t*)  &edu_buf32[EDU_BUF_START_DW];
1501 +               printk("%s: Buffer allocated at %p, %d bytes\n", __FUNCTION__, edu_buf32, EDU_BUF32_SIZE_B);
1502 +               printk("Real buffer starts at %p\n", ctrl_buf);
1503         }
1504 -       return 0;
1505 +
1506 +       for (i=0; i<EDU_BUF32_SIZE_DW; i++) {
1507 +               edu_buf32[i] = PATTERN | i;
1508 +       }       
1509  }
1510 -#endif // Verify EDU on Read
1511  
1512 -
1513 -/*
1514 - * Read completion after EDU_Read is called.
1515 - * In ISR mode, this routine is run in interrupt context
1516 - */
1517 -int
1518 -brcmnand_edu_read_comp_intr(struct mtd_info* mtd, 
1519 -        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
1520 +static int verify_edu_buf(void) 
1521  {
1522 -       struct brcmnand_chip* chip = mtd->priv;
1523 -       uint32_t intfc_status;
1524         int i;
1525 -       static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1526 -       uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
1527 +       int ret = 0;
1528         
1529 -       if (intr_status & HIF_INTR2_EDU_ERR) {
1530 -               printk("%s: Should not call me with EDU ERR\n", __FUNCTION__);
1531 -               BUG();
1532 +       for (i=0; i<EDU_BUF_START_DW; i++) {
1533 +               if (edu_buf32[i] != (PATTERN | i)) {
1534 +                       printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n", 
1535 +                               __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
1536 +                       ret++;
1537 +               }
1538         }
1539 -       intfc_status = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
1540 -       if (!(intfc_status & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
1541 -               printk("%s: Impossible, HIF_INTR2_CTRL_READY already asserted\n", __FUNCTION__);
1542 -               BUG();          
1543 -       }
1544 -
1545 -       // Remember last good sector read.  Needed for HIF_INTR2 workaround.
1546 -       gLastKnownGoodEcc = offset;
1547 -       if (oobarea) 
1548 -       {
1549 -               PLATFORM_IOFLUSH_WAR();
1550 -               for (i = 0; i < 4; i++) {
1551 -                       p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1552 +       for (i=EDU_BUF_START_DW+EDU_BUFSIZE_DW; i<EDU_BUF32_SIZE_DW; i++) {
1553 +               if (edu_buf32[i] != (PATTERN | i)) {
1554 +                       printk("############ %s: pattern overwritten at offset %d, expect %08x, found %08x\n", 
1555 +                               __FUNCTION__, i*4, PATTERN | i, edu_buf32[i]);
1556 +                       ret++;
1557                 }
1558 -if (gdebug > 3) {printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf((u_char*) &p32[0], 16);}
1559 -       }      
1560 -
1561 -       return 0;       
1562 +       }
1563 +if (ret) printk("+++++++++++++++ %s: %d DW overwritten by EDU\n", __FUNCTION__, ret);
1564 +       return ret;
1565  }
1566  
1567 -/*
1568 - * Read WAR after EDU_Read is called, and EDU returns errors.
1569 - * This routine can only be called in process context
1570 - */
1571 -int
1572 -brcmnand_edu_read_completion(struct mtd_info* mtd, 
1573 -        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status)
1574 +#endif
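The EDU_DEBUG_2/EDU_DEBUG_4 helpers above implement a guard-pattern buffer: the payload lives in the middle third of an over-allocated area, the rest is filled with PATTERN | index, and any word that changes outside the payload exposes a DMA overrun. A self-contained sketch of the same idea, with the DMA engine replaced by a deliberately over-long memset:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#define PATTERN     0xa55a0000u
#define PAYLOAD_DW  128                 /* 512B payload in 32-bit words */
#define TOTAL_DW    (PAYLOAD_DW * 3)    /* guard region before and after */

/* Fill the whole area with a recognizable pattern; the payload sits in the
 * middle third, so any overrun spills into a guard word we can spot later. */
static void guard_init(uint32_t *buf)
{
        for (int i = 0; i < TOTAL_DW; i++)
                buf[i] = PATTERN | (uint32_t)i;
}

static int guard_check(const uint32_t *buf)
{
        int bad = 0;

        for (int i = 0; i < TOTAL_DW; i++) {
                if (i >= PAYLOAD_DW && i < 2 * PAYLOAD_DW)
                        continue;       /* middle third is legitimately overwritten */
                if (buf[i] != (PATTERN | (uint32_t)i)) {
                        printf("guard word %d clobbered: %08x\n", i, buf[i]);
                        bad++;
                }
        }
        return bad;
}

int main(void)
{
        uint32_t *buf = malloc(TOTAL_DW * sizeof(*buf));

        if (!buf)
                return 1;
        guard_init(buf);
        /* Stand-in for the DMA engine: write one word past the payload. */
        memset(&buf[PAYLOAD_DW], 0, (PAYLOAD_DW + 1) * sizeof(uint32_t));
        printf("%d guard word(s) overwritten\n", guard_check(buf));
        free(buf);
        return 0;
}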
1575 +
1576 +static int brcmnand_EDU_posted_read_cache(struct mtd_info* mtd, 
1577 +        void* buffer, u_char* oobarea, loff_t offset)
1578  {
1579 +
1580 +       int ecc;
1581 +
1582         struct brcmnand_chip* chip = mtd->priv;
1583 -       uint32_t edu_err_status;
1584 +       loff_t sliceOffset = offset & (~ (mtd->eccsize - 1));
1585 +       int i, ret = 0;
1586         static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1587         uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
1588         u_char* p8 = (u_char*) p32;
1589 -       int ecc;
1590 -       int ret = 0, i;
1591 +       uint32_t EDU_ldw;
1592 +       uint32_t intr_status;
1593 +       unsigned long irqflags;
1594 +       int retries = 5;
1595 +       
1596 +int save_debug;
1597 +uint32_t edu_status;
1598  
1599 -       if (in_interrupt()) {
1600 -               printk(KERN_ERR "%s cannot be run in interrupt context\n", __FUNCTION__);
1601 -               BUG();
1602 +#ifdef EDU_DEBUG_2
1603 +u_char* save_buf = buffer;
1604 +#endif
1605 +
1606 +//if((offset >= (0x3a8148 & ~(0x1FF))) && (offset < ((0x3a8298+0x1F) & ~(0x1FF)))) gdebug=4;
1607 +//gdebug = 4;
1608 +if (gdebug > 3) {
1609 +printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__,  offset, buffer, oobarea);}
1610 +
1611 +#if 0 //def EDU_DEBUG_4
1612 +printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__,  offset, buffer, oobarea);
1613 +#endif
1614 +
1615 +
1616 +       if (unlikely(offset - sliceOffset)) {
1617 +               printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
1618 +                __FUNCTION__, offset, sliceOffset, mtd->eccsize);
1619 +               ret = -EINVAL;
1620 +               goto out;
1621         }
1622 -       if (intr_status & HIF_INTR2_EDU_ERR) {
1623 +
1624 +//#if 0 // Testing 1 2 3
1625 +       if (unlikely(!EDU_buffer_OK(buffer))) 
1626 +//#endif
1627 +       {
1628 +if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
1629 +               /* EDU does not work on non-aligned buffers */
1630 +               ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1631 +               return (ret);
1632 +       }
1633 +
1634 +       if (wr_preempt_en) {
1635 +               // local_irq_save(irqflags);
1636 +       }
1637 +
1638 +#if defined( EDU_DEBUG_2 ) 
1639 +       init_edu_buf();
1640 +
1641 +       buffer = edu_buf;
1642 +
1643 +#elif defined( EDU_DEBUG_4 ) 
1644 +       init_edu_buf();
1645 +       
1646 +#endif
1647 +
1648 +       intr_status = 0;
1649 +       do {
1650 +
1651 +               EDU_ldw =  chip->ctrl_writeAddr(chip, sliceOffset, 0);
1652 +               PLATFORM_IOFLUSH_WAR(); 
1653 +
1654 +               if (intr_status & HIF_INTR2_EBI_TIMEOUT) {
1655 +                       EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
1656 +               }
1657 +               intr_status = EDU_read(buffer, EDU_ldw);
1658 +
1659 +#if 0
1660 +if ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ) {
1661 +uint32_t rd_data = ISR_volatileRead(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_STATUS);
1662 +printk("%s: EDU_read returns error %08x , intr=%08x at offset %0llx\n", __FUNCTION__, intr_status, rd_data, offset);
1663 +}
1664 +#endif
1665 +       } while (retries-- > 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ));
1666 +
1667 +       if (retries <= 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT))) { // EBI Timeout
1668 +               // Use controller read
1669 +               printk("%s: EBI timeout, use controller read at offset %0llx\n", __FUNCTION__, offset);
1670 +               ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1671 +               return (ret); 
1672 +       }
1673 +
1674 +       else if (intr_status & HIF_INTR2_EDU_ERR) {
1675                 if (wr_preempt_en) {
1676                         //local_irq_restore(irqflags);
1677                 }
1678 -               edu_err_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS);
1679 +               edu_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS);
1680 +//if (edu_status == 0)
1681 +//     printk("+++++++++++ %s:offset=%0llx Intr=%08x but EDU_status=%08x, LKG=%0llx\n", __FUNCTION__, 
1682 +//             offset, intr_status, edu_status, gLastKnownGoodEcc);
1683  
1684 +
1685  /**** WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR WAR */
1686                 /* Do a dummy read on a known good ECC sector to clear error */
1687 -               if (edu_err_status) {
1688 -                       static uint8_t myBuf2[512+31];
1689 -                       // EDU aligned
1690 -                       uint8_t* tmpBuf = (uint8_t*)  ((((unsigned int) &myBuf2[0]) + 31) & (~31));
1691 -                       
1692 +               if (edu_status) {
1693 +                       static uint32_t tmpBuf[128];
1694                         // We start from the BBT, since these would (hopefully) always be good sectors.
1695                         loff_t tmpOffset = chip->chipSize - 512;
1696  
1697 +//printk("Handle HIF_INTR2_UNC_ERR: Step 1: @offset %0llx\n", offset);
1698 +//print_oobreg(chip);
1699 +
1700                         // First make sure that there is a last known good sector
1701                         while (gLastKnownGoodEcc == 0 && tmpOffset >= 0) {
1702                                 ret = brcmnand_ctrl_posted_read_cache(mtd, tmpBuf, NULL, tmpOffset);
1703 @@ -2502,21 +2225,22 @@
1704                                 uint32_t lkgs;
1705                                 // Clear the error condition
1706                                 //(void) brcmnand_EDU_posted_read_cache(mtd, tmpBuf, NULL, gLastKnownGoodEcc);
1707 +                               lkgs =  chip->ctrl_writeAddr(chip, gLastKnownGoodEcc, 0);
1708 +                               PLATFORM_IOFLUSH_WAR(); 
1709  
1710 -
1711                                  // Use Register Array
1712                                 // EDU_ldw = BCHP_PHYSICAL_OFFSET + BCHP_NAND_FLASH_CACHEi_ARRAY_BASE;
1713 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
1714 -                               // Reset EDU
1715 -                               ISR_push_request(mtd, tmpBuf, NULL, tmpOffset);
1716 -#else
1717 -                               lkgs =  chip->ctrl_writeAddr(chip, gLastKnownGoodEcc, 0);
1718 -                               PLATFORM_IOFLUSH_WAR(); 
1719                                 intr_status = EDU_read(buffer, lkgs);
1720 -#endif
1721 -
1722 +//printk("intr_status returns from dummy read at offset %0llx: %08x\n", gLastKnownGoodEcc, intr_status);
1723 +//printk("Handle HIF_INTR2_UNC_ERR: Step 2:\n");
1724 +//print_oobreg(chip);
1725                                 ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, p8, offset);
1726 -
1727 +//printk("Handle HIF_INTR2_UNC_ERR: Step 3:\n");
1728 +//print_oobreg(chip);
1729 +//if (oobarea) 
1730 +{
1731 +//     printk("Unc Error WAR OOB="); print_oobbuf(p8, 16);
1732 +}
1733                                 return ret;
1734                         }
1735                         // else there can be no workaround possible, use controller read
1736 @@ -2525,8 +2249,16 @@
1737                         }
1738                 }
1739  /**** ENDWAR ENDWAR ENDWAR ENDWAR */
1740 +
1741 +               // If error was not due to UNC or COR errors, or poll timeout, try the old-fashioned way
1742 +               //ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1743 +               //return (ret);
1744         }
1745                 
1746 +
1747 +//if (intr_status & HIF_INTR2_EDU_ERR)
1748 +//     printk("%s: EDU_read returns error at offset=%0llx, intr_status=%08x\n", __FUNCTION__, offset, intr_status);
1749 +
1750         /*
1751          * Wait for Controller ready, which indicates the OOB and buffer are ready to be read.
1752          */
1753 @@ -2563,7 +2295,7 @@
1754                         for (i = 0; i < 4; i++) {
1755                                 p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1756                         }
1757 -if (gdebug > 3) {printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf((u_char*) &p32[0], 16);}
1758 +if (gdebug > 3) {printk("SUCCESS: %s: offset=%0llx, oob=\n", __FUNCTION__, sliceOffset); print_oobbuf((u_char*) &p32[0], 16);}
1759                 }      
1760                 ret = 0;            // Success!
1761                 break;
1762 @@ -2571,7 +2303,9 @@
1763         case BRCMEDU_CORRECTABLE_ECC_ERROR:
1764                 /* FALLTHRU */                
1765        case BRCMNAND_CORRECTABLE_ECC_ERROR:
1766 -
1767 +{save_debug = gdebug;
1768 +//gdebug = 4;
1769 +//edu_debug = 4;
1770  printk("+++++++++++++++ CORRECTABLE_ECC: offset=%0llx  ++++++++++++++++++++\n", offset);
1771                 // Have to manually copy.  EDU drops the buffer on error - even correctable errors
1772                 if (buffer) {
1773 @@ -2584,7 +2318,7 @@
1774                         for (i = 0; i < 4; i++) {
1775                                 p32[i] =  be32_to_cpu (chip->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4));
1776                         }
1777 -if (gdebug > 3) {printk("CORRECTABLE: %s: offset=%0llx, oob=\n", __FUNCTION__, offset); print_oobbuf(oobarea, 16);}
1778 +if (gdebug > 3) {printk("CORRECTABLE: %s: offset=%0llx, oob=\n", __FUNCTION__, sliceOffset); print_oobbuf(oobarea, 16);}
1779                 }
1780  
1781  #ifndef DEBUG_HW_ECC // Comment out for debugging
1782 @@ -2604,7 +2338,7 @@
1783                                 }
1784                         }
1785                 }
1786 -       
1787 +gdebug = edu_debug = save_debug;}
1788              break;
1789  
1790         case BRCMEDU_UNCORRECTABLE_ECC_ERROR:
1791 @@ -2612,13 +2346,16 @@
1792                 {
1793                         int valid;
1794                 
1795 -
1796 +save_debug = gdebug;
1797 +//gdebug = 4;
1798 +//edu_debug = 4;
1799 +//
1800  PRINTK("************* UNCORRECTABLE_ECC (offset=%0llx) ********************\n", offset);
1801                         /*
1802                          * THT: Since EDU does not handle OOB area, unlike the UNC ERR case of the ctrl read,
1803                          * we have to explicitly read the OOB, before calling the WAR routine.
1804                          */
1805 -                       chip->ctrl_writeAddr(chip, offset, 0);
1806 +                       chip->ctrl_writeAddr(chip, sliceOffset, 0);
1807                         chip->ctrl_write(BCHP_NAND_CMD_START, OP_SPARE_AREA_READ);
1808  
1809                         // Wait until spare area is filled up
1810 @@ -2635,6 +2372,8 @@
1811  printk("************* UNCORRECTABLE_ECC (offset=%0llx) valid!=0 ********************\n", offset);
1812                                 ret = -EBADMSG;
1813                         }
1814 +if (!ret)
1815 +{gdebug = edu_debug = save_debug;}
1816                 }
1817                 break;
1818                 
1819 @@ -2661,126 +2400,73 @@
1820      
1821  out:
1822  
1823 -
1824 -//gdebug=0;
1825 -    return ret;
1826 -}
1827 -
1828 -
1829 -  #ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
1830 -/**
1831 - * brcmnand_posted_read_cache - [BrcmNAND Interface] Read the 512B cache area
1832 - * Assuming brcmnand_get_device() has been called to obtain exclusive lock
1833 - * @param mtd        MTD data structure
1834 - * @param oobarea    Spare area, pass NULL if not interested
1835 - * @param buffer    the databuffer to put/get data, pass NULL if only spare area is wanted.
1836 - * @param offset    offset to read from or write to, must be 512B aligned.
1837 - * @param raw: Ignore BBT bytes when raw = 1
1838 - *
1839 - * Caller is responsible to pass a buffer that is
1840 - * (1) large enough for 512B for data and optionally an oobarea large enough for 16B.
1841 - * (2) 4-byte aligned.
1842 - *
1843 - * Read the cache area into buffer.  The size of the cache is mtd-->eccsize and is always 512B.
1844 - */
1845 -
1846 -
1847 -static int brcmnand_EDU_posted_read_cache(struct mtd_info* mtd, 
1848 -        void* buffer, u_char* oobarea, loff_t offset)
1849 +#if 0
1850  {
1851 +//if (!ret) 
1852 +       u_char edu_sw_ecc[4];
1853  
1854 -       //int ecc;
1855 +       debug_EDU_read(mtd, buffer, oobarea, offset, intr_status, edu_status, edu_sw_ecc);
1856  
1857 -       struct brcmnand_chip* chip = mtd->priv;
1858 -       loff_t sliceOffset = offset & (~ (mtd->eccsize - 1));
1859 -       int i, ret = 0;
1860 -       //static uint32_t oob0[4]; // Sparea Area to handle ECC workaround, aligned on DW boundary
1861 -       //uint32_t* p32 = (oobarea ?  (uint32_t*) oobarea :  (uint32_t*) &oob0[0]);
1862 -       //u_char* p8 = (u_char*) p32;
1863 -       uint32_t EDU_ldw;
1864 -       uint32_t intr_status;
1865 -       unsigned long irqflags;
1866 -       int retries = 5;
1867 -       
1868 -int save_debug;
1869 -uint32_t edu_status;
1870 -
1871 -#ifdef EDU_DEBUG_2
1872 -u_char* save_buf = buffer;
1873 +        printk("!!!!!!!!! RD: offset=%0llx ECC=%02x%02x%02x, OOB:",
1874 +offset, edu_sw_ecc[0], edu_sw_ecc[1], edu_sw_ecc[2]);
1875 +        print_oobbuf(oobarea, 16);
1876 +}
1877  #endif
1878  
1879 -//if((offset >= (0x3a8148 & ~(0x1FF))) && (offset < ((0x3a8298+0x1F) & ~(0x1FF)))) gdebug=4;
1880 -//gdebug = 4;
1881 -if (gdebug > 3) {
1882 -printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__,  offset, buffer, oobarea);}
1883 -
1884 -#if 0 //def EDU_DEBUG_4
1885 -printk("%s: offset=%0llx, buffer=%p, oobarea=%p\n", __FUNCTION__,  offset, buffer, oobarea);
1886 +#if 0
1887 +if (offset <= 0x3a3600 && (offset+512) > 0x3a3600) {
1888 +printk("@@@@@@@@@ Dump EDU Read around 0x3a3600:\n");
1889 +print_databuf(buffer, 512);print_oobbuf(p32, 16);
1890 +}
1891  #endif
1892  
1893 +#ifdef EDU_DEBUG_4
1894 +{
1895 +int ctrlret;
1896  
1897 -       if (unlikely(offset - sliceOffset)) {
1898 -               printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
1899 -                __FUNCTION__, offset, sliceOffset, mtd->eccsize);
1900 -               ret = -EINVAL;
1901 -               return (ret);
1902 +       ctrlret = brcmnand_ctrl_posted_read_cache(mtd, ctrl_buf, ctrl_oob, offset);
1903 +       //verify_edu_buf();
1904 +       // Compare buffer returned from EDU and Ctrl reads:
1905 +       if (0 != memcmp(ctrl_buf, buffer, 512)) {
1906 +printk("$$$$$$$$$$$$ Read buffer from Ctrl & EDU read-ops differ at offset %0llx, intr_status=%08x, ecc=%d\n", 
1907 +       offset, intr_status, ecc);
1908 +printk("$$$$$$$$$$$$ EDU Read:\n");
1909 +print_databuf(buffer, 512);
1910 +printk("------------ Ctrl Read: \n");
1911 +print_databuf(edu_buf, 512);
1912 +               BUG();
1913         }
1914 -
1915 -//#if 0 // Testing 1 2 3
1916 -       if (unlikely(!EDU_buffer_OK(buffer, EDU_READ))) 
1917 -//#endif
1918 +       //if (oobarea) 
1919         {
1920 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
1921 -               /* EDU does not work on non-aligned buffers */
1922 -               ret = brcmnand_ctrl_posted_read_cache(mtd, buffer, oobarea, offset);
1923 -               return (ret);
1924 +               if (0 != memcmp(p32, ctrl_oob, 16)) {
1925 +printk("########## Read OOB from Ctrl & EDU read-ops differ at offset %0llx, intr_status=%08x, ecc=%d\n", 
1926 +       offset,  intr_status, ecc);
1927 +printk("########## Ctrl OOB:\n");
1928 +print_oobbuf(ctrl_oob, 16);
1929 +printk("------------ EDU OOB: \n");
1930 +print_oobbuf(p32, 16);
1931 +/* Which one is correct?  Since the data buffers agree, use Hamming codes */
1932 +                       if (chip->ecclevel == BRCMNAND_ECC_HAMMING) 
1933 +                       {
1934 +                               unsigned char ecc1[3]; // SW ECC, manually calculated
1935 +                               brcmnand_Hamming_WAR(mtd, offset, buffer, &ctrl_oob[6], &ecc1[0]);
1936 +                               printk("Hamming ECC=%02x%02x%02x\n", ecc1[0], ecc1[1], ecc1[2]);
1937 +                       }
1938 +                       BUG();
1939 +               }
1940         }
1941 +}
1942 +#endif // Verify EDU on Read
1943  
1944 -       if (wr_preempt_en) {
1945 -               // local_irq_save(irqflags);
1946 -       }
1947 -
1948 -#if defined( EDU_DEBUG_2 ) 
1949 -       init_edu_buf();
1950 -
1951 -       buffer = edu_buf;
1952 -
1953 -#elif defined( EDU_DEBUG_4 )
1954 -       init_edu_buf();
1955 -       
1956 -#endif
1957 -
1958 -       intr_status = 0;
1959 -       do {
1960 -
1961 -               EDU_ldw =  chip->ctrl_writeAddr(chip, sliceOffset, 0);
1962 -               PLATFORM_IOFLUSH_WAR(); 
1963 -
1964 -               if (intr_status & HIF_INTR2_EBI_TIMEOUT) {
1965 -                       EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
1966 -               }
1967 -               intr_status = EDU_read(buffer, EDU_ldw);
1968 -               
1969 -       } while (retries-- > 0 && ((intr_status == ERESTARTSYS) || (intr_status & HIF_INTR2_EBI_TIMEOUT) ));
1970 -
1971 -
1972 -       ret = brcmnand_edu_read_completion(mtd, buffer, oobarea, offset, intr_status);
1973 -
1974 -//gdebug=0;
1975 +gdebug=0;
1976      return ret;
1977  }
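The read path above issues EDU_read() in a bounded do/while loop, retrying on ERESTARTSYS or an EBI timeout and falling back to the controller read once the retries are exhausted. A compact sketch of that retry-then-fallback shape, with hypothetical fast_read/slow_read stand-ins:

#include <stdio.h>

#define SIM_TIMEOUT 110         /* stands in for an EBI timeout status */

/* Hypothetical fast (DMA) and slow (PIO) paths for this sketch only. */
int fast_read(int attempt)
{
        return attempt < 3 ? SIM_TIMEOUT : 0;   /* time out three times, then succeed */
}

int slow_read(void)
{
        return 0;
}

/* Bounded retry, then fall back to the slow path: the same shape as the
 * do/while loop above that re-issues EDU_read() and drops back to the
 * controller read once the EBI timeout persists past the retry budget. */
int read_with_fallback(void)
{
        int retries = 5, attempt = 0, status;

        do {
                status = fast_read(attempt++);
        } while (retries-- > 0 && status == SIM_TIMEOUT);

        if (status == SIM_TIMEOUT) {
                printf("fast path kept timing out, using slow path\n");
                return slow_read();
        }
        return status;
}

int main(void)
{
        printf("read_with_fallback() = %d\n", read_with_fallback());
        return 0;
}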
1978  
1979  
1980 -
1981  static int (*brcmnand_posted_read_cache)(struct mtd_info*, 
1982                 void*, u_char*, loff_t) = brcmnand_EDU_posted_read_cache;
1983 -  
1984 -  #else /* Queue Mode */
1985 -static int (*brcmnand_posted_read_cache)(struct mtd_info*, 
1986 -               void*, u_char*, loff_t) = brcmnand_ctrl_posted_read_cache;
1987 -  #endif
1988  
1989 -#else 
1990 +#else
1991  static int (*brcmnand_posted_read_cache)(struct mtd_info*, 
1992                 void*, u_char*, loff_t) = brcmnand_ctrl_posted_read_cache;
1993  #endif
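brcmnand_posted_read_cache is bound at compile time to either the EDU or the controller implementation through a static function pointer, selected under CONFIG_MTD_BRCMNAND_EDU. A minimal sketch of that dispatch pattern with illustrative names:

#include <stdio.h>

/* Pick the implementation at build time, mirroring how the patch binds
 * brcmnand_posted_read_cache to either the EDU routine or the controller
 * routine under CONFIG_MTD_BRCMNAND_EDU.  Everything here is illustrative. */
#define USE_FAST_PATH 1

int fast_read_cache(void *buf) { (void)buf; return 0; }
int ctrl_read_cache(void *buf) { (void)buf; return 0; }

#if USE_FAST_PATH
static int (*posted_read_cache)(void *) = fast_read_cache;
#else
static int (*posted_read_cache)(void *) = ctrl_read_cache;
#endif

int main(void)
{
        char buf[512];

        /* Callers only see the function pointer, so the rest of the code is
         * unaware of which implementation was compiled in. */
        printf("posted_read_cache() = %d\n", posted_read_cache(buf));
        return 0;
}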
1994 @@ -2805,33 +2491,16 @@
1995         loff_t sliceOffset = offset & (~(mtd->eccsize - 1));
1996         int i, ret = 0, valid, done = 0;
1997         int retries = 5;
1998 -       //unsigned long irqflags;
1999 +       unsigned long irqflags;
2000         
2001  //char msg[20];
2002  
2003 -#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_3_0
2004 -       static uint8_t myBuf2[512+31]; // Place holder only.
2005 -       static uint8_t* myBuf = NULL;
2006 -
2007 -       /*
2008 -        * Force alignment on 32B boundary
2009 -        */
2010 -       if (!myBuf) {
2011 -               myBuf = (uint8_t*)  ((((unsigned int) &myBuf2[0]) + 31) & (~31));
2012 -       }
2013 -       
2014 -  #if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_3_0
2015 -       {
2016 -               // PR2516.  Not a very good WAR, but the affected chips (3548A0,7443A0) have been EOL'ed
2017 -               return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
2018 -       }
2019 -
2020 -  #else /* 3.1 or later */
2021 -       // If BCH codes, force full page read to activate ECC correction on OOB bytes.
2022 -       if (chip->ecclevel != BRCMNAND_ECC_HAMMING && chip->ecclevel != BRCMNAND_ECC_DISABLE) {
2023 -               return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
2024 -       }
2025 -  #endif
2026 +#if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_3_0
2027 +{
2028 +       // PR2516.  Not a very good WAR, but the affected chips (3548A0,7443A0) have been EOL'ed
2029 +       static uint32_t myBuf[128]; // Place holder only.
2030 +       return brcmnand_ctrl_posted_read_cache(mtd, (void*) myBuf, oobarea, offset);
2031 +}
2032  #endif
2033  
2034  if (gdebug > 3 ) PRINTK("->%s: offset=%0llx\n", __FUNCTION__, offset);
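The lines removed in the hunk above carved a 32-byte-aligned pointer out of an oversized static array with the classic (addr + 31) & ~31 mask. That trick in isolation, written with uintptr_t instead of the original unsigned int cast:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        static uint8_t raw[512 + 31];   /* 31 spare bytes guarantee an aligned window */
        uint8_t *aligned = (uint8_t *)(((uintptr_t)&raw[0] + 31) & ~(uintptr_t)31);

        printf("raw=%p aligned=%p (aligned %% 32 = %lu)\n",
               (void *)raw, (void *)aligned,
               (unsigned long)((uintptr_t)aligned % 32));
        return 0;
}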
2035 @@ -2921,151 +2590,6 @@
2036         return ret;
2037  }
2038  
2039 -
2040 -//#ifdef CONFIG_MTD_BRCMNAND_EDU
2041 -
2042 -//#define EDU_DEBUG_3
2043 -#undef EDU_DEBUG_3
2044 -
2045 -#if 0 //defined( EDU_DEBUG_3 ) || defined( EDU_DEBUG_5 ) || defined(BRCMNAND_WRITE_VERIFY )
2046 -
2047 -
2048 -/*
2049 - * Returns 0 on no errors.
2050 - * THis should never be called, because partial writes may screw up the verify-read.
2051 - */
2052 -static int edu_write_verify(struct mtd_info *mtd,
2053 -        const void* buffer, const u_char* oobarea, loff_t offset)
2054 -{
2055 -       struct brcmnand_chip* chip = mtd->priv;
2056 -       static uint8_t sw_ecc[4];
2057 -       static uint32_t read_oob[4];
2058 -       static uint8_t write_oob[16];
2059 -       uint8_t* oobpoi = (uint8_t*) &read_oob[0];
2060 -       int ret = 0;
2061 -
2062 -       // Dump the register, done immediately after EDU_Write returns
2063 -       // dump_nand_regs(chip, offset);
2064 -
2065 -       if ( chip->ecclevel != BRCMNAND_ECC_HAMMING) {
2066 -               // Read back the data, but first clear the internal cache first.
2067 -               debug_clear_ctrl_cache(mtd);
2068 -
2069 -               ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2070 -               if (ret) {
2071 -                       printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2072 -                       goto out;
2073 -               }
2074 -               if (0 != memcmp(buffer, edu_write_buf, 512)) {
2075 -                       printk("+++++++++++++++++++++++ %s: WRITE buffer differ with READ-Back buffer\n",
2076 -                       __FUNCTION__);
2077 -                       ret = (-1);
2078 -                       goto out;
2079 -               }
2080 -               if (oobarea) { /* For BCH, the ECC is at the end */
2081 -                       // Number of bytes to compare (with ECC bytes taken out)
2082 -                       int numFree = min(16, chip->eccOobSize - chip->eccbytes);
2083 -                       
2084 -                       if (memcmp(oobarea, oobpoi, numFree)) {
2085 -                               printk("+++++++++++++++++++++++ %s: BCH-%-d OOB comp failed, numFree=%d\n", 
2086 -                                       __FUNCTION__, chip->ecclevel, numFree);
2087 -                               printk("In OOB:\n"); print_oobbuf(oobarea, 16);
2088 -                               printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2089 -                               ret = (-2);
2090 -                               goto out;
2091 -                       }
2092 -               }
2093 -               return 0;
2094 -       }
2095 -       
2096 -       // Calculate the ECC
2097 -       // brcmnand_Hamming_ecc(buffer, sw_ecc);
2098 -
2099 -       // Read back the data, but first clear the internal cache first.
2100 -       debug_clear_ctrl_cache(mtd);
2101 -
2102 -in_verify = -1;                
2103 -       ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2104 -in_verify = 0;
2105 -
2106 -       if (ret) {
2107 -               printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2108 -               goto out;
2109 -       }
2110 -
2111 -#if 0
2112 -       if (sw_ecc[0] != oobpoi[6] || sw_ecc[1] != oobpoi[7] || sw_ecc[2] != oobpoi[8]) {
2113 -printk("+++++++++++++++++++++++ %s: SWECC=%02x%02x%02x ReadOOB=%02x%02x%02x, buffer=%p, offset=%0llx\n",
2114 -                       __FUNCTION__, 
2115 -                       sw_ecc[0], sw_ecc[1], sw_ecc[2], oobpoi[6], oobpoi[7], oobpoi[8], buffer, offset);
2116 -               
2117 -               ret = (-1);
2118 -               goto out;
2119 -       }
2120 -#endif
2121 -
2122 -       // Verify the OOB if not NULL
2123 -       if (oobarea) {
2124 -               //memcpy(write_oob, oobarea, 16);
2125 -               //write_oob[6] = sw_ecc[0];
2126 -               //write_oob[7] = sw_ecc[1];
2127 -               //write_oob[8] = sw_ecc[2];
2128 -               if (memcmp(oobarea, oobpoi, 6) || memcmp(&oobarea[9], &oobpoi[9],7)) {
2129 -                       printk("+++++++++++++++++++++++ %s: OOB comp Hamming failed\n", __FUNCTION__);
2130 -                       printk("In OOB:\n"); print_oobbuf(oobarea, 16);
2131 -                       printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2132 -                       ret = (-2);
2133 -                       goto out;
2134 -               }
2135 -       }
2136 -
2137 -out:
2138 -if (ret) {
2139 -       int i, j, k;
2140 -       uint8_t* writeBuf = (uint8_t*) buffer;
2141 -//for (i=0; i<2; i++) 
2142 -{
2143 -// Let user land completes its run to avoid garbled printout
2144 -//schedule();
2145 -for (j=0; j<512; j++) {
2146 -       if (writeBuf[j] != edu_write_buf[j]) {
2147 -               printk("Buffers differ at offset %04x\n", j);
2148 -               break;
2149 -       }
2150 -}
2151 -printk("$$$$$$$$$$$$$$$$$ Register dump:\n");
2152 -printk("\n");
2153 -printk("\n");
2154 -printk("\n");
2155 -printk("\n");
2156 -for (k=0; k<numDumps; k++) {
2157 -printk("\n");
2158 -printk("\n");
2159 -printk("$$$$$$$$$$$$$$$$$ Register dump snapshot #%d:\n", k+1);
2160 -print_dump_nand_regs(k);
2161 -printk("\n");
2162 -}
2163 -printk("\n");
2164 -printk("\n");
2165 -printk("EDU_write 99, ret=%d, offset=%0llx, buffer=%p\n", ret, offset, buffer);
2166 -printk("Write buffer:\n"); print_databuf(buffer, 512);
2167 -if (oobarea) { printk("Write OOB: "); print_oobbuf(oobarea, 512); }
2168 -printk("Read back buffer:\n"); print_databuf(edu_write_buf, 512);
2169 -if (oobarea) { printk("Read OOB: "); print_oobbuf(write_oob, 512); }
2170 -
2171 -//printk("$$$$$$$$$$$$$$$$$ Register dump:\n");
2172 -//print_dump_nand_regs();
2173 -}
2174 -}
2175 -       return ret;
2176 -}
2177 -
2178 -
2179 -#else
2180 -#define edu_write_verify(...) (0)
2181 -#endif
2182 -
2183 -
2184  /**
2185   * brcmnand_posted_write - [BrcmNAND Interface] Write a buffer to the flash cache
2186   * Assuming brcmnand_get_device() has been called to obtain exclusive lock
2187 @@ -3160,136 +2684,88 @@
2188  }
2189  
2190  
2191 -
2192  #ifdef CONFIG_MTD_BRCMNAND_EDU
2193 -   #ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
2194  
2195 -   /*
2196 -    * Performs WAR for queue-write. Currently, it is always called with needBBT=1
2197 -    * Runs in process context.
2198 -    * Return 0 on success, error codes on errors.
2199 -    */
2200 -int
2201 -brcmnand_edu_write_war(struct mtd_info *mtd,
2202 -        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, 
2203 -        int needBBT)
2204 -{
2205 -       struct brcmnand_chip* chip = mtd->priv;
2206 -       int ret = 0;
2207 +//#define EDU_DEBUG_3
2208 +#undef EDU_DEBUG_3
2209  
2210 +#ifdef EDU_DEBUG_3
2211  
2212 -       if (!(intr_status & HIF_INTR2_CTRL_READY)) {
2213 -               printk("%s: Impossible, ctrl-ready asserted in interrupt handler\n", __FUNCTION__);
2214 -               BUG();
2215 -       }
2216 +static uint8_t edu_write_buf[512];
2217  
2218 -       if (!needBBT) 
2219 -       {
2220 -               ret = 0;
2221 -       }
2222 -       else
2223 -       { // Need BBT
2224 -#if 1 //defined (ECC_CORRECTABLE_SIMULATION) || defined(ECC_UNCORRECTABLE_SIMULATION) || defined(WR_BADBLOCK_SIMULATION)
2225 -               printk("%s: Marking bad block @%0llx\n", __FUNCTION__, offset);
2226 -#endif            
2227 -               ret = chip->block_markbad(mtd, offset);
2228 -               ret = -EINVAL;
2229 -       }
2230 -
2231 -#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
2232 -//gdebug = 0;
2233 -       if (0 == ret) {
2234 -               if (edu_write_verify(mtd, buffer, oobarea, offset)) {
2235 -                       BUG();
2236 -               }
2237 -       }
2238 -
2239 -#endif
2240 -       return ret;
2241 -}
2242 -
2243 -// When buffer is nor aligned as per EDU requirement, use controller-write
2244 -static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
2245 -               const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache; 
2246 -
2247 -  #else //#ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
2248 -
2249 -/*
2250 - * Write completion after EDU_Read is called.
2251 - * Non-Queue mode
2252 - */
2253 -static int
2254 -brcmnand_edu_write_completion(struct mtd_info *mtd,
2255 -        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, uint32_t physAddr)
2256 +static int edu_write_verify(struct mtd_info *mtd,
2257 +        const void* buffer, const u_char* oobarea, loff_t offset)
2258  {
2259         struct brcmnand_chip* chip = mtd->priv;
2260 -       int comp;
2261 -       int needBBT;
2262 -       int ret;
2263 +       static uint8_t sw_ecc[4];
2264 +       static uint32_t read_oob[4];
2265 +       static uint8_t write_oob[16];
2266 +       uint8_t* oobpoi = (uint8_t*) &read_oob[0];
2267 +       int ret = 0;
2268  
2269 -
2270 -#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
2271 -       if (!(intr_status & HIF_INTR2_CTRL_READY)) {
2272 -               printk("%s: Impossible, ctrl-ready asserted in interrupt handler\n", __FUNCTION__);
2273 -               BUG();
2274 -       }
2275 -#else
2276 -       // Wait until flash is ready.  
2277 -       // Becareful here.  Since this can be called in interrupt context,
2278 -       // we cannot call sleep or schedule()
2279 -       comp = brcmnand_EDU_write_is_complete(mtd, &needBBT);
2280 -
2281 -       // Already done in interrupt handler
2282 -       (void) dma_unmap_single(NULL, physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
2283 -#endif
2284 -
2285 -       if (comp) 
2286 -       {
2287 -               if (!needBBT) 
2288 -               {
2289 -                       ret = 0;
2290 -                       goto out;
2291 +       if (chip->ecclevel != BRCMNAND_ECC_HAMMING) {
2292 +               ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2293 +               if (ret) {
2294 +                       printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2295 +                       return ret;
2296                 }
2297 -               else
2298 -               { // Need BBT
2299 -#if 1 //defined (ECC_CORRECTABLE_SIMULATION) || defined(ECC_UNCORRECTABLE_SIMULATION) || defined(WR_BADBLOCK_SIMULATION)
2300 -                       printk("%s: Marking bad block @%0llx\n", __FUNCTION__, offset);
2301 -#endif            
2302 -                       ret = chip->block_markbad(mtd, offset);
2303 -                       ret = -EINVAL;
2304 -                       //ret = -EINVAL;
2305 -                       goto out;
2306 +               if (0 != memcmp(buffer, edu_write_buf, 512)) {
2307 +                       printk("+++++++++++++++++++++++ %s: WRITE buffer differs from READ-back buffer\n",
2308 +                       __FUNCTION__);
2309 +                       return (-1);
2310                 }
2311 +               if (oobarea) {
2312 +                       if (memcmp(oobarea, oobpoi, 16)) {
2313 +                               printk("+++++++++++++++++++++++ %s: OOB comp failed\n", __FUNCTION__);
2314 +                               printk("In OOB:\n"); print_oobbuf(oobarea, 16);
2315 +                               printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2316 +                       }
2317 +               }
2318 +               return 0;
2319         }
2320 +       
2321 +       // Calculate the ECC
2322 +       brcmnand_Hamming_ecc(buffer, sw_ecc);
2323  
2324 -       //Write has timed out or read found bad block. TBD: Find out which is which
2325 -       printk(KERN_INFO "%s: Timeout at offset %0llx\n", __FUNCTION__, offset);
2326 -       // Marking bad block
2327 -       if (needBBT) {
2328 -               printk("%s: Marking bad block @%0llx\n", __FUNCTION__, offset);
2329 -    
2330 -               ret = chip->block_markbad(mtd, offset);
2331 -               ret = -EINVAL;
2332 -               //ret = -EINVAL;
2333 -               goto out;
2334 -       }               
2335 -       ret = -ETIMEDOUT;
2336 +       // Read back the data, but clear the internal cache first.
2337 +       debug_clear_ctrl_cache(mtd);
2338  
2339 -out:
2340 +in_verify = -1;                
2341 +       ret = brcmnand_ctrl_posted_read_cache(mtd, edu_write_buf, oobpoi, offset);
2342 +in_verify = 0;
2343  
2344 -#if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
2345 -//gdebug = 0;
2346 -       if (0 == ret) {
2347 -               if (edu_write_verify(mtd, buffer, oobarea, offset)) {
2348 -                       BUG();
2349 -               }
2350 -       }
2351 +       if (ret) {
2352 +               printk("+++++++++++++++++++++++ %s: Read Verify returns %d\n", __FUNCTION__, ret);
2353 +               return ret;
2354 +       }
2355  
2356 -#endif
2357 +       if (sw_ecc[0] != oobpoi[6] || sw_ecc[1] != oobpoi[7] || sw_ecc[2] != oobpoi[8]) {
2358 +               printk("+++++++++++++++++++++++ %s: SWECC=%02x%02x%02x ReadOOB=%02x%02x%02x\n",
2359 +                       __FUNCTION__, 
2360 +                       sw_ecc[0], sw_ecc[1], sw_ecc[2], oobpoi[6], oobpoi[7], oobpoi[8]);
2361 +               return (-1);
2362 +       }
2363 +
2364 +       // Verify the OOB if not NULL
2365 +       if (oobarea) {
2366 +               memcpy(write_oob, oobarea, 16);
2367 +               write_oob[6] = sw_ecc[0];
2368 +               write_oob[7] = sw_ecc[1];
2369 +               write_oob[8] = sw_ecc[2];
2370 +               if (memcmp(write_oob, oobpoi, 16)) {
2371 +                       printk("+++++++++++++++++++++++ %s: OOB comp failed\n", __FUNCTION__);
2372 +                       printk("In OOB:\n"); print_oobbuf(write_oob, 16);
2373 +                       printk("\nVerify OOB:\n"); print_oobbuf(oobpoi, 16);
2374 +               }
2375 +       }
2376         return ret;
2377  }
2378  
2379  
2380 +#else
2381 +#define edu_write_verify(...) (0)
2382 +#endif
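edu_write_verify() above checks a write by reading the sector back and comparing both data and OOB, recomputing the Hamming ECC for the OOB case. A self-contained sketch of the write-then-read-back comparison, using an in-memory sector instead of the flash controller; sim_write/sim_read are stand-ins, not driver calls:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SECTOR 512

/* In-memory stand-in for one flash sector so the example is self-contained;
 * a real verify would read back through the controller as edu_write_verify()
 * above does. */
static uint8_t sim_flash[SECTOR];

static void sim_write(const uint8_t *src) { memcpy(sim_flash, src, SECTOR); }
static void sim_read(uint8_t *dst)        { memcpy(dst, sim_flash, SECTOR); }

/* Write, read back, compare: returns 0 when the data survived intact. */
static int write_verify(const uint8_t *data)
{
        uint8_t readback[SECTOR];

        sim_write(data);
        sim_read(readback);
        if (memcmp(data, readback, SECTOR) != 0) {
                printf("write verify failed\n");
                return -1;
        }
        return 0;
}

int main(void)
{
        uint8_t page[SECTOR];

        memset(page, 0xa5, sizeof(page));
        printf("write_verify() = %d\n", write_verify(page));
        return 0;
}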
2383 +
2384  /**
2385   * brcmnand_posted_write - [BrcmNAND Interface] Write a buffer to the flash cache
2386   * Assuming brcmnand_get_device() has been called to obtain exclusive lock
2387 @@ -3307,14 +2783,12 @@
2388         uint32_t* p32;
2389         int i; 
2390         int ret;
2391 -       int comp = 0;
2392  
2393         struct brcmnand_chip* chip = mtd->priv;    
2394         int needBBT=0;
2395         loff_t sliceOffset = offset & (~ (mtd->eccsize - 1));
2396         uint32_t EDU_ldw;
2397         int retries = 5;
2398 -       uint32_t physAddr;
2399  
2400  #ifdef WR_BADBLOCK_SIMULATION
2401         unsigned long tmp = (unsigned long) offset;
2402 @@ -3333,7 +2807,7 @@
2403                 goto out;
2404         }
2405  
2406 -       if (unlikely(!EDU_buffer_OK(buffer, EDU_WRITE))) {
2407 +       if (unlikely(!EDU_buffer_OK(buffer))) {
2408                 // EDU requires the buffer to be DW-aligned
2409  PRINTK("%s: Buffer %p not suitable for EDU at %0llx, trying ctrl read op\n", __FUNCTION__, buffer, offset);
2410                 ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
2411 @@ -3362,26 +2836,23 @@
2412  
2413                 PLATFORM_IOFLUSH_WAR(); // Check if this line may be taken-out
2414  
2415 +       //chip->ctrl_write(BCHP_NAND_CMD_START, OP_PROGRAM_PAGE);
2416  
2417                 if (ret & HIF_INTR2_EBI_TIMEOUT) {
2418                         EDU_volatileWrite(EDU_BASE_ADDRESS + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EBI_TIMEOUT);
2419                 }
2420 -               ret = EDU_write(buffer, EDU_ldw, &physAddr);
2421 -
2422 +               ret = EDU_write(buffer, EDU_ldw);
2423                 if (ret) {
2424                         // Nothing we can do, because, unlike read op, where we can just call the traditional read,
2425                         // here we may need to erase the flash first before we can write again.
2426 -//printk("EDU_write returns %d, trying ctrl write \n", ret);
2427 -//                     ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
2428 +                       ret = brcmnand_ctrl_posted_write_cache(mtd, buffer, oobarea, offset);
2429                         goto out;
2430                 }
2431         
2432 -//printk("EDU50\n");
2433 +// printk("EDU50\n");
2434  
2435                 // Wait until flash is ready
2436 -               comp = brcmnand_EDU_write_is_complete(mtd, &needBBT);
2437 -
2438 -               (void) dma_unmap_single(NULL, physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
2439 +               ret = brcmnand_EDU_write_is_complete(mtd, &needBBT);
2440         }while (retries-- > 0 && ((ret == ERESTARTSYS) || (ret & HIF_INTR2_EBI_TIMEOUT)));
2441  
2442         if (retries <= 0 && ((ret == ERESTARTSYS) || (ret & HIF_INTR2_EBI_TIMEOUT))) { 
2443 @@ -3390,9 +2861,18 @@
2444                 goto out;
2445         }
2446  
2447 +#ifdef WR_BADBLOCK_SIMULATION
2448 +       if((tmp == wrBadBlockFailLocation) && (bScanBypass_badBlock == 0))
2449 +       {
2450 +               wrFailLocationOffset.s.high = 0;
2451 +               wrFailLocationOffset.s.low = wrBadBlockFailLocation;
2452 +               printk("Creating new bad block @ %0llx\n", EDU_sprintf(brcmNandMsg, wrFailLocationOffset.ll, this->xor_invert_val));
2453 +               needBBT = 1;
2454 +               ret = 1;
2455 +       }
2456 +#endif 
2457  
2458 -
2459 -       if (comp) 
2460 +       if (ret) 
2461         {
2462                 if (!needBBT) 
2463                 {
2464 @@ -3425,10 +2905,10 @@
2465         ret = -ETIMEDOUT;
2466  
2467  out:
2468 +// printk("EDU99\n");
2469 +//gdebug = 0;
2470  
2471 -
2472  #if defined(EDU_DEBUG_5) // || defined( CONFIG_MTD_BRCMNAND_VERIFY_WRITE )
2473 -//gdebug = 0;
2474         if (0 == ret) {
2475                 if (edu_write_verify(mtd, buffer, oobarea, offset)) {
2476                         BUG();
2477 @@ -3440,11 +2920,17 @@
2478      return ret;
2479  }
2480  
2481 +#if 1
2482 +
2483  static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
2484                 const void*, const u_char*, loff_t) = brcmnand_EDU_posted_write_cache; 
2485 -  #endif
2486 +#else
2487 +/* Testing 1 2 3, use controller write */
2488 +static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
2489 +               const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache;
2490 +#endif
2491  
2492 -#else /* No EDU */
2493 +#else
2494  static int (*brcmnand_posted_write_cache)(struct mtd_info*, 
2495                 const void*, const u_char*, loff_t) = brcmnand_ctrl_posted_write_cache;
2496  
2497 @@ -3564,7 +3050,7 @@
2498                         set_current_state(TASK_UNINTERRUPTIBLE);
2499                         add_wait_queue(&chip->wq, &wait);
2500                         spin_unlock(&chip->chip_lock);
2501 -                       if (!wr_preempt_en && !in_interrupt())
2502 +                       if (!wr_preempt_en)
2503                                 schedule();
2504                         remove_wait_queue(&chip->wq, &wait);
2505                 }
2506 @@ -3616,7 +3102,6 @@
2507  }
2508  
2509  
2510 -
2511  /**
2512   * brcmnand_read_page - {REPLACEABLE] hardware ecc based page read function
2513   * @mtd:       mtd info structure
2514 @@ -3722,7 +3207,7 @@
2515  #ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
2516  static int brcmnand_refresh_blk(struct mtd_info *mtd, loff_t from)
2517  {
2518 -       struct brcmnand_chip *chip = mtd->priv;
2519 +       struct brcmnand_chip *this = mtd->priv;
2520         int i, j, k, numpages, ret, count = 0, nonecccount = 0;
2521         uint8_t *blk_buf;       /* Store one block of data (including OOB) */
2522         unsigned int  pg_idx, oob_idx;
2523 @@ -3737,9 +3222,9 @@
2524  
2525  
2526  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
2527 -       chip->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
2528 +       this->ctrl_write(BCHP_NAND_ECC_CORR_EXT_ADDR, 0);
2529  #endif
2530 -       chip->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
2531 +       this->ctrl_write(BCHP_NAND_ECC_CORR_ADDR, 0);
2532  
2533         DEBUG(MTD_DEBUG_LEVEL3, "Inside %s: from=%0llx\n", __FUNCTION__, from);
2534         printk(KERN_INFO "%s: Performing block refresh for correctable ECC error at %0llx\n",
2535 @@ -3747,9 +3232,9 @@
2536         pg_idx = 0;
2537         oob_idx = mtd->writesize;
2538         numpages = mtd->erasesize/mtd->writesize;
2539 -       block_size = (1 << chip->erase_shift);
2540 +       block_size = (1 << this->erase_shift);
2541         blkbegin = (from & (~(mtd->erasesize-1)));
2542 -       realpage = blkbegin >> chip->page_shift;
2543 +       realpage = blkbegin >> this->page_shift;
2544  
2545  #ifdef CONFIG_MTD_BRCMNAND_EDU
2546         if (!gblk_buf) {
2547 @@ -3777,7 +3262,7 @@
2548         /* Read an entire block */
2549         brcmnand_get_device(mtd, FL_READING);
2550         for (i = 0; i < numpages; i++) {
2551 -               ret = chip->read_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2552 +               ret = brcmnand_read_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2553                 if (ret < 0) {
2554  #ifndef CONFIG_MTD_BRCMNAND_EDU
2555                         BRCMNAND_free(blk_buf);
2556 @@ -3795,7 +3280,7 @@
2557         if (unlikely(gdebug > 0)) {
2558                 printk("---> %s:  Read -> erase\n", __FUNCTION__);
2559         }
2560 -       chip->state = FL_ERASING;
2561 +       this->state = FL_ERASING;
2562  
2563         /* Erase the block */
2564         instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
2565 @@ -3813,7 +3298,7 @@
2566         instr->addr = blkbegin;
2567         instr->len = mtd->erasesize;
2568         if (unlikely(gdebug > 0)) {
2569 -               printk("DEBUG -> erasing %0llx, %x %d\n",instr->addr, instr->len, chip->state);
2570 +               printk("DEBUG -> erasing %0llx, %x %d\n",instr->addr, instr->len, this->state);
2571         }
2572         ret = brcmnand_erase_nolock(mtd, instr, 0);
2573         if (ret) {
2574 @@ -3831,12 +3316,12 @@
2575         /* Write the entire block */
2576         pg_idx = 0;
2577         oob_idx = mtd->writesize;
2578 -       realpage = blkbegin >> chip->page_shift;
2579 +       realpage = blkbegin >> this->page_shift;
2580         if (unlikely(gdebug > 0)) {
2581 -               printk("---> %s: Erase -> write ... %d\n", __FUNCTION__, chip->state);
2582 +               printk("---> %s: Erase -> write ... %d\n", __FUNCTION__, this->state);
2583         }
2584 -       oobinfo = chip->ecclayout;
2585 -       chip->state = FL_WRITING;
2586 +       oobinfo = this->ecclayout;
2587 +       this->state = FL_WRITING;
2588         for (i = 0; i < numpages; i++) {
2589                 /* Avoid writing empty pages */
2590                 count = 0;
2591 @@ -3858,7 +3343,7 @@
2592                 }
2593                 /* Skip this page, but write the OOB */
2594                 if (count == j && nonecccount != k) {
2595 -                       ret = chip->write_page_oob(mtd, blk_buf + oob_idx, realpage);
2596 +                       ret = this->write_page_oob(mtd, blk_buf + oob_idx, realpage);
2597                         if (ret) {
2598  #ifndef CONFIG_MTD_BRCMNAND_EDU
2599                                 BRCMNAND_free(blk_buf);
2600 @@ -3875,7 +3360,7 @@
2601                 for (j = 0; j < oobinfo->eccbytes; j++) {
2602                         oobptr[oobinfo->eccpos[j]] = 0xff;
2603                 }
2604 -               ret = chip->write_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2605 +               ret = this->write_page(mtd, blk_buf+pg_idx, blk_buf+oob_idx, realpage);
2606                 if (ret) {
2607  #ifndef CONFIG_MTD_BRCMNAND_EDU
2608                         BRCMNAND_free(blk_buf);
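brcmnand_refresh_blk() handles a correctable ECC error by reading the whole block into RAM, erasing it, and rewriting only the pages that actually held data. A self-contained sketch of that read/erase/rewrite shape on an in-memory block; it skips OOB handling and all names are illustrative:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE  512
#define PAGES      4
#define BLOCK_SIZE (PAGE_SIZE * PAGES)

static uint8_t sim_block[BLOCK_SIZE];

static int page_is_erased(const uint8_t *page)
{
        for (int i = 0; i < PAGE_SIZE; i++)
                if (page[i] != 0xff)
                        return 0;
        return 1;
}

/* Read the whole block into RAM, erase it, then rewrite only the pages that
 * held data: the same read/erase/rewrite shape as the refresh routine above,
 * minus OOB handling, on an in-memory block so it stays self-contained. */
static void refresh_block(void)
{
        uint8_t copy[BLOCK_SIZE];
        int rewritten = 0;

        memcpy(copy, sim_block, BLOCK_SIZE);            /* read */
        memset(sim_block, 0xff, BLOCK_SIZE);            /* erase */
        for (int p = 0; p < PAGES; p++) {               /* rewrite non-empty pages */
                uint8_t *page = &copy[p * PAGE_SIZE];

                if (page_is_erased(page))
                        continue;
                memcpy(&sim_block[p * PAGE_SIZE], page, PAGE_SIZE);
                rewritten++;
        }
        printf("refreshed block, rewrote %d of %d pages\n", rewritten, PAGES);
}

int main(void)
{
        memset(sim_block, 0xff, BLOCK_SIZE);
        memset(&sim_block[PAGE_SIZE], 0x42, PAGE_SIZE); /* one programmed page */
        refresh_block();
        return 0;
}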
2609 @@ -3900,463 +3385,7 @@
2610  #endif
2611  
2612  
2613 -#ifdef CONFIG_MTD_BRCMNAND_USE_ISR
2614 -/*
2615 - * EDU ISR Implementation
2616 - */
2617 -
2618 -
2619 -/*
2620 - * Submit the read op, then return immediately, without waiting for completion.
2621 - * Assuming queue lock held (with interrupt disable).
2622 - */
2623 -static void 
2624 -EDU_submit_read(eduIsrNode_t* req)
2625 -{
2626 -       struct brcmnand_chip* chip = (struct brcmnand_chip*) req->mtd->priv;
2627 -       uint32_t edu_status;
2628 -       
2629 -       // THT: TBD: Need to adjust for cache line size here, especially on 7420.
2630 -       req->physAddr = dma_map_single(NULL, req->buffer, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
2631 -
2632 -if (edu_debug) PRINTK("%s: vBuff: %p physDev: %08x, PA=%08x\n", __FUNCTION__,
2633 -req->buffer, external_physical_device_address, phys_mem);
2634 -
2635 -       spin_lock(&req->lock);
2636 -
2637 -       req->edu_ldw =  chip->ctrl_writeAddr(chip, req->offset, 0);
2638 -       PLATFORM_IOFLUSH_WAR(); 
2639 -
2640 -       //req->cmd = EDU_READ;
2641 -       req->opComplete = ISR_OP_SUBMITTED;
2642 -       req->status = 0;
2643 -
2644 -       // We must also wait for Ctlr_Ready, otherwise the OOB is not correct, since we read the OOB bytes off the controller
2645 -
2646 -       req->mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
2647 -       req->expect = HIF_INTR2_EDU_DONE;
2648 -       // On error we also want Ctrlr-Ready because for COR ERR, the Hamming WAR depends on the OOB bytes.
2649 -       req->error = HIF_INTR2_EDU_ERR;
2650 -       req->intr = HIF_INTR2_EDU_DONE_MASK;
2651 -       req->expired = jiffies + 3*HZ;
2652 -
2653 -       edu_status = EDU_volatileRead(EDU_BASE_ADDRESS+EDU_STATUS);
2654 -       // Enable HIF_INTR2 only when we submit the first job in double buffering scheme
2655 -       if (0 == (edu_status & BCHP_EDU_STATUS_Active_MASK)) {
2656 -               ISR_enable_irq(req);
2657 -       }
2658 -
2659 -        //EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
2660 -       EDU_reset_done();
2661 -
2662 -       EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_ERR_STATUS, 0x00000000);
2663 -        
2664 -       EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_LENGTH, EDU_LENGTH_VALUE);
2665 -
2666 -       EDU_waitForNoPendingAndActiveBit();
2667 -
2668 -       EDU_issue_command(req->physAddr , req->edu_ldw, EDU_READ);
2669 -
2670 -       spin_unlock(&req->lock);
2671 -       return;
2672 -
2673 -} 
2674 -
2675 -int EDU_submit_write(eduIsrNode_t* req)
2676 -{
2677 -       struct brcmnand_chip* chip = (struct brcmnand_chip*) req->mtd->priv;
2678 -       uint32_t edu_status;
2679 -       uint32_t* p32;
2680 -       int i;
2681 -
2682 -       spin_lock(&req->lock);
2683 -       // EDU is not a PCI device
2684 -       // THT: TBD: Need to adjust for cache line size here, especially on 7420.
2685 -       req->physAddr  = dma_map_single(NULL, req->buffer, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
2686 -
2687 -       if (!(req->physAddr)) {
2688 -               spin_unlock(&req->lock);
2689 -               return (-1);
2690 -       }
2691 -
2692 -
2693 -       req->edu_ldw = chip->ctrl_writeAddr(chip, req->offset, 0);
2694 -
2695 -
2696 -       if (req->oobarea) {
2697 -               p32 = (uint32_t*) req->oobarea;
2698 -if (gdebug) {printk("%s: oob=\n", __FUNCTION__); print_oobbuf(req->oobarea, 16);}
2699 -       }
2700 -       else {
2701 -               // Fill with 0xFF if don't want to change OOB
2702 -               p32 = (uint32_t*) &ffchars[0];
2703 -       }
2704 -
2705 -// printk("EDU40\n");
2706 -       for (i = 0; i < 4; i++) {
2707 -               chip->ctrl_write(BCHP_NAND_SPARE_AREA_WRITE_OFS_0 + i*4, cpu_to_be32(p32[i]));
2708 -       }
2709 -
2710 -       PLATFORM_IOFLUSH_WAR(); // Check if this line may be taken-out
2711 -       
2712 -       /*
2713 -        * Enable L2 Interrupt
2714 -        */
2715 -       //req->cmd = EDU_WRITE;
2716 -       req->opComplete = ISR_OP_SUBMITTED;
2717 -       req->status = 0;
2718 -       
2719 -       /* On write we wait for both DMA done|error and Flash Status */
2720 -       req->mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
2721 -       req->expect = HIF_INTR2_EDU_DONE;
2722 -       req->error = HIF_INTR2_EDU_ERR;
2723 -       req->intr = HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY;
2724 -
2725 -       
2726 -       ISR_enable_irq(req);
2727 -
2728 -       //EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000); 
2729 -       EDU_reset_done();
2730 -       EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_ERR_STATUS, 0x00000000); 
2731 -
2732 -       EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_LENGTH, EDU_LENGTH_VALUE);
2733 -
2734 -       EDU_issue_command(req->physAddr, req->edu_ldw, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
2735 -       spin_unlock(&req->lock);
2736 -       return 0;
2737 -}
2738 -
2739 -
2740 -/*
2741 - * Submit the first entry that is in queued state,
2742 - * assuming queue lock has been held by caller.
2743 - * 
2744 - * @doubleBuffering indicates whether we need to submit just 1 job or until EDU is full (double buffering)
2745 - * Return the number of job submitted (either 1 or zero), as we don't support doublebuffering yet.
2746 - *
2747 - * In current version (v3.3 controller), since EDU only have 1 register for EDU_ERR_STATUS,
2748 - * we can't really do double-buffering without losing the returned status of the previous read-op.
2749 - */
2750 -int
2751 -brcmnand_isr_submit_job(void)
2752 -{
2753 -       uint32_t edu_pending;
2754 -       eduIsrNode_t* req;
2755 -       //struct list_head* node;
2756 -       int numReq = 0;
2757 -
2758 -//printk("-->%s\n", __FUNCTION__);
2759 -//ISR_print_queue();
2760 -
2761 -       list_for_each_entry(req, &gJobQ.jobQ, list) {
2762 -               //req = container_of(node, eduIsrNode_t, list);
2763 -               switch (req->opComplete) {
2764 -               case ISR_OP_QUEUED:
2765 -                       edu_pending = EDU_volatileRead(EDU_BASE_ADDRESS  + EDU_STATUS); 
2766 -                       if (!(BCHP_EDU_STATUS_Pending_MASK & edu_pending)) {
2767 -                               if (gJobQ.cmd == EDU_READ) {
2768 -                                       EDU_submit_read(req);
2769 -                               }
2770 -                               else if (gJobQ.cmd == EDU_WRITE) {
2771 -                                       EDU_submit_write(req);
2772 -                               }
2773 -                               else {
2774 -                                       printk("%s: Invalid op\n", __FUNCTION__);
2775 -                                       BUG();
2776 -                               }
2777 -                               numReq++;
2778 -#ifdef EDU_DOUBLE_BUFFER_READ
2779 -                               if (/*doubleBuffering &&*/ numReq < 2) {
2780 -                                       continue;
2781 -                               }
2782 -#endif
2783 -                       }
2784 -PRINTK("<-- %s: numReq=%d\n", __FUNCTION__, numReq);
2785 -                       return numReq; 
2786 -                       
2787 -               case ISR_OP_COMPLETED:
2788 -               case ISR_OP_SUBMITTED:
2789 -               case ISR_OP_NEED_WAR:
2790 -               case ISR_OP_TIMEDOUT:
2791 -                       /* next entry */
2792 -                       continue;
2793 -               }
2794 -       }
2795 -PRINTK("<-- %s: numReq=%d\n", __FUNCTION__, numReq);
2796 -       return numReq;
2797 -}
2798 -
2799 -/*
2800 - * Queue the entire page, then wait for completion
2801 - */
2802 -static int
2803 -brcmnand_isr_read_page(struct mtd_info *mtd,
2804 -                               uint8_t *outp_buf, uint8_t* outp_oob, uint64_t page)
2805 -{
2806 -       struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
2807 -       int eccstep;
2808 -       int dataRead = 0;
2809 -       int oobRead = 0;
2810 -       int ret = 0;
2811 -       uint64_t offset = ((uint64_t) page) << chip->page_shift;
2812 -       uint32_t edu_pending;
2813 -       int submitted = 0;
2814 -       unsigned long flags;
2815 -
2816 -//if (1/* (int) offset <= 0x2000 /*gdebug > 3 */) {
2817 -//printk("-->%s, offset=%08x\n", __FUNCTION__, (uint32_t) offset);}
2818 -if (gdebug > 3 ) {
2819 -printk("-->%s, page=%0llx, buffer=%p\n", __FUNCTION__, page, outp_buf);}
2820 -
2821 -
2822 -#if 0 // No need to check, we are aligned on a page
2823 -       if (unlikely(offset - sliceOffset)) {
2824 -               printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
2825 -                __FUNCTION__, offset, sliceOffset, mtd->eccsize);
2826 -               ret = -EINVAL;
2827 -               goto out;
2828 -       }
2829 -#endif
2830 -
2831 -
2832 -       if (unlikely(!EDU_buffer_OK(outp_buf, EDU_READ))) 
2833 -       {
2834 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
2835 -               /* EDU does not work on non-aligned buffers */
2836 -               ret = brcmnand_read_page(mtd, outp_buf, outp_oob, page);
2837 -               return (ret);
2838 -       }
2839 -
2840 -       chip->pagebuf = page;
2841 -
2842 -       spin_lock_irqsave(&gJobQ.lock, flags);
2843 -       if (!list_empty(&gJobQ.jobQ)) {
2844 -               printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
2845 -//ISR_print_queue();
2846 -               BUG();
2847 -       }
2848 -       gJobQ.cmd = EDU_READ;
2849 -       gJobQ.needWakeUp = 0;
2850 -       
2851 -       for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
2852 -               eduIsrNode_t* req;
2853 -               /*
2854 -                * Queue the 512B sector read, then read the EDU pending bit, 
2855 -                * and issue read command, if EDU is available for read.
2856 -                */
2857 -               req = ISR_queue_read_request(mtd, &outp_buf[dataRead], 
2858 -                                       outp_oob ? &outp_oob[oobRead] : NULL, 
2859 -                                       offset + dataRead);
2860 -                               
2861 -               dataRead += chip->eccsize;
2862 -               oobRead += chip->eccOobSize;
2863 -       }
2864 -       //BUG_ON(submitted != 1);
2865 -       
2866 -       
2867 -
2868 -       /* Kick start it.  The ISR will submit the next job */
2869 -       if (!submitted) {
2870 -               submitted = brcmnand_isr_submit_job();
2871 -       }
2872 -       
2873 -       while (!list_empty(&gJobQ.jobQ)) {
2874 -               spin_unlock_irqrestore(&gJobQ.lock, flags);
2875 -               ret = ISR_wait_for_queue_completion();
2876 -               spin_lock_irqsave(&gJobQ.lock, flags);
2877 -       }
2878 -       spin_unlock_irqrestore(&gJobQ.lock, flags);
2879 -       return ret;
2880 -}
2881 -
2882 -
2883 -/*
2884 - * Queue several pages for small page SLC, then wait for completion,
2885 - * assuming that 
2886 - * (1) offset is aligned on a 512B boundary
2887 - * (2) that outp_buf is aligned on a 32B boundary.
2888 - * (3) Not in raw mode
2889 - * This routine only works when ECC-size = Page-Size (Small SLC flashes), and relies on the fact
2890 - * that the internal buffer can hold several data+OOB buffers for several small pages at once.
2891 - *
2892 - * The OOB are read into chip->buffers->OOB.
2893 - * The Queue Size and chip->buffers->oob are chosen such that the OOB
2894 - * will all fit inside the buffers.
2895 - * After a batch of jobs is completed, the OOB is then copied to the output OOB parameter.
2896 - * To keep it simple stupid, this routine cannot handle Raw mode Read.
2897 - *
2898 - * Arguments:
2899 - * @mtd:               MTD handle
2900 - * @outp_buf           Data buffer, passed from file system driver
2901 - * @inoutpp_oob        Address of OOB buffer, passed INOUT from file system driver
2902 - * @startPage  page 0 of batch
2903 - * @numPages   nbr of pages in batch
2904 - * @ops                        MTD ops from file system driver.  We only look at the OOB mode (raw vs auto vs inplace)
2905 - */
2906 -static int
2907 -brcmnand_isr_read_pages(struct mtd_info *mtd,
2908 -                               uint8_t *outp_buf, uint8_t** inoutpp_oob, uint64_t startPage, int numPages,
2909 -                               struct mtd_oob_ops *ops)
2910 -{
2911 -       struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
2912 -       int eccstep;
2913 -       int dataRead = 0;
2914 -       int oobRead = 0;
2915 -       int ret = 0;
2916 -       uint64_t offset = ((uint64_t) startPage) << chip->page_shift;
2917 -       uint32_t edu_pending;
2918 -       int submitted = 0;
2919 -       unsigned long flags;
2920 -       int page;
2921 -       u_char* oob = inoutpp_oob ? *inoutpp_oob : NULL;
2922 -       u_char* oobpoi = NULL;
2923 -       u_char* buf = outp_buf;
2924 -
2925 -
2926 -       /* Paranoia */
2927 -       if (chip->pageSize != chip->eccsize) {
2928 -               printk("%s: Can only be called on small page flash\n", __FUNCTION__);
2929 -               BUG();
2930 -       }
2931 -
2932 -       if (ops->mode == MTD_OOB_RAW) {
2933 -               printk("%s: Can only be called when not in RAW mode\n", __FUNCTION__);
2934 -               BUG();
2935 -       }
2936 -#ifdef DEBUG_ISR
2937 -printk("-->%s: mtd=%p, buf=%p, &oob=%p, oob=%p\n", __FUNCTION__, 
2938 -mtd, outp_buf, inoutpp_oob, inoutpp_oob? *inoutpp_oob: NULL);
2939 -#endif 
2940 -
2941 -       spin_lock_irqsave(&gJobQ.lock, flags);
2942 -       if (!list_empty(&gJobQ.jobQ)) {
2943 -               printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
2944 -//ISR_print_queue();
2945 -               BUG();
2946 -       }
2947 -       gJobQ.cmd = EDU_READ;
2948 -       gJobQ.needWakeUp = 0;
2949 -
2950 -       if (inoutpp_oob && *inoutpp_oob) {
2951 -               // In batch mode, read OOB into internal OOB buffer first.
2952 -               // This pointer will be advanced because oob_transfer depends on it.
2953 -               chip->oob_poi= BRCMNAND_OOBBUF(chip->buffers);
2954 -               oobpoi = chip->oob_poi; // This pointer remains fixed
2955 -       }
2956 -//gdebug=4;    
2957 -       for (page = 0; page < numPages && ret == 0; page++) {
2958 -               eduIsrNode_t* req;
2959 -
2960 -               req = ISR_queue_read_request(mtd, buf, 
2961 -                                       (inoutpp_oob && *inoutpp_oob) ? &oobpoi[oobRead] : NULL, 
2962 -                                       offset + dataRead);
2963 -                               
2964 -               dataRead += chip->eccsize;
2965 -               oobRead += chip->eccOobSize;
2966 -               buf += chip->eccsize;
2967 -       }
2968 -//gdebug=0;
2969 -       //BUG_ON(submitted != 1);
2970 -       
2971 -       /* Kick start it.  The ISR will submit the next job */
2972 -       if (!submitted) {
2973 -               submitted = brcmnand_isr_submit_job();
2974 -       }
2975 -       
2976 -       while (!list_empty(&gJobQ.jobQ)) {
2977 -               spin_unlock_irqrestore(&gJobQ.lock, flags);
2978 -               ret = ISR_wait_for_queue_completion();
2979 -               spin_lock_irqsave(&gJobQ.lock, flags);
2980 -       }
2981 -       spin_unlock_irqrestore(&gJobQ.lock, flags);
2982 -
2983 -       if (ret) {
2984 -               /* Abort, and return error to file system */
2985 -               return ret;
2986 -       }
2987 -
2988 -
2989 -       /* Format OOB, from chip->OOB buffers */
2990 -       
2991 -       buf = outp_buf;
2992 -       oob = (inoutpp_oob && *inoutpp_oob) ? *inoutpp_oob : NULL;
2993 -       dataRead = 0;
2994 -       oobRead = 0;
2995 -PRINTK("%s: B4 transfer OOB: buf=%08x, chip->buffers=%08x, offset=%08llx\n",
2996 -__FUNCTION__, (uint32_t) buf, chip->buffers, offset + dataRead);
2997 -
2998 -       // Reset oob_poi to beginning of OOB buffer.  
2999 -       // This will get advanced, cuz brcmnand_transfer_oob depends on it.
3000 -       chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3001 -       // oobpoi pointer does not change in for loop
3002 -       oobpoi = chip->oob_poi; 
3003 -
3004 -       for (page=0; page < numPages && ret == 0; page++) {
3005 -               u_char* newoob = NULL;
3006 -
3007 -#ifdef EDU_DEBUG_4 /* Read verify */
3008 -               ret = edu_read_verify(mtd, buf, 
3009 -                               (inoutpp_oob && *inoutpp_oob) ? &oobpoi[oobRead] : NULL, 
3010 -                               offset + dataRead);
3011 -       
3012 -               if (ret) BUG();
3013 -#endif
3014 -
3015 -               if (unlikely(inoutpp_oob && *inoutpp_oob)) {
3016 -                       newoob = brcmnand_transfer_oob(chip, oob, ops);
3017 -                       chip->oob_poi += chip->eccOobSize;
3018 -                       oob = newoob;
3019 -                       // oobpoi stays the same
3020 -               }
3021 -
3022 -               dataRead += chip->eccsize;
3023 -               oobRead += chip->eccOobSize;
3024 -               buf += chip->eccsize;
3025 -
3026 -       } /* for */
3027 -
3028 -       if (unlikely(inoutpp_oob && *inoutpp_oob)) {
3029 -               *inoutpp_oob = oob;
3030 -       }
3031 -
3032 -PRINTK("<-- %s\n", __FUNCTION__);
3033 -       
3034 -       return 0;
3035 -}
3036 -
3037 -
3038  /**
3039 - * brcmnand_isr_read_page_oob - {REPLACABLE] hardware ecc based page read function
3040 - * @mtd:       mtd info structure
3041 - * @chip:      nand chip info structure.  The OOB buf is stored in the oob_poi ptr on return
3042 - *
3043 - * Not for syndrome calculating ecc controllers which need a special oob layout
3044 - */
3045 -static int 
3046 -brcmnand_isr_read_page_oob(struct mtd_info *mtd, 
3047 -                               uint8_t* outp_oob, uint64_t  page)
3048 -{
3049 -       struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3050 -
3051 -       /*
3052 -        * if BCH codes, use full page read to activate ECC on OOB area
3053 -        */
3054 -       if (chip->ecclevel != BRCMNAND_ECC_HAMMING && chip->ecclevel != BRCMNAND_ECC_DISABLE) {
3055 -               return brcmnand_isr_read_page(mtd, chip->buffers->databuf, outp_oob, page);
3056 -       }
3057 -       
3058 -       else {
3059 -               return brcmnand_read_page_oob(mtd, outp_oob, page);
3060 -       }
3061 -}
3062 -
3063 -
3064 -
3065 -
3066 -#endif
3067 -
3068 -
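For orientation, the ISR/EDU read helpers deleted above (brcmnand_isr_read_page(), brcmnand_isr_read_pages() and the submit helper) all follow one pattern: carve the page into 512-byte ECC steps, queue one EDU request per step, kick the queue once, then sleep until it drains. The following is only a standalone sketch of the slicing step, not driver code; the page and OOB geometry constants are illustrative assumptions.

/* Sketch (not part of the patch): splitting one NAND page into 512B ECC
 * steps the way the removed queueing loop did. */
#include <stdio.h>
#include <stdint.h>

#define ECC_STEP_SIZE    512   /* chip->eccsize in the removed code        */
#define ECC_OOB_PER_STEP 16    /* chip->eccOobSize, assumed for this sketch */

static void queue_page(uint64_t page_offset, int page_size)
{
    int data_off = 0, oob_off = 0;

    for (int step = 0; step < page_size / ECC_STEP_SIZE; step++) {
        /* One request per 512B slice; the driver then kicks the EDU once
         * and waits until the whole job queue has drained. */
        printf("queue: flash@%#llx data+%d oob+%d\n",
               (unsigned long long)(page_offset + data_off),
               data_off, oob_off);
        data_off += ECC_STEP_SIZE;
        oob_off  += ECC_OOB_PER_STEP;
    }
}

int main(void)
{
    queue_page(0x100000, 2048);   /* one 2KB page at 1MB, purely illustrative */
    return 0;
}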
3069 -/**
3070   * brcmnand_do_read_ops - [Internal] Read data with ECC
3071   *
3072   * @mtd:       MTD device structure
3073 @@ -4390,13 +3419,17 @@
3074         //int sndcmd = 1;
3075         int ret = 0;
3076         uint32_t readlen = ops->len;
3077 -       uint32_t oobread = 0;
3078         uint8_t *bufpoi, *oob, *buf;
3079 -       int numPages;
3080 -       int buffer_aligned = 0;
3081 -//int nonBatch = 0;
3082  
3083  
3084 +if (gdebug > 3 ) 
3085 +{
3086 +printk("-->%s, buf=%p, oob=%p, offset=%0llx, len=%d, end=%0llx\n", __FUNCTION__, 
3087 +       ops->datbuf, ops->oobbuf, from, readlen, from+readlen);
3088 +printk("chip->buffers=%p, chip->oob=%p\n", 
3089 +       chip->buffers, BRCMNAND_OOBBUF(chip->buffers));
3090 +}
3091 +
3092         stats = mtd->ecc_stats;
3093  
3094         // THT: BrcmNAND controller treats multiple chip as one logical chip.
3095 @@ -4407,7 +3440,6 @@
3096         //page = realpage & chip->pagemask;
3097  
3098         col = mtd64_ll_low(from & (mtd->writesize - 1));
3099 -       
3100  #ifndef EDU_DEBUG_1 
3101  /* Debugging 12/27/08 */
3102         chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3103 @@ -4419,91 +3451,38 @@
3104         buf = ops->datbuf;
3105         oob = ops->oobbuf;
3106  
3107 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE   
3108 -       /*
3109 -        * Group several pages for submission for small page NAND
3110 -        */
3111 -       if (chip->pageSize == chip->eccsize && ops->mode != MTD_OOB_RAW) {
3112 -               while(1) {
3113 -//nonBatch = 0;
3114 -                       bytes = min(mtd->writesize - col, readlen);
3115 -                       // (1) Writing partial or full page
3116 -                       aligned = (bytes == mtd->writesize);
3117 +       while(1) {
3118 +               bytes = min(mtd->writesize - col, readlen);
3119 +               aligned = (bytes == mtd->writesize);
3120  
3121 -                       // If writing full page, use user buffer, otherwise, internal buffer
3122 +               /* Is the current page in the buffer ? */
3123 +               if ( 1 /* (int64_t) realpage != chip->pagebuf */ || oob) {
3124 +#ifndef EDU_DEBUG_1
3125                         bufpoi = aligned ? buf : chip->buffers->databuf;
3126 -                       
3127 -                       // (2) Buffer satisfies 32B alignment required by EDU?
3128 -                       buffer_aligned = EDU_buffer_OK(bufpoi, EDU_READ);
3129 -
3130 -                       // (3) Batch mode if writing more than 1 pages.
3131 -                       numPages = min(MAX_JOB_QUEUE_SIZE, readlen>>chip->page_shift);
3132 -
3133 -                       // Only do Batch mode if all 3 conditions are satisfied.
3134 -                       if (!aligned || !buffer_aligned || numPages <= 1) {
3135 -                               /* Submit 1 page at a time */
3136 -
3137 -                               numPages = 1; // We count partial page read
3138 -                               ret = chip->read_page(mtd, bufpoi, chip->oob_poi, realpage);                            
3139 -
3140 -                               if (ret < 0)
3141 -                                       break;
3142 -
3143 -                               /* Transfer not aligned data */
3144 -                               if (!aligned) {
3145 -                                       chip->pagebuf = realpage;
3146 -                                       memcpy(buf, &bufpoi[col], bytes);
3147 -                               }
3148 -                               buf += bytes;
3149 -
3150 -                               if (unlikely(oob)) {
3151 -                                       /* if (ops->mode != MTD_OOB_RAW) */
3152 -                                       oob = brcmnand_transfer_oob(chip, oob, ops);
3153 -                                       
3154 -                               }
3155 -
3156 -                       }
3157 -                       else {
3158 -                               /* 
3159 -                                 * Batch job possible, all 3 conditions are met
3160 -                                 * bufpoi = Data buffer from FS driver
3161 -                                 * oob = OOB buffer from FS driver
3162 -                                 */    
3163 -                               bytes = numPages*mtd->writesize;
3164 -
3165 -                               ret = brcmnand_isr_read_pages(mtd, bufpoi, oob? &oob : NULL, realpage, numPages, ops);
3166 -
3167 -                               if (ret < 0)
3168 -                                       break;
3169 -
3170 -                               buf += bytes; /* Advance Read pointer */
3171 -
3172 -                       }
3173 -
3174 -
3175 -                       readlen -= bytes;
3176 -
3177 -                       if (!readlen)
3178 -                               break;
3179 -
3180 -                       /* For subsequent reads align to page boundary. */
3181 -                       col = 0;
3182 -                       /* Increment page address */
3183 -                       realpage += numPages;
3184 -               }
3185 -               goto out;       
3186 -       }
3187 -       else 
3188 +#else
3189 +/* EDU Testing */
3190 +                       aligned=0;
3191 +                       bufpoi = &debug_dbuf.databuf;
3192 +                       // rely on size of buffer to be 4096
3193 +                       memcpy(&bufpoi[mtd->writesize], debug_sig, 1+strlen(debug_sig));
3194  #endif
3195 -       {
3196 -               while(1) {
3197 -                       bytes = min(mtd->writesize - col, readlen);
3198 -                       aligned = (bytes == mtd->writesize);
3199 -                       
3200 -                       bufpoi = aligned ? buf : chip->buffers->databuf;
3201 +if (gdebug > 3 )  
3202 +       printk("%s: aligned=%d, buf=%p, bufpoi=%p, oob_poi=%p, bytes=%d, readlen=%d\n",
3203 +       __FUNCTION__, aligned, buf, bufpoi, chip->oob_poi, bytes, readlen);
3204  
3205 +//gdebug=4;
3206                         ret = chip->read_page(mtd, bufpoi, chip->oob_poi, realpage);
3207 +//gdebug=0;
3208 +#ifdef EDU_DEBUG_1
3209 +                       if (0 != strcmp(&bufpoi[mtd->writesize], debug_sig)) {
3210 +                               printk("$$$$$$$$$$$$$$ Memory smash at end of buffer at %0llx, expect=%s\n",
3211 +                                       from, debug_sig);
3212 +                               printk(".... found\n"); print_oobbuf(&bufpoi[mtd->writesize], 1+strlen(debug_sig));
3213 +                       }
3214 +                       if (buf) memcpy(buf, &bufpoi[col], bytes);
3215 +                       if (oob) memcpy(oob, chip->oob_poi, mtd->oobsize);
3216  
3217 +#endif
3218                         if (ret < 0)
3219                                 break;
3220  
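For reference, the batch-read path removed in this hunk only engaged when three conditions held at once: a full-page read, a buffer the EDU can DMA into (32-byte aligned), and more than one whole page still outstanding. Below is a hedged, standalone sketch of that decision; the MAX_JOB_QUEUE_SIZE value and edu_buffer_ok() are simplified stand-ins, not the driver's own definitions.

/* Sketch only (not applied by the patch): the three checks the removed
 * CONFIG_MTD_BRCMNAND_ISR_QUEUE read path used before batching pages. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_JOB_QUEUE_SIZE 8      /* assumed queue depth for illustration */

static bool edu_buffer_ok(const void *buf)
{
    /* EDU DMA requires a 32-byte aligned buffer. */
    return ((uintptr_t)buf & 0x1f) == 0;
}

/* Returns how many pages to submit in one batch; 1 means fall back to the
 * single-page read_page() path. */
static int batch_pages(const void *buf, size_t bytes, size_t writesize,
                       size_t readlen, unsigned page_shift)
{
    bool aligned = (bytes == writesize);        /* full page requested */
    int pages = (int)(readlen >> page_shift);   /* whole pages left    */

    if (pages > MAX_JOB_QUEUE_SIZE)
        pages = MAX_JOB_QUEUE_SIZE;

    if (!aligned || !edu_buffer_ok(buf) || pages <= 1)
        return 1;

    return pages;
}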
3221 @@ -4524,25 +3503,45 @@
3222                                 }
3223                         }
3224  
3225 +#if 0
3226 +                       if (!(chip->options & NAND_NO_READRDY)) {
3227 +                               /*
3228 +                                * Apply delay or wait for ready/busy pin. Do
3229 +                                * this before the AUTOINCR check, so no
3230 +                                * problems arise if a chip which does auto
3231 +                                * increment is marked as NOAUTOINCR by the
3232 +                                * board driver.
3233 +                                */
3234 +                               if (!chip->dev_ready)
3235 +                                       udelay(chip->chip_delay);
3236 +                               else
3237 +                                       nand_wait_ready(mtd);
3238 +                       }
3239 +#endif
3240 +               } else {
3241 +printk("%s: Should never get here\n", __FUNCTION__);
3242 +BUG();
3243 +                       memcpy(buf, chip->buffers->databuf + col, bytes);
3244 +                       buf += bytes;
3245 +               }
3246  
3247 -                       readlen -= bytes;
3248 +               readlen -= bytes;
3249  
3250 -                       if (!readlen)
3251 -                               break;
3252 +               if (!readlen)
3253 +                       break;
3254  
3255 -                       /* For subsequent reads align to page boundary. */
3256 -                       col = 0;
3257 -                       /* Increment page address */
3258 -                       realpage++;
3259 +               /* For subsequent reads align to page boundary. */
3260 +               col = 0;
3261 +               /* Increment page address */
3262 +               realpage++;
3263  
3264 -               }
3265         }
3266 -       
3267 -out:
3268 -//gdebug=0;
3269  
3270         ops->retlen = ops->len - (size_t) readlen;
3271  
3272 +//#ifndef EDU_DEBUG_1
3273 +if (gdebug > 3 ) printk("<-- %s, ret=%d\n", __FUNCTION__, ret);
3274 +//#endif
3275  
3276         if (ret)
3277                 return ret;
3278 @@ -4577,7 +3576,7 @@
3279         DEBUG(MTD_DEBUG_LEVEL3, "%s: from=%0llx\n", __FUNCTION__, from);
3280  
3281  if (gdebug > 3 ) {
3282 -printk("-->%s, offset=%0llx, len=%08x\n", __FUNCTION__, from, len);}
3283 +printk("-->%s, offset=%0llx\n", __FUNCTION__, from);}
3284  
3285  
3286         /* Do not allow reads past end of device */
3287 @@ -4610,20 +3609,11 @@
3288                 if (likely(chip->cet)) {
3289                         if (likely(chip->cet->flags != BRCMNAND_CET_DISABLED)) {
3290                                 if (brcmnand_cet_update(mtd, from, &status) == 0) {
3291 -
3292 -/*
3293 - * PR57272: Provide workaround for BCH-n ECC HW bug when # error bits >= 4 
3294 - * We will not mark a block bad when the a correctable error already happened on the same page
3295 - */
3296 -#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_3_4
3297 -                                       ret = 0;
3298 -#else
3299                                         if (status) {
3300                                                 ret = -EUCLEAN;
3301                                         } else {
3302                                                 ret = 0;
3303                                         }
3304 -#endif
3305                                 }
3306                                 if (gdebug > 3) {
3307                                         printk(KERN_INFO "DEBUG -> %s ret = %d, status = %d\n", __FUNCTION__, ret, status);
3308 @@ -4879,7 +3869,7 @@
3309                 //struct nand_oobinfo noauto_oobsel;
3310  
3311                 printk("Comparison Failed\n");
3312 -               print_diagnostics(chip);
3313 +               print_diagnostics();
3314                 
3315                 //noauto_oobsel = *oobsel;
3316                 //noauto_oobsel.useecc = MTD_NANDECC_PLACEONLY;
3317 @@ -4917,7 +3907,7 @@
3318  {
3319         struct brcmnand_chip * chip = mtd->priv;
3320         
3321 -       int ret = 0; // Matched
3322 +       int ret = 0;
3323         //int ooblen=0, datalen=0;
3324         //int complen;
3325         u_char* oobbuf = v_oob_buf;
3326 @@ -4929,12 +3919,7 @@
3327  
3328  if (gdebug > 3) printk("-->%s: addr=%0llx\n", __FUNCTION__, addr);
3329  
3330 -       /* 
3331 -        * Only do it for Hamming codes because
3332 -        * (1) We can't do it for BCH until we can read the full OOB area for BCH-8
3333 -        * (2) OOB area is included in ECC calculation for BCH, so no need to check it
3334 -        *      separately.
3335 -        */
3336 +       /* Only do it for Hamming codes */
3337         if (chip->ecclevel != BRCMNAND_ECC_HAMMING) {
3338                 return 0;
3339         }
3340 @@ -4942,7 +3927,7 @@
3341  #if 1
3342         page = ((uint64_t) addr) >> chip->page_shift;
3343         // Must read entire page
3344 -       ret = chip->read_page(mtd, vbuf, oobbuf, page);
3345 +       ret = brcmnand_read_page(mtd, vbuf, oobbuf, page);
3346         if (ret) {
3347                 printk(KERN_ERR "%s: brcmnand_read_page at %08x failed ret=%d\n", 
3348                         __FUNCTION__, (unsigned int) addr, ret);
3349 @@ -4967,28 +3952,12 @@
3350                 brcmnand_Hamming_ecc(&dbuf[pageOffset], sw_ecc);
3351  
3352                 if (sw_ecc[0] != oobp[6] || sw_ecc[1] != oobp[7] || sw_ecc[2] != oobp[8]) {
3353 -                       if (oobp[6] == 0xff && oobp[7] == 0xff && oobp[8] == 0xff 
3354 -                               && sw_ecc[0] == 0 && sw_ecc[1] == 0 && sw_ecc[2] == 0) 
3355 -                               ; // OK
3356 -                       else {
3357 -                               printk("%s: Verification failed at %0llx.  HW ECC=%02x%02x%02x, SW ECC=%02x%02x%02x\n",
3358 -                                       __FUNCTION__, addr,
3359 -                                       oobp[6], oobp[7], oobp[8], sw_ecc[0], sw_ecc[1], sw_ecc[2]);
3360 -                               ret = 1;
3361 -                               break;
3362 -                       }
3363 +                       printk("%s: Verification failed at %0llx.  HW ECC=%02x%02x%02x, SW ECC=%02x%02x%02x\n",
3364 +                               __FUNCTION__, addr,
3365 +                               oobp[6], oobp[7], oobp[8], sw_ecc[0], sw_ecc[1], sw_ecc[2]);
3366 +                       ret = 1;
3367 +                       break;
3368                 }
3369 -
3370 -               // Verify the OOB if not NULL
3371 -               if (inp_oob) {
3372 -                       if (memcmp(&inp_oob[oobOffset], oobp, 6) || memcmp(&inp_oob[oobOffset+9], &oobp[9],7)) {
3373 -                               printk("+++++++++++++++++++++++ %s: OOB comp Hamming failed\n", __FUNCTION__);
3374 -                               printk("In OOB:\n"); print_oobbuf(&inp_oob[oobOffset], 16);
3375 -                               printk("\nVerify OOB:\n"); print_oobbuf(oobp, 16);
3376 -                               ret = (-2);
3377 -                               break;
3378 -                       }
3379 -               }
3380         }
3381  
3382         return ret;
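The comparison removed here had one deliberate exception worth spelling out: an erased sector reads back all 0xFF, so the controller stores 0xFF 0xFF 0xFF as its ECC bytes while a software Hamming pass over the 0xFF data yields 0x00 0x00 0x00, and the pre-patch code accepted that pair as a match. A small sketch of that rule follows; the OOB offsets 6..8 come from the code above, everything else is simplified.

/* Illustrative sketch of the Hamming-verify rule the hunk above removes. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool hamming_matches(const uint8_t sw_ecc[3], const uint8_t *oobp)
{
    /* Normal case: the 3 Hamming ECC bytes live at OOB offsets 6..8. */
    if (memcmp(sw_ecc, &oobp[6], 3) == 0)
        return true;

    /* Erased-sector exception kept by the pre-patch code. */
    if (oobp[6] == 0xff && oobp[7] == 0xff && oobp[8] == 0xff &&
        sw_ecc[0] == 0 && sw_ecc[1] == 0 && sw_ecc[2] == 0)
        return true;

    return false;
}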
3383 @@ -5062,8 +4031,7 @@
3384   * @page:      page number to write
3385   * @cached:    cached programming [removed]
3386   */
3387 -static int 
3388 -brcmnand_write_page(struct mtd_info *mtd,
3389 +static int brcmnand_write_page(struct mtd_info *mtd,
3390                            const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page)
3391  {
3392         struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3393 @@ -5094,206 +4062,17 @@
3394         }
3395  
3396         // TBD
3397 -#ifdef BRCMNAND_WRITE_VERIFY
3398 -if (0 == ret) {
3399 -int vret;
3400 +if (0) {
3401 +int save_debug = gdebug;
3402  //gdebug = 0;
3403 -       vret = brcmnand_verify_page(mtd, offset, inp_buf, mtd->writesize, inp_oob, chip->eccOobSize);
3404 +       ret = brcmnand_verify_page(mtd, offset, inp_buf, mtd->writesize, inp_oob, chip->eccOobSize);
3405  //gdebug=save_debug;
3406 -       if (vret) BUG();
3407  }
3408 -#endif
3409 -
3410         
3411         return ret;
3412  }
3413  
3414 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
3415  
3416 -/*
3417 - * Queue the entire page, then wait for completion
3418 - */
3419 -static int
3420 -brcmnand_isr_write_page(struct mtd_info *mtd,
3421 -                          const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t page)
3422 -{
3423 -       struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3424 -       int eccstep;
3425 -       int dataWritten = 0;
3426 -       int oobWritten = 0;
3427 -       int ret = 0;
3428 -       uint64_t offset = page << chip->page_shift;
3429 -
3430 -       uint32_t edu_pending;
3431 -       int submitted = 0;
3432 -       unsigned long flags;
3433 -
3434 -if (gdebug > 3 ) {
3435 -printk("-->%s, page=%0llx\n", __FUNCTION__, page);}
3436 -
3437 -
3438 -#if 0 // No need to check, we are aligned on a page
3439 -       if (unlikely(offset - sliceOffset)) {
3440 -               printk(KERN_ERR "%s: offset %0llx is not cache aligned, sliceOffset=%0llx, CacheSize=%d\n", 
3441 -                __FUNCTION__, offset, sliceOffset, mtd->eccsize);
3442 -               ret = -EINVAL;
3443 -               goto out;
3444 -       }
3445 -#endif
3446 -
3447 -
3448 -       if (unlikely(!EDU_buffer_OK(inp_buf, EDU_WRITE))) 
3449 -       {
3450 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
3451 -               /* EDU does not work on non-aligned buffers */
3452 -               ret = brcmnand_write_page(mtd, inp_buf, inp_oob, page);
3453 -               return (ret);
3454 -       }
3455 -
3456 -       chip->pagebuf = page;
3457 -
3458 -       spin_lock_irqsave(&gJobQ.lock, flags);
3459 -       if (!list_empty(&gJobQ.jobQ)) {
3460 -               printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
3461 -               BUG();
3462 -       }
3463 -       gJobQ.cmd = EDU_WRITE;
3464 -       gJobQ.needWakeUp = 0;
3465 -
3466 -
3467 -       for (eccstep = 0; eccstep < chip->eccsteps && ret == 0; eccstep++) {
3468 -               eduIsrNode_t* req;
3469 -               /*
3470 -                * Queue the 512B sector read, then read the EDU pending bit, 
3471 -                * and issue read command, if EDU is available for read.
3472 -                */
3473 -               req = ISR_queue_write_request(mtd, &inp_buf[dataWritten], 
3474 -                                       inp_oob ? &inp_oob[oobWritten]  : NULL, 
3475 -                                       offset + dataWritten);
3476 -               
3477 -               dataWritten += chip->eccsize;
3478 -               oobWritten += chip->eccOobSize;
3479 -       }
3480 -       
3481 -       
3482 -       /*
3483 -        * Kick start it.  The ISR will submit the next job
3484 -        */
3485 -       if (!submitted) {
3486 -               submitted = brcmnand_isr_submit_job();
3487 -       }
3488 -       
3489 -       while (!list_empty(&gJobQ.jobQ)) {
3490 -               spin_unlock_irqrestore(&gJobQ.lock, flags);
3491 -               ret = ISR_wait_for_queue_completion();
3492 -               spin_lock_irqsave(&gJobQ.lock, flags);
3493 -       }
3494 -       spin_unlock_irqrestore(&gJobQ.lock, flags);
3495 -       return ret;
3496 -
3497 -}
3498 -
3499 -/*
3500 - * Queue the several pages, then wait for completion
3501 - * For 512B page sizes only.
3502 - */
3503 -static int
3504 -brcmnand_isr_write_pages(struct mtd_info *mtd,
3505 -                          const uint8_t *inp_buf, const uint8_t* inp_oob, uint64_t startPage, int numPages)
3506 -{
3507 -       struct brcmnand_chip *chip = (struct brcmnand_chip*) mtd->priv;
3508 -       int eccstep;
3509 -       int dataWritten = 0;
3510 -       int oobWritten = 0;
3511 -       int ret = 0;
3512 -       uint64_t offset = startPage << chip->page_shift;
3513 -       int page;
3514 -
3515 -       uint32_t edu_pending;
3516 -       int submitted = 0;
3517 -       unsigned long flags;
3518 -
3519 -#if 0
3520 - /* Already checked by caller */
3521 -       if (unlikely(!EDU_buffer_OK(inp_buf, EDU_WRITE))) 
3522 -       {
3523 -if (gdebug>3) printk("++++++++++++++++++++++++ %s: buffer not 32B aligned, trying non-EDU read\n", __FUNCTION__);
3524 -               /* EDU does not work on non-aligned buffers */
3525 -               ret = brcmnand_write_page(mtd, inp_buf, inp_oob, startPage);
3526 -               return (ret);
3527 -       }
3528 -#endif
3529 -       /* Paranoia */
3530 -       if (chip->pageSize != chip->eccsize) {
3531 -               printk("%s: Can only be called on small page flash\n", __FUNCTION__);
3532 -               BUG();
3533 -       }
3534 -
3535 -       spin_lock_irqsave(&gJobQ.lock, flags);
3536 -       if (!list_empty(&gJobQ.jobQ)) {
3537 -               printk("%s: Start read page but job queue not empty\n", __FUNCTION__);
3538 -               BUG();
3539 -       }
3540 -       gJobQ.cmd = EDU_WRITE;
3541 -       gJobQ.needWakeUp = 0;
3542 -
3543 -//gdebug=4;
3544 -       for (page = 0; page < numPages && ret == 0; page++) {
3545 -               eduIsrNode_t* req;
3546 -               /*
3547 -                * Queue the 512B sector read, then read the EDU pending bit, 
3548 -                * and issue read command, if EDU is available for read.
3549 -                */
3550 -
3551 -               req = ISR_queue_write_request(mtd, &inp_buf[dataWritten], 
3552 -                                       inp_oob ? &inp_oob[oobWritten]  : NULL, 
3553 -                                       offset + dataWritten);
3554 -               
3555 -               dataWritten += chip->eccsize;
3556 -               oobWritten += chip->eccOobSize;
3557 -       }
3558 -//gdebug=0;    
3559 -       
3560 -       
3561 -       /*
3562 -        * Kick start it.  The ISR will submit the next job
3563 -        * We do it here, in order to avoid having to obtain the queue lock
3564 -        * inside the ISR, in preparation for an RCU implementation.
3565 -        */
3566 -       if (!submitted) {
3567 -               submitted = brcmnand_isr_submit_job();
3568 -       }
3569 -       
3570 -       while (!list_empty(&gJobQ.jobQ)) {
3571 -               spin_unlock_irqrestore(&gJobQ.lock, flags);
3572 -               ret = ISR_wait_for_queue_completion();
3573 -               spin_lock_irqsave(&gJobQ.lock, flags);
3574 -       }
3575 -       spin_unlock_irqrestore(&gJobQ.lock, flags);
3576 -
3577 -
3578 -#ifdef EDU_DEBUG_5
3579 -/* Verify */
3580 -       dataWritten = 0;
3581 -       oobWritten = 0;
3582 -       for (page = 0; page < numPages && ret == 0; page++) {
3583 -               ret = edu_write_verify(mtd, &inp_buf[dataWritten], 
3584 -                                       inp_oob ? &inp_oob[oobWritten]  : NULL, 
3585 -                                       offset + dataWritten);
3586 -               if (ret) BUG();
3587 -               dataWritten += chip->eccsize;
3588 -               oobWritten += chip->eccOobSize;
3589 -       }
3590 -#endif
3591 -       return ret;
3592 -
3593 -}
3594 -
3595 -
3596 -#endif
3597 -
3598 -
3599 -
3600  /**
3601   * brcmnand_fill_oob - [Internal] Transfer client buffer to oob
3602   * @chip:      nand chip structure
3603 @@ -5307,7 +4086,6 @@
3604  {
3605         size_t len = ops->ooblen;
3606  
3607 -       
3608         switch(ops->mode) {
3609  
3610         case MTD_OOB_PLACE:
3611 @@ -5320,8 +4098,6 @@
3612                 uint32_t boffs = 0, woffs = ops->ooboffs;
3613                 size_t bytes = 0;
3614  
3615 -               memset(chip->oob_poi + ops->ooboffs, 0xff, chip->eccOobSize-ops->ooboffs);
3616 -
3617                 for(; free->length && len; free++, len -= bytes) {
3618                         /* Write request not from offset 0 ? */
3619                         if (unlikely(woffs)) {
3620 @@ -5370,8 +4146,6 @@
3621         uint8_t *buf = ops->datbuf;
3622         int bytes = mtd->writesize;
3623         int ret = 0;
3624 -       int numPages; 
3625 -       int buffer_aligned = 0;
3626  
3627  DEBUG(MTD_DEBUG_LEVEL3, "-->%s, offset=%0llx\n", __FUNCTION__, to);
3628  
3629 @@ -5392,8 +4166,13 @@
3630         chip->select_chip(mtd, chipnr);
3631  */
3632  
3633 +#if 0
3634 +/* THT TBD */
3635 +       /* Check, if it is write protected */
3636 +       if (nand_check_wp(mtd))
3637 +               return -EIO;
3638 +#endif
3639  
3640 -
3641         realpage = to >> chip->page_shift;
3642         //page = realpage & chip->pagemask;
3643         blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
3644 @@ -5414,70 +4193,15 @@
3645                 chip->oob_poi = NULL;
3646         }
3647  
3648 -#ifdef  CONFIG_MTD_BRCMNAND_ISR_QUEUE  
3649 -       /* Buffer must be aligned for EDU */
3650 -       buffer_aligned = EDU_buffer_OK(buf, EDU_WRITE);
3651 -
3652 -#else /* Dont care */
3653 -       buffer_aligned = 0;
3654 -#endif
3655 -
3656         while(1) {
3657 -
3658 -#ifdef  CONFIG_MTD_BRCMNAND_ISR_QUEUE  
3659 -               /*
3660 -                * Group several pages for submission for small page NAND
3661 -                */
3662 -               numPages = min(MAX_JOB_QUEUE_SIZE, writelen>>chip->page_shift);
3663 -
3664 -               // If Batch mode                
3665 -               if (buffer_aligned && numPages > 1 && chip->pageSize == chip->eccsize) {
3666 -                       int j;
3667 -
3668 -                       /* Submit min(queueSize, len/512B) at a time */
3669 -                       //numPages = min(MAX_JOB_QUEUE_SIZE, writelen>>chip->page_shift);                       
3670 -                       bytes = chip->eccsize*numPages;
3671 -
3672 -                       if (unlikely(oob)) {
3673 -                               //u_char* newoob;
3674 -                               for (j=0; j<numPages; j++) {
3675 -                                       oob = brcmnand_fill_oob(chip, oob, ops);
3676 -                                       /* THT: oob now points to where to read next, 
3677 -                                        * chip->oob_poi contains the OOB to be written
3678 -                                        */
3679 -                                       /* In batch mode, we advance the OOB pointer to the next OOB slot 
3680 -                                        * using chip->oob_poi
3681 -                                        */
3682 -                                       chip->oob_poi += chip->eccOobSize;
3683 -                               }
3684 -                               // Reset chip->oob_poi to beginning of OOB buffer for submission.
3685 -                               chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3686 -                       }
3687 -                       
3688 -                       ret = brcmnand_isr_write_pages(mtd, buf, chip->oob_poi, realpage, numPages);
3689 -
3690 +               if (unlikely(oob)) {
3691 +                       oob = brcmnand_fill_oob(chip, oob, ops);
3692 +                       /* THT: oob now points to where to read next, 
3693 +                        * chip->oob_poi contains the OOB to be written
3694 +                        */
3695                 }
3696 -               
3697 -               else /* Else submit one page at a time */
3698  
3699 -#endif
3700 -               /* Submit one page at a time */
3701 -               { 
3702 -                       numPages = 1;
3703 -                       bytes = mtd->writesize;
3704 -                       
3705 -                       if (unlikely(oob)) {
3706 -                               chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
3707 -                               oob = brcmnand_fill_oob(chip, oob, ops);
3708 -                               /* THT: oob now points to where to read next, 
3709 -                                * chip->oob_poi contains the OOB to be written
3710 -                                */
3711 -                       }
3712 -
3713 -                       ret = chip->write_page(mtd, buf, chip->oob_poi, realpage);
3714 -
3715 -               }
3716 -
3717 +               ret = chip->write_page(mtd, buf, chip->oob_poi, realpage);
3718                 if (ret)
3719                         break;
3720  
3721 @@ -5486,9 +4210,21 @@
3722                         break;
3723  
3724                 buf += bytes;
3725 -               realpage += numPages;
3726 +               realpage++;
3727 +
3728 +#if 0
3729 +               page = realpage & chip->pagemask;
3730 +               /* Check, if we cross a chip boundary */
3731 +               if (!page) {
3732 +                       chipnr++;
3733 +                       chip->select_chip(mtd, -1);
3734 +                       chip->select_chip(mtd, chipnr);
3735 +               }
3736 +#endif
3737         }
3738  
3739 +       if (unlikely(oob))
3740 +               memset(chip->oob_poi, 0xff, mtd->oobsize);
3741  
3742         ops->retlen = ops->len - writelen;
3743         DEBUG(MTD_DEBUG_LEVEL3, "<-- %s\n", __FUNCTION__);
3744 @@ -6593,29 +5329,12 @@
3745   */
3746  static void brcmnand_adjust_timings(struct brcmnand_chip *this, brcmnand_chip_Id* chip)
3747  {
3748 -       unsigned long nand_timing1 = this->ctrl_read(BCHP_NAND_TIMING_1);
3749 -       unsigned long nand_timing1_b4;
3750 -       unsigned long nand_timing2 = this->ctrl_read(BCHP_NAND_TIMING_2);
3751 -       unsigned long nand_timing2_b4;
3752 -       extern uint32_t gNandTiming1;
3753 -       extern uint32_t gNandTiming2;
3754 -
3755 -       /*
3756 -        * Override database values with kernel command line values
3757 -        */
3758 -        if (0 != gNandTiming1 || 0 != gNandTiming2) {
3759 -               if (0 != gNandTiming1) {
3760 -                       chip->timing1 = gNandTiming1;
3761 -                       //this->ctrl_write(BCHP_NAND_TIMING_1, gNandTiming1);
3762 -               }
3763 -               if (0 != gNandTiming2) {
3764 -                       chip->timing2 = gNandTiming2;
3765 -                       //this->ctrl_write(BCHP_NAND_TIMING_2, gNandTiming2);
3766 -               }
3767 -               //return;
3768 -        }
3769 -       
3770 -       // Adjust NAND timings from database or command line
3771 +               unsigned long nand_timing1 = this->ctrl_read(BCHP_NAND_TIMING_1);
3772 +               unsigned long nand_timing1_b4;
3773 +               unsigned long nand_timing2 = this->ctrl_read(BCHP_NAND_TIMING_2);
3774 +               unsigned long nand_timing2_b4;
3775 +               
3776 +       // Adjust NAND timings:
3777         if (chip->timing1) {
3778                 nand_timing1_b4 = nand_timing1;
3779  
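The lines dropped at the top of this hunk let non-zero kernel command-line values (gNandTiming1/gNandTiming2) override the per-chip timing database before anything is programmed into NAND_TIMING_1/2. A trivial sketch of that precedence, with simplified types; it is not the driver's code.

/* Sketch of the override rule removed above. */
#include <stdint.h>

struct chip_timings { uint32_t timing1, timing2; };

static void apply_cmdline_timings(struct chip_timings *t,
                                  uint32_t cmdline_t1, uint32_t cmdline_t2)
{
    if (cmdline_t1)
        t->timing1 = cmdline_t1;   /* gNandTiming1 in the removed code */
    if (cmdline_t2)
        t->timing2 = cmdline_t2;   /* gNandTiming2 */
}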
3780 @@ -6688,61 +5407,20 @@
3781  brcmnand_read_id(struct mtd_info *mtd, unsigned int chipSelect, unsigned long* dev_id)
3782  {
3783         struct brcmnand_chip * chip = mtd->priv;
3784 -       uint32_t status;
3785 -       uint32_t nandConfig = chip->ctrl_read(BCHP_NAND_CONFIG);
3786 -       uint32_t csNandSelect = 0;
3787 -       uint32_t nandSelect = 0;
3788 -
3789 -       if (chipSelect > 0) { // Do not re-initialize when on CS0, Bootloader already done that
3790 -
3791 -#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
3792 -               nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3793 -
3794 -printk("B4: NandSelect=%08x, nandConfig=%08x, chipSelect=%d\n", nandSelect, nandConfig, chipSelect);
3795 -
3796         
3797 -  #if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
3798 -       /* Older version do not have EXT_ADDR registers */
3799 -               chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, 0);
3800 -               chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << BCHP_NAND_CMD_EXT_ADDRESS_CS_SEL_SHIFT);
3801 -  #endif  // Set EXT address if version >= 1.0
3802 +#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_1_0
3803 +       /* Set correct chip Select */
3804 +       chip->ctrl_write(BCHP_NAND_CMD_ADDRESS, BCHP_NAND_CMD_START_OPCODE_DEVICE_ID_READ);
3805 +       chip->ctrl_write(BCHP_NAND_CMD_EXT_ADDRESS, chipSelect << 16);
3806 +#endif
3807  
3808 -               // Has CFE initialized the register?  
3809 -               if (0 == (nandSelect & BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK)) {
3810 -                       
3811 -  #if CONFIG_MTD_BRCMNAND_VERSION == CONFIG_MTD_BRCMNAND_VERS_0_1
3812 -                       csNandSelect = 1<<(BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_SHIFT + chipSelect);
3813 +PRINTK("-->%s: this=%p, chip->ctrl_read=%p\n", __FUNCTION__, chip, chip->ctrl_read);
3814  
3815 -  // v1.0 does not define it
3816 -  #elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
3817 -                       csNandSelect = 1<<(BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT + chipSelect);
3818 -
3819 -  #endif // If brcmNAND Version >= 1.0
3820 -       
3821 -                       nandSelect = BCHP_NAND_CS_NAND_SELECT_AUTO_DEVICE_ID_CONFIG_MASK | csNandSelect;
3822 -                       chip->ctrl_write(BCHP_NAND_CS_NAND_SELECT, nandSelect);
3823 -               }
3824 -
3825 -               /* Send the command for reading device ID from controller */
3826 -               chip->ctrl_write(BCHP_NAND_CMD_START, OP_DEVICE_ID_READ);
3827 -               
3828 -               /* Wait for CTRL_Ready */
3829 -               brcmnand_wait(mtd, FL_READY, &status);
3830 -                                
3831 -#endif // if BrcmNAND Version >= 0.1
3832 -       }
3833 -
3834 +       /* Send the command for reading device ID from controller */
3835         *dev_id = chip->ctrl_read(BCHP_NAND_FLASH_DEVICE_ID);
3836  
3837         printk(KERN_INFO "brcmnand_probe: CS%1d: dev_id=%08x\n", chipSelect, (unsigned int) *dev_id);
3838  
3839 -#if CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_0_1
3840 -       nandSelect = chip->ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3841 -#endif
3842 -
3843 -       nandConfig = chip->ctrl_read(BCHP_NAND_CONFIG);
3844 -
3845 -printk("After: NandSelect=%08x, nandConfig=%08x\n", nandSelect, nandConfig);
3846  }
3847  
3848  
3849 @@ -6764,8 +5442,6 @@
3850         int version_id;
3851         //int density;
3852         int i;
3853 -
3854 -//gdebug=4;
3855         
3856         /* Read manufacturer and device IDs from Controller */
3857         brcmnand_read_id(mtd, chipSelect, &chip->device_id);
3858 @@ -7169,10 +5845,9 @@
3859         /* Version ID */
3860         version_id = chip->ctrl_read(BCHP_NAND_REVISION);
3861  
3862 -       printk(KERN_INFO "BrcmNAND version = 0x%04x %dMB @%08lx\n", 
3863 -               version_id, mtd64_ll_low(chip->chipSize>>20), chip->pbase);
3864 +       printk(KERN_INFO "BrcmNAND version = 0x%04x %dMB @%p\n", 
3865 +               version_id, mtd64_ll_low(chip->chipSize>>20), chip->vbase);
3866  
3867 -//gdebug=0;
3868  
3869         return 0;
3870  }
3871 @@ -7615,92 +6290,51 @@
3872         }
3873         
3874  #elif CONFIG_MTD_BRCMNAND_VERSION >= CONFIG_MTD_BRCMNAND_VERS_2_0
3875 -       {
3876 -               int i;
3877 -               uint32_t nand_xor;
3878 +       /* 
3879 +        * Starting with version 2.0 (bcm7325 and later), 
3880 +        * we can use EBI_CS_USES_NAND  Registers to find out where the NAND
3881 +        * chips are (which CS) 
3882 +        */
3883 +       if (gNumNand > 0) { /* Kernel argument nandcs=<comma-sep-list> override CFE settings */
3884 +               if (brcmnand_sort_chipSelects(mtd, maxchips, gNandCS, chip->CS))
3885 +                       return (-EINVAL);
3886 +               cs = chip->CS[chip->numchips - 1];
3887 +PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3888 +       }
3889 +       else {
3890                 
3891 -               /* 
3892 -                * Starting with version 2.0 (bcm7325 and later), 
3893 -                * we can use EBI_CS_USES_NAND  Registers to find out where the NAND
3894 -                * chips are (which CS) 
3895 -                */
3896 +               /* Load the gNandCS_priv[] array from EBI_CS_USES_NAND values,
3897 +                * same way that get_options() does, i.e. first entry is gNumNand
3898 +                */
3899 +               int nandCsShift, i;
3900 +               int numNand = 0;
3901 +               int nandCS[MAX_NAND_CS];
3902  
3903 -
3904 -               if (gNumNand > 0) { /* Kernel argument nandcs=<comma-sep-list> override CFE settings */
3905 -                       if (brcmnand_sort_chipSelects(mtd, maxchips, gNandCS, chip->CS))
3906 -                               return (-EINVAL);
3907 -                       cs = chip->CS[chip->numchips - 1];
3908 -       PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3909 -               }
3910 -               else {
3911 -                       
3912 -                       /* Load the gNandCS_priv[] array from EBI_CS_USES_NAND values,
3913 -                        * same way that get_options() does, i.e. first entry is gNumNand
3914 -                        */
3915 -                       int nandCsShift;
3916 -                       int numNand = 0; // Number of NAND chips
3917 -                       int nandCS[MAX_NAND_CS];
3918 -
3919 -                       for (i = 0; i< MAX_NAND_CS; i++) {
3920 -                               nandCS[i] = -1;
3921 -                       }
3922 -                       
3923 -                       nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3924 -                       // Be careful here, the last bound depends on chips.  Some chips allow 8 CS'es (3548a0) some only 2 (3548b0)
3925 -                       // Here we rely on BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT being the next bit.
3926 -                       for (i=0, nandCsShift = BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3927 -                               nandCsShift < BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT;
3928 -                               nandCsShift ++)
3929 -                       {
3930 -                               if (nand_select & (1 << nandCsShift)) {
3931 -                                       nandCS[i] = nandCsShift - BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3932 -                                       PRINTK("Found NAND on CS%1d\n", nandCS[i]);
3933 -                                       i++;
3934 -                               }
3935 -                       }
3936 -                       numNand = i;
3937 -                       if (brcmnand_sort_chipSelects(mtd, maxchips, nandCS, chip->CS))
3938 -                               return (-EINVAL);
3939 -                       cs = chip->CS[chip->numchips - 1];
3940 -       PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3941 -
3942 -                               
3943 -
3944 -                       
3945 -
3946 +               for (i = 0; i< MAX_NAND_CS; i++) {
3947 +                       nandCS[i] = -1;
3948                 }
3949 -
3950 -               /*
3951 -                * 2618-7.3: For v2.0 or later, set xor_disable according to NAND_CS_NAND_XOR:00 bit
3952 -                */     
3953 -
3954 -               nand_xor = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_XOR);
3955 -               printk("NAND_CS_NAND_XOR=%08x\n", nand_xor);
3956 -               //
3957 -#ifdef CONFIG_MTD_BRCMNAND_DISABLE_XOR
3958 -       /* Testing 1,2,3: Force XOR disable on CS0, if not done by CFE */
3959 -               if (chip->CS[0] == 0) { 
3960 -                       printk("Disabling XOR: Before: SEL=%08x, XOR=%08x\n", nand_select, nand_xor);
3961 -                       
3962 -                       nand_select &= ~BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_SEL_MASK;
3963 -                       nand_xor &= ~BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK;
3964 -
3965 -                       brcmnand_ctrl_write(BCHP_NAND_CS_NAND_SELECT, nand_select);
3966 -                       brcmnand_ctrl_write(BCHP_NAND_CS_NAND_XOR, nand_xor);
3967 -
3968 -                       printk("Disabling XOR: After: SEL=%08x, XOR=%08x\n", nand_select, nand_xor);
3969 -               }
3970 -#endif
3971 -               /* Translate nand_xor into our internal flag, for brcmnand_writeAddr */
3972 -               for (i=0; i<chip->numchips; i++) {
3973 -                                               
3974 -                       /* Set xor_disable, 1 for each NAND chip */
3975 -                       if (!(nand_xor & (BCHP_NAND_CS_NAND_XOR_EBI_CS_0_ADDR_1FC0_XOR_MASK<<i))) {
3976 -printk("Disabling XOR on CS#%1d\n", chip->CS[i]);
3977 -                               chip->xor_disable[i] = 1;
3978 +               
3979 +               nand_select = brcmnand_ctrl_read(BCHP_NAND_CS_NAND_SELECT);
3980 +               // Be careful here, the last bound depends on chips.  Some chips allow 8 CS'es (3548a0) some only 2 (3548b0)
3981 +               // Here we rely on BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT being the next bit.
3982 +               for (i=0, nandCsShift = BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3983 +                       nandCsShift < BCHP_NAND_CS_NAND_SELECT_reserved1_SHIFT;
3984 +                       nandCsShift ++)
3985 +               {
3986 +                       if (nand_select & (1 << nandCsShift)) {
3987 +                               nandCS[i] = nandCsShift - BCHP_NAND_CS_NAND_SELECT_EBI_CS_0_USES_NAND_SHIFT;
3988 +                               PRINTK("Found NAND on CS%1d\n", nandCS[i]);
3989 +                               i++;
3990                         }
3991                 }
3992 +               numNand = i;
3993 +               if (brcmnand_sort_chipSelects(mtd, maxchips, nandCS, chip->CS))
3994 +                       return (-EINVAL);
3995 +               cs = chip->CS[chip->numchips - 1];
3996 +PRINTK("gNumNand=%d, cs=%d\n", gNumNand, cs);
3997         }
3998 +
3999 +  
4000  #else
4001         #error "Unknown Broadcom NAND controller version"
4002  #endif /* Versions >= 1.0 */
4003 @@ -7728,15 +6362,24 @@
4004                 volatile unsigned long acc_control;
4005  
4006                 chip->numchips = 1;
4007 +               if (chip->chipSize >= (128 << 20)) {
4008 +                       chip->pbase = 0x11000000; /* Skip 16MB EBI Registers */
4009  
4010 -               /* Set up base, based on flash size */
4011 -               if (chip->chipSize >= (256 << 20)) {
4012 -                       chip->pbase = 0x12000000;
4013 -                       mtd->size = 0x20000000 - chip->pbase; // THT: This is different than chip->chipSize
4014 -               } else {
4015 -                       /* We know that flash endAddr is 0x2000_0000 */
4016 -                       chip->pbase = 0x20000000 - chip->chipSize;
4017 +                       mtd->num_eraseblocks = (chip->chipSize - (16<<20)) >> chip->erase_shift; // Maximum size on a 128MB/256MB flash
4018 +                       chip->mtdSize = device_size(mtd);
4019 +               }
4020 +/*
4021 +               else if (chip->chipSize == (256 << 20)) {
4022 +                       chip->pbase = 0x11000000; // Skip 16MB EBI Registers 
4023 +                       mtd->size = 240<<20; // Maximum size on a 256MB flash, provided CS0/NOR is disabled
4024 +               }
4025 + */
4026 +               else {
4027 +                       chip->pbase = 0x18000000 - chip->chipSize;
4028                         mtd->size = chip->chipSize;
4029 +                       chip->mtdSize = mtd->size;
4030 +
4031 +                       //mtd->size_hi = 0;
4032                 }
4033  
4034                 printk("Found NAND chip on Chip Select %d, chipSize=%dMB, usable size=%dMB, base=%08x\n", 
4035 @@ -7926,7 +6569,7 @@
4036                                 printk("ACC: %d OOB bytes per 512B ECC step; from ID probe: %d\n", eccOobSize, chip->eccOobSize);
4037                                 // We have recorded chip->eccOobSize during probe, let's compare it against value from ACC
4038                                 if (chip->eccOobSize < eccOobSize) {
4039 -                                       printk("Flash says it has %d OOB bytes, but ECC level %lu need %d bytes\n",
4040 +                                       printk("Flash says it has %d OOB bytes, but ECC level %d need %d bytes\n",
4041                                                 chip->eccOobSize, eccLevel, eccOobSize);
4042                                         printk(KERN_INFO "Please fix your board straps. Aborting to avoid file system damage\n");
4043                                         BUG();
4044 @@ -7941,7 +6584,7 @@
4045                                 break;
4046  
4047                         default:
4048 -                               printk(KERN_ERR "Unsupported ECC level %lu\n", eccLevel);
4049 +                               printk(KERN_ERR "Unsupported ECC level %d\n", eccLevel);
4050                                 BUG();
4051                                 
4052                         }
4053 @@ -7963,11 +6606,11 @@
4054                                 brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc_control );
4055                                 printk("Corrected PARTIAL_PAGE_EN: ACC_CONTROL = %08lx\n", acc_control);
4056                         }                       
4057 -#ifdef CONFIG_MIPS_BCM3548
4058 -                       /* THT PR50928: Disable WR_PREEMPT for 3548L and 3556 */
4059 -                       acc_control &= ~(BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK);
4060 +#if 1
4061 +                       /* THT Disable Optimization for 2K page */
4062 +                       acc_control &= ~(BCHP_NAND_ACC_CONTROL_WR_PREEMPT_EN_MASK|BCHP_NAND_ACC_CONTROL_PAGE_HIT_EN_MASK);
4063                         brcmnand_ctrl_write(BCHP_NAND_ACC_CONTROL, acc_control );
4064 -                       printk("Disable WR_PREEMPT: ACC_CONTROL = %08lx\n", acc_control);
4065 +                       printk("Disable WR_PREEMPT and PAGE_HIT_EN: ACC_CONTROL = %08lx\n", acc_control);
4066  #endif
4067                         printk("ACC_CONTROL for MLC NAND: %08lx\n", acc_control);
4068                 }
4069 @@ -8010,58 +6653,7 @@
4070                                 printk("SLC flash: Corrected ACC_CONTROL = %08lx from %08lx\n", acc_control, org_acc_control);
4071                         }
4072                 }
4073 -
4074 -
4075 -#if CONFIG_MTD_BRCMNAND_VERSION <= CONFIG_MTD_BRCMNAND_VERS_3_4
4076 -               /*
4077 -                * PR57272: Workaround for BCH-n error, 
4078 -                * reporting correctable errors with 4 or more bits as uncorrectable:
4079 -                */
4080 -               if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
4081 -                       int corr_threshold;
4082 -
4083 -                       if ( chip->ecclevel >=  BRCMNAND_ECC_BCH_4) {
4084 -                               corr_threshold = 2;
4085 -                       } 
4086 -                       else {
4087 -                               corr_threshold = 1;  // 1 , default for Hamming
4088 -                       }
4089 -
4090 -                       printk(KERN_INFO "%s: CORR ERR threshold set to %d bits\n", __FUNCTION__, corr_threshold);
4091 -                       corr_threshold <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
4092 -                       brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, corr_threshold);
4093 -               }
4094 -
4095 -#else
4096 -               /*
4097 -                * If ECC level is BCH, set CORR Threshold according to # bits corrected
4098 -                */
4099 -               if (chip->ecclevel != 0 && chip->ecclevel != BRCMNAND_ECC_HAMMING) {
4100 -                       int corr_threshold;
4101 -
4102 -                       if (chip->ecclevel >= BRCMNAND_ECC_BCH_8) {
4103 -                               corr_threshold = 6;  // 6 out of 8
4104 -                       } 
4105 -                       else if ( chip->ecclevel >=  BRCMNAND_ECC_BCH_4) {
4106 -                               corr_threshold = 3;  // 3 out of 4
4107 -                       } 
4108 -                       else {
4109 -                               corr_threshold = 1;  // 1 , default for Hamming
4110 -                       }
4111 -                       printk(KERN_INFO "%s: CORR ERR threshold set to %d bits\n", __FUNCTION__, corr_threshold);
4112 -                       corr_threshold <<= BCHP_NAND_CORR_STAT_THRESHOLD_CORR_STAT_THRESHOLD_SHIFT;
4113 -                       brcmnand_ctrl_write(BCHP_NAND_CORR_STAT_THRESHOLD, corr_threshold);
4114 -               }
4115 -#endif
4116 -                       
4117         }
4118 -
4119 -#else
4120 -       /* Version 2.x, Hamming codes only */
4121 -       /* If chip Select is not zero, the CFE may not have initialized the NAND flash */
4122 -       if (chip->CS[0]) {
4123 -               /* Nothing for now */
4124 -       }
4125  #endif // Version 3.0+
4126  #endif // Version 1.0+
4127  
4128 @@ -8112,17 +6704,12 @@
4129  #ifdef EDU_DEBUG_3
4130  printk("++++++++++++ EDU_DEBUG_3 enabled\n");
4131  #endif
4132 -#if defined( EDU_DEBUG_4 ) || defined( EDU_DEBUG_5 )
4133 -init_edu_buf();
4134 -
4135 -  #ifdef EDU_DEBUG_4
4136 -  printk("++++++++++++ EDU_DEBUG_4 (read verify) enabled\n");
4137 -  #endif
4138 -
4139 -  #ifdef EDU_DEBUG_5
4140 -  printk("++++++++++++ EDU_DEBUG_5 (write verify) enabled\n");
4141 -  #endif
4142 +#ifdef EDU_DEBUG_4
4143 +printk("++++++++++++ EDU_DEBUG_4 (read verify) enabled\n");
4144  #endif
4145 +#ifdef EDU_DEBUG_5
4146 +printk("++++++++++++ EDU_DEBUG_5 (write verify) enabled\n");
4147 +#endif
4148  
4149  PRINTK("%s 30\n", __FUNCTION__);
4150         /*
4151 @@ -8200,22 +6787,8 @@
4152                                 }
4153                         }
4154                         else {
4155 -                               switch (mtd->writesize) {
4156 -                               case 4096:
4157 -                                       if (chip->ecclevel == BRCMNAND_ECC_HAMMING) {
4158 -                                               printk(KERN_WARNING "This SLC-4K-page flash may not be suitable for Hamming codes\n");
4159 -                                               chip->ecclayout = &brcmnand_oob_128;
4160 -                                       }
4161 -                                       else {
4162 -                                               chip->ecclayout = &brcmnand_oob_bch4_4k;
4163 -                                       }
4164 -                                       break;
4165 -
4166 -                               default:
4167 -                                       printk(KERN_ERR "Unsupported page size of %d\n", mtd->writesize);
4168 -                                       BUG();
4169 -                                       break;
4170 -                               }
4171 +                               printk(KERN_ERR "Unsupported SLC NAND with page size of %d\n", mtd->writesize);
4172 +                               BUG();
4173                         }
4174                         break;
4175                         
4176 @@ -8239,18 +6812,7 @@
4177         //chip->eccOobSize = (mtd->oobsize*512) /mtd->writesize; 
4178         printk(KERN_INFO "mtd->oobsize=%d, mtd->eccOobSize=%d\n", mtd->oobsize, chip->eccOobSize);
4179  
4180 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
4181         if (!chip->read_page)
4182 -               chip->read_page = brcmnand_isr_read_page;
4183 -       if (!chip->write_page)
4184 -               chip->write_page = brcmnand_isr_write_page;
4185 -       if (!chip->read_page_oob)
4186 -               chip->read_page_oob = brcmnand_isr_read_page_oob;
4187 -       /* There is no brcmnand_isr_write_page_oob */
4188 -       if (!chip->write_page_oob)
4189 -               chip->write_page_oob = brcmnand_write_page_oob;
4190 -#else
4191 -       if (!chip->read_page)
4192                 chip->read_page = brcmnand_read_page;
4193         if (!chip->write_page)
4194                 chip->write_page = brcmnand_write_page;
4195 @@ -8258,7 +6820,6 @@
4196                 chip->read_page_oob = brcmnand_read_page_oob;
4197         if (!chip->write_page_oob)
4198                 chip->write_page_oob = brcmnand_write_page_oob;
4199 -#endif
4200         if (!chip->read_oob)
4201                 chip->read_oob = brcmnand_do_read_ops;
4202         if (!chip->write_oob)
4203 @@ -8387,17 +6948,21 @@
4204         EDU_init();
4205  #endif
4206  
4207 +gdebug=0;
4208 +if (0) {
4209 +       char oob[128];
4210 +       
4211 +       printk("------------------> Dry-run\n");
4212 +       brcmnand_posted_read_oob(mtd, oob, device_size(mtd) - mtd->erasesize, 1);
4213 +       print_oobbuf(oob, 16);
4214 +       printk("<------------------ End Dry-run\n");
4215 +}
4216  
4217 +if (gdebug > 3) printk("%s 60 Calling scan_bbt\n", __FUNCTION__);
4218  
4219 -#ifdef CONFIG_MTD_BRCMNAND_DISABLE_XOR
4220 -gdebug=4;
4221 -       printk("-----------------------------------------------------\n");
4222 -       print_nand_ctrl_regs();
4223 -       printk("-----------------------------------------------------\n");
4224 -#endif
4225 -
4226 -
4227         err =  chip->scan_bbt(mtd);
4228 +if (gdebug > 3) printk("%s 80 Done scan_bbt\n", __FUNCTION__); 
4229 +//gdebug=0;
4230  
4231  
4232  #ifdef CONFIG_MTD_BRCMNAND_CORRECTABLE_ERR_HANDLING
4233 @@ -8411,9 +6976,9 @@
4234         }
4235  #endif
4236  
4237 -//gdebug=0;
4238  PRINTK("%s 99\n", __FUNCTION__);
4239  
4240 +if (gdebug) print_diagnostics();
4241         return err;
4242  
4243  }
4244 Index: drivers/mtd/brcmnand/brcmnand_cet.c
4245 ===================================================================
4246 --- drivers/mtd/brcmnand/brcmnand_cet.c (revision 1)
4247 +++ drivers/mtd/brcmnand/brcmnand_cet.c (working copy)
4248 @@ -72,20 +72,12 @@
4249  
4250  #define CET_SYNC_FREQ  (10*60*HZ)      
4251  
4252 -
4253  static char cet_pattern[] = {'C', 'E', 'T', 0};
4254  static struct brcmnand_cet_descr cet_descr = {
4255         .offs = 9,
4256         .len = 4,
4257         .pattern = cet_pattern
4258  };
4259 -
4260 -/* 
4261 - * This also applies to Large Page SLC flashes with BCH-4 ECC.
4262 - * We don't support BCH-4 on Small Page SLCs because there are not 
4263 - * enough free bytes for the OOB, but we don't enforce it,
4264 - * in order to allow page aggregation like in YAFFS2 on small page SLCs.
4265 - */
4266  static struct brcmnand_cet_descr cet_descr_mlc = {
4267         .offs = 1,
4268         .len = 4,
4269 @@ -685,18 +677,10 @@
4270         if (unlikely(gdebug)) {
4271                 printk(KERN_INFO "brcmnandCET: Creating correctable error table ...\n");
4272         }
4273 -       
4274 -       if (NAND_IS_MLC(this) || /* MLC flashes */
4275 -          /* SLC w/ BCH-n; We don't check for pageSize, and let it be */
4276 -          (this->ecclevel >= BRCMNAND_ECC_BCH_1 && this->ecclevel <= BRCMNAND_ECC_BCH_12)) 
4277 -       {
4278 +       if (NAND_IS_MLC(this)) {
4279                 this->cet = cet = &cet_descr_mlc;
4280 -if (gdebug) printk("%s: CET = cet_desc_mlc\n", __FUNCTION__);
4281 -       } 
4282 -
4283 -       else {
4284 +       } else {
4285                 this->cet = cet = &cet_descr;
4286 -if (gdebug) printk("%s: CET = cet_descr\n", __FUNCTION__);
4287         }
4288         cet->flags = 0x00;
4289         /* Check that BBT table and mirror exist */
4290 Index: drivers/mtd/brcmnand/brcmnand_isr.c
4291 ===================================================================
4292 --- drivers/mtd/brcmnand/brcmnand_isr.c (revision 1)
4293 +++ drivers/mtd/brcmnand/brcmnand_isr.c (working copy)
4294 @@ -22,705 +22,189 @@
4295   * 20090318    tht             Original coding
4296   */
4297  
4298 -//#define ISR_DEBUG_SMP
4299 -#undef ISR_DEBUG_SMP
4300  
4301 -#ifdef ISR_DEBUG_SMP
4302 -#include <asm/atomic.h>
4303 -#endif
4304 -
4305 -
4306  #include "brcmnand_priv.h"
4307  #include "edu.h"
4308  
4309 -#include <linux/dma-mapping.h>
4310 -
4311  #define PRINTK(...)
4312 -//#define PRINTK printk
4313 -
4314 -#ifdef ISR_DEBUG_SMP
4315 -static atomic_t v = ATOMIC_INIT(1);
4316 -#define PRINTK1(...) if (!atomic_dec_and_test(&v)) printk("<")
4317 -#define PRINTK2(...) atomic_inc(&v)  //, printk(">"))
4318 -#define PRINTK5(...) if (!atomic_dec_and_test(&v))  printk("+");
4319 -#define PRINTK6(...) atomic_inc(&v)  // printk("-");
4320 -#define PRINTK3(...) if (!atomic_dec_and_test(&v)) printk("[");
4321 -#define PRINTK4(...) atomic_inc(&v) // printk("]");
4322 -
4323 -#else
4324 -#define PRINTK1(...)
4325 -#define PRINTK2(...)
4326 -#define PRINTK3(...)
4327 -#define PRINTK4(...)
4328 -#define PRINTK5(...)
4329 -#define PRINTK6(...)
4330 -#endif
4331 +//#define PRINTK printk
4332   
4333  
4334   // Wakes up the sleeping calling thread.
4335  static DECLARE_WAIT_QUEUE_HEAD(gEduWaitQ);
4336  
4337 -//eduIsrNode_t gEduIsrData; 
4338 -eduIsrNode_t gEduIsrPool[MAX_JOB_QUEUE_SIZE+2]; /* ReadOp Pool, add 2 for Pushed WAR jobs */
4339 +eduIsrData_t gEduIsrData;
4340  
4341 -isrJobQ_t gJobQ; /* Job Queue */
4342 -
4343 -extern int gdebug;
4344 -
4345 -
4346 -/*
4347 - * Queue next sector for read/write, assuming caller holds queue lock
4348 - */
4349 -eduIsrNode_t* 
4350 -ISR_queue_read_request(struct mtd_info *mtd,
4351 -        void* buffer, u_char* oobarea, loff_t offset)
4352 +static irqreturn_t ISR_isr(int irq, void *devid, struct pt_regs *regs)
4353  {
4354 -       eduIsrNode_t* entry; 
4355 -       struct list_head* node;
4356 -
4357 -       // Grab one request from avail list
4358 -       if (list_empty(&gJobQ.availList)) {
4359 -               printk("%s: Empty avail list\n", __FUNCTION__);
4360 -               BUG();
4361 -       }
4362 -       node = gJobQ.availList.next;
4363 -       if (!node) {
4364 -               printk("%s: Empty avail list\n", __FUNCTION__);
4365 -               BUG();
4366 -       }
4367 -       entry = list_entry(node, eduIsrNode_t, list);
4368 -       list_del(node);
4369 -
4370 -       // Queue entry
4371 -       list_add_tail(node, &gJobQ.jobQ);
4372 -       spin_lock_init(&entry->lock);
4373 -       entry->mtd = mtd;
4374 -       entry->buffer = buffer;
4375 -       entry->oobarea = oobarea;
4376 -       entry->offset = offset;
4377 -       entry->ret = -1;
4378 -       entry->refCount = 1;
4379 -       entry->opComplete = ISR_OP_QUEUED;
4380 -       
4381 -       return entry;
4382 -}
4383 -
4384 -eduIsrNode_t* 
4385 -ISR_queue_write_request(struct mtd_info *mtd,
4386 -        const void* buffer, const u_char* oobarea, loff_t offset)
4387 -{
4388 -       eduIsrNode_t* entry; 
4389 -       struct list_head* node;
4390 -
4391 -       // Grab one request from avail list
4392 -       if (list_empty(&gJobQ.availList)) {
4393 -               printk("%s: Empty avail list\n", __FUNCTION__);
4394 -               BUG();
4395 -       }
4396 -       node = gJobQ.availList.next;
4397 -       if (!node) {
4398 -               printk("%s: Empty avail list\n", __FUNCTION__);
4399 -               BUG();
4400 -       }
4401 -       entry = list_entry(node, eduIsrNode_t, list);
4402 -       list_del(node);
4403 -
4404 -       // Queue entry
4405 -       list_add_tail(node, &gJobQ.jobQ);
4406 -       spin_lock_init(&entry->lock);
4407 -       entry->mtd = mtd;
4408 -       entry->buffer = buffer;
4409 -       entry->oobarea = oobarea;
4410 -       entry->offset = offset;
4411 -       entry->ret = -1;
4412 -       entry->refCount = 1;
4413 -       entry->opComplete = ISR_OP_QUEUED;
4414 -
4415 -       return entry;
4416 -}
4417 -
4418 -
4419 -/*
4420 - * Push next sector for dummy read to head of queue, assuming caller holds queue lock
4421 - * Job will be next to be executed
4422 - */
4423 -eduIsrNode_t*  
4424 -ISR_push_request(struct mtd_info *mtd,
4425 -        void* buffer, u_char* oobarea, loff_t offset) 
4426 -{
4427 -       eduIsrNode_t* entry; 
4428 -       struct list_head* node;
4429 -
4430 -       // Grab one request from avail list
4431 -       if (list_empty(&gJobQ.availList)) {
4432 -               printk("%s: Empty avail list\n", __FUNCTION__);
4433 -               BUG();
4434 -       }
4435 -       node = gJobQ.availList.next;
4436 -       if (!node) {
4437 -               printk("%s: Empty avail list\n", __FUNCTION__);
4438 -               BUG();
4439 -       }
4440 -       entry = list_entry(node, eduIsrNode_t, list);
4441 -       list_del(node);
4442 -
4443 -       // Push to head of queue
4444 -       list_add(node, &gJobQ.jobQ);
4445 -       spin_lock_init(&entry->lock);
4446 -       entry->mtd = mtd;
4447 -       entry->buffer = buffer;
4448 -       entry->oobarea = oobarea;
4449 -       entry->offset = offset;
4450 -       entry->ret = -1;
4451 -       entry->refCount = 1;
4452 -       entry->opComplete = ISR_OP_QUEUED;
4453 -
4454 -       return entry;   
4455 -}
4456 -
4457 -
4458 -/*
4459 - * Called with ReqdQ Read lock held
4460 - * Returns pointer to node that satisfies opStatus, 
4461 - * with spin lock held (spin_lock()'ed assuming queue lock has been held))
4462 - */
4463 -eduIsrNode_t*
4464 -ISR_find_request( isrOpStatus_t opStatus)
4465 -{
4466 -       eduIsrNode_t* req;
4467 -
4468 -       list_for_each_entry(req, &gJobQ.jobQ, list) {
4469 -               
4470 -               // We called this with spin_lock_irqsave on queue lock, so no need for the irq variant
4471 -               spin_lock(&req->lock);
4472 -               if (req->opComplete == opStatus) {
4473 -                       return req;
4474 -               }
4475 -               spin_unlock(&req->lock);
4476 -       }
4477 -       return (eduIsrNode_t*) 0;;
4478 -}
4479 -
4480 -#if 0
4481 -static void
4482 -ISR_print_queue(void)
4483 -{
4484 -       eduIsrNode_t* req;
4485 -       int i=0;
4486 -
4487 -       list_for_each_entry(req, &gJobQ.jobQ, list) {
4488 -               
4489 -               // We called this with spin_lock_irqsave on queue lock, so no need for the irq variant
4490 -               printk("I=%d req=%p, offset=%0llx, opComp=%d, list=%p, next=%p, prev=%p\n",
4491 -                       i, req, req->offset, req->opComplete, &req->list, req->list.next, req->list.prev);
4492 -               i++;
4493 -       }
4494 -       return (eduIsrNode_t*) 0;;
4495 -}
4496 -#endif
4497 -
4498 -
4499 -/*
4500 - * We've got interrupted, and verified that job is complete. 
4501 - * Job lock has been held by caller.
4502 - * Do Read completion routines
4503 - * runs in interrupt context.
4504 - * Return returned value of read-op.
4505 - */
4506 -
4507 -
4508 -
4509 -#if 0 //def EDU_DOUBLE_BUFFER_READ
4510 -
4511 -/* Save this to be revived when we are sure that EDU's double buffering works */
4512 -static int
4513 -ISR_read_completion(eduIsrNode_t* req)
4514 -{
4515 -       /* Make sure that the current request does not cause an UNC ERR, as
4516 -        * that would require a read from the LKGS to reset EDU
4517 -        */
4518 -       if (req->status & HIF_INTR2_EDU_ERR) {
4519 -               uint32_t edu_err_status;
4520 -
4521 -               edu_err_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_ERR_STATUS);
4522 -               if (edu_err_status && edu_err_status != EDU_ERR_STATUS_NandECCcor) {
4523 -
4524 -                       /* If error, we must stop the on-going EDU op, because it will be dropped by EDU.  
4525 -                        * This is VLSI PR2389
4526 -                        */
4527 -                       edu_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_STATUS);
4528 -                       if (edu_status & BCHP_EDU_STATUS_Active_MASK) {
4529 -                               uint32_t edu_done = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_DONE);
4530 -
4531 -
4532 -                               // Abort current command
4533 -                               EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_STOP, BCHP_EDU_STOP_Stop_MASK);
4534 -
4535 -                               // Wait for Done to increment
4536 -                               while (edu_done == EDU_volatileRead(EDU_BASE_ADDRESS + EDU_DONE))
4537 -                                       udelay(10);
4538 -                               // Wait for Pending and Active to Clear
4539 -                               while (0 != (edu_status = EDU_volatileRead(EDU_BASE_ADDRESS + EDU_STATUS)))
4540 -                                       udelay(10);
4541 -                               // Reset Stop
4542 -                               EDU_volatileWrite(EDU_BASE_ADDRESS + EDU_STOP, 0);
4543 -                               // Let the process context thread handle the WAR,
4544 -                               // But we need to requeue the current op (req2)
4545 -                               req2 = req->list.next;
4546 -                               down(&req2->lock);
4547 -                               if (req2 && req2->opComplete == ISR_OP_SUBMITTED) {
4548 -                                       req2->opComplete = ISR_OP_QUEUED;
4549 -                               }
4550 -                               up(&req2->lock);
4551 -                       }
4552 -               }
4553 -                       
4554 -       }
4555 -        // ReadOp completes with no errors, queue next requests until Pending is set
4556 -                       
4557 -
4558 -}
4559 -
4560 -#endif 
4561 -
4562 -/*
4563 - * The requests are queued, some with ISR_OP_SUBMITTED status, some with ISR_OP_QUEUED
4564 - * When an interrupt comes in, we just look for the one that are in submitted status, and mark them
4565 - * as ISR_OP_COMPLETE, and wake up the wait queue.
4566 - * However, if (1) there is an error that requires a workaround, or (2) that the operation is not yet completed,
4567 - * we need to take appropriate action depending on the case.
4568 - * In (1), we have a false uncorrectable error, that need a read from the last known good sector, 
4569 - * so if double buffering is in effect, we need to abort the current EDU job, in order to do the workaround.
4570 - * In (2) we just update the current job, and let the HW interrupt us again.
4571 - * 
4572 - * Runs in interrupt context.
4573 - */ 
4574 -static irqreturn_t 
4575 -ISR_isr(int irq, void *devid, struct pt_regs *regs)
4576 -{
4577         uint32_t status, rd_data;
4578         uint32_t intrMask;  
4579 -       eduIsrNode_t* req;
4580 -       //struct list_head* node;
4581 -       uint32_t flashAddr;
4582         unsigned long flags;
4583  
4584         /*
4585          * Not mine
4586          */
4587 -       if (devid != (void*) &gJobQ) {
4588 +       if (devid != (void*) &gEduIsrData) {
4589                 return IRQ_NONE;
4590         }
4591  
4592 -       spin_lock_irqsave(&gJobQ.lock, flags);
4593 -       /* TBD: How to tell Read Request from Write Request */
4594 -       if (list_empty(&gJobQ.jobQ)) { 
4595 -               printk("%s: Impossible no job to process\n", __FUNCTION__);
4596 -               //BUG();
4597 -               // CLear interrupt and return
4598 -               intrMask = ISR_volatileRead(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_STATUS);
4599 -               ISR_disable_irq(intrMask);
4600 -               spin_unlock_irqrestore(&gJobQ.lock, flags);
4601 -               return IRQ_HANDLED;
4602 -       } 
4603 -       
4604 -       flashAddr = EDU_volatileRead(EDU_BASE_ADDRESS+EDU_EXT_ADDR) - (EDU_LENGTH_VALUE-1);
4605 -
4606 -       flashAddr &= ~(EDU_LENGTH_VALUE-1);
4607 -       
4608 -       req = ISR_find_request(ISR_OP_SUBMITTED);
4609 -
4610 -       // Paranoia
4611 -       if (!req) {
4612 -               printk("%s: Impossible failed to find queued job\n", __FUNCTION__);
4613 -               BUG();
4614 -       }
4615 -
4616 -       // req->lock held here.
4617 -                       
4618 -       /*
4619 -        * Remember the status, as there can be several L1 interrupts before completion.
4620 -        * Grab the lock first, we don't want any race condition.
4621 -        */
4622 -       // spin_lock(&req->lock);  Already locked by ISR_find_request
4623         intrMask = ISR_volatileRead(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_STATUS);
4624         rd_data = ISR_volatileRead(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS);
4625         
4626 -PRINTK("==> %s: Awaken rd_data=%08x, intrMask=%08x, cmd=%d, flashAddr=%08x\n", __FUNCTION__, 
4627 -       rd_data, intrMask, gJobQ.cmd, req->edu_ldw);
4628 +PRINTK("%s: Awaken rd_data=%08x, intrMask=%08x, cmd=%d, flashAddr=%08x\n", __FUNCTION__, 
4629 +       rd_data, intrMask, gEduIsrData.cmd, gEduIsrData.flashAddr);
4630  
4631 -       req->status |= rd_data;
4632 -       status = req->status & req->mask;
4633 -       
4634         /*
4635 -        * Evaluate exit/completion condition. 
4636 +        * Remember the status, as there can be several L1 interrupts before completion
4637          */
4638 -       switch (gJobQ.cmd) {
4639 +       spin_lock_irqsave(&gEduIsrData.lock, flags);
4640 +       gEduIsrData.status |= rd_data;
4641 +       status = gEduIsrData.status & gEduIsrData.mask;
4642 +       
4643 +       // Evaluate exit/completion condition
4644 +       switch (gEduIsrData.cmd) {
4645         case EDU_READ:
4646         case NAND_CTRL_READY:
4647 -               if  ((req->expect == (req->status & req->expect)) || 
4648 -                                                               (req->status & req->error))
4649 -               {
4650 -                       req->opComplete = ISR_OP_COMPLETED;
4651 -               }
4652 +               gEduIsrData.opComplete = ((gEduIsrData.expect == (gEduIsrData.status & gEduIsrData.expect)) || 
4653 +                                                               (gEduIsrData.status & gEduIsrData.error));
4654                 break;
4655                 
4656         case EDU_WRITE:
4657                 /* 
4658                  * We wait for both DONE|ERR +CTRL_READY
4659                  */
4660 -               if ((req->expect == (req->status & req->expect) ||
4661 -                                                                       (req->status & req->error))
4662 +               gEduIsrData.opComplete = ((gEduIsrData.expect == (gEduIsrData.status & gEduIsrData.expect) ||
4663 +                                                                       (gEduIsrData.status & gEduIsrData.error))
4664                                                                 &&
4665 -                                                               (req->status & HIF_INTR2_CTRL_READY))
4666 -               {
4667 -                       req->opComplete = ISR_OP_COMPLETED;
4668 -                       (void) dma_unmap_single(NULL, req->physAddr, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
4669 -               }
4670 -               break;  
4671 -               
4672 -       default:
4673 -               printk("%s: Invalid command %08x\n", __FUNCTION__, gJobQ.cmd);
4674 -               BUG();
4675 +                                                               (gEduIsrData.status & HIF_INTR2_CTRL_READY));
4676 +               break;                                                  
4677         }
4678 -       if (ISR_OP_COMPLETED == req->opComplete) {
4679 -               int submitted;
4680 -
4681 -               /* ACK interrupt */
4682 -               ISR_disable_irq(req->intr);
4683 -
4684 -               // Do we need to do WAR for EDU, since EDU stop dead in its track regardless of the kind of errors.  Bummer!
4685 -               if (req->status & HIF_INTR2_EDU_ERR) {
4686 -                       uint32_t edu_err_status;
4687 -
4688 -                       /*
4689 -                        * We need to do WAR for EDU, which just stops dead on its tracks if there is any error, correctable or not.
4690 -                        * Problem is, the WAR needs to be done in process context,
4691 -                        * so we wake up the process context thread, and handle the WAR there.
4692 -                        */
4693 -PRINTK("%s: Awaken process context thread for EDU WAR, flashAddr=%08x, status=%08x, hif_intr2=%08x\n", 
4694 -__FUNCTION__, req->edu_ldw, req->status, HIF_INTR2_EDU_ERR);
4695 -                       gJobQ.needWakeUp= 1;
4696 -                       req->opComplete = ISR_OP_NEED_WAR;
4697 -                       wake_up(&gEduWaitQ);
4698 -                       spin_unlock(&req->lock);
4699 -                       spin_unlock_irqrestore(&gJobQ.lock, flags);
4700 -                       return IRQ_HANDLED;
4701 -               }
4702 -
4703 -               /*
4704 -                * Get here only if there are no errors, call job completion routine.
4705 -                */
4706 -               switch (gJobQ.cmd) {
4707 -               case EDU_READ:
4708 -                       /* All is left to do is to handle the OOB read */
4709 -                       req->ret = brcmnand_edu_read_comp_intr(req->mtd, req->buffer, req->oobarea, req->offset,
4710 -                                               req->status);
4711 -                       break;
4712 -
4713 -               case EDU_WRITE:
4714 -                       {
4715 -                               /*
4716 -                                * Even if there are no HIF_INTR2_ERR, we still need to check
4717 -                                * the flash status.  If it is set, we need to update the BBT
4718 -                                * which requires process context WAR
4719 -                                */
4720 -                               struct brcmnand_chip *chip = req->mtd->priv;
4721 -                               uint32_t flashStatus = chip->ctrl_read(BCHP_NAND_INTFC_STATUS);
4722 -
4723 -                               req->needBBT=0;
4724 -                               /* Just to be dead sure */
4725 -                               if (!(flashStatus & BCHP_NAND_INTFC_STATUS_CTLR_READY_MASK)) {
4726 -                                       printk("%s: Impossible, CTRL-READY already asserted\n", __FUNCTION__);
4727 -                                       BUG();
4728 -                               }
4729 -                               /* Check for flash write error, in which case tell process context thread to handle it */
4730 -                               if (flashStatus & 0x1) {
4731 -                                       req->needBBT = 1;
4732 -                                       gJobQ.needWakeUp= 1;
4733 -                                       req->opComplete = ISR_OP_NEED_WAR;
4734 -                                       wake_up(&gEduWaitQ);
4735 -                                       spin_unlock(&req->lock);
4736 -                                       spin_unlock_irqrestore(&gJobQ.lock, flags);
4737 -                                       return IRQ_HANDLED;
4738 -                               }
4739 -                               /* Nothing to be done when everything is OK 
4740 -                               *else
4741 -                               *       req->ret = brcmnand_edu_write_completion(req->mtd, req->buffer, req->oobarea, req->offset,
4742 -                               *               req->status, req->physAddr, rq->needBBT);
4743 -                               */
4744 -                       }
4745 -                       break;
4746 -               }
4747 -
4748 -               // Jop completes with no errors, queue next requests until Pending is set
4749 -               list_del(&req->list);
4750 -
4751 -               list_add_tail(&req->list, &gJobQ.availList);
4752 -               spin_unlock(&req->lock);
4753 -               
4754 -               submitted = brcmnand_isr_submit_job();
4755 -
4756 -               if (!submitted) { /* No more job to submit, we are done, wake up process context thread */
4757 -                       wake_up(&gEduWaitQ);
4758 -               }
4759 -
4760 +       if (gEduIsrData.opComplete) {
4761 +               ISR_disable_irq(gEduIsrData.intr);
4762 +               wake_up_interruptible(&gEduWaitQ);
4763         }
4764 -               
4765         else {
4766                 /* Ack only the ones that show */
4767 -               uint32_t ack = req->status & req->intr;
4768 +               uint32_t ack = gEduIsrData.status & gEduIsrData.intr;
4769                 
4770 -PRINTK("%s: opComp=0, intr=%08x, mask=%08x, expect=%08x, err=%08x, status=%08x, rd_data=%08x, intrMask=%08x, flashAddr=%08x, DRAM=%08x\n", __FUNCTION__, 
4771 -req->intr, req->mask, req->expect, req->error, req->status, rd_data, intrMask, req->flashAddr, req->dramAddr);
4772 +printk("%s: opComp=0, intr=%08x, mask=%08x, expect=%08x, err=%08x, status=%08x, rd_data=%08x, intrMask=%08x, flashAddr=%08x, DRAM=%08x\n", __FUNCTION__, 
4773 +gEduIsrData.intr, gEduIsrData.mask, gEduIsrData.expect, gEduIsrData.error, gEduIsrData.status, rd_data, intrMask, gEduIsrData.flashAddr, gEduIsrData.dramAddr);
4774  
4775                 // Just disable the ones that are triggered
4776                 ISR_disable_irq(ack);
4777 -               req->intr &= ~ack;
4778 +               gEduIsrData.intr &= ~ack;
4779  
4780 -               if (req->intr) {
4781 +               if (gEduIsrData.intr) {
4782                         // Re-arm
4783 -                       ISR_enable_irq(req);
4784 +                       ISR_enable_irq();
4785                 }
4786                 else {
4787                         printk(KERN_ERR "%s: Lost interrupt\n", __FUNCTION__);
4788                         BUG();
4789                 }
4790 -               spin_unlock(&req->lock);
4791         }
4792 -       
4793 -       spin_unlock_irqrestore(&gJobQ.lock, flags);
4794 -       
4795 -PRINTK2("<== %s: \n", __FUNCTION__);
4796 +       spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4797         return IRQ_HANDLED;
4798  }
4799  
4800 -
4801 -
4802 -/*
4803 - * Called with no lock
4804 - * Wait until the Read Queue is empty
4805 - * Run in process context. 
4806 - * Return 0 if all jobs complete successfully
4807 - * Return error codes and abort if any job returned un-correctable errors.
4808 - */
4809 -int
4810 -ISR_wait_for_queue_completion(void)
4811 +uint32_t ISR_wait_for_completion(void)
4812  {
4813         //uint32_t rd_data;
4814 -//volatile unsigned int c = 0xfedeadad;
4815 -       int ret = -ERESTARTSYS;
4816 -       int waitret;
4817 +       int ret;
4818         unsigned long to_jiffies = 3*HZ; /* 3 secs */
4819 -       //unsigned long cur_jiffies = jiffies;
4820 -       unsigned long expired = jiffies + to_jiffies;
4821         int cmd;
4822 -       eduIsrNode_t* req;
4823 -       eduIsrNode_t saveReq;
4824 -       int submitted;
4825         unsigned long flags;
4826         
4827 -       /* Loop is for wait_event_interruptible_timeout */
4828 -       do {
4829 -               waitret = wait_event_timeout(gEduWaitQ, list_empty(&gJobQ.jobQ) || gJobQ.needWakeUp, to_jiffies);
4830 -               if (waitret == 0) { /* TimeOut */
4831 -                       ret = BRCMNAND_TIMED_OUT;
4832 -                       break;
4833 -               }
4834 -               spin_lock_irqsave(&gJobQ.lock, flags);
4835 -               if (gJobQ.needWakeUp) { /* Need to do process context WAR */                    
4836 -                       req = ISR_find_request(ISR_OP_NEED_WAR);
4837 +       ret = wait_event_interruptible_timeout(gEduWaitQ, gEduIsrData.opComplete, to_jiffies);
4838  
4839 -                       if (!req) {
4840 -                               printk("%s: Cannot find job that need WAR\n", __FUNCTION__);
4841 -                               BUG();
4842 -                       }
4843 +       spin_lock_irqsave(&gEduIsrData.lock, flags);
4844  
4845 -                       // Make a copy 
4846 -                       saveReq = *req;
4847 +       cmd = gEduIsrData.cmd;
4848 +       gEduIsrData.cmd = -1;
4849  
4850 -                       /* Mark the job as complete and free it */
4851 -                       req->opComplete = ISR_OP_COMPLETED;
4852 -                       gJobQ.needWakeUp = 0;
4853 -                       
4854 -                       // Job, with error, is now complete, remove it from queue, and submit next request
4855 -                       list_del(&req->list);
4856 -
4857 -                       list_add_tail(&req->list, &gJobQ.availList);
4858 -                       
4859 -                       spin_unlock(&req->lock);
4860 -
4861 -                       // req lock held inside ISR_find_request
4862 -                       switch (gJobQ.cmd) {
4863 -                       case EDU_READ:
4864 -                               ret = brcmnand_edu_read_completion(
4865 -                                                               saveReq.mtd, saveReq.buffer, saveReq.oobarea, saveReq.offset,
4866 -                                                               saveReq.status);
4867 -                               break;
4868 -                       case EDU_WRITE:
4869 -                               ret = brcmnand_edu_write_war(
4870 -                                                       saveReq.mtd, saveReq.buffer, saveReq.oobarea, saveReq.offset,
4871 -                                                       saveReq.status, saveReq.needBBT);
4872 -                               break;
4873 -                       default:
4874 -                               printk("%s: Unknown command %d\n", __FUNCTION__, gJobQ.cmd);
4875 -                               BUG();
4876 -                       }
4877 -                       if (ret == 0) { /* WAR worked */
4878 -                               // Submit next job (which is our dummy job in WAR)
4879 -                               submitted = brcmnand_isr_submit_job();
4880 -                       }
4881 -                       else {
4882 -                               eduIsrNode_t* tmp;
4883 -
4884 -                               // Abort queue, TBD
4885 -                               list_for_each_entry_safe(req, tmp, &gJobQ.jobQ, list) {
4886 -                                       list_del(&req->list);
4887 -
4888 -                                       list_add_tail(&req->list, &gJobQ.availList);
4889 -                               }
4890 -                       }
4891 +       if (!gEduIsrData.opComplete && ret <= 0) {
4892 +               ISR_disable_irq(gEduIsrData.intr);
4893 +               if (ret == -ERESTARTSYS) {
4894 +                       spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4895 +                       return (uint32_t) (ERESTARTSYS);  // Retry on Read
4896 +               }       
4897 +               else if (ret == 0) { 
4898 +                       //gEduIsrData.opComplete = 1;
4899 +                       printk("%s: DMA timed out\n", __FUNCTION__);
4900 +                       spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4901 +                       return 0; // Timed Out
4902                 }
4903 -               else { // List is empty
4904 -                       ret = 0; // Loop exit condition
4905 -               }
4906 -               spin_unlock_irqrestore(&gJobQ.lock, flags);     
4907 -       } while ((ret == -ERESTARTSYS) && time_before(jiffies, expired));
4908 -       return ret;
4909 +       
4910 +               // DMA completes on Done or Error.
4911 +               //rd_data = ISR_volatileRead(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS);
4912 +       
4913 +               printk("%s: EDU completes but Status is %08x\n", __FUNCTION__, gEduIsrData.status);
4914 +               //rd_data = 0; // Treat as a timeout
4915 +       }
4916 +       spin_unlock_irqrestore(&gEduIsrData.lock, flags);
4917 +       return gEduIsrData.status;
4918  }
4919  
4920  
4921 -#if 0  //ndef CONFIG_MTD_BRCMNAND_ISR_QUEUE
4922 -
4923 -/*
4924 - * Wait for completion when not using queue
4925 - */
4926 -uint32_t ISR_wait_for_completion(void)
4927 +uint32_t ISR_cache_is_valid(uint32_t clearMask)
4928  {
4929 -       //uint32_t rd_data;
4930 -//volatile unsigned int c = 0xfedeadad;
4931 -       int ret = -ERESTARTSYS;
4932 -       unsigned long to_jiffies = 3*HZ; /* 3 secs */
4933 -       //unsigned long cur_jiffies = jiffies;
4934 -       unsigned long expired = jiffies + to_jiffies;
4935 -       int cmd;
4936 -       int retries = 2;
4937 -       //unsigned long flags;
4938 -//volatile unsigned int counter = 0xAABBCCDD;
4939 -//static int erestartsys = 0;
4940 +       uint32_t rd_data = ISR_volatileRead(BCM_BASE_ADDRESS+BCHP_HIF_INTR2_CPU_STATUS);
4941 +       unsigned long flags;
4942  
4943 -       
4944 -       while (ret == -ERESTARTSYS ) {
4945 -//printk("%s: jiffies=%08lx, expired=%08lx\n", __FUNCTION__, jiffies, expired);
4946 -               if (((retries--) < 0) || time_after(jiffies, expired)) {
4947 -                       ret = 0; // Timed out
4948 -                       return ERESTARTSYS;
4949 -               }
4950 -               else  {
4951 -                       // Recalculate TO, for retries
4952 -                       to_jiffies = expired - jiffies;
4953 -                       //ret = wait_event_interruptible_timeout(gEduWaitQ, gEduIsrData.opComplete, to_jiffies);
4954 -                       ret = wait_event_timeout(gEduWaitQ, gEduIsrData.opComplete, to_jiffies);
4955 -               }
4956 +       /*
4957 +        * Already there, no need to wait
4958 +        */
4959 +       if (rd_data & HIF_INTR2_CTRL_READY)
4960 +               return rd_data;
4961  
4962 -PRINTK3("==>%s\n", __FUNCTION__);
4963 -               down(&gEduIsrData.lock);
4964 -
4965 -               cmd = gEduIsrData.cmd;
4966 -               gEduIsrData.cmd = -1;
4967 -
4968 -               if (!gEduIsrData.opComplete && ret <= 0) {
4969 -                       ISR_disable_irq(gEduIsrData.intr);
4970 -
4971 -                       if (ret == -ERESTARTSYS) {
4972 -                               up(&gEduIsrData.lock);
4973 -
4974 -//if (5 >= erestartsys++)
4975 -//printk("Pending signals: %08lx-%08lx-%08lx-%08lx\n", 
4976 -//current->pending.signal.sig[0], current->pending.signal.sig[1],current->pending.signal.sig[2], current->pending.signal.sig[3]);
4977 -                               continue;
4978 -                       }       
4979 -                       else if (ret == 0) { 
4980 -                               //gEduIsrData.opComplete = 1;
4981 -                               PRINTK("%s: DMA timedout\n", __FUNCTION__);
4982 -
4983 -                               up(&gEduIsrData.lock);
4984 -//printk("<==%s, ret=0 TimeOut\n", __FUNCTION__);
4985 -PRINTK4("<==%s, ret=0 TimeOut\n", __FUNCTION__);
4986 -
4987 -                               return 0; // Timed Out
4988 -                       }
4989 -
4990 -                       
4991 -                       
4992 -                       // DMA completes on Done or Error.
4993 -                       //rd_data = ISR_volatileRead(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS);
4994 +       // Clear existing interrupt
4995 +       ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_SET, clearMask);
4996 +       
4997 +        do {
4998 +               spin_lock_irqsave(&gEduIsrData.lock, flags);
4999 +               gEduIsrData.flashAddr = 0;
5000 +               gEduIsrData.dramAddr = 0;
5001                 
5002 -PRINTK("%s: EDU completes but Status is %08x\n", __FUNCTION__, gEduIsrData.status);
5003 -                       //rd_data = 0; // Treat as a timeout
5004 -               }
5005 +               /*
5006 +                * Enable L2 Interrupt
5007 +                */
5008 +               gEduIsrData.cmd = NAND_CTRL_READY;
5009 +               gEduIsrData.opComplete = 0;
5010 +               gEduIsrData.status = 0;
5011 +               
5012 +               gEduIsrData.mask = HIF_INTR2_CTRL_READY;
5013 +               gEduIsrData.expect = HIF_INTR2_CTRL_READY;
5014 +               gEduIsrData.error = 0;
5015 +               gEduIsrData.intr = HIF_INTR2_CTRL_READY;
5016  
5017 -               up(&gEduIsrData.lock);
5018 -       }
5019 +               spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5020  
5021 -       return gEduIsrData.status;
5022 -}
5023 -#endif
5024 +               ISR_enable_irq();
5025 +       
5026 +               rd_data = ISR_wait_for_completion();
5027 +       } while (rd_data != 0 && !(rd_data & HIF_INTR2_CTRL_READY));
5028 +       return rd_data;
5029  
5030 -/*
5031 - * Since we cannot use the interrupt, or call schedule, we will have to busy-wait for controller ready.
5032 - * Executes in interrupt context
5033 - */
5034 -int 
5035 -ISR_cache_is_valid(void)
5036 -{
5037 -       uint32_t rd_data; 
5038 -       unsigned long expired = jiffies + HZ/10000; /* 100 usec, enough for any flash op to complete */
5039 -
5040 -       do {
5041 -               rd_data = ISR_volatileRead(BCM_BASE_ADDRESS+BCHP_HIF_INTR2_CPU_STATUS);
5042 -
5043 -       } while (!(rd_data & HIF_INTR2_CTRL_READY) && time_before(jiffies, expired));
5044 -       return (0 != (rd_data & HIF_INTR2_CTRL_READY)) ;
5045  }
5046  
5047  void ISR_init(void)
5048  {
5049 -       int i, ret;
5050 +       int ret;
5051         uint32_t intrMask;
5052 -       unsigned long flags;
5053  
5054 -       //init_MUTEX(&gEduIsrData.lock); // Write lock
5055 -       spin_lock_init(&gJobQ.lock);            // Read queue lock
5056 +       spin_lock_init(&gEduIsrData.lock);
5057         
5058 -       INIT_LIST_HEAD(&gJobQ.jobQ);
5059 -       INIT_LIST_HEAD(&gJobQ.availList);
5060 -       /* Add all nodes from pool to avail list */
5061 -
5062 -       spin_lock_irqsave(&gJobQ.lock, flags);
5063 -PRINTK("%s: B4\n", __FUNCTION__);
5064 -ISR_print_avail_list();
5065 -       for (i=0; i<MAX_JOB_QUEUE_SIZE;i++) {
5066 -               eduIsrNode_t* e = &gEduIsrPool[i];
5067 -
5068 -               //init_MUTEX(&e->lock);
5069 -               list_add_tail(&e->list, &gJobQ.availList);
5070 -       }
5071 -       spin_unlock_irqrestore(&gJobQ.lock, flags);
5072 -PRINTK("%s: After\n", __FUNCTION__);
5073 -ISR_print_avail_list();
5074 -//BUG();
5075 -
5076         // Mask all L2 interrupts
5077         intrMask = ISR_volatileRead(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_STATUS);
5078         ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_SET, ~intrMask);
5079         BARRIER;
5080  
5081 -       ret = request_irq(BCM_LINUX_CPU_INTR1_IRQ, ISR_isr, SA_SHIRQ, "brcmnand EDU", &gJobQ);
5082 +       ret = request_irq(BCM_LINUX_CPU_INTR1_IRQ, ISR_isr, SA_SHIRQ, "brcmnand EDU", &gEduIsrData);
5083         if (ret) {
5084                 printk(KERN_INFO "%s: request_irq(BCM_LINUX_CPU_INTR1_IRQ) failed ret=%d.  Someone not sharing?\n", 
5085                         __FUNCTION__, ret);
5086         }
5087 +       
5088  }
5089  
5090  
5091 Index: drivers/mtd/brcmnand/eduproto.h
5092 ===================================================================
5093 --- drivers/mtd/brcmnand/eduproto.h     (revision 1)
5094 +++ drivers/mtd/brcmnand/eduproto.h     (working copy)
5095 @@ -77,7 +77,7 @@
5096  
5097  
5098  extern void EDU_init(void);
5099 -extern int EDU_write(volatile const void*, uint32_t, uint32_t*);
5100 +extern int EDU_write(volatile const void*, uint32_t);
5101  extern int EDU_read(volatile void*, uint32_t);
5102  
5103  extern uint32_t EDU_get_error_status_register(void);
5104 Index: drivers/mtd/brcmnand/brcmnand_priv.h
5105 ===================================================================
5106 --- drivers/mtd/brcmnand/brcmnand_priv.h        (revision 1)
5107 +++ drivers/mtd/brcmnand/brcmnand_priv.h        (working copy)
5108 @@ -38,27 +38,13 @@
5109  #include <linux/wait.h>
5110  #include <linux/spinlock.h>
5111  #include <linux/interrupt.h>
5112 -#include <linux/list.h>
5113  
5114  //#include "edu.h"
5115  #endif
5116  
5117 -#define BRCMNAND_CORRECTABLE_ECC_ERROR         (1)
5118 -#define BRCMNAND_SUCCESS                                               (0)
5119 -#define BRCMNAND_UNCORRECTABLE_ECC_ERROR       (-1)
5120 -#define BRCMNAND_FLASH_STATUS_ERROR                    (-2)
5121 -#define BRCMNAND_TIMED_OUT                                     (-3)
5122 -
5123 -#ifdef CONFIG_MTD_BRCMNAND_EDU
5124 -#define BRCMEDU_CORRECTABLE_ECC_ERROR          (4)
5125 -#define BRCMEDU_UNCORRECTABLE_ECC_ERROR      (-4)
5126 -
5127 -#define  BRCMEDU_MEM_BUS_ERROR                         (-5)
5128 -
5129 -
5130 +#if defined( CONFIG_MTD_BRCMNAND_EDU )
5131  #define BRCMNAND_malloc(size) kmalloc(size, GFP_DMA)
5132  #define BRCMNAND_free(addr) kfree(addr)
5133 -
5134  #else
5135  #define BRCMNAND_malloc(size) vmalloc(size)
5136  #define BRCMNAND_free(addr) vfree(addr)
5137 @@ -77,125 +63,31 @@
5138                                      "nop; nop; nop; nop; nop; nop;\n\t" \
5139                                      ".set reorder\n\t")
5140  
5141 -/* 
5142 - * Right now we submit a full page Read for queueing, so with a 8KB page,
5143 - * and an ECC step of 512B, the queue depth is 16. Add 2 for dummy elements
5144 - * during EDU WAR
5145 - */
5146 -#if CONFIG_MTD_BRCMNAND_VERSION <=  CONFIG_MTD_BRCMNAND_VERS_3_3
5147 -#define MAX_NAND_PAGE_SIZE     (4<<10)
5148  
5149 -#else
5150 -#define MAX_NAND_PAGE_SIZE     (8<<10)
5151 -#endif
5152 +typedef struct eduIsrData {
5153 +       spinlock_t lock; // For SMP and future double buffering on Read.
5154 +       int cmd;        // 1 == Read, 0 == Write
5155  
5156 -/* Max queue size is (PageSize/512B_ECCSize)+2 spare for WAR */
5157 -#define MAX_JOB_QUEUE_SIZE     ((MAX_NAND_PAGE_SIZE>>9))
5158 -
5159 -typedef enum {
5160 -       ISR_OP_QUEUED = 0, 
5161 -       ISR_OP_SUBMITTED = 1, 
5162 -       ISR_OP_NEED_WAR = 2,
5163 -       ISR_OP_COMPLETED = 3, 
5164 -       ISR_OP_TIMEDOUT = 4
5165 -} isrOpStatus_t;
5166 -
5167 -typedef struct eduIsrNode {
5168 -       struct list_head list;
5169 -       spinlock_t lock; // per Node update lock
5170 -       // int cmd;     // 1 == Read, 0 == Write
5171 -
5172 -       // ISR stuffs
5173         uint32_t mask;  /* Clear status mask */
5174         uint32_t expect;        /* Status on success */
5175         uint32_t error; /* Status on error */
5176         uint32_t intr;          /* Interrupt bits */
5177         uint32_t status;        /* Status read during ISR.  There may be several interrupts before completion */
5178 -       isrOpStatus_t opComplete;       /* Completion status */
5179 +       int opComplete; /* Completion criterion */
5180  
5181 -       /* Controller Level params (for queueing)  */
5182 -       struct mtd_info* mtd;
5183 -       void*   buffer;
5184 -       u_char*         oobarea;
5185 -       loff_t  offset;
5186 -       int             ret;
5187 -       int             needBBT;
5188 +       /* For debugging only */
5189 +       uint32_t flashAddr;
5190 +       uint32_t dramAddr;
5191 +} eduIsrData_t;
5192  
5193 -       /* EDU level params (for ISR) */
5194 -       uint32_t edu_ldw;
5195 -       uint32_t physAddr;
5196 -       uint32_t hif_intr2;
5197 -       uint32_t edu_status;
5198 +extern eduIsrData_t gEduIsrData;
5199  
5200 -       int refCount;           /* Marked for re-use when refCount=0 */
5201 -       unsigned long expired; /* Time stamp for expiration, 3 secs from submission */
5202 -} eduIsrNode_t;
5203 -
5204 -/*
5205 - * Read/Write Job Q.
5206 - * Process one page at a time, and queue 512B sector Read or Write EDU jobs.
5207 - * ISR will wake up the process context thread iff
5208 - * 1-EDU reports an error, in which case the process context thread need to be awaken
5209 - *             in order to do WAR
5210 - * 2-Q is empty, in which case the page read/write op is complete.
5211 - */
5212 -typedef struct jobQ_t {
5213 -       struct list_head        jobQ;           /* Nodes queued for EDU jobs */
5214 -       struct list_head        availList;      /* Free Nodes */
5215 -       spinlock_t              lock;           /* Queues guarding spin lock */
5216 -       int                             needWakeUp;     /* Wake up Process context thread to do EDU WAR */
5217 -       int                             cmd;            /* 1 == Read, 0 == Write */
5218 -} isrJobQ_t;
5219 -
5220 -extern isrJobQ_t gJobQ; 
5221 -
5222  void ISR_init(void);
5223  
5224 -/*
5225 - * Submit the first entry that is in queued state,
5226 - * assuming queue lock has been held by caller.
5227 - * 
5228 - * @doubleBuffering indicates whether we need to submit just 1 job or until EDU is full (double buffering)
5229 - * Return the number of job submitted for read.
5230 - *
5231 - * In current version (v3.3 controller), since EDU only have 1 register for EDU_ERR_STATUS,
5232 - * we can't really do double-buffering without losing the returned status of the previous read-op.
5233 - */
5234 -#undef EDU_DOUBLE_BUFFER_READ
5235 -
5236 -int brcmnand_isr_submit_job(void);
5237 -
5238 -eduIsrNode_t*  ISR_queue_read_request(struct mtd_info *mtd,
5239 -        void* buffer, u_char* oobarea, loff_t offset);
5240 -eduIsrNode_t* ISR_queue_write_request(struct mtd_info *mtd,
5241 -        const void* buffer, const u_char* oobarea, loff_t offset);
5242 -eduIsrNode_t*  ISR_push_request(struct mtd_info *mtd,
5243 -        void* buffer, u_char* oobarea, loff_t offset);
5244 -
5245 -
5246 -int brcmnand_edu_read_completion(struct mtd_info* mtd, 
5247 -        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status);
5248 -
5249 -int brcmnand_edu_read_comp_intr(struct mtd_info* mtd, 
5250 -        void* buffer, u_char* oobarea, loff_t offset, uint32_t intr_status);
5251 -
5252 -#ifdef CONFIG_MTD_BRCMNAND_ISR_QUEUE
5253 -int brcmnand_edu_write_completion(struct mtd_info *mtd,
5254 -        const void* buffer, const u_char* oobarea, loff_t offset, uint32_t intr_status, 
5255 -        int needBBT);
5256 -#endif
5257 -eduIsrNode_t* ISR_find_request( isrOpStatus_t opStatus);
5258 -
5259  uint32_t ISR_wait_for_completion(void);
5260 +uint32_t ISR_cache_is_valid(uint32_t clearMask);
5261  
5262 -/*
5263 - *  wait for completion with read/write Queue
5264 - */
5265 -int ISR_wait_for_queue_completion(void);
5266 -
5267 -int ISR_cache_is_valid(void);
5268 -
5269 -static __inline__ uint32_t ISR_volatileRead(uint32_t addr)
5270 +static inline uint32_t ISR_volatileRead(uint32_t addr)
5271  {
5272          volatile uint32_t* pAddr;
5273          
5274 @@ -204,7 +96,7 @@
5275          return *(uint32_t *)pAddr;
5276  }
5277  
5278 -static __inline__ void ISR_volatileWrite(uint32_t addr, uint32_t data)
5279 +static inline void ISR_volatileWrite(uint32_t addr, uint32_t data)
5280  {
5281          volatile uint32_t* pAddr;
5282  
5283 @@ -212,7 +104,7 @@
5284          *pAddr = (volatile uint32_t)data;
5285  }
5286  
5287 -static __inline__ void ISR_enable_irq(eduIsrNode_t* req)
5288 +static inline void ISR_enable_irq(void)
5289  {
5290         uint32_t intrMask; 
5291         //unsigned long flags;
5292 @@ -220,68 +112,42 @@
5293         //spin_lock_irqsave(&gEduIsrData.lock, flags);
5294         
5295         // Clear status bits
5296 -       ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_CLEAR, req->mask);
5297 +       ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_CLEAR, gEduIsrData.mask);
5298  
5299 +#if 0
5300 +       // Disable everything that may screw us up
5301 +       intrMask = EDU_volatileRead(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_STATUS);
5302 +       EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_SET, ~intrMask);
5303 +PRINTK("%s-1: intrMask=%08x\n", __FUNCTION__, intrMask);
5304 +
5305 +       BARRIER;
5306 +#endif
5307 +
5308         // Enable interrupt
5309 -       ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_CLEAR, req->intr);
5310 +       ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_CLEAR, gEduIsrData.intr);
5311  
5312 +#if 0  
5313 +intrMask = EDU_volatileRead(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_STATUS);
5314 +PRINTK("%s-2: intrMask=%08x\n", __FUNCTION__, intrMask);
5315 +#endif
5316         //spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5317  }
5318  
5319 -static __inline__ void ISR_disable_irq(uint32_t mask)
5320 +static inline void ISR_disable_irq(uint32_t mask)
5321  {
5322  
5323         /* Disable L2 interrupts */
5324         ISR_volatileWrite(BCM_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_MASK_SET, mask);
5325  
5326 +       /* Clear L2 interrupts */
5327 +       //EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_CLEAR, mask);
5328  }
5329  
5330 +#endif
5331  
5332 -/*
5333 - * For debugging
5334 - */
5335  
5336 -#ifdef DEBUG_ISR
5337  
5338 -static void __inline__
5339 -ISR_print_queue(void)
5340 -{
5341 -       eduIsrNode_t* req;
5342 -       //struct list_head* node;
5343 -       int i = 0;
5344  
5345 -       list_for_each_entry(req, &gJobQ.jobQ, list) {
5346 -               
5347 -               printk("i=%d, cmd=%d, offset=%08llx, flashAddr=%08x, opComp=%d, status=%08x\n",
5348 -                       i, gJobQ.cmd, req->offset, req->edu_ldw,req->opComplete, req->status);
5349 -               i++;
5350 -       }       
5351 -}
5352 -
5353 -static void __inline__
5354 -ISR_print_avail_list(void)
5355 -{
5356 -       eduIsrNode_t* req;
5357 -       //struct list_head* node;
5358 -       int i = 0;
5359 -
5360 -       printk("AvailList=%p, next=%p\n", &gJobQ.availList, gJobQ.availList.next);
5361 -       list_for_each_entry(req, &gJobQ.availList, list) {
5362 -               printk("i=%d, req=%p, list=%p\n", i, req, &req->list);
5363 -               i++;
5364 -       }       
5365 -}
5366 -#else
5367 -#define IS_print_queue()
5368 -#define ISR_print_avail_list()
5369 -#endif // DEBUG_ISR
5370 -
5371 -
5372 -#endif // CONFIG_MTD_BRCMNAND_USE_ISR
5373 -
5374 -
5375 -
5376 -
5377  /**
5378   * brcmnand_scan - [BrcmNAND Interface] Scan for the BrcmNAND device
5379   * @param mtd          MTD device structure
5380 Index: drivers/mtd/brcmnand/edu.c
5381 ===================================================================
5382 --- drivers/mtd/brcmnand/edu.c  (revision 1)
5383 +++ drivers/mtd/brcmnand/edu.c  (working copy)
5384 @@ -37,7 +37,6 @@
5385  
5386  
5387  #include <linux/mm.h>
5388 -#include <linux/dma-mapping.h>
5389  #include <asm/page.h>
5390  
5391  
5392 @@ -134,11 +133,11 @@
5393   * Returns 1 if OK
5394   *             0 otherwise
5395   */
5396 -int EDU_buffer_OK(volatile void* vaddr, int command)
5397 +int EDU_buffer_OK(volatile void* vaddr)
5398  {
5399         unsigned long addr = (unsigned long) vaddr;
5400  
5401 -#if !defined(CONFIG_MIPS_BCM7440) && !defined(CONFIG_MIPS_BCM7601) && !defined(CONFIG_MIPS_BCM7635)
5402 +#if !defined(CONFIG_MIPS_BCM7440) && !defined(CONFIG_MIPS_BCM7601)
5403  // Requires 32byte alignment only of platforms other than 7440 and 7601 (and Dune)
5404         if (addr & 0x1f) {
5405                 // Must be 32-byte-aligned
5406 @@ -155,14 +154,11 @@
5407                 return 0;
5408         }
5409  #endif
5410 -
5411         else if (!(addr & KSEG0)) { 
5412                 // User Space
5413                 return 0;
5414         }
5415  
5416 -       
5417 -
5418         // TBD: Since we only enable block for MEM0, we should make sure that the physical
5419         // address falls in MEM0.
5420         
5421 @@ -170,13 +166,6 @@
5422                 // VM Address
5423                 return 0;
5424         }
5425 -
5426 -#if 0 //def CONFIG_MIPS_BCM7420
5427 -       else if (command == EDU_WRITE && (addr & 0xff)) { // Write must be aligned on 256B
5428 -printk("Write must be aligned on 128B (addr=%08x)\n", addr);
5429 -               return 0;
5430 -       }
5431 -#endif
5432         return 1;
5433  }
5434  
5435 @@ -518,10 +507,6 @@
5436   * Read data on success or error.
5437   */
5438  
5439 -extern void 
5440 -dump_nand_regs(struct brcmnand_chip* chip, loff_t offset, uint32_t pa, int which);
5441 -#define MAX_DUMPS 10
5442 -extern int numDumps;
5443  
5444  uint32_t EDU_poll(uint32_t address, uint32_t expect, uint32_t error, uint32_t mask)
5445  {
5446 @@ -535,11 +520,6 @@
5447         address, expect, mask, error);
5448          __sync();
5449          rd_data = EDU_volatileRead(address);
5450 -if (numDumps < MAX_DUMPS)
5451 - {
5452 - dump_nand_regs(NULL, 0, 0, numDumps++);
5453 - }
5454 -   
5455  //edu_debug = 0;
5456           
5457          timeout = jiffies + msecs_to_jiffies(1000); // 3 sec timeout for now (testing)
5458 @@ -548,23 +528,18 @@
5459  //      while ((rd_data & mask) != (expect & mask)) /* && (i<cnt) */
5460          while (((rd_data & mask) != (expect & mask)) && !((rd_data & mask) & error))
5461          {
5462 -
5463                    if ( 0 /*(i %1000000) == 1 */) 
5464                            {PRINTK("Polling addr=%08x, expect=%08x, mask=%08x!\n", address, expect, mask);
5465                             PRINTK("EDU_poll read: %08x\n", rd_data);}
5466                           
5467                  //__sync(); //PLATFORM_IOFLUSH_WAR();
5468                  rd_data = EDU_volatileRead(address);
5469 -
5470 -             // JR+ 2008-02-01 Allow other tasks to run while waiting
5471 +                
5472 +                // JR+ 2008-02-01 Allow other tasks to run while waiting
5473                  //cond_resched();
5474                  cond_resched();
5475                  // JR- 2008-02-01 Allow other tasks to run while waiting
5476 -if (numDumps < MAX_DUMPS)
5477 - {
5478 - dump_nand_regs(NULL, 0, 0, numDumps++);
5479 - }
5480 -                   
5481 +                
5482                  i++;
5483                  if(!time_before(jiffies, timeout))
5484                  {
5485 @@ -662,7 +637,7 @@
5486           // SUN_GISB_ARB_TIMER = 0x10000
5487          EDU_volatileWrite(0xb040600c, 0x00010000);
5488  
5489 -#elif defined( CONFIG_MIPS_BCM7601 ) || defined( CONFIG_MIPS_BCM7635 )
5490 +#elif defined( CONFIG_MIPS_BCM7601 )
5491         {
5492  #define ENABLE_256MB_GISB_WINDOW 0x1
5493                 volatile unsigned long* PCI_GEN_GISB_WINDOW_SIZE = 
5494 @@ -691,7 +666,7 @@
5495         }
5496  
5497  #elif defined( CONFIG_MIPS_BCM7420 )
5498 -       // Make sure that RTS grants some cycle to EDU, or we have to steal some from RR
5499 +       // Make sure that RTS grants some cycles to EDU, or we have to steal some
5500         {
5501  #define BLOCKED_OUT 0x001fff00
5502  #define RR_ENABLED     0x80   /* Bit 7 */
5503 @@ -708,29 +683,6 @@
5504                 volatile unsigned long* PCI_GEN_PCI_CTRL = 
5505                         (volatile unsigned long*) KSEG1ADDR(0x10440104);
5506                 volatile unsigned long pci_gen_pci_ctrl;
5507 -
5508 -#if 0 // Block out MoCA
5509 -               volatile unsigned long* MEMC_0_1_CLIENT_INFO_59= 
5510 -                       (volatile unsigned long*) KSEG1ADDR(0x103b10f0);
5511 -               volatile unsigned long memc_client_59;
5512 -               volatile unsigned long* MEMC_0_1_CLIENT_INFO_62= 
5513 -                       (volatile unsigned long*) KSEG1ADDR(0x103b10fc);
5514 -               volatile unsigned long memc_client_62;
5515 -
5516 -               /* Bits 08-20 are all 1 == Blocked */
5517 -               memc_client_59 = *MEMC_0_1_CLIENT_INFO_59;
5518 -               printk("MEMC_0_1_CLIENT_INFO_59 Before=%08lx\n", memc_client_59);
5519 -               *MEMC_0_1_CLIENT_INFO_59 = memc_client_59|0x001fff00;
5520 -               *MEMC_0_1_CLIENT_INFO_59 &= ~RR_ENABLED;
5521 -               printk("MEMC_0_1_CLIENT_INFO_59 After blocked out=%08lx\n", *MEMC_0_1_CLIENT_INFO_59);
5522 -
5523 -               memc_client_62 = *MEMC_0_1_CLIENT_INFO_62;
5524 -               printk("MEMC_0_1_CLIENT_INFO_62 Before=%08lx\n", memc_client_62);
5525 -               *MEMC_0_1_CLIENT_INFO_62 = memc_client_62|0x001fff00;
5526 -               *MEMC_0_1_CLIENT_INFO_62 &= ~RR_ENABLED;
5527 -               printk("MEMC_0_1_CLIENT_INFO_62 After blocked out=%08lx\n", *MEMC_0_1_CLIENT_INFO_62);
5528 -               
5529 -#endif
5530          
5531                 /* Bits 08-20 are all 1 == Blocked */
5532                 memc_client_17 = *MEMC_0_1_CLIENT_INFO_17;
5533 @@ -753,7 +705,6 @@
5534                   pci_gen_pci_ctrl = *PCI_GEN_PCI_CTRL;
5535                   pci_gen_pci_ctrl &= ~PARK_ON_MASK;
5536                   pci_gen_pci_ctrl |= PARK_ON_EBI;
5537 -                 EDU_volatileWrite(PCI_GEN_PCI_CTRL, pci_gen_pci_ctrl);
5538         }
5539  #endif
5540  
5541 @@ -771,50 +722,37 @@
5542  //edu_debug = 0;
5543  }
5544  
5545 -#ifndef CONFIG_MTD_BRCMNAND_ISR_QUEUE // batch mode
5546 -
5547  /*
5548   * THT: 07/31/08: This does not work.  One has to write the 512B Array from the NAND controller into 
5549   * the EXT registers for it to work.  Will fix it when I come back.
5550   */
5551 -int EDU_write(volatile const void* virtual_addr_buffer, 
5552 -       uint32_t external_physical_device_address,
5553 -       uint32_t* physAddr)
5554 +int EDU_write(volatile const void* virtual_addr_buffer, uint32_t external_physical_device_address)
5555  {
5556 -       //uint32_t  phys_mem;
5557 +       uint32_t  phys_mem;
5558         // uint32_t  rd_data;
5559 -       //unsigned long flags;
5560 +       unsigned long flags;
5561  
5562  edu_debug = gdebug;
5563 -
5564 -#if 0
5565         phys_mem = EDU_virt_to_phys((void *)virtual_addr_buffer);
5566 -
5567 -#else
5568 -       // EDU is not a PCI device
5569 -       // THT: TBD: Need to adjust for cache line size here, especially on 7420.
5570 -       *physAddr = dma_map_single(NULL, virtual_addr_buffer, EDU_LENGTH_VALUE, DMA_TO_DEVICE);
5571 -#endif
5572 -
5573 -       if (!(*physAddr)) {
5574 +       if (!phys_mem) {
5575                 return (-1);
5576         }
5577  
5578  //edu_debug = 4;
5579         
5580 -//printk("EDU_write: vBuff: %p physDev: %08x, PA=%08x\n", 
5581 -//     virtual_addr_buffer, external_physical_device_address, phys_mem);
5582 +//PRINTK("EDU_write: vBuff: %p physDev: %08x, PA=%08x\n", 
5583 +//virtual_addr_buffer, external_physical_device_address, phys_mem);
5584  
5585  #ifdef CONFIG_MTD_BRCMNAND_USE_ISR
5586 -       down(&gEduIsrData.lock);
5587 -       gEduIsrData.edu_ldw = external_physical_device_address;
5588 -       gEduIsrData.physAddr = *physAddr;
5589 +       spin_lock_irqsave(&gEduIsrData.lock, flags);
5590 +       gEduIsrData.flashAddr = external_physical_device_address;
5591 +       gEduIsrData.dramAddr = phys_mem;
5592         
5593         /*
5594          * Enable L2 Interrupt
5595          */
5596         gEduIsrData.cmd = EDU_WRITE;
5597 -       gEduIsrData.opComplete = ISR_OP_SUBMITTED;
5598 +       gEduIsrData.opComplete = 0;
5599         gEduIsrData.status = 0;
5600         
5601         /* On write we wait for both DMA done|error and Flash Status */
5602 @@ -823,8 +761,8 @@
5603         gEduIsrData.error = HIF_INTR2_EDU_ERR;
5604         gEduIsrData.intr = HIF_INTR2_EDU_DONE_MASK|HIF_INTR2_CTRL_READY;
5605  
5606 -       up(&gEduIsrData.lock);
5607 -       ISR_enable_irq(&gEduIsrData);
5608 +       spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5609 +       ISR_enable_irq();
5610  
5611  #else
5612         EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EDU_CLEAR_MASK);
5613 @@ -838,17 +776,15 @@
5614  
5615         //EDU_waitForNoPendingAndActiveBit();
5616  
5617 -//     Already covered by dma_map_single()
5618 -//     dma_cache_wback((unsigned long) virtual_addr_buffer, EDU_LENGTH_VALUE);
5619  
5620 -       EDU_issue_command(*physAddr, external_physical_device_address, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
5621 +       dma_cache_wback((unsigned long) virtual_addr_buffer, 512);
5622  
5623 +       EDU_issue_command(phys_mem, external_physical_device_address, EDU_WRITE); /* 1: Is a Read, 0 Is a Write */
5624 +
5625  //      rd_data = EDU_poll(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_STATUS, HIF_INTR2_EDU_DONE, HIF_INTR2_EDU_DONE);
5626  //      EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
5627  
5628  //edu_debug = 0;
5629 -//printk("<-- %s\n", __FUNCTION__);
5630 -
5631         return 0;
5632  }
5633  
5634 @@ -863,7 +799,7 @@
5635         // uint32_t  rd_data;
5636         int ret;
5637         int retries = 4;
5638 -       //unsigned long flags;
5639 +       unsigned long flags;
5640                 
5641  
5642  static int toggle;
5643 @@ -877,31 +813,33 @@
5644  #endif
5645  
5646  //PRINTK("--> %s: vAddr=%p, ext=%08x\n", __FUNCTION__, virtual_addr_buffer, external_physical_device_address);
5647 -#if 0
5648         phys_mem = EDU_virt_to_phys((void *)virtual_addr_buffer);
5649         if (!phys_mem) {
5650                 return (-1);
5651         }
5652 -#else
5653 -       // THT: TBD: Need to adjust for cache line size here, especially on 7420.
5654 -       phys_mem = dma_map_single(NULL, virtual_addr_buffer, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
5655 -#endif
5656  
5657  if (edu_debug) PRINTK("EDU_read: vBuff: %p physDev: %08x, PA=%08x\n", 
5658  virtual_addr_buffer, external_physical_device_address, phys_mem);
5659  
5660   #ifdef CONFIG_MTD_BRCMNAND_USE_ISR
5661 -       down(&gEduIsrData.lock);
5662 -       gEduIsrData.edu_ldw = external_physical_device_address;
5663 -       gEduIsrData.physAddr = phys_mem;
5664 +       spin_lock_irqsave(&gEduIsrData.lock, flags);
5665 +       gEduIsrData.flashAddr = external_physical_device_address;
5666 +       gEduIsrData.dramAddr = phys_mem;
5667         
5668         /*
5669          * Enable L2 Interrupt
5670          */
5671         gEduIsrData.cmd = EDU_READ;
5672 -       gEduIsrData.opComplete = ISR_OP_SUBMITTED;
5673 +       gEduIsrData.opComplete = 0;
5674         gEduIsrData.status = 0;
5675  
5676 +#if 0
5677 +       /* On Read we only wait for DMA completion or Error */
5678 +       gEduIsrData.mask = HIF_INTR2_EDU_CLEAR_MASK|HIF_INTR2_CTRL_READY;
5679 +       gEduIsrData.expect = HIF_INTR2_EDU_DONE;
5680 +       gEduIsrData.error = HIF_INTR2_EDU_ERR;
5681 +       gEduIsrData.intr = HIF_INTR2_EDU_DONE_MASK;
5682 +#endif
5683  
5684         // We must also wait for Ctlr_Ready, otherwise the OOB is not correct, since we read the OOB bytes off the controller
5685  
5686 @@ -910,9 +848,9 @@
5687         // On error we also want Ctrlr-Ready because for COR ERR, the Hamming WAR depends on the OOB bytes.
5688         gEduIsrData.error = HIF_INTR2_EDU_ERR;
5689         gEduIsrData.intr = HIF_INTR2_EDU_DONE_MASK;
5690 -       up(&gEduIsrData.lock);
5691 +       spin_unlock_irqrestore(&gEduIsrData.lock, flags);
5692         
5693 -       ISR_enable_irq(&gEduIsrData);
5694 +       ISR_enable_irq();
5695  #else
5696  
5697          EDU_volatileWrite(EDU_BASE_ADDRESS  + BCHP_HIF_INTR2_CPU_CLEAR, HIF_INTR2_EDU_CLEAR_MASK);
5698 @@ -928,7 +866,29 @@
5699          //EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_DONE, 0x00000000);
5700          EDU_reset_done();
5701  
5702 +#if 0
5703 +        if( (EDU_volatileRead(EDU_BASE_ADDRESS  + EDU_DONE) & 0x00000003) != 0)
5704 +        {
5705 +                PRINTK("EDU_DONE != 0!!!\n");
5706 +        }
5707 +#endif
5708          EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_ERR_STATUS, 0x00000000);
5709 +#if 0
5710 +        if( EDU_volatileRead(EDU_BASE_ADDRESS  + EDU_ERR_STATUS) != 0)
5711 +        {
5712 +                PRINTK("EDU_ERR_STATUS != 0!!!\n");
5713 +        }
5714 +
5715 +#endif
5716 +#if 1 //ndef CONFIG_BMIPS4380
5717 +        dma_cache_inv((unsigned long) virtual_addr_buffer, EDU_LENGTH_VALUE);
5718 +#else
5719 +       {
5720 +               extern void (*flush_cache_all)(void);
5721 +
5722 +               flush_cache_all();
5723 +       }
5724 +#endif
5725          
5726          EDU_volatileWrite(EDU_BASE_ADDRESS  + EDU_LENGTH, EDU_LENGTH_VALUE);
5727  
5728 @@ -956,13 +916,8 @@
5729                 HIF_INTR2_EDU_DONE_MASK);
5730  #endif
5731  
5732 -       (void) dma_unmap_single(NULL, phys_mem, EDU_LENGTH_VALUE, DMA_FROM_DEVICE);
5733 -
5734  if (edu_debug) PRINTK("<-- %s ret=%08x\n", __FUNCTION__, ret);
5735  //edu_debug = 0;
5736  if (edu_debug > 3 && ret) {show_stack(current,NULL);dump_stack();}
5737          return ret;
5738  } 
5739 -
5740 -#endif // Batch mode
5741 -