/* Cypress WestBridge OMAP3430 Kernel Hal source file (cyashalomap_kernel.c)
## ===========================
## Copyright (C) 2010  Cypress Semiconductor
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor,
## Boston, MA  02110-1301, USA.
## ===========================
*/

#ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL

#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/sched.h>
/* these includes seem broken; moved here for patch submission
 * #include <mach/mux.h>
 * #include <mach/gpmc.h>
 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h>
 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h>
 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h>
 * #include <linux/westbridge/cyaserr.h>
 * #include <linux/westbridge/cyasregs.h>
 * #include <linux/westbridge/cyasdma.h>
 * #include <linux/westbridge/cyasintr.h>
 */
#include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h"
#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h"
#include "../../../include/linux/westbridge/cyaserr.h"
#include "../../../include/linux/westbridge/cyasregs.h"
#include "../../../include/linux/westbridge/cyasdma.h"
#include "../../../include/linux/westbridge/cyasintr.h"

#define HAL_REV "1.1.0"

/*
 * uncomment to enable 16bit pnand interface
 */
#define PNAND_16BIT_MODE

/*
 * selects one of 3 versions of pnand_lbd_read()
 * PNAND_LBD_READ_NO_PFE - original 8/16 bit code,
 *    reads through the gpmc CONTROLLER REGISTERS
 * ENABLE_GPMC_PF_ENGINE - uses GPMC PFE FIFO reads, in 8 bit mode,
 *    same speed as the above
 * PFE_LBD_READ_V2 - slightly different, performance same as above
 */
#define PNAND_LBD_READ_NO_PFE
/* #define ENABLE_GPMC_PF_ENGINE */
/* #define PFE_LBD_READ_V2 */

/*
 * westbridge astoria ISR option to limit the number of
 * back to back DMA transfers per ISR invocation
 */
#define MAX_DRQ_LOOPS_IN_ISR 4

/*
 * debug prints enabling
 *#define DBGPRN_ENABLED
 *#define DBGPRN_DMA_SETUP_RD
 *#define DBGPRN_DMA_SETUP_WR
 */


/*
 * For performance reasons, we handle storage endpoint transfers of up
 * to 4 KB within the HAL itself.
 */
#define CYASSTORAGE_WRITE_EP_NUM	(4)
#define CYASSTORAGE_READ_EP_NUM		(8)

/*
 * size of DMA packet the HAL can accept from the Storage API;
 * the HAL will fragment it into smaller chunks that the P port can accept
 */
#define CYASSTORAGE_MAX_XFER_SIZE	(2*32768)

/*
 * P port MAX DMA packet size according to interface/ep configuration
 */
#define HAL_DMA_PKT_SZ 512

#define is_storage_e_p(ep) (((ep) == 2) || ((ep) == 4) || \
				((ep) == 6) || ((ep) == 8))
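
/*
 * Illustrative sketch (not part of the driver build): how a request from
 * the Storage API is fragmented into P port DMA packets. The helper below
 * is a hypothetical restatement of the arithmetic done in
 * prep_for_next_xfer(); e.g. a maximum CYASSTORAGE_MAX_XFER_SIZE (64 KB)
 * request yields 128 full HAL_DMA_PKT_SZ packets, while a 1030 byte
 * request yields two full packets plus a 6 byte tail packet.
 */
#if 0
static unsigned int hal_dma_pkt_count(unsigned int req_len)
{
	/* full packets, plus one short tail packet if any */
	return (req_len / HAL_DMA_PKT_SZ) +
		((req_len % HAL_DMA_PKT_SZ) ? 1 : 0);
}
#endif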

/*
 * persistent, stores the current GPMC interface cfg mode
 */
static uint8_t pnand_16bit;

/*
 * keep processing new WB DRQ in the ISR until all are handled
 * (performance feature)
 */
#define PROCESS_MULTIPLE_DRQ_IN_ISR (1)


/*
 * ASTORIA PNAND IF COMMANDS, CASDO - READ, CASDI - WRITE
 */
#define CASDO 0x05
#define CASDI 0x85
#define RDPAGE_B1   0x00
#define RDPAGE_B2   0x30
#define PGMPAGE_B1  0x80
#define PGMPAGE_B2  0x10
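
/*
 * For reference, the bus sequences these opcodes form (derived from
 * nand_cmd_n_addr(), p_nand_lbd_read/write() and the register accessors
 * further below):
 *   page read:    <RDPAGE_B1> <CA0 CA1 RA0 RA1 RA2> <RDPAGE_B2>, then data
 *   page program: <PGMPAGE_B1> <CA0 CA1 RA0 RA1 RA2>, data, <PGMPAGE_B2>
 *   register r/w: <CASDO/CASDI> with column address high byte 0x0c
 */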

/*
 * The type of DMA operation, per endpoint
 */
typedef enum cy_as_hal_dma_type {
	cy_as_hal_read,
	cy_as_hal_write,
	cy_as_hal_none
} cy_as_hal_dma_type;


/*
 * SG list helpers defined in scatterlist.h
#define sg_is_chain(sg)		((sg)->page_link & 0x01)
#define sg_is_last(sg)		((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~0x03))
*/
typedef struct cy_as_hal_endpoint_dma {
	cy_bool buffer_valid;
	uint8_t *data_p;
	uint32_t size;
	/*
	 * sg_list_enabled - if true, r/w DMA transfers use the sg list;
	 *		if false, they use a pointer to a buffer
	 * sg_p - pointer to the owner's sg list, if there is one
	 *		(like blockdriver)
	 * dma_xfer_sz - size of the next dma xfer on the P port
	 * seg_xfer_cnt - counts transferred bytes in the current sg_list
	 *		memory segment
	 * req_xfer_cnt - total number of bytes transferred so far in the
	 *		current request
	 * req_length - total request length
	 */
	bool sg_list_enabled;
	struct scatterlist *sg_p;
	uint16_t dma_xfer_sz;
	uint32_t seg_xfer_cnt;
	uint16_t req_xfer_cnt;
	uint16_t req_length;
	cy_as_hal_dma_type type;
	cy_bool pending;
} cy_as_hal_endpoint_dma;
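
/*
 * Worked example (illustrative values, not taken from a real trace): a
 * 1024 byte sg-backed read on a storage EP proceeds as two 512 byte
 * (HAL_DMA_PKT_SZ) DMA packets; after the first packet completes,
 * seg_xfer_cnt == req_xfer_cnt == 512 and dma_xfer_sz stays at
 * HAL_DMA_PKT_SZ until req_xfer_cnt reaches req_length.
 */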

/*
 * The list of OMAP devices (should be one)
 */
static cy_as_omap_dev_kernel *m_omap_list_p;

/*
 * The callback to call after DMA operations are complete
 */
static cy_as_hal_dma_complete_callback callback;

/*
 * Pending data size for the endpoints
 */
static cy_as_hal_endpoint_dma end_points[16];

/*
 * Forward declaration
 */
static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p);

static uint16_t intr_sequence_num;
static uint8_t intr__enable;
spinlock_t int_lock;

static u32 iomux_vma;
static u32 csa_phy;

/*
 * gpmc I/O registers VMA
 */
static u32 gpmc_base;

/*
 * gpmc data VMA associated with CS4 (ASTORIA CS on GPMC)
 */
static u32 gpmc_data_vma;
static u32 ndata_reg_vma;
static u32 ncmd_reg_vma;
static u32 naddr_reg_vma;

/*
 * fwd declarations
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff);
static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff);
static inline u16 __attribute__((always_inline))
			ast_p_nand_casdo_read(u8 reg_addr8);
static inline void __attribute__((always_inline))
			ast_p_nand_casdi_write(u8 reg_addr8, u16 data);

/*
 * prints given number of omap registers
 */
static void cy_as_hal_print_omap_regs(char *name_prefix,
				u8 name_base, u32 virt_base, u16 count)
{
	u32 reg_val, reg_addr;
	u16 i;
	cy_as_hal_print_message(KERN_INFO "\n");
	for (i = 0; i < count; i++) {

		reg_addr = virt_base + (i*4);
		/* use virtual addresses here */
		reg_val = __raw_readl(reg_addr);
		cy_as_hal_print_message(KERN_INFO "%s_%d[%8.8x]=%8.8x\n",
						name_prefix, name_base+i,
						reg_addr, reg_val);
	}
}

/*
 * setMUX function for a pad + additional pad flags
 */
static u16 omap_cfg_reg_L(u32 pad_func_index)
{
	static u8 sanity_check = 1;

	u32 reg_vma;
	u16 cur_val, wr_val, rdback_val;

	/*
	 * do a sanity check on the omap_mux_pin_cfg[] table
	 */
	cy_as_hal_print_message(KERN_INFO" OMAP pins user_pad cfg ");
	if (sanity_check) {
		if ((omap_mux_pin_cfg[END_OF_TABLE].name[0] == 'E') &&
			(omap_mux_pin_cfg[END_OF_TABLE].name[1] == 'N') &&
			(omap_mux_pin_cfg[END_OF_TABLE].name[2] == 'D')) {

			cy_as_hal_print_message(KERN_INFO
					"table is good.\n");
		} else {
			cy_as_hal_print_message(KERN_WARNING
					"table is bad, fix it");
		}
		/*
		 * do it only once
		 */
		sanity_check = 0;
	}

	/*
	 * get the virtual address of the PADCNF_REG
	 */
	reg_vma = (u32)iomux_vma + omap_mux_pin_cfg[pad_func_index].offset;

	/*
	 * add additional USER PU/PD/EN flags
	 */
	wr_val = omap_mux_pin_cfg[pad_func_index].mux_val;
	cur_val = IORD16(reg_vma);

	/*
	 * PADCFG regs are 16 bit long, packed into 32 bit regs,
	 * can also be accessed as u16
	 */
	IOWR16(reg_vma, wr_val);
	rdback_val = IORD16(reg_vma);

	/*
	 * return the value written, in case the caller wants to save it
	 */
	return wr_val;
}

#define BLKSZ_4K 0x1000

/*
 * switch GPMC DATA bus mode
 */
void cy_as_hal_gpmc_enable_16bit_bus(bool dbus16_enabled)
{
	uint32_t tmp32;

	/*
	 * disable gpmc CS4 operation 1st
	 */
	tmp32 = gpmc_cs_read_reg(AST_GPMC_CS,
				GPMC_CS_CONFIG7) & ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);

	/*
	 * GPMC NAND data bus can be 8 or 16 bit wide
	 */
	if (dbus16_enabled) {
		DBGPRN("enabling 16 bit bus\n");
		gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
				(GPMC_CONFIG1_DEVICETYPE(2) |
				GPMC_CONFIG1_WAIT_PIN_SEL(2) |
				GPMC_CONFIG1_DEVICESIZE_16)
				);
	} else {
		DBGPRN(KERN_INFO "enabling 8 bit bus\n");
		gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
				(GPMC_CONFIG1_DEVICETYPE(2) |
				GPMC_CONFIG1_WAIT_PIN_SEL(2))
				);
	}

	/*
	 * re-enable astoria CS operation on GPMC
	 */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
			(tmp32 | GPMC_CONFIG7_CSVALID));

	/*
	 * remember the state
	 */
	pnand_16bit = dbus16_enabled;
}
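
/*
 * Usage sketch (illustrative): with PNAND_16BIT_MODE defined above, the
 * wider bus would be selected once astoria is up, e.g.:
 *	cy_as_hal_gpmc_enable_16bit_bus(true);
 * nand_cmd_n_addr() and the register accessors below then switch to
 * their 16 bit code paths via the pnand_16bit flag.
 */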

static int cy_as_hal_gpmc_init(void)
{
	u32 tmp32;
	int err;
	struct gpmc_timings timings;
	/*
	 * get the GPMC i/o registers base (already i/o mapped
	 * in the kernel, no need for a separate i/o remap)
	 */
	gpmc_base = phys_to_virt(OMAP34XX_GPMC_BASE);
	DBGPRN(KERN_INFO "kernel has gpmc_base=%x , val@ the base=%x",
		gpmc_base, __raw_readl(gpmc_base)
	);

	/*
	 * these globals are full VMAs relative to the gpmc_base above
	 */
	ncmd_reg_vma = GPMC_VMA(AST_GPMC_NAND_CMD);
	naddr_reg_vma = GPMC_VMA(AST_GPMC_NAND_ADDR);
	ndata_reg_vma = GPMC_VMA(AST_GPMC_NAND_DATA);

	/*
	 * request a GPMC CS for ASTORIA
	 */
	if (gpmc_cs_request(AST_GPMC_CS, SZ_16M, (void *)&csa_phy) < 0) {
		cy_as_hal_print_message(KERN_ERR "error failed to request "
					"ncs4 for ASTORIA\n");
			return -1;
	} else {
		DBGPRN(KERN_INFO "got phy_addr:%x for "
				"GPMC CS%d GPMC_CFGREG7[CS4]\n",
				 csa_phy, AST_GPMC_CS);
	}

	/*
	 * request a VM region of 4K addr space for the chip select 4 phy
	 * address; technically we don't need it for NAND devices, but do it
	 * anyway so that data read/write bus cycles can be triggered by
	 * reading or writing this mem region
	 */
	if (!request_mem_region(csa_phy, BLKSZ_4K, "AST_OMAP_HAL")) {
		err = -EBUSY;
		cy_as_hal_print_message(KERN_ERR "error MEM region "
					"request for phy_addr:%x failed\n",
					csa_phy);
			goto out_free_cs;
	}

	/*
	 * REMAP mem region associated with our CS
	 */
	gpmc_data_vma = (u32)ioremap_nocache(csa_phy, BLKSZ_4K);
	if (!gpmc_data_vma) {
		err = -ENOMEM;
		cy_as_hal_print_message(KERN_ERR "error- ioremap() "
					"for phy_addr:%x failed", csa_phy);

		goto out_release_mem_region;
	}
	cy_as_hal_print_message(KERN_INFO "ioremap(%x) returned vma=%x\n",
							csa_phy, gpmc_data_vma);

	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
						(GPMC_CONFIG1_DEVICETYPE(2) |
						GPMC_CONFIG1_WAIT_PIN_SEL(2)));

	memset(&timings, 0, sizeof(timings));

	/* cs timing */
	timings.cs_on = WB_GPMC_CS_t_o_n;
	timings.cs_wr_off = WB_GPMC_BUSCYC_t;
	timings.cs_rd_off = WB_GPMC_BUSCYC_t;

	/* adv timing */
	timings.adv_on = WB_GPMC_ADV_t_o_n;
	timings.adv_rd_off = WB_GPMC_BUSCYC_t;
	timings.adv_wr_off = WB_GPMC_BUSCYC_t;

	/* oe timing */
	timings.oe_on = WB_GPMC_OE_t_o_n;
	timings.oe_off = WB_GPMC_OE_t_o_f_f;
	timings.access = WB_GPMC_RD_t_a_c_c;
	timings.rd_cycle = WB_GPMC_BUSCYC_t;

	/* we timing */
	timings.we_on = WB_GPMC_WE_t_o_n;
	timings.we_off = WB_GPMC_WE_t_o_f_f;
	timings.wr_access = WB_GPMC_WR_t_a_c_c;
	timings.wr_cycle = WB_GPMC_BUSCYC_t;

	timings.page_burst_access = WB_GPMC_BUSCYC_t;
	timings.wr_data_mux_bus = WB_GPMC_BUSCYC_t;
	gpmc_cs_set_timings(AST_GPMC_CS, &timings);

	cy_as_hal_print_omap_regs("GPMC_CONFIG", 1,
			GPMC_VMA(GPMC_CFG_REG(1, AST_GPMC_CS)), 7);

	/*
	 * DISABLE cs4, NOTE GPMC REG7 is already configured
	 * at this point by gpmc_cs_request
	 */
	tmp32 = gpmc_cs_read_reg(AST_GPMC_CS, GPMC_CS_CONFIG7) &
						~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);

	/*
	 * PROGRAM the chip select region (see OMAP3430 TRM PAGE 1088)
	 */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
					(AS_CS_MASK | AS_CS_BADDR));

	/*
	 * by default configure GPMC into 8 bit mode
	 * (to match astoria default mode)
	 */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
					(GPMC_CONFIG1_DEVICETYPE(2) |
					GPMC_CONFIG1_WAIT_PIN_SEL(2)));

	/*
	 * ENABLE astoria cs operation on GPMC
	 */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
					(tmp32 | GPMC_CONFIG7_CSVALID));

	/*
	 * No method currently exists to write this register through the
	 * GPMC APIs; we need to change the WAIT2 polarity
	 */
	tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
	tmp32 = tmp32 | NAND_FORCE_POSTED_WRITE_B | 0x40;
	IOWR32(GPMC_VMA(GPMC_CONFIG_REG), tmp32);

	tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
	cy_as_hal_print_message("GPMC_CONFIG_REG=0x%x\n", tmp32);

	return 0;

out_release_mem_region:
	release_mem_region(csa_phy, BLKSZ_4K);

out_free_cs:
	gpmc_cs_free(AST_GPMC_CS);

	return err;
}

/*
 * west bridge astoria ISR (Interrupt handler)
 */
static irqreturn_t cy_astoria_int_handler(int irq,
				void *dev_id, struct pt_regs *regs)
{
	cy_as_omap_dev_kernel *dev_p;
	uint16_t read_val = 0;
	uint16_t mask_val = 0;

	/*
	 * debug stuff, counts the number of loops per one intr trigger
	 */
	uint16_t drq_loop_cnt = 0;
	uint8_t irq_pin;
	/*
	 * flags to watch
	 */
	const uint16_t sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
				CY_AS_MEM_P0_INTR_REG_MBINT |
				CY_AS_MEM_P0_INTR_REG_PMINT |
				CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);

	/*
	 * sample the IRQ pin level (just for statistics)
	 */
	irq_pin = __gpio_get_value(AST_INT);

	/*
	 * this one is just for debugging
	 */
	intr_sequence_num++;

	/*
	 * astoria device handle
	 */
	dev_p = dev_id;

	/*
	 * read the Astoria intr register
	 */
	read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
						CY_AS_MEM_P0_INTR_REG);

	/*
	 * save the current mask value
	 */
	mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
						CY_AS_MEM_P0_INT_MASK_REG);

	DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
			intr_sequence_num, read_val);

	/*
	 * Disable WB interrupt signal generation while we are in the ISR
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

	/*
	 * this is a DRQ interrupt
	 */
	if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {

		do {
			/*
			 * handle the DRQ interrupt
			 */
			drq_loop_cnt++;

			cy_handle_d_r_q_interrupt(dev_p);

			/*
			 * spending too much time in the ISR may impact
			 * average system performance
			 */
			if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
				break;

		/*
		 * Keep processing if there is another DRQ int flag
		 */
		} while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INTR_REG) &
					CY_AS_MEM_P0_INTR_REG_DRQINT);
	}

	if (read_val & sentinel)
		cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);

	DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x, "
			"int_pin:%d DRQ_jobs:%d\n",
			intr_sequence_num,
			mask_val,
			irq_pin,
			drq_loop_cnt);

	/*
	 * re-enable WB hw interrupts
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INT_MASK_REG, mask_val);

	return IRQ_HANDLED;
}

static int cy_as_hal_configure_interrupts(void *dev_p)
{
	int result;
	int irq_pin = AST_INT;

	set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);

	/*
	 * for shared IRQs we must provide a non NULL device ptr,
	 * otherwise the int won't register
	 */
	result = request_irq(OMAP_GPIO_IRQ(irq_pin),
					(irq_handler_t)cy_astoria_int_handler,
					IRQF_SHARED, "AST_INT#", dev_p);

	if (result == 0) {
		/*
		 * OMAP_GPIO_IRQ(irq_pin) - omap logical IRQ number
		 *		assigned to this interrupt
		 * OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1) - print status
		 *		of AST_INT GPIO IRQ_ENABLE FLAG
		 */
		cy_as_hal_print_message(KERN_INFO"AST_INT omap_pin:"
				"%d assigned IRQ #%d IRQEN1=%d\n",
				irq_pin,
				OMAP_GPIO_IRQ(irq_pin),
				OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1)
				);
	} else {
		cy_as_hal_print_message("cyasomaphal: interrupt "
				"failed to register\n");
		gpio_free(irq_pin);
		cy_as_hal_print_message(KERN_WARNING
				"ASTORIA: can't get assigned IRQ "
				"%i for INT#\n", OMAP_GPIO_IRQ(irq_pin));
	}

	return result;
}

/*
 * initialize OMAP pads/pins to user defined functions
 */
static void cy_as_hal_init_user_pads(user_pad_cfg_t *pad_cfg_tab)
{
	/*
	 * browse through the table and initialize the pins
	 */
	u32 in_level = 0;
	u16 tmp16, mux_val;

	while (pad_cfg_tab->name != NULL) {

		if (gpio_request(pad_cfg_tab->pin_num, NULL) == 0) {

			pad_cfg_tab->valid = 1;
			mux_val = omap_cfg_reg_L(pad_cfg_tab->mux_func);

			/*
			 * always set drv level before changing out direction
			 */
			__gpio_set_value(pad_cfg_tab->pin_num,
							pad_cfg_tab->drv);

			/*
			 * "0" - OUT, "1" - input; omap_set_gpio_direction
			 * (pad_cfg_tab->pin_num, pad_cfg_tab->dir);
			 */
			if (pad_cfg_tab->dir)
				gpio_direction_input(pad_cfg_tab->pin_num);
			else
				gpio_direction_output(pad_cfg_tab->pin_num,
							pad_cfg_tab->drv);

			/* sample the pin */
			in_level = __gpio_get_value(pad_cfg_tab->pin_num);

			cy_as_hal_print_message(KERN_INFO "configured %s to "
					"OMAP pad_%d, DIR=%d "
					"DOUT=%d, DIN=%d\n",
					pad_cfg_tab->name,
					pad_cfg_tab->pin_num,
					pad_cfg_tab->dir,
					pad_cfg_tab->drv,
					in_level
			);
		} else {
			/*
			 * get the pad_mux value to check on the pin_function
			 */
			cy_as_hal_print_message(KERN_INFO "couldn't cfg pin %d "
					"for signal %s, it's already taken\n",
					pad_cfg_tab->pin_num,
					pad_cfg_tab->name);
		}

		tmp16 = *(u16 *)PADCFG_VMA
			(omap_mux_pin_cfg[pad_cfg_tab->mux_func].offset);

		cy_as_hal_print_message(KERN_INFO "GPIO_%d(PAD_CFG=%x,OE=%d "
			"DOUT=%d, DIN=%d IRQEN=%d)\n\n",
			pad_cfg_tab->pin_num, tmp16,
			OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_OE),
			OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_OUT),
			OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_IN),
			OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_IRQENABLE1)
			);

		/*
		 * next pad_cfg descriptor
		 */
		pad_cfg_tab++;
	}

	cy_as_hal_print_message(KERN_INFO"pads configured\n");
}


/*
 * release gpios taken by the module
 */
static void cy_as_hal_release_user_pads(user_pad_cfg_t *pad_cfg_tab)
{
	while (pad_cfg_tab->name != NULL) {

		if (pad_cfg_tab->valid) {
			gpio_free(pad_cfg_tab->pin_num);
			pad_cfg_tab->valid = 0;
			cy_as_hal_print_message(KERN_INFO "GPIO_%d "
					"released from %s\n",
					pad_cfg_tab->pin_num,
					pad_cfg_tab->name);
		} else {
			cy_as_hal_print_message(KERN_INFO "no release "
					"for %s, GPIO_%d, wasn't acquired\n",
					pad_cfg_tab->name,
					pad_cfg_tab->pin_num);
		}
		pad_cfg_tab++;
	}
}

void cy_as_hal_config_c_s_mux(void)
{
	/*
	 * FORCE the GPMC CS4 pin (it is in use by the zoom system)
	 */
	omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
}
EXPORT_SYMBOL(cy_as_hal_config_c_s_mux);

/*
 * inits all omap h/w
 */
uint32_t cy_as_hal_processor_hw_init(void)
{
	int i, err;

	cy_as_hal_print_message(KERN_INFO "init OMAP3430 hw...\n");

	iomux_vma = (u32)ioremap_nocache(
				(u32)CTLPADCONF_BASE_ADDR, CTLPADCONF_SIZE);
	cy_as_hal_print_message(KERN_INFO "PADCONF_VMA=%x val=%x\n",
				iomux_vma, IORD32(iomux_vma));

	/*
	 * remap gpio banks
	 */
	for (i = 0; i < 6; i++) {
		gpio_vma_tab[i].virt_addr = (u32)ioremap_nocache(
					gpio_vma_tab[i].phy_addr,
					gpio_vma_tab[i].size);

		cy_as_hal_print_message(KERN_INFO "%s virt_addr=%x\n",
					gpio_vma_tab[i].name,
					(u32)gpio_vma_tab[i].virt_addr);
	}

	/*
	 * force OMAP_GPIO_126 to the released state,
	 * will be configured to drive reset
	 */
	gpio_free(AST_RESET);

	/*
	 * same thing with the Astoria CS pin
	 */
	gpio_free(AST_CS);

	/*
	 * initialize all the OMAP pads connected to astoria
	 */
	cy_as_hal_init_user_pads(user_pad_cfg);

	err = cy_as_hal_gpmc_init();
	if (err < 0)
		cy_as_hal_print_message(KERN_INFO"gpmc init failed:%d", err);

	cy_as_hal_config_c_s_mux();

	return gpmc_data_vma;
}
EXPORT_SYMBOL(cy_as_hal_processor_hw_init);

void cy_as_hal_omap_hardware_deinit(cy_as_omap_dev_kernel *dev_p)
{
	/*
	 * free omap hw resources
	 */
	if (gpmc_data_vma != 0)
		iounmap((void *)gpmc_data_vma);

	if (csa_phy != 0)
		release_mem_region(csa_phy, BLKSZ_4K);

	gpmc_cs_free(AST_GPMC_CS);

	free_irq(OMAP_GPIO_IRQ(AST_INT), dev_p);

	cy_as_hal_release_user_pads(user_pad_cfg);
}

/*
 * These are the functions that are not part of the
 * HAL layer, but are required to be called for this HAL
 */

/*
 * Called On AstDevice LKM exit
 */
int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag)
{
	cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;

	/*
	 * TODO: Need to disable the WB interrupt handler 1st
	 */
	if (0 == dev_p)
		return 1;

	cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
	if (dev_p->m_sig != CY_AS_OMAP_KERNEL_HAL_SIG) {
		cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
								pgm, __func__);
		return 1;
	}

	/*
	 * disable interrupts
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

#if 0
	if (dev_p->thread_flag == 0) {
		dev_p->thread_flag = 1;
		wait_for_completion(&dev_p->thread_complete);
		cy_as_hal_print_message("cyasomaphal:"
			"done cleaning thread\n");
		cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
	}
#endif

	cy_as_hal_omap_hardware_deinit(dev_p);

	/*
	 * Rearrange the list
	 */
	if (m_omap_list_p == dev_p)
		m_omap_list_p = dev_p->m_next_p;

	cy_as_hal_free(dev_p);

	cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
	return 0;
}

int omap_start_intr(cy_as_hal_device_tag tag)
{
	cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
	int ret = 0;
	const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
				CY_AS_MEM_P0_INTR_REG_MBINT;

	/*
	 * register for interrupts
	 */
	ret = cy_as_hal_configure_interrupts(dev_p);

	/*
	 * enable only MBox & DRQ interrupts for now
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
				CY_AS_MEM_P0_INT_MASK_REG, mask);

	return 1;
}

/*
 * Below are the functions that communicate with the WestBridge device.
 * These are system dependent and must be defined by the HAL layer
 * for a given system.
 */

/*
 * GPMC NAND command+addr write phase
 */
static inline void nand_cmd_n_addr(u8 cmdb1, u16 col_addr, u32 row_addr)
{
	/*
	 * byte order on the bus: <cmd> <CA0,CA1,RA0,RA1,RA2>
	 */
	u32 tmpa32 = ((row_addr << 16) | col_addr);
	u8 RA2 = (u8)(row_addr >> 16);

	if (!pnand_16bit) {
		/*
		 * GPMC PNAND 8bit BUS
		 */
		/*
		 * CMD1
		 */
		IOWR8(ncmd_reg_vma, cmdb1);

		/*
		 * pnand bus: <CA0,CA1,RA0,RA1>
		 */
		IOWR32(naddr_reg_vma, tmpa32);

		/*
		 * <RA2>, always zero
		 */
		IOWR8(naddr_reg_vma, RA2);

	} else {
		/*
		 * GPMC PNAND 16bit BUS; in 16 bit mode CMD
		 * and ADDR are sent on [d7..d0]
		 */
		uint8_t CA0, CA1, RA0, RA1;
		CA0 = tmpa32 & 0x000000ff;
		CA1 = (tmpa32 >> 8) & 0x000000ff;
		RA0 = (tmpa32 >> 16) & 0x000000ff;
		RA1 = (tmpa32 >> 24) & 0x000000ff;

		/*
		 * can't use 32 bit writes here; omap will not serialize
		 * them to the lower half in 16 bit mode
		 */

		/*
		 * pnand bus: <CMD1, CA0,CA1,RA0,RA1, RA2 (always zero)>
		 */
		IOWR8(ncmd_reg_vma, cmdb1);
		IOWR8(naddr_reg_vma, CA0);
		IOWR8(naddr_reg_vma, CA1);
		IOWR8(naddr_reg_vma, RA0);
		IOWR8(naddr_reg_vma, RA1);
		IOWR8(naddr_reg_vma, RA2);
	}
}

/*
 * spin until r/b goes high
 */
inline int wait_rn_b_high(void)
{
	u32 w_spins = 0;

	/*
	 * TODO: note R/b may go low here, need to spin until high
	 * while (omap_get_gpio_datain(AST_RnB) == 0) {
	 * w_spins++;
	 * }
	 * if (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
	 *
	 * while (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
	 * w_spins++;
	 * }
	 * printk("<1>RnB=0!:%d\n",w_spins);
	 * }
	 */
	return w_spins;
}

#ifdef ENABLE_GPMC_PF_ENGINE
/* #define PFE_READ_DEBUG
 * PNAND block read with the OMAP PFE enabled
 * status: not tested, broken, etc.
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint16_t w32cnt;
	uint32_t *ptr32;
	uint8_t *ptr8;
	uint8_t bytes_in_fifo;

	/* debug vars */
#ifdef PFE_READ_DEBUG
	uint32_t loop_limit;
	uint16_t bytes_read = 0;
#endif

	/*
	 * configure the prefetch engine
	 */
	uint32_t tmp32;
	uint32_t pfe_status;

	/*
	 * DISABLE the prefetch engine 1st, in case
	 * it is already running
	 */
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x0);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);

#ifdef PFE_READ_DEBUG
	tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG1));
	if (tmp32 != GPMC_PREFETCH_CONFIG1_VAL) {
		printk(KERN_INFO "<1> prefetch CONFIG1 read val:%8.8x, != VAL written:%8.8x\n",
				tmp32, GPMC_PREFETCH_CONFIG1_VAL);
		tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
		printk(KERN_INFO "<1> GPMC_PREFETCH_STATUS : %8.8x\n", tmp32);
	}

	/*
	 * sanity check 2
	 */
	tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG2));
	if (tmp32 != (count))
		printk(KERN_INFO "<1> GPMC_PREFETCH_CONFIG2 read val:%d, "
				"!= VAL written:%d\n", tmp32, count);
#endif

	/*
	 * ISSUE PNAND CMD+ADDR, note gpmc puts 32b words
	 * on the bus least sig. byte 1st
	 */
	nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);

	IOWR8(ncmd_reg_vma, RDPAGE_B2);

	/*
	 * start the prefetch engine
	 */
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);

	ptr32 = buff;

	while (1) {
		/*
		 * GPMC PFE service loop
		 */
		do {
			/*
			 * spin until the PFE has fetched some
			 * PNAND bus words into the FIFO
			 */
			pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
			bytes_in_fifo = (pfe_status >> 24) & 0x7f;
		} while (bytes_in_fifo == 0);

		/* whole 32 bit words in fifo */
		w32cnt = bytes_in_fifo >> 2;

#if 0
		/*
		 * NOTE: FIFO_PTR indicates the number of bytes already
		 *   received in the FIFO and available to be read by DMA
		 *   or the MPU, whereas COUNTVAL indicates the number of
		 *   bus words yet to be read from the PNAND bus
		 */
		printk(KERN_ERR "<1> got PF_STATUS:%8.8x FIFO_PTR:%d, COUNTVAL:%d, w32cnt:%d\n",
					pfe_status, bytes_in_fifo,
					(pfe_status & 0x3fff), w32cnt);
#endif

		while (w32cnt--)
			*ptr32++ = IORD32(gpmc_data_vma);

		if ((pfe_status & 0x3fff) == 0) {
			/*
			 * PFE access engine done; there may still be data
			 * left over in the FIFO. Re-read the FIFO byte
			 * counter (check for leftovers from the 32 bit
			 * read accesses above)
			 */
			bytes_in_fifo = (IORD32(
				GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;

			/*
			 * NOTE we may still have one word left in the fifo,
			 * read it out
			 */
			ptr8 = (uint8_t *)ptr32;
			switch (bytes_in_fifo) {

			case 0:
				/*
				 * nothing to do, we already read the
				 * FIFO out with 32 bit accesses
				 */
				break;
			case 1:
				/*
				 * only possible on the 8 bit pNAND bus
				 */
				*ptr8 = IORD8(gpmc_data_vma);
				break;

			case 2:
				/*
				 * this one can occur in either mode
				 */
				*(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
				break;

			case 3:
				/*
				 * only possible on the 8 bit pNAND bus
				 */
				*(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
				ptr8 += 2;
				*ptr8 = IORD8(gpmc_data_vma);
				break;

			case 4:
				/*
				 * shouldn't happen, but has been seen
				 * in 8 bit mode
				 */
				*ptr32 = IORD32(gpmc_data_vma);
				break;

			default:
				printk(KERN_ERR"<1>_error: PFE FIFO leftover bytes not read:%d\n",
								bytes_in_fifo);
				break;
			}
			/*
			 * read is completed, get out of the while(1) loop
			 */
			break;
		}
	}
}
#endif

#ifdef PFE_LBD_READ_V2
/*
 * PFE engine assisted reads with the 64 byte blocks
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint8_t rd_cnt;
	uint32_t *ptr32;
	uint8_t *ptr8;
	uint32_t pfe_status;

	/*
	 * ISSUE PNAND CMD+ADDR
	 * note gpmc puts 32b words on the bus least sig. byte 1st
	 */
	nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
	IOWR8(ncmd_reg_vma, RDPAGE_B2);

	/*
	 * setup the PFE block
	 * count - number of bytes to access on the pnand bus
	 */

	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);

	ptr32 = buff;

	do {
		pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
		rd_cnt = pfe_status >> (24+2);

		while (rd_cnt--)
			*ptr32++ = IORD32(gpmc_data_vma);

	} while (pfe_status & 0x3fff);

	/*
	 * read out the leftover
	 */
	ptr8 = (uint8_t *)ptr32;
	rd_cnt = (IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;

	while (rd_cnt--)
		*ptr8++ = IORD8(gpmc_data_vma);
}
#endif

#ifdef PNAND_LBD_READ_NO_PFE
/*
 * Endpoint buffer read w/o the OMAP GPMC Prefetch Engine.
 * This is the original working code; it runs at max speed for 8 bit
 * xfers, while for 16 bit the bus diagram has gaps.
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint16_t w32cnt;
	uint32_t *ptr32;
	uint16_t *ptr16;
	uint16_t remainder;

	DBGPRN("<1> %s(): NO_PFE\n", __func__);

	ptr32 = buff;
	/* number of whole 32 bit words in the transfer */
	w32cnt = count >> 2;

	/* remainder, in bytes (0..3) */
	remainder = count & 03;

	/*
	 * note gpmc puts 32b words on the bus least sig. byte 1st
	 */
	nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
	IOWR8(ncmd_reg_vma, RDPAGE_B2);

	/*
	 * read data by 32 bit chunks
	 */
	while (w32cnt--)
		*ptr32++ = IORD32(ndata_reg_vma);

	/*
	 * now do the remainder (it can be 0, 1, 2 or 3);
	 * same code for both the 8 & 16 bit bus,
	 * do 1 or 2 MORE 16 bit reads
	 */
	ptr16 = (uint16_t *)ptr32;

	switch (remainder) {
	case 1:
		/*
		 * fall through: in 8 bit mode we need to read an even
		 * number of bytes, so read one 16 bit word
		 */
	case 2:
		*ptr16 = IORD16(ndata_reg_vma);
		break;
	case 3:
		/*
		 * for 3 bytes read 2 16 bit words
		 */
		*ptr16++ = IORD16(ndata_reg_vma);
		*ptr16   = IORD16(ndata_reg_vma);
		break;
	default:
		/*
		 * remainder is 0
		 */
		break;
	}
}
#endif
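
/*
 * Worked example (illustration only) of the remainder handling above:
 * count = 1027 gives w32cnt = 1027 >> 2 = 256 32 bit reads and
 * remainder = 3, completed with two more 16 bit reads. Note that this
 * rounds the tail up to an even byte count; the endpoint buffers this
 * HAL reads into are assumed to tolerate the one byte of over-read.
 */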

/*
 * uses LBD mode to write N bytes into astoria
 * Status: working, however there is a 150 ns idle
 * time after every 2 (16 bit) or 4 (8 bit) bus cycles
 */
static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint16_t w32cnt;
	uint16_t remainder;
	uint8_t *ptr8;
	uint16_t *ptr16;
	uint32_t *ptr32;

	remainder = count & 03;
	w32cnt = count >> 2;
	ptr32 = buff;
	ptr8 = buff;

	/*
	 * send: CMDB1, CA0,CA1,RA0,RA1,RA2
	 */
	nand_cmd_n_addr(PGMPAGE_B1, col_addr, row_addr);

	/*
	 * blast the data out in 32bit chunks
	 */
	while (w32cnt--)
		IOWR32(ndata_reg_vma, *ptr32++);

	/*
	 * do the remainder if there is one;
	 * same handling for both 8 & 16 bit pnand modes
	 */
	ptr16 = (uint16_t *)ptr32; /* do 1 or 2 more words */

	switch (remainder) {
	case 1:
		/*
		 * fall through: write one 16 bit word
		 */
	case 2:
		IOWR16(ndata_reg_vma, *ptr16);
		break;

	case 3:
		/*
		 * for 3 bytes write 2 16 bit words
		 */
		IOWR16(ndata_reg_vma, *ptr16++);
		IOWR16(ndata_reg_vma, *ptr16);
		break;
	default:
		/*
		 * remainder is 0
		 */
		break;
	}
	/*
	 * finally issue a PGM cmd
	 */
	IOWR8(ncmd_reg_vma, PGMPAGE_B2);
}

/*
 * write Astoria register
 */
static inline void ast_p_nand_casdi_write(u8 reg_addr8, u16 data)
{
	unsigned long flags;
	u16 addr16;
	/*
	 * throw an error if called from multiple threads
	 */
	static atomic_t wrreg_usage_cnt = { 0 };

	/*
	 * disable interrupts
	 */
	local_irq_save(flags);

	if (atomic_read(&wrreg_usage_cnt) != 0) {
		cy_as_hal_print_message(KERN_ERR "cy_as_omap_hal:"
				"* cy_as_hal_write_register usage:%d\n",
				atomic_read(&wrreg_usage_cnt));
	}

	atomic_inc(&wrreg_usage_cnt);

	/*
	 * 2 flavors of GPMC -> PNAND access
	 */
	if (pnand_16bit) {
		/*
		 * 16 BIT gpmc NAND mode
		 */

		/*
		 * CMD1, CA1, CA2,
		 */
		IOWR8(ncmd_reg_vma, 0x85);
		IOWR8(naddr_reg_vma, reg_addr8);
		IOWR8(naddr_reg_vma, 0x0c);

		/*
		 * this should be sent on the 16 bit bus
		 */
		IOWR16(ndata_reg_vma, data);
	} else {
		/*
		 * 8 bit nand mode: GPMC will automatically
		 * serialize 16 bit or 32 bit writes into
		 * 8 bit ones on the lower 8 bits, in LE order
		 */
		addr16 = 0x0c00 | reg_addr8;

		/*
		 * CMD1, CA1, CA2,
		 */
		IOWR8(ncmd_reg_vma, 0x85);
		IOWR16(naddr_reg_vma, addr16);
		IOWR16(ndata_reg_vma, data);
	}

	/*
	 * re-enable interrupts
	 */
	atomic_dec(&wrreg_usage_cnt);
	local_irq_restore(flags);
}


/*
 * read astoria register via the pNAND interface
 */
static inline u16 ast_p_nand_casdo_read(u8 reg_addr8)
{
	u16 data;
	u16 addr16;
	unsigned long flags;
	/*
	 * throw an error if called from multiple threads
	 */
	static atomic_t rdreg_usage_cnt = { 0 };

	/*
	 * disable interrupts
	 */
	local_irq_save(flags);

	if (atomic_read(&rdreg_usage_cnt) != 0) {
		/*
		 * if it gets here (from other threads), this function
		 * needs spin_lock_irqsave() protection
		 */
		cy_as_hal_print_message(KERN_ERR"cy_as_omap_hal: "
				"cy_as_hal_read_register usage:%d\n",
				atomic_read(&rdreg_usage_cnt));
	}
	atomic_inc(&rdreg_usage_cnt);

	/*
	 * 2 flavors of GPMC -> PNAND access
	 */
	if (pnand_16bit) {
		/*
		 * 16 BIT gpmc NAND mode
		 * CMD1, CA1, CA2, CMD2
		 */

		IOWR8(ncmd_reg_vma, 0x05);
		IOWR8(naddr_reg_vma, reg_addr8);
		IOWR8(naddr_reg_vma, 0x0c);
		IOWR8(ncmd_reg_vma, 0xE0);

		udelay(1);

		/*
		 * much faster through the GPMC register space
		 */
		data = IORD16(ndata_reg_vma);
	} else {
		/*
		 * 8 BIT gpmc NAND mode
		 * CMD1, CA1, CA2, CMD2
		 */
		addr16 = 0x0c00 | reg_addr8;
		IOWR8(ncmd_reg_vma, 0x05);
		IOWR16(naddr_reg_vma, addr16);
		IOWR8(ncmd_reg_vma, 0xE0);
		udelay(1);
		data = IORD16(ndata_reg_vma);
	}

	/*
	 * re-enable interrupts
	 */
	atomic_dec(&rdreg_usage_cnt);
	local_irq_restore(flags);

	return data;
}


/*
 * This function must be defined to write a register within the WestBridge
 * device.  The addr value is the address of the register to write with
 * respect to the base address of the WestBridge device.
 */
void cy_as_hal_write_register(
					cy_as_hal_device_tag tag,
					uint16_t addr, uint16_t data)
{
	ast_p_nand_casdi_write((u8)addr, data);
}

/*
 * This function must be defined to read a register from the WestBridge
 * device.  The addr value is the address of the register to read with
 * respect to the base address of the WestBridge device.
 */
uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
{
	uint16_t data = 0;

	/*
	 * READ ASTORIA REGISTER USING CASDO
	 */
	data = ast_p_nand_casdo_read((u8)addr);

	return data;
}
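
/*
 * Usage sketch (illustrative): a read-modify-write of the P0 interrupt
 * mask through these accessors, in the spirit of the ISR and
 * omap_start_intr() above:
 *	uint16_t m = cy_as_hal_read_register(tag, CY_AS_MEM_P0_INT_MASK_REG);
 *	cy_as_hal_write_register(tag, CY_AS_MEM_P0_INT_MASK_REG,
 *			m | CY_AS_MEM_P0_INTR_REG_MBINT);
 */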

/*
 * preps the EP pointers & data counters for the next packet
 * (fragment of the request) xfer; returns true if there is a
 * next transfer, and false if all bytes in the current request
 * have been xfered
 */
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
{

	if (!end_points[ep].sg_list_enabled) {
		/*
		 * no further transfers for non storage EPs
		 * (like EP2 during firmware download, done
		 * in 64 byte chunks)
		 */
		if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
			DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
				__func__, end_points[ep].req_length, ep);

			/*
			 * no more transfers, we are done with the request
			 */
			return false;
		}

		/*
		 * calculate the size of the next DMA xfer; corner
		 * case for non-storage EPs whose transfer size is
		 * not equal to N * HAL_DMA_PKT_SZ
		 */
		if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
		>= HAL_DMA_PKT_SZ) {
				end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
		} else {
			/*
			 * that would be the last chunk, less
			 * than the P-port max size
			 */
			end_points[ep].dma_xfer_sz = end_points[ep].req_length -
					end_points[ep].req_xfer_cnt;
		}

		return true;
	}

	/*
	 * for SG_list assisted dma xfers:
	 * are we done with the current SG?
	 */
	if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
		/*
		 * was it the last SG segment on the list?
		 */
		if (sg_is_last(end_points[ep].sg_p)) {
			DBGPRN("<1> %s: EP:%d completed, "
					"%d bytes xfered\n",
					__func__,
					ep,
					end_points[ep].req_xfer_cnt
			);

			return false;
		} else {
			/*
			 * There are more SG segments in the current
			 * request's sg list; set up the new segment
			 */

			end_points[ep].seg_xfer_cnt = 0;
			end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
			/* set data pointer for the next DMA sg transfer */
			end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
			DBGPRN("<1> %s new SG:_va:%p\n\n",
					__func__, end_points[ep].data_p);
		}

	}

	/*
	 * for sg list xfers it will always be 512 or 1024
	 */
	end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;

	/*
	 * next transfer is required
	 */

	return true;
}
1583
1584 /*
1585  * Astoria DMA read request, APP_CPU reads from WB ep buffer
1586  */
1587 static void cy_service_e_p_dma_read_request(
1588                         cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1589 {
1590         cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1591         uint16_t  v, size;
1592         void    *dptr;
1593         uint16_t col_addr = 0x0000;
1594         uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1595         uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
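        /*
         * note: the per-EP DMA registers are contiguous, so EPn's
         * register is reachable as CY_AS_MEM_P0_EP2_DMA_REG + (n - 2);
         * the same arithmetic is used in the write-service and
         * cancel paths below
         */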

        /*
         * get the XFER size from the WB EP DMA register
         */
        v = cy_as_hal_read_register(tag, ep_dma_reg);

        /*
         * amount of data in the EP buffer, in bytes
         */
        size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;

        /*
         * memory pointer for this DMA packet xfer (sub_segment)
         */
        dptr = end_points[ep].data_p;

        DBGPRN("<1>HAL:_svc_dma_read on EP_%d sz:%d, intr_seq:%d, dptr:%p\n",
                ep,
                size,
                intr_sequence_num,
                dptr
        );

        cy_as_hal_assert(size != 0);

        if (size) {
                /*
                 * the actual WB-->OMAP memory "soft" DMA xfer
                 */
                p_nand_lbd_read(col_addr, row_addr, size, dptr);
        }

        /*
         * clear the DMAVALID bit, indicating that the data has been read
         */
        cy_as_hal_write_register(tag, ep_dma_reg, 0);

        end_points[ep].seg_xfer_cnt += size;
        end_points[ep].req_xfer_cnt += size;

        /*
         * pre-advance the data pointer (if it's outside the sg
         * list it will be reset anyway)
         */
        end_points[ep].data_p += size;

        if (prep_for_next_xfer(tag, ep)) {
                /*
                 * we have more data to read in this request;
                 * set up the next DMA packet and tell WB how much
                 * data we are going to xfer next
                 */
                v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
                                CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
                cy_as_hal_write_register(tag, ep_dma_reg, v);
        } else {
                end_points[ep].pending = cy_false;
                end_points[ep].type = cy_as_hal_none;
                end_points[ep].buffer_valid = cy_false;

                /*
                 * notify the API that we are done with the rq on this EP
                 */
                if (callback) {
                        DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
                                end_points[ep].req_xfer_cnt);
                        callback(tag, ep,
                                end_points[ep].req_xfer_cnt,
                                CY_AS_ERROR_SUCCESS);
                }
        }
}

/*
 * omap_cpu needs to transfer data to the ASTORIA EP buffer
 */
static void cy_service_e_p_dma_write_request(
                        cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
        uint16_t  addr;
        uint16_t v = 0;
        uint32_t  size;
        uint16_t col_addr = 0x0000;
        uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
        void    *dptr;

        cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
        /*
         * note: "size" here is the size of the DMA transfer; it can be
         * anything > 0 and up to the P_PORT packet size
         */
        size = end_points[ep].dma_xfer_sz;
        dptr = end_points[ep].data_p;

        /*
         * perform the "soft" DMA transfer ("soft" meaning the CPU,
         * not a DMA engine, moves the data)
         */
        if (size)
                p_nand_lbd_write(col_addr, row_addr, size, dptr);

        end_points[ep].seg_xfer_cnt += size;
        end_points[ep].req_xfer_cnt += size;
        /*
         * pre-advance the data pointer
         * (if it's outside the sg list it will be reset anyway)
         */
        end_points[ep].data_p += size;

        /*
         * now clear the DMAVAL bit to indicate we are done
         * transferring data and that the data can now be
         * sent via USB to the USB host, sent to storage,
         * or used internally.
         */

        addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
        cy_as_hal_write_register(tag, addr, size);

        /*
         * finally, tell the USB subsystem that the
         * data is gone and we can accept the
         * next request if one exists.
         */
        if (prep_for_next_xfer(tag, ep)) {
                /*
                 * there is more data to go; re-init the WestBridge DMA side
                 */
                v = end_points[ep].dma_xfer_sz |
                        CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
                cy_as_hal_write_register(tag, addr, v);
        } else {

                end_points[ep].pending = cy_false;
                end_points[ep].type = cy_as_hal_none;
                end_points[ep].buffer_valid = cy_false;

                /*
                 * notify the API that we are done with the rq on this EP
                 */
                if (callback) {
                        /*
                         * this callback will wake up the process that might be
                         * sleeping on the EP whose data is being transferred
                         */
                        callback(tag, ep,
                                        end_points[ep].req_xfer_cnt,
                                        CY_AS_ERROR_SUCCESS);
                }
        }
}

/*
 * HANDLE DRQINT from Astoria (called in AS_Intr context)
 */
static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p)
{
        uint16_t v;
        static uint8_t service_ep = 2;
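        /*
         * note: service_ep is static, so the round-robin position
         * (EP2..EP15) persists from one DRQ interrupt to the next
         */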

        /*
         * we've got a DRQ INT; read the DRQ STATUS register
         */
        v = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
                        CY_AS_MEM_P0_DRQ);

        if (v == 0) {
#ifndef WESTBRIDGE_NDEBUG
                cy_as_hal_print_message("stray DRQ interrupt detected\n");
#endif
                return;
        }

        /*
         * Now pick a DMA request to handle; for now, we just
         * go round robin.  Each bit position in the service_mask
         * represents an endpoint from EP2 to EP15.  We rotate through
         * each of the endpoints to find one that needs to be serviced.
         */
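        /*
         * Illustrative example (hypothetical register value): if
         * v == 0x0110, EP4 and EP8 both have DRQ pending.  With
         * service_ep left at 4 by a previous pass, EP4 is serviced
         * now, and the post-service bump below advances service_ep
         * so that EP8 gets a shot on the next interrupt.
         */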
        while ((v & (1 << service_ep)) == 0) {

                if (service_ep == 15)
                        service_ep = 2;
                else
                        service_ep++;
        }

        if (end_points[service_ep].type == cy_as_hal_write) {
                /*
                 * handle DMA WRITE REQUEST: app_cpu will
                 * write data into the astoria EP buffer
                 */
                cy_service_e_p_dma_write_request(dev_p, service_ep);
        } else if (end_points[service_ep].type == cy_as_hal_read) {
                /*
                 * handle DMA READ REQUEST: cpu will
                 * read the EP buffer from Astoria
                 */
                cy_service_e_p_dma_read_request(dev_p, service_ep);
        }
#ifndef WESTBRIDGE_NDEBUG
        else
                cy_as_hal_print_message("cyashalomap: interrupt "
                                        "w/o pending DMA job, "
                                        "check DRQ_MASK logic\n");
#endif

        /*
         * now bump the EP ahead, so other endpoints get
         * a shot before the one we just serviced
         */
        if (end_points[service_ep].type == cy_as_hal_none) {
                if (service_ep == 15)
                        service_ep = 2;
                else
                        service_ep++;
        }

}

void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
{
        DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
        if (end_points[ep].pending)
                cy_as_hal_write_register(tag,
                                CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);

        end_points[ep].buffer_valid = cy_false;
        end_points[ep].type = cy_as_hal_none;
}

/*
 * enables/disables SG-list assisted DMA xfers for the given EP;
 * sg_list assisted xfers can use the physical addresses of memory
 * pages in case the xfer is performed by a h/w DMA controller rather
 * than the CPU on the P port
 */
void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
{
        end_points[ep].sg_list_enabled = sg_xfer_enabled;
        DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
                        ep, end_points[ep].sg_list_enabled);
}
EXPORT_SYMBOL(cy_as_hal_set_ep_dma_mode);
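/*
 * Illustrative usage: a block-device driver layered on top of this
 * HAL would typically enable sg-assisted mode for its storage
 * endpoints during initialization, e.g.:
 *
 *      cy_as_hal_set_ep_dma_mode(4, true);
 *      cy_as_hal_set_ep_dma_mode(8, true);
 *
 * start_o_m_a_p_kernel() below does exactly this for EP4 and EP8.
 */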

/*
 * This function must be defined to transfer a block of data to
 * the WestBridge device.  This function can use the burst write
 * (DMA) capabilities of WestBridge to do this, or it can just copy
 * the data using writes.
 */
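/*
 * A sketch of a call with hypothetical values: queueing a 2048-byte
 * write on storage EP4 with a 512-byte P-port fragment size,
 *
 *      cy_as_hal_dma_setup_write(tag, 4, buf, 2048, 512);
 *
 * arms the first 512-byte fragment here; the remaining fragments
 * are armed one at a time from the DRQ ISR via
 * cy_service_e_p_dma_write_request().
 */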
void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
                                                uint8_t ep, void *buf,
                                                uint32_t size, uint16_t maxsize)
{
        uint32_t addr = 0;
        uint16_t v = 0;

        /*
         * note: "size" is the actual request size,
         * "maxsize" is the P port fragment size;
         * no EP0 or EP1 traffic should get here
         */
        cy_as_hal_assert(ep != 0 && ep != 1);

        /*
         * If this asserts, we have an ordering problem.  Another DMA request
         * is coming down before the previous one has completed.
         */
        cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
        end_points[ep].buffer_valid = cy_true;
        end_points[ep].type = cy_as_hal_write;
        end_points[ep].pending = cy_true;

        /*
         * total length of the request
         */
        end_points[ep].req_length = size;

        if (size >= maxsize) {
                /*
                 * set the xfer size for the very 1st DMA xfer operation
                 * to the port max packet size (typically 512 or 1024)
                 */
                end_points[ep].dma_xfer_sz = maxsize;
        } else {
                /*
                 * smaller xfers for non-storage EPs
                 */
                end_points[ep].dma_xfer_sz = size;
        }

        /*
         * check whether the EP transfer mode uses an sg_list rather
         * than a memory buffer; block devices pass it to the HAL, so
         * the HAL can get to the real physical address of each segment
         * and set up DMA controller hardware (if there is one)
         */
        if (end_points[ep].sg_list_enabled) {
                /*
                 * buf - pointer to the SG list
                 * data_p - data pointer to the 1st DMA segment
                 * seg_xfer_cnt - keeps track of the N of bytes sent in the
                 *              current sg_list segment
                 * req_xfer_cnt - keeps track of the total N of bytes
                 *              transferred for the request
                 */
                end_points[ep].sg_p = buf;
                end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
                end_points[ep].seg_xfer_cnt = 0;
                end_points[ep].req_xfer_cnt = 0;

#ifdef DBGPRN_DMA_SETUP_WR
                DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
                                "req_sz:%d, maxsz:%d\n",
                                __func__,
                                ep,
                                buf,
                                end_points[ep].data_p,
                                size,
                                maxsize);
#endif

        } else {
                /*
                 * set up the XFER for non sg_list assisted EPs
                 */

                #ifdef DBGPRN_DMA_SETUP_WR
                        DBGPRN("<1>%s non storage or sz < 512:"
                                        "EP:%d, sz:%d\n", __func__, ep, size);
                #endif

                end_points[ep].sg_p = NULL;

                /*
                 * must be a VMA of a membuf in kernel space
                 */
                end_points[ep].data_p = buf;

                /*
                 * keeps track of the No of bytes xferred for the request
                 */
                end_points[ep].req_xfer_cnt = 0;
        }

        /*
         * tell WB we are ready to send data on the given endpoint
         */
        v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
                        | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;

        addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;

        cy_as_hal_write_register(tag, addr, v);
}

/*
 * This function must be defined to transfer a block of data from
 * the WestBridge device.  This function can use the burst read
 * (DMA) capabilities of WestBridge to do this, or it can just
 * copy the data using reads.
 */
void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
                                        uint8_t ep, void *buf,
                                        uint32_t size, uint16_t maxsize)
{
        uint32_t addr;
        uint16_t v;

        /*
         * note: "size" is the actual request size,
         * "maxsize" is the P port fragment size;
         * no EP0 or EP1 traffic should get here
         */
        cy_as_hal_assert(ep != 0 && ep != 1);

        /*
         * If this asserts, we have an ordering problem.
         * Another DMA request is coming down before the
         * previous one has completed. We should not get
         * new requests while the current one is still in process.
         */

        cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);

        end_points[ep].buffer_valid = cy_true;
        end_points[ep].type = cy_as_hal_read;
        end_points[ep].pending = cy_true;
        end_points[ep].req_xfer_cnt = 0;
        end_points[ep].req_length = size;

        if (size >= maxsize) {
                /*
                 * set the xfer size for the very 1st DMA xfer operation
                 * to the port max packet size (typically 512 or 1024)
                 */
                end_points[ep].dma_xfer_sz = maxsize;
        } else {
                /*
                 * so that we can handle small xfers in the case
                 * of non-storage EPs
                 */
                end_points[ep].dma_xfer_sz = size;
        }

        addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;

        if (end_points[ep].sg_list_enabled) {
                /*
                 * handle sg-list assisted EPs;
                 * seg_xfer_cnt - keeps track of the N of sent packets
                 * buf - pointer to the SG list
                 * data_p - data pointer for the 1st DMA segment
                 */
                end_points[ep].seg_xfer_cnt = 0;
                end_points[ep].sg_p = buf;
                end_points[ep].data_p = sg_virt(end_points[ep].sg_p);

                #ifdef DBGPRN_DMA_SETUP_RD
                DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
                           "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
                                ep,
                                buf,
                                end_points[ep].data_p,
                                size,
                                maxsize);
                #endif
                v = (end_points[ep].dma_xfer_sz &
                                CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
                                CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
                cy_as_hal_write_register(tag, addr, v);
        } else {
                /*
                 * a non sg list EP passed void *buf rather than
                 * scatterlist *sg
                 */
                #ifdef DBGPRN_DMA_SETUP_RD
                        DBGPRN("%s:non-sg_list EP:%d,"
                                        "RQ_sz:%d, maxsz:%d\n",
                                        __func__, ep, size, maxsize);
                #endif

                end_points[ep].sg_p = NULL;

                /*
                 * must be a VMA of a membuf in kernel space
                 */
                end_points[ep].data_p = buf;

                /*
                 * program the EP DMA register for storage endpoints only
                 */
                if (is_storage_e_p(ep)) {
                        v = (end_points[ep].dma_xfer_sz &
                                        CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
                                        CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
                        cy_as_hal_write_register(tag, addr, v);
                }
        }
}

/*
 * This function must be defined to allow the WB API to
 * register a callback function that is called when a
 * DMA transfer is complete.
 */
void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
                                        cy_as_hal_dma_complete_callback cb)
{
        DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%p\n",
                        __func__, cb);
        callback = cb;
}
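/*
 * A minimal sketch of a conforming callback (hypothetical name;
 * parameter types inferred from the call sites above, which pass the
 * tag, the endpoint, the total bytes moved for the request, and a
 * status code):
 *
 *      static void my_dma_done(cy_as_hal_device_tag tag, uint8_t ep,
 *                              uint32_t cnt, int status)
 *      {
 *              // cnt bytes completed on EP ep; status is
 *              // CY_AS_ERROR_SUCCESS on success
 *      }
 *
 *      cy_as_hal_dma_register_callback(tag, my_dma_done);
 */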

/*
 * This function must be defined to return the maximum size of
 * DMA request that can be handled on the given endpoint.  The
 * return value should be the maximum size in bytes that the DMA
 * module can handle.
 */
uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
                                        cy_as_end_point_number_t ep)
{
        /*
         * Storage reads and writes are always done in 512 byte blocks.
         * So, we do the count handling within the HAL, and save on
         * some of the data transfer delay.
         */
        if ((ep == CYASSTORAGE_READ_EP_NUM) ||
        (ep == CYASSTORAGE_WRITE_EP_NUM)) {
                /* max DMA request size HAL can handle by itself */
                return CYASSTORAGE_MAX_XFER_SIZE;
        } else {
        /*
         * For the USB - Processor endpoints, the maximum transfer
         * size depends on the speed of USB operation. So, we use
         * the following constant to indicate to the API that
         * splitting of the data into chunks less than or equal to
         * the max transfer size should be handled internally.
         */

                /* DEFINED AS 0xffffffff in cyasdma.h */
                return CY_AS_DMA_MAX_SIZE_HW_SIZE;
        }
}

/*
 * This function must be defined to set the state of the WAKEUP pin
 * on the WestBridge device.  Generally this is done via a GPIO of
 * some type.
 */
cy_bool cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state)
{
        /*
         * not supported as of now
         */
        return cy_false;
}

void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
{
        cy_as_hal_print_message("error: astoria PLL lock is lost\n");
        cy_as_hal_print_message("please check the input voltage levels ");
        cy_as_hal_print_message("and clock, and restart the system\n");
}

/*
 * Below are the functions that must be defined to provide the basic
 * operating system services required by the API.
 */

/*
 * This function is required by the API to allocate memory.
 * This function is expected to work exactly like malloc().
 */
void *cy_as_hal_alloc(uint32_t cnt)
{
        void *ret_p;

        ret_p = kmalloc(cnt, GFP_ATOMIC);
        return ret_p;
}
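/*
 * note: GFP_ATOMIC is used (rather than GFP_KERNEL) so that the
 * allocation never sleeps and is therefore safe even in atomic or
 * interrupt context, at the cost of failing more readily under
 * memory pressure
 */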

/*
 * This function is required by the API to free memory allocated
 * with CyAsHalAlloc().  This function is expected to work exactly
 * like free().
 */
void cy_as_hal_free(void *mem_p)
{
        kfree(mem_p);
}

/*
 * Allocator that can be used in interrupt context.
 * We have to ensure that the kmalloc call does not
 * sleep in this case.
 */
void *cy_as_hal_c_b_alloc(uint32_t cnt)
{
        void *ret_p;

        ret_p = kmalloc(cnt, GFP_ATOMIC);
        return ret_p;
}

/*
 * This function is required to set a block of memory to a
 * specific value.  This function is expected to work exactly
 * like memset().
 */
void cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt)
{
        memset(ptr, value, cnt);
}

/*
 * This function is expected to create a sleep channel.
 * The data structure that represents the sleep channel object
 * (which is a Linux "wait_queue_head_t wq" for this particular HAL)
 * is passed as a pointer and is allocated by the caller
 * (typically as a local var on the stack).  "Create" should be read
 * as "sleep on"; this function doesn't actually create anything.
 */
cy_bool cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel)
{
        init_waitqueue_head(&channel->wq);
        return cy_true;
}

/*
 * for this particular HAL it doesn't actually destroy anything,
 * since no actual sleep object is created in CreateSleepChannel();
 * the sleep channel is given by the pointer in the argument
 */
cy_bool cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel)
{
        return cy_true;
}

/*
 * platform specific wakeable sleep implementation
 */
cy_bool cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms)
{
        wait_event_interruptible_timeout(channel->wq, 0, ((ms * HZ)/1000));
        return cy_true;
}

/*
 * wakes up the process waiting on the CHANNEL
 */
cy_bool cy_as_hal_wake(cy_as_hal_sleep_channel *channel)
{
        wake_up_interruptible_all(&channel->wq);
        return cy_true;
}
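/*
 * Taken together, the four functions above implement a simple
 * wakeable delay; cy_as_hal_sleep() below shows the intended
 * pattern: create a channel on the stack, sleep on it for up to
 * ms milliseconds, then destroy it.
 */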

uint32_t cy_as_hal_disable_interrupts(void)
{
        /*
         * only maintains a nesting count here; no actual
         * IRQ masking is done in this HAL
         */
        intr__enable++;
        return 0;
}

void cy_as_hal_enable_interrupts(uint32_t val)
{
        intr__enable--;
}

/*
 * sleep at least 150ns, cpu dependent
 */
void cy_as_hal_sleep150(void)
{
        uint32_t i, j;

        j = 0;
        for (i = 0; i < 1000; i++)
                j += (~i);
}
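/*
 * note: the loop above is a crude, CPU-frequency-dependent delay;
 * a sketch of a more portable alternative (using ndelay() from the
 * already-included <linux/delay.h>) would simply be:
 *
 *      ndelay(150);
 */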

void cy_as_hal_sleep(uint32_t ms)
{
        cy_as_hal_sleep_channel channel;

        cy_as_hal_create_sleep_channel(&channel);
        cy_as_hal_sleep_on(&channel, ms);
        cy_as_hal_destroy_sleep_channel(&channel);
}

cy_bool cy_as_hal_is_polling(void)
{
        return cy_false;
}

void cy_as_hal_c_b_free(void *ptr)
{
        cy_as_hal_free(ptr);
}

/*
 * supposed to reinstate the astoria registers
 * that may be clobbered in sleep mode
 */
void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
                                        cy_bool is_standby_wakeup)
{
        /* specific to SPI, no implementation required */
        (void) tag;
        (void) is_standby_wakeup;
}

void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag)
{
        /* specific to SPI, no implementation required */
        (void) tag;
}

cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag)
{
        /*
         * we are in asynchronous mode, so there is no need to handle this
         */
        return true;
}

/*
 * init OMAP h/w resources
 */
int start_o_m_a_p_kernel(const char *pgm,
                                cy_as_hal_device_tag *tag, cy_bool debug)
{
        cy_as_omap_dev_kernel *dev_p;
        int i;
        u16 data16[4];
        u8 pncfg_reg;

        /*
         * no debug mode support through the argument as of now
         */
        (void)debug;

        DBGPRN(KERN_INFO"starting OMAP34xx HAL...\n");

        /*
         * Initialize the HAL level endpoint DMA data.
         */
        for (i = 0; i < ARRAY_SIZE(end_points); i++) {
                end_points[i].data_p = 0;
                end_points[i].pending = cy_false;
                end_points[i].size = 0;
                end_points[i].type = cy_as_hal_none;
                end_points[i].sg_list_enabled = cy_false;

                /*
                 * by default the DMA transfers to/from the EPs don't
                 * use an sg_list, which implies that upper devices,
                 * like the block device, have to enable it for the EPs
                 * in their initialization code
                 */
        }

        /*
         * allocate memory for the OMAP HAL
         */
        dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
                                                sizeof(cy_as_omap_dev_kernel));
        if (dev_p == 0) {
                cy_as_hal_print_message("out of memory allocating OMAP "
                                        "device structure\n");
                return 0;
        }

        dev_p->m_sig = CY_AS_OMAP_KERNEL_HAL_SIG;

        /*
         * initialize the OMAP hardware and all gpio pins
         */
        dev_p->m_addr_base = (void *)cy_as_hal_processor_hw_init();

        /*
         * Now perform a hard reset of the device to have
         * the new settings take effect
         */
        __gpio_set_value(AST_WAKEUP, 1);

        /*
         * do the Astoria h/w reset
         */
        DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");

        /*
         * NEGATIVE PULSE on the RST pin
         */
        __gpio_set_value(AST_RESET, 0);
        mdelay(1);
        __gpio_set_value(AST_RESET, 1);
        mdelay(50);

        /*
         * note: AFTER reset the PNAND interface is in 8 bit mode,
         * so if the gpmc is configured in 8 bit mode the upper half
         * will be FF
         */
        pncfg_reg = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);

#ifdef PNAND_16BIT_MODE

        /*
         * switch to 16 bit mode, force NON-LNA LBD mode, 3 RA addr bytes
         */
        ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0001);

        /*
         * now, in order to continue to talk to astoria,
         * switch the OMAP GPMC into 16 bit mode as well
         */
        cy_as_hal_gpmc_enable_16bit_bus(cy_true);
#else
        /*
         * Astoria and the GPMC are already in 8 bit mode; just
         * initialize PNAND_CFG
         */
        ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0000);
#endif
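        /*
         * note on ordering (as the comments above imply): the 16 bit
         * switch is first written into Astoria's PNAND_CFG, and only
         * then is the GPMC side widened to match, so that both ends
         * of the bus end up speaking the same width
         */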

        /*
         * NOTE: if you want to capture bus activity on the LA,
         * don't use printks in between the activities you want to capture.
         * printks may take milliseconds, and the data of interest
         * will fall outside the LA capture window/buffer
         */
        data16[0] = ast_p_nand_casdo_read(CY_AS_MEM_CM_WB_CFG_ID);
        data16[1] = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);

        if (data16[0] != 0xA200) {
                /*
                 * astoria device is not found
                 */
                printk(KERN_ERR "ERROR: astoria device is not found, CY_AS_MEM_CM_WB_CFG_ID ");
                printk(KERN_ERR "read returned:%4.4X: CY_AS_MEM_PNAND_CFG:%4.4x !\n",
                                data16[0], data16[1]);
                goto bus_acc_error;
        }

        cy_as_hal_print_message(KERN_INFO"register access CASDO test:"
                                "\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
                                " PNAND_CFG after RST:%4.4x\n"
                                " CY_AS_MEM_PNAND_CFG after cfg_wr:%4.4x\n\n",
                                data16[0], pncfg_reg, data16[1]);

        dev_p->thread_flag = 1;
        spin_lock_init(&int_lock);
        dev_p->m_next_p = m_omap_list_p;

        m_omap_list_p = dev_p;
        *tag = dev_p;

        cy_as_hal_configure_interrupts((void *)dev_p);

        cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
                                ", kernel HZ:%d\n", dev_p, HZ);

        /*
         * make the processor-to-storage endpoints SG assisted by default
         */
        cy_as_hal_set_ep_dma_mode(4, true);
        cy_as_hal_set_ep_dma_mode(8, true);

        return 1;

        /*
         * there's been a NAND bus access error or
         * the astoria device is not connected
         */
bus_acc_error:
        /*
         * at this point the hal tag hasn't been set yet,
         * so the device will not call omap_stop
         */
        cy_as_hal_omap_hardware_deinit(dev_p);
        cy_as_hal_free(dev_p);
        return 0;
}

#else
/*
 * Some compilers do not like empty C files, so if the OMAP hal is not
 * being compiled, we compile this single function.  We do this so that
 * for a given target HAL there are not multiple sources for the HAL
 * functions.
 */
void my_o_m_a_p_kernel_hal_dummy_function(void)
{
}

#endif