1 /* Cypress WestBridge OMAP3430 Kernel Hal source file (cyashalomap_kernel.c)
2 ## ===========================
3 ## Copyright (C) 2010 Cypress Semiconductor
5 ## This program is free software; you can redistribute it and/or
6 ## modify it under the terms of the GNU General Public License
7 ## as published by the Free Software Foundation; either version 2
8 ## of the License, or (at your option) any later version.
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
15 ## You should have received a copy of the GNU General Public License
16 ## along with this program; if not, write to the Free Software
17 ## Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 ## Boston, MA 02110-1301, USA.
19 ## ===========================
22 #ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
25 #include <linux/ioport.h>
26 #include <linux/timer.h>
27 #include <linux/gpio.h>
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/scatterlist.h>
32 #include <linux/irq.h>
33 #include <linux/slab.h>
34 #include <linux/sched.h>
35 /* include seems broken moving for patch submission
36 * #include <mach/mux.h>
37 * #include <mach/gpmc.h>
38 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h>
39 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h>
40 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h>
41 * #include <linux/westbridge/cyaserr.h>
42 * #include <linux/westbridge/cyasregs.h>
43 * #include <linux/westbridge/cyasdma.h>
44 * #include <linux/westbridge/cyasintr.h>
46 #include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
47 #include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
48 #include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
49 #include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h"
50 #include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h"
51 #include "../../../include/linux/westbridge/cyaserr.h"
52 #include "../../../include/linux/westbridge/cyasregs.h"
53 #include "../../../include/linux/westbridge/cyasdma.h"
54 #include "../../../include/linux/westbridge/cyasintr.h"
56 #define HAL_REV "1.1.0"
/* Build-time HAL configuration knobs (fragmentary excerpt; the enclosing
 * comment delimiters for some of these option descriptions are not visible
 * in this view). */
59 * uncomment to enable 16bit pnand interface
61 #define PNAND_16BIT_MODE
64 * selects one of 3 versions of pnand_lbd_read()
65 * PNAND_LBD_READ_NO_PFE - original 8/16 bit code
66 * reads through the gpmc CONTROLLER REGISTERS
67 * ENABLE_GPMC_PF_ENGINE - USES GPMC PFE FIFO reads, in 8 bit mode,
68 * same speed as the above
69 * PFE_LBD_READ_V2 - slightly different, performance same as above
71 #define PNAND_LBD_READ_NO_PFE
72 /* #define ENABLE_GPMC_PF_ENGINE */
73 /* #define PFE_LBD_READ_V2 */
76 * westbridge astoria ISR options to limit number of
77 * back to back DMA transfers per ISR interrupt
79 #define MAX_DRQ_LOOPS_IN_ISR 4
82 * debug prints enabling
83 *#define DBGPRN_ENABLED
84 *#define DBGPRN_DMA_SETUP_RD
85 *#define DBGPRN_DMA_SETUP_WR
90 * For performance reasons, we handle storage endpoint transfers up to 4 KB
91 * within the HAL itself.
93 #define CYASSTORAGE_WRITE_EP_NUM (4)
94 #define CYASSTORAGE_READ_EP_NUM (8)
97 * size of DMA packet HAL can accept from Storage API
98 * HAL will fragment it into smaller chunks that the P port can accept
100 #define CYASSTORAGE_MAX_XFER_SIZE (2*32768)
103 * P port MAX DMA packet size according to interface/ep configuration
105 #define HAL_DMA_PKT_SZ 512
/* true for endpoints 2, 4, 6, 8 — the storage endpoints handled by this HAL */
107 #define is_storage_e_p(ep) (((ep) == 2) || ((ep) == 4) || \
108 ((ep) == 6) || ((ep) == 8))
111 * persistent, stores current GPMC interface cfg mode
113 static uint8_t pnand_16bit;
116 * keep processing new WB DRQ in ISR until all handled (performance feature)
118 #define PROCESS_MULTIPLE_DRQ_IN_ISR (1)
122 * ASTORIA PNAND IF COMMANDS, CASDO - READ, CASDI - WRITE
/* pNAND command bytes: RDPAGE_B1/B2 bracket a page read (0x00 ... 0x30),
 * PGMPAGE_B1/B2 bracket a page program (0x80 ... 0x10) */
126 #define RDPAGE_B1 0x00
127 #define RDPAGE_B2 0x30
128 #define PGMPAGE_B1 0x80
129 #define PGMPAGE_B2 0x10
/* Direction/kind of DMA operation tracked per endpoint
 * (enumerator list is elided in this excerpt). */
134 typedef enum cy_as_hal_dma_type {
138 } cy_as_hal_dma_type;
142 * SG list halpers defined in scaterlist.h
143 #define sg_is_chain(sg) ((sg)->page_link & 0x01)
144 #define sg_is_last(sg) ((sg)->page_link & 0x02)
145 #define sg_chain_ptr(sg) \
146 ((struct scatterlist *) ((sg)->page_link & ~0x03))
/* Per-endpoint DMA bookkeeping; one instance per endpoint lives in the
 * file-scope end_points[] array. Some members (e.g. the data pointer used
 * elsewhere as end_points[ep].data_p, and req_length) are elided in this
 * excerpt. */
148 typedef struct cy_as_hal_endpoint_dma {
149 cy_bool buffer_valid;
153 * sg_list_enabled - if true, r/w DMA transfers use sg list,
154 * FALSE use pointer to a buffer
155 * sg_p - pointer to the owner's sg list, if there is such
157 * dma_xfer_sz - size of the next dma xfer on P port
158 * seg_xfer_cnt - counts xfered bytes in current sg_list
160 * req_xfer_cnt - total number of bytes transfered so far in
162 * req_length - total request length
164 bool sg_list_enabled;
165 struct scatterlist *sg_p;
166 uint16_t dma_xfer_sz;
167 uint32_t seg_xfer_cnt;
168 uint16_t req_xfer_cnt;
170 cy_as_hal_dma_type type;
172 } cy_as_hal_endpoint_dma;
175 * The list of OMAP devices (should be one)
/* File-scope HAL state and forward declarations. */
177 static cy_as_omap_dev_kernel *m_omap_list_p;
/* The callback to call after DMA operations are complete */
182 static cy_as_hal_dma_complete_callback callback;
/* Per-endpoint DMA state, indexed by endpoint number (0..15) */
187 static cy_as_hal_endpoint_dma end_points[16];
192 static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p);
/* interrupt statistics/sequencing counters used by the ISR debug prints */
194 static uint16_t intr_sequence_num;
195 static uint8_t intr__enable;
/* VMA of the OMAP pad-configuration (IOMUX) register block */
198 static u32 iomux_vma;
202 * gpmc I/O registers VMA
204 static u32 gpmc_base;
207 * gpmc data VMA associated with CS4 (ASTORIA CS on GPMC)
209 static u32 gpmc_data_vma;
/* VMAs of the GPMC NAND data/command/address registers for the Astoria CS */
210 static u32 ndata_reg_vma;
211 static u32 ncmd_reg_vma;
212 static u32 naddr_reg_vma;
/* pNAND large-block read/write and Astoria register access primitives */
217 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff);
218 static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff);
219 static inline u16 __attribute__((always_inline))
220 ast_p_nand_casdo_read(u8 reg_addr8);
221 static inline void __attribute__((always_inline))
222 ast_p_nand_casdi_write(u8 reg_addr8, u16 data);
225 * prints given number of omap registers
/* Dump 'count' consecutive 32-bit OMAP registers starting at virtual
 * address virt_base, one per line, labelled "<name_prefix>_<name_base+i>".
 * (loop-variable declaration and closing braces are elided in this excerpt) */
227 static void cy_as_hal_print_omap_regs(char *name_prefix,
228 u8 name_base, u32 virt_base, u16 count)
230 u32 reg_val, reg_addr;
232 cy_as_hal_print_message(KERN_INFO "\n");
233 for (i = 0; i < count; i++) {
/* registers are 32 bits apart */
235 reg_addr = virt_base + (i*4);
236 /* use virtual addresses here*/
237 reg_val = __raw_readl(reg_addr);
238 cy_as_hal_print_message(KERN_INFO "%s_%d[%8.8x]=%8.8x\n",
239 name_prefix, name_base+i,
245 * setMUX function for a pad + additional pad flags
/* Program the OMAP pad mux register selected by pad_func_index from the
 * omap_mux_pin_cfg[] table, after a one-time sanity check that the table
 * is terminated with the "END..." sentinel entry. Returns the previous
 * 16-bit PADCNF value (return statement elided in this excerpt) so the
 * caller can save it. */
247 static u16 omap_cfg_reg_L(u32 pad_func_index)
249 static u8 sanity_check = 1;
252 u16 cur_val, wr_val, rdback_val;
255 * do sanity check on the omap_mux_pin_cfg[] table
257 cy_as_hal_print_message(KERN_INFO" OMAP pins user_pad cfg ");
259 if ((omap_mux_pin_cfg[END_OF_TABLE].name[0] == 'E') &&
260 (omap_mux_pin_cfg[END_OF_TABLE].name[1] == 'N') &&
261 (omap_mux_pin_cfg[END_OF_TABLE].name[2] == 'D')) {
263 cy_as_hal_print_message(KERN_INFO
266 cy_as_hal_print_message(KERN_WARNING
267 "table is bad, fix it");
276 * get virtual address to the PADCNF_REG
278 reg_vma = (u32)iomux_vma + omap_mux_pin_cfg[pad_func_index].offset;
281 * add additional USER PU/PD/EN flags
283 wr_val = omap_mux_pin_cfg[pad_func_index].mux_val;
284 cur_val = IORD16(reg_vma);
287 * PADCFG regs 16 bit long, packed into 32 bit regs,
288 * can also be accessed as u16
290 IOWR16(reg_vma, wr_val);
291 rdback_val = IORD16(reg_vma);
294 * in case if the caller wants to save the old value
294 * in case if the caller wants to save the old value
299 #define BLKSZ_4K 0x1000
302 * switch GPMC DATA bus mode
/* Switch the GPMC data bus width for the Astoria chip select between
 * 8-bit and 16-bit NAND mode. The CS is disabled (CSVALID cleared) while
 * CONFIG1 is rewritten, then re-enabled; the resulting mode is cached in
 * the file-scope pnand_16bit flag for the pNAND access routines. */
304 void cy_as_hal_gpmc_enable_16bit_bus(bool dbus16_enabled)
309 * disable gpmc CS4 operation 1st
311 tmp32 = gpmc_cs_read_reg(AST_GPMC_CS,
312 GPMC_CS_CONFIG7) & ~GPMC_CONFIG7_CSVALID;
313 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);
316 * GPMC NAND data bus can be 8 or 16 bit wide
318 if (dbus16_enabled) {
319 DBGPRN("enabling 16 bit bus\n");
320 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
321 (GPMC_CONFIG1_DEVICETYPE(2) |
322 GPMC_CONFIG1_WAIT_PIN_SEL(2) |
323 GPMC_CONFIG1_DEVICESIZE_16)
/* else branch: 8-bit mode (DEVICESIZE bits left at 8-bit default) */
326 DBGPRN(KERN_INFO "enabling 8 bit bus\n");
327 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
328 (GPMC_CONFIG1_DEVICETYPE(2) |
329 GPMC_CONFIG1_WAIT_PIN_SEL(2))
334 * re-enable astoria CS operation on GPMC
336 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
337 (tmp32 | GPMC_CONFIG7_CSVALID));
/* remember the current bus mode for nand_cmd_n_addr() and friends */
342 pnand_16bit = dbus16_enabled;
/* One-time GPMC setup for the Astoria chip select:
 *  - resolve the GPMC register base and the NAND cmd/addr/data register VMAs
 *  - claim the chip select and a 4K memory region, ioremap it (gpmc_data_vma)
 *  - program CONFIG1 (NAND device type, wait pin), bus timings, and the
 *    chip-select base/mask in CONFIG7
 *  - force WAIT2 polarity / posted writes via the global GPMC_CONFIG register
 * Error paths unwind through out_release_mem_region / gpmc_cs_free.
 * (several declarations, returns and closing braces are elided in this
 * excerpt) */
345 static int cy_as_hal_gpmc_init(void)
349 struct gpmc_timings timings;
351 * get GPMC i/o registers base(already been i/o mapped
352 * in kernel, no need for separate i/o remap)
354 gpmc_base = phys_to_virt(OMAP34XX_GPMC_BASE);
355 DBGPRN(KERN_INFO "kernel has gpmc_base=%x , val@ the base=%x",
356 gpmc_base, __raw_readl(gpmc_base)
360 * these are globals are full VMAs of the gpmc_base above
362 ncmd_reg_vma = GPMC_VMA(AST_GPMC_NAND_CMD);
363 naddr_reg_vma = GPMC_VMA(AST_GPMC_NAND_ADDR);
364 ndata_reg_vma = GPMC_VMA(AST_GPMC_NAND_DATA);
367 * request GPMC CS for ASTORIA request
369 if (gpmc_cs_request(AST_GPMC_CS, SZ_16M, (void *)&csa_phy) < 0) {
370 cy_as_hal_print_message(KERN_ERR "error failed to request"
371 "ncs4 for ASTORIA\n");
374 DBGPRN(KERN_INFO "got phy_addr:%x for "
375 "GPMC CS%d GPMC_CFGREG7[CS4]\n",
376 csa_phy, AST_GPMC_CS);
380 * request VM region for 4K addr space for chip select 4 phy address
381 * technically we don't need it for NAND devices, but do it anyway
382 * so that data read/write bus cycle can be triggered by reading
383 * or writing this mem region
385 if (!request_mem_region(csa_phy, BLKSZ_4K, "AST_OMAP_HAL")) {
387 cy_as_hal_print_message(KERN_ERR "error MEM region "
388 "request for phy_addr:%x failed\n",
394 * REMAP mem region associated with our CS
396 gpmc_data_vma = (u32)ioremap_nocache(csa_phy, BLKSZ_4K);
397 if (!gpmc_data_vma) {
399 cy_as_hal_print_message(KERN_ERR "error- ioremap()"
400 "for phy_addr:%x failed", csa_phy);
402 goto out_release_mem_region;
404 cy_as_hal_print_message(KERN_INFO "ioremap(%x) returned vma=%x\n",
405 csa_phy, gpmc_data_vma);
/* NAND-like device (DEVICETYPE 2), wait pin 2, 8-bit bus initially */
407 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
408 (GPMC_CONFIG1_DEVICETYPE(2) |
409 GPMC_CONFIG1_WAIT_PIN_SEL(2)));
411 memset(&timings, 0, sizeof(timings));
/* chip-select assertion window */
414 timings.cs_on = WB_GPMC_CS_t_o_n;
415 timings.cs_wr_off = WB_GPMC_BUSCYC_t;
416 timings.cs_rd_off = WB_GPMC_BUSCYC_t;
/* ADV (address valid) timing */
419 timings.adv_on = WB_GPMC_ADV_t_o_n;
420 timings.adv_rd_off = WB_GPMC_BUSCYC_t;
421 timings.adv_wr_off = WB_GPMC_BUSCYC_t;
/* read strobe (OE) and access timing */
424 timings.oe_on = WB_GPMC_OE_t_o_n;
425 timings.oe_off = WB_GPMC_OE_t_o_f_f;
426 timings.access = WB_GPMC_RD_t_a_c_c;
427 timings.rd_cycle = WB_GPMC_BUSCYC_t;
/* write strobe (WE) and access timing */
430 timings.we_on = WB_GPMC_WE_t_o_n;
431 timings.we_off = WB_GPMC_WE_t_o_f_f;
432 timings.wr_access = WB_GPMC_WR_t_a_c_c;
433 timings.wr_cycle = WB_GPMC_BUSCYC_t;
435 timings.page_burst_access = WB_GPMC_BUSCYC_t;
436 timings.wr_data_mux_bus = WB_GPMC_BUSCYC_t;
437 gpmc_cs_set_timings(AST_GPMC_CS, &timings);
/* dump CONFIG1..CONFIG7 for this CS (debug aid) */
439 cy_as_hal_print_omap_regs("GPMC_CONFIG", 1,
440 GPMC_VMA(GPMC_CFG_REG(1, AST_GPMC_CS)), 7);
443 * DISABLE cs4, NOTE GPMC REG7 is already configured
444 * at this point by gpmc_cs_request
446 tmp32 = gpmc_cs_read_reg(AST_GPMC_CS, GPMC_CS_CONFIG7) &
447 ~GPMC_CONFIG7_CSVALID;
448 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);
451 * PROGRAM chip select Region, (see OMAP3430 TRM PAGE 1088)
453 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
454 (AS_CS_MASK | AS_CS_BADDR));
457 * by default configure GPMC into 8 bit mode
458 * (to match astoria default mode)
460 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
461 (GPMC_CONFIG1_DEVICETYPE(2) |
462 GPMC_CONFIG1_WAIT_PIN_SEL(2)));
465 * ENABLE astoria cs operation on GPMC
467 gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
468 (tmp32 | GPMC_CONFIG7_CSVALID));
471 * No method currently exists to write this register through GPMC APIs
472 * need to change WAIT2 polarity
474 tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
475 tmp32 = tmp32 | NAND_FORCE_POSTED_WRITE_B | 0x40;
476 IOWR32(GPMC_VMA(GPMC_CONFIG_REG), tmp32);
478 tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
479 cy_as_hal_print_message("GPMC_CONFIG_REG=0x%x\n", tmp32);
/* error unwind: release resources acquired above */
483 out_release_mem_region:
484 release_mem_region(csa_phy, BLKSZ_4K);
487 gpmc_cs_free(AST_GPMC_CS);
493 * west bridge astoria ISR (Interrupt handler)
/* West Bridge Astoria interrupt handler. Reads the device's P0 interrupt
 * register, masks further device interrupts while servicing, drains DRQ
 * (DMA request) interrupts in a loop capped at MAX_DRQ_LOOPS_IN_ISR, then
 * forwards MCU/mailbox/PM/PLL-lock interrupts to the generic WestBridge
 * interrupt service routine before restoring the saved mask.
 * (dev_p assignment, loop braces and the return statement are elided in
 * this excerpt) */
495 static irqreturn_t cy_astoria_int_handler(int irq,
496 void *dev_id, struct pt_regs *regs)
498 cy_as_omap_dev_kernel *dev_p;
499 uint16_t read_val = 0;
500 uint16_t mask_val = 0;
503 * debug stuff, counts number of loops per one intr trigger
505 uint16_t drq_loop_cnt = 0;
/* non-DRQ interrupt sources that are handed to cy_as_intr_service_interrupt */
510 const uint16_t sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
511 CY_AS_MEM_P0_INTR_REG_MBINT |
512 CY_AS_MEM_P0_INTR_REG_PMINT |
513 CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);
516 * sample IRQ pin level (just for statistics)
518 irq_pin = __gpio_get_value(AST_INT);
521 * this one just for debugging
526 * astoria device handle
531 * read Astoria intr register
533 read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
534 CY_AS_MEM_P0_INTR_REG);
537 * save current mask value
539 mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
540 CY_AS_MEM_P0_INT_MASK_REG);
542 DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
543 intr_sequence_num, read_val);
546 * Disable WB interrupt signal generation while we are in ISR
548 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
549 CY_AS_MEM_P0_INT_MASK_REG, 0x0000);
552 * this is a DRQ Interrupt
554 if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {
558 * handle DRQ interrupt
562 cy_handle_d_r_q_interrupt(dev_p);
565 * spending too much time in ISR may impact
566 * average system performance
568 if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
572 * Keep processing if there is another DRQ int flag
574 } while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
575 CY_AS_MEM_P0_INTR_REG) &
576 CY_AS_MEM_P0_INTR_REG_DRQINT);
/* forward non-DRQ (MCU/mailbox/PM/PLL) interrupts to the core library */
579 if (read_val & sentinel)
580 cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);
582 DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
583 "int_pin:%d DRQ_jobs:%d\n",
590 * re-enable WB hw interrupts
592 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
593 CY_AS_MEM_P0_INT_MASK_REG, mask_val);
/* Hook the Astoria INT# GPIO up to cy_astoria_int_handler: configure the
 * GPIO IRQ as level-low triggered and register a shared IRQ handler.
 * Prints diagnostics on success/failure; result propagation and the
 * return statement are elided in this excerpt. */
598 static int cy_as_hal_configure_interrupts(void *dev_p)
601 int irq_pin = AST_INT;
/* Astoria drives INT# low to request service */
603 set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);
606 * for shared IRQS must provide non NULL device ptr
607 * otherwise the int won't register
609 result = request_irq(OMAP_GPIO_IRQ(irq_pin),
610 (irq_handler_t)cy_astoria_int_handler,
611 IRQF_SHARED, "AST_INT#", dev_p);
615 * OMAP_GPIO_IRQ(irq_pin) - omap logical IRQ number
616 * assigned to this interrupt
617 * OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1) - print status
618 * of AST_INT GPIO IRQ_ENABLE FLAG
620 cy_as_hal_print_message(KERN_INFO"AST_INT omap_pin:"
621 "%d assigned IRQ #%d IRQEN1=%d\n",
623 OMAP_GPIO_IRQ(irq_pin),
624 OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1)
/* failure path: request_irq() returned an error */
627 cy_as_hal_print_message("cyasomaphal: interrupt "
628 "failed to register\n");
630 cy_as_hal_print_message(KERN_WARNING
631 "ASTORIA: can't get assigned IRQ"
632 "%i for INT#\n", OMAP_GPIO_IRQ(irq_pin));
639 * initialize OMAP pads/pins to user defined functions
/* Walk a NULL-name-terminated table of user pad descriptors: claim each
 * GPIO, program its pad mux via omap_cfg_reg_L(), set drive level and
 * direction, and log the resulting pin state. Pins that cannot be claimed
 * are reported and left untouched (valid flag stays clear). */
641 static void cy_as_hal_init_user_pads(user_pad_cfg_t *pad_cfg_tab)
644 * browse through the table and initialize the pins
649 while (pad_cfg_tab->name != NULL) {
651 if (gpio_request(pad_cfg_tab->pin_num, NULL) == 0) {
/* mark as acquired so release_user_pads() will free it later */
653 pad_cfg_tab->valid = 1;
654 mux_val = omap_cfg_reg_L(pad_cfg_tab->mux_func);
657 * always set drv level before changing out direction
659 __gpio_set_value(pad_cfg_tab->pin_num,
663 * "0" - OUT, "1", input omap_set_gpio_direction
664 * (pad_cfg_tab->pin_num, pad_cfg_tab->dir);
666 if (pad_cfg_tab->dir)
667 gpio_direction_input(pad_cfg_tab->pin_num);
669 gpio_direction_output(pad_cfg_tab->pin_num,
/* read back the pin level for the diagnostic print below */
673 in_level = __gpio_get_value(pad_cfg_tab->pin_num);
675 cy_as_hal_print_message(KERN_INFO "configured %s to "
676 "OMAP pad_%d, DIR=%d "
679 pad_cfg_tab->pin_num,
686 * get the pad_mux value to check on the pin_function
688 cy_as_hal_print_message(KERN_INFO "couldn't cfg pin %d"
689 "for signal %s, its already taken\n",
690 pad_cfg_tab->pin_num,
694 tmp16 = *(u16 *)PADCFG_VMA
695 (omap_mux_pin_cfg[pad_cfg_tab->mux_func].offset);
697 cy_as_hal_print_message(KERN_INFO "GPIO_%d(PAD_CFG=%x,OE=%d"
698 "DOUT=%d, DIN=%d IRQEN=%d)\n\n",
699 pad_cfg_tab->pin_num, tmp16,
700 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_OE),
701 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_OUT),
702 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_IN),
703 OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_IRQENABLE1)
707 * next pad_cfg descriptor
712 cy_as_hal_print_message(KERN_INFO"pads configured\n");
717 * release gpios taken by the module
/* Counterpart of cy_as_hal_init_user_pads(): free every GPIO the init
 * routine successfully acquired (valid != 0) and clear the valid flag;
 * report pads that were never acquired. */
719 static void cy_as_hal_release_user_pads(user_pad_cfg_t *pad_cfg_tab)
721 while (pad_cfg_tab->name != NULL) {
723 if (pad_cfg_tab->valid) {
724 gpio_free(pad_cfg_tab->pin_num);
725 pad_cfg_tab->valid = 0;
726 cy_as_hal_print_message(KERN_INFO "GPIO_%d "
727 "released from %s\n",
728 pad_cfg_tab->pin_num,
731 cy_as_hal_print_message(KERN_INFO "no release "
732 "for %s, GPIO_%d, wasn't acquired\n",
734 pad_cfg_tab->pin_num);
/* Force the pad mux of the GPMC nCS4 pin to the GPMC chip-select function
 * (the Zoom board otherwise uses this pin for something else). */
740 void cy_as_hal_config_c_s_mux(void)
743 * FORCE the GPMC CS4 pin (it is in use by the zoom system)
745 omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
747 EXPORT_SYMBOL(cy_as_hal_config_c_s_mux);
/* Top-level OMAP3430 hardware bring-up for the WestBridge HAL:
 * map the pad-config (IOMUX) block and the GPIO banks, free/reclaim the
 * reset and CS pins, configure all Astoria-connected pads, initialize the
 * GPMC, and finally force the CS4 mux. Returns the GPMC data-region VMA
 * (0 on ioremap failure, per cy_as_hal_gpmc_init's error path). */
752 uint32_t cy_as_hal_processor_hw_init(void)
756 cy_as_hal_print_message(KERN_INFO "init OMAP3430 hw...\n");
/* map the pad-configuration register block */
758 iomux_vma = (u32)ioremap_nocache(
759 (u32)CTLPADCONF_BASE_ADDR, CTLPADCONF_SIZE);
760 cy_as_hal_print_message(KERN_INFO "PADCONF_VMA=%x val=%x\n",
761 iomux_vma, IORD32(iomux_vma));
/* map all six OMAP3 GPIO bank register ranges */
766 for (i = 0; i < 6; i++) {
767 gpio_vma_tab[i].virt_addr = (u32)ioremap_nocache(
768 gpio_vma_tab[i].phy_addr,
769 gpio_vma_tab[i].size);
771 cy_as_hal_print_message(KERN_INFO "%s virt_addr=%x\n",
772 gpio_vma_tab[i].name,
773 (u32)gpio_vma_tab[i].virt_addr);
777 * force OMAP_GPIO_126 to released state,
778 * will be configured to drive reset
780 gpio_free(AST_RESET);
783 *same thing with AStoria CS pin
788 * initialize all the OMAP pads connected to astoria
790 cy_as_hal_init_user_pads(user_pad_cfg);
792 err = cy_as_hal_gpmc_init();
794 cy_as_hal_print_message(KERN_INFO"gpmc init failed:%d", err);
796 cy_as_hal_config_c_s_mux();
798 return gpmc_data_vma;
800 EXPORT_SYMBOL(cy_as_hal_processor_hw_init);
/* Undo cy_as_hal_processor_hw_init(): unmap the GPMC data region, release
 * the reserved memory region and chip select, free the Astoria IRQ and
 * give back all user-configured pads. */
802 void cy_as_hal_omap_hardware_deinit(cy_as_omap_dev_kernel *dev_p)
805 * free omap hw resources
807 if (gpmc_data_vma != 0)
808 iounmap((void *)gpmc_data_vma);
811 release_mem_region(csa_phy, BLKSZ_4K);
813 gpmc_cs_free(AST_GPMC_CS);
815 free_irq(OMAP_GPIO_IRQ(AST_INT), dev_p);
817 cy_as_hal_release_user_pads(user_pad_cfg);
821 * These are the functions that are not part of the
822 * HAL layer, but are required to be called for this HAL
826 * Called On AstDevice LKM exit
/* Shut down the HAL on module exit: validate the HAL tag signature, mask
 * all device interrupts, stop the worker thread (signal via thread_flag,
 * wait for completion, destroy its sleep channel), deinit the hardware,
 * unlink the device from m_omap_list_p and free it.
 * (list-walk for non-head entries and return statements are elided in this
 * excerpt) */
828 int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag)
830 cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
833 * TODO: Need to disable WB interrupt handler 1st
838 cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
839 if (dev_p->m_sig != CY_AS_OMAP_KERNEL_HAL_SIG) {
840 cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
/* mask all WestBridge interrupts before tearing anything down */
848 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
849 CY_AS_MEM_P0_INT_MASK_REG, 0x0000);
/* thread_flag == 0 means the worker thread is still running: ask it to
 * stop and wait until it signals completion */
852 if (dev_p->thread_flag == 0) {
853 dev_p->thread_flag = 1;
854 wait_for_completion(&dev_p->thread_complete);
855 cy_as_hal_print_message("cyasomaphal:"
856 "done cleaning thread\n");
857 cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
861 cy_as_hal_omap_hardware_deinit(dev_p);
/* unlink from the (single-entry) device list and free the handle */
866 if (m_omap_list_p == dev_p)
867 m_omap_list_p = dev_p->m_next_p;
869 cy_as_hal_free(dev_p);
871 cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
/* Register the Astoria IRQ handler and unmask only the DRQ and mailbox
 * interrupt sources in the device's P0 interrupt mask register.
 * (return statement elided in this excerpt) */
875 int omap_start_intr(cy_as_hal_device_tag tag)
877 cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
879 const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
880 CY_AS_MEM_P0_INTR_REG_MBINT;
883 * register for interrupts
885 ret = cy_as_hal_configure_interrupts(dev_p);
888 * enable only MBox & DRQ interrupts for now
890 cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
891 CY_AS_MEM_P0_INT_MASK_REG, mask);
897 * Below are the functions that communicate with the WestBridge device.
898 * These are system dependent and must be defined by the HAL layer
899 * for a given system.
903 * GPMC NAND command+addr write phase
/* Issue the pNAND command + 5-cycle address phase on the GPMC bus:
 * <cmd> <CA0,CA1,RA0,RA1,RA2>. In 8-bit mode the four low address bytes
 * go out as one 32-bit write which the GPMC serializes; in 16-bit mode
 * every byte must be written individually because the GPMC does not
 * serialize wider writes onto the low byte lanes. Bus-mode selection
 * (presumably on pnand_16bit / PNAND_16BIT_MODE) is elided in this
 * excerpt. */
905 static inline void nand_cmd_n_addr(u8 cmdb1, u16 col_addr, u32 row_addr)
908 * byte order on the bus <cmd> <CA0,CA1,RA0,RA1, RA2>
910 u32 tmpa32 = ((row_addr << 16) | col_addr);
911 u8 RA2 = (u8)(row_addr >> 16);
915 * GPMC PNAND 8bit BUS
920 IOWR8(ncmd_reg_vma, cmdb1);
923 *pnand bus: <CA0,CA1,RA0,RA1>
925 IOWR32(naddr_reg_vma, tmpa32);
928 * <RA2> , always zero
930 IOWR8(naddr_reg_vma, RA2);
934 * GPMC PNAND 16bit BUS , in 16 bit mode CMD
935 * and ADDR sent on [d7..d0]
937 uint8_t CA0, CA1, RA0, RA1;
938 CA0 = tmpa32 & 0x000000ff;
939 CA1 = (tmpa32 >> 8) & 0x000000ff;
940 RA0 = (tmpa32 >> 16) & 0x000000ff;
941 RA1 = (tmpa32 >> 24) & 0x000000ff;
944 * can't use 32 bit writes here omap will not serialize
945 * them to lower half in16 bit mode
949 *pnand bus: <CMD1, CA0,CA1,RA0,RA1, RA2 (always zero)>
951 IOWR8(ncmd_reg_vma, cmdb1);
952 IOWR8(naddr_reg_vma, CA0);
953 IOWR8(naddr_reg_vma, CA1);
954 IOWR8(naddr_reg_vma, RA0);
955 IOWR8(naddr_reg_vma, RA1);
956 IOWR8(naddr_reg_vma, RA2);
961 * spin until r/b goes high
/* Intended to spin until the Astoria R/nB (ready/busy) GPIO goes high;
 * the actual polling code is commented out (see TODO below), so as visible
 * here the function does no waiting. */
963 inline int wait_rn_b_high(void)
968 * TODO: note R/b may go low here, need to spin until high
969 * while (omap_get_gpio_datain(AST_RnB) == 0) {
972 * if (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
974 * while (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
977 * printk("<1>RnB=0!:%d\n",w_spins);
983 #ifdef ENABLE_GPMC_PF_ENGINE
984 /* #define PFE_READ_DEBUG
985 * PNAND block read with OMAP PFE enabled
986 * status: Not tested, NW, broken , etc
/* Large-block pNAND read using the GPMC prefetch engine (PFE): program the
 * PFE for 'count' bytes, issue the RDPAGE command/address, start the
 * engine, then drain the PFE FIFO in 32-bit words, mopping up the 0-3
 * leftover bytes by FIFO byte count at the end. Marked untested/broken by
 * the original author — see the status note above. */
988 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
993 uint8_t bytes_in_fifo;
996 #ifdef PFE_READ_DEBUG
998 uint16_t bytes_read = 0;
1002 * configure the prefetch engine
1005 uint32_t pfe_status;
1008 * DISABLE GPMC CS4 operation 1st, this is
1009 * in case engine is be already disabled
1011 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x0);
1012 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
1013 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
1015 #ifdef PFE_READ_DEBUG
1016 tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG1));
1017 if (tmp32 != GPMC_PREFETCH_CONFIG1_VAL) {
1018 printk(KERN_INFO "<1> prefetch is CONFIG1 read val:%8.8x, != VAL written:%8.8x\n",
1019 tmp32, GPMC_PREFETCH_CONFIG1_VAL);
1020 tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
1021 printk(KERN_INFO "<1> GPMC_PREFETCH_STATUS : %8.8x\n", tmp32);
1027 tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG2));
1028 if (tmp32 != (count))
1029 printk(KERN_INFO "<1> GPMC_PREFETCH_CONFIG2 read val:%d, "
1030 "!= VAL written:%d\n", tmp32, count);
1034 * ISSUE PNAND CMD+ADDR, note gpmc puts 32b words
1035 * on the bus least sig. byte 1st
1037 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1039 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1042 * start the prefetch engine
1044 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
1050 * GPMC PFE service loop
1054 * spin until PFE fetched some
1055 * PNAND bus words in the FIFO
1057 pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
1058 bytes_in_fifo = (pfe_status >> 24) & 0x7f;
1059 } while (bytes_in_fifo == 0);
1061 /* whole 32 bit words in fifo */
1062 w32cnt = bytes_in_fifo >> 2;
1066 *NOTE: FIFO_PTR indicates number of NAND bus words bytes
1067 * already received in the FIFO and available to be read
1068 * by DMA or MPU whether COUNTVAL indicates number of BUS
1069 * words yet to be read from PNAND bus words
1071 printk(KERN_ERR "<1> got PF_STATUS:%8.8x FIFO_PTR:%d, COUNTVAL:%d, w32cnt:%d\n",
1072 pfe_status, bytes_in_fifo,
1073 (pfe_status & 0x3fff), w32cnt);
1077 *ptr32++ = IORD32(gpmc_data_vma);
/* COUNTVAL == 0: the engine has fetched everything from the pNAND bus */
1079 if ((pfe_status & 0x3fff) == 0) {
1081 * PFE acc engine done, there still may be data leftover
1082 * in the FIFO re-read FIFO BYTE counter (check for
1083 * leftovers from 32 bit read accesses above)
1085 bytes_in_fifo = (IORD32(
1086 GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
1089 * NOTE we may still have one word left in the fifo
1093 switch (bytes_in_fifo) {
1097 * nothing to do we already read the
1098 * FIFO out with 32 bit accesses
1103 * this only possible
1104 * for 8 bit pNAND only
1106 *ptr8 = IORD8(gpmc_data_vma);
1111 * this one can occur in either modes
1113 *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
1118 * this only possible for 8 bit pNAND only
1120 *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
1122 *ptr8 = IORD8(gpmc_data_vma);
1127 * shouldn't happen, but has been seen
1130 *ptr32 = IORD32(gpmc_data_vma);
1134 printk(KERN_ERR"<1>_error: PFE FIFO bytes leftover is not read:%d\n",
1139 * read is completed, get out of the while(1) loop
1147 #ifdef PFE_LBD_READ_V2
1149 * PFE engine assisted reads with the 64 byte blocks
/* Alternate PFE-assisted read: issue the RDPAGE command, program and start
 * the prefetch engine, then repeatedly read out however many whole 32-bit
 * words the FIFO reports (status >> 26) until COUNTVAL reaches zero, and
 * finally byte-copy any remaining FIFO bytes. */
1151 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
1157 uint32_t pfe_status;
1160 * ISSUE PNAND CMD+ADDR
1161 * note gpmc puts 32b words on the bus least sig. byte 1st
1163 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1164 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1168 * count - OMAP number of bytes to access on pnand bus
1171 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
1172 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
1173 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
/* rd_cnt = FIFO byte count / 4 = whole 32-bit words available */
1178 pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
1179 rd_cnt = pfe_status >> (24+2);
1182 *ptr32++ = IORD32(gpmc_data_vma);
1184 } while (pfe_status & 0x3fff);
1187 * read out the leftover
1190 rd_cnt = (IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
1193 *ptr8++ = IORD8(gpmc_data_vma);
1197 #ifdef PNAND_LBD_READ_NO_PFE
1199 * Endpoint buffer read w/o OMAP GPMC Prefetch Engine
1200 * the original working code, works at max speed for 8 bit xfers
1201 * for 16 bit the bus diagram has gaps
/* Default (active) read path: issue the RDPAGE command/address, then read
 * 'count' bytes from the GPMC NAND data register — whole 32-bit words
 * first, then the 0-3 byte remainder as one or two 16-bit reads (even in
 * 8-bit mode an even number of bytes must be clocked out). */
1203 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
1210 DBGPRN("<1> %s(): NO_PFE\n", __func__);
1213 /* number of whole 32 bit words in the transfer */
1214 w32cnt = count >> 2;
1216 /* remainder, in bytes(0..3) */
1217 remainder = count & 03;
1220 * note gpmc puts 32b words on the bus least sig. byte 1st
1222 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1223 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1226 * read data by 32 bit chunks
1229 *ptr32++ = IORD32(ndata_reg_vma);
1232 * now do the remainder(it can be 0, 1, 2 or 3)
1233 * same code for both 8 & 16 bit bus
1234 * do 1 or 2 MORE words
1236 ptr16 = (uint16_t *)ptr32;
1238 switch (remainder) {
1240 /* read one 16 bit word
1241 * IN 8 BIT WE NEED TO READ even number of bytes
1244 *ptr16 = IORD16(ndata_reg_vma);
1248 * for 3 bytes read 2 16 bit words
1250 *ptr16++ = IORD16(ndata_reg_vma);
1251 *ptr16 = IORD16(ndata_reg_vma);
1263 * uses LBD mode to write N bytes into astoria
1264 * Status: Working, however there are 150ns idle
1265 * timeafter every 2 (16 bit or 4(8 bit) bus cycles
/* Large-block pNAND write: issue the PGMPAGE command/address, blast the
 * payload out as 32-bit writes (GPMC serializes them onto the bus), write
 * the 0-3 byte remainder as one or two 16-bit words, then issue the
 * program-confirm command byte. See the note above about ~150ns idle time
 * after every 2 (16-bit) or 4 (8-bit) bus cycles. */
1267 static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff)
1275 remainder = count & 03;
1276 w32cnt = count >> 2;
1281 * send: CMDB1, CA0,CA1,RA0,RA1,RA2
1283 nand_cmd_n_addr(PGMPAGE_B1, col_addr, row_addr);
1286 * blast the data out in 32bit chunks
1289 IOWR32(ndata_reg_vma, *ptr32++);
1292 * do the reminder if there is one
1293 * same handling for both 8 & 16 bit pnand: mode
1295 ptr16 = (uint16_t *)ptr32; /* do 1 or 2 words */
1297 switch (remainder) {
1300 * write one 16 bit word
1303 IOWR16(ndata_reg_vma, *ptr16);
1308 * for 3 bytes write 2 16 bit words
1310 IOWR16(ndata_reg_vma, *ptr16++);
1311 IOWR16(ndata_reg_vma, *ptr16);
1320 * finally issue a PGM cmd
1322 IOWR8(ncmd_reg_vma, PGMPAGE_B2);
1326 * write Astoria register
/* Write one 16-bit Astoria register over the pNAND interface using the
 * CASDI (0x85 / 0x0c-prefixed address) sequence. Runs with local IRQs
 * disabled; the static usage counter only detects (and reports) concurrent
 * entry from multiple threads, it does not prevent it. Bus-mode branch
 * selection (presumably on pnand_16bit / PNAND_16BIT_MODE) is elided in
 * this excerpt. */
1328 static inline void ast_p_nand_casdi_write(u8 reg_addr8, u16 data)
1330 unsigned long flags;
1333 * throw an error if called from multiple threads
1335 static atomic_t rdreg_usage_cnt = { 0 };
1338 * disable interrupts
1340 local_irq_save(flags);
1342 if (atomic_read(&rdreg_usage_cnt) != 0) {
1343 cy_as_hal_print_message(KERN_ERR "cy_as_omap_hal:"
1344 "* cy_as_hal_write_register usage:%d\n",
1345 atomic_read(&rdreg_usage_cnt));
1348 atomic_inc(&rdreg_usage_cnt);
1351 * 2 flavors of GPMC -> PNAND access
1355 * 16 BIT gpmc NAND mode
/* 16-bit mode: CMD(0x85), ADDR lo, ADDR hi(0x0c), then 16-bit data */
1361 IOWR8(ncmd_reg_vma, 0x85);
1362 IOWR8(naddr_reg_vma, reg_addr8);
1363 IOWR8(naddr_reg_vma, 0x0c);
1366 * this should be sent on the 16 bit bus
1368 IOWR16(ndata_reg_vma, data);
1371 * 8 bit nand mode GPMC will automatically
1372 * serialize 16bit or 32 bit writes into
1373 * 8 bit ones to the lower 8 bit in LE order
1375 addr16 = 0x0c00 | reg_addr8;
1380 IOWR8(ncmd_reg_vma, 0x85);
1381 IOWR16(naddr_reg_vma, addr16);
1382 IOWR16(ndata_reg_vma, data);
1386 * re-enable interrupts
1388 atomic_dec(&rdreg_usage_cnt);
1389 local_irq_restore(flags);
1394 * read astoria register via pNAND interface
/* Read one 16-bit Astoria register over the pNAND interface using the
 * CASDO (0x05 / 0x0c-prefixed address / 0xE0) sequence. Runs with local
 * IRQs disabled; like the write path, the static usage counter only
 * reports concurrent entry. Returns the register value (return statement
 * elided in this excerpt). NOTE(review): the counter is named
 * wrreg_usage_cnt and the error text says "write_register" although this
 * is the read path — likely copy-paste from casdi_write. */
1396 static inline u16 ast_p_nand_casdo_read(u8 reg_addr8)
1400 unsigned long flags;
1402 * throw an error if called from multiple threads
1404 static atomic_t wrreg_usage_cnt = { 0 };
1407 * disable interrupts
1409 local_irq_save(flags);
1411 if (atomic_read(&wrreg_usage_cnt) != 0) {
1413 * if it gets here ( from other threads), this function needs
1414 * need spin_lock_irq save() protection
1416 cy_as_hal_print_message(KERN_ERR"cy_as_omap_hal: "
1417 "cy_as_hal_write_register usage:%d\n",
1418 atomic_read(&wrreg_usage_cnt));
1420 atomic_inc(&wrreg_usage_cnt);
1423 * 2 flavors of GPMC -> PNAND access
1427 * 16 BIT gpmc NAND mode
/* 16-bit mode: CMD(0x05), ADDR lo, ADDR hi(0x0c), CMD(0xE0), read data */
1431 IOWR8(ncmd_reg_vma, 0x05);
1432 IOWR8(naddr_reg_vma, reg_addr8);
1433 IOWR8(naddr_reg_vma, 0x0c);
1434 IOWR8(ncmd_reg_vma, 0x00E0);
1439 * much faster through the gPMC Register space
1441 data = IORD16(ndata_reg_vma);
1444 * 8 BIT gpmc NAND mode
1445 * CMD1, CA1, CA2, CMD2
1447 addr16 = 0x0c00 | reg_addr8;
1448 IOWR8(ncmd_reg_vma, 0x05);
1449 IOWR16(naddr_reg_vma, addr16);
1450 IOWR8(ncmd_reg_vma, 0xE0);
1452 data = IORD16(ndata_reg_vma);
1456 * re-enable interrupts
1458 atomic_dec(&wrreg_usage_cnt);
1459 local_irq_restore(flags);
1466 * This function must be defined to write a register within the WestBridge
1467 * device. The addr value is the address of the register to write with
1468 * respect to the base address of the WestBridge device.
/* HAL contract entry point: write a 16-bit WestBridge register at 'addr'
 * (relative to the device base) by delegating to the pNAND CASDI sequence.
 * The tag parameter is unused here. */
1470 void cy_as_hal_write_register(
1471 cy_as_hal_device_tag tag,
1472 uint16_t addr, uint16_t data)
1474 ast_p_nand_casdi_write((u8)addr, data);
1478 * This function must be defined to read a register from the WestBridge
1479 * device. The addr value is the address of the register to read with
1480 * respect to the base address of the WestBridge device.
/* HAL contract entry point: read a 16-bit WestBridge register at 'addr'
 * via the pNAND CASDO sequence. The tag parameter is unused here; the
 * return of 'data' is elided in this excerpt. */
1482 uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
1487 * READ ASTORIA REGISTER USING CASDO
1489 data = ast_p_nand_casdo_read((u8)addr);
1495 * preps Ep pointers & data counters for next packet
1496 * (fragment of the request) xfer returns true if
1497 * there is a next transfer, and false if all bytes in
1498 * current request have been xfered
/* Advance endpoint 'ep' to its next DMA sub-transfer.
 * Non-SG endpoints: done when req_xfer_cnt reaches req_length; otherwise
 * the next chunk is HAL_DMA_PKT_SZ or the smaller final remainder.
 * SG endpoints: when the current scatterlist segment is exhausted, either
 * finish (last segment) or move data_p to the next segment; SG chunks are
 * always HAL_DMA_PKT_SZ. Returns true if another transfer is pending,
 * false when the request is complete (return statements are elided in
 * this excerpt). */
1500 static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
1503 if (!end_points[ep].sg_list_enabled) {
1505 * no further transfers for non storage EPs
1506 * (like EP2 during firmware download, done
1507 * in 64 byte chunks)
1509 if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
1510 DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
1511 __func__, end_points[ep].req_length, ep);
1514 * no more transfers, we are done with the request
1520 * calculate size of the next DMA xfer, corner
1521 * case for non-storage EPs where transfer size
1522 * is not equal N * HAL_DMA_PKT_SZ xfers
1524 if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
1525 >= HAL_DMA_PKT_SZ) {
1526 end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
1529 * that would be the last chunk less
1530 * than P-port max size
1532 end_points[ep].dma_xfer_sz = end_points[ep].req_length -
1533 end_points[ep].req_xfer_cnt;
1540 * for SG_list assisted dma xfers
1541 * are we done with current SG ?
1543 if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
1545 * was it the Last SG segment on the list ?
1547 if (sg_is_last(end_points[ep].sg_p)) {
1548 DBGPRN("<1> %s: EP:%d completed,"
1549 "%d bytes xfered\n",
1552 end_points[ep].req_xfer_cnt
1558 * There are more SG segments in current
1559 * request's sg list setup new segment
1562 end_points[ep].seg_xfer_cnt = 0;
1563 end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
1564 /* set data pointer for next DMA sg transfer*/
1565 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1566 DBGPRN("<1> %s new SG:_va:%p\n\n",
1567 __func__, end_points[ep].data_p);
1573 * for sg list xfers it will always be 512 or 1024
1575 end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
1578 * next transfer is required
1578 * next transfer is required
1585 * Astoria DMA read request, APP_CPU reads from WB ep buffer
/*
 * Service one DRQ "read" for endpoint <ep>: pull the bytes WestBridge has
 * staged in its EP buffer into end_points[ep].data_p via a CPU-driven
 * ("soft" DMA) pNAND LBD read, update the transfer counters, then either
 * arm the next fragment or complete the request via the API callback.
 * Runs in interrupt context (called from cy_handle_d_r_q_interrupt).
 */
1587 static void cy_service_e_p_dma_read_request(
1588 cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1590 cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1593 uint16_t col_addr = 0x0000;
1594 uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1595 uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1598 * get the XFER size frtom WB eP DMA REGISTER
1600 v = cy_as_hal_read_register(tag, ep_dma_reg);
1603 * amount of data in EP buff in bytes
1605 size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;
1608 * memory pointer for this DMA packet xfer (sub_segment)
1610 dptr = end_points[ep].data_p;
1612 DBGPRN("<1>HAL:_svc_dma_read on EP_%d sz:%d, intr_seq:%d, dptr:%p\n",
/* a DRQ with a zero byte count would mean the DRQ/DMA logic is broken */
1619 cy_as_hal_assert(size != 0);
1623 * the actual WB-->OMAP memory "soft" DMA xfer
1625 p_nand_lbd_read(col_addr, row_addr, size, dptr);
1629 * clear DMAVALID bit indicating that the data has been read
1631 cy_as_hal_write_register(tag, ep_dma_reg, 0);
1633 end_points[ep].seg_xfer_cnt += size;
1634 end_points[ep].req_xfer_cnt += size;
1637 * pre-advance data pointer (if it's outside sg
1638 * list it will be reset anyway
1640 end_points[ep].data_p += size;
/* more fragments pending: re-arm WestBridge with the next transfer size */
1642 if (prep_for_next_xfer(tag, ep)) {
1644 * we have more data to read in this request,
1645 * setup next dma packet due tell WB how much
1646 * data we are going to xfer next
1648 v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
1649 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1650 cy_as_hal_write_register(tag, ep_dma_reg, v);
/* request complete: clear EP bookkeeping and notify the WB API */
1652 end_points[ep].pending = cy_false;
1653 end_points[ep].type = cy_as_hal_none;
1654 end_points[ep].buffer_valid = cy_false;
1657 * notify the API that we are done with rq on this EP
1660 DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
1661 end_points[ep].req_xfer_cnt);
1663 end_points[ep].req_xfer_cnt,
1664 CY_AS_ERROR_SUCCESS);
1670 * omap_cpu needs to transfer data to ASTORIA EP buffer
/*
 * Service one DRQ "write" for endpoint <ep>: push the next
 * end_points[ep].dma_xfer_sz bytes from end_points[ep].data_p into the
 * WestBridge EP buffer via a CPU-driven ("soft" DMA) pNAND LBD write,
 * update the counters, then either arm the next fragment or complete the
 * request via the API callback.  Runs in interrupt context.
 */
1672 static void cy_service_e_p_dma_write_request(
1673 cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1678 uint16_t col_addr = 0x0000;
1679 uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1682 cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1684 * note: size here its the size of the dma transfer could be
1685 * anything > 0 && < P_PORT packet size
1687 size = end_points[ep].dma_xfer_sz;
1688 dptr = end_points[ep].data_p;
1691 * perform the soft DMA transfer, soft in this case
1694 p_nand_lbd_write(col_addr, row_addr, size, dptr);
1696 end_points[ep].seg_xfer_cnt += size;
1697 end_points[ep].req_xfer_cnt += size;
1699 * pre-advance data pointer
1700 * (if it's outside sg list it will be reset anyway)
1702 end_points[ep].data_p += size;
1705 * now clear DMAVAL bit to indicate we are done
1706 * transferring data and that the data can now be
1707 * sent via USB to the USB host, sent to storage,
1708 * or used internally.
/* writing the byte count (DMAVAL cleared) hands the buffer to WestBridge */
1711 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1712 cy_as_hal_write_register(tag, addr, size);
1715 * finally, tell the USB subsystem that the
1716 * data is gone and we can accept the
1717 * next request if one exists.
1719 if (prep_for_next_xfer(tag, ep)) {
1721 * There is more data to go. Re-init the WestBridge DMA side
1723 v = end_points[ep].dma_xfer_sz |
1724 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1725 cy_as_hal_write_register(tag, addr, v);
/* request complete: clear EP bookkeeping and notify the WB API */
1728 end_points[ep].pending = cy_false;
1729 end_points[ep].type = cy_as_hal_none;
1730 end_points[ep].buffer_valid = cy_false;
1733 * notify the API that we are done with rq on this EP
1737 * this callback will wake up the process that might be
1738 * sleeping on the EP which data is being transferred
1741 end_points[ep].req_xfer_cnt,
1742 CY_AS_ERROR_SUCCESS);
1748 * HANDLE DRQINT from Astoria (called in AS_Intr context
/*
 * DRQ (DMA request) interrupt dispatcher.  Reads the DRQ status register,
 * round-robins across EP2..EP15 (the static "service_ep" keeps the rotor
 * position between interrupts) and hands the selected endpoint to the
 * read or write service routine according to its pending transfer type.
 */
1750 static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p)
/* rotor position persists across calls so EPs are serviced fairly */
1753 static uint8_t service_ep = 2;
1756 * We've got DRQ INT, read DRQ STATUS Register */
1757 v = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
/* no bits set: interrupt fired but no endpoint wants service */
1761 #ifndef WESTBRIDGE_NDEBUG
1762 cy_as_hal_print_message("stray DRQ interrupt detected\n");
1768 * Now, pick a given DMA request to handle, for now, we just
1769 * go round robin. Each bit position in the service_mask
1770 * represents an endpoint from EP2 to EP15. We rotate through
1771 * each of the endpoints to find one that needs to be serviced.
1773 while ((v & (1 << service_ep)) == 0) {
1775 if (service_ep == 15)
1781 if (end_points[service_ep].type == cy_as_hal_write) {
1783 * handle DMA WRITE REQUEST: app_cpu will
1784 * write data into astoria EP buffer
1786 cy_service_e_p_dma_write_request(dev_p, service_ep);
1787 } else if (end_points[service_ep].type == cy_as_hal_read) {
1789 * handle DMA READ REQUEST: cpu will
1790 * read EP buffer from Astoria
1792 cy_service_e_p_dma_read_request(dev_p, service_ep);
/* DRQ bit set but no read/write queued on this EP - mask logic suspect */
1794 #ifndef WESTBRIDGE_NDEBUG
1796 cy_as_hal_print_message("cyashalomap:interrupt,"
1797 " w/o pending DMA job,"
1798 "-check DRQ_MASK logic\n");
1802 * Now bump the EP ahead, so other endpoints get
1803 * a shot before the one we just serviced
1805 if (end_points[service_ep].type == cy_as_hal_none) {
1806 if (service_ep == 15)
1814 void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
1816 DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
1817 if (end_points[ep].pending)
1818 cy_as_hal_write_register(tag,
1819 CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);
1821 end_points[ep].buffer_valid = cy_false;
1822 end_points[ep].type = cy_as_hal_none;
1826 * enables/disables SG list assisted DMA xfers for the given EP
1827 * sg_list assisted XFERS can use physical addresses of mem pages in case if the
1828 * xfer is performed by a h/w DMA controller rather then the CPU on P port
1830 void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
1832 end_points[ep].sg_list_enabled = sg_xfer_enabled;
1833 DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
1834 ep, end_points[ep].sg_list_enabled);
1836 EXPORT_SYMBOL(cy_as_hal_set_ep_dma_mode);
1839 * This function must be defined to transfer a block of data to
1840 * the WestBridge device. This function can use the burst write
1841 * (DMA) capabilities of WestBridge to do this, or it can just copy
1842 * the data using writes.
/*
 * Start a host->WestBridge transfer on endpoint <ep>.  Records the request
 * in end_points[ep] (total length, first-fragment size, sg vs flat buffer)
 * and arms the EP DMA register with size|DMAVAL; the actual data movement
 * then happens fragment-by-fragment from the DRQ interrupt path.
 */
1844 void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
1845 uint8_t ep, void *buf,
1846 uint32_t size, uint16_t maxsize)
1852 * Note: "size" is the actual request size
1853 * "maxsize" - is the P port fragment size
1854 * No EP0 or EP1 traffic should get here
1856 cy_as_hal_assert(ep != 0 && ep != 1);
1859 * If this asserts, we have an ordering problem. Another DMA request
1860 * is coming down before the previous one has completed.
1862 cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1863 end_points[ep].buffer_valid = cy_true;
1864 end_points[ep].type = cy_as_hal_write;
1865 end_points[ep].pending = cy_true;
1868 * total length of the request
1870 end_points[ep].req_length = size;
/* first fragment is capped at the P-port packet size */
1872 if (size >= maxsize) {
1874 * set xfer size for very 1st DMA xfer operation
1875 * port max packet size ( typically 512 or 1024)
1877 end_points[ep].dma_xfer_sz = maxsize;
1880 * smaller xfers for non-storage EPs
1882 end_points[ep].dma_xfer_sz = size;
1886 * check the EP transfer mode uses sg_list rather then a memory buffer
1887 * block devices pass it to the HAL, so the hAL could get to the real
1888 * physical address for each segment and set up a DMA controller
1889 * hardware ( if there is one)
1891 if (end_points[ep].sg_list_enabled) {
1893 * buf - pointer to the SG list
1894 * data_p - data pointer to the 1st DMA segment
1895 * seg_xfer_cnt - keeps track of N of bytes sent in current
1897 * req_xfer_cnt - keeps track of the total N of bytes
1898 * transferred for the request
1900 end_points[ep].sg_p = buf;
1901 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1902 end_points[ep].seg_xfer_cnt = 0;
1903 end_points[ep].req_xfer_cnt = 0;
1905 #ifdef DBGPRN_DMA_SETUP_WR
1906 DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
1907 "req_sz:%d, maxsz:%d\n",
1911 end_points[ep].data_p,
1918 * setup XFER for non sg_list assisted EPs
1921 #ifdef DBGPRN_DMA_SETUP_WR
1922 DBGPRN("<1>%s non storage or sz < 512:"
1923 "EP:%d, sz:%d\n", __func__, ep, size);
1926 end_points[ep].sg_p = NULL;
1929 * must be a VMA of a membuf in kernel space
1931 end_points[ep].data_p = buf;
1934 * will keep track No of bytes xferred for the request
1936 end_points[ep].req_xfer_cnt = 0;
1940 * Tell WB we are ready to send data on the given endpoint
/* arm the EP DMA register: fragment byte count plus the DMAVAL bit */
1942 v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
1943 | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1945 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1947 cy_as_hal_write_register(tag, addr, v);
1951 * This function must be defined to transfer a block of data from
1952 * the WestBridge device. This function can use the burst read
1953 * (DMA) capabilities of WestBridge to do this, or it can just
1954 * copy the data using reads.
/*
 * Start a WestBridge->host transfer on endpoint <ep>.  Mirrors
 * cy_as_hal_dma_setup_write: records the request in end_points[ep] and,
 * for sg-mode or storage endpoints, arms the EP DMA register; the data is
 * then pulled in from the DRQ interrupt path.
 */
1956 void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
1957 uint8_t ep, void *buf,
1958 uint32_t size, uint16_t maxsize)
1964 * Note: "size" is the actual request size
1965 * "maxsize" - is the P port fragment size
1966 * No EP0 or EP1 traffic should get here
1968 cy_as_hal_assert(ep != 0 && ep != 1);
1971 * If this asserts, we have an ordering problem.
1972 * Another DMA request is coming down before the
1973 * previous one has completed. we should not get
1974 * new requests if current is still in process
1977 cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1979 end_points[ep].buffer_valid = cy_true;
1980 end_points[ep].type = cy_as_hal_read;
1981 end_points[ep].pending = cy_true;
1982 end_points[ep].req_xfer_cnt = 0;
1983 end_points[ep].req_length = size;
/* first fragment is capped at the P-port packet size */
1985 if (size >= maxsize) {
1987 * set xfer size for very 1st DMA xfer operation
1988 * port max packet size ( typically 512 or 1024)
1990 end_points[ep].dma_xfer_sz = maxsize;
1993 * so that we could handle small xfers on in case
1994 * of non-storage EPs
1996 end_points[ep].dma_xfer_sz = size;
1999 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
2001 if (end_points[ep].sg_list_enabled) {
2003 * Handle sg-list assisted EPs
2004 * seg_xfer_cnt - keeps track of N of sent packets
2005 * buf - pointer to the SG list
2006 * data_p - data pointer for the 1st DMA segment
2008 end_points[ep].seg_xfer_cnt = 0;
2009 end_points[ep].sg_p = buf;
2010 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
2012 #ifdef DBGPRN_DMA_SETUP_RD
2013 DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
2014 "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
2017 end_points[ep].data_p,
/* sg mode: always arm the EP DMA register immediately */
2021 v = (end_points[ep].dma_xfer_sz &
2022 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2023 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2024 cy_as_hal_write_register(tag, addr, v);
2027 * Non sg list EP passed void *buf rather then scatterlist *sg
2029 #ifdef DBGPRN_DMA_SETUP_RD
2030 DBGPRN("%s:non-sg_list EP:%d,"
2031 "RQ_sz:%d, maxsz:%d\n",
2032 __func__, ep, size, maxsize);
2035 end_points[ep].sg_p = NULL;
2038 * must be a VMA of a membuf in kernel space
2040 end_points[ep].data_p = buf;
2043 * Program the EP DMA register for Storage endpoints only.
2045 if (is_storage_e_p(ep)) {
2046 v = (end_points[ep].dma_xfer_sz &
2047 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2048 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2049 cy_as_hal_write_register(tag, addr, v);
2055 * This function must be defined to allow the WB API to
2056 * register a callback function that is called when a
2057 * DMA transfer is complete.
/*
 * Store the WB API's DMA-completion callback; the DRQ service routines
 * invoke it with (tag, ep, req_xfer_cnt, status) when a request finishes.
 */
2059 void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
2060 cy_as_hal_dma_complete_callback cb)
/*
 * NOTE(review): printing a function pointer via (uint32_t) cast and %x is
 * only correct on 32-bit targets (OMAP3 here); %p would be portable --
 * confirm before changing.
 */
2062 DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%x\n",
2063 __func__, (uint32_t)cb);
2068 * This function must be defined to return the maximum size of
2069 * DMA request that can be handled on the given endpoint. The
2070 * return value should be the maximum size in bytes that the DMA
2071 * module can handle.
2073 uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
2074 cy_as_end_point_number_t ep)
2077 * Storage reads and writes are always done in 512 byte blocks.
2078 * So, we do the count handling within the HAL, and save on
2079 * some of the data transfer delay.
2081 if ((ep == CYASSTORAGE_READ_EP_NUM) ||
2082 (ep == CYASSTORAGE_WRITE_EP_NUM)) {
2083 /* max DMA request size HAL can handle by itself */
2084 return CYASSTORAGE_MAX_XFER_SIZE;
2087 * For the USB - Processor endpoints, the maximum transfer
2088 * size depends on the speed of USB operation. So, we use
2089 * the following constant to indicate to the API that
2090 * splitting of the data into chunks less that or equal to
2091 * the max transfer size should be handled internally.
2094 /* DEFINED AS 0xffffffff in cyasdma.h */
2095 return CY_AS_DMA_MAX_SIZE_HW_SIZE;
2100 * This function must be defined to set the state of the WAKEUP pin
2101 * on the WestBridge device. Generally this is done via a GPIO of
/*
 * Not implemented in this HAL; the visible body only carries the
 * "Not supported" note.  NOTE(review): presumably returns cy_false to
 * signal lack of support -- the return statement is outside this view.
 */
2104 cy_bool cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state)
2107 * Not supported as of now.
2112 void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
2114 cy_as_hal_print_message("error: astoria PLL lock is lost\n");
2115 cy_as_hal_print_message("please check the input voltage levels");
2116 cy_as_hal_print_message("and clock, and restart the system\n");
2120 * Below are the functions that must be defined to provide the basic
2121 * operating system services required by the API.
2125 * This function is required by the API to allocate memory.
2126 * This function is expected to work exactly like malloc().
2128 void *cy_as_hal_alloc(uint32_t cnt)
2132 ret_p = kmalloc(cnt, GFP_ATOMIC);
2137 * This function is required by the API to free memory allocated
2138 * with CyAsHalAlloc(). This function is'expected to work exacly
2141 void cy_as_hal_free(void *mem_p)
2147 * Allocator that can be used in interrupt context.
2148 * We have to ensure that the kmalloc call does not
2149 * sleep in this case.
2151 void *cy_as_hal_c_b_alloc(uint32_t cnt)
2155 ret_p = kmalloc(cnt, GFP_ATOMIC);
2160 * This function is required to set a block of memory to a
2161 * specific value. This function is expected to work exactly
/*
 * Fill <cnt> bytes at <ptr> with <value>.  Thin wrapper over memset()
 * so the WB API stays libc/kernel agnostic.
 */
void cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt)
{
	memset(ptr, value, cnt);
}
2170 * This function is expected to create a sleep channel.
2171 * The data structure that represents the sleep channel object
2172 * sleep channel (which is Linux "wait_queue_head_t wq" for this paticular HAL)
2173 * passed as a pointer, and allpocated by the caller
2174 * (typically as a local var on the stack) "Create" word should read as
2175 * "SleepOn", this func doesn't actually create anything
2177 cy_bool cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel)
2179 init_waitqueue_head(&channel->wq);
2184 * for this particular HAL it doesn't actually destroy anything
2185 * since no actual sleep object is created in CreateSleepChannel()
2186 * sleep channel is given by the pointer in the argument.
2188 cy_bool cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel)
2194 * platform specific wakeable Sleep implementation
2196 cy_bool cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms)
2198 wait_event_interruptible_timeout(channel->wq, 0, ((ms * HZ)/1000));
2203 * wakes up the process waiting on the CHANNEL
2205 cy_bool cy_as_hal_wake(cy_as_hal_sleep_channel *channel)
2207 wake_up_interruptible_all(&channel->wq);
/*
 * Disable WestBridge interrupt handling; paired with
 * cy_as_hal_enable_interrupts().  Uses the file-scope intr__enable
 * nesting counter.  NOTE(review): the counter update and return value
 * are outside this view -- confirm nesting semantics against the full
 * source before relying on them.
 */
2211 uint32_t cy_as_hal_disable_interrupts()
2213 if (0 == intr__enable)
/*
 * Re-enable WestBridge interrupt handling previously disabled by
 * cy_as_hal_disable_interrupts(); <val> is the token returned by the
 * disable call.  NOTE(review): the counter decrement is outside this
 * view -- confirm against the full source.
 */
2220 void cy_as_hal_enable_interrupts(uint32_t val)
2223 if (0 == intr__enable)
2228 * Sleep atleast 150ns, cpu dependent
/*
 * Busy-wait for at least 150 ns via a fixed 1000-iteration loop; the
 * actual delay is CPU-clock dependent (calibrated for OMAP3430).
 */
2230 void cy_as_hal_sleep150(void)
2235 for (i = 0; i < 1000; i++)
2239 void cy_as_hal_sleep(uint32_t ms)
2241 cy_as_hal_sleep_channel channel;
2243 cy_as_hal_create_sleep_channel(&channel);
2244 cy_as_hal_sleep_on(&channel, ms);
2245 cy_as_hal_destroy_sleep_channel(&channel);
2248 cy_bool cy_as_hal_is_polling()
/*
 * Free memory obtained from cy_as_hal_c_b_alloc(); kfree() is safe in
 * interrupt context, so this simply forwards to the common free path.
 */
void cy_as_hal_c_b_free(void *ptr)
{
	cy_as_hal_free(ptr);
}
2259 * suppose to reinstate the astoria registers
2260 * that may be clobbered in sleep mode
2262 void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
2263 cy_bool is_standby_wakeup)
2265 /* specific to SPI, no implementation required */
2267 (void) is_standby_wakeup;
2270 void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag)
2272 /* specific to SPI, no implementation required */
2276 cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag)
2279 * we are in asynchronous mode. so no need to handle this
2285 * init OMAP h/w resources
/*
 * Bring up the OMAP3430 side of the WestBridge HAL: reset per-EP DMA
 * bookkeeping, allocate and link the device structure, initialize the
 * OMAP GPMC/GPIO hardware, hard-reset Astoria, probe it over the pNAND
 * bus (CFG_ID must read 0xA200), configure 8/16-bit PNAND mode, install
 * the interrupt handler and enable sg-assisted DMA on the storage EPs.
 * On probe failure the hardware is deinitialized and dev_p is freed.
 */
2287 int start_o_m_a_p_kernel(const char *pgm,
2288 cy_as_hal_device_tag *tag, cy_bool debug)
2290 cy_as_omap_dev_kernel *dev_p;
2296 * No debug mode support through argument as of now
2300 DBGPRN(KERN_INFO"starting OMAP34xx HAL...\n");
2303 * Initialize the HAL level endpoint DMA data.
2305 for (i = 0; i < sizeof(end_points)/sizeof(end_points[0]); i++) {
2306 end_points[i].data_p = 0;
2307 end_points[i].pending = cy_false;
2308 end_points[i].size = 0;
2309 end_points[i].type = cy_as_hal_none;
2310 end_points[i].sg_list_enabled = cy_false;
2313 * by default the DMA transfers to/from the E_ps don't
2314 * use sg_list that implies that the upper devices like
2315 * blockdevice have to enable it for the E_ps in their
2316 * initialization code
2321 * allocate memory for OMAP HAL
2323 dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
2324 sizeof(cy_as_omap_dev_kernel));
2326 cy_as_hal_print_message("out of memory allocating OMAP"
2327 "device structure\n");
2331 dev_p->m_sig = CY_AS_OMAP_KERNEL_HAL_SIG;
2334 * initialize OMAP hardware and StartOMAPKernelall gpio pins
2336 dev_p->m_addr_base = (void *)cy_as_hal_processor_hw_init();
2339 * Now perform a hard reset of the device to have
2340 * the new settings take effect
2342 __gpio_set_value(AST_WAKEUP, 1);
2345 * do Astoria h/w reset
2347 DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");
2350 * NEGATIVE PULSE on RST pin
2352 __gpio_set_value(AST_RESET, 0);
2354 __gpio_set_value(AST_RESET, 1);
2358 * note AFTER reset PNAND interface is 8 bit mode
2359 * so if gpmc Is configured in 8 bit mode upper half will be FF
2361 pncfg_reg = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2363 #ifdef PNAND_16BIT_MODE
2366 * switch to 16 bit mode, force NON-LNA LBD mode, 3 RA addr bytes
2368 ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0001);
2371 * now in order to continue to talk to astoria
2372 * sw OMAP GPMC into 16 bit mode as well
2374 cy_as_hal_gpmc_enable_16bit_bus(cy_true);
2376 /* Astoria and GPMC are already in 8 bit mode, jsut initialize PNAND_CFG */
2377 ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0000);
2381 * NOTE: if you want to capture bus activity on the LA,
2382 * don't use printks in between the activities you want to capture.
2383 * prinks may take milliseconds, and the data of interest
2384 * will fall outside the LA capture window/buffer
/* probe: CFG_ID identifies the Astoria silicon; 0xA200 is expected */
2386 data16[0] = ast_p_nand_casdo_read(CY_AS_MEM_CM_WB_CFG_ID);
2387 data16[1] = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2389 if (data16[0] != 0xA200) {
2391 * astoria device is not found
2393 printk(KERN_ERR "ERROR: astoria device is not found, CY_AS_MEM_CM_WB_CFG_ID ");
2394 printk(KERN_ERR "read returned:%4.4X: CY_AS_MEM_PNAND_CFG:%4.4x !\n",
/*
 * NOTE(review): data16[0] is passed twice although the format string
 * names CY_AS_MEM_PNAND_CFG for the second field; the second argument
 * should most likely be data16[1] -- confirm and fix upstream.
 */
2395 data16[0], data16[0]);
2399 cy_as_hal_print_message(KERN_INFO" register access CASDO test:"
2400 "\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
2401 "PNAND_CFG after RST:%4.4x\n "
2402 "CY_AS_MEM_PNAND_CFG"
2403 "after cfg_wr:%4.4x\n\n",
2404 data16[0], pncfg_reg, data16[1]);
/* link the new device into the HAL's device list and install the ISR */
2406 dev_p->thread_flag = 1;
2407 spin_lock_init(&int_lock);
2408 dev_p->m_next_p = m_omap_list_p;
2410 m_omap_list_p = dev_p;
2413 cy_as_hal_configure_interrupts((void *)dev_p);
2415 cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
2416 ", kernel HZ:%d\n", dev_p, HZ);
2419 *make processor to storage endpoints SG assisted by default
2421 cy_as_hal_set_ep_dma_mode(4, true);
2422 cy_as_hal_set_ep_dma_mode(8, true);
/* error path: probe failed before the tag was published to callers */
2427 * there's been a NAND bus access error or
2428 * astoria device is not connected
2432 * at this point hal tag hasn't been set yet
2433 * so the device will not call omap_stop
2435 cy_as_hal_omap_hardware_deinit(dev_p);
2436 cy_as_hal_free(dev_p);
2442 * Some compilers do not like empty C files, so if the OMAP hal is not being
2443 * compiled, we compile this single function. We do this so that for a
2444 * given target HAL there are not multiple sources for the HAL functions.
2446 void my_o_m_a_p_kernel_hal_dummy_function(void)