reg = <ec100000 10000>;
interrupt-parent = <&opb_intc_0>;
interrupts = <1 0>; // got this from the opb_intc parameters
+ port-number = <0>;
current-speed = <d#115200>; // standard serial device prop
clock-frequency = <d#50000000>; // standard serial device prop
xlnx,data-bits = <8>;
Required properties:
- current-speed : Baud rate of uartlite
-
+ Optional properties:
+ - port-number : unique ordinal index of the device. This
+ property is required for a console on uartlite.
+
v) Xilinx hwicap
Xilinx hwicap devices provide access to the configuration logic
- reg-offset : A value of 3 is required
- reg-shift : A value of 2 is required
-
p) Freescale Synchronous Serial Interface
The SSI is a serial device that communicates with audio codecs. It can
--- /dev/null
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+
+mainmenu "Linux/Microblaze Kernel Configuration"
+
+config MICROBLAZE
+ bool
+ default y
+
+config MMU
+ bool
+ default n
+
+config SWAP
+ bool
+ default n
+
+config RWSEM_GENERIC_SPINLOCK
+ bool
+ default y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+
+config ARCH_HAS_ILOG2_U32
+ bool
+ default n
+
+config ARCH_HAS_ILOG2_U64
+ bool
+ default n
+
+config GENERIC_FIND_NEXT_BIT
+ bool
+ default y
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
+config GENERIC_HARDIRQS
+ bool
+ default y
+
+config GENERIC_IRQ_PROBE
+ bool
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+config OF
+ def_bool n
+
+config PCI
+ bool
+ default n
+
+config UID16
+ bool
+ default y
+
+config DEFCONFIG_LIST
+ string
+ default "arch/$ARCH/defconfig"
+
+config NO_IOPORT
+ bool
+ default y
+
+# source arch/microblaze/Kconfig.auto
+
+source "init/Kconfig"
+
+menu "Processor type and features"
+
+choice
+ prompt "Subarchitecture Type"
+ default MICROBLAZE_ML401
+
+config MICROBLAZE_ML401
+ bool "ML401"
+ help
+ Choose this option if you are using the Xilinx ML401 reference design
+
+config MICROBLAZE_SP3E
+ bool "SP3E/1600"
+ help
+ Choose this option if you are using the Xilinx Spartan-3E/1600 reference design
+
+config MICROBLAZE_OTHER
+ bool "Other"
+ help
+ Choose this option if you are using some other design with an automatically generated BSP.
+
+endchoice
+
+config CMDLINE_BOOL
+ bool "Default bootloader kernel arguments"
+
+config CMDLINE
+ string "Initial kernel command string"
+ depends on CMDLINE_BOOL
+ default "console=ttyS0,9600"
+ help
+ On some platforms, there is currently no way for the boot loader to
+ pass arguments to the kernel. For these platforms, you can supply
+ some command-line options at build time by entering them here. In
+ most cases you will need to specify the root device here.
+
+config WANT_DEVICE_TREE
+ bool "Static device tree configuration"
+ select OF
+ help
+ Enable configuration using a static device tree. If a static
+ device tree is not used, then configuration will default to
+ using platform_devices via the xparameters mechanism.
+
+config DEVICE_TREE
+ string "Static device tree source file"
+ depends on WANT_DEVICE_TREE
+ help
+ This specifies the device tree source (.dts) file to be
+ compiled and included when building the bootwrapper. If a
+ relative filename is given, then it will be relative to
+ arch/microblaze/boot/dts.
+
+ For example, this is required when building a cuImage target
+ for an older U-Boot, which cannot pass a device tree itself.
+ Such a kernel will not work with a newer U-Boot that tries to
+ pass a device tree (unless you tell it not to). If your U-Boot
+ does not mention a device tree in "help bootm", then use the
+ cuImage target and specify a device tree here. Otherwise, use
+ the uImage target and leave this field blank.
+
+config ML401
+ bool
+ depends on MICROBLAZE_ML401
+ default y
+
+config SP3E
+ bool
+ depends on MICROBLAZE_SP3E
+ default y
+
+config XILINX_UNCACHED_SHADOW
+ bool "Are you using uncached shadow for RAM ?"
+ depends on MICROBLAZE
+ default y
+ help
+ This is needed to be able to allocate uncacheable memory regions.
+ The feature requires the design to define the RAM memory controller window
+ to be twice as large as the actual physical memory.
+
+config LARGE_ALLOCS
+ bool "Allow allocating large blocks (> 1MB) of memory"
+ help
+ Allow the slab memory allocator to keep chains for very large
+ memory sizes - up to 32MB. You may need this if your system has
+ a lot of RAM, and you need to able to allocate very large
+ contiguous chunks. If unsure, say N.
+
+comment "Boot options"
+
+# NOTE(review): CONFIG_CMDLINE is already defined earlier in this file
+# (under "Processor type and features", with prompt "Initial kernel
+# command string" and a non-empty default).  Kconfig symbols should be
+# defined once; these two definitions carry conflicting prompts and
+# defaults -- confirm which is intended and merge them.
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+ help
+ On some architectures there is currently no way for the boot loader
+ to pass arguments to the kernel. For these architectures, you should
+ supply some command-line options at build time by entering them
+ here.
+
+config CMDLINE_FORCE
+ bool "Force default kernel command string"
+ help
+ Set this to have arguments from the default kernel command string
+ override those passed by the boot loader
+
+endmenu
+
+config APM_EMULATION
+ bool
+
+source "mm/Kconfig"
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/microblaze/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
--- /dev/null
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+ bool
+ default y
+
+config EARLY_PRINTK_UARTLITE_ADDRESS
+ hex "Physical address where UART Lite for early printk is mapped"
+ default "0x40600000"
+ help
+ Please enter the physical address where your UART Lite is mapped.
+
+config DEBUG_BOOTMEM
+ depends on DEBUG_KERNEL
+ bool "Debug BOOTMEM initialization"
+
+endmenu
+
+
--- /dev/null
+UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
+
+
+# r31 holds current when in kernel mode
+CFLAGS += -ffixed-r31
+
+ifdef CONFIG_MICROBLAZE_USE_BARREL
+CFLAGS += -mxl-barrel-shift
+endif
+
+ifdef CONFIG_MICROBLAZE_USE_FPU
+CFLAGS += -mhard-float
+endif
+
+ifdef CONFIG_MICROBLAZE_USE_PCMP_INSTR
+CFLAGS += -mxl-pattern-compare
+endif
+
+ifdef CONFIG_MICROBLAZE_USE_HW_MUL
+CFLAGS += -mno-xl-soft-mul
+endif
+
+ifdef CONFIG_MICROBLAZE_USE_DIV
+CFLAGS += -mno-xl-soft-div
+endif
+
+LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
+
+LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
+
+head-y := arch/microblaze/kernel/head.o
+libs-y += arch/microblaze/lib/ $(LIBGCC)
+core-y += arch/microblaze/kernel/ arch/microblaze/mm/
+
+
+boot := arch/microblaze/boot
+
+all: linux.bin
+
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+linux.bin linux.bin.gz: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+
+define archhelp
+ echo '* linux.bin - Create raw binary'
+ echo ' linux.bin.gz - Create compressed raw binary'
+endef
--- /dev/null
+# arch/microblaze/boot/Makefile
+
+targets := linux.bin linux.bin.gz
+
+OBJCOPYFLAGS_linux.bin := -O binary
+
+$(obj)/linux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+
+$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
+ $(call if_changed,gzip)
+ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+
+CLEAN_FILES += arch/$(ARCH)/linux.bin arch/$(ARCH)/linux.bin.gz
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.23xlnx
+# Wed Jan 2 14:32:47 2008
+#
+CONFIG_MICROBLAZE=y
+# CONFIG_MMU is not set
+# CONFIG_SWAP is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_OF is not set
+# CONFIG_PCI is not set
+CONFIG_UID16=y
+CONFIG_DEFCONFIG_LIST="arch/$ARCH/defconfig"
+CONFIG_NO_IOPORT=y
+
+#
+# General setup
+#
+# CONFIG_EXPERIMENTAL is not set
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SYSVIPC is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_INITRAMFS_NO_CHECK is not set
+CONFIG_INITRAMFS_SOURCE="microblaze-rootfs.cpio"
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_FUTEX is not set
+CONFIG_ANON_INODES=y
+# CONFIG_EPOLL is not set
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=1
+# CONFIG_MODULES is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+
+#
+# Processor type and features
+#
+# CONFIG_MICROBLAZE_ML401 is not set
+CONFIG_MICROBLAZE_SP3E=y
+# CONFIG_MICROBLAZE_OTHER is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600"
+# CONFIG_WANT_DEVICE_TREE is not set
+CONFIG_SP3E=y
+# CONFIG_XILINX_UNCACHED_SHADOW is not set
+CONFIG_LARGE_ALLOCS=y
+
+#
+# Boot options
+#
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_BINFMT_SHARED_FLAT=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_PACKET is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_XILINX_SYSACE_OLD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_XILINX_DRIVERS=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_NET_ETHERNET is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_XILINX_TEMAC is not set
+# CONFIG_XILINX_LLTEMAC is not set
+CONFIG_NETDEV_10000=y
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_XILINXPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_XILINX_UARTLITE is not set
+# CONFIG_UNIX98_PTYS is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+CONFIG_XILINX_GPIO=y
+CONFIG_XILINX_HWICAP=y
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+# CONFIG_HID is not set
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+CONFIG_XILINX_EDK=y
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB_LEAK is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_UARTLITE_ADDRESS=0x40600000
+# CONFIG_DEBUG_BOOTMEM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_DMA=y
--- /dev/null
+#
+# Makefile
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y += init_task.o setup.o traps.o process.o time.o ptrace.o irq.o \
+ semaphore.o opb_intc.o opb_timer.o entry.o heartbeat.o \
+ sys_microblaze.o signal.o platform.o \
+ hw_exception_handler.o exceptions.o
+
+obj-y += cpu/
+
+obj-$(CONFIG_WANT_DEVICE_TREE) += of_platform.o of_device.o prom.o prom_parse.o system.dtb.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+# Resolve CONFIG_DEVICE_TREE to a .dts path: values starting with '/' are
+# taken as absolute, anything else is looked up under ../boot/dts/ relative
+# to this directory.  The $(CONFIG_DEVICE_TREE:"%"=%) substitution strips
+# the surrounding quotes that Kconfig stores in string values.
+dts = $(if $(shell echo $(CONFIG_DEVICE_TREE) | grep '^/'),\
+ ,$(srctree)/$(src)/../boot/dts/)$(CONFIG_DEVICE_TREE:"%"=%)
+
+# Compile the device tree source to a flattened device tree blob.
+$(obj)/system.dtb: $(dts)
+ dtc -f -O dtb -b 0 -V 16 $< > $@
+
+# Wrap the blob into a linkable object so it can be built into the kernel.
+%.dtb.o: %.dtb
+ ${CROSS_COMPILE}objcopy -I binary -O elf32-big $< $@
--- /dev/null
+/*
+ * arch/microblaze/kernel/asm-offset.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/thread_info.h>
+
+#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#define BLANK() asm volatile("\n->" : : )
+
+/*
+ * asm-offsets generator: each DEFINE() plants an "->SYM <value>" marker in
+ * the compiler's assembly output, which kbuild post-processes into
+ * preprocessor constants (asm-offsets.h) usable from hand-written assembly.
+ * This program is only compiled, never executed at runtime.
+ */
+int main(int argc, char *argv[])
+{
+ /* struct pt_regs */
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_MSR, offsetof(struct pt_regs, msr));
+ DEFINE(PT_EAR, offsetof(struct pt_regs, ear));
+ DEFINE(PT_ESR, offsetof(struct pt_regs, esr));
+ DEFINE(PT_FSR, offsetof(struct pt_regs, fsr));
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_R0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_R1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_R2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_R3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_R4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_R5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_R6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_R7, offsetof(struct pt_regs, r7));
+ DEFINE(PT_R8, offsetof(struct pt_regs, r8));
+ DEFINE(PT_R9, offsetof(struct pt_regs, r9));
+ DEFINE(PT_R10, offsetof(struct pt_regs, r10));
+ DEFINE(PT_R11, offsetof(struct pt_regs, r11));
+ DEFINE(PT_R12, offsetof(struct pt_regs, r12));
+ DEFINE(PT_R13, offsetof(struct pt_regs, r13));
+ DEFINE(PT_R14, offsetof(struct pt_regs, r14));
+ DEFINE(PT_R15, offsetof(struct pt_regs, r15));
+ DEFINE(PT_R16, offsetof(struct pt_regs, r16));
+ DEFINE(PT_R17, offsetof(struct pt_regs, r17));
+ DEFINE(PT_R18, offsetof(struct pt_regs, r18));
+ DEFINE(PT_R19, offsetof(struct pt_regs, r19));
+ DEFINE(PT_R20, offsetof(struct pt_regs, r20));
+ DEFINE(PT_R21, offsetof(struct pt_regs, r21));
+ DEFINE(PT_R22, offsetof(struct pt_regs, r22));
+ DEFINE(PT_R23, offsetof(struct pt_regs, r23));
+ DEFINE(PT_R24, offsetof(struct pt_regs, r24));
+ DEFINE(PT_R25, offsetof(struct pt_regs, r25));
+ DEFINE(PT_R26, offsetof(struct pt_regs, r26));
+ DEFINE(PT_R27, offsetof(struct pt_regs, r27));
+ DEFINE(PT_R28, offsetof(struct pt_regs, r28));
+ DEFINE(PT_R29, offsetof(struct pt_regs, r29));
+ DEFINE(PT_R30, offsetof(struct pt_regs, r30));
+ DEFINE(PT_R31, offsetof(struct pt_regs, r31));
+ DEFINE(PT_MODE, offsetof(struct pt_regs, kernel_mode));
+ BLANK();
+
+ /* Magic offsets for PTRACE PEEK/POKE etc */
+ /* NOTE(review): these are synthetic pseudo-offsets (sizeof()+1/2/3),
+    not real struct members; they must stay in sync with their ptrace
+    users -- confirm against arch ptrace code. */
+ DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs)+1);
+ DEFINE(PT_TEXT_LEN, sizeof(struct pt_regs)+2);
+ DEFINE(PT_DATA_ADDR, sizeof(struct pt_regs)+3);
+ BLANK();
+
+ /* struct task_struct */
+ DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
+
+ /* struct thread_info */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_STATUS, offsetof(struct thread_info, status));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block));
+ DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
+ BLANK();
+
+ /* struct cpu_context (register save area for context switch) */
+ DEFINE(CC_SP, offsetof(struct cpu_context, sp)); /* r1 */
+ DEFINE(CC_R2, offsetof(struct cpu_context, r2));
+ /* dedicated registers */
+ DEFINE(CC_R13, offsetof(struct cpu_context, r13));
+ DEFINE(CC_R14, offsetof(struct cpu_context, r14));
+ DEFINE(CC_R15, offsetof(struct cpu_context, r15));
+ DEFINE(CC_R16, offsetof(struct cpu_context, r16));
+ DEFINE(CC_R17, offsetof(struct cpu_context, r17));
+ DEFINE(CC_R18, offsetof(struct cpu_context, r18));
+ /* non-volatile registers */
+ DEFINE(CC_R19, offsetof(struct cpu_context, r19));
+ DEFINE(CC_R20, offsetof(struct cpu_context, r20));
+ DEFINE(CC_R21, offsetof(struct cpu_context, r21));
+ DEFINE(CC_R22, offsetof(struct cpu_context, r22));
+ DEFINE(CC_R23, offsetof(struct cpu_context, r23));
+ DEFINE(CC_R24, offsetof(struct cpu_context, r24));
+ DEFINE(CC_R25, offsetof(struct cpu_context, r25));
+ DEFINE(CC_R26, offsetof(struct cpu_context, r26));
+ DEFINE(CC_R27, offsetof(struct cpu_context, r27));
+ DEFINE(CC_R28, offsetof(struct cpu_context, r28));
+ DEFINE(CC_R29, offsetof(struct cpu_context, r29));
+ DEFINE(CC_R30, offsetof(struct cpu_context, r30));
+ /* special purpose registers */
+ DEFINE(CC_MSR, offsetof(struct cpu_context, msr));
+ DEFINE(CC_EAR, offsetof(struct cpu_context, ear));
+ DEFINE(CC_ESR, offsetof(struct cpu_context, esr));
+ DEFINE(CC_FSR, offsetof(struct cpu_context, fsr));
+ BLANK();
+
+ return 0;
+}
--- /dev/null
+
+# Build the appropriate CPU version support
+obj-y += mb.o
+
+obj-y += cache.o
+
+obj-y += pvr.o cpuinfo.o cpuinfo-static.o cpuinfo-pvr-full.o
+
+
--- /dev/null
+/*
+ * arch/microblaze/kernel/microblaze_cache.c --
+ * Cache control for MicroBlaze cache memories
+ *
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/cache.h>
+#include <asm/cpuinfo.h>
+
+/* We always align cache instructions. Previously, this was done with
+ FSL memory interfaces, but not PLB interfaces. Since PLB
+ interfaces are not present in current microblazes, we just assume
+ that these always have to be aligned.
+ */
+#define ALIGN_DCACHE_INSTRUCTIONS 1
+#define ALIGN_ICACHE_INSTRUCTIONS 1
+
+/* Exported functions. */
+
+/*
+ * __invalidate_icache_all - invalidate every line of the instruction cache.
+ *
+ * Walks [0, icache_size) in line-size steps issuing one invalidate per
+ * line, with interrupts disabled and the icache switched off for the
+ * duration.  No-op when the CPU was built without an icache.
+ */
+void __invalidate_icache_all (void)
+{
+ unsigned int i;
+ unsigned flags;
+ unsigned int cache_size = cpuinfo->icache_size;
+ unsigned int line_size = cpuinfo->icache_line;
+
+ if(!cpuinfo->use_icache)
+  return;
+
+ local_irq_save(flags);
+ __disable_icache();
+
+ /* Just loop through cache size and invalidate, no need to add
+ CACHE_BASE address */
+ for(i=0;i<cache_size;i+=line_size)
+  __invalidate_icache(i);
+
+ /* Note that the cache will be returned to its original state
+ when the status register is restored.*/
+ local_irq_restore(flags);
+}
+
+/*
+ * __invalidate_icache_range - invalidate icache lines covering [start, end).
+ *
+ * The range is first clipped to at most one full cache size (going further
+ * would only revisit the same cache lines), then -- when instruction
+ * alignment is enabled -- rounded outward to whole cache lines.  Runs with
+ * interrupts off and the icache disabled; the previous icache enable state
+ * returns when the saved status register is restored.
+ */
+void __invalidate_icache_range (unsigned long start, unsigned long end)
+{
+ unsigned int i;
+ unsigned flags;
+ unsigned int cache_size = cpuinfo->icache_size;
+ unsigned int line_size = cpuinfo->icache_line;
+#if ALIGN_ICACHE_INSTRUCTIONS==1
+ unsigned int align = ~(line_size - 1); /* mask: round down to line start */
+#endif
+
+ if(!cpuinfo->use_icache)
+  return;
+
+/* No need to cover entire cache range, just cover cache footprint */
+ end=min(start+cache_size, end);
+#if ALIGN_ICACHE_INSTRUCTIONS==1
+ start &= align; /* Make sure we are aligned */
+ end = ((end & align) + line_size); /* Push end up to the next cache line */
+#endif
+ local_irq_save(flags);
+ __disable_icache();
+
+ for(i=start;i<end;i+=line_size)
+  __invalidate_icache(i);
+
+ /* Note that the cache will be returned to its original state
+ when the status register is restored.*/
+ local_irq_restore(flags);
+}
+
+/*
+ * __invalidate_dcache_all - invalidate every line of the data cache.
+ *
+ * Walks [0, dcache_size) in line-size steps issuing one invalidate per
+ * line, with interrupts disabled and the dcache switched off for the
+ * duration.  No-op when the CPU was built without a dcache.
+ */
+void __invalidate_dcache_all (void)
+{
+ unsigned int i;
+ unsigned flags;
+ unsigned int cache_size = cpuinfo->dcache_size;
+ unsigned int line_size = cpuinfo->dcache_line;
+
+ if(!cpuinfo->use_dcache)
+  return;
+
+ local_irq_save(flags);
+ __disable_dcache();
+
+ /* Just loop through cache size and invalidate, no need to add
+ CACHE_BASE address */
+ for(i=0;i<cache_size;i+=line_size)
+  __invalidate_dcache(i);
+
+ /* Note that the cache will be returned to its original state
+ when the status register is restored.*/
+ local_irq_restore(flags);
+}
+
+/*
+ * __invalidate_dcache_range - invalidate dcache lines covering [start, end).
+ *
+ * Mirrors __invalidate_icache_range(): the span is clipped to one cache
+ * size, optionally aligned outward to whole cache lines, and walked with
+ * interrupts off and the dcache disabled.  The previous dcache enable
+ * state returns when the saved status register is restored.
+ */
+void __invalidate_dcache_range (unsigned long start, unsigned long end)
+{
+ unsigned int i;
+ unsigned flags;
+ unsigned int cache_size = cpuinfo->dcache_size;
+ unsigned int line_size = cpuinfo->dcache_line;
+#if ALIGN_DCACHE_INSTRUCTIONS==1
+ unsigned int align = ~(line_size - 1); /* mask: round down to line start */
+#endif
+
+ if(!cpuinfo->use_dcache)
+  return;
+
+ /* No need to cover entire cache range, just cover cache footprint */
+ end=min(start+cache_size, end);
+#if ALIGN_DCACHE_INSTRUCTIONS==1
+ start &= align; /* Make sure we are aligned */
+ end = ((end & align) + line_size); /* Push end up to the next cache line */
+#endif
+ local_irq_save(flags);
+ __disable_dcache();
+
+ for(i=start;i<end;i+=line_size)
+  __invalidate_dcache(i);
+
+ /* Note that the cache will be returned to its original state
+ when the status register is restored.*/
+ local_irq_restore(flags);
+}
+
--- /dev/null
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/autoconf.h>
+#include <asm/pvr.h>
+#include <asm/cpuinfo.h>
+
+/* Helper macro to map between fields in our struct cpuinfo, and
+ the PVR macros in pvr.h.
+ */
+#define CI(c,p) ci->c=PVR_##p(pvr)
+
+/*
+ * set_cpuinfo_pvr_full - populate *ci from the CPU's Processor Version
+ * Registers (PVR).
+ *
+ * Used when the CPU advertises full PVR support.  Each CI(field, MACRO)
+ * line copies one PVR bitfield (via the PVR_* accessors from pvr.h) into
+ * the corresponding struct cpuinfo member.
+ */
+void set_cpuinfo_pvr_full(struct cpuinfo *ci)
+{
+ struct pvr_s pvr;
+ get_pvr(&pvr); /* read the PVR words from the hardware */
+
+ /* instruction-set / execution-unit options */
+ CI(use_barrel,USE_BARREL);
+ CI(use_divider,USE_DIV);
+ CI(use_mult,USE_HW_MUL);
+ CI(use_fpu,USE_FPU);
+
+ CI(use_mul_64,USE_MUL64);
+ CI(use_msr_instr,USE_MSR_INSTR);
+ CI(use_pcmp_instr, USE_PCMP_INSTR);
+ CI(ver_code, VERSION);
+
+ /* instruction cache geometry */
+ CI(use_icache, USE_ICACHE);
+ CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
+ CI(icache_write, ICACHE_ALLOW_WR);
+ CI(icache_line, ICACHE_LINE_LEN);
+ CI(icache_size, ICACHE_BYTE_SIZE);
+ CI(icache_base, ICACHE_BASEADDR);
+ CI(icache_high, ICACHE_HIGHADDR);
+
+ /* data cache geometry */
+ CI(use_dcache, USE_DCACHE);
+ CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
+ CI(dcache_write, DCACHE_ALLOW_WR);
+ CI(dcache_line, DCACHE_LINE_LEN);
+ CI(dcache_size, DCACHE_BYTE_SIZE);
+ CI(dcache_base, DCACHE_BASEADDR);
+ CI(dcache_high, DCACHE_HIGHADDR);
+
+ /* bus connections (OPB/LMB) and FSL link count */
+ CI(use_dopb, D_OPB);
+ CI(use_iopb, I_OPB);
+ CI(use_dlmb, D_LMB);
+ CI(use_ilmb, I_LMB);
+ CI(num_fsl, FSL_LINKS);
+
+ /* interrupt line configuration */
+ CI(irq_edge, INTERRUPT_IS_EDGE);
+ CI(irq_positive, EDGE_IS_POSITIVE);
+
+ /* hardware exception support */
+ CI(area_optimised, AREA_OPTIMISED);
+ CI(opcode_0_illegal, OPCODE_0x0_ILLEGAL);
+ CI(exc_unaligned, UNALIGNED_EXCEPTION);
+ CI(exc_ill_opcode, ILL_OPCODE_EXCEPTION);
+ CI(exc_iopb, IOPB_BUS_EXCEPTION);
+ CI(exc_dopb, DOPB_BUS_EXCEPTION);
+ CI(exc_div_zero, DIV_ZERO_EXCEPTION);
+ CI(exc_fpu, FPU_EXCEPTION);
+
+ /* hardware debug support */
+ CI(hw_debug, DEBUG_ENABLED);
+ CI(num_pc_brk, NUMBER_OF_PC_BRK);
+ CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK);
+ CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK);
+
+ CI(fpga_family_code, TARGET_FAMILY);
+}
+
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/autoconf.h>
+#include <asm/cpuinfo.h>
+
+#define CPUINFO(x) XPAR_MICROBLAZE_0_##x
+
+/*
+ * Compile-time CPU description built from the Xilinx xparameters.h
+ * constants (XPAR_MICROBLAZE_0_*), used when the CPU provides no PVR.
+ * Fields set to -1 are not available from xparameters.
+ * NOTE(review): uses the obsolete GCC "field : value" initializer syntax;
+ * consider C99 ".field = value" designators.
+ */
+static struct cpuinfo cpuinfo_static={
+ use_barrel : CPUINFO(USE_BARREL),
+ use_divider : CPUINFO(USE_DIV),
+ use_mult : CPUINFO(USE_HW_MUL)>0,
+ use_fpu : CPUINFO(USE_FPU),
+ /* any hardware exception source implies exception support */
+ use_exception : (CPUINFO(UNALIGNED_EXCEPTIONS) || \
+ CPUINFO(ILL_OPCODE_EXCEPTION) ||
+ CPUINFO(IOPB_BUS_EXCEPTION) ||
+ CPUINFO(DOPB_BUS_EXCEPTION) ||
+ CPUINFO(DIV_ZERO_EXCEPTION) ||
+ CPUINFO(FPU_EXCEPTION) != 0),
+ use_mul_64 : CPUINFO(USE_HW_MUL)==2,
+ use_msr_instr : CPUINFO(USE_MSR_INSTR),
+ use_pcmp_instr : CPUINFO(USE_PCMP_INSTR),
+ ver_code : -1,
+
+ use_icache : CPUINFO(USE_ICACHE),
+ /* NOTE(review): icache uses ADDR_TAG_BITS / CACHE_BYTE_SIZE while the
+    dcache entries below use DCACHE_-prefixed names -- confirm these
+    asymmetric XPAR macro names against the generated xparameters.h. */
+ icache_tagbits : CPUINFO(ADDR_TAG_BITS),
+ icache_write : CPUINFO(ALLOW_ICACHE_WR),
+#if XPAR_MICROBLAZE_0_ICACHE_USE_FSL==1
+ icache_line : 16,
+#else
+ icache_line : 4,
+#endif
+ icache_size : CPUINFO(CACHE_BYTE_SIZE),
+ icache_base : CPUINFO(ICACHE_BASEADDR),
+ icache_high : CPUINFO(ICACHE_HIGHADDR),
+
+ use_dcache : CPUINFO(USE_DCACHE),
+ dcache_tagbits : CPUINFO(DCACHE_ADDR_TAG),
+ dcache_write : CPUINFO(ALLOW_DCACHE_WR),
+#if XPAR_MICROBLAZE_0_DCACHE_USE_FSL==1
+ dcache_line : 16,
+#else
+ dcache_line : 4,
+#endif
+ dcache_size : CPUINFO(DCACHE_BYTE_SIZE),
+ dcache_base : CPUINFO(DCACHE_BASEADDR),
+ dcache_high : CPUINFO(DCACHE_HIGHADDR),
+
+ /* Bus connections */
+ use_dopb : CPUINFO(D_OPB),
+ use_iopb : CPUINFO(I_OPB),
+ use_dlmb : CPUINFO(D_LMB),
+ use_ilmb : CPUINFO(I_LMB),
+ num_fsl : CPUINFO(FSL_LINKS),
+
+ /* CPU interrupt line info */
+ irq_edge : CPUINFO(INTERRUPT_IS_EDGE),
+ irq_positive : CPUINFO(EDGE_IS_POSITIVE),
+
+ area_optimised : -1,
+
+ /* HW support for CPU exceptions */
+ opcode_0_illegal: -1,
+ exc_unaligned : CPUINFO(UNALIGNED_EXCEPTIONS),
+ exc_ill_opcode : CPUINFO(ILL_OPCODE_EXCEPTION),
+ exc_iopb : CPUINFO(IOPB_BUS_EXCEPTION),
+ exc_dopb : CPUINFO(DOPB_BUS_EXCEPTION),
+ exc_div_zero : CPUINFO(DIV_ZERO_EXCEPTION),
+ exc_fpu : CPUINFO(FPU_EXCEPTION),
+
+ /* HW debug support */
+ hw_debug : CPUINFO(DEBUG_ENABLED),
+// -wgr- num_pc_brk : CPUINFO(NUMBER_OF_PC_BRK),
+ num_rd_brk : CPUINFO(NUMBER_OF_RD_ADDR_BRK),
+ num_wr_brk : CPUINFO(NUMBER_OF_WR_ADDR_BRK),
+
+ /* FPGA family */
+ fpga_family_code: -1,
+};
+
+/*
+ * Populate *ci from the build-time cpuinfo_static template.
+ * Called by setup_cpuinfo() when the CPU has no PVR support.
+ */
+void __init set_cpuinfo_static(struct cpuinfo *ci)
+{
+ printk(KERN_INFO "set_cpuinfo_static: Using static CPU info.\n");
+ /* whole-struct assignment; equivalent to memcpy() of the descriptor */
+ *ci = cpuinfo_static;
+}
+
--- /dev/null
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/autoconf.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+static struct cpuinfo the_cpuinfo;
+struct cpuinfo *cpuinfo = &the_cpuinfo;
+
+/*
+ * Fill in the global cpuinfo descriptor at boot.
+ *
+ * Probes the CPU for PVR support: level 0 (no PVR) and any unrecognised
+ * level fall back to the static compile-time data; level 2 reads the
+ * full PVR register set.  Level 1 (partial PVR) is not implemented yet,
+ * hence the #if 0 branch kept as a placeholder.
+ */
+void __init setup_cpuinfo(void)
+{
+ int pvr_level = cpu_has_pvr();
+ printk(KERN_INFO "%s: initialising\n", __FUNCTION__);
+
+ switch(pvr_level) {
+ case 0 :
+ printk(KERN_WARNING "%s: No PVR support in CPU. Using static compile-time info\n", __FUNCTION__);
+ set_cpuinfo_static(cpuinfo);
+ break;
+#if 0
+ case 1 : set_cpuinfo_pvr_partial(cpuinfo);
+ break;
+#endif
+ case 2 :
+ printk(KERN_INFO "%s: Using full CPU PVR support\n",__FUNCTION__);
+ set_cpuinfo_pvr_full(cpuinfo);
+ break;
+ default:
+ /* unknown PVR level: warn loudly but still boot on static data */
+ WARN_ON(1);
+ set_cpuinfo_static(cpuinfo);
+ }
+}
+
--- /dev/null
+/*
+ * arch/microblaze/kernel/cpu/mb4.c
+ *
+ * CPU-version specific code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 PetaLogix
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/bug.h>
+#include <asm/param.h>
+
+/*
+ * /proc/cpuinfo "show" callback: prints the static hardware configuration
+ * taken from the XPAR_* build-time parameters, plus BogoMips.
+ *
+ * Always returns 0; the seq_file layer tracks buffer overflow itself.
+ * The previous revision accumulated seq_printf() return values into a
+ * local `count` that was never used — that dead code has been removed.
+ */
+static int show_cpuinfo (struct seq_file *m, void *v)
+{
+ extern unsigned long loops_per_jiffy;
+
+ seq_printf (m,
+ "CPU-Family: Microblaze\n"
+ "FPGA-Arch: %s\n"
+ "CPU-Ver: %s\n"
+ "CPU-MHz: %d.%02d\n"
+ "BogoMips: %lu.%02lu\n",
+ XPAR_MICROBLAZE_0_FAMILY,
+ XPAR_MICROBLAZE_0_HW_VER,
+ XPAR_CPU_CLOCK_FREQ/1000000,
+ XPAR_CPU_CLOCK_FREQ % 1000000,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
+
+ seq_printf(m,
+ "HW-Div: %s\n"
+ "HW-Shift: %s\n",
+ XPAR_MICROBLAZE_0_USE_DIV ? "yes":"no",
+ XPAR_MICROBLAZE_0_USE_BARREL ? "yes":"no");
+
+ if(XPAR_MICROBLAZE_0_USE_ICACHE)
+ seq_printf (m,
+ "Icache: %ukB\n",
+ XPAR_MICROBLAZE_0_CACHE_BYTE_SIZE >> 10);
+ else
+ seq_printf (m,
+ "Icache: no\n");
+
+ if(XPAR_MICROBLAZE_0_USE_DCACHE)
+ seq_printf (m,
+ "Dcache: %ukB\n",
+ XPAR_MICROBLAZE_0_DCACHE_BYTE_SIZE >> 10);
+ else
+ seq_printf (m,
+ "Dcache: no\n");
+
+ seq_printf(m,
+ "HW-Debug: %s\n",
+ XPAR_MICROBLAZE_0_DEBUG_ENABLED ? "yes":"no");
+
+ return 0;
+}
+
+/*
+ * seq_file iterator start: CPU slots are encoded as the non-NULL
+ * cookies 1..NR_CPUS; NULL terminates the iteration.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ int idx = *pos;
+
+ if (idx >= NR_CPUS)
+ return NULL;
+ return (void *) (idx + 1);
+}
+
+/* seq_file iterator step: advance the position and re-enter c_start(). */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return c_start(m, pos);
+}
+
+/* seq_file iterator teardown — nothing to release. */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/* seq_file operations backing /proc/cpuinfo */
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next  = c_next,
+ .stop  = c_stop,
+ .show  = show_cpuinfo,
+};
+
--- /dev/null
+/*
+ arch/microblaze/kernel/cpu/pvr.c
+
+ Support for MicroBlaze PVR (processor version register)
+
+ (c) 2007 John Williams <john.williams@petalogix.com>
+ (c) 2007 PetaLogix
+
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/system.h>
+#include <asm/exceptions.h>
+#include <asm/pvr.h>
+
+/* Until we get an assembler that knows about the pvr registers,
+ this horrible cruft will have to do.
+ That hardcoded opcode is mfs r3, rpvrNN.
+
+ Note: pvrid is stringized and pasted directly into the .byte opcode
+ sequence, so callers MUST pass a compile-time literal (see get_pvr()). */
+#if 1
+#define get_single_pvr(pvrid, val) \
+{ \
+ register unsigned tmp __asm__ ("r3"); \
+ tmp = 0x0; /* Prevent warning about unused */ \
+ __asm__ __volatile__ (".byte 0x94,0x60,0xa0, " #pvrid "\n\t" : "=r" (tmp):: "memory"); \
+ val=tmp; \
+}
+#else
+/* Preferred form once binutils understands rpvrNN */
+#define get_single_pvr(pvrid, val) \
+ __asm__ __volatile__ ("mfs %0, rpvr" #pvrid "\n\t" : "=r" (val))
+
+#endif
+
+/* Does the CPU support the PVR register?
+ return value:
+ 0: no PVR
+ 1: simple PVR
+ 2: full PVR
+
+ This must work on all CPU versions, including those before the
+ PVR was even an option.
+*/
+/*
+ * Probe for PVR support; see the level contract in the comment above
+ * (0 = none, 1 = simple, 2 = full).  Runs with interrupts disabled so
+ * the probe instruction sequence is not interrupted.
+ *
+ * NOTE(review): this assumes local_irq_save() leaves the raw MSR value
+ * in `flags`, so PVR_MSR_BIT can be tested directly — confirm against
+ * asm/system.h for this port.
+ */
+int cpu_has_pvr(void)
+{
+ unsigned flags;
+ unsigned pvr0;
+ int ret=0;
+
+ local_irq_save(flags);
+
+ /* PVR bit in MSR tells us if there is any support */
+ if(!(flags & PVR_MSR_BIT))
+  goto out;
+
+ get_single_pvr(0x00,pvr0);
+ pr_debug("%s: pvr0 is 0x%08x\n",__FUNCTION__, pvr0);
+
+ /* PVR0 distinguishes the simple (single-word) from the full set */
+ if(pvr0 & PVR0_PVR_FULL_MASK)
+  ret=2;
+ else
+  ret=1;
+
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+
+/*
+ * Read all 12 PVR words into *p.
+ *
+ * This must stay fully unrolled: get_single_pvr() stringizes its first
+ * argument into the hand-assembled opcode bytes, so the register number
+ * has to be a compile-time literal — a loop variable cannot be used.
+ */
+void get_pvr(struct pvr_s *p)
+{
+ get_single_pvr(0, p->pvr[0]);
+ get_single_pvr(1, p->pvr[1]);
+ get_single_pvr(2, p->pvr[2]);
+ get_single_pvr(3, p->pvr[3]);
+ get_single_pvr(4, p->pvr[4]);
+ get_single_pvr(5, p->pvr[5]);
+ get_single_pvr(6, p->pvr[6]);
+ get_single_pvr(7, p->pvr[7]);
+ get_single_pvr(8, p->pvr[8]);
+ get_single_pvr(9, p->pvr[9]);
+ get_single_pvr(10, p->pvr[10]);
+ get_single_pvr(11, p->pvr[11]);
+}
+
+
+
--- /dev/null
+/*
+ * arch/microblaze/kernel/early_printk.c
+ *
+ * Copyright (c) 2003-2006 Yasushi SHOJI <yashi@atmark-techno.com>
+ *
+ * Early printk support for Microblaze.
+ *
+ * - Once we get a system without a uartlite, we will need to refactor.
+ */
+
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/fcntl.h>
+#include <asm/xparameters.h>
+
+/* Base of the uartlite used for early console output: either an explicit
+ * Kconfig override or the first UART from the Xilinx xparameters. */
+#ifdef CONFIG_EARLY_PRINTK_UARTLITE_ADDRESS
+#define BASE_ADDR ((unsigned char *)CONFIG_EARLY_PRINTK_UARTLITE_ADDRESS)
+#else
+#define BASE_ADDR ((unsigned char *)XPAR_UART_0_BASEADDR)
+#endif
+/* Xilinx uartlite register map: four 32-bit registers at offsets 0..12 */
+#define RX_FIFO BASE_ADDR
+#define TX_FIFO ((unsigned long *)(BASE_ADDR + 4))
+#define STATUS ((unsigned long *)(BASE_ADDR + 8))
+#define CONTROL ((unsigned long *)(BASE_ADDR + 12))
+
+/*
+ * Emit one byte on the uartlite, busy-waiting while the TX FIFO is full
+ * (status bit 3 = TX-FIFO-full per the uartlite data sheet).
+ */
+static void early_printk_putc(char c)
+{
+ while (ioread32(STATUS) & (1<<3));
+ iowrite32((c & 0xff), TX_FIFO);
+}
+
+/*
+ * Console write callback: emit up to n characters, stopping early at a
+ * NUL, and expand "\n" to "\n\r" for dumb terminals.
+ */
+static void early_printk_write(struct console * unused, const char *s, unsigned n)
+{
+ for (; n > 0 && *s; n--, s++) {
+  early_printk_putc(*s);
+  if (*s == '\n')
+   early_printk_putc('\r');
+ }
+}
+
+/* Minimal write-only console used before the real uartlite driver is up */
+static struct console early_serial_console = {
+ .name = "earlyser",
+ .write = early_printk_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/* Direct interface for emergencies */
+struct console *early_console = &early_serial_console;
+static int early_console_initialized = 0;  /* set by setup_early_printk() */
+
+/*
+ * printf-style output straight to the early console.
+ * Silently a no-op until setup_early_printk() has run; output is
+ * truncated to 512 bytes per call.
+ */
+void early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ if (early_console_initialized) {
+  va_start(ap, fmt);
+  n = vscnprintf(buf, 512, fmt, ap);
+  early_console->write(early_console, buf, n);
+  va_end(ap);
+ }
+}
+
+/* non-zero if "keep" was passed: early console stays registered forever */
+static int __initdata keep_early;
+
+/*
+ * Handler for the "earlyprintk=" boot option.
+ * Registers the early serial console; "keep" anywhere in the first word
+ * of the option prevents disable_early_printk() from unregistering it.
+ * Returns 1 if already initialized (option consumed, nothing to do).
+ */
+int __init setup_early_printk(char *opt)
+{
+ char *space;
+ char buf[256];
+
+ if (early_console_initialized)
+  return 1;
+
+ /* examine only the first space-separated token of the option */
+ strlcpy(buf, opt, sizeof(buf));
+ space = strchr(buf, ' ');
+ if (space)
+  *space = 0;
+
+ if (strstr(buf,"keep"))
+  keep_early = 1;
+
+ early_console = &early_serial_console;
+ early_console_initialized = 1;
+ register_console(early_console);
+ return 0;
+}
+
+/*
+ * Called once the real console is up: unregister the early console,
+ * unless the user asked to keep it via "earlyprintk=...keep".
+ */
+void __init disable_early_printk(void)
+{
+ if (!early_console_initialized || !early_console)
+  return;
+
+ if (keep_early) {
+  printk("keeping early console\n");
+  return;
+ }
+
+ printk("disabling early console\n");
+ unregister_console(early_console);
+ early_console_initialized = 0;
+}
+
+__setup("earlyprintk=", setup_early_printk);
--- /dev/null
+/*
+ * arch/microblaze/kernel/entry.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/xparameters.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/asm-offsets.h>
+#include <asm/registers.h>
+#include <asm/unistd.h>
+#include <asm/percpu.h>
+#include <asm/signal.h>
+
+/* Expand to the linker-visible symbol name of a per-CPU variable. */
+#define PER_CPU(var) per_cpu__##var
+
+/*
+ * IRQ / BIP helper macros.
+ * When the core is built with the MSR instructions, msrclr/msrset update
+ * single MSR bits atomically without a scratch register.  Otherwise we
+ * fall back to a read-modify-write of rmsr — note the fallback versions
+ * clobber r11.
+ */
+#if XPAR_MICROBLAZE_0_USE_MSR_INSTR
+ .macro disable_irq
+ msrclr r0, MSR_IE
+ .endm
+
+ .macro enable_irq
+ msrset r0, MSR_IE
+ .endm
+
+ .macro clear_bip
+ msrclr r0, MSR_BIP
+ .endm
+#else
+ .macro disable_irq
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_IE
+ mts rmsr, r11
+ .endm
+
+ .macro enable_irq
+ mfs r11, rmsr
+ ori r11, r11, MSR_IE
+ mts rmsr, r11
+ .endm
+
+ .macro clear_bip
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_BIP
+ mts rmsr, r11
+ .endm
+#endif
+
+/*
+ * Hardware interrupt entry.
+ * Builds a full pt_regs frame, switching to the task's kernel stack when
+ * the interrupt arrived from user mode (per-CPU KM flag == 0), then
+ * calls do_IRQ(regs).  On return, reschedule/signal work runs only when
+ * going back to user mode; finally the whole context is restored and we
+ * return with rtid (r14 holds the interrupted PC).
+ */
+ENTRY(_interrupt)
+ swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
+ swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
+ lwi r11, r0, PER_CPU(KM) /* load mode indicator */
+ beqid r11, 1f
+ nop
+ brid 2f /* jump over */
+ addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
+1: /* switch to kernel stack */
+ lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
+ lwi r1, r1, TS_THREAD_INFO /* get the thread info */
+ addik r1, r1, THREAD_SIZE - PT_SIZE /* calculate kernel stack pointer */
+2:
+ swi r11, r1, PT_MODE /* store the mode */
+ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
+ swi r2, r1, PT_R2
+ swi r3, r1, PT_R3
+ swi r4, r1, PT_R4
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ swi r12, r1, PT_R12
+ swi r13, r1, PT_R13
+ /* r14 is the interrupt return address: saved both as r14 and as PC */
+ swi r14, r1, PT_R14
+ swi r14, r1, PT_PC
+ swi r15, r1, PT_R15
+ swi r16, r1, PT_R16
+ swi r17, r1, PT_R17
+ swi r18, r1, PT_R18
+ swi r19, r1, PT_R19
+ swi r20, r1, PT_R20
+ swi r21, r1, PT_R21
+ swi r22, r1, PT_R22
+ swi r23, r1, PT_R23
+ swi r24, r1, PT_R24
+ swi r25, r1, PT_R25
+ swi r26, r1, PT_R26
+ swi r27, r1, PT_R27
+ swi r28, r1, PT_R28
+ swi r29, r1, PT_R29
+ swi r30, r1, PT_R30
+ swi r31, r1, PT_R31
+ /* special purpose registers */
+ mfs r11, rmsr
+ swi r11, r1, PT_MSR
+ mfs r11, rear
+ swi r11, r1, PT_EAR
+ mfs r11, resr
+ swi r11, r1, PT_ESR
+ mfs r11, rfsr
+ swi r11, r1, PT_FSR
+ /* reload original stack pointer and save it */
+ lwi r11, r0, PER_CPU(ENTRY_SP)
+ swi r11, r1, PT_R1
+ /* update mode indicator we are in kernel mode */
+ addik r11, r0, 1
+ swi r11, r0, PER_CPU(KM)
+ /* restore r31 */
+ lwi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* prepare the link register, the argument and jump */
+ la r15, r0, ret_from_intr - 8
+ addk r6, r0, r15
+ braid do_IRQ
+ add r5, r0, r1
+
+ret_from_intr:
+ lwi r11, r1, PT_MODE
+ /* returning to kernel mode: skip the user-mode-only work below.
+  * Note bneid's delay slot is the lwi below, which is harmless in
+  * either path. */
+ bneid r11, 3f
+
+
+ lwi r6, r31, TS_THREAD_INFO /* get thread info */
+ lwi r19, r6, TI_FLAGS /* get flags in thread info */
+ /* do an extra work if any bits are set */
+
+ andi r11, r19, _TIF_NEED_RESCHED
+ beqi r11, 1f
+ bralid r15, schedule
+ nop
+1: andi r11, r19, _TIF_SIGPENDING
+ beqid r11, no_intr_reshed
+ addk r5, r1, r0
+ addk r7, r0, r0
+ bralid r15, do_signal
+ addk r6, r0, r0
+
+no_intr_reshed:
+ /* save mode indicator */
+ lwi r11, r1, PT_MODE
+3:
+ swi r11, r0, PER_CPU(KM)
+
+ /* save r31 */
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+restore_context:
+ /* special purpose registers */
+ lwi r11, r1, PT_FSR
+ mts rfsr, r11
+ lwi r11, r1, PT_ESR
+ mts resr, r11
+ lwi r11, r1, PT_EAR
+ mts rear, r11
+ lwi r11, r1, PT_MSR
+ mts rmsr, r11
+
+ lwi r31, r1, PT_R31
+ lwi r30, r1, PT_R30
+ lwi r29, r1, PT_R29
+ lwi r28, r1, PT_R28
+ lwi r27, r1, PT_R27
+ lwi r26, r1, PT_R26
+ lwi r25, r1, PT_R25
+ lwi r24, r1, PT_R24
+ lwi r23, r1, PT_R23
+ lwi r22, r1, PT_R22
+ lwi r21, r1, PT_R21
+ lwi r20, r1, PT_R20
+ lwi r19, r1, PT_R19
+ lwi r18, r1, PT_R18
+ lwi r17, r1, PT_R17
+ lwi r16, r1, PT_R16
+ lwi r15, r1, PT_R15
+ lwi r14, r1, PT_PC
+ lwi r13, r1, PT_R13
+ lwi r12, r1, PT_R12
+ lwi r11, r1, PT_R11
+ lwi r10, r1, PT_R10
+ lwi r9, r1, PT_R9
+ lwi r8, r1, PT_R8
+ lwi r7, r1, PT_R7
+ lwi r6, r1, PT_R6
+ lwi r5, r1, PT_R5
+ lwi r4, r1, PT_R4
+ lwi r3, r1, PT_R3
+ lwi r2, r1, PT_R2
+ lwi r1, r1, PT_R1 /* restore sp last */
+ rtid r14, 0
+ nop
+
+/* Reset vector target: spin in place (branch-to-self). */
+ENTRY(_reset)
+ bri 0
+
+/*
+ * System-call entry (user exception vector, reached via brki r14, 0x08).
+ * Builds a pt_regs frame (switching to the kernel stack when coming from
+ * user mode), validates the syscall number in r12 and dispatches through
+ * sys_call_table; out-of-range numbers return -ENOSYS.
+ *
+ * Fixes vs. the previous revision:
+ *  - removed the duplicated re-save of r11 after the stack switch: r11
+ *    held the KM flag (0) at that point, so the save clobbered the real
+ *    user r11 preserved in R11_SAVE and every user syscall returned with
+ *    r11 zeroed;
+ *  - closed the unterminated comment after the bounds check, which was
+ *    swallowing the following comment line.
+ */
+ENTRY(_user_exception)
+ swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
+ swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
+ lwi r11, r0, PER_CPU(KM) /* load mode indicator */
+ beqid r11, 1f /* Already in kernel mode? */
+ nop
+ brid 2f /* jump over */
+ addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
+1: /* Switch to kernel stack */
+ lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
+ lwi r1, r1, TS_THREAD_INFO /* get the thread info */
+ addik r1, r1, THREAD_SIZE - PT_SIZE /* calculate kernel stack pointer */
+2:
+ swi r11, r1, PT_MODE /* store the mode */
+ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
+ /* save them on stack */
+ swi r2, r1, PT_R2
+ swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
+ swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ swi r12, r1, PT_R12 /* r12: _always_ in clobber list; see unistd.h */
+ swi r13, r1, PT_R13
+ swi r14, r1, PT_R14 /* r14: _always_ in clobber list; see unistd.h */
+ addik r14, r14, 0x4 /* but we want to return to the next inst. */
+ swi r14, r1, PT_PC /* increment by 4 and store in pc */
+ swi r15, r1, PT_R15
+ swi r16, r1, PT_R16
+ swi r17, r1, PT_R17
+ swi r18, r1, PT_R18
+ swi r19, r1, PT_R19
+ swi r20, r1, PT_R20
+ swi r21, r1, PT_R21
+ swi r22, r1, PT_R22
+ swi r23, r1, PT_R23
+ swi r24, r1, PT_R24
+ swi r25, r1, PT_R25
+ swi r26, r1, PT_R26
+ swi r27, r1, PT_R27
+ swi r28, r1, PT_R28
+ swi r29, r1, PT_R29
+ swi r30, r1, PT_R30
+ swi r31, r1, PT_R31
+
+ disable_irq
+ nop /* make sure IE bit is in effect */
+ clear_bip /* once IE is in effect it is safe to clear BIP */
+ nop
+
+ /* special purpose registers */
+ mfs r11, rmsr
+ swi r11, r1, PT_MSR
+ mfs r11, rear
+ swi r11, r1, PT_EAR
+ mfs r11, resr
+ swi r11, r1, PT_ESR
+ mfs r11, rfsr
+ swi r11, r1, PT_FSR
+ /* reload original stack pointer and save it */
+ lwi r11, r0, PER_CPU(ENTRY_SP)
+ swi r11, r1, PT_R1
+ /* update mode indicator we are in kernel mode */
+ addik r11, r0, 1
+ swi r11, r0, PER_CPU(KM)
+ /* restore r31 */
+ lwi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* re-enable interrupts now we are in kernel mode */
+ enable_irq
+
+ addi r11, r12, -NR_syscalls /* See if the system call number is valid. */
+ bgei r11, 1f /* return to user if not valid */
+ /* Figure out which function to use for this system call. */
+ /* Note Microblaze barrel shift is optional, so don't rely on it */
+ add r12, r12, r12 /* convert num -> ptr */
+ add r12, r12, r12
+ lwi r12, r12, sys_call_table /* Get function pointer */
+ la r15, r0, ret_to_user-8 /* set return address */
+ bra r12 /* Make the system call. */
+ bri 0 /* won't reach here */
+1:
+ brid ret_to_user /* jump to syscall epilogue */
+ addi r3, r0, -ENOSYS /* set errno in delay slot */
+
+
+/* Debug traps are like a system call, but entered via brki r14, 0x60
+ All we need to do is send the SIGTRAP signal to current, ptrace and do_signal
+ will handle the rest */
+ENTRY(_debug_exception)
+ swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
+ /* Unconditionally switch to the task's kernel stack */
+ lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
+ lwi r1, r1, TS_THREAD_INFO /* get the thread info */
+ addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
+ swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
+ lwi r11, r0, PER_CPU(KM) /* load mode indicator */
+//save_context:
+ swi r11, r1, PT_MODE /* store the mode */
+ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
+ /* save them on stack */
+ swi r2, r1, PT_R2
+ swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
+ swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ swi r12, r1, PT_R12 /* r12: _always_ in clobber list; see unistd.h */
+ swi r13, r1, PT_R13
+ swi r14, r1, PT_R14 /* r14: _always_ in clobber list; see unistd.h */
+ swi r14, r1, PT_PC /* Will return to interrupted instruction */
+ swi r15, r1, PT_R15
+ swi r16, r1, PT_R16
+ swi r17, r1, PT_R17
+ swi r18, r1, PT_R18
+ swi r19, r1, PT_R19
+ swi r20, r1, PT_R20
+ swi r21, r1, PT_R21
+ swi r22, r1, PT_R22
+ swi r23, r1, PT_R23
+ swi r24, r1, PT_R24
+ swi r25, r1, PT_R25
+ swi r26, r1, PT_R26
+ swi r27, r1, PT_R27
+ swi r28, r1, PT_R28
+ swi r29, r1, PT_R29
+ swi r30, r1, PT_R30
+ swi r31, r1, PT_R31
+
+ disable_irq
+ nop /* make sure IE bit is in effect */
+ clear_bip /* once IE is in effect it is safe to clear BIP */
+ nop
+
+ /* special purpose registers */
+ mfs r11, rmsr
+ swi r11, r1, PT_MSR
+ mfs r11, rear
+ swi r11, r1, PT_EAR
+ mfs r11, resr
+ swi r11, r1, PT_ESR
+ mfs r11, rfsr
+ swi r11, r1, PT_FSR
+ /* reload original stack pointer and save it */
+ lwi r11, r0, PER_CPU(ENTRY_SP)
+ swi r11, r1, PT_R1
+ /* update mode indicator we are in kernel mode */
+ addik r11, r0, 1
+ swi r11, r0, PER_CPU(KM)
+ /* restore r31 */
+ lwi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* re-enable interrupts now we are in kernel mode */
+ enable_irq
+
+ /* send_sig(SIGTRAP, current, 0) */
+ addi r5, r0, SIGTRAP /* sending the trap signal */
+ add r6, r0, r31 /* to current */
+ bralid r15, send_sig
+ add r7, r0, r0 /* 3rd param zero */
+
+ /* Restore r3/r4 to work around how ret_to_user works */
+ lwi r3, r1, PT_R3
+ lwi r4, r1, PT_R4
+ bri ret_to_user
+
+/* Break vector target: spin in place (branch-to-self). */
+ENTRY(_break)
+ bri 0
+
+
+/* struct task_struct *_switch_to(struct thread_info *prev, struct thread_info *next);
+ *
+ * Context switch: saves prev's non-volatile/dedicated registers and SPRs
+ * into prev->cpu_context, updates the per-CPU current pointer (register
+ * r31 holds current by convention), restores next's context and returns
+ * into next.  Returns the outgoing task in r3.
+ */
+ENTRY(_switch_to)
+ /* prepare return value */
+ addk r3, r0, r31
+
+ /* save registers in cpu_context */
+ /* use r11 and r12, volatile registers, as temp register */
+ addik r11, r5, TI_CPU_CONTEXT
+ swi r1, r11, CC_SP
+ swi r2, r11, CC_R2
+ /* skip volatile registers.
+  * they are saved on stack when we jumped to _switch_to() */
+ /* dedicated registers */
+ swi r13, r11, CC_R13
+ swi r14, r11, CC_R14
+ swi r15, r11, CC_R15
+ swi r16, r11, CC_R16
+ swi r17, r11, CC_R17
+ swi r18, r11, CC_R18
+ /* save non-volatile registers */
+ swi r19, r11, CC_R19
+ swi r20, r11, CC_R20
+ swi r21, r11, CC_R21
+ swi r22, r11, CC_R22
+ swi r23, r11, CC_R23
+ swi r24, r11, CC_R24
+ swi r25, r11, CC_R25
+ swi r26, r11, CC_R26
+ swi r27, r11, CC_R27
+ swi r28, r11, CC_R28
+ swi r29, r11, CC_R29
+ swi r30, r11, CC_R30
+ /* special purpose registers */
+ mfs r12, rmsr
+ swi r12, r11, CC_MSR
+ mfs r12, rear
+ swi r12, r11, CC_EAR
+ mfs r12, resr
+ swi r12, r11, CC_ESR
+ mfs r12, rfsr
+ swi r12, r11, CC_FSR
+
+ /* update r31, the current */
+ lwi r31, r6, TI_TASK
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+
+ /* get new process' cpu context and restore */
+ addik r11, r6, TI_CPU_CONTEXT
+
+ /* special purpose registers */
+ lwi r12, r11, CC_FSR
+ mts rfsr, r12
+ lwi r12, r11, CC_ESR
+ mts resr, r12
+ lwi r12, r11, CC_EAR
+ mts rear, r12
+ lwi r12, r11, CC_MSR
+ mts rmsr, r12
+ /* non-volatile registers */
+ lwi r30, r11, CC_R30
+ lwi r29, r11, CC_R29
+ lwi r28, r11, CC_R28
+ lwi r27, r11, CC_R27
+ lwi r26, r11, CC_R26
+ lwi r25, r11, CC_R25
+ lwi r24, r11, CC_R24
+ lwi r23, r11, CC_R23
+ lwi r22, r11, CC_R22
+ lwi r21, r11, CC_R21
+ lwi r20, r11, CC_R20
+ lwi r19, r11, CC_R19
+ /* dedicated registers */
+ lwi r18, r11, CC_R18
+ lwi r17, r11, CC_R17
+ lwi r16, r11, CC_R16
+ lwi r15, r11, CC_R15
+ lwi r14, r11, CC_R14
+ lwi r13, r11, CC_R13
+ /* skip volatile registers */
+ lwi r2, r11, CC_R2
+ lwi r1, r11, CC_SP
+
+ /* return into the new task via its saved link register */
+ rtsd r15, 8
+ nop
+
+
+/*
+ * First code run by a newly forked task: calls schedule_tail(prev)
+ * (prev arrives in r3 from _switch_to), arranges a zero return value
+ * for the child and falls into the normal user-return path.
+ */
+ENTRY(ret_from_fork)
+ addk r5, r0, r3
+ addk r6, r0, r1
+ brlid r15, schedule_tail
+ nop
+ swi r31, r1, PT_R31 /* save r31 in user context. */
+ /* will soon be restored to r31 in ret_to_user */
+ addk r3, r0, r0 /* child returns 0 from fork */
+ brid ret_to_user
+ nop
+
+
+/*
+ * Pending-work slow path for ret_to_user: run schedule() and/or
+ * do_signal() as indicated by the thread-info flags in r19, then rejoin
+ * the return path.
+ */
+work_pending:
+ andi r11, r19, _TIF_NEED_RESCHED
+ beqi r11, 1f
+ bralid r15, schedule
+ nop
+1: andi r11, r19, _TIF_SIGPENDING
+ beqi r11, no_work_pending
+ addk r5, r1, r0
+ addik r7, r0, 1
+ bralid r15, do_signal
+ addk r6, r0, r0
+ bri no_work_pending
+
+/*
+ * Common syscall/trap return path: stores the return value into the
+ * frame, handles pending work, then restores the entire register
+ * context from pt_regs and returns with rtid.  The second disable_irq
+ * is redundant on the fast path but harmless.
+ */
+ENTRY(ret_to_user)
+ disable_irq
+
+ swi r4, r1, PT_R4 /* return val */
+ swi r3, r1, PT_R3 /* return val */
+
+ lwi r6, r31, TS_THREAD_INFO /* get thread info */
+ lwi r19, r6, TI_FLAGS /* get flags in thread info */
+ bnei r19, work_pending /* do an extra work if any bits are set */
+no_work_pending:
+ disable_irq
+
+ /* save r31 */
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* save mode indicator */
+ lwi r18, r1, PT_MODE
+ swi r18, r0, PER_CPU(KM)
+//restore_context:
+ /* special purpose registers */
+ lwi r18, r1, PT_FSR
+ mts rfsr, r18
+ lwi r18, r1, PT_ESR
+ mts resr, r18
+ lwi r18, r1, PT_EAR
+ mts rear, r18
+ lwi r18, r1, PT_MSR
+ mts rmsr, r18
+
+ lwi r31, r1, PT_R31
+ lwi r30, r1, PT_R30
+ lwi r29, r1, PT_R29
+ lwi r28, r1, PT_R28
+ lwi r27, r1, PT_R27
+ lwi r26, r1, PT_R26
+ lwi r25, r1, PT_R25
+ lwi r24, r1, PT_R24
+ lwi r23, r1, PT_R23
+ lwi r22, r1, PT_R22
+ lwi r21, r1, PT_R21
+ lwi r20, r1, PT_R20
+ lwi r19, r1, PT_R19
+ lwi r18, r1, PT_R18
+ lwi r17, r1, PT_R17
+ lwi r16, r1, PT_R16
+ lwi r15, r1, PT_R15
+ lwi r14, r1, PT_PC
+ lwi r13, r1, PT_R13
+ lwi r12, r1, PT_R12
+ lwi r11, r1, PT_R11
+ lwi r10, r1, PT_R10
+ lwi r9, r1, PT_R9
+ lwi r8, r1, PT_R8
+ lwi r7, r1, PT_R7
+ lwi r6, r1, PT_R6
+ lwi r5, r1, PT_R5
+ lwi r4, r1, PT_R4 /* return val */
+ lwi r3, r1, PT_R3 /* return val */
+ lwi r2, r1, PT_R2
+ lwi r1, r1, PT_R1 /* restore sp last */
+
+ rtid r14, 0
+ nop
+
+
+/*
+ * Syscall wrappers: each branches (delayed) to the C handler with the
+ * pt_regs pointer (== kernel sp on entry) loaded into the argument slot
+ * that handler expects.  Fix vs. previous revision: sys_sigsuspend_wrapper
+ * branched to sys_rt_sigsuspend while loading regs into r6 (arg 2), where
+ * rt_sigsuspend would have read it as sigsetsize; it now targets
+ * sys_sigsuspend as intended.
+ */
+sys_vfork_wrapper:
+ brid sys_vfork
+ addk r5, r1, r0 /* pt_regs is arg 1 */
+
+sys_clone_wrapper:
+ brid sys_clone
+ addk r7, r1, r0 /* pt_regs is arg 3 */
+
+sys_execve_wrapper:
+ brid sys_execve
+ addk r8, r1, r0 /* pt_regs is arg 4 */
+
+sys_sigreturn_wrapper:
+ brid sys_sigreturn
+ addk r5, r1, r0 /* pt_regs is arg 1 */
+
+sys_rt_sigreturn_wrapper:
+ brid sys_rt_sigreturn
+ addk r5, r1, r0 /* pt_regs is arg 1 */
+
+sys_sigsuspend_wrapper:
+ brid sys_sigsuspend /* was sys_rt_sigsuspend (wrong handler) */
+ addk r6, r1, r0 /* pt_regs is arg 2 */
+
+sys_rt_sigsuspend_wrapper:
+ brid sys_rt_sigsuspend
+ addk r7, r1, r0 /* pt_regs is arg 3 */
+
+/* Interrupt vector table */
+/* NOTE(review): each brai to a kernel address should assemble as an
+ * imm + brai pair (8 bytes), placing the vectors at the architectural
+ * offsets 0x00 (reset), 0x08 (user exception/syscall), 0x10 (interrupt),
+ * 0x18 (break), 0x20 (HW exception) and 0x60 (debug) — confirm against
+ * the MicroBlaze reference guide and the final link map. */
+.section .init.ivt, "ax"
+.org 0x0
+ brai _reset
+ brai _user_exception
+ brai _interrupt
+ brai _break
+ brai _hw_exception_handler
+.org 0x60
+ brai _debug_exception
+
+
+/* The syscall dispatch table lives in read-only data. */
+.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
--- /dev/null
+/*
+ * arch/microblaze/kernel/exceptions.c - HW exception handling
+ *
+ * Copyright 2007 Xilinx, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <asm/exceptions.h>
+#include <asm/entry.h> /* For KM CPU var */
+
+/* initialize_exception_handlers() - called from setup.c/trap_init() */
+void initialize_exception_handlers(void)
+{
+ /* Intentionally empty: the exception handlers are installed together
+  * with the interrupt vectors in setup.c/machine_early_init(). */
+}
+
+/* Exception cause codes as delivered in the ESR[EXC] field (see the
+ * extraction in hw_exception_handler.S). */
+#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
+#define MICROBLAZE_IOPB_BUS_EXCEPTION 0x03
+#define MICROBLAZE_DOPB_BUS_EXCEPTION 0x04
+#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
+#define MICROBLAZE_FPU_EXCEPTION 0x06
+
+/* Log-and-continue fallback for cause codes we do not recognise. */
+static void handle_unexpected_exception(unsigned int esr,
+  unsigned int kernel_mode, unsigned int addr)
+{
+ printk(KERN_WARNING "Unexpected exception %02x in %s mode, PC=%08x\n",
+  esr, kernel_mode ? "kernel" : "user", addr);
+}
+
+/*
+ * Common exception disposition: a fault in kernel mode is fatal;
+ * a fault in user mode delivers the given signal to the current task.
+ */
+static void handle_exception(const char *message, int signal,
+  unsigned int kernel_mode, unsigned int addr)
+{
+ if (!kernel_mode) {
+  force_sig(signal, current);
+  return;
+ }
+ panic("%s in the kernel mode, PC=%08x\n", message, addr);
+}
+
+/*
+ * C-level handler for non-unaligned hardware exceptions, called from
+ * hw_exception_handler.S with the ESR cause code and the faulting PC.
+ * Whether the fault happened in kernel mode is read from the per-CPU
+ * KM flag maintained by the entry code.
+ */
+asmlinkage void other_exception_handler(unsigned int esr, unsigned int addr)
+{
+ unsigned int kernel_mode = per_cpu(KM,0);
+
+ switch (esr) {
+
+ case MICROBLAZE_ILL_OPCODE_EXCEPTION:
+  handle_exception("Illegal instruction", SIGILL, kernel_mode, addr);
+  break;
+
+ case MICROBLAZE_IOPB_BUS_EXCEPTION:
+  handle_exception("Instruction bus error", SIGBUS, kernel_mode, addr);
+  break;
+
+ case MICROBLAZE_DOPB_BUS_EXCEPTION:
+  handle_exception("Data bus error", SIGBUS, kernel_mode, addr);
+  break;
+
+ case MICROBLAZE_DIV_ZERO_EXCEPTION:
+  handle_exception("Divide by zero", SIGILL, kernel_mode, addr);
+  break;
+
+ case MICROBLAZE_FPU_EXCEPTION:
+  handle_exception("FPU error", SIGFPE, kernel_mode, addr);
+  break;
+
+ default:
+  handle_unexpected_exception(esr, kernel_mode, addr);
+ }
+
+ return;
+}
--- /dev/null
+/*
+ * arch/microblaze/kernel/head.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Portions Copyright 2007 Xilinx, Inc.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+
+ .text
+/*
+ * Kernel entry point.
+ * Masks interrupts, sets up the small-data anchors (r13/r2), the initial
+ * kernel stack (r1) and the current-task register (r31), runs the
+ * platform's machine_early_init() and then enters start_kernel() with
+ * machine_halt as the return address should it ever return.
+ */
+ENTRY(_start)
+ mfs r1, rmsr
+ andi r1, r1, ~2 /* clear the MSR interrupt-enable bit */
+ mts rmsr, r1
+
+ /* Initialize small data anchors */
+ la r13, r0, _KERNEL_SDA_BASE_
+ la r2, r0, _KERNEL_SDA2_BASE_
+
+ /* Initialize stack pointer */
+ la r1, r0, init_thread_union + THREAD_SIZE - 4
+
+ /* Initialize r31 with current task address */
+ la r31, r0, init_task
+
+ /* Call platform dependent initialize function.
+  * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
+  * the function. */
+/* la r5, r0, 0 no command line */
+ la r6, r0, machine_early_init
+ brald r15, r6
+ nop
+
+ la r15, r0, machine_halt
+ braid start_kernel
+ nop
--- /dev/null
+/*
+ * arch/microblaze/kernel/heartbeat.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/xparameters.h>
+
+/*
+ * Blink the board's 4-bit LED bank as a load-dependent "heartbeat":
+ * two short pulses per period, with the period shrinking as the 5-minute
+ * load average rises.  Compiles to a no-op on designs without the LED
+ * GPIO (no XPAR_LEDS_4BIT_BASEADDR).
+ */
+void heartbeat(void)
+{
+#ifdef XPAR_LEDS_4BIT_BASEADDR
+ static unsigned int cnt = 0, period = 0, dist = 0;
+
+ /* LED on at ticks 0 and `dist`, off 7 ticks after each */
+ if (cnt == 0 || cnt == dist) {
+  iowrite32(1, XPAR_LEDS_4BIT_BASEADDR);
+ } else if (cnt == 7 || cnt == dist + 7) {
+  iowrite32(0, XPAR_LEDS_4BIT_BASEADDR);
+ }
+
+ if (++cnt > period) {
+  cnt = 0;
+
+  /*
+   * The hyperbolic function below modifies the heartbeat period
+   * length in dependency of the current (5min) load. It goes
+   * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
+   */
+  period = ((672 << FSHIFT) / (5 * avenrun[0] +
+     (7 << FSHIFT))) + 30;
+  dist = period / 4;
+ }
+#endif
+}
--- /dev/null
+/*///////////////////////////////////////////////////////////////////////////
+//
+// arch/microblaze/kernel/hw_exception_handler.S
+//
+// Unaligned exception handling for Microblaze
+//
+// Portions Copyright 2007 Xilinx, Inc.
+//
+// uClinux customisation (c) 2005 John Williams
+//
+// Original code
+// Copyright (c) 2004 Xilinx, Inc. All rights reserved.
+//
+// Xilinx, Inc.
+// XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+// COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+// ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR
+// STANDARD, XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION
+// IS FREE FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE
+// FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+// XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+// THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO
+// ANY WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE
+// FROM CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS FOR A PARTICULAR PURPOSE.
+//
+// $Id: hw_exception_handler.S,v 1.1.2.1 2007/03/16 16:09:57 imanuilov Exp $
+//
+//////////////////////////////////////////////////////////////////////////////*/
+
+/*
+ * Microblaze HW Exception Handler
+ * - Non self-modifying exception handler for the following exception conditions
+ * - Unalignment
+ * - Instruction bus error
+ * - Data bus error
+ * - Illegal instruction opcode
+ * - Divide-by-zero
+ *
+ * Note that we disable interrupts during exception handling; otherwise we could
+ * get multiple levels of re-entrancy if interrupt handlers themselves cause exceptions. JW
+ */
+
+#include <asm/exceptions.h>
+#include <asm/registers.h>
+#include <asm/unistd.h>
+
+#if MICROBLAZE_EXCEPTIONS_ENABLED
+
+/* Helpful Macros */
+/* The exception handler's private stack frame: MSR at offset 0, then one
+ * slot per register so REG_OFFSET(n) can double as both layouts. */
+#define EX_HANDLER_STACK_SIZ (4*19)
+#define RMSR_OFFSET 0
+/* Fix: parenthesize the macro parameter so expressions expand safely. */
+#define REG_OFFSET(regnum) (4*(regnum))
+#define NUM_TO_REG(num) r ## num
+
+#define R3_TO_STACK(regnum) swi r3, r1, REG_OFFSET(regnum)
+#define R3_FROM_STACK(regnum) lwi r3, r1, REG_OFFSET(regnum)
+
+#define PUSH_REG(regnum) swi NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
+#define POP_REG(regnum) lwi NUM_TO_REG(regnum), r1, REG_OFFSET(regnum)
+
+/* Uses r5 */
+#define PUSH_MSR \
+ mfs r5, rmsr; \
+ swi r5, r1, RMSR_OFFSET;
+
+/* Save MSR, then enable exceptions (EE) with interrupts masked so the
+ * handler may itself fault without re-entering via interrupts.  Uses r5. */
+#define PUSH_MSR_AND_ENABLE_EXC \
+ mfs r5, rmsr; \
+ swi r5, r1, RMSR_OFFSET; \
+ ori r5, r5, 0x100; /* Turn ON the EE bit*/ \
+ andi r5, r5, ~2; /* Disable interrupts */\
+ mts rmsr, r5;
+
+/* Uses r5 */
+#define POP_MSR \
+ lwi r5, r1, RMSR_OFFSET; \
+ mts rmsr, r5;
+
+/* Dispatch-table fillers for register operands that cannot be handled
+ * (r0/r1/r17 slots): bail out to the unhandled path. */
+#define LWREG_NOP \
+ bri ex_handler_unhandled; \
+ nop;
+
+#define SWREG_NOP \
+ bri ex_handler_unhandled; \
+ nop;
+
+/* r3 is the source */
+#define R3_TO_LWREG_V(regnum) \
+ R3_TO_STACK (regnum); \
+ bri ex_handler_done;
+
+/* r3 is the source */
+#define R3_TO_LWREG(regnum) \
+ or NUM_TO_REG (regnum), r0, r3; \
+ bri ex_handler_done;
+
+/* r3 is the target */
+#define SWREG_TO_R3_V(regnum) \
+ R3_FROM_STACK (regnum); \
+ bri ex_sw_tail;
+
+/* r3 is the target */
+#define SWREG_TO_R3(regnum) \
+ or r3, r0, NUM_TO_REG (regnum); \
+ bri ex_sw_tail;
+
+
+#if OTHER_EXCEPTIONS_ENABLED
+.extern other_exception_handler /* Defined in exception.c */
+#endif
+
+/*
+ * hw_exception_handler - Handler for unaligned exceptions
+ * Exception handler notes:
+ * - Does not handle exceptions other than unaligned exceptions
+ * - Does not handle exceptions during load into r17, r1, r0.
+ * - Does not handle exceptions during store from r17 (cannot be done) and r1 (slows down common case)
+ *
+ * Relevant register structures
+ *
+ * EAR - |----|----|----|----|----|----|----|----|
+ * - < ## 32 bit faulting address ## >
+ *
+ * ESR - |----|----|----|----|----| - | - |-----|-----|
+ * - W S REG EXC
+ *
+ *
+ * STACK FRAME STRUCTURE
+ * ---------------------
+ *
+ * +-------------+ + 0
+ * | MSR |
+ * +-------------+ + 4
+ * | r1 |
+ * | . |
+ * | . |
+ * | . |
+ * | . |
+ * | r18 |
+ * +-------------+ + 76
+ * | . |
+ * | . |
+ */
+
+
+.global _hw_exception_handler
+.section .text
+.align 4
+.ent _hw_exception_handler
+_hw_exception_handler:
+ addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
+ PUSH_REG(3);
+ PUSH_REG(4);
+ PUSH_REG(5);
+ PUSH_REG(6);
+ mfs r3, resr; /* r3 = ESR, examined throughout the handler */
+#if XPAR_MICROBLAZE_0_EX_HANDLE_DELAY_SLOT
+ andi r5, r3, 0x1000; /* ESR delay-slot bit set? */
+ beqi r5, not_in_delay_slot;
+ mfs r17, rbtr; /* faulted in a delay slot: resume at the branch target */
+not_in_delay_slot:
+#endif
+ PUSH_REG(17);
+ PUSH_MSR_AND_ENABLE_EXC; /* Exceptions enabled here. This will allow nested exceptions */
+
+ andi r5, r3, 0x1F; /* Extract ESR[EXC] */
+
+#if XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS
+ xori r6, r5, 1; /* 00001 = Unaligned Exception */
+ beqi r6, handle_unaligned_ex ; /* Jump to unalignment exception handler*/
+#endif
+
+#if OTHER_EXCEPTIONS_ENABLED
+handle_other_ex: /* Handle Other exceptions here */
+ PUSH_REG(7); /* Save other volatiles before we make procedure calls below */
+ PUSH_REG(8);
+ PUSH_REG(9);
+ PUSH_REG(10);
+ PUSH_REG(11);
+ PUSH_REG(12);
+ PUSH_REG(14);
+ PUSH_REG(15);
+ PUSH_REG(18);
+
+ andi r5, r3, 0x1F; /* Load ESR[EC] */
+ addk r6, r17, r0; /* Load exception address */
+ bralid r15, other_exception_handler; /* Branch to the handler */
+ nop;
+
+ /*
+ * Trigger execution of the signal handler by enabling
+ * interrupts and calling an invalid syscall.
+ */
+ mfs r5, rmsr;
+ ori r5, r5, 2; /* set MSR interrupt-enable bit */
+ mts rmsr, r5;
+ addi r12, r0, NR_syscalls; /* out-of-range syscall number */
+ brki r14, 0x08;
+ mfs r5, rmsr;
+ andi r5, r5, ~2; /* clear interrupt-enable again */
+ mts rmsr, r5;
+
+ POP_REG(7); /* Restore other volatiles */
+ POP_REG(8);
+ POP_REG(9);
+ POP_REG(10);
+ POP_REG(11);
+ POP_REG(12);
+ POP_REG(14);
+ POP_REG(15);
+ POP_REG(18);
+#endif /* OTHER_EXCEPTIONS_ENABLED */
+
+#if XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS
+ bri ex_handler_done; /* Complete exception handling */
+handle_unaligned_ex:
+ andi r6, r3, 0x3E0; /* Mask and extract the register operand */
+ srl r6, r6; /* r6 >> 5 */
+ srl r6, r6;
+ srl r6, r6;
+ srl r6, r6;
+ srl r6, r6;
+ sbi r6, r0, ex_reg_op; /* Store the register operand in a temporary location */
+ mfs r4, rear; /* r4 = faulting (unaligned) address */
+ andi r6, r3, 0x400; /* Extract ESR[S] */
+ bnei r6, ex_sw;
+ex_lw:
+ andi r6, r3, 0x800; /* Extract ESR[W] */
+ beqi r6, ex_lhw;
+ lbui r5, r4, 0; /* Exception address in r4 */
+ sbi r5, r0, ex_tmp_data_loc_0; /* Load a word, byte-by-byte from destination address and save it in tmp space */
+ lbui r5, r4, 1;
+ sbi r5, r0, ex_tmp_data_loc_1;
+ lbui r5, r4, 2;
+ sbi r5, r0, ex_tmp_data_loc_2;
+ lbui r5, r4, 3;
+ sbi r5, r0, ex_tmp_data_loc_3;
+ lwi r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
+ bri ex_lw_tail;
+ex_lhw:
+ lbui r5, r4, 0; /* Exception address in r4 */
+ sbi r5, r0, ex_tmp_data_loc_0; /* Load a half-word, byte-by-byte from destination address and save it in tmp space */
+ lbui r5, r4, 1;
+ sbi r5, r0, ex_tmp_data_loc_1;
+ lhui r3, r0, ex_tmp_data_loc_0; /* Get the destination register value into r3 */
+ex_lw_tail:
+ lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
+ la r6, r0, lw_table; /* Form load_word jump table offset (lw_table + (8 * regnum)) */
+ addk r5, r5, r5; /* r5 *= 8: each table entry is 8 bytes */
+ addk r5, r5, r5;
+ addk r5, r5, r5;
+ addk r5, r5, r6;
+ bra r5;
+ex_lw_end: /* Exception handling of load word, ends */
+ex_sw:
+ lbui r5, r0, ex_reg_op; /* Get the destination register number into r5 */
+ la r6, r0, sw_table; /* Form store_word jump table offset (sw_table + (8 * regnum)) */
+ add r5, r5, r5; /* NOTE(review): ex_lw_tail uses addk for the same math; add also writes carry — harmless here but inconsistent */
+ add r5, r5, r5;
+ add r5, r5, r5;
+ add r5, r5, r6;
+ bra r5;
+ex_sw_tail:
+ mfs r6, resr;
+ andi r6, r6, 0x800; /* Extract ESR[W] */
+ beqi r6, ex_shw;
+ swi r3, r0, ex_tmp_data_loc_0;
+ lbui r3, r0, ex_tmp_data_loc_0; /* Store the word, byte-by-byte into destination address */
+ sbi r3, r4, 0;
+ lbui r3, r0, ex_tmp_data_loc_1;
+ sbi r3, r4, 1;
+ lbui r3, r0, ex_tmp_data_loc_2;
+ sbi r3, r4, 2;
+ lbui r3, r0, ex_tmp_data_loc_3;
+ sbi r3, r4, 3;
+ bri ex_handler_done;
+ex_shw:
+ swi r3, r0, ex_tmp_data_loc_0; /* Store the lower half-word, byte-by-byte into destination address */
+ lbui r3, r0, ex_tmp_data_loc_2;
+ sbi r3, r4, 0;
+ lbui r3, r0, ex_tmp_data_loc_3;
+ sbi r3, r4, 1;
+ex_sw_end: /* Exception handling of store word, ends. */
+
+#endif /* XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS */
+
+ex_handler_done:
+ POP_MSR;
+ POP_REG(3);
+ POP_REG(4);
+ POP_REG(5);
+ POP_REG(6);
+ POP_REG(17);
+ rted r17, 0 /* return from exception; delay slot below still executes */
+ addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
+ex_handler_unhandled:
+ bri 0 /* UNHANDLED. TRAP HERE */
+.end _hw_exception_handler
+
+#if XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS
+/*
+ * hw_exception_handler Jump Table
+ * - Contains code snippets for each register that caused the unaligned exception.
+ * - Hence exception handler is NOT self-modifying
+ * - Separate table for load exceptions and store exceptions.
+ * - Each table is of size: (8 * 32) = 256 bytes
+ *
+ * NOTE(review): ex_lw_tail/ex_sw index these tables by (regnum * 8), so
+ * each macro expansion below must be exactly two 32-bit instructions.
+ * r1 and r17 entries are NOPs — the main handler documents that loads
+ * into r1/r17 and stores from r17/r1 are deliberately not handled.
+ */
+
+.section .text
+.align 4
+lw_table:
+lw_r0: R3_TO_LWREG (0);
+lw_r1: LWREG_NOP;
+lw_r2: R3_TO_LWREG (2);
+lw_r3: R3_TO_LWREG_V (3);
+lw_r4: R3_TO_LWREG_V (4);
+lw_r5: R3_TO_LWREG_V (5);
+lw_r6: R3_TO_LWREG_V (6);
+lw_r7: R3_TO_LWREG (7);
+lw_r8: R3_TO_LWREG (8);
+lw_r9: R3_TO_LWREG (9);
+lw_r10: R3_TO_LWREG (10);
+lw_r11: R3_TO_LWREG (11);
+lw_r12: R3_TO_LWREG (12);
+lw_r13: R3_TO_LWREG (13);
+lw_r14: R3_TO_LWREG (14);
+lw_r15: R3_TO_LWREG (15);
+lw_r16: R3_TO_LWREG (16);
+lw_r17: LWREG_NOP;
+lw_r18: R3_TO_LWREG (18);
+lw_r19: R3_TO_LWREG (19);
+lw_r20: R3_TO_LWREG (20);
+lw_r21: R3_TO_LWREG (21);
+lw_r22: R3_TO_LWREG (22);
+lw_r23: R3_TO_LWREG (23);
+lw_r24: R3_TO_LWREG (24);
+lw_r25: R3_TO_LWREG (25);
+lw_r26: R3_TO_LWREG (26);
+lw_r27: R3_TO_LWREG (27);
+lw_r28: R3_TO_LWREG (28);
+lw_r29: R3_TO_LWREG (29);
+lw_r30: R3_TO_LWREG (30);
+lw_r31: R3_TO_LWREG (31);
+
+sw_table:
+sw_r0: SWREG_TO_R3 (0);
+sw_r1: SWREG_NOP;
+sw_r2: SWREG_TO_R3 (2);
+sw_r3: SWREG_TO_R3_V (3);
+sw_r4: SWREG_TO_R3_V (4);
+sw_r5: SWREG_TO_R3_V (5);
+sw_r6: SWREG_TO_R3_V (6);
+sw_r7: SWREG_TO_R3 (7);
+sw_r8: SWREG_TO_R3 (8);
+sw_r9: SWREG_TO_R3 (9);
+sw_r10: SWREG_TO_R3 (10);
+sw_r11: SWREG_TO_R3 (11);
+sw_r12: SWREG_TO_R3 (12);
+sw_r13: SWREG_TO_R3 (13);
+sw_r14: SWREG_TO_R3 (14);
+sw_r15: SWREG_TO_R3 (15);
+sw_r16: SWREG_TO_R3 (16);
+sw_r17: SWREG_NOP;
+sw_r18: SWREG_TO_R3 (18);
+sw_r19: SWREG_TO_R3 (19);
+sw_r20: SWREG_TO_R3 (20);
+sw_r21: SWREG_TO_R3 (21);
+sw_r22: SWREG_TO_R3 (22);
+sw_r23: SWREG_TO_R3 (23);
+sw_r24: SWREG_TO_R3 (24);
+sw_r25: SWREG_TO_R3 (25);
+sw_r26: SWREG_TO_R3 (26);
+sw_r27: SWREG_TO_R3 (27);
+sw_r28: SWREG_TO_R3 (28);
+sw_r29: SWREG_TO_R3 (29);
+sw_r30: SWREG_TO_R3 (30);
+sw_r31: SWREG_TO_R3 (31);
+
+/* Temporary data structures used in the handler */
+/* NOTE(review): single static scratch area — not safe if a nested
+ * unaligned exception can occur while one is being serviced; confirm
+ * PUSH_MSR_AND_ENABLE_EXC cannot re-enter this path. */
+.section .data
+.align 4
+ex_tmp_data_loc_0:
+ .byte 0
+ex_tmp_data_loc_1:
+ .byte 0
+ex_tmp_data_loc_2:
+ .byte 0
+ex_tmp_data_loc_3:
+ .byte 0
+ex_reg_op:
+ .byte 0
+#endif /* XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS */
+
+#else /* Dummy exception handler, in case exceptions are not present in the processor */
+
+/* Hardware exceptions not configured: spin forever if one arrives anyway. */
+.global _hw_exception_handler
+.section .text
+.align 4
+.ent _hw_exception_handler
+_hw_exception_handler:
+ bri 0;
+.end _hw_exception_handler
+
+#endif /* MICROBLAZE_EXCEPTIONS_ENABLED */
--- /dev/null
+/*
+ * arch/microblaze/kernel/init_task.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/pgtable.h>
+
+
+/* Statically-initialized first task (PID 0 / swapper) and its supporting
+ * structures; everything here must exist before any allocator runs. */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/* Initial thread_info + kernel stack, placed in its own section so the
+ * linker script can align it as required. */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+{ INIT_THREAD_INFO(init_task) };
+
+
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
--- /dev/null
+/*
+ * arch/microblaze/kernel/process.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/irqflags.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+
+#ifdef CONFIG_OF
+
+#include <asm/prom.h>
+
+/*
+ * irq_of_parse_and_map - parse an interrupt spec from the device tree
+ * @dev: device node to read "interrupts" from
+ * @index: index of the interrupt to map
+ *
+ * Returns the raw first cell of the interrupt specifier, or NO_IRQ if
+ * of_irq_map_one() fails. NOTE(review): the commented-out
+ * irq_create_of_mapping() call suggests a proper irq-domain mapping was
+ * intended here; the raw specifier is only valid while there is a single
+ * interrupt controller with a 1:1 hwirq mapping.
+ */
+unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
+{
+ struct of_irq oirq;
+
+ if (of_irq_map_one(dev, index, &oirq))
+ return NO_IRQ;
+
+ return oirq.specifier[0];
+ //irq_create_of_mapping(oirq.controller, oirq.specifier,
+ // oirq.size);
+}
+EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
+
+#endif
+
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ /* NOTE(review): printk without a KERN_* level; KERN_ERR would fit. */
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+}
+
+extern void ledoff(void); /* NOTE(review): declared but not used in this file */
+
+/*
+ * do_IRQ - top-level hardware interrupt entry
+ *
+ * Reads the pending interrupt number from the controller (get_irq()) and
+ * dispatches it through the generic __do_IRQ() path, bracketed by
+ * irq_enter()/irq_exit() for correct preempt/softirq accounting.
+ * get_irq() returns -1 when nothing is pending, which should be
+ * impossible here — hence the BUG_ON.
+ */
+void do_IRQ(struct pt_regs *regs)
+{
+ unsigned int irq;
+
+ irq_enter();
+ set_irq_regs(regs);
+ irq = get_irq(regs);
+ BUG_ON(irq == -1U);
+ __do_IRQ(irq);
+
+ irq_exit();
+}
+
+/*
+ * show_interrupts - /proc/interrupts seq_file show callback
+ *
+ * Prints the CPU header row on the first call, then one line per
+ * populated IRQ: per-CPU counts, trigger type (level/edge), chip name
+ * and the names of all registered actions.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_printf(p, " ");
+ for_each_online_cpu(j)
+ seq_printf(p, "CPU%-8d",j);
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ /* Lock out concurrent request_irq/free_irq while we walk actions */
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip; /* unpopulated IRQ: print nothing */
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %8s", irq_desc[i].status & IRQ_LEVEL ? "level": "edge");
+ seq_printf(p, " %8s", irq_desc[i].chip->name);
+ seq_printf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ }
+ return 0;
+}
--- /dev/null
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+
+#include <asm/errno.h>
+#include <asm/of_device.h>
+
+/*
+ * of_device_get_modalias - build the "of:N<name>T<type>C<compat>..." string
+ * @ofdev: device whose node supplies name/type/compatible
+ * @str: output buffer
+ * @len: size of @str
+ *
+ * Returns the length the full string would need (snprintf-style), which
+ * may exceed @len; in that case the output is truncated. Each '\0'
+ * separator inside the compatible list becomes 'C' and each space
+ * becomes '_', so the result is a single matchable token.
+ */
+ssize_t of_device_get_modalias(struct of_device *ofdev,
+ char *str, ssize_t len)
+{
+ const char *compat;
+ int cplen, i;
+ ssize_t tsize, csize, repend;
+
+ /* Name & Type */
+ csize = snprintf(str, len, "of:N%sT%s",
+ ofdev->node->name, ofdev->node->type);
+
+ /* Get compatible property if any */
+ compat = of_get_property(ofdev->node, "compatible", &cplen);
+ if (!compat)
+ return csize;
+
+ /* Find true end (we tolerate multiple \0 at the end) */
+ for (i=(cplen-1); i>=0 && !compat[i]; i--)
+ cplen--;
+ if (!cplen)
+ return csize;
+ cplen++; /* keep one trailing \0 so the last entry gets a 'C' too */
+
+ /* Check space (need cplen+1 chars including final \0) */
+ tsize = csize + cplen;
+ repend = tsize;
+
+ if (csize>=len) /* @ the limit, all is already filled */
+ return tsize;
+
+ if (tsize>=len) { /* limit compat list */
+ cplen = len-csize-1;
+ repend = len;
+ }
+
+ /* Copy and do char replacement; str[csize] is the '\0' snprintf left,
+ * deliberately overwritten with 'C' as the first separator */
+ memcpy(&str[csize+1], compat, cplen);
+ for (i=csize; i<repend; i++) {
+ char c = str[i];
+ if (c=='\0')
+ str[i] = 'C';
+ else if (c==' ')
+ str[i] = '_';
+ }
+
+ return tsize;
+}
+
+/*
+ * of_device_uevent - fill hotplug environment for an OF device
+ *
+ * Emits OF_NAME, OF_TYPE, one OF_COMPATIBLE_<n> per compatible entry,
+ * OF_COMPATIBLE_N with the count, and MODALIAS built via
+ * of_device_get_modalias(). Returns 0 or -ENOMEM when the uevent
+ * buffer/env table is exhausted, -ENODEV for a NULL device.
+ */
+int of_device_uevent(struct device *dev,
+ char **envp, int num_envp, char *buffer, int buffer_size)
+{
+ struct of_device *ofdev;
+ const char *compat;
+ int i = 0, length = 0, seen = 0, cplen, sl;
+
+ if (!dev)
+ return -ENODEV;
+
+ ofdev = to_of_device(dev);
+
+ if (add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "OF_NAME=%s", ofdev->node->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "OF_TYPE=%s", ofdev->node->type))
+ return -ENOMEM;
+
+ /* Since the compatible field can contain pretty much anything
+ * it's not really legal to split it out with commas. We split it
+ * up using a number of environment variables instead. */
+
+ compat = of_get_property(ofdev->node, "compatible", &cplen);
+ while (compat && *compat && cplen > 0) {
+ if (add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "OF_COMPATIBLE_%d=%s", seen, compat))
+ return -ENOMEM;
+
+ sl = strlen (compat) + 1; /* advance past this entry and its \0 */
+ compat += sl;
+ cplen -= sl;
+ seen++;
+ }
+
+ if (add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "OF_COMPATIBLE_N=%d", seen))
+ return -ENOMEM;
+
+ /* modalias is trickier, we add it in 2 steps */
+ if (add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "MODALIAS="))
+ return -ENOMEM;
+
+ /* overwrite the '\0' left after "MODALIAS=" with the alias body */
+ sl = of_device_get_modalias(ofdev, &buffer[length-1],
+ buffer_size-length);
+ if (sl >= (buffer_size-length))
+ return -ENOMEM;
+
+ length += sl;
+
+ envp[i] = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(of_device_uevent);
+EXPORT_SYMBOL(of_device_get_modalias);
--- /dev/null
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ * and Arnd Bergmann, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include <asm/errno.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/topology.h>
+#include <asm/pci-bridge.h>
+#include <asm/atomic.h>
+
+/*
+ * The list of OF IDs below is used for matching bus types in the
+ * system whose devices are to be exposed as of_platform_devices.
+ *
+ * This is the default list valid for most platforms. This file provides
+ * functions which can take an explicit list if necessary, though.
+ *
+ * The search is always performed recursively, looking for children of
+ * the provided device_node, and recursing further if such a child
+ * matches a bus type in the list.
+ */
+
+static struct of_device_id of_default_bus_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .type = "spider", },
+ { .type = "axon", },
+ { .type = "plb5", },
+ { .type = "plb4", },
+ { .type = "opb", },
+ { .type = "ebc", },
+ {},
+};
+
+/* Monotonic counter used by of_platform_make_bus_id() for nodes
+ * without a usable "reg"/"dcr-reg" property. */
+static atomic_t bus_no_reg_magic;
+
+struct bus_type of_platform_bus_type = {
+ .uevent = of_device_uevent,
+};
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static int __init of_bus_driver_init(void)
+{
+ return of_bus_type_init(&of_platform_bus_type, "of_platform");
+}
+
+postcore_initcall(of_bus_driver_init);
+
+/*
+ * of_register_platform_driver - hook an of_platform_driver onto the
+ * of_platform bus and register it with the driver core.
+ */
+int of_register_platform_driver(struct of_platform_driver *drv)
+{
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &of_platform_bus_type;
+
+ /* register with core */
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(of_register_platform_driver);
+
+/* Counterpart of of_register_platform_driver(). */
+void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(of_unregister_platform_driver);
+
+/*
+ * of_platform_make_bus_id - synthesize a unique bus_id for an OF device
+ *
+ * Preference order: "d<dcr>.<name>" for native DCR devices,
+ * "D<addr>.<name>" for MMIO DCRs, "<physaddr>.<name>" from the "reg"
+ * property, and finally "<name>.<counter>" when no address is usable.
+ */
+static void of_platform_make_bus_id(struct of_device *dev)
+{
+ struct device_node *node = dev->node;
+ char *name = dev->dev.bus_id;
+ const u32 *reg;
+ u64 addr;
+ int magic;
+
+ /*
+ * If it's a DCR based device, use 'd' for native DCRs
+ * and 'D' for MMIO DCRs.
+ */
+#ifdef CONFIG_PPC_DCR
+ reg = of_get_property(node, "dcr-reg", NULL);
+ if (reg) {
+#ifdef CONFIG_PPC_DCR_NATIVE
+ snprintf(name, BUS_ID_SIZE, "d%x.%s",
+ *reg, node->name);
+#else /* CONFIG_PPC_DCR_NATIVE */
+ addr = of_translate_dcr_address(node, *reg, NULL);
+ if (addr != OF_BAD_ADDR) {
+ snprintf(name, BUS_ID_SIZE,
+ "D%llx.%s", (unsigned long long)addr,
+ node->name);
+ return;
+ }
+#endif /* !CONFIG_PPC_DCR_NATIVE */
+ }
+#endif /* CONFIG_PPC_DCR */
+
+ /*
+ * For MMIO, get the physical address
+ */
+ reg = of_get_property(node, "reg", NULL);
+ if (reg) {
+ addr = of_translate_address(node, reg);
+ if (addr != OF_BAD_ADDR) {
+ snprintf(name, BUS_ID_SIZE,
+ "%llx.%s", (unsigned long long)addr,
+ node->name);
+ return;
+ }
+ }
+
+ /*
+ * No BusID, use the node name and add a globally incremented
+ * counter (and pray...)
+ */
+ magic = atomic_add_return(1, &bus_no_reg_magic);
+ snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+}
+
+/*
+ * of_platform_device_create - allocate and register an of_device
+ * @np: device node to wrap (reference taken; dropped implicitly via
+ * of_release_dev when the device is released)
+ * @bus_id: explicit bus id, or NULL to synthesize one
+ * @parent: parent device for the sysfs hierarchy
+ *
+ * Returns the registered device, or NULL on allocation/registration
+ * failure. NOTE(review): on the registration-failure path the np
+ * reference taken by of_node_get() is leaked (kfree only).
+ */
+struct of_device* of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ struct of_device *dev;
+
+ dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, sizeof(*dev));
+
+ dev->node = of_node_get(np);
+ dev->dma_mask = 0xffffffffUL; /* default: 32-bit DMA */
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.parent = parent;
+ dev->dev.bus = &of_platform_bus_type;
+ dev->dev.release = of_release_dev;
+ dev->dev.archdata.of_node = np;
+ dev->dev.archdata.numa_node = of_node_to_nid(np);
+
+ /* We do not fill the DMA ops for platform devices by default.
+ * This is currently the responsibility of the platform code
+ * to do such, possibly using a device notifier
+ */
+
+ if (bus_id)
+ strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+ else
+ of_platform_make_bus_id(dev);
+
+ if (of_device_register(dev) != 0) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(of_platform_device_create);
+
+
+
+/**
+ * of_platform_bus_create - Create an OF device for a bus node and all its
+ * children. Optionally recursively instantiate matching busses.
+ * @bus: device node of the bus to instantiate
+ * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+ * disallow recursive creation of child busses
+ */
+static int of_platform_bus_create(struct device_node *bus,
+ struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct of_device *dev;
+ int rc = 0;
+
+ for (child = NULL; (child = of_get_next_child(bus, child)); ) {
+ pr_debug(" create child: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else if (!of_match_node(matches, child))
+ continue; /* leaf device: created, but don't recurse */
+ if (rc == 0) {
+ pr_debug(" and sub busses\n");
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ } if (rc) { /* NOTE(review): two statements on one line; clearer split */
+ of_node_put(child); /* drop the iterator's reference on early exit */
+ break;
+ }
+ }
+ return rc;
+}
+
+/**
+ * of_platform_bus_probe - Probe the device-tree for platform busses
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Note that children of the provided root are not instantiated as devices
+ * unless the specified root itself matches the bus list and is not NULL.
+ */
+
+int of_platform_bus_probe(struct device_node *root,
+ struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct of_device *dev;
+ int rc = 0;
+
+ if (matches == NULL)
+ matches = of_default_bus_ids;
+ if (matches == OF_NO_DEEP_PROBE)
+ return -EINVAL;
+ if (root == NULL)
+ root = of_find_node_by_path("/");
+ else
+ of_node_get(root); /* balance the of_node_put at bail */
+
+ pr_debug("of_platform_bus_probe()\n");
+ pr_debug(" starting at: %s\n", root->full_name);
+
+ /* Do a self check of bus type, if there's a match, create
+ * children
+ */
+ if (of_match_node(matches, root)) {
+ pr_debug(" root match, create all sub devices\n");
+ dev = of_platform_device_create(root, NULL, parent);
+ if (dev == NULL) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+ pr_debug(" create all sub busses\n");
+ rc = of_platform_bus_create(root, matches, &dev->dev);
+ goto bail;
+ }
+ /* Root itself didn't match: scan its direct children instead */
+ for (child = NULL; (child = of_get_next_child(root, child)); ) {
+ if (!of_match_node(matches, child))
+ continue;
+
+ pr_debug(" match: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ if (rc) {
+ of_node_put(child); /* drop iterator reference on early exit */
+ break;
+ }
+ }
+ bail:
+ of_node_put(root);
+ return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
+
+/* bus_find_device() predicate: match on the wrapped device_node pointer. */
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return to_of_device(dev)->node == data;
+}
+
+/*
+ * of_find_device_by_node - look up the of_device wrapping @np on the
+ * of_platform bus, or NULL if none is registered.
+ */
+struct of_device *of_find_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&of_platform_bus_type,
+ NULL, np, of_dev_node_match);
+ if (dev)
+ return to_of_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+/* bus_find_device() predicate: match on the node's linux_phandle. */
+static int of_dev_phandle_match(struct device *dev, void *data)
+{
+ phandle *ph = data;
+ return to_of_device(dev)->node->linux_phandle == *ph;
+}
+
+/*
+ * of_find_device_by_phandle - as of_find_device_by_node, keyed by phandle.
+ */
+struct of_device *of_find_device_by_phandle(phandle ph)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&of_platform_bus_type,
+ NULL, &ph, of_dev_phandle_match);
+ if (dev)
+ return to_of_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_phandle);
+
+
+#ifdef CONFIG_PPC_OF_PLATFORM_PCI
+
+/* The probing of PCI controllers from of_platform is currently
+ * 64 bits only, mostly due to gratuitous differences between
+ * the 32 and 64 bits PCI code on PowerPC and the 32 bits one
+ * lacking some bits needed here.
+ *
+ * NOTE(review): this whole section references ppc_md/pci_* helpers —
+ * it looks copied from arch/powerpc and presumably cannot build on
+ * microblaze; confirm CONFIG_PPC_OF_PLATFORM_PCI is never set here.
+ */
+
+/*
+ * of_pci_phb_probe - of_platform probe for a PCI host bridge node:
+ * allocates a pci_controller, runs the platform setup hook, processes
+ * "ranges", sets up IO space, scans and registers the bus.
+ */
+static int __devinit of_pci_phb_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct pci_controller *phb;
+
+ /* Check if we can do that ... */
+ if (ppc_md.pci_setup_phb == NULL)
+ return -ENODEV;
+
+ printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
+
+ /* Alloc and setup PHB data structure */
+ phb = pcibios_alloc_controller(dev->node);
+ if (!phb)
+ return -ENODEV;
+
+ /* Setup parent in sysfs */
+ phb->parent = &dev->dev;
+
+ /* Setup the PHB using arch provided callback */
+ if (ppc_md.pci_setup_phb(phb)) {
+ pcibios_free_controller(phb);
+ return -ENODEV;
+ }
+
+ /* Process "ranges" property */
+ pci_process_bridge_OF_ranges(phb, dev->node, 0);
+
+ /* Setup IO space. We use the non-dynamic version of that code here,
+ * which doesn't quite support unplugging. Next kernel release will
+ * have a better fix for this.
+ * Note also that we don't do ISA, this will also be fixed with a
+ * more massive rework.
+ */
+ pci_setup_phb_io(phb, pci_io_base == 0);
+
+ /* Init pci_dn data structures */
+ pci_devs_phb_init_dynamic(phb);
+
+ /* Register devices with EEH */
+#ifdef CONFIG_EEH
+ if (dev->node->child)
+ eeh_add_device_tree_early(dev->node);
+#endif /* CONFIG_EEH */
+
+ /* Scan the bus */
+ scan_phb(phb);
+
+ /* Claim resources. This might need some rework as well depending
+ * whether we are doing probe-only or not, like assigning unassigned
+ * resources etc...
+ */
+ pcibios_claim_one_bus(phb->bus);
+
+ /* Finish EEH setup */
+#ifdef CONFIG_EEH
+ eeh_add_device_tree_late(phb->bus);
+#endif
+
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
+ return 0;
+}
+
+/* Device-tree "device_type" values that identify a PCI host bridge. */
+static struct of_device_id of_pci_phb_ids[] = {
+ { .type = "pci", },
+ { .type = "pcix", },
+ { .type = "pcie", },
+ { .type = "pciex", },
+ { .type = "ht", },
+ {}
+};
+
+static struct of_platform_driver of_pci_phb_driver = {
+ .name = "of-pci",
+ .match_table = of_pci_phb_ids,
+ .probe = of_pci_phb_probe,
+};
+
+static __init int of_pci_phb_init(void)
+{
+ return of_register_platform_driver(&of_pci_phb_driver);
+}
+
+device_initcall(of_pci_phb_init);
+
+#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
--- /dev/null
+/*
+ * arch/microblaze/kernel/opb_intc.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/xparameters.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00 /* Interrupt Status Register */
+#define IPR 0x04 /* Interrupt Pending Register */
+#define IER 0x08 /* Interrupt Enable Register */
+#define IAR 0x0c /* Interrupt Acknowledge Register */
+#define SIE 0x10 /* Set Interrupt Enable bits */
+#define CIE 0x14 /* Clear Interrupt Enable bits */
+#define IVR 0x18 /* Interrupt Vector Register */
+#define MER 0x1c /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+#define BASE_ADDR XPAR_INTC_0_BASEADDR
+
+/* Unmask one interrupt line via the Set Interrupt Enable register. */
+static void opb_intc_enable(unsigned int irq)
+{
+ unsigned long mask = (0x00000001 << (irq & 31));
+ pr_debug("enable: %d\n", irq);
+ iowrite32(mask, BASE_ADDR + SIE);
+}
+
+/* Mask one interrupt line via the Clear Interrupt Enable register. */
+static void opb_intc_disable(unsigned int irq)
+{
+ unsigned long mask = (0x00000001 << (irq & 31));
+ pr_debug("disable: %d\n", irq);
+ iowrite32(mask, BASE_ADDR + CIE);
+}
+
+/* .ack handler: mask the line, and for edge-triggered sources also
+ * acknowledge it immediately so a new edge can latch. Level sources
+ * are acked later in opb_intc_end(), once the handler has run. */
+static void opb_intc_disable_and_ack(unsigned int irq)
+{
+ unsigned long mask = (0x00000001 << (irq & 31));
+ pr_debug("disable_and_ack: %d\n", irq);
+ iowrite32(mask, BASE_ADDR + CIE);
+ if (!(irq_desc[irq].status & IRQ_LEVEL))
+ iowrite32(mask, BASE_ADDR + IAR); /* ack edge triggered intr */
+}
+
+/* .end handler: re-enable the line (unless disabled/in progress) and
+ * ack level-sensitive sources now that the handler has completed. */
+static void opb_intc_end(unsigned int irq)
+{
+ unsigned long mask = (0x00000001 << (irq & 31));
+
+ pr_debug("end: %d\n", irq);
+ if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ iowrite32(mask, BASE_ADDR + SIE);
+ /* ack level sensitive intr */
+ if (irq_desc[irq].status & IRQ_LEVEL)
+ iowrite32(mask, BASE_ADDR + IAR);
+ }
+}
+
+/* NOTE(review): identifier "obp_intc" transposes the letters of the
+ * device name (opb); consider renaming in a follow-up (also used in
+ * init_IRQ below). */
+static struct irq_chip obp_intc = {
+ .name = "OPB Interrupt Controller",
+ .enable = opb_intc_enable,
+ .disable = opb_intc_disable,
+ .ack = opb_intc_disable_and_ack,
+ .end = opb_intc_end,
+};
+
+/*
+ * get_irq - read the highest-priority pending interrupt from INTC #0
+ *
+ * Returns the interrupt number, or -1 (as unsigned: -1U, matching the
+ * BUG_ON(irq == -1U) check in do_IRQ) when nothing is pending.
+ */
+unsigned int get_irq(struct pt_regs *regs)
+{
+ int irq;
+
+ /*
+ * NOTE: This function is the one that needs to be improved in
+ * order to handle multiple interrupt controllers. It currently
+ * is hardcoded to check for interrupts only on the first INTC.
+ */
+
+ irq = ioread32(BASE_ADDR + IVR);
+
+ /* If no interrupt is pending then all bits of the IVR are set to 1, as
+ * the IVR is only as many bits wide as there are interrupt inputs.
+ * Therefore, if all bits of the IVR are set to one, its content will
+ * be bigger than XPAR_INTC_MAX_NUM_INTR_INPUTS.
+ */
+ if (irq >= XPAR_INTC_MAX_NUM_INTR_INPUTS)
+ irq = -1; /* report no pending interrupt. */
+
+ pr_debug("get_irq: %d\n", irq);
+
+ return irq;
+}
+
+/*
+ * init_IRQ - initialize the OPB interrupt controller
+ *
+ * Masks and acks everything, turns on the master/hardware enable, then
+ * marks each irq_desc edge- or level-triggered according to the
+ * per-input XPAR_INTC_0_KIND_OF_INTR bitmask (bit set = edge).
+ */
+void __init init_IRQ(void)
+{
+ int i;
+
+ printk(KERN_INFO "OPB INTC #0 at 0x%08lX\n",
+ (unsigned long) BASE_ADDR);
+
+ /*
+ * Disable all external interrupts until they are
+ * explicitly requested.
+ */
+ iowrite32(0, BASE_ADDR + IER);
+
+ /* Acknowledge any pending interrupts just in case. */
+ iowrite32(0xffffffff, BASE_ADDR + IAR);
+
+ /* Turn on the Master Enable. */
+ iowrite32(MER_HIE|MER_ME, BASE_ADDR + MER);
+
+ for (i = 0; i < NR_IRQS; ++i) {
+ irq_desc[i].chip = &obp_intc;
+
+ if (XPAR_INTC_0_KIND_OF_INTR & (0x00000001 << i))
+ irq_desc[i].status &= ~IRQ_LEVEL;
+ else
+ irq_desc[i].status |= IRQ_LEVEL;
+ }
+}
+
+/* Mask all interrupts very early in boot, before init_IRQ runs. */
+void irq_early_init(void)
+{
+ iowrite32(0, BASE_ADDR + IER);
+}
+
--- /dev/null
+/*
+ * arch/microblaze/kernel/opb_timer.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/irq.h>
+#include <asm/io.h>
+#include <asm/xparameters.h>
+
+#define BASE_ADDR XPAR_TIMER_0_BASEADDR
+
+#define TCSR0 (0x00)
+#define TLR0 (0x04)
+#define TCR0 (0x08)
+#define TCSR1 (0x10)
+#define TLR1 (0x14)
+#define TCR1 (0x18)
+
+#define TCSR_MDT (1<<0)
+#define TCSR_UDT (1<<1)
+#define TCSR_GENT (1<<2)
+#define TCSR_CAPT (1<<3)
+#define TCSR_ARHT (1<<4)
+#define TCSR_LOAD (1<<5)
+#define TCSR_ENIT (1<<6)
+#define TCSR_ENT (1<<7)
+#define TCSR_TINT (1<<8)
+#define TCSR_PWMA (1<<9)
+#define TCSR_ENALL (1<<10)
+
+extern void heartbeat(void);
+
+/* Clear the timer interrupt by writing TCSR0 back to itself
+ * (the TINT bit is write-one-to-clear). */
+static void timer_ack(void)
+{
+ iowrite32(ioread32(BASE_ADDR + TCSR0), BASE_ADDR + TCSR0);
+}
+
+/*
+ * timer_interrupt - system tick handler
+ *
+ * Acks the hardware, then advances jiffies/process accounting and the
+ * profiler under xtime_lock.
+ */
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ heartbeat();
+
+ timer_ack();
+
+ write_seqlock(&xtime_lock);
+
+ do_timer(1);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
+
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "timer",
+};
+
+/*
+ * system_timer_init - program OPB timer 0 as the HZ tick source
+ *
+ * Loads one tick's worth of CPU clocks, configures the timer as an
+ * auto-reload down-counter with interrupt enabled, and installs
+ * timer_irqaction on its interrupt line.
+ */
+void system_timer_init(void)
+{
+ /* set the initial value to the load register */
+ iowrite32(XPAR_CPU_CLOCK_FREQ/HZ, BASE_ADDR + TLR0);
+
+ /* load the initial value */
+ iowrite32(TCSR_LOAD, BASE_ADDR + TCSR0);
+
+ /* see opb timer data sheet for detail
+ * !ENALL - don't enable 'em all
+ * !PWMA - disable pwm
+ * TINT - clear interrupt status
+ * ENT - enable timer itself
+ * ENIT - enable interrupt
+ * !LOAD - clear the bit to let go
+ * ARHT - auto reload
+ * !CAPT - no external trigger
+ * !GENT - no external signal
+ * UDT - set the timer as down counter
+ * !MDT0 - generate mode
+ *
+ */
+ iowrite32(TCSR_TINT|TCSR_ENT|TCSR_ENIT|TCSR_ARHT|TCSR_UDT, BASE_ADDR + TCSR0);
+
+ setup_irq(XPAR_TIMER_0_IRQ, &timer_irqaction);
+}
+
+/*
+ * do_gettimeoffset - time elapsed since the last tick, in microseconds
+ * (elapsed clocks divided by clocks-per-microsecond).
+ */
+unsigned long do_gettimeoffset(void)
+{
+ /* Current counter value */
+ unsigned int tcr=ioread32(BASE_ADDR + TCR0);
+
+ /* Load register value (counting down) */
+ unsigned int tcmp=ioread32(BASE_ADDR + TLR0);
+
+ /* Offset, in microseconds */
+ unsigned long offset =(tcmp-tcr)/(XPAR_CPU_CLOCK_FREQ/1000000);
+
+ return offset;
+}
+
--- /dev/null
+/*
+ * arch/microblaze/kernel/platform.c
+ *
+ * Copyright 2007 Xilinx, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/resource.h>
+#include <linux/xilinx_devices.h>
+#include <linux/serial_8250.h>
+#include <linux/serial.h>
+#include <asm/xparameters.h>
+#include <asm/io.h>
+#include <asm/of_platform.h>
+
+#ifdef XPAR_SPI_0_BASEADDR
+
+/* SPI controller capabilities derived from the hardware build
+ * (FIFO presence, slave-only mode, number of slave-select bits). */
+static struct xspi_platform_data xspi_0_pdata = {
+ .device_flags = (XPAR_SPI_0_FIFO_EXIST ? XSPI_HAS_FIFOS : 0) |
+ (XPAR_SPI_0_SPI_SLAVE_ONLY ? XSPI_SLAVE_ONLY : 0),
+ .num_slave_bits = XPAR_SPI_0_NUM_SS_BITS
+};
+
+/* Platform device for SPI instance 0: one MMIO region + one IRQ. */
+static struct platform_device xilinx_spi_0_device = {
+ .name = "xilinx_spi",
+ .id = XPAR_SPI_0_DEVICE_ID,
+ .dev.platform_data = &xspi_0_pdata,
+ .num_resources = 2,
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_SPI_0_BASEADDR,
+ .end = XPAR_SPI_0_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .start = XPAR_INTC_0_SPI_0_VEC_ID,
+ .end = XPAR_INTC_0_SPI_0_VEC_ID,
+ .flags = IORESOURCE_IRQ
+ }
+ }
+};
+
+#endif /* XPAR_SPI_0_BASEADDR */
+
+/*
+ * EMAC: shortcut macro for single instance
+ *
+ * Expands to a complete platform_device initializer for EMAC instance
+ * <num>: MMIO + IRQ resources and an xemac_platform_data built from the
+ * instance's XPAR_ feature macros. The MAC address is a locally
+ * administered placeholder. (No comments inside the macro body — the
+ * backslash continuations must stay last on each line.)
+ */
+#define XPAR_EMAC(num) { \
+ .name = "xilinx_emac", \
+ .id = num, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_EMAC_##num##_BASEADDR, \
+ .end = XPAR_EMAC_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = XPAR_EMAC_##num##_IRQ, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+ .dev.platform_data = &(struct xemac_platform_data) { \
+ .dma_mode = XPAR_EMAC_##num##_DMA_PRESENT, \
+ .has_mii = XPAR_EMAC_##num##_MII_EXIST, \
+ .has_cam = XPAR_EMAC_##num##_CAM_EXIST, \
+ .has_err_cnt = XPAR_EMAC_##num##_ERR_COUNT_EXIST, \
+ .has_jumbo = XPAR_EMAC_##num##_JUMBO_EXIST, \
+ .tx_dre = XPAR_EMAC_##num##_TX_DRE_TYPE, \
+ .rx_dre = XPAR_EMAC_##num##_RX_DRE_TYPE, \
+ .tx_hw_csum = XPAR_EMAC_##num##_TX_INCLUDE_CSUM, \
+ .rx_hw_csum = XPAR_EMAC_##num##_RX_INCLUDE_CSUM, \
+ /* locally administered default address */ \
+ .mac_addr = {0x00, 0x0A, 0x35, 5, 5, 5}, \
+ }, \
+}
+
+#ifdef XPAR_EMAC_0_BASEADDR
+static struct platform_device xilinx_emac_0_device = XPAR_EMAC(0);
+#endif
+
+#ifdef XPAR_GPIO_0_BASEADDR
+
+/* GPIO platform devices 0..n: one MMIO region, plus an IRQ resource only
+ * when the instance was built with interrupt support.
+ * NOTE(review): every instance assigns a plain integer
+ * (XGPIO_IS_DUAL flag or 0) to the void* .dev.platform_data field —
+ * this draws an int-to-pointer conversion warning and, unlike the SPI
+ * device above, passes no real platform_data struct; consider a cast or
+ * a proper pdata structure. These blocks are also copy-paste clones
+ * that a respin could generate with a macro, like XPAR_EMAC() above. */
+static struct platform_device xilinx_gpio_0_device = {
+ .name = "xilinx_gpio",
+ .id = 0,
+ .dev.platform_data = (XPAR_GPIO_0_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_0_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_0_BASEADDR,
+ .end = XPAR_GPIO_0_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_0_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_0_IRQ,
+ .end = XPAR_GPIO_0_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_0_BASEADDR */
+
+#ifdef XPAR_GPIO_1_BASEADDR
+
+static struct platform_device xilinx_gpio_1_device = {
+ .name = "xilinx_gpio",
+ .id = 1,
+ .dev.platform_data = (XPAR_GPIO_1_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_1_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_1_BASEADDR,
+ .end = XPAR_GPIO_1_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_1_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_1_IRQ,
+ .end = XPAR_GPIO_1_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_1_BASEADDR */
+
+#ifdef XPAR_GPIO_2_BASEADDR
+
+static struct platform_device xilinx_gpio_2_device = {
+ .name = "xilinx_gpio",
+ .id = 2,
+ .dev.platform_data = (XPAR_GPIO_2_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_2_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_2_BASEADDR,
+ .end = XPAR_GPIO_2_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_2_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_2_IRQ,
+ .end = XPAR_GPIO_2_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_2_BASEADDR */
+
+#ifdef XPAR_GPIO_3_BASEADDR
+
+static struct platform_device xilinx_gpio_3_device = {
+ .name = "xilinx_gpio",
+ .id = 3,
+ .dev.platform_data = (XPAR_GPIO_3_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_3_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_3_BASEADDR,
+ .end = XPAR_GPIO_3_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_3_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_3_IRQ,
+ .end = XPAR_GPIO_3_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_3_BASEADDR */
+
+#ifdef XPAR_GPIO_4_BASEADDR
+
+static struct platform_device xilinx_gpio_4_device = {
+ .name = "xilinx_gpio",
+ .id = 4,
+ .dev.platform_data = (XPAR_GPIO_4_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_4_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_4_BASEADDR,
+ .end = XPAR_GPIO_4_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_4_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_4_IRQ,
+ .end = XPAR_GPIO_4_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_4_BASEADDR */
+
+#ifdef XPAR_GPIO_5_BASEADDR
+
+static struct platform_device xilinx_gpio_5_device = {
+ .name = "xilinx_gpio",
+ .id = 5,
+ .dev.platform_data = (XPAR_GPIO_5_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_5_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_5_BASEADDR,
+ .end = XPAR_GPIO_5_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_5_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_5_IRQ,
+ .end = XPAR_GPIO_5_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_5_BASEADDR */
+
+#ifdef XPAR_GPIO_6_BASEADDR
+
+static struct platform_device xilinx_gpio_6_device = {
+ .name = "xilinx_gpio",
+ .id = 6,
+ .dev.platform_data = (XPAR_GPIO_6_IS_DUAL ? XGPIO_IS_DUAL : 0),
+#if XPAR_GPIO_6_INTERRUPT_PRESENT
+ .num_resources = 2,
+#else
+ .num_resources = 1,
+#endif
+ .resource = (struct resource[]) {
+ {
+ .start = XPAR_GPIO_6_BASEADDR,
+ .end = XPAR_GPIO_6_HIGHADDR,
+ .flags = IORESOURCE_MEM
+ },
+#if XPAR_GPIO_6_INTERRUPT_PRESENT
+ {
+ .start = XPAR_GPIO_6_IRQ,
+ .end = XPAR_GPIO_6_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+#endif
+ }
+};
+
+#endif /* XPAR_GPIO_6_BASEADDR */
+
+#if defined(XPAR_OPB_UART16550_0_BASEADDR) || defined(XPAR_OPB_UART16550_1_BASEADDR)
+#define XPAR_HAVE_UART16550
+#endif
+
+#if defined(XPAR_HAVE_UART16550) && defined(CONFIG_SERIAL_8250)
+
+/* 8250-compatible uart_port for OPB UART16550 instance <num>.
+ * mapbase offset 0x1003: the Xilinx core places the 16550 registers
+ * at base + 0x1000, 4 bytes apart (regshift = 2), with the byte lane
+ * at offset 3 — matching the documented reg-offset = 3 / reg-shift = 2
+ * requirements for this core.  UART clock is the CPU/bus clock. */
+#define XPAR_UART(num) { \
+ .mapbase = XPAR_OPB_UART16550_##num##_BASEADDR + 0x1003, \
+ .irq = XPAR_OPB_INTC_0_OPB_UART16550_##num##_IRQ, \
+ .iotype = UPIO_MEM, \
+ .uartclk = XPAR_CPU_CLOCK_FREQ, \
+ .flags = UPF_BOOT_AUTOCONF, \
+ .regshift = 2, \
+ }
+
+/* Table of present UART instances, walked by uart_16550_early_init();
+ * the all-zero sentinel entry ends the scan (flags == 0). */
+static struct uart_port xilinx_16550_port[] = {
+#ifdef XPAR_OPB_UART16550_0_BASEADDR
+ XPAR_UART(0),
+#endif
+#ifdef XPAR_OPB_UART16550_1_BASEADDR
+ XPAR_UART(1),
+#endif
+ { }, /* terminated by empty record */
+};
+
+#endif /* defined(XPAR_HAVE_UART16550) && defined(CONFIG_SERIAL_8250) */
+
+/* Map and register each 16550 port with the 8250 core early enough for
+ * an early console.  No-op unless a UART16550 is configured in. */
+void __init uart_16550_early_init(void)
+{
+#if defined(XPAR_HAVE_UART16550) && defined(CONFIG_SERIAL_8250)
+ int i;
+
+ /* Walk until the zeroed sentinel entry (flags == 0). */
+ for (i = 0; xilinx_16550_port[i].flags; i++) {
+
+ /* NOTE(review): ioremap() return value is not checked; a
+ * failure here would hand a NULL membase to the 8250 core. */
+ xilinx_16550_port[i].membase = ioremap(xilinx_16550_port[i].mapbase, 0x100);
+
+ if (early_serial_setup(&xilinx_16550_port[i]) != 0) {
+ printk("Early serial init of port %d failed\n", i);
+ }
+
+ }
+#endif /* defined(XPAR_HAVE_UART16550) && defined(CONFIG_SERIAL_8250) */
+}
+
+#ifdef CONFIG_DEVICE_TREE
+/* Bus node compatibles whose children should be probed as
+ * of_platform devices (see of_platform_bus_probe() below). */
+static struct of_device_id xilinx_of_bus_ids[] = {
+ { .compatible = "simple-bus", },
+ { .compatible = "xlnx,plb-v46-1.00.a", },
+ { .compatible = "xlnx,plb-v34-1.01.a", },
+ { .compatible = "xlnx,plb-v34-1.02.a", },
+ { .compatible = "xlnx,opb-v20-1.10.c", },
+ { .compatible = "xlnx,dcr-v29-1.00.a", },
+ { .compatible = "xlnx,compound", },
+ {},
+};
+#endif
+
+/* Register the on-chip peripherals.  With CONFIG_DEVICE_TREE the set of
+ * devices comes from the flattened device tree; otherwise the statically
+ * built platform_device tables above (selected by XPAR_* defines) are
+ * registered one by one. */
+static int __init xilinx_platform_init(void)
+{
+#ifdef CONFIG_DEVICE_TREE
+ /* NULL root: probe from the top of the tree. */
+ of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+#else
+
+#ifdef XPAR_SPI_0_BASEADDR
+ platform_device_register(&xilinx_spi_0_device);
+#endif /* XPAR_SPI_0_BASEADDR */
+
+/* EMAC instances */
+#if defined(XPAR_EMAC_0_BASEADDR)
+ platform_device_register(&xilinx_emac_0_device);
+#endif
+
+#ifdef XPAR_GPIO_0_BASEADDR
+ platform_device_register(&xilinx_gpio_0_device);
+#endif /* XPAR_GPIO_0_BASEADDR */
+#ifdef XPAR_GPIO_1_BASEADDR
+ platform_device_register(&xilinx_gpio_1_device);
+#endif /* XPAR_GPIO_1_BASEADDR */
+#ifdef XPAR_GPIO_2_BASEADDR
+ platform_device_register(&xilinx_gpio_2_device);
+#endif /* XPAR_GPIO_2_BASEADDR */
+#ifdef XPAR_GPIO_3_BASEADDR
+ platform_device_register(&xilinx_gpio_3_device);
+#endif /* XPAR_GPIO_3_BASEADDR */
+#ifdef XPAR_GPIO_4_BASEADDR
+ platform_device_register(&xilinx_gpio_4_device);
+#endif /* XPAR_GPIO_4_BASEADDR */
+#ifdef XPAR_GPIO_5_BASEADDR
+ platform_device_register(&xilinx_gpio_5_device);
+#endif /* XPAR_GPIO_5_BASEADDR */
+#ifdef XPAR_GPIO_6_BASEADDR
+ platform_device_register(&xilinx_gpio_6_device);
+#endif /* XPAR_GPIO_6_BASEADDR */
+
+#endif /* CONFIG_DEVICE_TREE */
+ return 0;
+}
+
+subsys_initcall(xilinx_platform_init);
--- /dev/null
+/*
+ * arch/microblaze/kernel/process.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+
+/* FIXME */
+/* Dump a register frame to the console (oops/backtrace helper).
+ * r1 doubles as the stack pointer on microblaze; the "flags" line
+ * actually prints MSR.  The loop prints r4..r31 four per line,
+ * assuming r4..r31 are laid out contiguously in struct pt_regs
+ * — TODO confirm against the pt_regs definition. */
+void show_regs(struct pt_regs *regs)
+{
+ unsigned long *p;
+ int i;
+ printk("pc:\t0x%08lx\tsp:\t0x%08lx\n", regs->pc, regs->r1);
+ printk("flags:\t0x%08lx\tear:\t0x%08lx\tesr:\t0x%08lx\tfsr:\t0x%08lx\n",
+ regs->msr, regs->ear, regs->esr, regs->fsr);
+ printk("r0:\t0x%08lx\tr1:\t0x%08lx\tr2:\t0x%08lx\tr3\t0x%08lx\n",
+ 0L, regs->r1, regs->r2, regs->r3);
+ for(i=4,p=&(regs->r4);i<32;i+=4,p+=4) {
+ printk("r%i:\t0x%08lx\tr%i:\t0x%08lx\tr%i:\t0x%08lx\tr%i:\t0x%08lx\n",
+ i,*p, i+1,*(p+1),i+2,*(p+2),i+3,*(p+3));
+ }
+ printk("\n");
+}
+
+/* Board/platform hook invoked on power-off; none installed by default. */
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+/* Idle loop: busy-poll need_resched() (TIF_POLLING_NRFLAG tells the
+ * scheduler it need not send an IPI to wake us), then schedule. */
+void cpu_idle(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ while (1) {
+ while (!need_resched()) {
+ cpu_relax();
+ }
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
+/* Nothing to flush on exec for this (no-MMU, no-FPU-state) port. */
+void flush_thread(void)
+{
+}
+
+/* Set up the child's kernel stack and saved context for fork/clone.
+ * The parent's register frame is copied to the top of the child's
+ * stack; r1 (stack pointer) is redirected to usp for user forks or to
+ * the top of the child's kernel stack for kernel threads.  The child
+ * resumes at ret_from_fork; the -8 bias presumably compensates for the
+ * return-address offset of the microblaze return instruction
+ * (rtsd r15, 8) — TODO confirm. */
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+ struct thread_info *ti = task_thread_info(p);
+ extern void ret_from_fork(void);
+
+ *childregs = *regs;
+
+ if (user_mode(regs))
+ childregs->r1 = usp;
+ else
+ childregs->r1 = ((unsigned long) ti) + THREAD_SIZE;
+
+ memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
+ ti->cpu_context.sp = (unsigned long)childregs;
+ ti->cpu_context.msr = (unsigned long)childregs->msr;
+ ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
+
+ /* TLS via CLONE_SETTLS is not implemented for this port yet. */
+ if (clone_flags & CLONE_SETTLS)
+ ;/* FIXME: not sure what to do */
+
+ return 0;
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ * FIXME this needs to be checked
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ /* Saved context lives at the base of the thread's kernel stack. */
+ struct cpu_context *ctx=&(((struct thread_info *)(tsk->stack))->cpu_context);
+
+ /* Check whether the thread is blocked in resume() */
+ if (in_sched_functions(ctx->r15))
+ return ((unsigned long)ctx->r15);
+ else
+ return ctx->r14;
+}
+
+/* Entry trampoline for kernel threads: run fn(arg), then exit.
+ * Reached via the r5/r6/pc values staged by kernel_thread() below. */
+static void kernel_thread_helper(int (*fn)(void *), void *arg)
+{
+ fn(arg);
+ do_exit(-1);
+}
+
+/* Create a kernel thread executing fn(arg).
+ * A zeroed pt_regs frame is staged so the child starts in
+ * kernel_thread_helper with fn in r5 and arg in r6 (the first two
+ * argument registers); the current MSR supplies the child's flags.
+ * Returns the pid from do_fork(), or a negative errno.
+ * (Fix: "&regs" had been mangled to the "registered" sign by an
+ * HTML-entity corruption, which did not compile.) */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ struct pt_regs regs;
+ int ret;
+
+ memset(&regs, 0, sizeof(regs));
+ /* store them in non-volatile registers */
+ regs.r5 = (unsigned long)fn;
+ regs.r6 = (unsigned long)arg;
+ local_save_flags(regs.msr);
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.kernel_mode = 1;
+
+ ret = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+
+ return ret;
+}
+
+/* Report where a sleeping task is blocked (for /proc/<pid>/wchan).
+ * Not implemented yet: always returns 0 ("unknown"). */
+unsigned long get_wchan(struct task_struct *p)
+{
+/* TBD (used by procfs) */
+ return 0;
+}
--- /dev/null
+/*
+ * Procedures for creating, accessing and interpreting the device tree.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/pci.h>
+#include <asm/sections.h>
+#include <asm/pci-bridge.h>
+
+/* Debug trace macro for this file only; compiled out unless DEBUG. */
+#ifdef DEBUG
+#define DBG(fmt...) printk(KERN_ERR fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+extern char command_line[COMMAND_LINE_SIZE];
+extern u32 boot_cpuid;
+
+/* Root-node #address-cells / #size-cells, cached by
+ * early_init_dt_scan_root() for the memory-node scan. */
+static int __initdata dt_root_addr_cells;
+static int __initdata dt_root_size_cells;
+
+/* One cell of the flattened tree is a big-endian 32-bit word. */
+typedef u32 cell_t;
+
+#if 0
+static struct boot_param_header *initial_boot_params __initdata;
+#else
+/* Pointer to the flattened device-tree blob handed over at boot. */
+struct boot_param_header *initial_boot_params;
+#endif
+
+extern struct device_node *allnodes; /* temporary while merging */
+
+extern rwlock_t devtree_lock; /* temporary while merging */
+
+/* export that to outside world */
+struct device_node *of_chosen;
+
+/* Resolve a string-block offset from the blob into a C string. */
+static inline char *find_flat_dt_string(u32 offset)
+{
+ return ((char *)initial_boot_params) +
+ initial_boot_params->off_dt_strings + offset;
+}
+
+/**
+ * This function is used to scan the flattened device-tree, it is
+ * used to extract the memory informations at boot before we can
+ * unflatten the tree
+ */
+/* Walks the structure block token by token, invoking it(node, unit
+ * name, depth, data) once per node; stops early if the callback
+ * returns non-zero, and returns that value (0 = scanned to the end,
+ * -EINVAL on a malformed blob). */
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data)
+{
+ unsigned long p = ((unsigned long)initial_boot_params) +
+ initial_boot_params->off_dt_struct;
+ int rc = 0;
+ int depth = -1;
+
+ do {
+ u32 tag = *((u32 *)p);
+ char *pathp;
+
+ p += 4;
+ if (tag == OF_DT_END_NODE) {
+ depth --;
+ continue;
+ }
+ if (tag == OF_DT_NOP)
+ continue;
+ if (tag == OF_DT_END)
+ break;
+ if (tag == OF_DT_PROP) {
+ /* Skip the property: length word, name offset, then
+ * the padded value.  Pre-v16 blobs align values >= 8
+ * bytes on an 8-byte boundary. */
+ u32 sz = *((u32 *)p);
+ p += 8;
+ if (initial_boot_params->version < 0x10)
+ p = _ALIGN(p, sz >= 8 ? 8 : 4);
+ p += sz;
+ p = _ALIGN(p, 4);
+ continue;
+ }
+ if (tag != OF_DT_BEGIN_NODE) {
+ printk(KERN_WARNING "Invalid tag %x scanning flattened"
+ " device tree !\n", tag);
+ return -EINVAL;
+ }
+ depth++;
+ pathp = (char *)p;
+ p = _ALIGN(p + strlen(pathp) + 1, 4);
+ /* Old (full-path) blobs: strip to the last path component so
+ * callbacks always see just the unit name. */
+ if ((*pathp) == '/') {
+ char *lp, *np;
+ for (lp = NULL, np = pathp; *np; np++)
+ if ((*np) == '/')
+ lp = np+1;
+ if (lp != NULL)
+ pathp = lp;
+ }
+ rc = it(p, pathp, depth, data);
+ if (rc != 0)
+ break;
+ } while(1);
+
+ return rc;
+}
+
+/* Return the offset of the root node's first property/child token,
+ * i.e. a "node" handle usable with of_get_flat_dt_prop().
+ * Skips leading NOPs and the root's (empty) name. */
+unsigned long __init of_get_flat_dt_root(void)
+{
+ unsigned long p = ((unsigned long)initial_boot_params) +
+ initial_boot_params->off_dt_struct;
+
+ while(*((u32 *)p) == OF_DT_NOP)
+ p += 4;
+ BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
+ p += 4;
+ return _ALIGN(p + strlen((char *)p) + 1, 4);
+}
+
+/**
+ * This function can be used within scan_flattened_dt callback to get
+ * access to properties
+ */
+/* Linear search of the node's property list for @name.  Returns a
+ * pointer to the value inside the blob (and its length via *size when
+ * non-NULL), or NULL once a non-property token ends the list. */
+void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size)
+{
+ unsigned long p = node;
+
+ do {
+ u32 tag = *((u32 *)p);
+ u32 sz, noff;
+ const char *nstr;
+
+ p += 4;
+ if (tag == OF_DT_NOP)
+ continue;
+ if (tag != OF_DT_PROP)
+ return NULL;
+
+ /* Property header: value length, then name offset into the
+ * strings block. */
+ sz = *((u32 *)p);
+ noff = *((u32 *)(p + 4));
+ p += 8;
+ if (initial_boot_params->version < 0x10)
+ p = _ALIGN(p, sz >= 8 ? 8 : 4);
+
+ nstr = find_flat_dt_string(noff);
+ if (nstr == NULL) {
+ printk(KERN_WARNING "Can't find property index"
+ " name !\n");
+ return NULL;
+ }
+ if (strcmp(name, nstr) == 0) {
+ if (size)
+ *size = sz;
+ return (void *)p;
+ }
+ p += sz;
+ p = _ALIGN(p, 4);
+ } while(1);
+}
+
+/* Test whether @compat appears in the node's "compatible" property.
+ * The property is a list of NUL-terminated strings; matching is
+ * case-insensitive on the prefix of each entry.  Returns 1 on match. */
+int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
+{
+ const char* cp;
+ unsigned long cplen, l;
+
+ cp = of_get_flat_dt_prop(node, "compatible", &cplen);
+ if (cp == NULL)
+ return 0;
+ while (cplen > 0) {
+ if (strncasecmp(cp, compat, strlen(compat)) == 0)
+ return 1;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ }
+
+ return 0;
+}
+
+/* Bump allocator used by unflatten_dt_node(): aligns *mem, carves off
+ * @size bytes and advances the cursor.  In the sizing pass *mem starts
+ * at 0, so the final cursor value is the total size required. */
+static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+ unsigned long align)
+{
+ void *res;
+
+ *mem = _ALIGN(*mem, align);
+ res = (void *)*mem;
+ *mem += size;
+
+ return res;
+}
+
+/* Recursively convert one flat-tree node (and its children) into
+ * struct device_node form.  Runs in two passes sharing this code:
+ * with allnextpp == NULL nothing is written and the return value only
+ * accumulates the required allocation size; with allnextpp set the
+ * nodes and properties are actually built at *mem.  @p is the cursor
+ * into the blob (advanced past the node); @fpsize tracks the length of
+ * the full path being rebuilt for v16+ blobs. */
+static unsigned long __init unflatten_dt_node(unsigned long mem,
+ unsigned long *p,
+ struct device_node *dad,
+ struct device_node ***allnextpp,
+ unsigned long fpsize)
+{
+ struct device_node *np;
+ struct property *pp, **prev_pp = NULL;
+ char *pathp;
+ u32 tag;
+ unsigned int l, allocl;
+ int has_name = 0;
+ int new_format = 0;
+
+ tag = *((u32 *)(*p));
+ if (tag != OF_DT_BEGIN_NODE) {
+ printk("Weird tag at start of node: %x\n", tag);
+ return mem;
+ }
+ *p += 4;
+ pathp = (char *)*p;
+ l = allocl = strlen(pathp) + 1;
+ *p = _ALIGN(*p + l, 4);
+
+ /* version 0x10 has a more compact unit name here instead of the full
+ * path. we accumulate the full path size using "fpsize", we'll rebuild
+ * it later. We detect this because the first character of the name is
+ * not '/'.
+ */
+ if ((*pathp) != '/') {
+ new_format = 1;
+ if (fpsize == 0) {
+ /* root node: special case. fpsize accounts for path
+ * plus terminating zero. root node only has '/', so
+ * fpsize should be 2, but we want to avoid the first
+ * level nodes to have two '/' so we use fpsize 1 here
+ */
+ fpsize = 1;
+ allocl = 2;
+ } else {
+ /* account for '/' and path size minus terminal 0
+ * already in 'l'
+ */
+ fpsize += l;
+ allocl = fpsize;
+ }
+ }
+
+
+ /* Node and its full_name string are allocated as one chunk. */
+ np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
+ __alignof__(struct device_node));
+ if (allnextpp) {
+ memset(np, 0, sizeof(*np));
+ np->full_name = ((char*)np) + sizeof(struct device_node);
+ if (new_format) {
+ char *p = np->full_name;
+ /* rebuild full path for new format */
+ if (dad && dad->parent) {
+ strcpy(p, dad->full_name);
+#ifdef DEBUG
+ if ((strlen(p) + l + 1) != allocl) {
+ DBG("%s: p: %d, l: %d, a: %d\n",
+ pathp, (int)strlen(p), l, allocl);
+ }
+#endif
+ p += strlen(p);
+ }
+ *(p++) = '/';
+ memcpy(p, pathp, l);
+ } else
+ memcpy(np->full_name, pathp, l);
+ prev_pp = &np->properties;
+ /* Thread the node onto the global allnodes list and under
+ * its parent. */
+ **allnextpp = np;
+ *allnextpp = &np->allnext;
+ if (dad != NULL) {
+ np->parent = dad;
+ /* we temporarily use the next field as `last_child'*/
+ if (dad->next == 0)
+ dad->child = np;
+ else
+ dad->next->sibling = np;
+ dad->next = np;
+ }
+ kref_init(&np->kref);
+ }
+ /* Consume this node's property tokens. */
+ while(1) {
+ u32 sz, noff;
+ char *pname;
+
+ tag = *((u32 *)(*p));
+ if (tag == OF_DT_NOP) {
+ *p += 4;
+ continue;
+ }
+ if (tag != OF_DT_PROP)
+ break;
+ *p += 4;
+ sz = *((u32 *)(*p));
+ noff = *((u32 *)((*p) + 4));
+ *p += 8;
+ if (initial_boot_params->version < 0x10)
+ *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
+
+ pname = find_flat_dt_string(noff);
+ if (pname == NULL) {
+ printk("Can't find property name in list !\n");
+ break;
+ }
+ if (strcmp(pname, "name") == 0)
+ has_name = 1;
+ l = strlen(pname) + 1;
+ pp = unflatten_dt_alloc(&mem, sizeof(struct property),
+ __alignof__(struct property));
+ if (allnextpp) {
+ /* Phandle properties also populate the node's
+ * handle fields; values point into the blob. */
+ if (strcmp(pname, "linux,phandle") == 0) {
+ np->node = *((u32 *)*p);
+ if (np->linux_phandle == 0)
+ np->linux_phandle = np->node;
+ }
+ if (strcmp(pname, "ibm,phandle") == 0)
+ np->linux_phandle = *((u32 *)*p);
+ pp->name = pname;
+ pp->length = sz;
+ pp->value = (void *)*p;
+ *prev_pp = pp;
+ prev_pp = &pp->next;
+ }
+ *p = _ALIGN((*p) + sz, 4);
+ }
+ /* with version 0x10 we may not have the name property, recreate
+ * it here from the unit name if absent
+ */
+ if (!has_name) {
+ /* Name = unit name between the last '/' and the '@'. */
+ char *p = pathp, *ps = pathp, *pa = NULL;
+ int sz;
+
+ while (*p) {
+ if ((*p) == '@')
+ pa = p;
+ if ((*p) == '/')
+ ps = p + 1;
+ p++;
+ }
+ if (pa < ps)
+ pa = p;
+ sz = (pa - ps) + 1;
+ pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
+ __alignof__(struct property));
+ if (allnextpp) {
+ pp->name = "name";
+ pp->length = sz;
+ pp->value = pp + 1;
+ *prev_pp = pp;
+ prev_pp = &pp->next;
+ memcpy(pp->value, ps, sz - 1);
+ ((char *)pp->value)[sz - 1] = 0;
+ DBG("fixed up name for %s -> %s\n", pathp,
+ (char *)pp->value);
+ }
+ }
+ if (allnextpp) {
+ *prev_pp = NULL;
+ np->name = of_get_property(np, "name", NULL);
+ np->type = of_get_property(np, "device_type", NULL);
+
+ if (!np->name)
+ np->name = "<NULL>";
+ if (!np->type)
+ np->type = "<NULL>";
+ }
+ /* Recurse into children until this node's END_NODE token. */
+ while (tag == OF_DT_BEGIN_NODE) {
+ mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+ tag = *((u32 *)(*p));
+ }
+ if (tag != OF_DT_END_NODE) {
+ printk("Weird tag at end of node: %x\n", tag);
+ return mem;
+ }
+ *p += 4;
+ return mem;
+}
+
+/**
+ * unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used (this used to be done by finish_device_tree)
+ */
+void __init unflatten_device_tree(void)
+{
+ unsigned long start, mem, size;
+ struct device_node **allnextp = &allnodes;
+
+ DBG(" -> unflatten_device_tree()\n");
+
+ /* First pass, scan for size */
+ start = ((unsigned long)initial_boot_params) +
+ initial_boot_params->off_dt_struct;
+ size = unflatten_dt_node(0, &start, NULL, NULL, 0);
+ size = (size | 3) + 1; /* round up to a 4-byte multiple */
+
+ DBG(" size is %lx, allocating...\n", size);
+
+ /* Allocate memory for the expanded device tree */
+ mem = lmb_alloc(size + 4, __alignof__(struct device_node));
+ mem = (unsigned long) __va(mem);
+
+ /* Guard word after the buffer to catch a sizing-pass mismatch. */
+ ((u32 *)mem)[size / 4] = 0xdeadbeef;
+
+ DBG(" unflattening %lx...\n", mem);
+
+ /* Second pass, do actual unflattening */
+ start = ((unsigned long)initial_boot_params) +
+ initial_boot_params->off_dt_struct;
+ unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
+ if (*((u32 *)start) != OF_DT_END)
+ printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
+ if (((u32 *)mem)[size / 4] != 0xdeadbeef)
+ printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
+ ((u32 *)mem)[size / 4] );
+ *allnextp = NULL;
+
+ /* Get pointer to OF "/chosen" node for use everywhere */
+ of_chosen = of_find_node_by_path("/chosen");
+ if (of_chosen == NULL)
+ of_chosen = of_find_node_by_path("/chosen@0");
+
+ DBG(" <- unflatten_device_tree()\n");
+}
+
+/* of_scan_flat_dt() callback: identify the boot CPU among the "cpu"
+ * nodes and record its logical id in boot_cpuid.  Physical ids come
+ * from "ibm,ppc-interrupt-server#s" (one per hardware thread) or,
+ * failing that, "reg".  Always returns 0 so the whole tree is scanned.
+ * (Fix: removed the unused local `prop`, which drew a compiler
+ * warning.) */
+static int __init early_init_dt_scan_cpus(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ static int logical_cpuid = 0;
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const u32 *intserv;
+ int i, nthreads;
+ unsigned long len;
+ int found = 0;
+
+ /* We are scanning "cpu" nodes only */
+ if (type == NULL || strcmp(type, "cpu") != 0)
+ return 0;
+
+ /* Get physical cpuid */
+ intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
+ if (intserv) {
+ nthreads = len / sizeof(int);
+ } else {
+ intserv = of_get_flat_dt_prop(node, "reg", NULL);
+ nthreads = 1;
+ }
+
+ /*
+ * Now see if any of these threads match our boot cpu.
+ * NOTE: This must match the parsing done in smp_setup_cpu_maps.
+ */
+ for (i = 0; i < nthreads; i++) {
+ /*
+ * version 2 of the kexec param format adds the phys cpuid of
+ * booted proc.
+ */
+ if (initial_boot_params && initial_boot_params->version >= 2) {
+ if (intserv[i] ==
+ initial_boot_params->boot_cpuid_phys) {
+ found = 1;
+ break;
+ }
+ } else {
+ /*
+ * Check if it's the boot-cpu, set it's hw index now,
+ * unfortunately this format did not support booting
+ * off secondary threads.
+ */
+ if (of_get_flat_dt_prop(node,
+ "linux,boot-cpu", NULL) != NULL) {
+ found = 1;
+ break;
+ }
+ }
+
+#ifdef CONFIG_SMP
+ /* logical cpu id is always 0 on UP kernels */
+ logical_cpuid++;
+#endif
+ }
+
+ if (found) {
+ DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
+ intserv[i]);
+ boot_cpuid = logical_cpuid;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/* Pick up the initrd location from /chosen's linux,initrd-start/-end
+ * properties.  Both must be present: start is reset to 0 if the end
+ * property is missing. */
+static void __init early_init_dt_check_for_initrd(unsigned long node)
+{
+ unsigned long l;
+ u32 *prop;
+
+ DBG("Looking for initrd properties... ");
+
+ prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
+ if (prop) {
+ initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));
+
+ prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
+ if (prop) {
+ initrd_end = (unsigned long)
+ __va(of_read_ulong(prop, l/4));
+ initrd_below_start_ok = 1;
+ } else {
+ initrd_start = 0;
+ }
+ }
+
+ DBG("initrd_start=0x%lx initrd_end=0x%lx\n", initrd_start, initrd_end);
+}
+#else
+/* No initrd support configured: nothing to do. */
+static inline void early_init_dt_check_for_initrd(unsigned long node)
+{
+}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+/* of_scan_flat_dt() callback for the /chosen node: picks up the
+ * crashkernel window (kexec), the initrd location, and the kernel
+ * command line ("bootargs", with CONFIG_CMDLINE as fallback).
+ * Returns 1 once /chosen has been handled to stop the scan.
+ * (Fix: lprop was declared `unsigned long *` yet assigned and used as
+ * a u64 pointer — an incompatible-pointer assignment that would also
+ * read a truncated value on 32-bit; declare it u64 *.) */
+static int __init early_init_dt_scan_chosen(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ u64 *lprop;
+ unsigned long l;
+ char *p;
+
+ DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+
+ if (depth != 1 ||
+ (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
+ return 0;
+
+#ifdef CONFIG_KEXEC
+ lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
+ if (lprop)
+ crashk_res.start = *lprop;
+
+ lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
+ if (lprop)
+ crashk_res.end = crashk_res.start + *lprop - 1;
+#endif
+
+ early_init_dt_check_for_initrd(node);
+
+ /* Retrieve command line */
+ p = of_get_flat_dt_prop(node, "bootargs", &l);
+ if (p != NULL && l > 0)
+ strlcpy(command_line, p, min((int)l, COMMAND_LINE_SIZE));
+
+#ifdef CONFIG_CMDLINE
+ /* Fall back to the built-in command line if bootargs is empty. */
+ if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
+ strlcpy(command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif /* CONFIG_CMDLINE */
+
+ DBG("Command line is: %s\n", command_line);
+
+ /* break now */
+ return 1;
+}
+
+/* of_scan_flat_dt() callback for the root node: cache its
+ * #size-cells / #address-cells for the memory-node scan, with the
+ * traditional defaults (1 and 2) when the properties are absent.
+ * Returns 1 to stop the scan after the root. */
+static int __init early_init_dt_scan_root(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ u32 *prop;
+
+ if (depth != 0)
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
+ dt_root_size_cells = (prop == NULL) ? 1 : *prop;
+ DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
+
+ prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
+ dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
+ DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
+
+ /* break now */
+ return 1;
+}
+
+/* Read the next @s-cell value from *cellp and advance the cursor. */
+static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
+{
+ cell_t *p = *cellp;
+
+ *cellp = p + s;
+ return of_read_ulong(p, s);
+}
+
+
+/* of_scan_flat_dt() callback for memory nodes: parse each
+ * (base, size) pair from "linux,usable-memory" (preferred) or "reg"
+ * and register it with the LMB allocator.  Cell counts come from the
+ * root node, cached by early_init_dt_scan_root().  Returns 0 so every
+ * memory node in the tree is visited.
+ * (Fix: the two dt_mem_next_cell() calls had "&reg" mangled into the
+ * "registered" sign by an HTML-entity corruption, which did not
+ * compile.) */
+static int __init early_init_dt_scan_memory(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ cell_t *reg, *endp;
+ unsigned long l;
+
+ /* We are scanning "memory" nodes only */
+ if (type == NULL) {
+ /*
+ * The longtrail doesn't have a device_type on the
+ * /memory node, so look for the node called /memory@0.
+ */
+ if (depth != 1 || strcmp(uname, "memory@0") != 0)
+ return 0;
+ } else if (strcmp(type, "memory") != 0)
+ return 0;
+
+ reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+ if (reg == NULL)
+ reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
+ if (reg == NULL)
+ return 0;
+
+ endp = reg + (l / sizeof(cell_t));
+
+ DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
+ uname, l, reg[0], reg[1], reg[2], reg[3]);
+
+ while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+ unsigned long base, size;
+
+ base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+ if (size == 0)
+ continue;
+ DBG(" - %lx , %lx\n", base, size);
+
+ lmb_add(base, size);
+ }
+ return 0;
+}
+
+/* Reserve in LMB everything that must not be handed to the allocator:
+ * the device-tree blob itself, the initrd (if any), and every entry in
+ * the blob's memory-reserve map (a (base, size) list terminated by a
+ * zero size). */
+static void __init early_reserve_mem(void)
+{
+ u64 base, size;
+ u64 *reserve_map;
+ unsigned long self_base;
+ unsigned long self_size;
+
+ reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
+ initial_boot_params->off_mem_rsvmap);
+
+ /* before we do anything, lets reserve the dt blob */
+ self_base = __pa((unsigned long)initial_boot_params);
+ self_size = initial_boot_params->totalsize;
+ lmb_reserve(self_base, self_size);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* then reserve the initrd, if any */
+ if (initrd_start && (initrd_end > initrd_start))
+ lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ while (1) {
+ base = *(reserve_map++);
+ size = *(reserve_map++);
+ if (size == 0)
+ break;
+ DBG("reserving: %llx -> %llx\n", base, size);
+ lmb_reserve(base, size);
+ }
+
+#if 0
+ DBG("memory reserved, lmbs :\n");
+ lmb_dump_all();
+#endif
+}
+
+/* Early boot entry point for device-tree processing: record the blob
+ * pointer, scan /chosen, build the LMB memory map from the memory
+ * nodes, parse early parameters and identify the boot CPU.  Several
+ * reservation steps are still commented out pending porting. */
+void __init early_init_devtree(void *params)
+{
+ DBG(" -> early_init_devtree(%p)\n", params);
+
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /* Retrieve various informations from the /chosen node of the
+ * device-tree, including the platform type, initrd location and
+ * size, TCE reserve, and more ...
+ */
+ of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+
+ /* Scan memory nodes and rebuild LMBs */
+ lmb_init();
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ /* Save command line for /proc/cmdline and then parse parameters */
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ parse_early_param();
+
+/* /\* Reserve LMB regions used by kernel, initrd, dt, etc... *\/ */
+/* lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); */
+/* reserve_kdump_trampoline(); */
+/* reserve_crashkernel(); */
+/* early_reserve_mem(); */
+
+/* lmb_enforce_memory_limit(memory_limit); */
+ lmb_analyze();
+
+ DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
+
+ /* We may need to relocate the flat tree, do it now.
+ * FIXME .. and the initrd too? */
+/* move_device_tree(); */
+
+ DBG("Scanning CPUs ...\n");
+
+ /* Retrieve CPU related informations from the flat tree
+ * (altivec support, boot CPU ID, ...)
+ */
+ of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+
+ DBG(" <- early_init_devtree()\n");
+}
+
+/**
+ * Indicates whether the root node has a given value in its
+ * compatible property.
+ */
+/* Returns non-zero on a match; 0 if the root node is missing or does
+ * not list @compat.  The root reference is dropped before returning. */
+int machine_is_compatible(const char *compat)
+{
+ struct device_node *root;
+ int rc = 0;
+
+ root = of_find_node_by_path("/");
+ if (root) {
+ rc = of_device_is_compatible(root, compat);
+ of_node_put(root);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(machine_is_compatible);
+
+/*******
+ *
+ * New implementation of the OF "find" APIs, return a refcounted
+ * object, call of_node_put() when done. The device tree and list
+ * are protected by a rw_lock.
+ *
+ * Note that property management will need some locking as well,
+ * this isn't dealt with yet.
+ *
+ *******/
+
+
+/**
+ * of_find_node_by_phandle - Find a node given a phandle
+ * @handle: phandle of the node to find
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_phandle(phandle handle)
+{
+ struct device_node *np;
+
+ /* Linear search of the global allnodes list under the read lock;
+ * of_node_get(NULL) is a safe no-op if nothing matched. */
+ read_lock(&devtree_lock);
+ for (np = allnodes; np != 0; np = np->allnext)
+ if (np->linux_phandle == handle)
+ break;
+ of_node_get(np);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_node_by_phandle);
+
+/**
+ * of_find_all_nodes - Get next node in global list
+ * @prev: Previous node or NULL to start iteration
+ * of_node_put() will be called on it
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_all_nodes(struct device_node *prev)
+{
+ struct device_node *np;
+
+ read_lock(&devtree_lock);
+ np = prev ? prev->allnext : allnodes;
+ for (; np != 0; np = np->allnext)
+ if (of_node_get(np))
+ break;
+ of_node_put(prev);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_all_nodes);
+
+/**
+ * of_node_get - Increment refcount of a node
+ * @node: Node to inc refcount, NULL is supported to
+ * simplify writing of callers
+ *
+ * Returns node.
+ */
+struct device_node *of_node_get(struct device_node *node)
+{
+ if (node)
+ kref_get(&node->kref);
+ return node;
+}
+EXPORT_SYMBOL(of_node_get);
+
+/* Map an embedded kref back to its containing device_node. */
+static inline struct device_node * kref_to_device_node(struct kref *kref)
+{
+ return container_of(kref, struct device_node, kref);
+}
+
+/**
+ * of_node_release - release a dynamically allocated node
+ * @kref: kref element of the node to be released
+ *
+ * In of_node_put() this function is passed to kref_put()
+ * as the destructor.
+ */
+static void of_node_release(struct kref *kref)
+{
+ struct device_node *node = kref_to_device_node(kref);
+ struct property *prop = node->properties;
+
+ /* We should never be releasing nodes that haven't been detached. */
+ if (!of_node_check_flag(node, OF_DETACHED)) {
+ printk("WARNING: Bad of_node_put() on %s\n", node->full_name);
+ dump_stack();
+ /* Re-arm the refcount so the node stays alive. */
+ kref_init(&node->kref);
+ return;
+ }
+
+ /* Statically allocated (unflattened) nodes are never freed. */
+ if (!of_node_check_flag(node, OF_DYNAMIC))
+ return;
+
+ /* Free the live property list, then drain the dead-property list
+ * (prom_remove_property moves entries there instead of freeing). */
+ while (prop) {
+ struct property *next = prop->next;
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+ prop = next;
+
+ if (!prop) {
+ prop = node->deadprops;
+ node->deadprops = NULL;
+ }
+ }
+ kfree(node->full_name);
+ kfree(node->data);
+ kfree(node);
+}
+
+/**
+ * of_node_put - Decrement refcount of a node
+ * @node: Node to dec refcount, NULL is supported to
+ * simplify writing of callers
+ *
+ */
+void of_node_put(struct device_node *node)
+{
+ if (node)
+ kref_put(&node->kref, of_node_release);
+}
+EXPORT_SYMBOL(of_node_put);
+
+/*
+ * Plug a device node into the tree and global list.
+ */
+/* Caller supplies np->parent; the node becomes the parent's first
+ * child and the head of the global allnodes list. */
+void of_attach_node(struct device_node *np)
+{
+ write_lock(&devtree_lock);
+ np->sibling = np->parent->child;
+ np->allnext = allnodes;
+ np->parent->child = np;
+ allnodes = np;
+ write_unlock(&devtree_lock);
+}
+
+/*
+ * "Unplug" a node from the device tree. The caller must hold
+ * a reference to the node. The memory associated with the node
+ * is not freed until its refcount goes to zero.
+ */
+void of_detach_node(struct device_node *np)
+{
+ struct device_node *parent;
+
+ write_lock(&devtree_lock);
+
+ parent = np->parent;
+ /* A parentless node (the root) is never detached. */
+ if (!parent)
+ goto out_unlock;
+
+ /* Unlink from the global allnodes list. */
+ if (allnodes == np)
+ allnodes = np->allnext;
+ else {
+ struct device_node *prev;
+ for (prev = allnodes;
+ prev->allnext != np;
+ prev = prev->allnext)
+ ;
+ prev->allnext = np->allnext;
+ }
+
+ /* Unlink from the parent's child list. */
+ if (parent->child == np)
+ parent->child = np->sibling;
+ else {
+ struct device_node *prevsib;
+ for (prevsib = np->parent->child;
+ prevsib->sibling != np;
+ prevsib = prevsib->sibling)
+ ;
+ prevsib->sibling = np->sibling;
+ }
+
+ /* Mark detached so of_node_release() will accept the final put. */
+ of_node_set_flag(np, OF_DETACHED);
+
+out_unlock:
+ write_unlock(&devtree_lock);
+}
+
+/*
+ * Add a property to a node.
+ * Appends @prop at the tail of @np's property list under devtree_lock.
+ * Returns 0 on success, -1 if a property with the same name already
+ * exists (the new property is not inserted).
+ */
+int prom_add_property(struct device_node* np, struct property* prop)
+{
+ struct property **next;
+
+ prop->next = NULL;
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0) {
+ /* duplicate ! don't insert it */
+ write_unlock(&devtree_lock);
+ return -1;
+ }
+ next = &(*next)->next;
+ }
+ *next = prop;
+ write_unlock(&devtree_lock);
+
+#ifdef CONFIG_PROC_DEVICETREE
+ /* try to add to proc as well if it was initialized */
+ if (np->pde)
+ proc_device_tree_add_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+ return 0;
+}
+
+/*
+ * Remove a property from a node. Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property. Instead we just move the property
+ * to the "dead properties" list, so it won't be found any more.
+ * Returns 0 on success, -ENODEV if @prop is not on @np's list.
+ */
+int prom_remove_property(struct device_node *np, struct property *prop)
+{
+ struct property **next;
+ int found = 0;
+
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (*next == prop) {
+ /* found the node: unlink and park on deadprops */
+ *next = prop->next;
+ prop->next = np->deadprops;
+ np->deadprops = prop;
+ found = 1;
+ break;
+ }
+ next = &(*next)->next;
+ }
+ write_unlock(&devtree_lock);
+
+ if (!found)
+ return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+ /* try to remove the proc node as well */
+ if (np->pde)
+ proc_device_tree_remove_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+ return 0;
+}
+
+/*
+ * Update a property in a node. Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property. Instead we just move the property
+ * to the "dead properties" list, and add the new property to the
+ * property list.
+ * Returns 0 on success, -ENODEV if @oldprop is not on @np's list.
+ */
+int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop)
+{
+ struct property **next;
+ int found = 0;
+
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (*next == oldprop) {
+ /* found it: splice in newprop, park oldprop */
+ newprop->next = oldprop->next;
+ *next = newprop;
+ oldprop->next = np->deadprops;
+ np->deadprops = oldprop;
+ found = 1;
+ break;
+ }
+ next = &(*next)->next;
+ }
+ write_unlock(&devtree_lock);
+
+ if (!found)
+ return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+ /* try to add to proc as well if it was initialized */
+ if (np->pde)
+ proc_device_tree_update_prop(np->pde, newprop, oldprop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+ return 0;
+}
+
+
+/* Find the device node for a given logical cpu number, also returns the cpu
+ * local thread number (index in ibm,interrupt-server#s) if relevant and
+ * asked for (non NULL)
+ *
+ * NOTE(review): the hard cpu id is taken from boot_cpuid regardless of
+ * the @cpu argument (the real mapping call is commented out), so this
+ * effectively only finds the boot CPU's node — confirm intended.
+ * Returns a node with its refcount held, or NULL if not found.
+ */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+{
+ int hardid;
+ struct device_node *np;
+
+ hardid = boot_cpuid; //get_hard_smp_processor_id(cpu);
+
+ for_each_node_by_type(np, "cpu") {
+ const u32 *intserv;
+ unsigned int plen, t;
+
+ /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
+ * fallback to "reg" property and assume no threads
+ */
+ intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
+ &plen);
+ if (intserv == NULL) {
+ const u32 *reg = of_get_property(np, "reg", NULL);
+ if (reg == NULL)
+ continue;
+ if (*reg == hardid) {
+ if (thread)
+ *thread = 0;
+ return np;
+ }
+ } else {
+ /* One entry per hardware thread; match any of them. */
+ plen /= sizeof(u32);
+ for (t = 0; t < plen; t++) {
+ if (hardid == intserv[t]) {
+ if (thread)
+ *thread = t;
+ return np;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_cpu_node);
+
+#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
+/* Wrapper handing the flat device tree blob to debugfs. */
+static struct debugfs_blob_wrapper flat_dt_blob;
+
+/*
+ * Expose the flattened device tree as a read-only debugfs blob at
+ * <debugfs>/microblaze/flat-device-tree.  Returns 0 on success,
+ * 1 if either debugfs entry could not be created.
+ */
+static int __init export_flat_device_tree(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("microblaze", NULL);
+ if (!d)
+ return 1;
+
+ flat_dt_blob.data = initial_boot_params;
+ flat_dt_blob.size = initial_boot_params->totalsize;
+
+ d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
+ d, &flat_dt_blob);
+ if (!d)
+ return 1;
+
+ return 0;
+}
+__initcall(export_flat_device_tree);
+#endif
--- /dev/null
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/pci_regs.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/etherdevice.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+
+#ifdef CONFIG_PPC64
+#define PRu64 "%lx"
+#else
+#define PRu64 "%llx"
+#endif
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
+ (ns) > 0)
+
+static struct of_bus *of_match_bus(struct device_node *np);
+static int __of_address_to_resource(struct device_node *dev,
+ const u32 *addrp, u64 size, unsigned int flags,
+ struct resource *r);
+
+
+/* Debug utility: print @na address cells prefixed by @s (DEBUG builds
+ * only; compiles to an empty stub otherwise). */
+#ifdef DEBUG
+static void of_dump_addr(const char *s, const u32 *addr, int na)
+{
+ printk("%s", s);
+ while(na--)
+ printk(" %08x", *(addr++));
+ printk("\n");
+}
+#else
+static void of_dump_addr(const char *s, const u32 *addr, int na) { }
+#endif
+
+
+/* Callbacks for bus specific translators */
+struct of_bus {
+ const char *name; /* bus type name ("pci", "isa", "default") */
+ const char *addresses; /* property holding addresses ("reg" or "assigned-addresses") */
+ int (*match)(struct device_node *parent); /* NULL matches anything */
+ void (*count_cells)(struct device_node *child,
+ int *addrc, int *sizec); /* #address-cells / #size-cells */
+ u64 (*map)(u32 *addr, const u32 *range,
+ int na, int ns, int pna); /* offset of addr in a ranges entry, or OF_BAD_ADDR */
+ int (*translate)(u32 *addr, u64 offset, int na); /* rebase addr into parent space */
+ unsigned int (*get_flags)(const u32 *addr); /* IORESOURCE_* flags for addr */
+};
+
+
+/*
+ * Default translator (generic bus)
+ */
+
+/* Cell counts come straight from #address-cells / #size-cells. */
+static void of_bus_default_count_cells(struct device_node *dev,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = of_n_addr_cells(dev);
+ if (sizec)
+ *sizec = of_n_size_cells(dev);
+}
+
+/* Return the offset of @addr within one "ranges" entry, or OF_BAD_ADDR
+ * when the address falls outside [child-base, child-base + size). */
+static u64 of_bus_default_map(u32 *addr, const u32 *range,
+ int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ cp = of_read_number(range, na);
+ s = of_read_number(range + na + pna, ns);
+ da = of_read_number(addr, na);
+
+ pr_debug("OF: default map, cp="PRu64", s="PRu64", da="PRu64"\n",
+ cp, s, da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+/* Rewrite @addr in place as (addr + offset), big-endian across @na cells. */
+static int of_bus_default_translate(u32 *addr, u64 offset, int na)
+{
+ u64 a = of_read_number(addr, na);
+ memset(addr, 0, na * 4);
+ a += offset;
+ if (na > 1)
+ addr[na - 2] = a >> 32;
+ addr[na - 1] = a & 0xffffffffu;
+
+ return 0;
+}
+
+/* Generic bus addresses are always memory-mapped. */
+static unsigned int of_bus_default_get_flags(const u32 *addr)
+{
+ return IORESOURCE_MEM;
+}
+
+
+#ifdef CONFIG_PCI
+/*
+ * PCI bus specific translator
+ */
+
+static int of_bus_pci_match(struct device_node *np)
+{
+ /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
+}
+
+/* PCI addresses are always 3 cells (phys.hi/mid/lo), sizes 2 cells. */
+static void of_bus_pci_count_cells(struct device_node *np,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 3;
+ if (sizec)
+ *sizec = 2;
+}
+
+/* Map @addr through one PCI "ranges" entry; the space code in bits
+ * 24-25 of phys.hi must match, then the remaining two cells are a
+ * plain 64-bit range check. */
+static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ /* Check address type match */
+ if ((addr[0] ^ range[0]) & 0x03000000)
+ return OF_BAD_ADDR;
+
+ /* Read address values, skipping high cell */
+ cp = of_read_number(range + 1, na - 1);
+ s = of_read_number(range + na + pna, ns);
+ da = of_read_number(addr + 1, na - 1);
+
+ pr_debug("OF: PCI map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+/* Translate skips the phys.hi cell and defers to the generic add. */
+static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
+{
+ return of_bus_default_translate(addr + 1, offset, na - 1);
+}
+
+/* Decode phys.hi: space code -> IO/MEM, bit 30 -> prefetchable. */
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ switch((w >> 24) & 0x03) {
+ case 0x01:
+ flags |= IORESOURCE_IO;
+ break;
+ case 0x02: /* 32 bits */
+ case 0x03: /* 64 bits */
+ flags |= IORESOURCE_MEM;
+ break;
+ }
+ if (w & 0x40000000)
+ flags |= IORESOURCE_PREFETCH;
+ return flags;
+}
+
+/*
+ * of_get_pci_address - find the "assigned-addresses" entry for a BAR.
+ * @dev: PCI device node (its parent must be a pci bus node)
+ * @bar_no: BAR index; matched against the config-space offset encoded
+ * in the low byte of phys.hi (PCI_BASE_ADDRESS_0 + 4*bar_no)
+ * @size: if non-NULL, filled with the entry's size
+ * @flags: if non-NULL, filled with IORESOURCE_* flags
+ * Returns a pointer to the matching address cells, or NULL.
+ */
+const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+ unsigned int *flags)
+{
+ const u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ if (strcmp(bus->name, "pci")) {
+ of_node_put(parent);
+ return NULL;
+ }
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = of_get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ /* Scan (na+ns)-cell entries for the one matching this BAR. */
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+ if (size)
+ *size = of_read_number(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
+/*
+ * Convert the address of PCI BAR @bar of @dev into a struct resource.
+ * Returns 0 on success, -EINVAL if the BAR cannot be found or the
+ * address cannot be translated.
+ */
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ const u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_pci_address(dev, bar, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+/* Standard PCI INTx swizzle across a P2P bridge: rotate pin (1..4)
+ * by the device's slot number. */
+static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
+{
+ return (((pin - 1) + slot) % 4) + 1;
+}
+
+/*
+ * of_irq_map_pci - resolve the interrupt for a PCI device.
+ * If @pdev has its own device node, defer to of_irq_map_one().
+ * Otherwise walk up the PCI hierarchy, swizzling the INTx pin across
+ * node-less P2P bridges, until an ancestor with a device node is
+ * found, then hand off to of_irq_map_raw() with a 1-cell intspec.
+ * Returns 0 on success (filling @out_irq) or a negative errno.
+ */
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ u32 laddr[3];
+ u8 pin;
+ int rc;
+
+ /* Check if we have a device node, if yes, fallback to standard OF
+ * parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn)
+ return of_irq_map_one(dn, 0, out_irq);
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+ * interrupt spec. we assume #interrupt-cells is 1, which is standard
+ * for PCI. If you do different, then don't use that routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+#ifdef CONFIG_PPC64
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+#else
+ struct pci_controller *host;
+ host = pci_bus_to_host(pdev->bus);
+ ppnode = host ? host->arch_data : NULL;
+#endif
+ /* No node for host bridge ? give up */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the linux device to be used for
+ * resolution. Note that we use the linux bus number which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't include
+ * the bus number as part of the matching.
+ * You should still be careful about that though if you intend
+ * to rely on this function (you ship a firmware that doesn't
+ * create device nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node,
+ * let's do standard swizzling and try again
+ */
+ lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+ pdev = ppdev;
+ }
+
+ /* Unit address: bus number and devfn in phys.hi, rest zero. */
+ laddr[0] = (pdev->bus->number << 16)
+ | (pdev->devfn << 8);
+ laddr[1] = laddr[2] = 0;
+ return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
+#endif /* CONFIG_PCI */
+
+/*
+ * ISA bus specific translator
+ */
+
+static int of_bus_isa_match(struct device_node *np)
+{
+ return !strcmp(np->name, "isa");
+}
+
+/* ISA addresses are 2 cells (space + address), sizes 1 cell. */
+static void of_bus_isa_count_cells(struct device_node *child,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 2;
+ if (sizec)
+ *sizec = 1;
+}
+
+/* Map @addr through one ISA "ranges" entry; bit 0 of the first cell
+ * (I/O vs memory space) must match, then a plain range check. */
+static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ /* Check address type match */
+ if ((addr[0] ^ range[0]) & 0x00000001)
+ return OF_BAD_ADDR;
+
+ /* Read address values, skipping high cell */
+ cp = of_read_number(range + 1, na - 1);
+ s = of_read_number(range + na + pna, ns);
+ da = of_read_number(addr + 1, na - 1);
+
+ pr_debug("OF: ISA map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+/* Translate skips the space cell and defers to the generic add. */
+static int of_bus_isa_translate(u32 *addr, u64 offset, int na)
+{
+ return of_bus_default_translate(addr + 1, offset, na - 1);
+}
+
+/* Bit 0 of the first cell selects I/O (1) vs memory (0) space. */
+static unsigned int of_bus_isa_get_flags(const u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ if (w & 1)
+ flags |= IORESOURCE_IO;
+ else
+ flags |= IORESOURCE_MEM;
+ return flags;
+}
+
+
+/*
+ * Array of bus specific translators.
+ * Order matters: of_match_bus() returns the first match, and the
+ * "default" entry (NULL .match) must stay last as the catch-all.
+ */
+
+static struct of_bus of_busses[] = {
+#ifdef CONFIG_PCI
+ /* PCI */
+ {
+ .name = "pci",
+ .addresses = "assigned-addresses",
+ .match = of_bus_pci_match,
+ .count_cells = of_bus_pci_count_cells,
+ .map = of_bus_pci_map,
+ .translate = of_bus_pci_translate,
+ .get_flags = of_bus_pci_get_flags,
+ },
+#endif /* CONFIG_PCI */
+ /* ISA */
+ {
+ .name = "isa",
+ .addresses = "reg",
+ .match = of_bus_isa_match,
+ .count_cells = of_bus_isa_count_cells,
+ .map = of_bus_isa_map,
+ .translate = of_bus_isa_translate,
+ .get_flags = of_bus_isa_get_flags,
+ },
+ /* Default */
+ {
+ .name = "default",
+ .addresses = "reg",
+ .match = NULL,
+ .count_cells = of_bus_default_count_cells,
+ .map = of_bus_default_map,
+ .translate = of_bus_default_translate,
+ .get_flags = of_bus_default_get_flags,
+ },
+};
+
+/* Return the first of_busses[] entry matching @np.  Cannot fail in
+ * practice: the trailing "default" entry has a NULL .match and thus
+ * matches everything (the BUG() is unreachable). */
+static struct of_bus *of_match_bus(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
+ if (!of_busses[i].match || of_busses[i].match(np))
+ return &of_busses[i];
+ BUG();
+ return NULL;
+}
+
+/*
+ * Translate @addr one level up the tree, from @bus into @pbus space,
+ * using @parent's "ranges" property.  @addr is rewritten in place to
+ * pna cells.  Returns 0 on success, non-zero if no range matches.
+ */
+static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ struct of_bus *pbus, u32 *addr,
+ int na, int ns, int pna)
+{
+ const u32 *ranges;
+ unsigned int rlen;
+ int rone;
+ u64 offset = OF_BAD_ADDR;
+
+ /* Normally, an absence of a "ranges" property means we are
+ * crossing a non-translatable boundary, and thus the addresses
+ * below the current node cannot be converted to CPU physical ones.
+ * Unfortunately, while this is very clear in the spec, it's not
+ * what Apple understood, and they do have things like /uni-n or
+ * /ht nodes with no "ranges" property and a lot of perfectly
+ * useable mapped devices below them. Thus we treat the absence of
+ * "ranges" as equivalent to an empty "ranges" property which means
+ * a 1:1 translation at that level. It's up to the caller not to try
+ * to translate addresses that aren't supposed to be translated in
+ * the first place. --BenH.
+ */
+ ranges = of_get_property(parent, "ranges", &rlen);
+ if (ranges == NULL || rlen == 0) {
+ offset = of_read_number(addr, na);
+ memset(addr, 0, pna * 4);
+ pr_debug("OF: no ranges, 1:1 translation\n");
+ goto finish;
+ }
+
+ pr_debug("OF: walking ranges...\n");
+
+ /* Now walk through the ranges */
+ rlen /= 4;
+ rone = na + pna + ns;
+ for (; rlen >= rone; rlen -= rone, ranges += rone) {
+ offset = bus->map(addr, ranges, na, ns, pna);
+ if (offset != OF_BAD_ADDR)
+ break;
+ }
+ if (offset == OF_BAD_ADDR) {
+ pr_debug("OF: not found !\n");
+ return 1;
+ }
+ /* Copy the parent-side base of the matching range into addr. */
+ memcpy(addr, ranges + na, 4 * pna);
+
+ finish:
+ of_dump_addr("OF: parent translation for:", addr, pna);
+ pr_debug("OF: with offset: "PRu64"\n", offset);
+
+ /* Translate it into parent bus space */
+ return pbus->translate(addr, offset, pna);
+}
+
+
+/*
+ * Translate an address from the device-tree into a CPU physical address,
+ * this walks up the tree and applies the various bus mappings on the
+ * way.
+ *
+ * Note: We consider that crossing any level with #size-cells == 0 to mean
+ * that translation is impossible (that is we are not dealing with a value
+ * that can be mapped to a cpu physical address). This is not really specified
+ * that way, but this is traditionally the way IBM at least do things
+ *
+ * Returns the translated address, or OF_BAD_ADDR on failure.
+ */
+u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+{
+ struct device_node *parent = NULL;
+ struct of_bus *bus, *pbus;
+ u32 addr[OF_MAX_ADDR_CELLS];
+ int na, ns, pna, pns;
+ u64 result = OF_BAD_ADDR;
+
+ pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+
+ /* Increase refcount at current level */
+ of_node_get(dev);
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ goto bail;
+ bus = of_match_bus(parent);
+
+ /* Count address cells & copy address locally */
+ bus->count_cells(dev, &na, &ns);
+ if (!OF_CHECK_COUNTS(na, ns)) {
+ printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
+ dev->full_name);
+ goto bail;
+ }
+ memcpy(addr, in_addr, na * 4);
+
+ pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
+ bus->name, na, ns, parent->full_name);
+ of_dump_addr("OF: translating address:", addr, na);
+
+ /* Translate, one tree level per iteration */
+ for (;;) {
+ /* Switch to parent bus */
+ of_node_put(dev);
+ dev = parent;
+ parent = of_get_parent(dev);
+
+ /* If root, we have finished */
+ if (parent == NULL) {
+ pr_debug("OF: reached root node\n");
+ result = of_read_number(addr, na);
+ break;
+ }
+
+ /* Get new parent bus and counts */
+ pbus = of_match_bus(parent);
+ pbus->count_cells(dev, &pna, &pns);
+ if (!OF_CHECK_COUNTS(pna, pns)) {
+ printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
+ dev->full_name);
+ break;
+ }
+
+ pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
+ pbus->name, pna, pns, parent->full_name);
+
+ /* Apply bus translation */
+ if (of_translate_one(dev, bus, pbus, addr, na, ns, pna))
+ break;
+
+ /* Complete the move up one level */
+ na = pna;
+ ns = pns;
+ bus = pbus;
+
+ of_dump_addr("OF: one level translation:", addr, na);
+ }
+ bail:
+ of_node_put(parent);
+ of_node_put(dev);
+
+ return result;
+}
+EXPORT_SYMBOL(of_translate_address);
+
+/*
+ * of_get_address - fetch the @index'th address entry of @dev.
+ * The property scanned ("reg" or "assigned-addresses") and the cell
+ * counts depend on the parent bus type.  If non-NULL, @size and
+ * @flags are filled from the matching entry.  Returns a pointer to
+ * the address cells, or NULL if not found.
+ */
+const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
+ unsigned int *flags)
+{
+ const u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = of_get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ /* Step through (na+ns)-cell entries until we reach @index. */
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if (i == index) {
+ if (size)
+ *size = of_read_number(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_address);
+
+/*
+ * Translate @addrp and fill in @r as an IO or MEM resource.
+ * Returns 0 on success, -EINVAL if the flags carry neither IO nor MEM
+ * or the address cannot be translated.
+ * NOTE(review): the pio conversion is stubbed out (hard-coded -1), so
+ * any IORESOURCE_IO address currently always fails — confirm intended
+ * until pci_address_to_pio() is wired up on this arch.
+ */
+static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
+ u64 size, unsigned int flags,
+ struct resource *r)
+{
+ u64 taddr;
+
+ if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
+ return -EINVAL;
+ taddr = of_translate_address(dev, addrp);
+ if (taddr == OF_BAD_ADDR)
+ return -EINVAL;
+ memset(r, 0, sizeof(struct resource));
+ if (flags & IORESOURCE_IO) {
+ unsigned long port;
+ port = -1;//pci_address_to_pio(taddr);
+ if (port == (unsigned long)-1)
+ return -EINVAL;
+ r->start = port;
+ r->end = port + size - 1;
+ } else {
+ r->start = taddr;
+ r->end = taddr + size - 1;
+ }
+ r->flags = flags;
+ r->name = dev->name;
+ return 0;
+}
+
+/*
+ * Fill @r with the translated @index'th address of @dev.
+ * Returns 0 on success, -EINVAL if the entry is missing or cannot be
+ * translated.
+ */
+int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r)
+{
+ const u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_address(dev, index, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_address_to_resource);
+
+/*
+ * Decode an ibm,dma-window style property: one cell of bus number,
+ * then an address and a size whose cell counts come from the
+ * ibm,#dma-address-cells / ibm,#dma-size-cells properties (falling
+ * back to #address-cells / of_n_*_cells of @dn).
+ */
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size)
+{
+ const u32 *dma_window;
+ u32 cells;
+ const unsigned char *prop;
+
+ dma_window = dma_window_prop;
+
+ /* busno is always one cell */
+ *busno = *(dma_window++);
+
+ prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
+ if (!prop)
+ prop = of_get_property(dn, "#address-cells", NULL);
+
+ cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
+ *phys = of_read_number(dma_window, cells);
+
+ dma_window += cells;
+
+ prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
+ cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
+ *size = of_read_number(dma_window, cells);
+}
+
+/*
+ * Interrupt remapper
+ */
+
+/* Workaround flags passed to of_irq_map_init() (OF_IMAP_* bits). */
+static unsigned int of_irq_workarounds;
+/* Default interrupt controller used when phandles are unavailable. */
+static struct device_node *of_irq_dflt_pic;
+
+/*
+ * Find the interrupt parent of @child: follow "interrupt-parent"
+ * phandles (or the tree parent when absent) until a node carrying
+ * "#interrupt-cells" is found.  Consumes the caller's expectation of
+ * intermediate refs internally; returns the parent with a reference
+ * held, or NULL.
+ */
+static struct device_node *of_irq_find_parent(struct device_node *child)
+{
+ struct device_node *p;
+ const phandle *parp;
+
+ if (!of_node_get(child))
+ return NULL;
+
+ do {
+ parp = of_get_property(child, "interrupt-parent", NULL);
+ if (parp == NULL)
+ p = of_get_parent(child);
+ else {
+ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+ p = of_node_get(of_irq_dflt_pic);
+ else
+ p = of_find_node_by_phandle(*parp);
+ }
+ of_node_put(child);
+ child = p;
+ } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
+
+ return p;
+}
+
+/* This doesn't need to be called if you don't have any special workaround
+ * flags to pass.
+ * Stores @flags in of_irq_workarounds; with OF_IMAP_NO_PHANDLE it also
+ * scans the whole tree for a default interrupt controller node.
+ */
+void of_irq_map_init(unsigned int flags)
+{
+ of_irq_workarounds = flags;
+
+ /* OldWorld, don't bother looking at other things */
+ if (flags & OF_IMAP_OLDWORLD_MAC)
+ return;
+
+ /* If we don't have phandles, let's try to locate a default interrupt
+ * controller (happens when booting with BootX). We do a first match
+ * here, hopefully, that only ever happens on machines with one
+ * controller.
+ */
+ if (flags & OF_IMAP_NO_PHANDLE) {
+ struct device_node *np;
+
+ for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) {
+ if (of_get_property(np, "interrupt-controller", NULL)
+ == NULL)
+ continue;
+ /* Skip /chosen/interrupt-controller */
+ if (strcmp(np->name, "chosen") == 0)
+ continue;
+ /* It seems like at least one person on this planet wants
+ * to use BootX on a machine with an AppleKiwi controller
+ * which happens to pretend to be an interrupt
+ * controller too.
+ */
+ if (strcmp(np->name, "AppleKiwi") == 0)
+ continue;
+ /* I think we found one ! */
+ of_irq_dflt_pic = np;
+ break;
+ }
+ }
+
+}
+
+/**
+ * of_irq_map_raw - low level interrupt tree parsing.
+ * @parent: interrupt parent to start the walk from
+ * @intspec: interrupt specifier cells from the device
+ * @ointsize: number of cells in @intspec
+ * @addr: address specifier ("reg") of the device, may be NULL
+ * @out_irq: filled on success; holds a reference on out_irq->controller
+ *
+ * Walks up the interrupt tree, applying "interrupt-map" translations,
+ * until a node carrying "interrupt-controller" is reached.
+ * Returns 0 on success, -EINVAL on failure (all temporary node
+ * references dropped).
+ */
+int of_irq_map_raw(struct device_node *parent, const u32 *intspec, u32 ointsize,
+ const u32 *addr, struct of_irq *out_irq)
+{
+ struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
+ const u32 *tmp, *imap, *imask;
+ u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
+ int imaplen, match, i;
+
+ pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
+ parent->full_name, intspec[0], intspec[1], ointsize);
+
+ ipar = of_node_get(parent);
+
+ /* First get the #interrupt-cells property of the current cursor
+ * that tells us how to interpret the passed-in intspec. If there
+ * is none, we are nice and just walk up the tree
+ */
+ do {
+ tmp = of_get_property(ipar, "#interrupt-cells", NULL);
+ if (tmp != NULL) {
+ intsize = *tmp;
+ break;
+ }
+ tnode = ipar;
+ ipar = of_irq_find_parent(ipar);
+ of_node_put(tnode);
+ } while (ipar);
+ if (ipar == NULL) {
+ pr_debug(" -> no parent found !\n");
+ goto fail;
+ }
+
+ pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
+
+ /* Specifier size mismatch: bail through the common exit path so
+ * the reference held on ipar is dropped (a bare "return -EINVAL"
+ * here leaked it).
+ */
+ if (ointsize != intsize)
+ goto fail;
+
+ /* Look for this #address-cells. We have to implement the old linux
+ * trick of looking for the parent here as some device-trees rely on it
+ */
+ old = of_node_get(ipar);
+ do {
+ tmp = of_get_property(old, "#address-cells", NULL);
+ tnode = of_get_parent(old);
+ of_node_put(old);
+ old = tnode;
+ } while(old && tmp == NULL);
+ of_node_put(old);
+ old = NULL;
+ addrsize = (tmp == NULL) ? 2 : *tmp;
+
+ pr_debug(" -> addrsize=%d\n", addrsize);
+
+ /* Now start the actual "proper" walk of the interrupt tree */
+ while (ipar != NULL) {
+ /* Now check if cursor is an interrupt-controller and if it is
+ * then we are done
+ */
+ if (of_get_property(ipar, "interrupt-controller", NULL) !=
+ NULL) {
+ pr_debug(" -> got it !\n");
+ memcpy(out_irq->specifier, intspec,
+ intsize * sizeof(u32));
+ out_irq->size = intsize;
+ out_irq->controller = ipar;
+ of_node_put(old);
+ return 0;
+ }
+
+ /* Now look for an interrupt-map */
+ imap = of_get_property(ipar, "interrupt-map", &imaplen);
+ /* No interrupt map, check for an interrupt parent */
+ if (imap == NULL) {
+ pr_debug(" -> no map, getting parent\n");
+ newpar = of_irq_find_parent(ipar);
+ goto skiplevel;
+ }
+ imaplen /= sizeof(u32);
+
+ /* Look for a mask */
+ imask = of_get_property(ipar, "interrupt-map-mask", NULL);
+
+ /* If we were passed no "reg" property and we attempt to parse
+ * an interrupt-map, then #address-cells must be 0.
+ * Fail if it's not.
+ */
+ if (addr == NULL && addrsize != 0) {
+ pr_debug(" -> no reg passed in when needed !\n");
+ goto fail;
+ }
+
+ /* Parse interrupt-map */
+ match = 0;
+ while (imaplen > (addrsize + intsize + 1) && !match) {
+ /* Compare specifiers */
+ match = 1;
+ for (i = 0; i < addrsize && match; ++i) {
+ u32 mask = imask ? imask[i] : 0xffffffffu;
+ match = ((addr[i] ^ imap[i]) & mask) == 0;
+ }
+ for (; i < (addrsize + intsize) && match; ++i) {
+ u32 mask = imask ? imask[i] : 0xffffffffu;
+ match =
+ ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
+ }
+ imap += addrsize + intsize;
+ imaplen -= addrsize + intsize;
+
+ pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
+
+ /* Get the interrupt parent */
+ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+ newpar = of_node_get(of_irq_dflt_pic);
+ else
+ newpar = of_find_node_by_phandle((phandle)*imap);
+ imap++;
+ --imaplen;
+
+ /* Check if not found */
+ if (newpar == NULL) {
+ pr_debug(" -> imap parent not found !\n");
+ goto fail;
+ }
+
+ /* Get #interrupt-cells and #address-cells of new
+ * parent
+ */
+ tmp = of_get_property(newpar, "#interrupt-cells", NULL);
+ if (tmp == NULL) {
+ pr_debug(" -> parent lacks #interrupt-cells !\n");
+ goto fail;
+ }
+ newintsize = *tmp;
+ tmp = of_get_property(newpar, "#address-cells", NULL);
+ newaddrsize = (tmp == NULL) ? 0 : *tmp;
+
+ pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
+ newintsize, newaddrsize);
+
+ /* Check for malformed properties */
+ if (imaplen < (newaddrsize + newintsize))
+ goto fail;
+
+ imap += newaddrsize + newintsize;
+ imaplen -= newaddrsize + newintsize;
+
+ pr_debug(" -> imaplen=%d\n", imaplen);
+ }
+ if (!match)
+ goto fail;
+
+ /* Move the cursor: the matched map entry's specifier becomes
+ * the new intspec/addr for the next level.
+ */
+ of_node_put(old);
+ old = of_node_get(newpar);
+ addrsize = newaddrsize;
+ intsize = newintsize;
+ intspec = imap - intsize;
+ addr = intspec - addrsize;
+
+ skiplevel:
+ /* Iterate again with new parent */
+ pr_debug(" -> new parent: %s\n", newpar ? newpar->full_name : "<>");
+ of_node_put(ipar);
+ ipar = newpar;
+ newpar = NULL;
+ }
+ fail:
+ of_node_put(ipar);
+ of_node_put(old);
+ of_node_put(newpar);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(of_irq_map_raw);
+
+/**
+ * of_irq_map_one - resolve interrupt @index of @device into @out_irq.
+ * Reads the "interrupts" property, locates the interrupt parent, then
+ * hands the selected specifier to of_irq_map_raw().
+ * Returns 0 on success or a negative errno.
+ */
+int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
+{
+ struct device_node *p;
+ const u32 *intspec, *tmp, *addr;
+ u32 intsize, intlen;
+ int res;
+
+ pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
+
+ /* Get the interrupts property */
+ intspec = of_get_property(device, "interrupts", &intlen);
+ if (intspec == NULL)
+ return -EINVAL;
+ intlen /= sizeof(u32);
+
+ pr_debug(" intspec=%d intlen=%d\n", *intspec, intlen);
+
+ /* Get the reg property (if any) */
+ addr = of_get_property(device, "reg", NULL);
+
+ /* Look for the interrupt parent. */
+ p = of_irq_find_parent(device);
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Get size of interrupt specifier */
+ tmp = of_get_property(p, "#interrupt-cells", NULL);
+ if (tmp == NULL) {
+ of_node_put(p);
+ return -EINVAL;
+ }
+ intsize = *tmp;
+
+ pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
+
+ /* Check index; drop the parent reference on this error path too
+ * (it was previously leaked here).
+ */
+ if ((index + 1) * intsize > intlen) {
+ of_node_put(p);
+ return -EINVAL;
+ }
+
+ /* Get new specifier and map it */
+ res = of_irq_map_raw(p, intspec + index * intsize, intsize,
+ addr, out_irq);
+ of_node_put(p);
+ return res;
+}
+EXPORT_SYMBOL_GPL(of_irq_map_one);
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ *
+ * Returns a pointer to the 6-byte address, or NULL if none found.
+ */
+const void *of_get_mac_address(struct device_node *np)
+{
+ struct property *pp;
+
+ /* Each candidate must be exactly 6 bytes and a valid unicast MAC. */
+ pp = of_find_property(np, "mac-address", NULL);
+ if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+ return pp->value;
+
+ pp = of_find_property(np, "local-mac-address", NULL);
+ if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+ return pp->value;
+
+ pp = of_find_property(np, "address", NULL);
+ if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+ return pp->value;
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_mac_address);
+
+/*
+ * Resolve interrupt @index of @dev and optionally describe it in @r.
+ * Returns the irq number (first specifier cell — assumes a single
+ * interrupt controller) or NO_IRQ on failure.
+ */
+int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
+{
+ struct of_irq out_irq;
+ int irq;
+ int res;
+
+
+ res = of_irq_map_one(dev, index, &out_irq);
+
+ /* Get irq for the device */
+ if(res) {
+ pr_debug("IRQ not found... code = %d", res);
+ return NO_IRQ;
+ }
+
+ irq = out_irq.specifier[0]; // Assuming single interrupt controller...
+
+ pr_debug("IRQ found = %d", irq);
+
+ /* Only dereference the resource if both the
+ * resource and the irq are valid. */
+ if (r && irq != NO_IRQ) {
+ r->start = r->end = irq;
+ r->flags = IORESOURCE_IRQ;
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(of_irq_to_resource);
+
+/*
+ * ioremap the @index'th memory resource of @np.
+ * Returns the mapped address, or NULL if the resource cannot be
+ * resolved.  The caller is responsible for the eventual iounmap.
+ */
+void __iomem *of_iomap(struct device_node *np, int index)
+{
+ struct resource res;
+
+ if (of_address_to_resource(np, index, &res))
+ return NULL;
+
+ return ioremap(res.start, 1 + res.end - res.start);
+}
+EXPORT_SYMBOL(of_iomap);
--- /dev/null
+/*
+ * arch/microblaze/kernel/ptrace.c -- `ptrace' system call
+ *
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2004-07 John Williams <john.williams@petalogix.com>
+ *
+ * derived from arch/v850/kernel/ptrace.c
+ *
+ * Copyright (C) 2002,03 NEC Electronics Corporation
+ * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
+ *
+ * Derived from arch/mips/kernel/ptrace.c:
+ *
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/asm-offsets.h>
+
+#if 0
+#define DBPRINTK(...) printk(__VA_ARGS__)
+#else
+#define DBPRINTK(...)
+#endif
+
+/* Returns the address where the register at REG_OFFS in P is stashed away. */
+static microblaze_reg_t *reg_save_addr (unsigned reg_offs, struct task_struct *t)
+{
+ struct pt_regs *regs;
+
+ /* Three basic cases:
+
+ (1) A register normally saved before calling the scheduler, is
+ available in the kernel entry pt_regs structure at the top
+ of the kernel stack. The kernel trap/irq exit path takes
+ care to save/restore almost all registers for ptrace'd
+ processes.
+
+ (2) A call-clobbered register, where the process P entered the
+ kernel via [syscall] trap, is not stored anywhere; that's
+ OK, because such registers are not expected to be preserved
+ when the trap returns anyway (so we don't actually bother to
+ test for this case).
+
+ (3) A few registers not used at all by the kernel, and so
+ normally never saved except by context-switches, are in the
+ context switch state. */
+
+ /* Register saved during kernel entry (or not available). */
+ regs = task_pt_regs (t);
+
+ return (microblaze_reg_t *)((char *)regs + reg_offs);
+}
+
+/* Arch-specific ptrace(2) back-end for microblaze.
+ * CHILD is the traced task, REQUEST the ptrace operation; ADDR and
+ * DATA are interpreted per-request. Returns 0 or a negative errno. */
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+ int rval;
+
+ switch (request) {
+ unsigned long val, copied;
+
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ DBPRINTK("PEEKTEXT/PEEKDATA at %08lX\n",addr);
+ copied = access_process_vm(child, addr, &val, sizeof(val), 0);
+ rval = -EIO;
+ if (copied != sizeof(val))
+ break;
+ /* Word read from the child goes back via *data in userspace. */
+ rval = put_user(val, (unsigned long *)data);
+ goto out;
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ DBPRINTK("POKETEXT/POKEDATA to %08lX\n",addr);
+ rval = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1)
+ == sizeof(data))
+ break;
+ rval = -EIO;
+ goto out;
+
+ /* Read/write the word at location ADDR in the registers. */
+ case PTRACE_PEEKUSR:
+ case PTRACE_POKEUSR:
+ DBPRINTK("PEEKUSR/POKEUSR : 0x%08lx\n",addr);
+ rval = 0;
+ if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) {
+ /* Special requests that don't actually correspond
+ to offsets in struct pt_regs. */
+ if (addr == PT_TEXT_ADDR)
+ {
+ val = child->mm->start_code;
+ }
+ else if (addr == PT_DATA_ADDR)
+ {
+ val = child->mm->start_data;
+ }
+ else if (addr == PT_TEXT_LEN)
+ {
+ val = child->mm->end_code
+ - child->mm->start_code;
+ }
+ else
+ {
+ rval = -EIO;
+ }
+ } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
+ /* Plain pt_regs slot: word-aligned offset below PT_SIZE. */
+ microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
+ if (request == PTRACE_PEEKUSR)
+ {
+ val = *reg_addr;
+ }
+ else
+ {
+ *reg_addr = data;
+ }
+ } else
+ rval = -EIO;
+
+ if (rval == 0 && request == PTRACE_PEEKUSR)
+ {
+ rval = put_user (val, (unsigned long *)data);
+ }
+ goto out;
+
+ /* Continue and stop at next (return from) syscall */
+ case PTRACE_SYSCALL:
+ DBPRINTK("PTRACE_SYSCALL\n");
+ case PTRACE_SINGLESTEP:
+ DBPRINTK("PTRACE_SINGLESTEP\n");
+ /* Restart after a signal. */
+ case PTRACE_CONT:
+ DBPRINTK("PTRACE_CONT\n");
+ rval = -EIO;
+ if (!valid_signal(data))
+ break;
+
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+ /* DATA is the signal to (re)deliver to the child on resume. */
+ child->exit_code = data;
+ DBPRINTK("wakeup_process\n");
+ wake_up_process(child);
+ rval = 0;
+ break;
+
+ /*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL:
+ DBPRINTK("PTRACE_KILL\n");
+ rval = 0;
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ wake_up_process(child);
+ break;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ DBPRINTK("PTRACE_DETACH\n");
+ rval = ptrace_detach(child, data);
+ break;
+
+ default:
+ rval = -EIO;
+ goto out;
+ }
+ out:
+ return rval;
+}
+
+/* Called from the syscall entry/exit path when TIF_SYSCALL_TRACE is
+ * set: notify a ptrace()ing parent of the syscall stop. */
+asmlinkage void syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ /* The 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
+
+/* Called when detaching from CHILD; this port keeps no per-task
+ * debug state, so there is nothing to tear down. */
+void ptrace_disable (struct task_struct *child)
+{
+ /* nothing to do */
+}
--- /dev/null
+/*
+ * arch/xtensa/kernel/semaphore.c
+ *
+ * Generic semaphore code. Buyer beware. Do your own specific changes
+ * in <asm/semaphore-helper.h>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Kevin Chea
+ */
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ */
+
+/* Record one pending wakeup on SEM by bumping sem->sleepers; a waiter
+ * in waking_non_zero() will consume it under semaphore_wake_lock. */
+static __inline__ void wake_one_more(struct semaphore * sem)
+{
+ atomic_inc((atomic_t *)&sem->sleepers);
+}
+
+/* Try to consume a pending wakeup. Returns 1 if this waiter got the
+ * semaphore, 0 if it should go back to sleep. Serialized against the
+ * other waking_* helpers by semaphore_wake_lock. */
+static __inline__ int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ * 1 got the lock
+ * 0 go to sleep
+ * -EINTR interrupted
+ *
+ * We must undo the sem->count down_interruptible() increment while we are
+ * protected by the spinlock in order to make atomic this atomic_inc() with the
+ * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ */
+
+static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers > 0) {
+ /* A wakeup is pending: take it, we own the semaphore. */
+ sem->sleepers--;
+ ret = 1;
+ } else if (signal_pending(tsk)) {
+ /* Interrupted: give back the count down_interruptible()
+ * took, under the lock (see comment above). */
+ atomic_inc(&sem->count);
+ ret = -EINTR;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ * 1 failed to lock
+ * 0 got the lock
+ *
+ * We must undo the sem->count down_trylock() increment while we are
+ * protected by the spinlock in order to make atomic this atomic_inc() with the
+ * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ */
+
+static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 1;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->sleepers <= 0)
+ /* No wakeup pending: undo the down_trylock() decrement
+ * and report failure (ret stays 1). */
+ atomic_inc(&sem->count);
+ else {
+ sem->sleepers--;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/* Protects sem->sleepers accounting in the waking_* helpers above.
+ * DEFINE_SPINLOCK gives a correctly initialized (unlocked) lock,
+ * including the debug fields under CONFIG_DEBUG_SPINLOCK, which a
+ * bare zero-initialized definition does not. */
+DEFINE_SPINLOCK(semaphore_wake_lock);
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation.
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/* Slow path of up(): record one pending wakeup, then wake all
+ * sleepers; they race for it in waking_non_zero(). */
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+/* Shared boilerplate for the __down* slow paths below: declare the
+ * wait-queue entry, enqueue and loop until a wakeup is claimed, then
+ * dequeue. Used as DOWN_VAR; DOWN_HEAD(state) ... DOWN_TAIL(state). */
+#define DOWN_VAR \
+ struct task_struct *tsk = current; \
+ wait_queue_t wait; \
+ init_waitqueue_entry(&wait, tsk);
+
+#define DOWN_HEAD(task_state) \
+ \
+ \
+ tsk->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ tsk->state = (task_state); \
+ } \
+ tsk->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+/* Uninterruptible down() slow path: sleep until a wakeup from __up()
+ * is claimed; cannot be aborted by signals. */
+void __sched __down(struct semaphore * sem)
+{
+ DOWN_VAR
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+/* Interruptible down() slow path. Returns 0 when the semaphore was
+ * acquired, -EINTR when the sleep was broken by a signal. */
+int __sched __down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+ DOWN_VAR
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, tsk);
+ if (ret)
+ {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+/* Trylock slow path; returns 0 on acquire, 1 on failure. */
+int __down_trylock(struct semaphore * sem)
+{
+ return waking_non_zero_trylock(sem);
+}
--- /dev/null
+/*
+ * arch/microblaze/kernel/setup.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/bug.h>
+#include <asm/param.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/entry.h>
+#include <asm/cpuinfo.h>
+
+#if defined CONFIG_MTD_ATTACHED_ROMFS
+#include <linux/romfs_fs.h>
+#endif
+
+DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
+DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
+DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
+DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
+DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
+
+u32 boot_cpuid;
+EXPORT_SYMBOL_GPL(boot_cpuid);
+u32 memory_limit;
+EXPORT_SYMBOL_GPL(memory_limit);
+
+char __attribute ((weak)) _binary_arch_microblaze_kernel_system_dtb_start[];
+char __attribute ((weak)) _binary_arch_microblaze_kernel_system_dtb_end[];
+
+extern void early_printk(const char *fmt, ...);
+extern void irq_early_init(void);
+extern int __init setup_early_printk(char *opt);
+extern void __init paging_init(void);
+
+static char default_command_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+char command_line[COMMAND_LINE_SIZE];
+
+
+/* Main arch-level boot-time setup: CPU info, command line, caches,
+ * memory/paging and the system console. *CMDLINE_P is pointed at the
+ * kernel command line assembled by machine_early_init(). */
+void __init setup_arch(char **cmdline_p)
+{
+ setup_cpuinfo();
+ console_verbose();
+
+#ifdef CONFIG_DEVICE_TREE
+ early_init_devtree(_binary_arch_microblaze_kernel_system_dtb_start);
+ unflatten_device_tree();
+#else
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+#endif
+
+ *cmdline_p = command_line;
+ parse_early_param();
+
+ /* Invalidate and enable all the caches, if necessary. */
+ invalidate_icache();
+ enable_icache();
+ invalidate_dcache();
+ enable_dcache();
+
+ /* Reboot automatically 120s after a panic instead of hanging. */
+ panic_timeout = 120;
+
+ setup_memory();
+ paging_init();
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_XILINX_CONSOLE)
+ conswitchp = &xil_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+#endif
+}
+
+#ifdef CONFIG_MTD_UCLINUX_EBSS
+/* Return starting point of romfs image */
+inline unsigned *get_romfs_base(void)
+{
+ /* For now, assume "Standard" model of bss_start */
+ return (unsigned *)&__bss_start;
+}
+
+/* Handle both romfs and cramfs types, without generating unnecessary
+ code (ie no point checking for CRAMFS if it's not even enabled) */
+/* Return the size in bytes of the filesystem image at ADDR, probing
+ * for the romfs magic ("-rom1fs-", big-endian size at word 2) or the
+ * cramfs magic (size at word 1); 0 if neither matches. */
+inline unsigned get_romfs_len(unsigned *addr)
+{
+#ifdef CONFIG_ROMFS_FS
+ if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */
+ return be32_to_cpu(addr[2]);
+#endif
+
+#ifdef CONFIG_CRAMFS
+ if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */
+ return le32_to_cpu(addr[1]);
+#endif
+ return 0;
+}
+#endif /* CONFIG_MTD_UCLINUX_EBSS */
+
+/* Copy the interrupt/exception vector table from the kernel image
+ * (__ivt_start .. __ivt_end) to its hardware location at address 0.
+ * Note: "(void)" rather than "()" — an empty parameter list in C
+ * declares an unprototyped function, not a zero-argument one. */
+static void initialize_interrupt_and_exception_table(void)
+{
+ unsigned long *src, *dst = (unsigned long *)0x0;
+
+ /* Initialize the interrupt vector table, which is in low memory. */
+ for (src = __ivt_start; src < __ivt_end; src++, dst++)
+ *dst = *src;
+}
+
+/* This code is called before the kernel proper is started */
+void machine_early_init(const char *cmdline)
+{
+#ifdef CONFIG_MTD_UCLINUX_EBSS
+ {
+ int size;
+ extern char *klimit;
+ extern char *_ebss;
+
+ /* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the
+ * end of kernel, which is ROMFS_LOCATION defined above. */
+ size = PAGE_ALIGN(get_romfs_len(get_romfs_base()));
+ early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+ get_romfs_base(), size);
+ BUG_ON(size < 0); /* What else can we do? */
+
+ /* Use memmove to handle likely case of memory overlap */
+ memmove(&_ebss, get_romfs_base(), size);
+
+ /* update klimit */
+ klimit += PAGE_ALIGN(size);
+ }
+#endif
+
+
+ memset(__bss_start, 0, __bss_stop-__bss_start);
+ memset(_ssbss, 0, _esbss-_ssbss);
+
+ /* Copy command line passed from bootloader, or use default
+ if none provided, or forced */
+#ifndef CONFIG_CMDLINE_FORCE
+ if (cmdline && cmdline[0]!='\0')
+ strlcpy(command_line, cmdline, COMMAND_LINE_SIZE);
+ else
+#endif
+ strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+
+ initialize_interrupt_and_exception_table();
+
+ /* Initialize global data */
+ per_cpu(KM,0)= 0x1; /* We start in kernel mode */
+ per_cpu(CURRENT_SAVE,0) = (unsigned long)current;
+
+ irq_early_init();
+}
+
+/* Power-management hooks. This port has no way to reset or power
+ * down the hardware, so each handler just reports and spins. */
+void machine_restart(char * cmd)
+{
+ printk("Machine restart...\n");
+ dump_stack();
+ while(1)
+ ;
+}
+
+void machine_shutdown(char * cmd)
+{
+ printk("Machine shutdown...\n");
+ while(1)
+ ;
+}
+
+void machine_halt(void)
+{
+ printk("Machine halt...\n");
+ while(1)
+ ;
+}
+
+void machine_power_off(void)
+{
+ printk("Machine power off...\n");
+ while(1)
+ ;
+}
+
--- /dev/null
+/*
+ * arch/microblaze/kernel/signal.c -- Signal handling
+ *
+ * Copyright (C) 2003,2004 John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001 NEC Corporation
+ * Copyright (C) 2001 Miles Bader <miles@gnu.org>
+ * Copyright (C) 1999,2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 1991,1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * This file was was derived from the sh version, arch/sh/kernel/signal.c
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/percpu.h>
+#include <asm/entry.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+/* sigsuspend(2): atomically install MASK as the blocked-signal set
+ * and sleep until a handled signal arrives; always returns -EINTR.
+ * (Fixes HTML-entity mangling: "¤t" was a corrupted "&current".) */
+asmlinkage int
+sys_sigsuspend(old_sigset_t mask, struct pt_regs *regs)
+{
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->r3 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset, 1))
+ return -EINTR;
+ }
+}
+
+/* rt_sigsuspend(2): like sys_sigsuspend() but takes a full sigset_t
+ * from userspace. (Fixes "¤t" -> "&current" entity mangling.) */
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+ struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->r3 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(regs, &saveset, 1))
+ return -EINTR;
+ }
+}
+
+/* Old-style sigaction(2): translate the userspace old_sigaction
+ * to/from a k_sigaction around do_sigaction(). */
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ /* Copy the previous disposition back to userspace. */
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+/* sigaltstack(2): regs->r1 is the current user stack pointer. */
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss,
+ struct pt_regs *regs)
+{
+ return do_sigaltstack(uss, uoss, regs->r1);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+/* Layout of the signal frame pushed on the user stack for old-style
+ * signals; tramp holds two instructions that invoke sigreturn. */
+struct sigframe
+{
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ unsigned long tramp[2]; /* signal trampoline */
+};
+
+/* Frame for SA_SIGINFO (rt) signals: siginfo plus full ucontext. */
+struct rt_sigframe
+{
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned long tramp[2]; /* signal trampoline */
+};
+
+/* Restore the register state saved in SC into REGS; *RVAL_P receives
+ * the syscall return value (r3). Returns non-zero on a failed user
+ * read. */
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *rval_p)
+{
+ unsigned int err = 0;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->regs.x)
+ COPY(r0); COPY(r1);
+ COPY(r2); COPY(r3); COPY(r4); COPY(r5);
+ COPY(r6); COPY(r7); COPY(r8); COPY(r9);
+ COPY(r10); COPY(r11); COPY(r12); COPY(r13);
+ COPY(r14); COPY(r15); COPY(r16); COPY(r17);
+ COPY(r18); COPY(r19); COPY(r20); COPY(r21);
+ COPY(r22); COPY(r23); COPY(r24); COPY(r25);
+ COPY(r26); COPY(r27); COPY(r28); COPY(r29);
+ COPY(r30); COPY(r31);
+ COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
+#undef COPY
+
+ *rval_p = regs->r3;
+
+ return err;
+}
+
+/* sigreturn(2): unwind the sigframe the kernel pushed at delivery
+ * time (frame sits at the user stack pointer, r1) and restore the
+ * pre-signal register state and blocked mask.
+ * (Fixes "¤t" -> "&current" entity mangling.) */
+asmlinkage int sys_sigreturn(struct pt_regs *regs)
+{
+ struct sigframe *frame = (struct sigframe *)regs->r1;
+ sigset_t set;
+ int rval;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc, &rval))
+ goto badframe;
+ return rval;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/* rt_sigreturn(2): like sys_sigreturn() but unwinds an rt_sigframe
+ * with a full ucontext, also restoring the altstack settings.
+ * (Fixes "¤t" -> "&current" entity mangling.) */
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe *frame = (struct rt_sigframe *)regs->r1;
+ sigset_t set;
+ stack_t st;
+ int rval;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs->r1);
+
+ return rval;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+/* Save the current register state from REGS into the user-space
+ * sigcontext SC, along with the old blocked-signal MASK. Returns
+ * non-zero on a failed user write. */
+static int
+setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->regs.x)
+ COPY(r0); COPY(r1);
+ COPY(r2); COPY(r3); COPY(r4); COPY(r5);
+ COPY(r6); COPY(r7); COPY(r8); COPY(r9);
+ COPY(r10); COPY(r11); COPY(r12); COPY(r13);
+ COPY(r14); COPY(r15); COPY(r16); COPY(r17);
+ COPY(r18); COPY(r19); COPY(r20); COPY(r21);
+ COPY(r22); COPY(r23); COPY(r24); COPY(r25);
+ COPY(r26); COPY(r27); COPY(r28); COPY(r29);
+ COPY(r30); COPY(r31);
+ COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
+#undef COPY
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+ /* Default to using normal stack */
+ unsigned long sp = regs->r1;
+
+ /* Switch to the alternate stack if requested and not already there. */
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /* Frame grows downward, 8-byte aligned. */
+ return (void *)((sp - frame_size) & -8UL);
+}
+
+/* Build an old-style signal frame on the user stack and point the
+ * user registers at the handler; on any fault fall through to
+ * give_sigsegv. */
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ /* Translate the signal number through the exec domain, if any. */
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ /* minus 8 is offset to cater for "rtsd r15,8" offset */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
+ } else {
+ /* Note, these encodings are _big endian_! */
+
+ /* addi r12, r0, __NR_sigreturn */
+ err |= __put_user(0x31800000 | __NR_sigreturn ,
+ frame->tramp + 0);
+ /* brki r14, 0x8 */
+ err |= __put_user(0xb9cc0008, frame->tramp + 1);
+
+ /* Return from sighandler will jump to the tramp.
+ Negative 8 offset because return is rtsd r15, 8 */
+ regs->r15 = ((unsigned long)frame->tramp)-8;
+
+#if 0
+ flush_cache_sigtramp ((unsigned long)frame->tramp);
+#endif
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->r1 = (unsigned long) frame;
+ /* Signal handler args: */
+ regs->r5 = signal; /* Arg 0: signum */
+ regs->r6 = (unsigned long) &frame->sc; /* arg 1: sigcontext */
+
+ /* Offset of 4 to handle microblaze rtid r14, 0 */
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#ifdef DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
+ current->comm, current->pid, frame, regs->pc );
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+/* Build an rt (SA_SIGINFO) signal frame: siginfo + ucontext on the
+ * user stack, handler args in r5/r6/r7; on any fault fall through to
+ * give_sigsegv. */
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ /* Translate the signal number through the exec domain, if any. */
+ signal = current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->r1),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ /* minus 8 is offset to cater for "rtsd r15,8" */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8;
+ } else {
+ /* addi r12, r0, __NR_sigreturn */
+ err |= __put_user(0x31800000 | __NR_rt_sigreturn ,
+ frame->tramp + 0);
+ /* brki r14, 0x8 */
+ err |= __put_user(0xb9cc0008, frame->tramp + 1);
+
+ /* Return from sighandler will jump to the tramp.
+ Negative 8 offset because return is rtsd r15, 8 */
+ regs->r15 = ((unsigned long)frame->tramp)-8;
+
+#if 0
+ flush_cache_sigtramp ((unsigned long)frame->tramp);
+#endif
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->r1 = (unsigned long) frame;
+ /* Signal handler args: */
+ regs->r5 = signal; /* arg 0: signum */
+ regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
+ regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
+ /* Offset to handle microblaze rtid r14, 0 */
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#ifdef DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
+ current->comm, current->pid, frame, regs->pc);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+/* Handle restarting system calls */
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->r3) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->r3 = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r3 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ do_restart:
+ /* offset of 4 bytes to re-execute trap (brki) instruction */
+ regs->pc -= 4;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+/* Deliver one signal: push the appropriate frame type, honour
+ * SA_ONESHOT, and (unless SA_NODEFER) block the handler's mask plus
+ * the signal itself while the handler runs.
+ * (Fixes "¤t" -> "&current" entity mangling.) */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+/* Check for and deliver one pending signal on return to userspace.
+ * IN_SYSCALL indicates we came from the syscall path and so may need
+ * syscall-restart handling. Returns non-zero if a signal was
+ * delivered (or we were in kernel mode).
+ * (Fixes "¤t" -> "&current" entity mangling.) */
+int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_syscall)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+#ifdef DEBUG_SIG
+ printk("do signal: %p %p %d\n", regs, oldset, in_syscall);
+ printk("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, regs->r12, current_thread_info()->flags);
+#endif
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ if (in_syscall) handle_restart(regs, &ka, 1);
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
+ }
+
+
+ if (in_syscall) handle_restart(regs, NULL, 0);
+
+ /* Did we come from a system call? */
+ return 0;
+}
--- /dev/null
+/*
+ * arch/microblaze/kernel/sys_microblaze.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ * (C) 2007 PetaLogix
+ *
+ * Authors:
+ * John Williams <john.williams@petalogix.com>
+ * Yasushi SHOJI <yashi@atmark-techno.com>
+ * Tetsuya OHKAWA <tetsuya@atmark-techno.com>
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/sys.h>
+#include <linux/ipc.h>
+#include <linux/utsname.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+#include <asm/unistd.h>
+
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+/* De-multiplex a SysV IPC request onto the individual sem/msg/shm
+ * syscalls. CALL's high 16 bits carry a version for backward
+ * compatibility; remaining args are interpreted per sub-call.
+ * Returns the sub-call's result or -EINVAL for an unknown CALL. */
+int
+sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ ret = -EINVAL;
+ switch (call) {
+ case SEMOP:
+ ret = sys_semop (first, (struct sembuf *)ptr, second);
+ break;
+ case SEMGET:
+ ret = sys_semget (first, second, third);
+ break;
+ case SEMCTL:
+ {
+ union semun fourth;
+
+ if (!ptr)
+ break;
+ /* The 4th argument arrives indirectly through *ptr. */
+ if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
+ || (ret = get_user(fourth.__pad, (void **)ptr)))
+ break;
+ ret = sys_semctl (first, second, third, fourth);
+ break;
+ }
+ case MSGSND:
+ ret = sys_msgsnd (first, (struct msgbuf *) ptr, second, third);
+ break;
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ /* Old ABI: msgp and msgtyp packed in an ipc_kludge. */
+ struct ipc_kludge tmp;
+
+ if (!ptr)
+ break;
+ if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
+ || (ret = copy_from_user(&tmp,
+ (struct ipc_kludge *) ptr,
+ sizeof (tmp))))
+ break;
+ ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
+ third);
+ break;
+ }
+ default:
+ ret = sys_msgrcv (first, (struct msgbuf *) ptr,
+ second, fifth, third);
+ break;
+ }
+ break;
+ case MSGGET:
+ ret = sys_msgget ((key_t) first, second);
+ break;
+ case MSGCTL:
+ ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
+ break;
+ case SHMAT:
+ switch (version) {
+ default: {
+ /* Attach address is returned through *third. */
+ ulong raddr;
+
+ if ((ret = access_ok(VERIFY_WRITE, (ulong*) third,
+ sizeof(ulong)) ? 0 : -EFAULT))
+ break;
+ ret = do_shmat (first, (char *) ptr, second, &raddr);
+ if (ret)
+ break;
+ ret = put_user (raddr, (ulong *) third);
+ break;
+ }
+ case 1: /* iBCS2 emulator entry point */
+ if (!segment_eq(get_fs(), get_ds()))
+ break;
+ ret = do_shmat (first, (char *) ptr, second,
+ (ulong *) third);
+ break;
+ }
+ break;
+ case SHMDT:
+ ret = sys_shmdt ((char *)ptr);
+ break;
+ case SHMGET:
+ ret = sys_shmget (first, second, third);
+ break;
+ case SHMCTL:
+ ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * In-kernel execve(): start a new program from kernel context.
+ *
+ * Builds a pt_regs frame on the local stack, runs do_execve(), and on
+ * success copies that frame over the task's real register frame and
+ * branches to ret_to_user — the thread never returns to its caller.
+ *
+ * Fix: the '&' characters had been mangled into HTML entities
+ * ("&reg;s" rendered as a registered-trademark sign), which does not
+ * compile; restored '&regs' in all three places.
+ */
+long execve(const char *filename, char **argv, char **envp)
+{
+	struct pt_regs regs;
+	int ret;
+
+	memset(&regs, 0, sizeof(struct pt_regs));
+	local_save_flags(regs.msr);
+	ret = do_execve((char *)filename, (char __user * __user *)argv,
+			(char __user * __user *)envp, &regs);
+
+	if (ret < 0)
+		goto out;
+
+	/*
+	 * Save argc to the register structure for userspace.
+	 */
+	regs.r5 = ret; /* FIXME */
+
+	/*
+	 * We were successful. We won't be returning to our caller, but
+	 * instead to user space by manipulating the kernel stack.
+	 */
+	asm volatile ("addk	r5, r0, %0	\n\t"
+		      "addk	r6, r0, %1	\n\t"
+		      "brlid	r15, memmove	\n\t" /* copy regs to top of stack */
+		      "addik	r7, r0, %2	\n\t"
+		      "brid	ret_to_user	\n\t"
+		      "addk	r1, r0, r3	\n\t" /* reposition stack pointer */
+		      :
+		      : "r" (task_pt_regs(current)),
+			"r" (&regs),
+			"i" (sizeof(regs))
+		      : "r1", "r3", "r5", "r6", "r7", "r15", "memory");
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(execve);
+
+asmlinkage int sys_vfork(struct pt_regs *regs)
+{
+	/* vfork: child shares the address space (CLONE_VM) and the parent
+	 * blocks until it execs or exits (CLONE_VFORK); the child starts
+	 * on the parent's current stack pointer (regs->r1). */
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->r1, regs, 0, NULL, NULL);
+}
+
+asmlinkage int sys_clone(int flags, unsigned long stack, struct pt_regs *regs)
+{
+	/* A NULL child stack means "use the caller's current stack
+	 * pointer" (taken from regs->r1). */
+	if (!stack) stack = regs->r1;
+	return do_fork(flags, stack, regs, 0, NULL, NULL);
+}
+
+asmlinkage int sys_execve(char __user *filenamei, char __user * __user *argv,
+			  char __user * __user *envp, struct pt_regs *regs)
+{
+	int error;
+	char * filename;
+
+	/* Bring the pathname into kernel space (getname() allocates and
+	 * validates it); release it again once do_execve() is done. */
+	filename = getname(filenamei);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve(filename, argv, envp, regs);
+	putname(filename);
+out:
+	return error;
+}
+
+asmlinkage int sys_pipe(unsigned long __user *fildes)
+{
+	int fd[2];
+	int error;
+
+	/* Create the pipe in kernel space, then hand both descriptors
+	 * back to the caller in a single copy.
+	 * NOTE(review): if copy_to_user() fails, the two freshly created
+	 * descriptors are not closed here — they stay open in this task. */
+	error = do_pipe(fd);
+	if (!error) {
+		if (copy_to_user(fildes, fd, 2*sizeof(int)))
+			error = -EFAULT;
+	}
+	return error;
+}
+
+/*
+ * Common mmap worker: resolve the file descriptor (unless
+ * MAP_ANONYMOUS) and call do_mmap_pgoff() under mmap_sem.
+ * 'pgoff' is the file offset in PAGE_SIZE units.
+ *
+ * Fix: "current" had been mangled into the HTML entity "&curren;t"
+ * (rendered as a currency sign), which does not compile; restored
+ * '&current->mm->mmap_sem' in both lock calls.
+ */
+static inline unsigned long
+do_mmap2 (unsigned long addr, size_t len,
+	  unsigned long prot, unsigned long flags,
+	  unsigned long fd, unsigned long pgoff)
+{
+	struct file * file = NULL;
+	int ret = -EBADF;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (! (flags & MAP_ANONYMOUS)) {
+		if (!(file = fget (fd))) {
+			printk("no fd in mmap\r\n");
+			goto out;
+		}
+	}
+
+	down_write (&current->mm->mmap_sem);
+	ret = do_mmap_pgoff (file, addr, len, prot, flags, pgoff);
+	up_write (&current->mm->mmap_sem);
+	if (file)
+		fput (file);
+out:
+	return ret;
+}
+
+unsigned long sys_mmap2 (unsigned long addr, size_t len,
+			 unsigned long prot, unsigned long flags,
+			 unsigned long fd, unsigned long pgoff)
+{
+	/* Thin syscall wrapper: 'pgoff' is already in PAGE_SIZE units. */
+	return do_mmap2 (addr, len, prot, flags, fd, pgoff);
+}
+
+unsigned long sys_mmap (unsigned long addr, size_t len,
+			unsigned long prot, unsigned long flags,
+			unsigned long fd, off_t offset)
+{
+	int err = -EINVAL;
+
+	/* Legacy mmap takes a byte offset; it must be page aligned so it
+	 * can be converted to the page-granular offset do_mmap2 expects. */
+	if (offset & ~PAGE_MASK) {
+		printk("no pagemask in mmap\r\n");
+		goto out;
+	}
+
+	err = do_mmap2 (addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+out:
+	return err;
+}
+
+
+int sys_uname (struct old_utsname * name)
+{
+	int err = -EFAULT;
+
+	/* Copy the whole utsname structure out under the read lock;
+	 * a NULL destination or a failed copy yields -EFAULT. */
+	down_read (&uts_sem);
+	if (name && !copy_to_user (name, utsname(), sizeof (*name)))
+		err = 0;
+	up_read (&uts_sem);
+	return err;
+}
+
+/*
+ * Legacy uname(): copy each utsname field into the caller's
+ * struct oldold_utsname, NUL-terminating every fixed-size field.
+ * Returns 0 on success or -EFAULT if any user copy failed.
+ *
+ * The error accumulation subtracts each (0 or -EFAULT) result so any
+ * failure leaves 'error' nonzero; the final line collapses that to
+ * -EFAULT.  Fix: the last __put_user used '=' instead of '-=', which
+ * silently discarded every earlier failure.
+ */
+int sys_olduname (struct oldold_utsname * name)
+{
+	int error;
+
+	if (!name)
+		return -EFAULT;
+	if (!access_ok (VERIFY_WRITE, name, sizeof (struct oldold_utsname)))
+		return -EFAULT;
+
+	down_read (&uts_sem);
+	error = __copy_to_user (&name->sysname, utsname()->sysname,
+				__OLD_UTS_LEN);
+	error -= __put_user (0, name->sysname + __OLD_UTS_LEN);
+	error -= __copy_to_user (&name->nodename, utsname()->nodename,
+				 __OLD_UTS_LEN);
+	error -= __put_user (0, name->nodename + __OLD_UTS_LEN);
+	error -= __copy_to_user (&name->release, utsname()->release,
+				 __OLD_UTS_LEN);
+	error -= __put_user (0, name->release + __OLD_UTS_LEN);
+	error -= __copy_to_user (&name->version, utsname()->version,
+				 __OLD_UTS_LEN);
+	error -= __put_user (0, name->version + __OLD_UTS_LEN);
+	error -= __copy_to_user (&name->machine, utsname()->machine,
+				 __OLD_UTS_LEN);
+	/* was 'error =', dropping all previously accumulated failures */
+	error -= __put_user (0, name->machine + __OLD_UTS_LEN);
+	up_read (&uts_sem);
+
+	error = error ? -EFAULT : 0;
+	return error;
+}
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+	/* Load the syscall argument registers by hand: args in r5-r7,
+	 * syscall number in r12, return value arrives in r3. */
+	register const char *__a __asm__ ("r5") = filename;
+	register const void *__b __asm__ ("r6") = argv;
+	register const void *__c __asm__ ("r7") = envp;
+	register unsigned long __syscall __asm__ ("r12") = __NR_execve;
+	register unsigned long __ret __asm__ ("r3");
+	/* 'brki r14, 0x8' traps into the syscall entry path, so the
+	 * kernel builds a proper pt_regs frame for the new program. */
+	__asm__ __volatile__ ("brki r14, 0x8"
+			:   "=r" (__ret), "=r" (__syscall)
+			:   "1" (__syscall), "r" (__a), "r" (__b), "r" (__c)
+			:   "r4", "r8", "r9",
+			"r10", "r11", "r14", "cc", "memory");
+	return __ret;
+}
--- /dev/null
+ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+ .long sys_ni_syscall /* was fork */
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve_wrapper
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_ni_syscall /* stat */
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_ni_syscall /* fstat */
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 - old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* olduname */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend_wrapper
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_ni_syscall /* old_getrlimit */
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long sys_ni_syscall /* old_select */
+ .long sys_symlink
+ .long sys_ni_syscall /* lstat */
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long sys_ni_syscall /* old_readdir */
+ .long sys_mmap /* 90 */ /* old_mmap */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_ni_syscall /* uname */
+ .long sys_ni_syscall /* 110 */ /* iopl */
+ .long sys_vhangup
+ .long sys_ni_syscall /* old "idle" system call */
+ .long sys_ni_syscall /* old sys_vm86old */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn_wrapper
+ .long sys_clone_wrapper /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_ni_syscall /* modify_ldt */
+ .long sys_adjtimex
+ .long sys_ni_syscall /* 125: sys_mprotect */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* reserved for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_ni_syscall /* sys_msync */
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_ni_syscall /* 150: sys_mlock */
+ .long sys_ni_syscall /* sys_munlock */
+ .long sys_ni_syscall /* sys_mlockall */
+ .long sys_ni_syscall /* sys_munlockall */
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_ni_syscall /* sys_mremap */
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_ni_syscall /* sys_vm86 */
+ .long sys_ni_syscall /* Old sys_query_module */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn_wrapper
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend_wrapper
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_ni_syscall /* sigaltstack */
+ .long sys_sendfile
+ .long sys_ni_syscall /* reserved for streams1 */
+ .long sys_ni_syscall /* reserved for streams2 */
+ .long sys_vfork_wrapper /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2 /* mmap2 */
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_ni_syscall /* sys_mincore */
+ .long sys_ni_syscall /* sys_madvise */
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_ni_syscall /* set_thread_area */
+ .long sys_ni_syscall /* get_thread_area */
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_ni_syscall /* sys_remap_file_pages */
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_ni_syscall /* sys_vserver */
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_kexec_load
+ .long sys_waitid
+ .long sys_ni_syscall /* 285 */ /* available */
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get /* 290 */
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_ni_syscall /* sys_migrate_pages */
+ .long sys_ni_syscall /* 295 */ /* sys_openat */
+ .long sys_ni_syscall /* sys_mkdirat */
+ .long sys_ni_syscall /* sys_mknodat */
+ .long sys_ni_syscall /* sys_fchownat */
+ .long sys_ni_syscall /* sys_futimesat */
+ .long sys_ni_syscall /* 300 */ /* sys_fstatat64 */
+ .long sys_ni_syscall /* sys_unlinkat */
+ .long sys_ni_syscall /* sys_renameat */
+ .long sys_ni_syscall /* sys_linkat */
+ .long sys_ni_syscall /* sys_symlinkat */
+ .long sys_ni_syscall /* 305 */ /* sys_readlinkat */
+ .long sys_ni_syscall /* sys_fchmodat */
+ .long sys_ni_syscall /* sys_faccessat */
+ .long sys_ni_syscall /* pselect6 */
+ .long sys_ni_syscall /* ppoll */
+ .long sys_ni_syscall /* 310 */ /* sys_unshare */
+ .long sys_ni_syscall /* sys_set_robust_list */
+ .long sys_ni_syscall /* sys_get_robust_list */
+ .long sys_ni_syscall /* sys_splice */
+ .long sys_ni_syscall /* sys_sync_file_range */
+ .long sys_ni_syscall /* 315 */ /* sys_tee */
+ .long sys_ni_syscall /* sys_vmsplice */
--- /dev/null
+/*
+ * arch/microblaze/kernel/time.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <asm/bug.h>
+
+extern void system_timer_init(void);
+extern unsigned long do_gettimeoffset(void);
+
+void time_init(void)
+{
+	/* Board/platform code provides the actual timer setup. */
+	system_timer_init();
+}
+
+int do_settimeofday(struct timespec *tv)
+{
+	/* Reject out-of-range nanoseconds; the unsigned cast also catches
+	 * negative values, which wrap to huge numbers. */
+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+		return -EINVAL;
+
+	write_seqlock_irq (&xtime_lock);
+
+	/* This is revolting. We need to set xtime.tv_nsec correctly.
+	 * However, the value in this location is the value at the last
+	 * tick.  Discover what correction gettimeofday would have done,
+	 * and then undo it!
+	 */
+#if 0
+	tv->tv_nsec -= mach_gettimeoffset() * 1000;
+#endif
+
+	/* Normalise a (possibly) negative nanosecond count back into
+	 * [0, NSEC_PER_SEC), borrowing from the seconds field. */
+	while (tv->tv_nsec < 0) {
+		tv->tv_nsec += NSEC_PER_SEC;
+		tv->tv_sec--;
+	}
+
+	xtime.tv_sec = tv->tv_sec;
+	xtime.tv_nsec = tv->tv_nsec;
+
+	/* Setting the clock by hand invalidates the NTP adjustment state. */
+	time_adjust = 0;
+	time_status |= STA_UNSYNC;
+	time_maxerror = NTP_PHASE_LIMIT;
+	time_esterror = NTP_PHASE_LIMIT;
+
+	write_sequnlock_irq (&xtime_lock);
+	clock_was_set();
+	return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+	unsigned long seq;
+	unsigned long usec, sec;
+
+	/* Retry the sample if the seqlock changed under us (a timer tick
+	 * updated xtime while we were reading it). */
+	do {
+		seq = read_seqbegin(&xtime_lock);
+		usec = do_gettimeoffset();
+		sec = xtime.tv_sec;
+		usec += (xtime.tv_nsec / 1000);
+	} while (read_seqretry(&xtime_lock, seq));
+
+	/* Carry whole seconds out of the microsecond field. */
+	while (usec >= 1000000) {
+		usec -= 1000000;
+		sec++;
+	}
+
+	tv->tv_sec = sec;
+	tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+/* Scheduler clock: nanoseconds since boot at jiffies resolution. */
+unsigned long long sched_clock(void)
+{
+	return (unsigned long long)jiffies * (1000000000 / HZ);
+}
--- /dev/null
+/*
+ * arch/microblaze/kernel/traps.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/exceptions.h>
+
+void trap_init(void)
+{
+	/* Install the exception handlers, then let the hardware start
+	 * delivering exceptions. */
+	initialize_exception_handlers();
+	__enable_hw_exceptions();
+}
+
+/* Called when xchg() is used with an unsupported operand size; this is
+ * a kernel bug, so report the caller's PC and die. */
+void __bad_xchg(volatile void *ptr, int size)
+{
+	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
+		__builtin_return_address(0), ptr, size);
+	BUG();
+}
+EXPORT_SYMBOL(__bad_xchg);
+
+static int kstack_depth_to_print = 24;
+
+/* Scan the given kernel stack and print every word that looks like a
+ * kernel text address.  This is a heuristic scan, not a frame-pointer
+ * unwind, so stale return addresses may be printed too. */
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+	unsigned long addr;
+
+	/* NULL means "trace from right here" — use our own stack. */
+	if (!stack)
+		stack = (unsigned long*)&stack;
+
+	printk("Call Trace: ");
+#ifdef CONFIG_KALLSYMS
+	printk("\n");
+#endif
+	while (!kstack_end(stack)) {
+		addr = *stack++;
+		if (__kernel_text_address(addr)) {
+			printk("[<%08lx>] ", addr);
+			print_symbol("%s\n", addr);
+		}
+	}
+	printk("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	unsigned long *stack;
+	int i;
+
+	/* No explicit stack pointer given: use the saved context SP for
+	 * another task, or our own current stack otherwise. */
+	if (sp == NULL) {
+		if (task)
+			sp = (unsigned long *) ((struct thread_info *)(task->stack))->cpu_context.sp;
+		else
+			sp = (unsigned long *)&sp;
+	}
+
+	stack = sp;
+
+	printk("\nStack:\n ");
+
+	/* Dump up to kstack_depth_to_print words, 8 per line, stopping
+	 * at the end of the stack page. */
+	for(i = 0; i < kstack_depth_to_print; i++) {
+		if (kstack_end(sp))
+			break;
+		if (i && ((i % 8) == 0))
+			printk("\n ");
+		printk("%08lx ", *sp++);
+	}
+	printk("\n");
+	show_trace(task, stack);
+}
+
+/* Convenience wrapper: dump the current task's stack from here. */
+void dump_stack(void)
+{
+	show_stack(NULL, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
--- /dev/null
+/*
+ * arch/microblaze/kernel/vmlinux.lds.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ * Copyright (C) 2007 Xilinx, Inc.
+ */
+
+OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze")
+OUTPUT_ARCH(microblaze)
+ENTRY(_start)
+
+#include <asm/xparameters.h>
+#include <asm-generic/vmlinux.lds.h>
+
+jiffies = jiffies_64 + 4;
+
+SECTIONS {
+
+ . = XPAR_ERAM_START;
+
+ .text : {
+ _text = . ;
+ _stext = . ;
+ *(.text)
+ *(.text.exit)
+ *(.text.lock)
+ *(.exitcall.exit)
+ SCHED_TEXT
+ LOCK_TEXT
+ . = ALIGN (4) ;
+ _etext = . ;
+ }
+
+ . = ALIGN(16);
+ RODATA
+
+ /* sdata2 section can go anywhere, but must be word aligned
+ and SDA2_BASE must point to the middle of it */
+ .sdata2 : {
+ _ssrw = .;
+ . = ALIGN(0x8);
+ *(.sdata2)
+ . = ALIGN(8);
+ _essrw = .;
+ _ssrw_size = _essrw - _ssrw;
+ _KERNEL_SDA2_BASE_ = _ssrw + (_ssrw_size / 2);
+ }
+
+
+ _sdata = . ;
+ .data ALIGN (0x4) : {
+ *(.data)
+ }
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ _edata = . ;
+
+ /* The initial task */
+ . = ALIGN(8192);
+ .data.init_task : { *(.data.init_task) }
+
+ /* Under the microblaze ABI, .sdata and .sbss must be contiguous */
+ . = ALIGN(8);
+ .sdata : {
+ _ssro = .;
+ *(.sdata)
+ }
+
+ .sbss : {
+ _ssbss = .;
+ *(.sbss)
+ _esbss = .;
+ _essro = .;
+ _ssro_size = _essro - _ssro ;
+ _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ;
+ }
+
+ . = ALIGN(16);
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ . = ALIGN(4096);
+
+ __init_begin = .;
+
+ .init.text : {
+ _sinittext = . ;
+ *(.init.text)
+ *(.exit.text)
+ *(.exit.data)
+ _einittext = .;
+ }
+
+ .init.data : { *(.init.data) }
+
+ . = ALIGN(4);
+ .init.ivt : {
+ __ivt_start = .;
+ *(.init.ivt)
+ __ivt_end = .;
+ }
+
+ .init.setup : {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
+
+ .initcall.init : {
+ __initcall_start = .;
+ INITCALLS
+#if 0
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcallrootfs.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+#endif
+ __initcall_end = .;
+ }
+
+ .con_initcall.init : {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+
+ __init_end = .;
+
+ .init.ramfs ALIGN(4096) : {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ . = ALIGN(4);
+ LONG(0);
+ }
+
+ .bss ALIGN (4096) : {
+ __bss_start = . ;
+ *(.bss*)
+ *(COMMON)
+ . = ALIGN (4) ;
+ __bss_stop = . ;
+ _ebss = . ;
+ }
+ . = ALIGN(4096);
+ _end = .;
+}
+
+
--- /dev/null
+lib-y := uaccess.o memcpy.o memmove.o memset.o checksum.o
\ No newline at end of file
--- /dev/null
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * IP/TCP/UDP checksumming routines
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Tom May, <ftom@netcom.com>
+ * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ * Lots of code moved from tcp.c and ip.c; see those files
+ * for more names.
+ *
+ * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
+ * Fixed some nasty bugs, causing some horrible crashes.
+ * A: At some points, the sum (%0) was used as
+ * length-counter instead of the length counter
+ * (%1). Thanks to Roman Hodek for pointing this out.
+ * B: GCC seems to mess up if one uses too many
+ * data-registers to hold input values and one tries to
+ * specify d0 and d1 as scratch registers. Letting gcc choose these
+ * registers itself solves the problem.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
+ of the assembly has to go. */
+
+#include <net/checksum.h>
+#include <asm/checksum.h>
+
+/* Fold a 32-bit partial sum into 16 bits with end-around carry. */
+static inline unsigned short from32to16(unsigned long x)
+{
+	/* add up 16-bit and 16-bit for 16+c bit */
+	x = (x & 0xffff) + (x >> 16);
+	/* add up carry.. */
+	x = (x & 0xffff) + (x >> 16);
+	return x;
+}
+
+/* Sum 'len' bytes at 'buff' as 16-bit big-endian words, returning the
+ * folded 16-bit partial checksum.  Handles a leading odd byte, aligned
+ * 16-bit and 32-bit runs, then the trailing bytes. */
+static unsigned long do_csum(const unsigned char * buff, int len)
+{
+	int odd, count;
+	unsigned long result = 0;
+
+	if (len <= 0)
+		goto out;
+	/* A misaligned start byte is folded in now and compensated for
+	 * by the final byte swap below. */
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		result = *buff;
+		len--;
+		buff++;
+	}
+	count = len >> 1; /* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1; /* nr of 32-bit words.. */
+		if (count) {
+			/* Main loop: 32-bit adds with manual carry tracking
+			 * ('w > result' detects unsigned overflow). */
+			unsigned long carry = 0;
+			do {
+				unsigned long w = *(unsigned long *) buff;
+				count--;
+				buff += 4;
+				result += carry;
+				result += w;
+				carry = (w > result);
+			} while (count);
+			result += carry;
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += (*buff << 8);
+	result = from32to16(result);
+	/* Undo the byte rotation introduced by the odd start address. */
+	if (odd)
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+	return result;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+{
+	/* 'ihl' is the IP header length in 32-bit words. */
+	return ~do_csum(iph,ihl*4);
+}
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+{
+	unsigned int result = do_csum(buff, len);
+
+	/* add in old sum, and carry.. */
+	result += sum;
+	if(sum > result)
+		result += 1; /* end-around carry from the 32-bit add */
+	return result;
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+unsigned short ip_compute_csum(const unsigned char * buff, int len)
+{
+	/* One's-complement of the folded partial sum. */
+	return ~do_csum(buff,len);
+}
+
+ /*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+
+unsigned int
+csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *csum_err)
+{
+	/* NOTE(review): plain memcpy with no access checking or fault
+	 * handling — presumably acceptable on this nommu port where user
+	 * and kernel share one address space; confirm before reuse. */
+	if (csum_err) *csum_err = 0;
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
+}
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+
+unsigned int
+csum_partial_copy(const char *src, char *dst, int len, int sum)
+{
+	/* Copy first, then checksum the destination in place. */
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
+}
--- /dev/null
+/* Filename: memcpy.c
+ *
+ * Reasonably optimised generic C-code for memcpy on Microblaze
+ * This is generic C code to do efficient, alignment-aware memcpy.
+ *
+ * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
+ * http://www.embedded.com/showArticle.jhtml?articleID=19205567
+ *
+ * Attempts were made, unsuccessfully, to contact the original
+ * author of this code (Michael Morrow, Intel). Below is the original
+ * copyright notice.
+ *
+ * This software has been developed by Intel Corporation.
+ * Intel specifically disclaims all warranties, express or
+ * implied, and all liability, including consequential and
+ * other indirect damages, for the use of this program, including
+ * liability for infringement of any proprietary rights,
+ * and including the warranties of merchantability and fitness
+ * for a particular purpose. Intel does not assume any
+ * responsibility for and errors which may appear in this program
+ * not any responsibility to update it.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/compiler.h>
+
+#include <asm/string.h>
+
+/* Macro's for bytebliting unaligned data blocks */
+
+/* Big-endian MACROS */
+
+#define BYTE_BLIT_INIT(s,h,o) \
+ (h) = *((unsigned *)(s))++ << (o)
+
+#define BYTE_BLIT_STEP(d,s,h,o) \
+ { register unsigned _v_; _v_ = *((unsigned *)(s))++; \
+ *((unsigned *)(d))++ = (h) | _v_ >> (32-(o)); \
+ (h) = _v_ << (o); \
+ }
+
+void *memcpy(void *d, const void *s, __kernel_size_t c)
+{
+#if 1
+ /* This code was put into place as we transitioned from gcc3 to gcc4.
+ * gcc4 does not support the syntax *((char *)d)++ which causes the
+ * original code below to break. Short of fixing up the original code,
+ * a simpler byte oriented code was put into place.
+ */
+ /* Simple, byte oriented memcpy. */
+ const char *src = s;
+ char *dst = d;
+
+ while (c--) *dst++ = *src++;
+
+ return d;
+
+#else
+ /* The following code tries to optimize the copy by using unsigned
+ * alignment. This will work fine if both source and destination are
+ * aligned on the same boundary. However, if they are aligned on
+ * different boundaries shifts will be necessary. This might result in
+ * bad performance on MicroBlaze systems without a barrel shifter.
+ */
+ void *r = d;
+
+ if (c >= 4) {
+ unsigned x, a, h, align;
+
+ /* Align the destination to a word boundry. */
+ /* This is done in an endian independant manner. */
+ switch ((unsigned) d & 3) {
+ case 1:
+ *((char *) d)++ = *((char *) s)++;
+ c--;
+ case 2:
+ *((char *) d)++ = *((char *) s)++;
+ c--;
+ case 3:
+ *((char *) d)++ = *((char *) s)++;
+ c--;
+ }
+ /* Choose a copy scheme based on the source */
+ /* alignment relative to destination. */
+ switch ((unsigned) s & 3) {
+ case 0x0: /* Both byte offsets are aligned */
+
+ for (; c >= 4; c -= 4)
+ *((unsigned *) d)++ = *((unsigned *) s)++;
+
+ break;
+
+ case 0x1: /* Unaligned - Off by 1 */
+ /* Word align the source */
+ a = (unsigned) s & ~3;
+
+ /* Load the holding buffer */
+ BYTE_BLIT_INIT(a, h, 8);
+
+ for (; c >= 4; c -= 4)
+ BYTE_BLIT_STEP(d, a, h, 8);
+
+ /* Realign the source */
+ (unsigned) s = a - 3;
+ break;
+
+ case 0x2: /* Unaligned - Off by 2 */
+ /* Word align the source */
+ a = (unsigned) s & ~3;
+
+ /* Load the holding buffer */
+ BYTE_BLIT_INIT(a, h, 16);
+
+ for (; c >= 4; c -= 4)
+ BYTE_BLIT_STEP(d, a, h, 16);
+
+ /* Realign the source */
+ (unsigned) s = a - 2;
+ break;
+
+ case 0x3: /* Unaligned - Off by 3 */
+ /* Word align the source */
+ a = (unsigned) s & ~3;
+
+ /* Load the holding buffer */
+ BYTE_BLIT_INIT(a, h, 24);
+
+ for (; c >= 4; c -= 4)
+ BYTE_BLIT_STEP(d, a, h, 24);
+
+ /* Realign the source */
+ (unsigned) s = a - 1;
+ break;
+ }
+
+ }
+
+ /* Finish off any remaining bytes */
+ /* simple fast copy, ... unless a cache boundry is crossed */
+ switch (c) {
+ case 3:
+ *((char *) d)++ = *((char *) s)++;
+ case 2:
+ *((char *) d)++ = *((char *) s)++;
+ case 1:
+ *((char *) d)++ = *((char *) s)++;
+ }
+
+ return r;
+#endif
+}
+
+
+/* This port has no separate cache-optimised copy path; alias to the
+ * plain memcpy. */
+void *cacheable_memcpy(void *d, const void *s, __kernel_size_t c)
+{
+	return memcpy(d, s, c);
+}
--- /dev/null
+/* Filename: memmove.c
+ *
+ * Reasonably optimised generic C-code for memcpy on Microblaze
+ * This is generic C code to do efficient, alignment-aware memmove.
+ *
+ * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
+ * http://www.embedded.com/showArticle.jhtml?articleID=19205567
+ *
+ * Attempts were made, unsuccessfully, to contact the original
+ * author of this code (Michael Morrow, Intel). Below is the original
+ * copyright notice.
+ *
+ * This software has been developed by Intel Corporation.
+ * Intel specifically disclaims all warranties, express or
+ * implied, and all liability, including consequential and
+ * other indirect damages, for the use of this program, including
+ * liability for infringement of any proprietary rights,
+ * and including the warranties of merchantability and fitness
+ * for a particular purpose. Intel does not assume any
+ * responsibility for and errors which may appear in this program
+ * not any responsibility to update it.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/compiler.h>
+
+#include <asm/string.h>
+
+/* Macro's for bytebliting unaligned data blocks */
+
+/* Big-endian MACROS */
+
+#define BYTE_BLIT_INIT(s,h,o) \
+ (h) = *(--(unsigned *)(s)) >> (32-(o))
+
+#define BYTE_BLIT_STEP(d,s,h,o) \
+ { register unsigned _v_; _v_ = *(--(unsigned *)(s)); \
+ *(--(unsigned *)(d)) = _v_ << (o) | (h); \
+ (h) = _v_ >> (32-(o)); \
+ }
+
+/*
+ * memmove - copy a memory area that may overlap the source.
+ * @d: destination
+ * @s: source
+ * @c: number of bytes to copy
+ *
+ * Returns the original destination pointer @d.  When the destination is at
+ * or below the source a forward copy via memcpy() is safe; otherwise the
+ * copy runs backwards so the overlapping tail of the source is read before
+ * it is overwritten.
+ */
+void *memmove(void *d, const void *s, __kernel_size_t c)
+{
+#if 1
+	/* This code was put into place as we transitioned from gcc3 to gcc4.
+	 * gcc4 does not support the syntax *((char *)d)++ which causes the
+	 * original code below to break. Short of fixing up the original code,
+	 * a simpler byte oriented code was put into place.
+	 */
+	/* Simple, byte oriented memmove. */
+	if (d <= s) {
+		/* Destination below source: forward copy cannot clobber
+		 * unread source bytes. */
+		return memcpy(d, s, c);
+	}
+	else {
+		const char *src = s;
+		char *dst = d;
+
+		/* copy backwards, from end to beginning */
+		src += c;
+		dst += c;
+
+		while (c--) *--dst = *--src;
+
+		return d;
+	}
+
+#else
+	/* NOTE(review): this branch is compiled out.  It still uses the
+	 * "cast as lvalue" extension (e.g. --(char *) d) that gcc4 rejects,
+	 * so it will not build if re-enabled as-is. */
+	/* The following code tries to optimize the copy by using unsigned
+	 * alignment. This will work fine if both source and destination are
+	 * aligned on the same boundary. However, if they are aligned on
+	 * different boundaries shifts will be necessary. This might result in
+	 * bad performance on MicroBlaze systems without a barrel shifter.
+	 */
+	void *r = d;
+
+	/* Use memcpy when source is higher than dest */
+	if (d <= s)
+		return memcpy(d, s, c);
+
+	/* Do a descending copy - this is a bit trickier! */
+	d += c;
+	s += c;
+
+	if (c >= 4) {
+		unsigned x, a, h, align;
+
+		/* Align the destination to a word boundary. */
+		/* This is done in an endian independent manner. */
+		/* Intentional fall-through: 1..3 trailing bytes are copied. */
+		switch ((unsigned) d & 3) {
+		case 3:
+			*(--(char *) d) = *(--(char *) s);
+			c--;
+		case 2:
+			*(--(char *) d) = *(--(char *) s);
+			c--;
+		case 1:
+			*(--(char *) d) = *(--(char *) s);
+			c--;
+		}
+		/* Choose a copy scheme based on the source */
+		/* alignment relative to destination. */
+		switch ((unsigned) s & 3) {
+		case 0x0:	/* Both byte offsets are aligned */
+
+			for (; c >= 4; c -= 4)
+				*(--(unsigned *) d) = *(--(unsigned *) s);
+
+			break;
+
+		case 0x1:	/* Unaligned - Off by 1 */
+			/* Word align the source */
+			a = (unsigned) (s + 4) & ~3;
+
+			/* Load the holding buffer */
+			BYTE_BLIT_INIT(a, h, 8);
+
+			for (; c >= 4; c -= 4)
+				BYTE_BLIT_STEP(d, a, h, 8);
+
+			/* Realign the source */
+			(unsigned) s = a + 1;
+			break;
+
+		case 0x2:	/* Unaligned - Off by 2 */
+			/* Word align the source */
+			a = (unsigned) (s + 4) & ~3;
+
+			/* Load the holding buffer */
+			BYTE_BLIT_INIT(a, h, 16);
+
+			for (; c >= 4; c -= 4)
+				BYTE_BLIT_STEP(d, a, h, 16);
+
+			/* Realign the source */
+			(unsigned) s = a + 2;
+			break;
+
+		case 0x3:	/* Unaligned - Off by 3 */
+			/* Word align the source */
+			a = (unsigned) (s + 4) & ~3;
+
+			/* Load the holding buffer */
+			BYTE_BLIT_INIT(a, h, 24);
+
+			for (; c >= 4; c -= 4)
+				BYTE_BLIT_STEP(d, a, h, 24);
+
+			/* Realign the source */
+			(unsigned) s = a + 3;
+			break;
+		}
+
+#if 0
+		/* Finish off any remaining bytes */
+		c &= 3;
+		goto finish;
+#endif
+
+	}
+
+	/* simple fast copy, ... unless a cache boundary is crossed */
+	/* Intentional fall-through: copies the remaining 1..4 bytes. */
+	switch (c) {
+	case 4:
+		*(--(char *) d) = *(--(char *) s);
+	case 3:
+		*(--(char *) d) = *(--(char *) s);
+	case 2:
+		*(--(char *) d) = *(--(char *) s);
+	case 1:
+		*(--(char *) d) = *(--(char *) s);
+	}
+	return r;
+#endif
+}
--- /dev/null
+/* Filename: memset.c
+ *
+ * Reasonably optimised generic C-code for memset on Microblaze
+ * This is generic C code to do efficient, alignment-aware memset.
+ *
+ * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
+ * http://www.embedded.com/showArticle.jhtml?articleID=19205567
+ *
+ * Attempts were made, unsuccessfully, to contact the original
+ * author of this code (Michael Morrow, Intel). Below is the original
+ * copyright notice.
+ *
+ * This software has been developed by Intel Corporation.
+ * Intel specifically disclaims all warranties, express or
+ * implied, and all liability, including consequential and
+ * other indirect damages, for the use of this program, including
+ * liability for infringement of any proprietary rights,
+ * and including the warranties of merchantability and fitness
+ * for a particular purpose. Intel does not assume any
+ * responsibility for and errors which may appear in this program
+ * not any responsibility to update it.
+ */
+
+/* Filename: memset.c
+ *
+ * Reasonably optimised generic C-code for memset on Microblaze
+ * Based on demo code originally Copyright 2001 by Intel Corp.
+ *
+ * This software has been developed by Intel Corporation.
+ * Intel specifically disclaims all warranties, express or
+ * implied, and all liability, including consequential and
+ * other indirect damages, for the use of this program, including
+ * liability for infringement of any proprietary rights,
+ * and including the warranties of merchantability and fitness
+ * for a particular purpose. Intel does not assume any
+ * responsibility for and errors which may appear in this program
+ * not any responsibility to update it.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/compiler.h>
+
+#include <asm/string.h>
+
+/*
+ * memset - fill a memory area with a byte value.
+ * @s: start of the area
+ * @c: fill byte (only the low 8 bits are used)
+ * @n: number of bytes to fill
+ *
+ * Returns @s.  Fills leading bytes until the pointer is word aligned,
+ * stores whole 32-bit words from a replicated byte, then finishes the
+ * tail byte-by-byte.
+ */
+void *memset(void *s, int c, __kernel_size_t n)
+{
+	void *r = s;
+	unsigned int w32;
+
+	/* Truncate c to 8 bits */
+	w32 = c = (c & 0xFF);
+
+	/* Make a repeating word out of it (byte replicated to all 4 lanes) */
+	w32 |= w32 << 8;
+	w32 |= w32 << 8;
+	w32 |= w32 << 8;
+
+	if (n >= 4) {
+		/* Align the destination to a word boundary. */
+		/* This is done in an endian independent manner. */
+		/* Intentional fall-through: offset 1 needs 3 byte stores,
+		 * offset 2 needs 2, offset 3 needs 1. */
+		switch ((unsigned) s & 3) {
+		case 1: *(char *)s = c; s = (char *)s+1; n--;
+		case 2: *(char *)s = c; s = (char *)s+1; n--;
+		case 3: *(char *)s = c; s = (char *)s+1; n--;
+		}
+
+		/* Do as many full-word copies as we can */
+		for (; n >= 4; n -= 4) {
+			*(unsigned *)s = w32;
+			s = (unsigned *)s +1;
+		}
+
+	}
+
+	/* Finish off the rest as byte sets */
+	while (n--) {
+		*(char *)s = c;
+		s = (char *)s+1;
+	}
+	return r;
+}
+
--- /dev/null
+/*
+ * arch/microblaze/lib/uaccess.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+#include <asm/bug.h>
+
+/*
+ * strnlen_user - length of a user-space string, including the NUL.
+ * On this no-MMU port, user and kernel addresses are interchangeable, so
+ * plain strlen() is used.
+ * NOTE(review): the @n limit is ignored, so an unterminated buffer is
+ * scanned beyond n bytes -- confirm this is acceptable for this port.
+ */
+long strnlen_user(const char __user *s, long n)
+{
+	return strlen(s) + 1;
+}
+
+/*
+ * Copy a string from user space (no-MMU: a plain strncpy) and set @res to
+ * the number of bytes copied excluding the trailing NUL, capped at @count.
+ */
+#define __do_strncpy_from_user(dst,src,count,res)			\
+	do {								\
+		char *tmp;						\
+		strncpy(dst, src, count);				\
+		for (tmp = dst; *tmp && count > 0; tmp++, count--)	\
+			;						\
+		res = (tmp - dst);					\
+	} while (0)
+
+/*
+ * __strncpy_from_user - copy string from user space, no access check.
+ * Returns the number of bytes copied, excluding the trailing NUL.
+ */
+long __strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	long res;
+	__do_strncpy_from_user(dst, src, count, res);
+	return res;
+}
+
+/*
+ * strncpy_from_user - checked copy of a string from user space.
+ * Returns the number of bytes copied (excluding the NUL), or -EFAULT if
+ * the source fails access_ok().
+ */
+long strncpy_from_user(char *dst, const char *src, long count)
+{
+	long res = -EFAULT;
+	if (access_ok(VERIFY_READ, src, 1))
+		__do_strncpy_from_user(dst, src, count, res);
+	return res;
+}
--- /dev/null
+#
+# Makefile
+#
+
+obj-y := init.o consistent.o dma-coherent.o
+
+obj-$(CONFIG_WANT_DEVICE_TREE) += lmb.o
+
--- /dev/null
+/*
+ * Microblaze support for cache consistent memory.
+ * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
+ *
+ * based on
+ *
+ * PowerPC version derived from arch/arm/mm/consistent.c
+ * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ *
+ * linux/arch/arm/mm/consistent.c
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * Consistent memory allocators. Used for DMA devices that want to
+ * share uncached memory with the processor core.
+ * My crufty no-MMU approach is simple. In the HW platform we can optionally
+ * mirror the DDR up above the processor cacheable region. So, memory accessed
+ * in this mirror region will not be cached. It's alloced from the same
+ * pool as normal memory, but the handle we return is shifted up into the
+ * uncached region. This will no doubt cause big problems if memory allocated
+ * here is not also freed properly.
+ * -- JW
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * consistent_alloc - allocate DMA-consistent memory.
+ * @gfp: allocation flags
+ * @size: requested size; rounded up to whole pages
+ * @dma_handle: out parameter receiving the bus/DMA address
+ *
+ * Allocates pages, remaps them, and (when the uncached shadow is
+ * configured) returns the uncached alias of the mapping.  Must not be
+ * called from interrupt context.  Returns NULL on failure.
+ */
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+{
+	struct page *page, *end, *free;
+	unsigned long order;
+	void *ret, *virt;
+
+	if (in_interrupt())
+		BUG();
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		goto no_page;
+
+	/*
+	 * We could do with a page_to_phys and page_to_bus here.
+	 */
+	virt = page_address(page);
+	/* *dma_handle = virt_to_bus(virt); */
+	ret = ioremap(virt_to_phys(virt), size);
+	if (!ret)
+		goto no_remap;
+
+	/* Here's the magic! Note if the uncached shadow is not implemented,
+	   it's up to the calling code to also test that condition and make
+	   other arrangements, such as manually flushing the cache and so on.
+	 */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
+#endif
+	/* For !MMU, dma_handle is same as physical (shadowed) address */
+	*dma_handle = (dma_addr_t )ret;
+
+	/*
+	 * free wasted pages. We skip the first page since we know
+	 * that it will have count = 1 and won't require freeing.
+	 * We also mark the pages in use as reserved so that
+	 * remap_page_range works.
+	 */
+	page = virt_to_page(virt);
+	free = page + (size >> PAGE_SHIFT);
+	end = page + (1 << order);
+
+	for (; page < end; page++) {
+		init_page_count(page);
+		if (page >= free)
+			__free_page(page);
+		else
+			SetPageReserved(page);
+	}
+
+	return ret;
+no_remap:
+	__free_pages(page, order);
+no_page:
+	return NULL;
+}
+
+/*
+ * consistent_free - free memory obtained from consistent_alloc().
+ * Strips the uncached-shadow bit (when configured) before freeing.
+ * Must not be called from interrupt context.
+ *
+ * NOTE(review): consistent_alloc() obtains memory via alloc_pages() +
+ * ioremap(), yet this frees with vfree(); verify the pairing -- the file
+ * header itself calls this a "crufty" approach.
+ */
+void consistent_free(void *vaddr)
+{
+	if (in_interrupt())
+		BUG();
+
+	/* Clear SHADOW_MASK bit in address, and free as per usual */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
+#endif
+	vfree(vaddr);
+}
+
+/*
+ * consistent_sync - make an area cache-consistent for the given direction.
+ * @vaddr: virtual start address (may be an uncached-shadow alias)
+ * @size: length of the area in bytes
+ * @direction: PCI_DMA_* transfer direction
+ *
+ * Cache operations must act on the cached alias of the buffer, so the
+ * uncached-shadow bit is stripped from the address first, using the same
+ * convention as consistent_free().
+ */
+void consistent_sync(void *vaddr, size_t size, int direction)
+{
+	unsigned long start;
+	unsigned long end;
+
+	start = (unsigned long)vaddr;
+
+	/* Convert start address back down to unshadowed memory region */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	/* Clear the shadow bit (fix: the previous "start &=
+	 * UNCACHED_SHADOW_MASK" kept only the shadow bit instead of
+	 * removing it -- compare consistent_free() above). */
+	start &= ~UNCACHED_SHADOW_MASK;
+#endif
+	end = start + size;
+
+	switch (direction) {
+	case PCI_DMA_NONE:
+		BUG();
+	case PCI_DMA_FROMDEVICE:	/* invalidate only */
+		invalidate_dcache_range(start, end);
+		break;
+	case PCI_DMA_TODEVICE:		/* writeback only */
+		flush_dcache_range(start, end);
+		break;
+	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
+		invalidate_dcache_range(start, end);
+		flush_dcache_range(start, end);
+		break;
+	}
+}
+
+
+/*
+ * consistent_sync_page makes memory consistent. identical
+ * to consistent_sync, but takes a struct page instead of a
+ * virtual address
+ */
+void consistent_sync_page(struct page *page, unsigned long offset,
+	size_t size, int direction)
+{
+	/* Translate page + offset to a kernel virtual address and delegate. */
+	unsigned long start = (unsigned long)page_address(page) + offset;
+	consistent_sync((void *)start, size, direction);
+}
--- /dev/null
+/*
+ * Microblaze support for cache consistent memory.
+ *
+ * Copyright (C) 2007 Xilinx, Inc.
+ *
+ * Based on arch/microblaze/mm/consistent.c
+ * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
+ * Based on arch/avr32/mm/dma-coherent.c
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * Consistent memory allocators. Used for DMA devices that want to
+ * share memory with the processor core.
+ *
+ * If CONFIG_XILINX_UNCACHED_SHADOW, then this code assumes that the
+ * HW platform optionally mirrors the memory up above the processor
+ * cacheable region. So, memory accessed in this mirror region will
+ * not be cached. It's alloced from the same pool as normal memory,
+ * but the handle we return is shifted up into the uncached region.
+ * This will no doubt cause big problems if memory allocated here is
+ * not also freed properly.
+ * If this trick is not used, then the memory is not actually coherent.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <asm/cacheflush.h>
+
+/*
+ * dma_cache_sync - make a streaming buffer coherent for @direction.
+ * Buffers living in the uncached shadow region need no cache maintenance.
+ */
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
+{
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	/* Strip the shadow bit to obtain the cached alias.  If stripping
+	 * changed the address, @vaddr was an uncached-shadow address and
+	 * needs no sync.  (Fix: the previous code masked with
+	 * UNCACHED_SHADOW_MASK itself, which made ordinary cached
+	 * addresses compare unequal as well and skipped their sync.) */
+	unsigned long start = ((unsigned long)vaddr) & ~UNCACHED_SHADOW_MASK;
+	/*
+	 * No need to sync an uncached area
+	 */
+	if (start != (unsigned long)vaddr)
+		return;
+#endif
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		invalidate_dcache_range(vaddr, vaddr + size);
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		flush_dcache_range(vaddr, vaddr + size);
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		invalidate_dcache_range(vaddr, vaddr + size);
+		flush_dcache_range(vaddr, vaddr + size);
+		break;
+	default:
+		BUG();
+	}
+}
+EXPORT_SYMBOL(dma_cache_sync);
+
+/*
+ * __dma_alloc - allocate page-aligned memory for a coherent mapping.
+ * Returns the first struct page of the allocation (or NULL on failure)
+ * and stores the bus address in *@handle.  Pages beyond the rounded-up
+ * size are returned to the page allocator.
+ */
+static struct page *__dma_alloc(struct device *dev, size_t size,
+			dma_addr_t *handle, gfp_t gfp)
+{
+	struct page *page, *free, *end;
+	unsigned long virt;
+	int order;
+
+	if (in_interrupt())
+		BUG();
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
+
+	/* Allow the excess tail pages to be freed individually below. */
+	split_page(page, order);
+
+	/*
+	 * When accessing physical memory with valid cache data, we
+	 * get a cache hit even if the virtual memory region is marked
+	 * as uncached.
+	 *
+	 * Since the memory is newly allocated, there is no point in
+	 * doing a writeback. If the previous owner cares, he should
+	 * have flushed the cache before releasing the memory.
+	 */
+	virt = (unsigned long)phys_to_virt(page_to_phys(page));
+	/* Fix: invalidate_dcache_range() takes (start, end) in this port
+	 * (see consistent_sync()); the old call passed the length as the
+	 * end address. */
+	invalidate_dcache_range(virt, virt + size);
+
+	*handle = page_to_bus(page);
+	free = page + (size >> PAGE_SHIFT);
+	end = page + (1 << order);
+
+	/*
+	 * Free any unused pages
+	 */
+	while (free < end) {
+		__free_page(free);
+		free++;
+	}
+
+	return page;
+}
+
+/* Release the pages backing a coherent buffer (inverse of __dma_alloc). */
+static void __dma_free(struct device *dev, size_t size,
+		       struct page *page, dma_addr_t handle)
+{
+	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	while (npages--)
+		__free_page(page++);
+}
+
+/*
+ * dma_alloc_coherent - allocate a coherent DMA buffer.
+ * Returns the CPU address of the buffer (the uncached-shadow alias when
+ * configured) and stores the bus address in *@handle, or NULL on failure.
+ */
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			dma_addr_t *handle, gfp_t gfp)
+{
+	struct page *page;
+	void *ret = NULL;
+
+	page = __dma_alloc(dev, size, handle, gfp);
+	if (page) {
+		ret = (void *)page_to_phys(page);
+
+		/* Here's the magic! Note if the uncached shadow is
+		   not implemented, it's up to the calling code to
+		   also test that condition and make other
+		   arrangements, such as manually flushing the cache
+		   and so on.
+		 */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+		ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
+#endif
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * dma_free_coherent - free a buffer from dma_alloc_coherent().
+ * Strips the uncached-shadow bit (when configured) to recover the real
+ * kernel address before releasing the pages.
+ */
+void dma_free_coherent(struct device *dev, size_t size,
+			void *cpu_addr, dma_addr_t handle)
+{
+	/* Fix: initialize addr; it was used uninitialized whenever
+	 * CONFIG_XILINX_UNCACHED_SHADOW was not set. */
+	void *addr = cpu_addr;
+	struct page *page;
+
+	/* Clear SHADOW_MASK bit in address, and free as per usual */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	addr = (void *)((unsigned)cpu_addr & ~UNCACHED_SHADOW_MASK);
+#endif
+
+	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
+		 cpu_addr, (unsigned long)handle, (unsigned)size);
+	BUG_ON(!virt_addr_valid(addr));
+	page = virt_to_page(addr);
+	__dma_free(dev, size, page, handle);
+}
+EXPORT_SYMBOL(dma_free_coherent);
--- /dev/null
+/*
+ * arch/microblaze/mm/init.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#include <linux/autoconf.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+#include <asm/xparameters.h>
+
+#include <linux/initrd.h>
+
+char *klimit = _end;
+
+/*
+ * setup_memory - initialize the bootmem allocator over external RAM.
+ * RAM spans XPAR_ERAM_START .. XPAR_ERAM_START+XPAR_ERAM_SIZE-1; memory
+ * below 'klimit' (end of the kernel image) stays reserved, and the bootmem
+ * bitmap itself is excluded from the free region.
+ */
+void __init setup_memory(void)
+{
+	unsigned long m_start, m_end, map_size;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	extern char __initramfs_start[], __initramfs_end[];
+
+	/* Point the initrd at the linked-in initramfs image. */
+	initrd_start = (unsigned long) __initramfs_start;
+	initrd_end = (unsigned long) __initramfs_end;
+	initrd_below_start_ok = 1;
+#endif
+
+
+	/* First whole page above the kernel image. */
+	m_start = PAGE_ALIGN((unsigned long)klimit);
+	m_end = (XPAR_ERAM_START+XPAR_ERAM_SIZE-1);
+
+	min_low_pfn = PFN_UP(XPAR_ERAM_START);
+	max_mapnr = PFN_DOWN((XPAR_ERAM_START+XPAR_ERAM_SIZE-1));
+	num_physpages = max_mapnr - min_low_pfn + 1;
+	/* max_low_pfn is mis-named. it holds number of pages, not
+	 * the maximum page frame number in low memory */
+	max_low_pfn = num_physpages;
+	printk("%s: max_mapnr: %#lx\n", __FUNCTION__, max_mapnr);
+	printk("%s: min_low_pfn: %#lx\n", __FUNCTION__, min_low_pfn);
+	printk("%s: max_low_pfn: %#lx\n", __FUNCTION__, max_low_pfn);
+
+	/* map_size is the size of the bootmem bitmap placed at m_start. */
+	map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(m_start), min_low_pfn, max_mapnr);
+
+	/* Hand everything above the bitmap to the allocator. */
+	free_bootmem(m_start+map_size, m_end - (m_start+map_size));
+}
+
+/*
+ * paging_init - set up the zone lists for the single memory node.
+ * All memory is DMA-able, so everything lands in ZONE_NORMAL.
+ */
+void __init paging_init(void)
+{
+	int i;
+	unsigned long zones_size[MAX_NR_ZONES];
+
+	/* Start from a fully zeroed table (fix: the old loop started at
+	 * index 1, leaving slot 0 uninitialized unless ZONE_NORMAL == 0),
+	 * then put every page into ZONE_NORMAL. */
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		zones_size[i] = 0;
+
+	zones_size[ZONE_NORMAL] = max_low_pfn;
+
+	free_area_init_node(0, NODE_DATA(0), zones_size,
+		NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT, NULL);
+}
+
+/*
+ * free_init_pages - return a range of init pages to the page allocator.
+ * @what: label printed in the summary message
+ * @begin, @end: virtual address range (page aligned)
+ *
+ * Pages are poisoned with 0xcc first so stale use of freed init code/data
+ * fails loudly.
+ */
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		memset((void *)addr, 0xcc, PAGE_SIZE);	/* poison */
+		free_page(addr);
+		totalram_pages++;
+	}
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+/* Free the __init text/data section once boot is complete. */
+void free_initmem(void)
+{
+	unsigned long begin = (unsigned long)(&__init_begin);
+	unsigned long end = (unsigned long)(&__init_end);
+
+	free_init_pages("unused kernel memory", begin, end);
+}
+
+/* FIXME */
+/* Stub: memory-usage dump (SysRq 'm') is not implemented on this port. */
+void show_mem(void)
+{
+}
+
+/*
+ * mem_init - release bootmem to the buddy allocator and report totals.
+ */
+void __init mem_init(void)
+{
+	high_memory = (void *)(XPAR_ERAM_START+XPAR_ERAM_SIZE-1);
+	/* this will put all memory onto the freelists */
+	totalram_pages += free_all_bootmem();
+
+	printk(KERN_INFO "Memory: %luk/%luk available\n",
+	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+	       num_physpages << (PAGE_SHIFT-10));
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/* Free the initrd image once it has been unpacked. */
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_init_pages("initrd memory", start, end);
+}
+#endif
+
--- /dev/null
+/*
+ * Procedures for maintaining information about logical memory blocks.
+ *
+ * Peter Bergner, IBM Corp. June 2001.
+ * Copyright (C) 2001 Peter Bergner.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/lmb.h>
+#ifdef CONFIG_PPC32
+#include "mmu_decl.h" /* for __max_low_memory */
+#endif
+
+#define DEBUG
+
+#define LMB_ALLOC_ANYWHERE 0
+
+struct lmb lmb;
+
+/* Dump every memory and reserved region to the debug log (DEBUG only). */
+void lmb_dump_all(void)
+{
+#ifdef DEBUG
+	unsigned long i;
+
+	pr_debug("lmb_dump_all:\n");
+	pr_debug("    memory.cnt		  = 0x%lx\n", lmb.memory.cnt);
+	pr_debug("    memory.size		  = 0x%lx\n", lmb.memory.size);
+	for (i=0; i < lmb.memory.cnt ;i++) {
+		pr_debug("    memory.region[0x%x].base       = 0x%lx\n",
+			i, lmb.memory.region[i].base);
+		pr_debug("		      .size     = 0x%lx\n",
+			lmb.memory.region[i].size);
+	}
+
+	pr_debug("\n    reserved.cnt	  = 0x%lx\n", lmb.reserved.cnt);
+	pr_debug("    reserved.size	  = 0x%lx\n", lmb.reserved.size);
+	for (i=0; i < lmb.reserved.cnt ;i++) {
+		pr_debug("    reserved.region[0x%x].base       = 0x%lx\n",
+			i, lmb.reserved.region[i].base);
+		pr_debug("		      .size     = 0x%lx\n",
+			lmb.reserved.region[i].size);
+	}
+#endif /* DEBUG */
+}
+
+/* Return non-zero when [base1, base1+size1) intersects [base2, base2+size2). */
+static unsigned long __init lmb_addrs_overlap(unsigned long base1,
+		unsigned long size1, unsigned long base2, unsigned long size2)
+{
+	unsigned long end1 = base1 + size1;
+	unsigned long end2 = base2 + size2;
+
+	return (base1 < end2) && (base2 < end1);
+}
+
+/*
+ * Return 1 if region 2 starts exactly where region 1 ends, -1 if region 1
+ * starts exactly where region 2 ends, and 0 when they are not adjacent.
+ */
+static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
+		unsigned long base2, unsigned long size2)
+{
+	if (base1 + size1 == base2)
+		return 1;
+	if (base2 + size2 == base1)
+		return -1;
+	return 0;
+}
+
+/* Adjacency test for two entries of the same region table (see
+ * lmb_addrs_adjacent() for the return convention). */
+static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	unsigned long base1 = rgn->region[r1].base;
+	unsigned long size1 = rgn->region[r1].size;
+	unsigned long base2 = rgn->region[r2].base;
+	unsigned long size2 = rgn->region[r2].size;
+
+	return lmb_addrs_adjacent(base1, size1, base2, size2);
+}
+
+/* Delete entry @r from @rgn, sliding the following entries down one slot. */
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+{
+	unsigned long i;
+
+	rgn->cnt--;
+	for (i = r; i < rgn->cnt; i++)
+		rgn->region[i] = rgn->region[i + 1];
+}
+
+/* Assumption: base addr of region 1 < base addr of region 2 */
+/* Merge adjacent entry r2 into r1 and drop r2 from the table. */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	rgn->region[r1].size += rgn->region[r2].size;
+	lmb_remove_region(rgn, r2);
+}
+
+/* This routine called with relocation disabled. */
+/* Seed both tables with a zero-size entry at base 0 so lmb_add_region()
+ * never has to handle an empty table. */
+void __init lmb_init(void)
+{
+	/* Create a dummy zero size LMB which will get coalesced away later.
+	 * This simplifies the lmb_add() code below...
+	 */
+	lmb.memory.region[0].base = 0;
+	lmb.memory.region[0].size = 0;
+	lmb.memory.cnt = 1;
+
+	/* Ditto. */
+	lmb.reserved.region[0].base = 0;
+	lmb.reserved.region[0].size = 0;
+	lmb.reserved.cnt = 1;
+}
+
+/* This routine may be called with relocation disabled. */
+/* Recompute lmb.memory.size as the sum of all memory region sizes. */
+void __init lmb_analyze(void)
+{
+	int i;
+
+	lmb.memory.size = 0;
+
+	for (i = 0; i < lmb.memory.cnt; i++)
+		lmb.memory.size += lmb.memory.region[i].size;
+}
+
+/* This routine called with relocation disabled. */
+/*
+ * lmb_add_region - insert [base, base+size) into a sorted region table.
+ * Coalesces with an existing adjacent entry when possible; otherwise
+ * shifts entries up and inserts in sorted position.  Returns 0 (or the
+ * number of coalesces performed), -1 if the table is full.
+ */
+static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
+				  unsigned long size)
+{
+	unsigned long coalesced = 0;
+	/* Fix: @i must be signed -- the descending insertion loop below
+	 * tests "i >= 0", which is always true (and underflows) for an
+	 * unsigned index. */
+	long adjacent, i;
+
+	/* First try and coalesce this LMB with another. */
+	for (i=0; i < rgn->cnt; i++) {
+		unsigned long rgnbase = rgn->region[i].base;
+		unsigned long rgnsize = rgn->region[i].size;
+
+		if ((rgnbase == base) && (rgnsize == size))
+			/* Already have this region, so we're done */
+			return 0;
+
+		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
+		if ( adjacent > 0 ) {
+			/* New region sits immediately below entry i. */
+			rgn->region[i].base -= size;
+			rgn->region[i].size += size;
+			coalesced++;
+			break;
+		}
+		else if ( adjacent < 0 ) {
+			/* New region sits immediately above entry i. */
+			rgn->region[i].size += size;
+			coalesced++;
+			break;
+		}
+	}
+
+	/* Growing entry i may have made it adjacent to entry i+1. */
+	if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
+		lmb_coalesce_regions(rgn, i, i+1);
+		coalesced++;
+	}
+
+	if (coalesced)
+		return coalesced;
+	if (rgn->cnt >= MAX_LMB_REGIONS)
+		return -1;
+
+	/* Couldn't coalesce the LMB, so add it to the sorted table. */
+	for (i = rgn->cnt-1; i >= 0; i--) {
+		if (base < rgn->region[i].base) {
+			rgn->region[i+1].base = rgn->region[i].base;
+			rgn->region[i+1].size = rgn->region[i].size;
+		} else {
+			rgn->region[i+1].base = base;
+			rgn->region[i+1].size = size;
+			break;
+		}
+	}
+	rgn->cnt++;
+
+	return 0;
+}
+
+/* This routine may be called with relocation disabled. */
+/* Register [base, base+size) as usable memory. */
+long __init lmb_add(unsigned long base, unsigned long size)
+{
+	struct lmb_region *_rgn = &(lmb.memory);
+
+	/* On pSeries LPAR systems, the first LMB is our RMO region. */
+	if (base == 0)
+		lmb.rmo_size = size;
+
+	return lmb_add_region(_rgn, base, size);
+
+}
+
+/* Mark [base, base+size) as reserved (unavailable for allocation). */
+long __init lmb_reserve(unsigned long base, unsigned long size)
+{
+	struct lmb_region *_rgn = &(lmb.reserved);
+
+	BUG_ON(0 == size);
+
+	return lmb_add_region(_rgn, base, size);
+}
+
+/* Return the index of the first entry in @rgn overlapping
+ * [base, base+size), or -1 if there is none. */
+long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
+				unsigned long size)
+{
+	unsigned long i;
+
+	for (i=0; i < rgn->cnt; i++) {
+		unsigned long rgnbase = rgn->region[i].base;
+		unsigned long rgnsize = rgn->region[i].size;
+		if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
+			break;
+		}
+	}
+
+	return (i < rgn->cnt) ? i : -1;
+}
+
+/* Allocate @size bytes aligned to @align anywhere in memory. */
+unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
+{
+	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+/* Allocate @size bytes aligned to @align below @max_addr; panics on
+ * failure (boot-time allocation must not fail silently). */
+unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
+				    unsigned long max_addr)
+{
+	unsigned long alloc;
+
+	alloc = __lmb_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+				size, max_addr);
+
+	return alloc;
+}
+
+/*
+ * __lmb_alloc_base - allocate @size bytes aligned to @align at the highest
+ * address not above @max_addr (LMB_ALLOC_ANYWHERE = no upper bound).
+ * Walks memory regions top-down, stepping the candidate below any reserved
+ * region it overlaps; on success the winning range is recorded in
+ * lmb.reserved.  Returns the base address, or 0 on failure.
+ */
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+				unsigned long max_addr)
+{
+	long i, j;
+	unsigned long base = 0;
+
+	BUG_ON(0 == size);
+
+#ifdef CONFIG_PPC32
+	/* On 32-bit, make sure we allocate lowmem */
+	if (max_addr == LMB_ALLOC_ANYWHERE)
+		max_addr = __max_low_memory;
+#endif
+	for (i = lmb.memory.cnt-1; i >= 0; i--) {
+		unsigned long lmbbase = lmb.memory.region[i].base;
+		unsigned long lmbsize = lmb.memory.region[i].size;
+
+		/* Start from the highest aligned address inside this
+		 * region that still fits @size (and respects max_addr). */
+		if (max_addr == LMB_ALLOC_ANYWHERE)
+			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
+		else if (lmbbase < max_addr) {
+			base = min(lmbbase + lmbsize, max_addr);
+			base = _ALIGN_DOWN(base - size, align);
+		} else
+			continue;
+
+		/* Slide below each reserved region that overlaps the
+		 * candidate until it is clear or falls out of the region. */
+		while ((lmbbase <= base) &&
+		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
+			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
+					   align);
+
+		if ((base != 0) && (lmbbase <= base))
+			break;
+	}
+
+	if (i < 0)
+		return 0;
+
+	lmb_add_region(&lmb.reserved, base, size);
+
+	return base;
+}
+
+/* You must call lmb_analyze() before this. */
+/* Total bytes of registered physical memory. */
+unsigned long __init lmb_phys_mem_size(void)
+{
+	return lmb.memory.size;
+}
+
+/* Address just past the last (highest) memory region; the table is kept
+ * sorted by base, so the last entry is the top of DRAM. */
+unsigned long __init lmb_end_of_DRAM(void)
+{
+	int idx = lmb.memory.cnt - 1;
+
+	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
+}
+
+/* You must call lmb_analyze() after this. */
+/*
+ * lmb_enforce_memory_limit - clamp usable memory to @memory_limit bytes.
+ * Truncates the memory region table at the limit and trims or removes
+ * reserved regions lying above it.  A zero limit means "no limit".
+ */
+void __init lmb_enforce_memory_limit(unsigned long memory_limit)
+{
+	unsigned long i, limit;
+	struct lmb_property *p;
+
+	if (! memory_limit)
+		return;
+
+	/* Truncate the lmb regions to satisfy the memory limit. */
+	limit = memory_limit;
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		if (limit > lmb.memory.region[i].size) {
+			limit -= lmb.memory.region[i].size;
+			continue;
+		}
+
+		/* This region straddles the limit: shrink it and drop all
+		 * later regions. */
+		lmb.memory.region[i].size = limit;
+		lmb.memory.cnt = i + 1;
+		break;
+	}
+
+	if (lmb.memory.region[0].size < lmb.rmo_size)
+		lmb.rmo_size = lmb.memory.region[0].size;
+
+	/* And truncate any reserves above the limit also. */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		p = &lmb.reserved.region[i];
+
+		if (p->base > memory_limit)
+			p->size = 0;
+		else if ((p->base + p->size) > memory_limit)
+			p->size = memory_limit - p->base;
+
+		if (p->size == 0) {
+			/* Re-test this index after the removal shifts
+			 * entries down. */
+			lmb_remove_region(&lmb.reserved, i);
+			i--;
+		}
+	}
+}
If unsure, say Y. Only embedded should say N here.
+config WANT_DEVICE_TREE
+ bool
+ default n
+
+config BUILD_RAW_IMAGE
+ bool "Build firmware-independent image"
+ select WANT_DEVICE_TREE
+ help
+ If this is enabled, a firmware independent "raw" image will be
+ built, as zImage.raw. This requires a completely filled-in
+ device tree, with the following labels:
+
+ mem_size_cells: on /#address-cells
+ memsize: on the size portion of /memory/reg
+ timebase: on the boot CPU's timebase property
+
+config DEVICE_TREE
+ string "Static device tree source file"
+ depends on WANT_DEVICE_TREE
+ help
+ This specifies the device tree source (.dts) file to be
+ compiled and included when building the bootwrapper. If a
+ relative filename is given, then it will be relative to
+ arch/powerpc/boot/dts. If you are not using the bootwrapper,
+ or do not need to build a dts into the bootwrapper, this
+ field is ignored.
+
+ For example, this is required when building a cuImage target
+ for an older U-Boot, which cannot pass a device tree itself.
+ Such a kernel will not work with a newer U-Boot that tries to
+ pass a device tree (unless you tell it not to). If your U-Boot
+ does not mention a device tree in "help bootm", then use the
+ cuImage target and specify a device tree here. Otherwise, use
+ the uImage target and leave this field blank.
+
+config COMPRESSED_DEVICE_TREE
+ bool "Use compressed device tree"
+ depends on XILINX_VIRTEX
+ depends on WANT_DEVICE_TREE
+ help
+ In Xilinx FPGAs, the hardware can change quite dramatically while
+ still running the same kernel. In this case and other similar
+ ones, it is preferable to associate the device tree with a
+ particular build of the hardware design. This configuration
+ option assumes that the device tree blob has been compressed and
+ stored in Block RAM in the FPGA design. Typically, such a block
+ ram is available in order to provide a bootloop or other code
+ close to the reset vector at the top of the address space. By
+ default, the parameter options associated with this configuration
+ assumes that exactly one block ram (2KB) of storage is available,
+ which should be sufficient for most designs. If necessary in a
+ particular design, due to boot code requirement or a large number
+ of devices, this address (and the corresponding parameters in the
+ EDK design) must be modified.
+
+ Note that in some highly area constrained designs, no block rams
+ may be available in the design, and some other mechanism may be
+ used to hold the processor in reset while external memory is
+ initialized with processor code. In such cases, that mechanism
+ should also be used to load the device tree at an appropriate
+ location, and the parameters associated with this configuration
+ option should be modified to point to that location in external
+ memory.
+
+config COMPRESSED_DTB_START
+ hex "Start of compressed device tree"
+ depends on COMPRESSED_DEVICE_TREE
+ default 0xfffff800
+
+config COMPRESSED_DTB_SIZE
+ hex "Size of compressed device tree"
+ depends on COMPRESSED_DEVICE_TREE
+ default 0x800
+
endmenu
config ISA_DMA_API
zImage.miboot
zImage.pmac
zImage.pseries
+zImage.virtex
zconf.h
zlib.h
zutil.h
fixed-head.S ep88xc.c ep405.c \
cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
- virtex405-head.S
+ virtex.c virtex405-head.S
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
image-$(CONFIG_PPC_ISERIES) += zImage.iseries
image-$(CONFIG_DEFAULT_UIMAGE) += uImage
+image-$(CONFIG_XILINX_VIRTEX) += zImage.virtex
#
# Targets which embed a device tree blob
$(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz
+# If CONFIG_WANT_DEVICE_TREE is set and CONFIG_DEVICE_TREE isn't an
+# empty string, define 'dts' to be path to the dts
+# CONFIG_DEVICE_TREE will have "" around it, make sure to strip them
+ifeq ($(CONFIG_WANT_DEVICE_TREE),y)
+ifneq ($(CONFIG_DEVICE_TREE),"")
+dts = $(if $(shell echo $(CONFIG_DEVICE_TREE) | grep '^/'),\
+ ,$(srctree)/$(src)/dts/)$(CONFIG_DEVICE_TREE:"%"=%)
+endif
+endif
+
# Don't put the ramdisk on the pattern rule; when its missing make will try
# the pattern rule with less dependencies that also matches (even with the
# hard dependency listed).
-$(obj)/zImage.initrd.%: vmlinux $(wrapperbits)
- $(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz)
-
-$(obj)/zImage.%: vmlinux $(wrapperbits)
- $(call if_changed,wrap,$*)
+$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) $(dts)
+ $(call if_changed,wrap,$*,$(dts),,$(obj)/ramdisk.image.gz)
+$(obj)/zImage.%: vmlinux $(wrapperbits) $(dts)
+ $(call if_changed,wrap,$*,$(dts))
# dtbImage% - a dtbImage is a zImage with an embedded device tree blob
$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb
$(call if_changed,wrap,$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
$(obj)/%.dtb: $(dtstree)/%.dts $(obj)/dtc
$(obj)/dtc -O dtb -o $(obj)/$*.dtb -b 0 $(DTS_FLAGS) $(dtstree)/$*.dts
+$(obj)/zImage.raw: vmlinux $(dts) $(wrapperbits)
+ $(call if_changed,wrap,raw,$(dts))
+
# If there isn't a platform selected then just strip the vmlinux.
ifeq (,$(image-y))
image-y := vmlinux.strip
--- /dev/null
+/*
+ * (C) Copyright 2007 Michal Simek
+ *
+ * Michal SIMEK <monstr@monstr.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * CAUTION: This file is automatically generated by libgen.
+ * Version: Xilinx EDK 10.1.1 EDK_K_SP1.1
+ */
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,virtex";
+ model = "testing";
+ DDR_SDRAM: memory@0 {
+ device_type = "memory";
+ reg = < 0 8000000 >;
+ } ;
+ chosen {
+ bootargs = "console=ttyS0 ip=on root=/dev/ram";
+ linux,stdout-path = "/plb@0/serial@83e00000";
+ } ;
+ cpus {
+ #address-cells = <1>;
+ #cpus = <1>;
+
+ #size-cells = <0>;
+ ppc405_0: cpu@0 {
+ clock-frequency = <11e1a300>;
+ compatible = "PowerPC,405", "ibm,ppc405";
+
+ d-cache-line-size = <20>;
+ d-cache-size = <4000>;
+ device_type = "cpu";
+ i-cache-line-size = <20>;
+
+ i-cache-size = <4000>;
+ model = "PowerPC,405";
+ reg = <0>;
+ timebase-frequency = <11e1a300>;
+ xlnx,apu-control = <de00>;
+ xlnx,apu-udi-1 = <a18983>;
+ xlnx,apu-udi-2 = <a38983>;
+ xlnx,apu-udi-3 = <a589c3>;
+ xlnx,apu-udi-4 = <a789c3>;
+ xlnx,apu-udi-5 = <a98c03>;
+ xlnx,apu-udi-6 = <ab8c03>;
+ xlnx,apu-udi-7 = <ad8c43>;
+ xlnx,apu-udi-8 = <af8c43>;
+ xlnx,deterministic-mult = <0>;
+ xlnx,disable-operand-forwarding = <1>;
+ xlnx,fastest-plb-clock = "DPLB0";
+ xlnx,generate-plb-timespecs = <1>;
+ xlnx,mmu-enable = <1>;
+ xlnx,pvr-high = <0>;
+ xlnx,pvr-low = <0>;
+ } ;
+ } ;
+ plb: plb@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v46-1.02.a";
+ ranges ;
+ IIC_EEPROM: i2c@81600000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 4 2 >;
+ reg = < 81600000 10000 >;
+ xlnx,clk-freq = <5f5e100>;
+ xlnx,family = "virtex4";
+ xlnx,gpo-width = <1>;
+ xlnx,iic-freq = <186a0>;
+ xlnx,scl-inertial-delay = <0>;
+ xlnx,sda-inertial-delay = <0>;
+ xlnx,ten-bit-adr = <0>;
+ } ;
+ LEDs_4Bit: gpio@81400000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 5 2 >;
+ reg = < 81400000 10000 >;
+ xlnx,all-inputs = <0>;
+ xlnx,all-inputs-2 = <0>;
+ xlnx,dout-default = <0>;
+ xlnx,dout-default-2 = <0>;
+ xlnx,family = "virtex4";
+ xlnx,gpio-width = <4>;
+ xlnx,interrupt-present = <1>;
+ xlnx,is-bidir = <1>;
+ xlnx,is-bidir-2 = <1>;
+ xlnx,is-dual = <0>;
+ xlnx,tri-default = <ffffffff>;
+ xlnx,tri-default-2 = <ffffffff>;
+ } ;
+ RS232_Uart: serial@83e00000 {
+ compatible = "ns16550";
+ device_type = "serial";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 6 2 >;
+ reg = < 83e00000 10000 >;
+ reg-offset = <3>;
+ reg-shift = <2>;
+ clock-frequency = <05f5e100>;
+ xlnx,family = "virtex4";
+ xlnx,has-external-rclk = <0>;
+ xlnx,has-external-xin = <0>;
+ xlnx,is-a-16550 = <1>;
+ } ;
+ SysACE_CompactFlash: sysace@83600000 {
+ compatible = "xlnx,xps-sysace-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 3 2 >;
+ reg = < 83600000 10000 >;
+ xlnx,family = "virtex4";
+ xlnx,mem-width = <10>;
+ } ;
+ TriMode_MAC_GMII: xps-ll-temac@81c00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,compound";
+ ethernet@81c00000 {
+ compatible = "xlnx,xps-ll-temac-1.01.a";
+ device_type = "network";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 2 2 >;
+ llink-connected = <&PIM2>;
+ local-mac-address = [ 02 00 00 00 00 01 ];
+ reg = < 81c00000 40 >;
+ xlnx,bus2core-clk-ratio = <1>;
+ xlnx,phy-type = <1>;
+ xlnx,phyaddr = <1>;
+ xlnx,rxcsum = <0>;
+ xlnx,rxfifo = <1000>;
+ xlnx,temac-type = <1>;
+ xlnx,txcsum = <0>;
+ xlnx,txfifo = <1000>;
+ } ;
+ } ;
+ mpmc@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,mpmc-4.00.a";
+ PIM2: sdma@84600100 {
+ compatible = "xlnx,ll-dma-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 1 2 0 2 >;
+ reg = < 84600100 80 >;
+ } ;
+ } ;
+ xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffffe000 {
+ compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
+ reg = < ffffe000 2000 >;
+ xlnx,family = "virtex4";
+ } ;
+ xps_intc_0: interrupt-controller@81800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller ;
+ reg = < 81800000 10000 >;
+ xlnx,num-intr-inputs = <7>;
+ } ;
+ } ;
+ ppc405_0_dplb1: plb@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v46-1.02.a";
+ ranges ;
+ } ;
+} ;
--- /dev/null
+/*
+ * (C) Copyright 2007-2008 Xilinx, Inc.
+ * (C) Copyright 2007 Michal Simek
+ *
+ * Michal SIMEK <monstr@monstr.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * CAUTION: This file is automatically generated by libgen.
+ * Version: Xilinx EDK 10.1.01 EDK_K_SP1.2
+ */
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,virtex";
+ dcr-parent = <&ppc440_virtex5_0>;
+ model = "testing";
+ chosen {
+ bootargs = "console=ttyS0 ip=on root=/dev/ram";
+ linux,stdout-path = "/plb@0/serial@d0000000";
+ } ;
+ cpus {
+ #address-cells = <1>;
+ #cpus = <1>;
+ #size-cells = <0>;
+ ppc440_virtex5_0: cpu@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clock-frequency = <17d78400>;
+ compatible = "PowerPC,440", "ibm,ppc440";
+ d-cache-line-size = <20>;
+ d-cache-size = <8000>;
+ dcr-access-method = "native";
+ dcr-controller ;
+ device_type = "cpu";
+ i-cache-line-size = <20>;
+ i-cache-size = <8000>;
+ model = "PowerPC,440";
+ reg = <0>;
+ timebase-frequency = <17d78400>;
+ xlnx,apu-control = <1>;
+ xlnx,apu-udi-0 = <c07701>;
+ xlnx,apu-udi-1 = <c47701>;
+ xlnx,apu-udi-10 = <0>;
+ xlnx,apu-udi-11 = <0>;
+ xlnx,apu-udi-12 = <0>;
+ xlnx,apu-udi-13 = <0>;
+ xlnx,apu-udi-14 = <0>;
+ xlnx,apu-udi-15 = <0>;
+ xlnx,apu-udi-2 = <0>;
+ xlnx,apu-udi-3 = <0>;
+ xlnx,apu-udi-4 = <0>;
+ xlnx,apu-udi-5 = <0>;
+ xlnx,apu-udi-6 = <0>;
+ xlnx,apu-udi-7 = <0>;
+ xlnx,apu-udi-8 = <0>;
+ xlnx,apu-udi-9 = <0>;
+ xlnx,dcr-autolock-enable = <1>;
+ xlnx,dcu-rd-ld-cache-plb-prio = <0>;
+ xlnx,dcu-rd-noncache-plb-prio = <0>;
+ xlnx,dcu-rd-touch-plb-prio = <0>;
+ xlnx,dcu-rd-urgent-plb-prio = <0>;
+ xlnx,dcu-wr-flush-plb-prio = <0>;
+ xlnx,dcu-wr-store-plb-prio = <0>;
+ xlnx,dcu-wr-urgent-plb-prio = <0>;
+ xlnx,dma0-control = <0>;
+ xlnx,dma0-plb-prio = <0>;
+ xlnx,dma0-rxchannelctrl = <1010000>;
+ xlnx,dma0-rxirqtimer = <3ff>;
+ xlnx,dma0-txchannelctrl = <1010000>;
+ xlnx,dma0-txirqtimer = <3ff>;
+ xlnx,dma1-control = <0>;
+ xlnx,dma1-plb-prio = <0>;
+ xlnx,dma1-rxchannelctrl = <1010000>;
+ xlnx,dma1-rxirqtimer = <3ff>;
+ xlnx,dma1-txchannelctrl = <1010000>;
+ xlnx,dma1-txirqtimer = <3ff>;
+ xlnx,dma2-control = <0>;
+ xlnx,dma2-plb-prio = <0>;
+ xlnx,dma2-rxchannelctrl = <1010000>;
+ xlnx,dma2-rxirqtimer = <3ff>;
+ xlnx,dma2-txchannelctrl = <1010000>;
+ xlnx,dma2-txirqtimer = <3ff>;
+ xlnx,dma3-control = <0>;
+ xlnx,dma3-plb-prio = <0>;
+ xlnx,dma3-rxchannelctrl = <1010000>;
+ xlnx,dma3-rxirqtimer = <3ff>;
+ xlnx,dma3-txchannelctrl = <1010000>;
+ xlnx,dma3-txirqtimer = <3ff>;
+ xlnx,endian-reset = <0>;
+ xlnx,generate-plb-timespecs = <1>;
+ xlnx,icu-rd-fetch-plb-prio = <0>;
+ xlnx,icu-rd-spec-plb-prio = <0>;
+ xlnx,icu-rd-touch-plb-prio = <0>;
+ xlnx,interconnect-imask = <ffffffff>;
+ xlnx,mplb-allow-lock-xfer = <1>;
+ xlnx,mplb-arb-mode = <0>;
+ xlnx,mplb-awidth = <20>;
+ xlnx,mplb-counter = <500>;
+ xlnx,mplb-dwidth = <80>;
+ xlnx,mplb-max-burst = <8>;
+ xlnx,mplb-native-dwidth = <80>;
+ xlnx,mplb-p2p = <0>;
+ xlnx,mplb-prio-dcur = <2>;
+ xlnx,mplb-prio-dcuw = <3>;
+ xlnx,mplb-prio-icu = <4>;
+ xlnx,mplb-prio-splb0 = <1>;
+ xlnx,mplb-prio-splb1 = <0>;
+ xlnx,mplb-read-pipe-enable = <1>;
+ xlnx,mplb-sync-tattribute = <0>;
+ xlnx,mplb-wdog-enable = <1>;
+ xlnx,mplb-write-pipe-enable = <1>;
+ xlnx,mplb-write-post-enable = <1>;
+ xlnx,num-dma = <1>;
+ xlnx,pir = <f>;
+ xlnx,ppc440mc-addr-base = <0>;
+ xlnx,ppc440mc-addr-high = <1fffffff>;
+ xlnx,ppc440mc-arb-mode = <0>;
+ xlnx,ppc440mc-bank-conflict-mask = <c00000>;
+ xlnx,ppc440mc-control = <f810008f>;
+ xlnx,ppc440mc-max-burst = <8>;
+ xlnx,ppc440mc-prio-dcur = <2>;
+ xlnx,ppc440mc-prio-dcuw = <3>;
+ xlnx,ppc440mc-prio-icu = <4>;
+ xlnx,ppc440mc-prio-splb0 = <1>;
+ xlnx,ppc440mc-prio-splb1 = <0>;
+ xlnx,ppc440mc-row-conflict-mask = <3ffe00>;
+ xlnx,ppcdm-asyncmode = <0>;
+ xlnx,ppcds-asyncmode = <0>;
+ xlnx,user-reset = <0>;
+ DMA0: sdma@80 {
+ compatible = "xlnx,ll-dma-1.00.a";
+ dcr-reg = < 80 11 >;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = < 5 2 6 2 >;
+ } ;
+ } ;
+ } ;
+ plb_v46_cfb_0: plb@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v46-1.02.a";
+ ranges ;
+ iic_bus: i2c@d0020000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = < 7 2 >;
+ reg = < d0020000 200 >;
+ xlnx,clk-freq = <5f5e100>;
+ xlnx,family = "virtex5";
+ xlnx,gpo-width = <1>;
+ xlnx,iic-freq = <186a0>;
+ xlnx,scl-inertial-delay = <0>;
+ xlnx,sda-inertial-delay = <0>;
+ xlnx,ten-bit-adr = <0>;
+ } ;
+ leds_8bit: gpio@d0010200 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = < 1 2 >;
+ reg = < d0010200 200 >;
+ xlnx,all-inputs = <0>;
+ xlnx,all-inputs-2 = <0>;
+ xlnx,dout-default = <0>;
+ xlnx,dout-default-2 = <0>;
+ xlnx,family = "virtex5";
+ xlnx,gpio-width = <8>;
+ xlnx,interrupt-present = <1>;
+ xlnx,is-bidir = <1>;
+ xlnx,is-bidir-2 = <1>;
+ xlnx,is-dual = <0>;
+ xlnx,tri-default = <ffffffff>;
+ xlnx,tri-default-2 = <ffffffff>;
+ } ;
+ ll_temac_0: xps-ll-temac@91200000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,compound";
+ ethernet@91200000 {
+ compatible = "xlnx,xps-ll-temac-1.01.a";
+ device_type = "network";
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = < 4 2 >;
+ llink-connected = <&DMA0>;
+ local-mac-address = [ 02 00 00 00 00 00 ];
+ reg = < 91200000 40 >;
+ xlnx,bus2core-clk-ratio = <1>;
+ xlnx,phy-type = <1>;
+ xlnx,phyaddr = <1>;
+ xlnx,rxcsum = <0>;
+ xlnx,rxfifo = <4000>;
+ xlnx,temac-type = <0>;
+ xlnx,txcsum = <0>;
+ xlnx,txfifo = <4000>;
+ } ;
+ } ;
+ opb_intc_0: interrupt-controller@d0020200 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller ;
+ reg = < d0020200 20 >;
+ xlnx,num-intr-inputs = <8>;
+ } ;
+ plb_bram_if_cntlr_0: xps-bram-if-cntlr@ffff0000 {
+ compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
+ reg = < ffff0000 10000 >;
+ xlnx,family = "virtex5";
+ } ;
+ plb_bram_if_cntlr_1: xps-bram-if-cntlr@eee00000 {
+ compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
+ reg = < eee00000 2000 >;
+ xlnx,family = "virtex5";
+ } ;
+ rs232_uart_0: serial@d0000000 {
+ clock-frequency = <1312d00>;
+ compatible = "xlnx,xps-uart16550-2.00.a", "ns16550";
+ current-speed = <2580>;
+ device_type = "serial";
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = < 0 2 >;
+ reg = < d0000000 2000 >;
+ reg-offset = <3>;
+ reg-shift = <2>;
+ xlnx,family = "virtex5";
+ xlnx,has-external-rclk = <0>;
+ xlnx,has-external-xin = <1>;
+ xlnx,is-a-16550 = <1>;
+ } ;
+ sysace_compactflash: sysace@d0030100 {
+ compatible = "xlnx,xps-sysace-1.00.a";
+ reg = < d0030100 80 >;
+ xlnx,family = "virtex5";
+ xlnx,mem-width = <10>;
+ } ;
+ } ;
+ ppc440mc_ddr2_0: memory@0 {
+ device_type = "memory";
+ reg = < 0 20000000 >;
+ } ;
+} ;
asm volatile("" : : : "memory");
}
+static inline void disable_irq(void)
+{
+ int dummy;
+ asm volatile("mfmsr %0; rlwinm %0, %0, 0, ~(1<<15); mtmsr %0" :
+ "=r" (dummy) : : "memory");
+}
+
#endif /* _IO_H */
--- /dev/null
+/*
+ * The "raw" platform -- for booting from a complete dtb without
+ * any fixups.
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include "ops.h"
+#include "types.h"
+#include "io.h"
+
+BSS_STACK(4096);
+
+/* These are labels in the device tree. */
+extern u32 memsize[2], timebase, mem_size_cells;
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ u64 memsize64 = memsize[0];
+ static const unsigned long line_size = 32;
+ static const unsigned long congruence_classes = 256;
+ unsigned long addr;
+ unsigned long dccr;
+
+ /*
+ * Invalidate the data cache if the data cache is turned off.
+ * - The 405 core does not invalidate the data cache on power-up
+ * or reset but does turn off the data cache. We cannot assume
+ * that the cache contents are valid.
+ * - If the data cache is turned on this must have been done by
+ * a bootloader and we assume that the cache contents are
+ * valid.
+ */
+ __asm__("mfdccr %0": "=r" (dccr));
+ if (dccr == 0) {
+ for (addr = 0;
+ addr < (congruence_classes * line_size);
+ addr += line_size) {
+ __asm__("dccci 0,%0": :"b"(addr));
+ }
+ }
+
+ if (mem_size_cells == 2) {
+ memsize64 <<= 32;
+ memsize64 |= memsize[1];
+ }
+
+ if (sizeof(void *) == 4 && memsize64 >= 0x100000000ULL)
+ memsize64 = 0xffffffff;
+
+ disable_irq();
+ timebase_period_ns = 1000000000 / timebase;
+ simple_alloc_init(_end, memsize64 - (unsigned long)_end, 32, 64);
+ ft_init(_dtb_start, _dtb_end - _dtb_start, 32);
+ serial_console_init();
+}
--- /dev/null
+/*
+ * Old U-boot compatibility for Walnut
+ *
+ * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
+ *
+ * Copyright 2007 IBM Corporation
+ * Based on cuboot-83xx.c, which is:
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <stddef.h>
+#include <stdio.h>
+#include "ops.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "io.h"
+#include "reg.h"
+
+BSS_STACK(4096);
+
+#include "types.h"
+#include "gunzip_util.h"
+#include <libfdt.h>
+#include "../../../include/linux/autoconf.h"
+
+#define UART_DLL 0 /* Out: Divisor Latch Low */
+#define UART_DLM 1 /* Out: Divisor Latch High */
+#define UART_FCR 2 /* Out: FIFO Control Register */
+#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
+#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
+#define UART_LCR 3 /* Out: Line Control Register */
+#define UART_MCR 4 /* Out: Modem Control Register */
+#define UART_MCR_RTS 0x02 /* RTS complement */
+#define UART_MCR_DTR 0x01 /* DTR complement */
+#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
+#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
+
+/* This function is only needed when there is no boot loader to
+ initialize the UART
+*/
+static int virtex_ns16550_console_init(void *devp)
+{
+ int n;
+ unsigned long reg_phys;
+ unsigned char *regbase;
+ u32 regshift, clk, spd;
+ u16 divisor;
+
+	n = getprop(devp, "virtual-reg", &regbase, sizeof(regbase));
+ if (n != sizeof(regbase)) {
+		if (!dt_xlate_reg(devp, 0, &reg_phys, NULL))
+ return -1;
+
+ regbase = (void *)reg_phys + 3;
+ }
+ regshift = 2;
+
+ n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd));
+ if (n != sizeof(spd))
+ spd = 9600;
+
+ /* should there be a default clock rate?*/
+ n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk));
+ if (n != sizeof(clk))
+ return -1;
+
+ divisor = clk / (16 * spd);
+
+ /* Access baud rate */
+ out_8(regbase + (UART_LCR << regshift), UART_LCR_DLAB);
+
+ /* Baud rate based on input clock */
+ out_8(regbase + (UART_DLL << regshift), divisor & 0xFF);
+ out_8(regbase + (UART_DLM << regshift), divisor >> 8);
+
+ /* 8 data, 1 stop, no parity */
+ out_8(regbase + (UART_LCR << regshift), UART_LCR_WLEN8);
+
+ /* RTS/DTR */
+ out_8(regbase + (UART_MCR << regshift), UART_MCR_RTS | UART_MCR_DTR);
+
+ /* Clear transmitter and receiver */
+ out_8(regbase + (UART_FCR << regshift),
+ UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
+ return 0;
+}
+
+/* For virtex, the kernel may be loaded without using a bootloader and if so
+ some UARTs need more setup than is provided in the normal console init
+*/
+static int virtex_serial_console_init(void)
+{
+ void *devp;
+ char devtype[MAX_PROP_LEN];
+ char path[MAX_PATH_LEN];
+
+ devp = finddevice("/chosen");
+ if (devp == NULL)
+ return -1;
+
+ if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
+ devp = finddevice(path);
+ if (devp == NULL)
+ return -1;
+
+ if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
+ && !strcmp(devtype, "serial")
+ && (dt_is_compatible(devp, "ns16550")))
+ virtex_ns16550_console_init(devp);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_COMPRESSED_DEVICE_TREE
+static struct gunzip_state gzstate;
+#endif
+
+void platform_init(void)
+{
+ u32 memreg[4];
+ u64 start;
+ u64 size = 0x2000000;
+ int naddr, nsize, i;
+ void *root, *memory;
+ static const unsigned long line_size = 32;
+ static const unsigned long congruence_classes = 256;
+ unsigned long addr;
+ unsigned long dccr;
+
+#ifdef CONFIG_COMPRESSED_DEVICE_TREE
+ void *dtbz_start;
+ u32 dtbz_size;
+ void *dtb_addr;
+ u32 dtb_size;
+ struct fdt_header dtb_header;
+ int len;
+#endif
+
+ if((mfpvr() & 0xfffff000) == 0x20011000) {
+ /* PPC errata 213: only for Virtex-4 FX */
+ __asm__("mfccr0 0\n\t"
+ "oris 0,0,0x50000000@h\n\t"
+ "mtccr0 0"
+ : : : "0");
+ }
+
+ /*
+ * Invalidate the data cache if the data cache is turned off.
+ * - The 405 core does not invalidate the data cache on power-up
+ * or reset but does turn off the data cache. We cannot assume
+ * that the cache contents are valid.
+ * - If the data cache is turned on this must have been done by
+ * a bootloader and we assume that the cache contents are
+ * valid.
+ */
+ __asm__("mfdccr %0": "=r" (dccr));
+ if (dccr == 0) {
+ for (addr = 0;
+ addr < (congruence_classes * line_size);
+ addr += line_size) {
+ __asm__("dccci 0,%0": :"b"(addr));
+ }
+ }
+
+#if defined(CONFIG_XILINX_VIRTEX_5_FXT) && defined(CONFIG_MATH_EMULATION)
+ /* Make sure the APU is disabled when using soft FPU emulation */
+ mtdcr(5, 0);
+#endif
+
+ disable_irq();
+
+#ifdef CONFIG_COMPRESSED_DEVICE_TREE
+
+ /** FIXME: flatdevicetrees need the initializer allocated,
+ libfdt will fix this. */
+ dtbz_start = (void *)CONFIG_COMPRESSED_DTB_START;
+ dtbz_size = CONFIG_COMPRESSED_DTB_SIZE;
+ /** get the device tree */
+ gunzip_start(&gzstate, dtbz_start, dtbz_size);
+ gunzip_exactly(&gzstate, &dtb_header, sizeof(dtb_header));
+
+ dtb_size = dtb_header.totalsize;
+ // printf("Allocating 0x%lx bytes for dtb ...\n\r", dtb_size);
+
+ dtb_addr = _end; // Should be allocated?
+
+ gunzip_start(&gzstate, dtbz_start, dtbz_size);
+ len = gunzip_finish(&gzstate, dtb_addr, dtb_size);
+ if (len != dtb_size)
+ fatal("ran out of data! only got 0x%x of 0x%lx bytes.\n\r",
+ len, dtb_size);
+ printf("done 0x%x bytes\n\r", len);
+ simple_alloc_init(0x800000, size - (unsigned long)0x800000, 32, 64);
+ fdt_init(dtb_addr);
+#else
+ /** FIXME: flatdevicetrees need the initializer allocated,
+ libfdt will fix this. */
+ simple_alloc_init(_end, size - (unsigned long)_end, 32, 64);
+ fdt_init(_dtb_start);
+#endif
+
+ root = finddevice("/");
+ if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
+ naddr = 2;
+ if (naddr < 1 || naddr > 2)
+ fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
+
+ if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
+ nsize = 1;
+ if (nsize < 1 || nsize > 2)
+ fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
+
+ memory = finddevice("/memory@0");
+ if (! memory) {
+ fatal("Need a memory@0 node!\n\r");
+ }
+ if (getprop(memory, "reg", memreg, sizeof(memreg)) < 0)
+ fatal("Need a memory@0 node!\n\r");
+
+ i = 0;
+ start = memreg[i++];
+ if(naddr == 2) {
+ start = (start << 32) | memreg[i++];
+ }
+ size = memreg[i++];
+ if (nsize == 2)
+ size = (size << 32) | memreg[i++];
+
+ // timebase_period_ns = 1000000000 / timebase;
+ virtex_serial_console_init();
+ serial_console_init();
+ if (console_ops.open)
+ console_ops.open();
+
+#ifdef CONFIG_COMPRESSED_DEVICE_TREE
+ printf("Using compressed device tree at 0x%x\n\r", CONFIG_COMPRESSED_DTB_START);
+#else
+#endif
+ printf("booting virtex\n\r");
+ printf("memstart=0x%llx\n\r", start);
+ printf("memsize=0x%llx\n\r", size);
+}
if [ "$platform" != "miboot" ]; then
${CROSS}ld -m elf32ppc -T $lds -o "$ofile" \
- $platformo $tmp $object/wrapper.a
+ $platformo $tmp $dto $object/wrapper.a
rm $tmp
fi
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.25-rc9
+# Mon Apr 14 06:46:08 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_6xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+CONFIG_40x=y
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_4xx=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_NOT_COHERENT_CACHE=y
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Platform support
+#
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EP405 is not set
+# CONFIG_KILAUEA is not set
+# CONFIG_MAKALU is not set
+# CONFIG_WALNUT is not set
+CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y
+CONFIG_XILINX_VIRTEX_II_PRO=y
+CONFIG_XILINX_VIRTEX_4_FX=y
+CONFIG_IBM405_ERR77=y
+CONFIG_IBM405_ERR51=y
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_XILINX_VIRTEX=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_PM is not set
+CONFIG_SECCOMP=y
+CONFIG_WANT_DEVICE_TREE=y
+# CONFIG_BUILD_RAW_IMAGE is not set
+CONFIG_DEVICE_TREE="ml405.dts"
+# CONFIG_COMPRESSED_DEVICE_TREE is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_CONSISTENT_START=0xff100000
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_BOOT_LOAD=0x00400000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+CONFIG_NETFILTER_XTABLES=m
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+CONFIG_IP_NF_FILTER=m
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_IP_NF_MANGLE=m
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_XILINX_SYSACE_OLD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_XILINX_DRIVERS=y
+CONFIG_NEED_XILINX_LLDMA=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_IBM_NEW_EMAC is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+# CONFIG_XILINX_EMAC is not set
+# CONFIG_XILINX_EMACLITE is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_E1000E_ENABLED is not set
+# CONFIG_XILINX_TEMAC is not set
+CONFIG_XILINX_LLTEMAC=y
+# CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII is not set
+CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_GMII=y
+# CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_MII is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_XILINXPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_XILINX_GPIO is not set
+CONFIG_XILINX_HWICAP=m
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+CONFIG_XILINX_IIC=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_SYS_FOPS is not set
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_OF is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+CONFIG_FB_XILINX=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+CONFIG_XILINX_EDK=y
+# CONFIG_XILINX_LLDMA_USE_DCR is not set
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+# CONFIG_NFSD_TCP is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=y
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_SAMPLES is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_SEQIV is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_XTS is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_TEST is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_PPC_CLOCK is not set
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.24-rc8-xlnx
+# Wed Apr 9 07:12:04 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_6xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+CONFIG_44x=y
+# CONFIG_E200 is not set
+CONFIG_4xx=y
+CONFIG_BOOKE=y
+CONFIG_PTE_64BIT=y
+CONFIG_PHYS_64BIT=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_NOT_COHERENT_CACHE=y
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+
+#
+# Platform support
+#
+# CONFIG_PPC_MPC52xx is not set
+# CONFIG_PPC_MPC5200 is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_BAMBOO is not set
+# CONFIG_EBONY is not set
+# CONFIG_SEQUOIA is not set
+CONFIG_XILINX_ML507=y
+# CONFIG_XILINX_DISABLE_44x_CACHE is not set
+CONFIG_XILINX_ML5XX=y
+CONFIG_XILINX_VIRTEX_5_FXT=y
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPM2 is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_XILINX_DRIVERS=y
+CONFIG_XILINX_VIRTEX=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_BKL=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MATH_EMULATION=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+CONFIG_SECCOMP=y
+CONFIG_WANT_DEVICE_TREE=y
+# CONFIG_BUILD_RAW_IMAGE is not set
+CONFIG_DEVICE_TREE="ml507.dts"
+# CONFIG_COMPRESSED_DEVICE_TREE is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_CONSISTENT_START=0xff100000
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_BOOT_LOAD=0x01000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_NF_CONNTRACK_ENABLED is not set
+# CONFIG_NF_CONNTRACK is not set
+CONFIG_NETFILTER_XTABLES=m
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_IPRANGE is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+CONFIG_IP_NF_FILTER=m
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_IP_NF_MANGLE=m
+# CONFIG_IP_NF_TARGET_TOS is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_XILINX_SYSACE_OLD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+CONFIG_NEED_XILINX_LLDMA=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_XILINX_EMAC is not set
+# CONFIG_XILINX_EMACLITE is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
+# CONFIG_XILINX_TEMAC is not set
+CONFIG_XILINX_LLTEMAC=y
+# CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII is not set
+CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_GMII=y
+# CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_MII is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_XILINXPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_XILINX_GPIO is not set
+# CONFIG_XILINX_HWICAP is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+CONFIG_XILINX_IIC=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+CONFIG_I2C_DEBUG_CORE=y
+CONFIG_I2C_DEBUG_ALGO=y
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_SYS_FOPS is not set
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_OF is not set
+# CONFIG_FB_CT65550 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+CONFIG_FB_XILINX=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+CONFIG_XILINX_EDK=y
+CONFIG_XILINX_LLDMA_USE_DCR=y
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_ROMFS_FS=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+# CONFIG_NFSD_TCP is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=y
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+# CONFIG_UCC_SLOW is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_INSTRUMENTATION=y
+# CONFIG_PROFILING is not set
+# CONFIG_KPROBES is not set
+# CONFIG_MARKERS is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_SAMPLES is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_PPC_CLOCK is not set
.machine_check = machine_check_440A,
.platform = "ppc440",
},
+ { /* 440 in Xilinx Virtex-5 FXT */
+ .pvr_mask = 0xfffffff0,
+ .pvr_value = 0x7ff21910,
+ .cpu_name = "440 in Virtex-5 FXT",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .platform = "ppc440",
+ },
{ /* 460EX */
.pvr_mask = 0xffff0002,
.pvr_value = 0x13020002,
andc r11,r11,r0
MTMSRD(r11)
isync
+#if defined(CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND) || defined(CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND)
+ mfspr r5,SPRN_CCR0
+#ifdef CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND
+ andis. r5,r5, ~(1<<6)@l
+#endif
+#ifdef CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND
+ andi. r5,r5, ~(1<<5)@l
+#endif
+ mtspr SPRN_CCR0,r5
+ isync
+#endif
1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
* enable the FPU for the current task and return to the task.
*/
_GLOBAL(load_up_fpu)
+#ifdef CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+ li r3,0
+ lis r5,excep_state@h
+ ori r5,r5,excep_state@l
+ stw r3,0(r5)
+#endif /* CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND */
+
+#if defined(CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND) || defined(CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND)
+ mfspr r5,SPRN_CCR0
+#ifdef CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND
+ /* set CCR0[9] to disable the load miss queue inside the ppc440 */
+ oris r5,r5, (1<<6)
+#endif
+#ifdef CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND
+ /* set CCR0[26] as part of the FPU ITLB-miss exception workaround */
+ ori r5,r5, (1<<5)
+#endif
+ mtspr SPRN_CCR0,r5
+ isync
+#endif
+
mfmsr r5
ori r5,r5,MSR_FP
SYNC
* Enables the FPU for use in the kernel on return.
*/
_GLOBAL(giveup_fpu)
+#if defined(CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND) || defined(CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND)
+ mfspr r5,SPRN_CCR0
+#ifdef CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND
+ /* set CCR0[9] to disable the load miss queue inside the ppc440 */
+ oris r5,r5, (1<<6)
+#endif
+#ifdef CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND
+ /* set CCR0[26] as part of the FPU ITLB-miss exception workaround */
+ ori r5,r5, (1<<5)
+#endif
+ mtspr SPRN_CCR0,r5
+ isync
+#endif
+
mfmsr r5
ori r5,r5,MSR_FP
SYNC_601
mtspr SPRN_MMUCR,r5
sync
+ /* Initialize the DVLIM and IVLIM */
+ lis r5,0x0001F800@h
+ ori r5,r5,0x0001F800@l
+
+ mtspr 0x398,r5
+ mtspr 0x399,r5
+
+ li r5,0
+ /* Initialize the DNV0-3 and DTV0-3 */
+ mtspr 0x390,r5
+ mtspr 0x391,r5
+ mtspr 0x392,r5
+ mtspr 0x393,r5
+ mtspr 0x394,r5
+ mtspr 0x395,r5
+ mtspr 0x396,r5
+ mtspr 0x397,r5
+
+ /* Initialize the INV0-3 and ITV0-3 */
+ mtspr 0x370,r5
+ mtspr 0x371,r5
+ mtspr 0x372,r5
+ mtspr 0x373,r5
+ mtspr 0x374,r5
+ mtspr 0x375,r5
+ mtspr 0x376,r5
+ mtspr 0x377,r5
+
/* pageid fields */
clrrwi r3,r3,10 /* Mask off the effective page number */
ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
/* attrib fields */
/* Added guarded bit to protect against speculative loads/stores */
li r5,0
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+ ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G | PPC44x_TLB_I)
+#else
ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+#endif
li r0,63 /* TLB slot 63 */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
/* Force context change */
+#ifdef CONFIG_XILINX_ML5XX
+ /* We can not use the content of the MSR register when we are using XMD
+ * to connect to a ml5xx board as XMD changes the contents of the MSR
+ * register. We load the default value instead.
+ */
+ lis r0,MSR_KERNEL@h
+ ori r0,r0,MSR_KERNEL@l
+#else
mfmsr r0
+#endif
mtspr SPRN_SRR1, r0
lis r0,3f@h
ori r0,r0,3f@l
/* attrib fields */
li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
- li r0,62 /* TLB slot 0 */
+ li r0,62 /* TLB slot 62 */
tlbwe r3,r0,PPC44x_TLB_PAGEID
tlbwe r4,r0,PPC44x_TLB_XLAT
/* find the TLB index that caused the fault. It has to be here. */
tlbsx r10, 0, r10
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+ ori r11, r11, PPC44x_TLB_I
+#endif
tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
*/
rlwinm r12, r12, 0, 20, 10
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+ ori r12, r12, PPC44x_TLB_I
+#endif
tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
+#ifdef CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+u8 excep_state = 0;
+#endif
+
/*
* Trap & Exception support
*/
/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */
+#ifdef CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+ if (reason & REASON_ILLEGAL) {
+ if (excep_state < 1) {
+ excep_state++;
+ return;
+ }
+ /* should never get here */
+ BUG();
+ }
+#endif /* CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND */
+
/* Try to emulate it if we should. */
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
--- /dev/null
+#ifndef __POWERPC_PLATFORMS_40X_40X_H
+#define __POWERPC_PLATFORMS_40X_40X_H
+
+extern void ppc40x_reset_system(char *cmd);
+
+#endif /* __POWERPC_PLATFORMS_40X_40X_H */
default n
select XILINX_VIRTEX_II_PRO
select XILINX_VIRTEX_4_FX
+ select WANT_DEVICE_TREE
help
This option enables generic support for Xilinx Virtex based boards.
config 405GPR
bool
-config XILINX_VIRTEX
- bool
-
config XILINX_VIRTEX_II_PRO
bool
select XILINX_VIRTEX
select IBM405_ERR77
select IBM405_ERR51
+ select WANT_DEVICE_TREE
config XILINX_VIRTEX_4_FX
bool
select XILINX_VIRTEX
+ select WANT_DEVICE_TREE
config STB03xxx
bool
obj-$(CONFIG_KILAUEA) += kilauea.o
obj-$(CONFIG_MAKALU) += makalu.o
obj-$(CONFIG_WALNUT) += walnut.o
-obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
+obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o misc_40x.o
obj-$(CONFIG_EP405) += ep405.o
+
--- /dev/null
+/*
+ * This file contains miscellaneous low-level functions for PPC 40x.
+ * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/ppc_asm.h>
+
+ .text
+
+/*
+ * void ppc40x_reset_system(char *cmd)
+ *
+ * At present, this routine just applies a system reset.
+ */
+_GLOBAL(ppc40x_reset_system)
+ mfspr r13,SPRN_DBCR0
+ oris r13,r13,DBCR0_RST_SYSTEM@h
+ mtspr SPRN_DBCR0,r13
+ blr
#include <asm/ppc4xx.h>
static struct of_device_id xilinx_of_bus_ids[] __initdata = {
+ { .compatible = "simple-bus", },
{ .compatible = "xlnx,plb-v46-1.00.a", },
+ { .compatible = "xlnx,plb-v46-1.02.a", },
{ .compatible = "xlnx,plb-v34-1.01.a", },
{ .compatible = "xlnx,plb-v34-1.02.a", },
{ .compatible = "xlnx,opb-v20-1.10.c", },
# help
# This option enables support for the IBM PPC440GX evaluation board.
+config XILINX_ML507
+ bool "Xilinx ML507 Reference System"
+ depends on 44x
+ default n
+ select XILINX_ML5XX
+ select WANT_DEVICE_TREE
+ help
+ This option enables support for the Xilinx ML507 board.
+
+config XILINX_DISABLE_44x_CACHE
+ bool "Disable PPC440 caches"
+ depends on XILINX_ML507
+ help
+ This option disables the caches on the PPC440. Some early
+ PPC440 soft-cores do not work with caches enabled. Also, some early
+ ML507 boards have a non-functioning cache. If you have any
+ problems running the ML507, try using this option.
+
+config PPC_FPU
+ depends on XILINX_VIRTEX_5_FXT
+ bool "Enable Xilinx Soft FPU"
+ help
+ This option enables the Xilinx Soft FPU attached to the APU
+ interface of the PPC440 (requires DP_FULL FPU pcore).
+
+config XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+ depends on XILINX_VIRTEX_5_FXT && PPC_FPU
+ bool "Enable Spurious Program Exceptions Workaround"
+ default y
+ help
+ This option enables a workaround for a bug in the APU controller of
+ the PPC440 processor block in Virtex-5 FXT. See the answer record
+ at http://www.xilinx.com/support/answers/30579.htm for more details.
+
+config XILINX_FPU_LOAD_CORRUPTION_WORKAROUND
+ depends on XILINX_VIRTEX_5_FXT && PPC_FPU
+ bool "Enable Xilinx FPU prefetch workaround"
+ default y
+ help
+ This option enables a workaround for a bug in the APU controller of
+ the PPC440 processor block in Virtex-5 FXT. See the answer record at
+ http://www.xilinx.com/support/answers/30529.htm for more details.
+
+config XILINX_FPU_ITLB_EXCEPTION_WORKAROUND
+ depends on XILINX_VIRTEX_5_FXT && PPC_FPU
+ bool "Enable Xilinx FPU TLB instruction miss workaround"
+ default y
+ help
+ This option enables a workaround for a bug in the APU controller of
+ the PPC440 processor block in Virtex-5 FXT. See the answer record at
+ http://www.xilinx.com/support/answers/30570.htm for more details.
+
# 44x specific CPU modules, selected based on the board above.
config 440EP
bool
# 44x errata/workaround config symbols, selected by the CPU models above
config IBM440EP_ERR42
bool
+
+
+# Xilinx specific config options.
+config XILINX_ML5XX
+ bool
+ select XILINX_VIRTEX
+
+config XILINX_VIRTEX_5_FXT
+ bool
+ depends on XILINX_ML507
+ default y
+
obj-$(CONFIG_BAMBOO) += bamboo.o
obj-$(CONFIG_YOSEMITE) += bamboo.o
obj-$(CONFIG_SEQUOIA) += sequoia.o
+obj-$(CONFIG_XILINX_ML507) += virtex.o
obj-$(CONFIG_KATMAI) += katmai.o
obj-$(CONFIG_RAINIER) += rainier.o
obj-$(CONFIG_WARP) += warp.o
--- /dev/null
+/*
+ * Xilinx Virtex 5FXT based board support
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/xilinx_intc.h>
+#include <asm/reg.h>
+#include <asm/ppc4xx.h>
+#include "44x.h"
+
+static struct of_device_id xilinx_of_bus_ids[] __initdata = {
+ { .compatible = "simple-bus", },
+ { .compatible = "xlnx,plb-v46-1.00.a", },
+ { .compatible = "xlnx,plb-v46-1.02.a", },
+ { .compatible = "xlnx,plb-v34-1.01.a", },
+ { .compatible = "xlnx,plb-v34-1.02.a", },
+ { .compatible = "xlnx,opb-v20-1.10.c", },
+ { .compatible = "xlnx,dcr-v29-1.00.a", },
+ { .compatible = "xlnx,compound", },
+ {}
+};
+
+static int __init virtex_device_probe(void)
+{
+ of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+
+ return 0;
+}
+machine_device_initcall(virtex, virtex_device_probe);
+
+static int __init virtex_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "xlnx,virtex"))
+ return 0;
+
+ return 1;
+}
+
+define_machine(virtex) {
+ .name = "Xilinx Virtex",
+ .probe = virtex_probe,
+ .init_IRQ = xilinx_intc_init_tree,
+ .get_irq = xilinx_intc_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+ .restart = ppc4xx_reset_system,
+};
config CPM
bool
+config XILINX_VIRTEX
+ bool
+ select PPC_DCR_MMIO
+ select PPC_DCR_NATIVE
+ help
+ Support for Xilinx Virtex platforms.
+
config OF_RTC
bool
help
#include <asm/prom.h>
#include <asm/dcr.h>
+static struct device_node *find_dcr_parent(struct device_node *node)
+{
+ struct device_node *par, *tmp;
+ const u32 *p;
+
+ for (par = of_node_get(node); par;) {
+ if (of_get_property(par, "dcr-controller", NULL))
+ break;
+ p = of_get_property(par, "dcr-parent", NULL);
+ tmp = par;
+ if (p == NULL)
+ par = of_get_parent(par);
+ else
+ par = of_find_node_by_phandle(*p);
+ of_node_put(tmp);
+ }
+ return par;
+}
+
+#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
+
+bool dcr_map_ok_generic(dcr_host_t host)
+{
+ if (host.type == DCR_HOST_NATIVE)
+ return dcr_map_ok_native(host.host.native);
+ else if (host.type == DCR_HOST_MMIO)
+ return dcr_map_ok_mmio(host.host.mmio);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dcr_map_ok_generic);
+
+dcr_host_t dcr_map_generic(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int dcr_c)
+{
+ dcr_host_t host;
+ struct device_node *dp;
+ const char *prop;
+
+ host.type = DCR_HOST_INVALID;
+
+ dp = find_dcr_parent(dev);
+ if (dp == NULL)
+ return host;
+
+ prop = of_get_property(dp, "dcr-access-method", NULL);
+
+ pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop);
+
+ if (!strcmp(prop, "native")) {
+ host.type = DCR_HOST_NATIVE;
+ host.host.native = dcr_map_native(dev, dcr_n, dcr_c);
+ } else if (!strcmp(prop, "mmio")) {
+ host.type = DCR_HOST_MMIO;
+ host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c);
+ }
+
+ of_node_put(dp);
+ return host;
+}
+EXPORT_SYMBOL_GPL(dcr_map_generic);
+
+void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c)
+{
+ if (host.type == DCR_HOST_NATIVE)
+ dcr_unmap_native(host.host.native, dcr_c);
+ else if (host.type == DCR_HOST_MMIO)
+ dcr_unmap_mmio(host.host.mmio, dcr_c);
+ else /* host.type == DCR_HOST_INVALID */
+ WARN_ON(true);
+}
+EXPORT_SYMBOL_GPL(dcr_unmap_generic);
+
+u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n)
+{
+ if (host.type == DCR_HOST_NATIVE)
+ return dcr_read_native(host.host.native, dcr_n);
+ else if (host.type == DCR_HOST_MMIO)
+ return dcr_read_mmio(host.host.mmio, dcr_n);
+ else /* host.type == DCR_HOST_INVALID */
+ WARN_ON(true);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dcr_read_generic);
+
+void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value)
+{
+ if (host.type == DCR_HOST_NATIVE)
+ dcr_write_native(host.host.native, dcr_n, value);
+ else if (host.type == DCR_HOST_MMIO)
+ dcr_write_mmio(host.host.mmio, dcr_n, value);
+ else /* host.type == DCR_HOST_INVALID */
+ WARN_ON(true);
+}
+EXPORT_SYMBOL_GPL(dcr_write_generic);
+
+#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
+
unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
{
unsigned int ds;
}
EXPORT_SYMBOL_GPL(dcr_resource_len);
-#ifndef CONFIG_PPC_DCR_NATIVE
-
-static struct device_node * find_dcr_parent(struct device_node * node)
-{
- struct device_node *par, *tmp;
- const u32 *p;
-
- for (par = of_node_get(node); par;) {
- if (of_get_property(par, "dcr-controller", NULL))
- break;
- p = of_get_property(par, "dcr-parent", NULL);
- tmp = par;
- if (p == NULL)
- par = of_get_parent(par);
- else
- par = of_find_node_by_phandle(*p);
- of_node_put(tmp);
- }
- return par;
-}
+#ifdef CONFIG_PPC_DCR_MMIO
u64 of_translate_dcr_address(struct device_node *dev,
unsigned int dcr_n,
struct device_node *dp;
const u32 *p;
unsigned int stride;
- u64 ret;
+ u64 ret = OF_BAD_ADDR;
dp = find_dcr_parent(dev);
if (dp == NULL)
if (p == NULL)
p = of_get_property(dp, "dcr-mmio-space", NULL);
if (p == NULL)
- return OF_BAD_ADDR;
+ goto done;
/* Maybe could do some better range checking here */
ret = of_translate_address(dp, p);
ret += (u64)(stride) * (u64)dcr_n;
if (out_stride)
*out_stride = stride;
+
+ done:
+ of_node_put(dp);
return ret;
}
-dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
- unsigned int dcr_c)
+dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int dcr_c)
{
- dcr_host_t ret = { .token = NULL, .stride = 0, .base = dcr_n };
+ dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n };
u64 addr;
pr_debug("dcr_map(%s, 0x%x, 0x%x)\n",
dev->full_name, dcr_n, dcr_c);
addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
- pr_debug("translates to addr: 0x%lx, stride: 0x%x\n",
- addr, ret.stride);
+ pr_debug("translates to addr: 0x%llx, stride: 0x%x\n",
+ (unsigned long long) addr, ret.stride);
if (addr == OF_BAD_ADDR)
return ret;
pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride);
ret.token -= dcr_n * ret.stride;
return ret;
}
-EXPORT_SYMBOL_GPL(dcr_map);
+EXPORT_SYMBOL_GPL(dcr_map_mmio);
-void dcr_unmap(dcr_host_t host, unsigned int dcr_c)
+void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c)
{
- dcr_host_t h = host;
+ dcr_host_mmio_t h = host;
if (h.token == NULL)
return;
iounmap(h.token);
h.token = NULL;
}
-EXPORT_SYMBOL_GPL(dcr_unmap);
-#else /* defined(CONFIG_PPC_DCR_NATIVE) */
+EXPORT_SYMBOL_GPL(dcr_unmap_mmio);
+
+#endif /* defined(CONFIG_PPC_DCR_MMIO) */
+
+#ifdef CONFIG_PPC_DCR_NATIVE
DEFINE_SPINLOCK(dcr_ind_lock);
-#endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
+#endif /* defined(CONFIG_PPC_DCR_NATIVE) */
+
}
regs = ioremap(res.start, 32);
- printk(KERN_INFO "Xilinx intc at 0x%08LX mapped to 0x%p\n",
- res.start, regs);
+ printk(KERN_INFO "Xilinx intc at 0x%08X mapped to 0x%p\n",
+ (u32)res.start, regs);
/* Setup interrupt controller */
out_be32(regs + XINTC_IER, 0); /* disable all irqs */
bool
config PCI
- bool "PCI support" if 40x || CPM2 || PPC_MPC52xx
+ bool "PCI support" if 40x || 4xx || CPM2 || PPC_MPC52xx
default y if !40x && !CPM2 && !8xx
default PCI_QSPAN if !4xx && !CPM2 && 8xx
help
# XXX_memory.o file for this to work, as well as editing the
# misc-$(CONFIG_MACHINE) variable.
+ifeq ($(CONFIG_XILINX_EMBED_CONFIG),y)
+EXTRA_CFLAGS += -Idrivers/i2c/algos/xilinx_iic
+EXTRA_CFLAGS += -Idrivers/xilinx_common
+endif
+
boot := arch/ppc/boot
common := $(boot)/common
utils := $(boot)/utils
end-$(CONFIG_EV64360) := ev64360
cacheflag-$(CONFIG_EV64360) := -include $(clear_L2_L3)
+ extra.o-$(CONFIG_XILINX_EMBED_CONFIG) := ../../../../drivers/xilinx_common/xio.o \
+ ../../../../drivers/i2c/algos/xilinx_iic/xiic_l.o
+
# kconfig 'feature', only one of these will ever be 'y' at a time.
# The rest will be unset.
motorola := $(CONFIG_MVME5100)$(CONFIG_PRPMC750) \
boot-$(CONFIG_8xx) += embed_config.o
boot-$(CONFIG_8260) += embed_config.o
boot-$(CONFIG_EP405) += embed_config.o
-boot-$(CONFIG_XILINX_ML300) += embed_config.o
-boot-$(CONFIG_XILINX_ML403) += embed_config.o
+boot-$(CONFIG_XILINX_EMBED_CONFIG) += embed_config.o
boot-$(CONFIG_BSEIP) += iic.o
boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o
boot-$(CONFIG_MV64X60) += misc-mv64x60.o
#include <asm/mpc8260.h>
#include <asm/immap_cpm2.h>
#endif
-#ifdef CONFIG_40x
+#if defined (CONFIG_40x) || defined (CONFIG_44x)
#include <asm/io.h>
#endif
+
#ifdef CONFIG_XILINX_VIRTEX
#include <platforms/4xx/xparameters/xparameters.h>
#endif
+
extern unsigned long timebase_period_ns;
/* For those boards that don't provide one.
}
#endif /* WILLOW */
-#if defined(CONFIG_XILINX_ML300) || defined(CONFIG_XILINX_ML403)
+#if defined(CONFIG_XILINX_EMBED_CONFIG)
+
+#if (!defined(CONFIG_XILINX_MLxxx) || !defined(XPAR_IIC_0_BASEADDR) || !defined(XPAR_PERSISTENT_0_IIC_0_BASEADDR))
+int get_cfg_data(unsigned char **cfg_data)
+{
+ /*
+ * The ML300, ML40x and ML50x uses an I2C SEEPROM to store the Ethernet
+ * MAC address, but either an I2C interface or the SEEPROM aren't
+ * configured in. If you are in this situation, you'll need to define
+ * an alternative way of storing the Ethernet MAC address. For now, a
+ * hard-coded MAC will be used. If this is sufficient, you may simply
+ * comment out the following #warning.
+ */
+#warning I2C needed for obtaining the Ethernet MAC address. Using hard-coded MAC address
+ return 0; /* no cfg data found */
+}
+#else
+#include <xiic_l.h>
+
+#define CFG_DATA_SIZE \
+ (XPAR_PERSISTENT_0_IIC_0_HIGHADDR - XPAR_PERSISTENT_0_IIC_0_BASEADDR + 1)
+
+int get_cfg_data(unsigned char **cfg_data)
+{
+ static unsigned char sdata[CFG_DATA_SIZE]; /* 'static': get sdata off the stack */
+ int i;
+
+ /*
+ * Fill our SEEPROM data array (sdata) from address
+ * XPAR_PERSISTENT_0_IIC_0_BASEADDR of the SEEPROM at slave
+ * address XPAR_PERSISTENT_0_IIC_0_EEPROMADDR. We'll then parse
+ * that data looking for a MAC address. */
+ sdata[0] = XPAR_PERSISTENT_0_IIC_0_BASEADDR >> 8;
+#if defined(XPAR_IIC_0_TEN_BIT_ADR) && (XPAR_IIC_0_TEN_BIT_ADR == 1)
+ sdata[1] = XPAR_PERSISTENT_0_IIC_0_BASEADDR & 0xFF;
+ i = XIic_Send(XPAR_IIC_0_BASEADDR,
+ XPAR_PERSISTENT_0_IIC_0_EEPROMADDR>>1, sdata, 2, XIIC_STOP);
+ if (i != 2)
+ return 0; /* Couldn't send the address. Return error. */
+#else
+ i = XIic_Send(XPAR_IIC_0_BASEADDR,
+ XPAR_PERSISTENT_0_IIC_0_EEPROMADDR>>1, sdata, 1, XIIC_STOP);
+ if (i != 1) {
+ return 0; /* Couldn't send the address. Return error. */
+ }
+#endif
+ i = XIic_Recv(XPAR_IIC_0_BASEADDR,
+ XPAR_PERSISTENT_0_IIC_0_EEPROMADDR>>1,
+ sdata, sizeof(sdata), XIIC_STOP);
+ if (i != sizeof(sdata)) {
+ return 0; /* Didn't read all the data. Return error. */
+ }
+ *cfg_data = sdata;
+ return CFG_DATA_SIZE;
+}
+#endif /* (!defined(XPAR_IIC_0_BASEADDR) || !defined(XPAR_PERSISTENT_0_IIC_0_BASEADDR)) */
+
+static int
+hexdigit(char c)
+{
+ if ('0' <= c && c <= '9')
+ return c - '0';
+ else if ('a' <= c && c <= 'f')
+ return c - 'a' + 10;
+ else if ('A' <= c && c <= 'F')
+ return c - 'A' + 10;
+ else
+ return -1;
+}
+
+typedef struct iic_eeprom_struct {
+ /* Generally used parameters */
+ char which_board[17]; /* 0x000 to 0x010 Plain text ID of which board */
+ char board_rev[5]; /* 0x011 to 0x015 Plain text Board Rev (A, B, C, etc) */
+ char minor_board_rev[5]; /* 0x016 to 0x01A Plain text minor board rev (001, 002, etc) */
+ char which_FPGA[19]; /* 0x01B to 0x02E Plain text which FPGA is on the board (main FPGA if multiple) */
+ char board_sn[9]; /* 0x02F to 0x037 Plain text Serial Number of board */
+ char board_mac_id[13]; /* 0x038 to 0x044 Plain text MAC Address for this board */
+ char last_test_date[12]; /* 0x045 to 0x050 Plain text last date that tests were run (DD-MMM-YYYY) */
+ char manufacture_date[12]; /* 0x051 to 0x05C Plain text Manufacture Date (DD-MMM-YYYY) */
+ char manufacture_id[17]; /* 0x05D to 0x06D Plain text Manufacture ID (Name) */
+ char tested_before[19]; /* 0x06E to 0x080 Plain text set to 'Xilinx Virtex-X Based MLxxx' (?19?) */
+} iic_eeprom_struct;
+
+static int get_mac_addr(unsigned char *mac)
+{
+ iic_eeprom_struct *eeprom;
+ int cfg_size;
+
+ cfg_size = get_cfg_data((unsigned char **)&eeprom);
+
+ if (cfg_size == 0)
+ return 1; /* Failed to read configuration data */
+
+ /* check the MAC address field prefix (Xilinx OUI 00:0A:35, stored as
+ * ASCII hex digits) to make sure we've got the right struct layout */
+ if ((eeprom->board_mac_id[0] == '0') &&
+ (eeprom->board_mac_id[1] == '0') &&
+ (eeprom->board_mac_id[2] == '0') &&
+ (eeprom->board_mac_id[3] == 'A') &&
+ (eeprom->board_mac_id[4] == '3') &&
+ (eeprom->board_mac_id[5] == '5')) {
+ mac[0] = (hexdigit(eeprom->board_mac_id[0]) << 4) | (hexdigit(eeprom->board_mac_id[1]));
+ mac[1] = (hexdigit(eeprom->board_mac_id[2]) << 4) | (hexdigit(eeprom->board_mac_id[3]));
+ mac[2] = (hexdigit(eeprom->board_mac_id[4]) << 4) | (hexdigit(eeprom->board_mac_id[5]));
+ mac[3] = (hexdigit(eeprom->board_mac_id[6]) << 4) | (hexdigit(eeprom->board_mac_id[7]));
+ mac[4] = (hexdigit(eeprom->board_mac_id[8]) << 4) | (hexdigit(eeprom->board_mac_id[9]));
+ mac[5] = (hexdigit(eeprom->board_mac_id[10]) << 4) | (hexdigit(eeprom->board_mac_id[11]));
+
+ /* Success */
+ return 0;
+
+ }
+
+ /* Data not recognized */
+ return 1;
+}
+
void
embed_config(bd_t ** bdp)
{
+#ifdef CONFIG_40x
static const unsigned long line_size = 32;
static const unsigned long congruence_classes = 256;
unsigned long addr;
unsigned long dccr;
- uint8_t* cp;
+#endif
bd_t *bd;
- int i;
/*
* Invalidate the data cache if the data cache is turned off.
* a bootloader and we assume that the cache contents are
* valid.
*/
+#ifdef CONFIG_40x
__asm__("mfdccr %0": "=r" (dccr));
if (dccr == 0) {
for (addr = 0;
__asm__("dccci 0,%0": :"b"(addr));
}
}
+#endif
bd = &bdinfo;
*bdp = bd;
bd->bi_memsize = XPAR_DDR_0_SIZE;
bd->bi_intfreq = XPAR_CORE_CLOCK_FREQ_HZ;
bd->bi_busfreq = XPAR_PLB_CLOCK_FREQ_HZ;
+#ifdef XPAR_PCI_0_CLOCK_FREQ_HZ
bd->bi_pci_busfreq = XPAR_PCI_0_CLOCK_FREQ_HZ;
+#endif
- /* Copy the default ethernet address */
- cp = (u_char *)def_enet_addr;
- for (i=0; i<6; i++)
- bd->bi_enetaddr[i] = *cp++;
+ if (get_mac_addr(bd->bi_enetaddr)) {
+ /* The SEEPROM is corrupted. set the address to
+ * Xilinx's preferred default. However, first to
+ * eliminate a compiler warning because we don't really
+ * use def_enet_addr, we'll reference it. The compiler
+ * optimizes it away so no harm done. */
+ bd->bi_enetaddr[0] = def_enet_addr[0];
+ bd->bi_enetaddr[0] = 0x00;
+ bd->bi_enetaddr[1] = 0x0A;
+ bd->bi_enetaddr[2] = 0x35;
+ bd->bi_enetaddr[3] = 0x01;
+ bd->bi_enetaddr[4] = 0x02;
+ bd->bi_enetaddr[5] = 0x03;
+ }
timebase_period_ns = 1000000000 / bd->bi_tbfreq;
- /* see bi_tbfreq definition in arch/ppc/platforms/4xx/xilinx_ml300.h */
+ /* see bi_tbfreq definition in arch/ppc/platforms/4xx/xilinx_mlxxx.h */
}
-#endif /* CONFIG_XILINX_ML300 || CONFIG_XILINX_ML403 */
+#endif /* defined(CONFIG_XILINX_EMBED_CONFIG) */
#ifdef CONFIG_IBM_OPENBIOS
/* This could possibly work for all treeboot roms.
/*
+ * arch/ppc/boot/simple/head.S
+ *
* Initial board bringup code for many different boards.
*
* Author: Tom Rini
*/
#endif
-#if defined(CONFIG_XILINX_VIRTEX_4_FX)
+#ifdef CONFIG_XILINX_VIRTEX_4_FX
/* PPC errata 213: only for Virtex-4 FX */
- mfccr0 0
- oris 0,0,0x50000000@h
- mtccr0 0
+ mfccr0 0
+ oris 0,0,0x50000000@h
+ mtccr0 0
+#endif
+
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+ mfccr0 0
+ oris 0,0,0x30000000@h
+ mtccr0 0
#endif
mflr r3 /* Save our actual starting address. */
bootinfo_append(BI_INITRD, sizeof(initrd), &initrd);
}
puts("Now booting the kernel\n");
+#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
serial_close(com_port);
+#endif
return rec;
}
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc1
-# Wed Jan 18 01:11:41 2006
+# Linux kernel version: 2.6.23xlnx
+# Tue Dec 18 10:58:15 2007
#
CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_PPC=y
CONFIG_PPC32=y
CONFIG_GENERIC_NVRAM=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
-# Code maturity level options
+# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_SYSCTL=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_USER_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
+CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
-# CONFIG_SLOB is not set
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
-
-#
-# Block layer
-#
+CONFIG_BLOCK=y
CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
#
# IO Schedulers
# CONFIG_6xx is not set
CONFIG_40x=y
# CONFIG_44x is not set
-# CONFIG_POWER3 is not set
# CONFIG_8xx is not set
# CONFIG_E200 is not set
# CONFIG_E500 is not set
+CONFIG_PPC_DCR_NATIVE=y
+CONFIG_PPC_DCR=y
# CONFIG_MATH_EMULATION is not set
# CONFIG_KEXEC is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_SYCAMORE is not set
# CONFIG_WALNUT is not set
# CONFIG_XILINX_ML300 is not set
+# CONFIG_XILINX_XUPV2P is not set
CONFIG_XILINX_ML403=y
+# CONFIG_XILINX_ML41x is not set
CONFIG_IBM405_ERR77=y
CONFIG_IBM405_ERR51=y
+CONFIG_XILINX_VIRTEX_4_FX=y
CONFIG_XILINX_VIRTEX=y
+CONFIG_XILINX_EMBED_CONFIG=y
CONFIG_EMBEDDEDBOOT=y
# CONFIG_PPC4xx_DMA is not set
CONFIG_PPC_GEN550=y
#
# CONFIG_PC_KEYBOARD is not set
# CONFIG_HIGHMEM is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_PREEMPT_NONE=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,9600"
+CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs rw"
# CONFIG_PM is not set
-# CONFIG_HIBERNATION is not set
+CONFIG_SUSPEND_UP_POSSIBLE=y
+CONFIG_HIBERNATION_UP_POSSIBLE=y
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
#
# Bus options
#
+CONFIG_ZONE_DMA=y
# CONFIG_PPC_I8259 is not set
# CONFIG_PCI is not set
# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
#
# PCCARD (PCMCIA/CardBus) support
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
# CONFIG_IP_DCCP is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
-# CONFIG_TIPC is not set
-# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
-
-#
-# Memory Technology Devices (MTD)
-#
# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_XILINX_SYSACE_OLD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_XILINX_DRIVERS=y
+CONFIG_NEED_XILINX_IPIF=y
# CONFIG_IDE is not set
#
#
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-
-#
-# I2O device support
-#
-
-#
-# Macintosh device drivers
-#
-# CONFIG_WINDFARM is not set
-
-#
-# Network device support
-#
+# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
CONFIG_TUN=y
-
-#
-# PHY device support
-#
-
-#
-# Ethernet (10 or 100Mbit)
-#
-# CONFIG_NET_ETHERNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
# CONFIG_IBM_EMAC is not set
+CONFIG_XILINX_EMAC=y
+# CONFIG_XILINX_EMACLITE is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_XILINX_TEMAC is not set
+# CONFIG_XILINX_LLTEMAC is not set
+CONFIG_NETDEV_10000=y
#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
+# Wireless LAN
#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
# Input device support
#
CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
#
# Userland interfaces
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_XILINX_UARTLITE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=m
# CONFIG_NVRAM is not set
# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
+# CONFIG_XILINX_GPIO is not set
+# CONFIG_XILINX_HWICAP is not set
# CONFIG_R3964 is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_AGP is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
# CONFIG_TCG_TPM is not set
-# CONFIG_TELCLOCK is not set
-
-#
-# I2C support
-#
# CONFIG_I2C is not set
#
#
# CONFIG_SPI is not set
# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
# CONFIG_W1 is not set
-
-#
-# Hardware Monitoring support
-#
+# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
-# CONFIG_HWMON_VID is not set
-
-#
-# Misc devices
-#
#
-# Multimedia Capabilities Port drivers
+# Multifunction device drivers
#
+# CONFIG_MFD_SM501 is not set
#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_DAB=y
#
-# Digital Video Broadcasting Devices
+# Graphics support
#
-# CONFIG_DVB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
-# Graphics support
+# Display device support
#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_FB is not set
+# CONFIG_FB_IBM_GXT4500 is not set
#
# Console display driver support
# Sound
#
# CONFIG_SOUND is not set
-
-#
-# USB support
-#
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
#
-# MMC/SD Card support
+# DMA Engine support
#
-# CONFIG_MMC is not set
+# CONFIG_DMA_ENGINE is not set
#
-# InfiniBand support
+# DMA Clients
#
#
-# SN Devices
+# DMA Devices
#
+CONFIG_XILINX_EDK=y
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
#
# File systems
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
-# CONFIG_RELAYFS_FS is not set
# CONFIG_CONFIGFS_FS is not set
#
#
# Network File Systems
#
-# CONFIG_NFS_FS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
# CONFIG_NLS_KOI8_U is not set
CONFIG_NLS_UTF8=y
+#
+# Distributed Lock Manager
+#
+# CONFIG_DLM is not set
+
#
# IBM 40x options
#
#
# Library routines
#
+CONFIG_BITREVERSE=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
# CONFIG_PROFILING is not set
#
# Kernel hacking
#
CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_MUST_CHECK=y
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
-CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
-# CONFIG_DEBUG_FS is not set
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
# CONFIG_KGDB is not set
# CONFIG_XMON is not set
# CONFIG_BDI_SWITCH is not set
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
# CONFIG_CRYPTO is not set
-
-#
-# Hardware crypto devices
-#
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.24-rc8-xlnx
+# Tue Feb 26 15:08:13 2008
+#
+CONFIG_WORD_SIZE=32
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Processor
+#
+# CONFIG_6xx is not set
+CONFIG_40x=y
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+# CONFIG_E500 is not set
+CONFIG_PPC_DCR_NATIVE=y
+CONFIG_PPC_DCR=y
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CPU_FREQ is not set
+CONFIG_4xx=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+
+#
+# IBM 4xx options
+#
+# CONFIG_BUBINGA is not set
+# CONFIG_CPCI405 is not set
+# CONFIG_EP405 is not set
+# CONFIG_REDWOOD_5 is not set
+# CONFIG_REDWOOD_6 is not set
+# CONFIG_SYCAMORE is not set
+# CONFIG_WALNUT is not set
+# CONFIG_XILINX_ML300 is not set
+# CONFIG_XILINX_XUPV2P is not set
+# CONFIG_XILINX_ML403 is not set
+CONFIG_XILINX_ML405=y
+# CONFIG_XILINX_ML41x is not set
+CONFIG_IBM405_ERR77=y
+CONFIG_IBM405_ERR51=y
+CONFIG_XILINX_VIRTEX_4_FX=y
+CONFIG_XILINX_VIRTEX=y
+CONFIG_XILINX_EMBED_CONFIG=y
+CONFIG_EMBEDDEDBOOT=y
+# CONFIG_PPC4xx_DMA is not set
+CONFIG_PPC_GEN550=y
+CONFIG_UART0_TTYS0=y
+# CONFIG_UART0_TTYS1 is not set
+CONFIG_NOT_COHERENT_CACHE=y
+
+#
+# Platform options
+#
+# CONFIG_PC_KEYBOARD is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600 ip=on root=/dev/ram"
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_CONSISTENT_START=0xff100000
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_BOOT_LOAD=0x00400000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_XILINX_SYSACE_OLD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_XILINX_DRIVERS=y
+CONFIG_NEED_XILINX_LLDMA=y
+CONFIG_NEED_XILINX_IPIF=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_IBM_EMAC is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+CONFIG_XILINX_EMAC=y
+# CONFIG_XILINX_EMACLITE is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_XILINX_TEMAC is not set
+CONFIG_XILINX_LLTEMAC=y
+CONFIG_NETDEV_10000=y
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_XILINX_GPIO is not set
+# CONFIG_XILINX_HWICAP is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+CONFIG_XILINX_IIC=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+# CONFIG_HIDRAW is not set
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+CONFIG_XILINX_EDK=y
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# IBM 40x options
+#
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_INSTRUMENTATION=y
+# CONFIG_PROFILING is not set
+# CONFIG_KPROBES is not set
+# CONFIG_MARKERS is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_KGDB is not set
+# CONFIG_XMON is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
--- /dev/null
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.24-rc8-xlnx
+# Tue Mar 25 08:18:32 2008
+#
+CONFIG_WORD_SIZE=32
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Processor
+#
+# CONFIG_6xx is not set
+# CONFIG_40x is not set
+CONFIG_44x=y
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+# CONFIG_E500 is not set
+# CONFIG_PPC_FPU is not set
+CONFIG_PPC_DCR_NATIVE=y
+CONFIG_PPC_DCR=y
+CONFIG_BOOKE=y
+CONFIG_PTE_64BIT=y
+CONFIG_PHYS_64BIT=y
+# CONFIG_MATH_EMULATION is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CPU_FREQ is not set
+CONFIG_4xx=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_XILINX_ML5XX=y
+
+#
+# IBM 4xx options
+#
+# CONFIG_BAMBOO is not set
+# CONFIG_EBONY is not set
+# CONFIG_LUAN is not set
+# CONFIG_YUCCA is not set
+# CONFIG_OCOTEA is not set
+# CONFIG_TAISHAN is not set
+CONFIG_XILINX_ML507=y
+# CONFIG_XILINX_DISABLE_44x_CACHE is not set
+# CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND is not set
+# CONFIG_XILINX_INTC_IVR_WORKAROUND is not set
+CONFIG_XILINX_MLxxx=y
+CONFIG_XILINX_VIRTEX_5_FXT=y
+CONFIG_XILINX_VIRTEX=y
+CONFIG_XILINX_EMBED_CONFIG=y
+CONFIG_EMBEDDEDBOOT=y
+# CONFIG_PPC4xx_DMA is not set
+CONFIG_PPC_GEN550=y
+CONFIG_UART0_TTYS0=y
+# CONFIG_UART0_TTYS1 is not set
+CONFIG_NOT_COHERENT_CACHE=y
+
+#
+# Platform options
+#
+# CONFIG_PC_KEYBOARD is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600 ip=on root=/dev/ram"
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_CONSISTENT_START=0xff100000
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_BOOT_LOAD=0x01000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_XILINX_SYSACE_OLD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_XILINX_DRIVERS=y
+CONFIG_NEED_XILINX_LLDMA=y
+CONFIG_NEED_XILINX_IPIF=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_IBM_EMAC is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+CONFIG_XILINX_EMAC=y
+# CONFIG_XILINX_EMACLITE is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_XILINX_TEMAC is not set
+CONFIG_XILINX_LLTEMAC=y
+CONFIG_NETDEV_10000=y
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_XILINX_GPIO is not set
+# CONFIG_XILINX_HWICAP is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+CONFIG_XILINX_IIC=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+# CONFIG_HIDRAW is not set
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+CONFIG_XILINX_EDK=y
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_INSTRUMENTATION=y
+# CONFIG_PROFILING is not set
+# CONFIG_KPROBES is not set
+# CONFIG_MARKERS is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_KGDB is not set
+# CONFIG_XMON is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
andc r11,r11,r0
MTMSRD(r11)
isync
+#if defined(CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND) || defined(CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND)
+ mfspr r5,SPRN_CCR0
+#ifdef CONFIG_XILINX_FPU_LOAD_CORRUPTION_WORKAROUND
+ andis. r5,r5, ~(1<<6)@l
+#endif
+#ifdef CONFIG_XILINX_FPU_ITLB_EXCEPTION_WORKAROUND
+ andi. r5,r5, ~(1<<5)@l
+#endif
+ mtspr SPRN_CCR0,r5
+ isync
+#endif
1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
/* ERPN is 0 for first 4GB page */
/* attrib fields */
- /* Added guarded bit to protect against speculative loads/stores */
+ /* Added guarded bit to protect against speculative loads/stores.
+ *
+ * NOTE:
+ * PPC44x_XILINX_TLB_I is defined according to
+ * CONFIG_XILINX_DISABLE_44x_CACHE.
+ */
li r5,0
- ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+ ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G | PPC44x_XILINX_TLB_I) /* SEG 2005-12-16: turn caches off */
li r0,63 /* TLB slot 63 */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
/* Force context change */
+#ifdef CONFIG_XILINX_ML5XX
+ /* We cannot use the contents of the MSR register when we are using XMD
+ * to connect to an ML5xx board, as XMD changes the contents of the MSR
+ * register. We load the default value instead.
+ */
+ lis r0,MSR_KERNEL@h
+ ori r0,r0,MSR_KERNEL@l
+#else
mfmsr r0
+#endif
mtspr SPRN_SRR1, r0
lis r0,3f@h
ori r0,r0,3f@l
*/
/* pageid fields */
lis r3,UART0_IO_BASE@h
+#ifdef CONFIG_XILINX_ML5XX
+ /* In the case of the ML5XX we want to map a 16k page. The 16k page
+ * will cover the UART0 and UART1 address range.
+ */
+ ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_16K
+#else
+ /* Default case. Map 4k range. */
ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
+#endif
/* xlat fields */
lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
/* find the TLB index that caused the fault. It has to be here. */
tlbsx r10, 0, r10
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+ ori r11, r11, PPC44x_XILINX_TLB_I /* SEG 2005-12-16: set/unset I */
+#endif
tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+ ori r12, r12, PPC44x_XILINX_TLB_I /* SEG: 2005-12-16: set/unset I */
+#endif
tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
#endif
#endif
+#ifdef CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+u8 excep_state = 0;
+#endif
+
/*
* Trap & Exception support
*/
{
siginfo_t info;
+ printk(KERN_DEBUG
+ "XXX: Exception\n"
+ " Signal: %x\n"
+ " Code: %x\n"
+ " Addr: %x\n",
+ signr, code, addr);
+
if (!user_mode(regs)) {
debugger(regs);
die("Exception in kernel mode", regs, signr);
}
#endif /* CONFIG_MATH_EMULATION */
+#ifdef CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+#if 1
+{
+ u32 insn;
+ int i;
+
+ printk(KERN_DEBUG
+ "Xilinx spurious APU exception workaround.\n"
+ " Reason: %x\n"
+ " NIP: %x\n",
+ reason, regs->nip);
+
+ for(i=-8; i <= 8; i+=4) {
+ (void) get_user(insn, (u32 *)(regs->nip+i));
+ printk(KERN_DEBUG
+ " Instr.: %2d %x\n",
+ i, insn);
+ }
+}
+#endif
+ if (excep_state == 0) {
+ excep_state = 1;
+ return;
+ }
+ printk(KERN_DEBUG
+ "Xilinx spurious APU exception workaround fell through.\n");
+#endif /* CONFIG_XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND */
+
if (reason & REASON_FP) {
/* IEEE FP exception */
int code = 0;
select SERIAL_8250
default n
+config XILINX_ML5XX
+ bool
+ select XILINX_VIRTEX
+ default n
+
menu "IBM 4xx options"
depends on 4xx
bool "Xilinx-ML300"
select XILINX_VIRTEX_II_PRO
select EMBEDDEDBOOT
+ select XILINX_EMBED_CONFIG
+ select XILINX_MLxxx
help
This option enables support for the Xilinx ML300 evaluation board.
+config XILINX_XUPV2P
+ bool "Xilinx-XUPV2P"
+ select XILINX_VIRTEX_II_PRO
+ select EMBEDDEDBOOT
+ select XILINX_EMBED_CONFIG
+ help
+ This option enables support for the Xilinx University Program (XUP) Virtex 2 Pro board.
+
config XILINX_ML403
bool "Xilinx-ML403"
select XILINX_VIRTEX_4_FX
select EMBEDDEDBOOT
+ select XILINX_EMBED_CONFIG
+ select XILINX_MLxxx
help
This option enables support for the Xilinx ML403 evaluation board.
+
+config XILINX_ML405
+ bool "Xilinx-ML405"
+ select XILINX_VIRTEX_4_FX
+ select EMBEDDEDBOOT
+ select XILINX_EMBED_CONFIG
+ select XILINX_MLxxx
+ help
+ This option enables support for the Xilinx ML405 evaluation board.
+
+config XILINX_ML41x
+ bool "Xilinx-ML41x"
+ select XILINX_VIRTEX_4_FX
+ select EMBEDDEDBOOT
+ select XILINX_EMBED_CONFIG
+ select XILINX_MLxxx
+ help
+ This option enables support for the Xilinx ML410/411 evaluation boards.
+
endchoice
choice
help
This option enables support for the AMCC PPC440GX evaluation board.
+config XILINX_ML507
+ bool "Xilinx-ML507 (Virtex-5 FX70T) Board"
+ select XILINX_ML5XX
+ select EMBEDDEDBOOT
+ select XILINX_EMBED_CONFIG
+ select XILINX_MLxxx
+ help
+ This option enables support for the Xilinx ML507 board.
+
+
endchoice
+config XILINX_DISABLE_44x_CACHE
+ bool "Disable PPC440 caches"
+ depends on XILINX_ML507
+ help
+ This option allows you to disable the caches on the PPC440. Some early
+ PPC440 soft-cores do not work with caches enabled. Also, some early
+ ML507 boards have a non-functioning cache. If you have any
+ problems running the ML507, try using this option.
+
+config PPC_FPU
+ depends on XILINX_VIRTEX_5_FXT
+ bool "Enable Xilinx Soft FPU"
+ help
+ This option enables the Xilinx Soft FPU attached to the APU
+ interface of the PPC440 (requires DP_FULL FPU pcore).
+
+config XILINX_ERRONEOUS_EXCEPTIONS_WORKAROUND
+ depends on XILINX_VIRTEX_5_FXT
+ bool "Enable Spurious Program Exceptions Workaround"
+ help
+ This option enables a workaround for a bug in the APU controller of
+ the PPC440 processor block in Virtex-5 FXT. See the answer records
+ at http://www.xilinx.com for more details and if you need to enable
+ this workaround for your silicon revision.
+
+config XILINX_FPU_LOAD_CORRUPTION_WORKAROUND
+ depends on XILINX_VIRTEX_5_FXT && PPC_FPU
+ bool "Enable Xilinx FPU prefetch workaround"
+ default y
+ help
+ This option enables a workaround for a bug in the APU controller of
+ the PPC440 processor block in Virtex-5 FXT. See the answer record at
+ http://www.xilinx.com/support/answers/30529.htm for more details.
+
+config XILINX_FPU_ITLB_EXCEPTION_WORKAROUND
+ depends on XILINX_VIRTEX_5_FXT && PPC_FPU
+ bool "Enable Xilinx FPU TLB instruction miss workaround"
+ default y
+ help
+ This option enables a workaround for a bug in the APU controller of
+ the PPC440 processor block in Virtex-5 FXT. See the answer record at
+ http://www.xilinx.com/support/answers/30570.htm for more details.
+
+config XILINX_INTC_IVR_WORKAROUND
+ bool "Enable Xilinx INTC IVR workaround"
+ depends on XILINX_ML507
+ help
+ On some versions of the INTC core the IVR register is not working
+ properly. Selecting this option will enable a workaround using the
+ IPR register instead of the IVR register. Enabling this option is
+ safe even if the IVR register is working properly, but will result in
+ slightly lower performance.
+
config EP405PC
bool "EP405PC Support"
depends on EP405
# It's often necessary to know the specific 4xx processor type.
-# Fortunately, it is impled (so far) from the board type, so we
+# Fortunately, it is implied (so far) from the board type, so we
# don't need to ask more redundant questions.
config NP405H
bool
depends on SYCAMORE
default y
+config XILINX_MLxxx
+ bool
+ help
+ Include platform support for many Xilinx development boards
+ with configuration data stored in IIC eeprom.
+
config XILINX_VIRTEX_II_PRO
bool
select XILINX_VIRTEX
+config XILINX_VIRTEX_5_FXT
+ bool
+ depends on XILINX_ML507
+ default y
+
config XILINX_VIRTEX_4_FX
bool
select XILINX_VIRTEX
config XILINX_VIRTEX
bool
+config XILINX_EMBED_CONFIG
+ bool
+
config STB03xxx
bool
depends on REDWOOD_5 || REDWOOD_6
config EMBEDDEDBOOT
bool
+ depends on EP405 || XILINX_ML300 || XILINX_ML403 || XILINX_ML507
+ default y
config IBM_OPENBIOS
bool
choice
prompt "TTYS0 device and default console"
- depends on 40x
+ depends on 40x || 44x
default UART0_TTYS0
config UART0_TTYS0
obj-$(CONFIG_SYCAMORE) += sycamore.o
obj-$(CONFIG_TAISHAN) += taishan.o
obj-$(CONFIG_WALNUT) += walnut.o
-obj-$(CONFIG_XILINX_ML300) += xilinx_ml300.o
-obj-$(CONFIG_XILINX_ML403) += xilinx_ml403.o
-
+obj-$(CONFIG_XILINX_ML300) += xilinx_generic_ppc.o
+obj-$(CONFIG_XILINX_XUPV2P) += xilinx_generic_ppc.o xilinx_xupv2p.o
+obj-$(CONFIG_XILINX_ML403) += xilinx_generic_ppc.o
+obj-$(CONFIG_XILINX_ML405) += xilinx_generic_ppc.o
+obj-$(CONFIG_XILINX_ML41x) += xilinx_generic_ppc.o
+obj-$(CONFIG_XILINX_MLxxx) += xilinx_generic_mlxxx.o
+obj-$(CONFIG_XILINX_ML507) += xilinx_ml507.o xilinx_generic_mlxxx.o
obj-$(CONFIG_405GP) += ibm405gp.o
obj-$(CONFIG_REDWOOD_5) += ibmstb4.o
obj-$(CONFIG_NP405H) += ibmnp405h.o
#ifndef __ASM_VIRTEX_H__
#define __ASM_VIRTEX_H__
-#include <asm/ibm405.h>
+/* We have to distinguish between the PPC405 based Virtex chips and the PPC440
+ * based chips (Virtex 5). At this point we are still using virtex.[ch],
+ * however in the future we may be transitioning to the flat device tree and
+ * therefore eliminating virtex.[ch]. For the time being, though, we add the
+ * PPC440 includes here.
+ */
+#if defined(CONFIG_XILINX_ML5XX)
+ /* PPC 440 based boards */
+ #include <asm/ibm44x.h>
+#else
+ /* PPC405 based boards */
+ #include <asm/ibm405.h>
+#endif
#include <asm/ppcboot.h>
/* Ugly, ugly, ugly! BASE_BAUD defined here to keep 8250.c happy. */
#if !defined(BASE_BAUD)
- #define BASE_BAUD (0) /* dummy value; not used */
+#define BASE_BAUD (0) /* dummy value; not used */
#endif
+/* Virtual address used to set up fixed TLB entry for UART mapping if kernel
+ * debugging is enabled. This can be any address as long as it does not overlap
+ * with any other mapped io address space.
+ */
+#define UART0_IO_BASE 0xD0000000
+
#ifndef __ASSEMBLY__
-extern const char* virtex_machine_name;
+extern const char *virtex_machine_name;
#define PPC4xx_MACHINE_NAME (virtex_machine_name)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLY__ */
/* We don't need anything mapped. Size of zero will accomplish that. */
#define PPC4xx_ONB_IO_PADDR 0u
--- /dev/null
+/*
+ * Xilinx MLxxx board initialization
+ *
+ * 2007 (c) Xilinx, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/xilinx_devices.h>
+#include <linux/platform_device.h>
+
+extern bd_t __res;
+
+int virtex_device_fixup(struct platform_device *dev)
+{
+ static int temac_count = 0;
+ struct xlltemac_platform_data *pdata = dev->dev.platform_data;
+
+#if defined(CONFIG_XILINX_MLxxx)
+
+ if (strcmp(dev->name, "xilinx_lltemac") == 0) {
+
+ /* only copy the mac address into the 1st lltemac if
+ there are multiple */
+
+ if (temac_count++ == 0) {
+ printk(KERN_INFO "Fixup MAC address for %s:%d\n",
+ dev->name, dev->id);
+ /* Set the MAC address from the iic eeprom info in the board data */
+ memcpy(pdata->mac_addr, ((bd_t *) &__res)->bi_enetaddr, 6);
+ }
+ }
+#endif
+
+ return 0;
+}
/*
- * Xilinx ML300 evaluation board initialization
+ * Xilinx Generic PPC evaluation board initialization
*
* Author: MontaVista Software, Inc.
* source@mvista.com
/*
* As an overview of how the following functions (platform_init,
- * ml300_map_io, ml300_setup_arch and ml300_init_IRQ) fit into the
+ * xilinx_generic_ppc_map_io, xilinx_generic_ppc_setup_arch and xilinx_generic_ppc_init_IRQ) fit into the
* kernel startup procedure, here's a call tree:
*
* start_here arch/ppc/kernel/head_4xx.S
* find_bootinfo
* "setup some default ppc_md pointers"
* MMU_init arch/ppc/mm/init.c
- * *ppc_md.setup_io_mappings == ml300_map_io this file
+ * *ppc_md.setup_io_mappings == xilinx_generic_ppc_map_io this file
* ppc4xx_map_io arch/ppc/syslib/ppc4xx_setup.c
* start_kernel init/main.c
* setup_arch arch/ppc/kernel/setup.c
* #if defined(CONFIG_KGDB)
* *ppc_md.kgdb_map_scc() == gen550_kgdb_map_scc
* #endif
- * *ppc_md.setup_arch == ml300_setup_arch this file
+ * *ppc_md.setup_arch == xilinx_generic_ppc_setup_arch this file
* ppc4xx_setup_arch arch/ppc/syslib/ppc4xx_setup.c
* ppc4xx_find_bridges arch/ppc/syslib/ppc405_pci.c
* init_IRQ arch/ppc/kernel/irq.c
- * *ppc_md.init_IRQ == ml300_init_IRQ this file
+ * *ppc_md.init_IRQ == xilinx_generic_ppc_init_IRQ this file
* ppc4xx_init_IRQ arch/ppc/syslib/ppc4xx_setup.c
* ppc4xx_pic_init arch/ppc/syslib/xilinx_pic.c
*/
-const char* virtex_machine_name = "ML300 Reference Design";
+#if defined(CONFIG_XILINX_VIRTEX_II_PRO)
+#define XILINX_ARCH "Virtex-II Pro"
+#elif defined(CONFIG_XILINX_VIRTEX_4_FX)
+#define XILINX_ARCH "Virtex-4 FX"
+#else
+#error "No Xilinx Architecture recognized."
+#endif
+
+#if defined(CONFIG_XILINX_ML300)
+const char *virtex_machine_name = "Xilinx ML300";
+#elif defined(CONFIG_XILINX_XUPV2P)
+const char *virtex_machine_name = "Xilinx XUPV2P";
+#elif defined(CONFIG_XILINX_ML403)
+const char *virtex_machine_name = "Xilinx ML403";
+#elif defined(CONFIG_XILINX_ML405)
+const char *virtex_machine_name = "Xilinx ML405";
+#elif defined(CONFIG_XILINX_ML41x)
+const char *virtex_machine_name = "Xilinx ML41x";
+#else
+const char *virtex_machine_name = "Unknown Xilinx with PowerPC";
+#endif
#if defined(XPAR_POWER_0_POWERDOWN_BASEADDR)
-static volatile unsigned *powerdown_base =
- (volatile unsigned *) XPAR_POWER_0_POWERDOWN_BASEADDR;
+static void __iomem *powerdown_base =
+ (void __iomem *)XPAR_POWER_0_POWERDOWN_BASEADDR;
-static void
-xilinx_power_off(void)
+static void xilinx_power_off(void)
{
local_irq_disable();
out_be32(powerdown_base, XPAR_POWER_0_POWERDOWN_VALUE);
}
#endif
-void __init
-ml300_map_io(void)
+void __init xilinx_generic_ppc_map_io(void)
{
ppc4xx_map_io();
#if defined(XPAR_POWER_0_POWERDOWN_BASEADDR)
- powerdown_base = ioremap((unsigned long) powerdown_base,
+	powerdown_base = ioremap(XPAR_POWER_0_POWERDOWN_BASEADDR,
XPAR_POWER_0_POWERDOWN_HIGHADDR -
XPAR_POWER_0_POWERDOWN_BASEADDR + 1);
#endif
}
-void __init
-ml300_setup_arch(void)
+void __init xilinx_generic_ppc_setup_arch(void)
{
virtex_early_serial_map();
ppc4xx_setup_arch(); /* calls ppc4xx_find_bridges() */
/* Identify the system */
- printk(KERN_INFO "Xilinx ML300 Reference System (Virtex-II Pro)\n");
+ printk(KERN_INFO
+ "Xilinx Generic PowerPC board support package (%s) (%s)\n",
+ PPC4xx_MACHINE_NAME, XILINX_ARCH);
}
/* Called after board_setup_irq from ppc4xx_init_IRQ(). */
-void __init
-ml300_init_irq(void)
+void __init xilinx_generic_ppc_init_irq(void)
{
ppc4xx_init_IRQ();
}
-void __init
+void __init __attribute__((weak))
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
ppc4xx_init(r3, r4, r5, r6, r7);
- ppc_md.setup_arch = ml300_setup_arch;
- ppc_md.setup_io_mappings = ml300_map_io;
- ppc_md.init_IRQ = ml300_init_irq;
+ ppc_md.setup_arch = xilinx_generic_ppc_setup_arch;
+ ppc_md.setup_io_mappings = xilinx_generic_ppc_map_io;
+ ppc_md.init_IRQ = xilinx_generic_ppc_init_irq;
#if defined(XPAR_POWER_0_POWERDOWN_BASEADDR)
ppc_md.power_off = xilinx_power_off;
ppc_md.early_serial_map = virtex_early_serial_map;
#endif
}
-
+++ /dev/null
-/*
- * Xilinx ML403 evaluation board initialization
- *
- * Author: Grant Likely <grant.likely@secretlab.ca>
- *
- * 2005-2007 (c) Secret Lab Technologies Ltd.
- * 2002-2004 (c) MontaVista Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_8250.h>
-#include <linux/serialP.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-
-#include <syslib/gen550.h>
-#include <syslib/virtex_devices.h>
-#include <platforms/4xx/xparameters/xparameters.h>
-
-/*
- * As an overview of how the following functions (platform_init,
- * ml403_map_io, ml403_setup_arch and ml403_init_IRQ) fit into the
- * kernel startup procedure, here's a call tree:
- *
- * start_here arch/ppc/kernel/head_4xx.S
- * early_init arch/ppc/kernel/setup.c
- * machine_init arch/ppc/kernel/setup.c
- * platform_init this file
- * ppc4xx_init arch/ppc/syslib/ppc4xx_setup.c
- * parse_bootinfo
- * find_bootinfo
- * "setup some default ppc_md pointers"
- * MMU_init arch/ppc/mm/init.c
- * *ppc_md.setup_io_mappings == ml403_map_io this file
- * ppc4xx_map_io arch/ppc/syslib/ppc4xx_setup.c
- * start_kernel init/main.c
- * setup_arch arch/ppc/kernel/setup.c
- * #if defined(CONFIG_KGDB)
- * *ppc_md.kgdb_map_scc() == gen550_kgdb_map_scc
- * #endif
- * *ppc_md.setup_arch == ml403_setup_arch this file
- * ppc4xx_setup_arch arch/ppc/syslib/ppc4xx_setup.c
- * ppc4xx_find_bridges arch/ppc/syslib/ppc405_pci.c
- * init_IRQ arch/ppc/kernel/irq.c
- * *ppc_md.init_IRQ == ml403_init_IRQ this file
- * ppc4xx_init_IRQ arch/ppc/syslib/ppc4xx_setup.c
- * ppc4xx_pic_init arch/ppc/syslib/xilinx_pic.c
- */
-
-const char* virtex_machine_name = "ML403 Reference Design";
-
-#if defined(XPAR_POWER_0_POWERDOWN_BASEADDR)
-static volatile unsigned *powerdown_base =
- (volatile unsigned *) XPAR_POWER_0_POWERDOWN_BASEADDR;
-
-static void
-xilinx_power_off(void)
-{
- local_irq_disable();
- out_be32(powerdown_base, XPAR_POWER_0_POWERDOWN_VALUE);
- while (1) ;
-}
-#endif
-
-void __init
-ml403_map_io(void)
-{
- ppc4xx_map_io();
-
-#if defined(XPAR_POWER_0_POWERDOWN_BASEADDR)
- powerdown_base = ioremap((unsigned long) powerdown_base,
- XPAR_POWER_0_POWERDOWN_HIGHADDR -
- XPAR_POWER_0_POWERDOWN_BASEADDR + 1);
-#endif
-}
-
-void __init
-ml403_setup_arch(void)
-{
- virtex_early_serial_map();
- ppc4xx_setup_arch(); /* calls ppc4xx_find_bridges() */
-
- /* Identify the system */
- printk(KERN_INFO "Xilinx ML403 Reference System (Virtex-4 FX)\n");
-}
-
-/* Called after board_setup_irq from ppc4xx_init_IRQ(). */
-void __init
-ml403_init_irq(void)
-{
- ppc4xx_init_IRQ();
-}
-
-void __init
-platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- ppc4xx_init(r3, r4, r5, r6, r7);
-
- ppc_md.setup_arch = ml403_setup_arch;
- ppc_md.setup_io_mappings = ml403_map_io;
- ppc_md.init_IRQ = ml403_init_irq;
-
-#if defined(XPAR_POWER_0_POWERDOWN_BASEADDR)
- ppc_md.power_off = xilinx_power_off;
-#endif
-
-#ifdef CONFIG_KGDB
- ppc_md.early_serial_map = virtex_early_serial_map;
-#endif
-}
-
--- /dev/null
+/*
+ * arch/ppc/platforms/4xx/xilinx_ml507.c
+ *
+ * Initialization for Xilinx boards with PowerPC 440
+ *
+ * Author: Grant Likely <grant.likely@secretlab.ca>
+ * Wolfgang Reissnegger <w.reissnegger@gmx.net>
+ * Peter Ryser <peter.ryser@xilinx.com>
+ *
+ * 2007 (c) Xilinx, Inc.
+ * 2005 (c) Secret Lab Technologies Ltd.
+ * 2002-2004 (c) MontaVista Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/serialP.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ppc4xx_pic.h>
+#include <asm/time.h>
+
+#include <syslib/ibm44x_common.h>
+
+#include <syslib/gen550.h>
+#include <syslib/virtex_devices.h>
+#include <platforms/4xx/xparameters/xparameters.h>
+
+#if defined(CONFIG_XILINX_VIRTEX_5_FXT)
+#define XILINX_ARCH "Virtex-5 FXT"
+#else
+#error "No Xilinx Architecture recognized."
+#endif
+
+#if defined(CONFIG_XILINX_ML507)
+const char *virtex_machine_name = "Xilinx ML507";
+#else
+const char *virtex_machine_name = "Unknown Xilinx with PowerPC 440";
+#endif
+
+extern bd_t __res;
+
+void __init
+ml507_setup_arch(void)
+{
+ virtex_early_serial_map();
+
+ /* Identify the system */
+ printk(KERN_INFO
+ "Xilinx Generic PowerPC 440 board support package (%s) (%s)\n",
+ PPC4xx_MACHINE_NAME, XILINX_ARCH);
+}
+
+void __init
+ml507_init_irq(void)
+{
+ ppc4xx_pic_init();
+
+ /*
+ * For PowerPC 440 cores the default value for NR_IRQS is 32.
+ * See include/asm-ppc/irq.h for details.
+ * This is just fine for ML5xx
+ */
+#if (NR_IRQS != 32)
+#error NR_IRQS must be 32 for ML5xx
+#endif
+}
+
+/*
+ * Return the virtual address representing the top of physical RAM.
+ */
+static unsigned long __init
+ml507_find_end_of_memory(void)
+{
+ bd_t *bip = &__res;
+
+ return bip->bi_memsize;
+}
+
+
+static void __init
+ml507_calibrate_decr(void)
+{
+ bd_t *bip = &__res;
+
+ ibm44x_calibrate_decr(bip->bi_intfreq);
+}
+
+
+void __init
+platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ /* Calling ppc4xx_init will set up the default values for ppc_md.
+ */
+ ibm44x_platform_init(r3, r4, r5, r6, r7);
+
+
+ /* Overwrite the default settings with our platform specific hooks.
+ */
+ ppc_md.setup_arch = ml507_setup_arch;
+ ppc_md.init_IRQ = ml507_init_irq;
+ ppc_md.find_end_of_memory = ml507_find_end_of_memory;
+ ppc_md.calibrate_decr = ml507_calibrate_decr;
+
+#ifdef CONFIG_KGDB
+ ppc_md.early_serial_map = virtex_early_serial_map;
+#endif
+}
--- /dev/null
+/*
+ * Xilinx XUPV2P board initialization
+ *
+ * Author: Stephen.Neuendorffer@xilinx.com
+ *
+ * 2007 (c) Xilinx, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/xilinx_devices.h>
+#include <platforms/4xx/xparameters/xparameters.h>
+
+int virtex_device_fixup(struct platform_device *dev)
+{
+#ifdef XPAR_ONEWIRE_0_BASEADDR
+ int i;
+ // Use the Silicon Serial ID attached on the onewire bus to
+ // generate sensible MAC addresses.
+ unsigned char *p_onewire = ioremap(XPAR_ONEWIRE_0_BASEADDR, 6);
+ struct xemac_platform_data *pdata = dev->dev.platform_data;
+ if (strcmp(dev->name, "xilinx_emac") == 0) {
+ printk(KERN_INFO "Fixup MAC address for %s:%d\n",
+ dev->name, dev->id);
+ // FIXME.. this doesn't seem to return data that is consistent
+ // with the self test... why not?
+ pdata->mac_addr[0] = 0x00;
+ pdata->mac_addr[1] = 0x0A;
+ pdata->mac_addr[2] = 0x35;
+ pdata->mac_addr[3] = dev->id;
+ pdata->mac_addr[4] = p_onewire[4];
+ pdata->mac_addr[5] = p_onewire[5];
+		pr_debug(
+ "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
+ pdata->mac_addr[0], pdata->mac_addr[1],
+ pdata->mac_addr[2], pdata->mac_addr[3],
+ pdata->mac_addr[4], pdata->mac_addr[5]);
+ }
+ iounmap(p_onewire);
+#endif
+ return 0;
+}
#if defined(CONFIG_XILINX_ML300)
#include "xparameters_ml300.h"
- #define XPAR_INTC_0_AC97_CONTROLLER_REF_0_PLAYBACK_VEC_ID \
- XPAR_DCR_INTC_0_OPB_AC97_CONTROLLER_REF_0_PLAYBACK_INTERRUPT_INTR
- #define XPAR_INTC_0_AC97_CONTROLLER_REF_0_RECORD_VEC_ID \
- XPAR_DCR_INTC_0_OPB_AC97_CONTROLLER_REF_0_RECORD_INTERRUPT_INTR
+#elif defined(CONFIG_XILINX_XUPV2P)
+ #include "xparameters_xupv2p.h"
#elif defined(CONFIG_XILINX_ML403)
#include "xparameters_ml403.h"
- #define XPAR_INTC_0_AC97_CONTROLLER_REF_0_PLAYBACK_VEC_ID \
- XPAR_OPB_INTC_0_OPB_AC97_CONTROLLER_REF_0_PLAYBACK_INTERRUPT_INTR
- #define XPAR_INTC_0_AC97_CONTROLLER_REF_0_RECORD_VEC_ID \
- XPAR_OPB_INTC_0_OPB_AC97_CONTROLLER_REF_0_RECORD_INTERRUPT_INTR
+#elif defined(CONFIG_XILINX_ML405)
+ #include "xparameters_ml405.h"
+#elif defined(CONFIG_XILINX_ML507)
+ #include "xparameters_ml507.h"
+#elif defined(CONFIG_XILINX_ML41x)
+ #include "xparameters_ml41x.h"
#else
/* Add other board xparameter includes here before the #else */
#error No xparameters_*.h file included
#define XPAR_GPIO_4_IS_DUAL 0
#endif
+#ifndef XPAR_LLTEMAC_0_LLINK_CONNECTED_FIFO_INTR
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_FIFO_INTR 0xdeadbeef
+#endif
+#ifndef XPAR_LLTEMAC_0_LLINK_CONNECTED_DMATX_INTR
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_DMATX_INTR 0xdeadbeef
+#endif
+#ifndef XPAR_LLTEMAC_0_LLINK_CONNECTED_DMARX_INTR
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_DMARX_INTR 0xdeadbeef
+#endif
+
+#ifndef XPAR_LLTEMAC_1_LLINK_CONNECTED_FIFO_INTR
+#define XPAR_LLTEMAC_1_LLINK_CONNECTED_FIFO_INTR 0xdeadbeef
+#endif
+#ifndef XPAR_LLTEMAC_1_LLINK_CONNECTED_DMATX_INTR
+#define XPAR_LLTEMAC_1_LLINK_CONNECTED_DMATX_INTR 0xdeadbeef
+#endif
+#ifndef XPAR_LLTEMAC_1_LLINK_CONNECTED_DMARX_INTR
+#define XPAR_LLTEMAC_1_LLINK_CONNECTED_DMARX_INTR 0xdeadbeef
+#endif
--- /dev/null
+
+/*******************************************************************
+*
+* CAUTION: This file is automatically generated by libgen.
+* Version: Xilinx EDK 10.1.1 EDK_K_SP1.1
+* DO NOT EDIT.
+*
+* Copyright (c) 2005 Xilinx, Inc. All rights reserved.
+*
+* Description: Driver parameters
+*
+*******************************************************************/
+
+#define STDIN_BASEADDRESS 0x83E00000
+#define STDOUT_BASEADDRESS 0x83E00000
+
+/******************************************************************/
+
+/* Definitions for peripheral XPS_BRAM_IF_CNTLR_1 */
+#define XPAR_XPS_BRAM_IF_CNTLR_1_BASEADDR 0xFFFFE000
+#define XPAR_XPS_BRAM_IF_CNTLR_1_HIGHADDR 0xFFFFFFFF
+
+
+/******************************************************************/
+
+/* Definitions for driver UARTNS550 */
+#define XPAR_XUARTNS550_NUM_INSTANCES 1
+#define XPAR_XUARTNS550_CLOCK_HZ 100000000
+
+/* Definitions for peripheral RS232_UART */
+#define XPAR_RS232_UART_DEVICE_ID 0
+#define XPAR_RS232_UART_BASEADDR 0x83E00000
+#define XPAR_RS232_UART_HIGHADDR 0x83E0FFFF
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral RS232_UART */
+#define XPAR_UARTNS550_0_CLOCK_FREQ_HZ 100000000
+#define XPAR_UARTNS550_0_DEVICE_ID XPAR_RS232_UART_DEVICE_ID
+#define XPAR_UARTNS550_0_BASEADDR 0x83E00000
+#define XPAR_UARTNS550_0_HIGHADDR 0x83E0FFFF
+#define XPAR_UARTNS550_0_SIO_CHAN -1
+
+
+/******************************************************************/
+
+/* Definitions for driver GPIO */
+#define XPAR_XGPIO_NUM_INSTANCES 1
+
+/* Definitions for peripheral LEDS_4BIT */
+#define XPAR_LEDS_4BIT_BASEADDR 0x81400000
+#define XPAR_LEDS_4BIT_HIGHADDR 0x8140FFFF
+#define XPAR_LEDS_4BIT_DEVICE_ID 0
+#define XPAR_LEDS_4BIT_INTERRUPT_PRESENT 1
+#define XPAR_LEDS_4BIT_IS_DUAL 0
+
+
+/******************************************************************/
+
+/* Definitions for driver IIC */
+#define XPAR_XIIC_NUM_INSTANCES 1
+
+/* Definitions for peripheral IIC_EEPROM */
+#define XPAR_IIC_EEPROM_DEVICE_ID 0
+#define XPAR_IIC_EEPROM_BASEADDR 0x81600000
+#define XPAR_IIC_EEPROM_HIGHADDR 0x8160FFFF
+#define XPAR_IIC_EEPROM_TEN_BIT_ADR 0
+#define XPAR_IIC_EEPROM_GPO_WIDTH 1
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral IIC_EEPROM */
+#define XPAR_IIC_0_DEVICE_ID XPAR_IIC_EEPROM_DEVICE_ID
+#define XPAR_IIC_0_BASEADDR 0x81600000
+#define XPAR_IIC_0_HIGHADDR 0x8160FFFF
+#define XPAR_IIC_0_TEN_BIT_ADR 0
+#define XPAR_IIC_0_GPO_WIDTH 1
+
+
+/******************************************************************/
+
+#define XPAR_XSYSACE_MEM_WIDTH 16
+/* Definitions for driver SYSACE */
+#define XPAR_XSYSACE_NUM_INSTANCES 1
+
+/* Definitions for peripheral SYSACE_COMPACTFLASH */
+#define XPAR_SYSACE_COMPACTFLASH_DEVICE_ID 0
+#define XPAR_SYSACE_COMPACTFLASH_BASEADDR 0x83600000
+#define XPAR_SYSACE_COMPACTFLASH_HIGHADDR 0x8360FFFF
+#define XPAR_SYSACE_COMPACTFLASH_MEM_WIDTH 16
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral SYSACE_COMPACTFLASH */
+#define XPAR_SYSACE_0_DEVICE_ID XPAR_SYSACE_COMPACTFLASH_DEVICE_ID
+#define XPAR_SYSACE_0_BASEADDR 0x83600000
+#define XPAR_SYSACE_0_HIGHADDR 0x8360FFFF
+#define XPAR_SYSACE_0_MEM_WIDTH 16
+
+/******************************************************************/
+
+/* Definitions for driver LLTEMAC */
+#define XPAR_XLLTEMAC_NUM_INSTANCES 1
+
+/* Definitions for peripheral TRIMODE_MAC_GMII Channel 0 */
+#define XPAR_TRIMODE_MAC_GMII_CHAN_0_DEVICE_ID 0
+#define XPAR_TRIMODE_MAC_GMII_CHAN_0_BASEADDR 0x81c00000
+#define XPAR_TRIMODE_MAC_GMII_CHAN_0_TXCSUM 0
+#define XPAR_TRIMODE_MAC_GMII_CHAN_0_RXCSUM 0
+#define XPAR_TRIMODE_MAC_GMII_CHAN_0_PHY_TYPE 1
+
+/* Canonical definitions for peripheral TRIMODE_MAC_GMII Channel 0 */
+#define XPAR_LLTEMAC_0_DEVICE_ID 0
+#define XPAR_LLTEMAC_0_BASEADDR 0x81c00000
+#define XPAR_LLTEMAC_0_TXCSUM 0
+#define XPAR_LLTEMAC_0_RXCSUM 0
+#define XPAR_LLTEMAC_0_PHY_TYPE 1
+#define XPAR_LLTEMAC_0_INTR 2
+
+
+/* LocalLink TYPE Enumerations */
+#define XPAR_LL_FIFO 1
+#define XPAR_LL_DMA 2
+
+
+/* Canonical LocalLink parameters for TRIMODE_MAC_GMII */
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_TYPE XPAR_LL_DMA
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_BASEADDR 0x84600100
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_FIFO_INTR 0xFF
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_DMARX_INTR 1
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_DMATX_INTR 0
+
+
+/******************************************************************/
+
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS 7
+#define XPAR_XINTC_HAS_IPR 1
+#define XPAR_XINTC_USE_DCR 0
+/* Definitions for driver INTC */
+#define XPAR_XINTC_NUM_INSTANCES 1
+
+/* Definitions for peripheral XPS_INTC_0 */
+#define XPAR_XPS_INTC_0_DEVICE_ID 0
+#define XPAR_XPS_INTC_0_BASEADDR 0x81800000
+#define XPAR_XPS_INTC_0_HIGHADDR 0x8180FFFF
+#define XPAR_XPS_INTC_0_KIND_OF_INTR 0x00000000
+
+
+/******************************************************************/
+
+#define XPAR_INTC_SINGLE_BASEADDR 0x81800000
+#define XPAR_INTC_SINGLE_HIGHADDR 0x8180FFFF
+#define XPAR_INTC_SINGLE_DEVICE_ID XPAR_XPS_INTC_0_DEVICE_ID
+#define XPAR_DDR_SDRAM_SDMA2_TX_INTOUT_MASK 0X000001
+#define XPAR_XPS_INTC_0_DDR_SDRAM_SDMA2_TX_INTOUT_INTR 0
+#define XPAR_DDR_SDRAM_SDMA2_RX_INTOUT_MASK 0X000002
+#define XPAR_XPS_INTC_0_DDR_SDRAM_SDMA2_RX_INTOUT_INTR 1
+#define XPAR_TRIMODE_MAC_GMII_TEMACINTC0_IRPT_MASK 0X000004
+#define XPAR_XPS_INTC_0_TRIMODE_MAC_GMII_TEMACINTC0_IRPT_INTR 2
+#define XPAR_SYSACE_COMPACTFLASH_SYSACE_IRQ_MASK 0X000008
+#define XPAR_XPS_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR 3
+#define XPAR_IIC_EEPROM_IIC2INTC_IRPT_MASK 0X000010
+#define XPAR_XPS_INTC_0_IIC_EEPROM_IIC2INTC_IRPT_INTR 4
+#define XPAR_LEDS_4BIT_IP2INTC_IRPT_MASK 0X000020
+#define XPAR_XPS_INTC_0_LEDS_4BIT_IP2INTC_IRPT_INTR 5
+#define XPAR_RS232_UART_IP2INTC_IRPT_MASK 0X000040
+#define XPAR_XPS_INTC_0_RS232_UART_IP2INTC_IRPT_INTR 6
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral XPS_INTC_0 */
+#define XPAR_INTC_0_DEVICE_ID XPAR_XPS_INTC_0_DEVICE_ID
+#define XPAR_INTC_0_BASEADDR 0x81800000
+#define XPAR_INTC_0_HIGHADDR 0x8180FFFF
+#define XPAR_INTC_0_KIND_OF_INTR 0x00000000
+
+#define XPAR_INTC_0_MPMC_0_SDMA2_TX_INTOUT_VEC_ID XPAR_XPS_INTC_0_DDR_SDRAM_SDMA2_TX_INTOUT_INTR
+#define XPAR_INTC_0_MPMC_0_SDMA2_RX_INTOUT_VEC_ID XPAR_XPS_INTC_0_DDR_SDRAM_SDMA2_RX_INTOUT_INTR
+#define XPAR_INTC_0_LLTEMAC_0_VEC_ID XPAR_XPS_INTC_0_TRIMODE_MAC_GMII_TEMACINTC0_IRPT_INTR
+#define XPAR_INTC_0_SYSACE_0_VEC_ID XPAR_XPS_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR
+#define XPAR_INTC_0_IIC_0_VEC_ID XPAR_XPS_INTC_0_IIC_EEPROM_IIC2INTC_IRPT_INTR
+#define XPAR_INTC_0_GPIO_0_VEC_ID XPAR_XPS_INTC_0_LEDS_4BIT_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_UARTNS550_0_VEC_ID XPAR_XPS_INTC_0_RS232_UART_IP2INTC_IRPT_INTR
+
+/******************************************************************/
+
+/* Definitions for driver MPMC */
+#define XPAR_XMPMC_NUM_INSTANCES 1
+
+/* Definitions for peripheral DDR_SDRAM */
+#define XPAR_DDR_SDRAM_DEVICE_ID 0
+#define XPAR_DDR_SDRAM_MPMC_BASEADDR 0x00000000
+#define XPAR_DDR_SDRAM_MPMC_CTRL_BASEADDR 0xFFFFFFFF
+#define XPAR_DDR_SDRAM_INCLUDE_ECC_SUPPORT 0
+#define XPAR_DDR_SDRAM_USE_STATIC_PHY 0
+#define XPAR_DDR_SDRAM_PM_ENABLE 0
+#define XPAR_DDR_SDRAM_NUM_PORTS 3
+
+
+/******************************************************************/
+
+
+/* Definitions for peripheral DDR_SDRAM */
+#define XPAR_DDR_SDRAM_MPMC_BASEADDR 0x00000000
+#define XPAR_DDR_SDRAM_MPMC_HIGHADDR 0x07FFFFFF
+#define XPAR_DDR_SDRAM_SDMA_CTRL_BASEADDR 0x84600000
+#define XPAR_DDR_SDRAM_SDMA_CTRL_HIGHADDR 0x8460FFFF
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral DDR_SDRAM */
+#define XPAR_MPMC_0_DEVICE_ID XPAR_DDR_SDRAM_DEVICE_ID
+#define XPAR_MPMC_0_MPMC_BASEADDR 0x00000000
+#define XPAR_MPMC_0_MPMC_CTRL_BASEADDR 0xFFFFFFFF
+#define XPAR_MPMC_0_INCLUDE_ECC_SUPPORT 0
+#define XPAR_MPMC_0_USE_STATIC_PHY 0
+#define XPAR_MPMC_0_PM_ENABLE 0
+#define XPAR_MPMC_0_NUM_PORTS 3
+
+
+
+/******************************************************************/
+
+#define XPAR_CPU_PPC405_CORE_CLOCK_FREQ_HZ 300000000
+
+/******************************************************************/
+
+#define XPAR_CPU_ID 0
+#define XPAR_PPC405_VIRTEX4_ID 0
+#define XPAR_PPC405_VIRTEX4_CORE_CLOCK_FREQ_HZ 300000000
+#define XPAR_PPC405_VIRTEX4_DPLB0_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_DPLB0_NATIVE_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_IPLB0_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_IPLB0_NATIVE_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_DPLB1_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_DPLB1_NATIVE_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_IPLB1_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_IPLB1_NATIVE_DWIDTH 64
+#define XPAR_PPC405_VIRTEX4_DPLB1_ADDR_BASE 0x00000000
+#define XPAR_PPC405_VIRTEX4_DPLB1_ADDR_HIGH 0x07ffffff
+#define XPAR_PPC405_VIRTEX4_IPLB1_ADDR_BASE 0x00000000
+#define XPAR_PPC405_VIRTEX4_IPLB1_ADDR_HIGH 0x07ffffff
+#define XPAR_PPC405_VIRTEX4_FASTEST_PLB_CLOCK DPLB0
+#define XPAR_PPC405_VIRTEX4_GENERATE_PLB_TIMESPECS 1
+#define XPAR_PPC405_VIRTEX4_DPLB0_P2P 0
+#define XPAR_PPC405_VIRTEX4_DPLB1_P2P 1
+#define XPAR_PPC405_VIRTEX4_IPLB0_P2P 0
+#define XPAR_PPC405_VIRTEX4_IPLB1_P2P 1
+#define XPAR_PPC405_VIRTEX4_IDCR_BASEADDR 0x00000100
+#define XPAR_PPC405_VIRTEX4_IDCR_HIGHADDR 0x000001FF
+#define XPAR_PPC405_VIRTEX4_DISABLE_OPERAND_FORWARDING 1
+#define XPAR_PPC405_VIRTEX4_MMU_ENABLE 1
+#define XPAR_PPC405_VIRTEX4_DETERMINISTIC_MULT 0
+#define XPAR_PPC405_VIRTEX4_PLBSYNCBYPASS 1
+#define XPAR_PPC405_VIRTEX4_APU_CONTROL 0b1101111000000000
+#define XPAR_PPC405_VIRTEX4_APU_UDI_1 0b101000011000100110000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_2 0b101000111000100110000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_3 0b101001011000100111000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_4 0b101001111000100111000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_5 0b101010011000110000000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_6 0b101010111000110000000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_7 0b101011011000110001000011
+#define XPAR_PPC405_VIRTEX4_APU_UDI_8 0b101011111000110001000011
+#define XPAR_PPC405_VIRTEX4_PVR_HIGH 0b0000
+#define XPAR_PPC405_VIRTEX4_PVR_LOW 0b0000
+#define XPAR_PPC405_VIRTEX4_HW_VER "2.01.a"
+
+/******************************************************************/
+
+#define XPAR_PLB_CLOCK_FREQ_HZ 100000000
+#define XPAR_CORE_CLOCK_FREQ_HZ XPAR_CPU_PPC405_CORE_CLOCK_FREQ_HZ
+#define XPAR_DDR_0_SIZE 0x4000000
+
+/******************************************************************/
+
+#define XPAR_PCI_0_CLOCK_FREQ_HZ 0
+
+/******************************************************************/
+
+#define XPAR_PERSISTENT_0_IIC_0_BASEADDR 0
+#define XPAR_PERSISTENT_0_IIC_0_HIGHADDR 1024
+#define XPAR_PERSISTENT_0_IIC_0_EEPROMADDR 0xA0
--- /dev/null
+
+/*******************************************************************
+*
+* CAUTION: This file is automatically generated by libgen.
+* Version: Xilinx EDK 8.2.02 EDK_Im_Sp2.4
+* DO NOT EDIT.
+*
+* Copyright (c) 2005 Xilinx, Inc. All rights reserved.
+*
+* Description: Driver parameters
+*
+*******************************************************************/
+
+/* Definitions for driver PLBARB */
+#define XPAR_XPLBARB_NUM_INSTANCES 1
+
+/* Definitions for peripheral PLB */
+#define XPAR_PLB_BASEADDR 0x00000000
+#define XPAR_PLB_HIGHADDR 0x00000000
+#define XPAR_PLB_DEVICE_ID 0
+#define XPAR_PLB_PLB_NUM_MASTERS 3
+
+
+/******************************************************************/
+
+/* Definitions for driver OPBARB */
+#define XPAR_XOPBARB_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB */
+#define XPAR_OPB_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_HIGHADDR 0x00000000
+#define XPAR_OPB_DEVICE_ID 0
+#define XPAR_OPB_NUM_MASTERS 1
+
+/******************************************************************/
+
+
+/* Definitions for peripheral OPB_SOCKET_0 */
+#define XPAR_OPB_SOCKET_0_BASEADDR 0x40000000
+#define XPAR_OPB_SOCKET_0_HIGHADDR 0x7FFFFFFF
+#define XPAR_OPB_SOCKET_0_DCR_BASEADDR 0x40700300
+#define XPAR_OPB_SOCKET_0_DCR_HIGHADDR 0x40700307
+
+/******************************************************************/
+
+/* Definitions for driver UARTNS550 */
+#define XPAR_XUARTNS550_NUM_INSTANCES 2
+#define XPAR_XUARTNS550_CLOCK_HZ 100000000
+
+/* Definitions for peripheral RS232_UART_1 */
+#define XPAR_RS232_UART_1_BASEADDR 0x40400000
+#define XPAR_RS232_UART_1_HIGHADDR 0x4040FFFF
+#define XPAR_RS232_UART_1_DEVICE_ID 0
+
+
+/* Definitions for peripheral RS232_UART_2 */
+#define XPAR_RS232_UART_2_BASEADDR 0x40420000
+#define XPAR_RS232_UART_2_HIGHADDR 0x4042FFFF
+#define XPAR_RS232_UART_2_DEVICE_ID 1
+
+
+/******************************************************************/
+
+/* Definitions for driver SPI */
+#define XPAR_XSPI_NUM_INSTANCES 1
+
+/* Definitions for peripheral SPI_EEPROM */
+#define XPAR_SPI_EEPROM_BASEADDR 0x40A00000
+#define XPAR_SPI_EEPROM_HIGHADDR 0x40A0FFFF
+#define XPAR_SPI_EEPROM_DEVICE_ID 0
+#define XPAR_SPI_EEPROM_FIFO_EXIST 1
+#define XPAR_SPI_EEPROM_SPI_SLAVE_ONLY 0
+#define XPAR_SPI_EEPROM_NUM_SS_BITS 1
+
+
+/******************************************************************/
+
+#define XPAR_XSYSACE_MEM_WIDTH 16
+/* Definitions for driver SYSACE */
+#define XPAR_XSYSACE_NUM_INSTANCES 1
+
+/* Definitions for peripheral SYSACE_COMPACTFLASH */
+#define XPAR_SYSACE_COMPACTFLASH_BASEADDR 0x41800000
+#define XPAR_SYSACE_COMPACTFLASH_HIGHADDR 0x4180FFFF
+#define XPAR_SYSACE_COMPACTFLASH_DEVICE_ID 0
+#define XPAR_SYSACE_COMPACTFLASH_MEM_WIDTH 16
+
+
+/******************************************************************/
+
+/* Definitions for driver IIC */
+#define XPAR_XIIC_NUM_INSTANCES 1
+
+/* Definitions for peripheral IIC_BUS */
+#define XPAR_IIC_BUS_BASEADDR 0x40800000
+#define XPAR_IIC_BUS_HIGHADDR 0x4080FFFF
+#define XPAR_IIC_BUS_DEVICE_ID 0
+#define XPAR_IIC_BUS_TEN_BIT_ADR 0
+#define XPAR_IIC_BUS_GPO_WIDTH 1
+
+
+/******************************************************************/
+
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS 6
+#define XPAR_XINTC_HAS_IPR 1
+#define XPAR_XINTC_USE_DCR 0
+/* Definitions for driver INTC */
+#define XPAR_XINTC_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_INTC_0 */
+#define XPAR_OPB_INTC_0_BASEADDR 0x41200000
+#define XPAR_OPB_INTC_0_HIGHADDR 0x4120FFFF
+#define XPAR_OPB_INTC_0_DEVICE_ID 0
+#define XPAR_OPB_INTC_0_KIND_OF_INTR 0x00000000
+
+
+/******************************************************************/
+
+#define XPAR_INTC_SINGLE_BASEADDR 0x41200000
+#define XPAR_INTC_SINGLE_HIGHADDR 0x4120FFFF
+#define XPAR_INTC_SINGLE_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+#define XPAR_ETHERNET_MAC_IP2INTC_IRPT_MASK 0X000001
+#define XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR 0
+#define XPAR_IIC_BUS_IP2INTC_IRPT_MASK 0X000002
+#define XPAR_OPB_INTC_0_IIC_BUS_IP2INTC_IRPT_INTR 1
+#define XPAR_SYSACE_COMPACTFLASH_SYSACE_IRQ_MASK 0X000004
+#define XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR 2
+#define XPAR_SPI_EEPROM_IP2INTC_IRPT_MASK 0X000008
+#define XPAR_OPB_INTC_0_SPI_EEPROM_IP2INTC_IRPT_INTR 3
+#define XPAR_RS232_UART_2_IP2INTC_IRPT_MASK 0X000010
+#define XPAR_OPB_INTC_0_RS232_UART_2_IP2INTC_IRPT_INTR 4
+#define XPAR_RS232_UART_1_IP2INTC_IRPT_MASK 0X000020
+#define XPAR_OPB_INTC_0_RS232_UART_1_IP2INTC_IRPT_INTR 5
+
+/******************************************************************/
+
+/* Definitions for driver HWICAP */
+#define XPAR_XHWICAP_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_HWICAP_0 */
+#define XPAR_OPB_HWICAP_0_BASEADDR 0x41300000
+#define XPAR_OPB_HWICAP_0_HIGHADDR 0x4130FFFF
+#define XPAR_OPB_HWICAP_0_DEVICE_ID 0
+
+/******************************************************************/
+
+/* Definitions for driver DDR */
+#define XPAR_XDDR_NUM_INSTANCES 1
+
+/* Definitions for peripheral DDR_SDRAM_32MX64 */
+#define XPAR_DDR_SDRAM_32MX64_ECC_BASEADDR 0xFFFFFFFF
+#define XPAR_DDR_SDRAM_32MX64_ECC_HIGHADDR 0x00000000
+#define XPAR_DDR_SDRAM_32MX64_DEVICE_ID 0
+#define XPAR_DDR_SDRAM_32MX64_INCLUDE_ECC_INTR 0
+
+
+/******************************************************************/
+
+/* Definitions for peripheral DDR_SDRAM_32MX64 */
+#define XPAR_DDR_SDRAM_32MX64_MEM0_BASEADDR 0x00000000
+#define XPAR_DDR_SDRAM_32MX64_MEM0_HIGHADDR 0x03FFFFFF
+
+/******************************************************************/
+
+/* Definitions for driver EMAC */
+#define XPAR_XEMAC_NUM_INSTANCES 1
+
+/* Definitions for peripheral ETHERNET_MAC */
+#define XPAR_ETHERNET_MAC_BASEADDR 0x80400000
+#define XPAR_ETHERNET_MAC_HIGHADDR 0x8040FFFF
+#define XPAR_ETHERNET_MAC_DEVICE_ID 0
+#define XPAR_ETHERNET_MAC_ERR_COUNT_EXIST 1
+#define XPAR_ETHERNET_MAC_DMA_PRESENT 1
+#define XPAR_ETHERNET_MAC_MII_EXIST 1
+
+
+/******************************************************************/
+
+
+/* Definitions for peripheral PLB_BRAM_IF_CNTLR_1 */
+#define XPAR_PLB_BRAM_IF_CNTLR_1_BASEADDR 0xfffff000
+#define XPAR_PLB_BRAM_IF_CNTLR_1_HIGHADDR 0xffffffff
+
+
+/******************************************************************/
+
+#define XPAR_CPU_PPC405_CORE_CLOCK_FREQ_HZ 300000000
+
+/******************************************************************/
+
+
+/******************************************************************/
+
+/* Cannonical Constant Names */
+
+/******************************************************************/
+
+#define XPAR_UARTNS550_0_BASEADDR (XPAR_RS232_UART_1_BASEADDR+0x1000)
+#define XPAR_UARTNS550_0_HIGHADDR XPAR_RS232_UART_1_HIGHADDR
+#define XPAR_UARTNS550_0_CLOCK_FREQ_HZ XPAR_XUARTNS550_CLOCK_HZ
+#define XPAR_UARTNS550_0_DEVICE_ID XPAR_RS232_UART_1_DEVICE_ID
+#define XPAR_UARTNS550_1_BASEADDR (XPAR_RS232_UART_2_BASEADDR+0x1000)
+#define XPAR_UARTNS550_1_HIGHADDR XPAR_RS232_UART_2_HIGHADDR
+#define XPAR_UARTNS550_1_CLOCK_FREQ_HZ XPAR_XUARTNS550_CLOCK_HZ
+#define XPAR_UARTNS550_1_DEVICE_ID XPAR_RS232_UART_2_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_SPI_0_BASEADDR XPAR_SPI_EEPROM_BASEADDR
+#define XPAR_SPI_0_HIGHADDR XPAR_SPI_EEPROM_HIGHADDR
+#define XPAR_SPI_0_FIFO_EXIST XPAR_SPI_EEPROM_FIFO_EXIST
+#define XPAR_SPI_0_SPI_SLAVE_ONLY XPAR_SPI_EEPROM_SPI_SLAVE_ONLY
+#define XPAR_SPI_0_NUM_SS_BITS XPAR_SPI_EEPROM_NUM_SS_BITS
+#define XPAR_SPI_0_DEVICE_ID XPAR_SPI_EEPROM_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_SYSACE_0_BASEADDR XPAR_SYSACE_COMPACTFLASH_BASEADDR
+#define XPAR_SYSACE_0_HIGHADDR XPAR_SYSACE_COMPACTFLASH_HIGHADDR
+#define XPAR_SYSACE_0_DEVICE_ID XPAR_SYSACE_COMPACTFLASH_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_IIC_0_BASEADDR XPAR_IIC_BUS_BASEADDR
+#define XPAR_IIC_0_HIGHADDR XPAR_IIC_BUS_HIGHADDR
+#define XPAR_IIC_0_TEN_BIT_ADR XPAR_IIC_BUS_TEN_BIT_ADR
+#define XPAR_IIC_0_DEVICE_ID XPAR_IIC_BUS_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_EMAC_0_BASEADDR XPAR_ETHERNET_MAC_BASEADDR
+#define XPAR_EMAC_0_HIGHADDR XPAR_ETHERNET_MAC_HIGHADDR
+#define XPAR_EMAC_0_DMA_PRESENT XPAR_ETHERNET_MAC_DMA_PRESENT
+#define XPAR_EMAC_0_MII_EXIST XPAR_ETHERNET_MAC_MII_EXIST
+#define XPAR_EMAC_0_ERR_COUNT_EXIST XPAR_ETHERNET_MAC_ERR_COUNT_EXIST
+#define XPAR_EMAC_0_DEVICE_ID XPAR_ETHERNET_MAC_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_HWICAP_0_BASEADDR XPAR_OPB_HWICAP_0_BASEADDR
+#define XPAR_HWICAP_0_HIGHADDR XPAR_OPB_HWICAP_0_HIGHADDR
+#define XPAR_HWICAP_0_DEVICE_ID XPAR_OPB_HWICAP_0_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_INTC_0_BASEADDR XPAR_OPB_INTC_0_BASEADDR
+#define XPAR_INTC_0_HIGHADDR XPAR_OPB_INTC_0_HIGHADDR
+#define XPAR_INTC_0_KIND_OF_INTR XPAR_OPB_INTC_0_KIND_OF_INTR
+#define XPAR_INTC_0_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_INTC_0_EMAC_0_VEC_ID XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_IIC_0_VEC_ID XPAR_OPB_INTC_0_IIC_BUS_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_SYSACE_0_VEC_ID XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR
+#define XPAR_INTC_0_SPI_0_VEC_ID XPAR_OPB_INTC_0_SPI_EEPROM_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_UARTNS550_1_VEC_ID XPAR_OPB_INTC_0_RS232_UART_2_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_UARTNS550_0_VEC_ID XPAR_OPB_INTC_0_RS232_UART_1_IP2INTC_IRPT_INTR
+
+/******************************************************************/
+
+#define XPAR_PLB_CLOCK_FREQ_HZ 100000000
+#define XPAR_CORE_CLOCK_FREQ_HZ XPAR_CPU_PPC405_CORE_CLOCK_FREQ_HZ
+#define XPAR_DDR_0_SIZE 64000000
+
+/******************************************************************/
+
+#define XPAR_PERSISTENT_0_IIC_0_BASEADDR 1024
+#define XPAR_PERSISTENT_0_IIC_0_HIGHADDR 2047
+#define XPAR_PERSISTENT_0_IIC_0_EEPROMADDR 0xA0
+
+/******************************************************************/
+
+#define XPAR_PCI_0_CLOCK_FREQ_HZ 0
+
+/******************************************************************/
+
--- /dev/null
+
+/*******************************************************************
+*
+* CAUTION: This file is automatically generated by libgen.
+* Version: Xilinx EDK 10.1.1 EDK_K_SP1.1
+* DO NOT EDIT.
+*
+* Copyright (c) 2005 Xilinx, Inc. All rights reserved.
+*
+* Description: Driver parameters
+*
+*******************************************************************/
+
+
+/* Definitions for peripheral PLB_BRAM_IF_CNTLR_0 */
+#define XPAR_PLB_BRAM_IF_CNTLR_0_BASEADDR 0xFFFF0000
+#define XPAR_PLB_BRAM_IF_CNTLR_0_HIGHADDR 0xFFFFFFFF
+
+
+/* Definitions for peripheral PLB_BRAM_IF_CNTLR_1 */
+#define XPAR_PLB_BRAM_IF_CNTLR_1_BASEADDR 0xEEE00000
+#define XPAR_PLB_BRAM_IF_CNTLR_1_HIGHADDR 0xEEE01FFF
+
+
+/******************************************************************/
+
+/* Definitions for driver UARTNS550 */
+#define XPAR_XUARTNS550_NUM_INSTANCES 1
+#define XPAR_XUARTNS550_CLOCK_HZ 20000000
+
+/* Definitions for peripheral RS232_UART_0 */
+#define XPAR_RS232_UART_0_DEVICE_ID 0
+#define XPAR_RS232_UART_0_BASEADDR 0xD0000000
+#define XPAR_RS232_UART_0_HIGHADDR 0xD0001FFF
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral RS232_UART_0 */
+#define XPAR_UARTNS550_0_CLOCK_FREQ_HZ 20000000
+#define XPAR_UARTNS550_0_DEVICE_ID XPAR_RS232_UART_0_DEVICE_ID
+#define XPAR_UARTNS550_0_BASEADDR 0xD0000000
+#define XPAR_UARTNS550_0_HIGHADDR 0xD0001FFF
+#define XPAR_UARTNS550_0_SIO_CHAN -1
+
+
+/******************************************************************/
+
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS 8
+#define XPAR_XINTC_HAS_IPR 1
+#define XPAR_XINTC_USE_DCR 0
+/* Definitions for driver INTC */
+#define XPAR_XINTC_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_INTC_0 */
+#define XPAR_OPB_INTC_0_DEVICE_ID 0
+#define XPAR_OPB_INTC_0_BASEADDR 0xD0020200
+#define XPAR_OPB_INTC_0_HIGHADDR 0xD002021F
+#define XPAR_OPB_INTC_0_KIND_OF_INTR 0x00000000
+
+
+/******************************************************************/
+
+#define XPAR_INTC_SINGLE_BASEADDR 0xD0020200
+#define XPAR_INTC_SINGLE_HIGHADDR 0xD002021F
+#define XPAR_INTC_SINGLE_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+#define XPAR_RS232_UART_0_IP2INTC_IRPT_MASK 0X000001
+#define XPAR_OPB_INTC_0_RS232_UART_0_IP2INTC_IRPT_INTR 0
+#define XPAR_LEDS_8BIT_IP2INTC_IRPT_MASK 0X000002
+#define XPAR_OPB_INTC_0_LEDS_8BIT_IP2INTC_IRPT_INTR 1
+#define XPAR_SYSACE_COMPACTFLASH_SYSACE_IRQ_MASK 0X000004
+#define XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR 2
+#define XPAR_SYSTEM_PHY_MII_INT_N_MASK 0X000008
+#define XPAR_OPB_INTC_0_SYSTEM_PHY_MII_INT_N_INTR 3
+#define XPAR_LL_TEMAC_0_TEMACINTC0_IRPT_MASK 0X000010
+#define XPAR_OPB_INTC_0_LL_TEMAC_0_TEMACINTC0_IRPT_INTR 4
+#define XPAR_PPC440_VIRTEX5_0_DMA0RXIRQ_MASK 0X000020
+#define XPAR_OPB_INTC_0_PPC440_VIRTEX5_0_DMA0RXIRQ_INTR 5
+#define XPAR_PPC440_VIRTEX5_0_DMA0TXIRQ_MASK 0X000040
+#define XPAR_OPB_INTC_0_PPC440_VIRTEX5_0_DMA0TXIRQ_INTR 6
+#define XPAR_IIC_BUS_IIC2INTC_IRPT_MASK 0X000080
+#define XPAR_OPB_INTC_0_IIC_BUS_IIC2INTC_IRPT_INTR 7
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral OPB_INTC_0 */
+#define XPAR_INTC_0_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+#define XPAR_INTC_0_BASEADDR 0xD0020200
+#define XPAR_INTC_0_HIGHADDR 0xD002021F
+#define XPAR_INTC_0_KIND_OF_INTR 0x00000000
+
+#define XPAR_INTC_0_UARTNS550_0_VEC_ID XPAR_OPB_INTC_0_RS232_UART_0_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_GPIO_0_VEC_ID XPAR_OPB_INTC_0_LEDS_8BIT_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_SYSACE_0_VEC_ID XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR
+#define XPAR_INTC_0_LLTEMAC_0_VEC_ID XPAR_OPB_INTC_0_LL_TEMAC_0_TEMACINTC0_IRPT_INTR
+#define XPAR_INTC_0_CPU_PPC440_0_DMA0RXIRQ_VEC_ID XPAR_OPB_INTC_0_PPC440_VIRTEX5_0_DMA0RXIRQ_INTR
+#define XPAR_INTC_0_CPU_PPC440_0_DMA0TXIRQ_VEC_ID XPAR_OPB_INTC_0_PPC440_VIRTEX5_0_DMA0TXIRQ_INTR
+#define XPAR_INTC_0_IIC_0_VEC_ID XPAR_OPB_INTC_0_IIC_BUS_IIC2INTC_IRPT_INTR
+
+/******************************************************************/
+
+#define XPAR_XSYSACE_MEM_WIDTH 16
+/* Definitions for driver SYSACE */
+#define XPAR_XSYSACE_NUM_INSTANCES 1
+
+/* Definitions for peripheral SYSACE_COMPACTFLASH */
+#define XPAR_SYSACE_COMPACTFLASH_DEVICE_ID 0
+#define XPAR_SYSACE_COMPACTFLASH_BASEADDR 0xD0030100
+#define XPAR_SYSACE_COMPACTFLASH_HIGHADDR 0xD003017F
+#define XPAR_SYSACE_COMPACTFLASH_MEM_WIDTH 16
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral SYSACE_COMPACTFLASH */
+#define XPAR_SYSACE_0_DEVICE_ID XPAR_SYSACE_COMPACTFLASH_DEVICE_ID
+#define XPAR_SYSACE_0_BASEADDR 0xD0030100
+#define XPAR_SYSACE_0_HIGHADDR 0xD003017F
+#define XPAR_SYSACE_0_MEM_WIDTH 16
+
+
+/******************************************************************/
+
+/* Definitions for driver IIC */
+#define XPAR_XIIC_NUM_INSTANCES 1
+
+/* Definitions for peripheral IIC_BUS */
+#define XPAR_IIC_BUS_DEVICE_ID 0
+#define XPAR_IIC_BUS_BASEADDR 0xD0020000
+#define XPAR_IIC_BUS_HIGHADDR 0xD00201FF
+#define XPAR_IIC_BUS_TEN_BIT_ADR 0
+#define XPAR_IIC_BUS_GPO_WIDTH 1
+
+
+/******************************************************************/
+
+
+/* Canonical definitions for peripheral IIC_BUS */
+#define XPAR_IIC_0_DEVICE_ID XPAR_IIC_BUS_DEVICE_ID
+#define XPAR_IIC_0_BASEADDR 0xD0020000
+#define XPAR_IIC_0_HIGHADDR 0xD00201FF
+#define XPAR_IIC_0_TEN_BIT_ADR 0
+#define XPAR_IIC_0_GPO_WIDTH 1
+
+
+/******************************************************************/
+
+/* Definitions for driver GPIO */
+#define XPAR_XGPIO_NUM_INSTANCES 1
+
+/* Definitions for peripheral LEDS_8BIT */
+#define XPAR_LEDS_8BIT_BASEADDR 0xD0010200
+#define XPAR_LEDS_8BIT_HIGHADDR 0xD00103FF
+#define XPAR_LEDS_8BIT_DEVICE_ID 0
+#define XPAR_LEDS_8BIT_INTERRUPT_PRESENT 1
+#define XPAR_LEDS_8BIT_IS_DUAL 0
+
+
+/******************************************************************/
+
+/* Definitions for driver LLTEMAC */
+#define XPAR_XLLTEMAC_NUM_INSTANCES 1
+
+/* Definitions for peripheral LL_TEMAC_0 Channel 0 */
+#define XPAR_LL_TEMAC_0_CHAN_0_DEVICE_ID 0
+#define XPAR_LL_TEMAC_0_CHAN_0_BASEADDR 0x91200000
+#define XPAR_LL_TEMAC_0_CHAN_0_TXCSUM 0
+#define XPAR_LL_TEMAC_0_CHAN_0_RXCSUM 0
+#define XPAR_LL_TEMAC_0_CHAN_0_PHY_TYPE 1
+
+/* Canonical definitions for peripheral LL_TEMAC_0 Channel 0 */
+#define XPAR_LLTEMAC_0_DEVICE_ID 0
+#define XPAR_LLTEMAC_0_BASEADDR 0x91200000
+#define XPAR_LLTEMAC_0_TXCSUM 0
+#define XPAR_LLTEMAC_0_RXCSUM 0
+#define XPAR_LLTEMAC_0_PHY_TYPE 1
+#define XPAR_LLTEMAC_0_INTR 4
+
+
+/* LocalLink TYPE Enumerations */
+#define XPAR_LL_FIFO 1
+#define XPAR_LL_DMA 2
+
+
+/* Canonical LocalLink parameters for LL_TEMAC_0 */
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_TYPE XPAR_LL_DMA
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_BASEADDR 0x80
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_FIFO_INTR 0xFF
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_DMATX_INTR 6
+#define XPAR_LLTEMAC_0_LLINK_CONNECTED_DMARX_INTR 5
+
+
+/******************************************************************/
+
+
+/* Definitions for peripheral PPC440MC_DDR2_0 */
+#define XPAR_PPC440MC_DDR2_0_MEM_BASEADDR 0x00000000
+#define XPAR_PPC440MC_DDR2_0_MEM_HIGHADDR 0x1FFFFFFF
+
+
+/******************************************************************/
+
+#define XPAR_CPU_PPC440_CORE_CLOCK_FREQ_HZ 400000000
+#define XPAR_CPU_PPC440_IDCR_BASEADDR 0x00000000
+#define XPAR_XLLDMA_USE_DCR
+/******************************************************************/
+#define XPAR_CORE_CLOCK_FREQ_HZ XPAR_CPU_PPC440_CORE_CLOCK_FREQ_HZ
+
+/******************************************************************/
+
+#define XPAR_PCI_0_CLOCK_FREQ_HZ 0
+
+#define XPAR_PLB_CLOCK_FREQ_HZ 100000000
+#define XPAR_DDR_0_SIZE 0x10000000
+
+/******************************************************************/
+
+#define XPAR_PERSISTENT_0_IIC_0_BASEADDR 0
+#define XPAR_PERSISTENT_0_IIC_0_HIGHADDR 1024
+#define XPAR_PERSISTENT_0_IIC_0_EEPROMADDR 0xA0
+
--- /dev/null
+
+/*******************************************************************
+*
+* CAUTION: This file is automatically generated by libgen.
+* Version: Xilinx EDK 8.1.02 EDK_I.20.4
+* DO NOT EDIT.
+*
+* Copyright (c) 2005 Xilinx, Inc. All rights reserved.
+*
+* Description: Driver parameters
+*
+*******************************************************************/
+
+#define XPAR_PLB_V46_PPC405_0_BASEADDR 0x00000108
+#define XPAR_PLB_V46_PPC405_0_HIGHADDR 0x0000010F
+#define XPAR_PLB_V46_0_BASEADDR 0x00000100
+#define XPAR_PLB_V46_0_HIGHADDR 0x00000107
+#define XPAR_VGA_DCR_BASEADDR 0x00000110
+#define XPAR_VGA_DCR_HIGHADDR 0x00000111
+
+/******************************************************************/
+
+#define XPAR_PLB_BRAM_IF_CNTLR_0_BASEADDR 0xFFFF0000
+#define XPAR_PLB_BRAM_IF_CNTLR_0_HIGHADDR 0xFFFFFFFF
+
+/******************************************************************/
+
+#define XPAR_XDDR_NUM_INSTANCES 1
+#define XPAR_DDR_SDRAM_32MX64_ECC_BASEADDR 0xFFFFFFFF
+#define XPAR_DDR_SDRAM_32MX64_ECC_HIGHADDR 0x00000000
+#define XPAR_DDR_SDRAM_32MX64_DEVICE_ID 0
+#define XPAR_DDR_SDRAM_32MX64_INCLUDE_ECC_INTR 0
+
+/******************************************************************/
+
+#define XPAR_DDR_SDRAM_32MX64_MEM0_BASEADDR 0x00000000
+#define XPAR_DDR_SDRAM_32MX64_MEM0_HIGHADDR 0x1FFFFFFF
+
+/******************************************************************/
+
+#define XPAR_XEMAC_NUM_INSTANCES 1
+#define XPAR_ETHERNET_MAC_BASEADDR 0xE0000000
+#define XPAR_ETHERNET_MAC_HIGHADDR 0xE000FFFF
+#define XPAR_ETHERNET_MAC_DEVICE_ID 0
+#define XPAR_ETHERNET_MAC_ERR_COUNT_EXIST 1
+#define XPAR_ETHERNET_MAC_DMA_PRESENT 1
+#define XPAR_ETHERNET_MAC_MII_EXIST 1
+#define XPAR_ETHERNET_MAC_CAM_EXIST 0
+#define XPAR_ETHERNET_MAC_JUMBO_EXIST 0
+
+/******************************************************************/
+
+#define XPAR_XOPBARB_NUM_INSTANCES 1
+#define XPAR_OPB_V20_0_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_V20_0_HIGHADDR 0x00000000
+#define XPAR_OPB_V20_0_DEVICE_ID 0
+#define XPAR_OPB_V20_0_NUM_MASTERS 2
+
+/******************************************************************/
+
+#define XPAR_XUARTNS550_NUM_INSTANCES 2
+#define XPAR_XUARTNS550_CLOCK_HZ 100000000
+#define XPAR_RS232_UART_0_BASEADDR 0xD0000000
+#define XPAR_RS232_UART_0_HIGHADDR 0xD0001FFF
+#define XPAR_RS232_UART_0_DEVICE_ID 0
+#define XPAR_RS232_UART_1_BASEADDR 0xD0002000
+#define XPAR_RS232_UART_1_HIGHADDR 0xD0003FFF
+#define XPAR_RS232_UART_1_DEVICE_ID 1
+
+/******************************************************************/
+
+#define XPAR_XGPIO_NUM_INSTANCES 2
+#define XPAR_LCD_OPTIONAL_BASEADDR 0xD0010000
+#define XPAR_LCD_OPTIONAL_HIGHADDR 0xD00101FF
+#define XPAR_LCD_OPTIONAL_DEVICE_ID 0
+#define XPAR_LEDS_8BIT_BASEADDR 0xD0010200
+#define XPAR_LEDS_8BIT_HIGHADDR 0xD00103FF
+#define XPAR_LEDS_8BIT_DEVICE_ID 1
+
+/******************************************************************/
+
+#define XPAR_XIIC_NUM_INSTANCES 1
+#define XPAR_IIC_BUS_BASEADDR 0xD0020000
+#define XPAR_IIC_BUS_HIGHADDR 0xD00201FF
+#define XPAR_IIC_BUS_DEVICE_ID 0
+#define XPAR_IIC_BUS_TEN_BIT_ADR 0
+#define XPAR_IIC_BUS_GPO_WIDTH 1
+
+/******************************************************************/
+
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS 12
+#define XPAR_XINTC_HAS_IPR 1
+#define XPAR_XINTC_USE_DCR 0
+#define XPAR_XINTC_NUM_INSTANCES 1
+#define XPAR_OPB_INTC_0_BASEADDR 0xD0020200
+#define XPAR_OPB_INTC_0_HIGHADDR 0xD002021F
+#define XPAR_OPB_INTC_0_DEVICE_ID 0
+#define XPAR_OPB_INTC_0_KIND_OF_INTR 0x00000000
+
+/******************************************************************/
+
+#define XPAR_INTC_SINGLE_BASEADDR 0xD0020200
+#define XPAR_INTC_SINGLE_HIGHADDR 0xD002021F
+#define XPAR_INTC_SINGLE_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+#define XPAR_RS232_UART_0_IP2INTC_IRPT_MASK 0X000001
+#define XPAR_OPB_INTC_0_RS232_UART_0_IP2INTC_IRPT_INTR 0
+#define XPAR_RS232_UART_1_IP2INTC_IRPT_MASK 0X000002
+#define XPAR_OPB_INTC_0_RS232_UART_1_IP2INTC_IRPT_INTR 1
+#define XPAR_IIC_BUS_IP2INTC_IRPT_MASK 0X000004
+#define XPAR_OPB_INTC_0_IIC_BUS_IP2INTC_IRPT_INTR 2
+#define XPAR_SYSACE_COMPACTFLASH_SYSACE_IRQ_MASK 0X000008
+#define XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR 3
+#define XPAR_SPI_EEPROM_IP2INTC_IRPT_MASK 0X000010
+#define XPAR_OPB_INTC_0_SPI_EEPROM_IP2INTC_IRPT_INTR 4
+#define XPAR_ETHERNET_MAC_IP2INTC_IRPT_MASK 0X000020
+#define XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR 5
+#define XPAR_SYSTEM_PCI_SBR_INT_MASK 0X000040
+#define XPAR_OPB_INTC_0_SYSTEM_PCI_SBR_INT_INTR 6
+#define XPAR_SYSTEM_PCI_INTD_MASK 0X000080
+#define XPAR_OPB_INTC_0_SYSTEM_PCI_INTD_INTR 7
+#define XPAR_SYSTEM_PCI_INTC_MASK 0X000100
+#define XPAR_OPB_INTC_0_SYSTEM_PCI_INTC_INTR 8
+#define XPAR_SYSTEM_PCI_INTB_MASK 0X000200
+#define XPAR_OPB_INTC_0_SYSTEM_PCI_INTB_INTR 9
+#define XPAR_SYSTEM_PCI_INTA_MASK 0X000400
+#define XPAR_OPB_INTC_0_SYSTEM_PCI_INTA_INTR 10
+#define XPAR_OPB_PCI_1_IP2INTC_IRPT_MASK 0X000800
+#define XPAR_OPB_INTC_0_OPB_PCI_1_IP2INTC_IRPT_INTR 11
+
+/******************************************************************/
+
+#define XPAR_XPCI_NUM_INSTANCES 1
+#define XPAR_OPB_PCI_1_DEVICE_ID 0
+#define XPAR_OPB_PCI_1_BASEADDR 0xDC000000
+#define XPAR_OPB_PCI_1_HIGHADDR 0xDC0001FF
+#define XPAR_OPB_PCI_1_PCIBAR_0 0x00000000
+#define XPAR_OPB_PCI_1_PCIBAR_LEN_0 27
+#define XPAR_OPB_PCI_1_PCIBAR2IPIFBAR_0 0x00000000
+#define XPAR_OPB_PCI_1_PCIBAR_ENDIAN_TRANSLATE_EN_0 0
+#define XPAR_OPB_PCI_1_PCI_PREFETCH_0 1
+#define XPAR_OPB_PCI_1_PCI_SPACETYPE_0 1
+#define XPAR_OPB_PCI_1_PCIBAR_1 0xffffffff
+#define XPAR_OPB_PCI_1_PCIBAR_LEN_1 20
+#define XPAR_OPB_PCI_1_PCIBAR2IPIFBAR_1 0x00000000
+#define XPAR_OPB_PCI_1_PCIBAR_ENDIAN_TRANSLATE_EN_1 0
+#define XPAR_OPB_PCI_1_PCI_PREFETCH_1 1
+#define XPAR_OPB_PCI_1_PCI_SPACETYPE_1 1
+#define XPAR_OPB_PCI_1_PCIBAR_2 0xffffffff
+#define XPAR_OPB_PCI_1_PCIBAR_LEN_2 20
+#define XPAR_OPB_PCI_1_PCIBAR2IPIFBAR_2 0x00000000
+#define XPAR_OPB_PCI_1_PCIBAR_ENDIAN_TRANSLATE_EN_2 0
+#define XPAR_OPB_PCI_1_PCI_PREFETCH_2 1
+#define XPAR_OPB_PCI_1_PCI_SPACETYPE_2 1
+#define XPAR_OPB_PCI_1_IPIFBAR_0 0x20000000
+#define XPAR_OPB_PCI_1_IPIF_HIGHADDR_0 0x2FFFFFFF
+#define XPAR_OPB_PCI_1_IPIFBAR2PCIBAR_0 0x20000000
+#define XPAR_OPB_PCI_1_IPIFBAR_ENDIAN_TRANSLATE_EN_0 0
+#define XPAR_OPB_PCI_1_IPIF_PREFETCH_0 1
+#define XPAR_OPB_PCI_1_IPIF_SPACETYPE_0 1
+#define XPAR_OPB_PCI_1_IPIFBAR_1 0x30000000
+#define XPAR_OPB_PCI_1_IPIF_HIGHADDR_1 0x37FFFFFF
+#define XPAR_OPB_PCI_1_IPIFBAR2PCIBAR_1 0x30000000
+#define XPAR_OPB_PCI_1_IPIFBAR_ENDIAN_TRANSLATE_EN_1 0
+#define XPAR_OPB_PCI_1_IPIF_PREFETCH_1 1
+#define XPAR_OPB_PCI_1_IPIF_SPACETYPE_1 1
+#define XPAR_OPB_PCI_1_IPIFBAR_2 0x38000000
+#define XPAR_OPB_PCI_1_IPIF_HIGHADDR_2 0x3BFFFFFF
+#define XPAR_OPB_PCI_1_IPIFBAR2PCIBAR_2 0x00000000
+#define XPAR_OPB_PCI_1_IPIFBAR_ENDIAN_TRANSLATE_EN_2 0
+#define XPAR_OPB_PCI_1_IPIF_PREFETCH_2 1
+#define XPAR_OPB_PCI_1_IPIF_SPACETYPE_2 0
+#define XPAR_OPB_PCI_1_IPIFBAR_3 0x3E000000
+#define XPAR_OPB_PCI_1_IPIF_HIGHADDR_3 0x3E000FFF
+#define XPAR_OPB_PCI_1_IPIFBAR2PCIBAR_3 0x70000000
+#define XPAR_OPB_PCI_1_IPIFBAR_ENDIAN_TRANSLATE_EN_3 0
+#define XPAR_OPB_PCI_1_IPIF_PREFETCH_3 1
+#define XPAR_OPB_PCI_1_IPIF_SPACETYPE_3 1
+#define XPAR_OPB_PCI_1_IPIFBAR_4 0xffffffff
+#define XPAR_OPB_PCI_1_IPIF_HIGHADDR_4 0x00000000
+#define XPAR_OPB_PCI_1_IPIFBAR2PCIBAR_4 0xffffffff
+#define XPAR_OPB_PCI_1_IPIFBAR_ENDIAN_TRANSLATE_EN_4 0
+#define XPAR_OPB_PCI_1_IPIF_PREFETCH_4 1
+#define XPAR_OPB_PCI_1_IPIF_SPACETYPE_4 1
+#define XPAR_OPB_PCI_1_IPIFBAR_5 0xffffffff
+#define XPAR_OPB_PCI_1_IPIF_HIGHADDR_5 0x00000000
+#define XPAR_OPB_PCI_1_IPIFBAR2PCIBAR_5 0xffffffff
+#define XPAR_OPB_PCI_1_IPIFBAR_ENDIAN_TRANSLATE_EN_5 0
+#define XPAR_OPB_PCI_1_IPIF_PREFETCH_5 1
+#define XPAR_OPB_PCI_1_IPIF_SPACETYPE_5 1
+#define XPAR_OPB_PCI_1_DMA_BASEADDR 0xDD000000
+#define XPAR_OPB_PCI_1_DMA_HIGHADDR 0xDD00007F
+#define XPAR_OPB_PCI_1_DMA_CHAN_TYPE 0
+#define XPAR_OPB_PCI_1_DMA_LENGTH_WIDTH 13
+#define XPAR_OPB_PCI_1_BRIDGE_IDSEL_ADDR_BIT 16
+#define XPAR_OPB_PCI_1_IPIFBAR_NUM 4
+#define XPAR_OPB_PCI_1_INCLUDE_BAROFFSET_REG 0
+#define XPAR_OPB_PCI_1_INCLUDE_DEVNUM_REG 0
+
+/******************************************************************/
+
+#define XPAR_XSPI_NUM_INSTANCES 1
+#define XPAR_SPI_EEPROM_BASEADDR 0xD0030000
+#define XPAR_SPI_EEPROM_HIGHADDR 0xD003007F
+#define XPAR_SPI_EEPROM_DEVICE_ID 0
+#define XPAR_SPI_EEPROM_FIFO_EXIST 1
+#define XPAR_SPI_EEPROM_SPI_SLAVE_ONLY 0
+#define XPAR_SPI_EEPROM_NUM_SS_BITS 1
+
+/******************************************************************/
+
+#define XPAR_XSYSACE_MEM_WIDTH 16
+#define XPAR_XSYSACE_NUM_INSTANCES 1
+#define XPAR_SYSACE_COMPACTFLASH_BASEADDR 0xD0030100
+#define XPAR_SYSACE_COMPACTFLASH_HIGHADDR 0xD003017F
+#define XPAR_SYSACE_COMPACTFLASH_DEVICE_ID 0
+#define XPAR_SYSACE_COMPACTFLASH_MEM_WIDTH 16
+
+/******************************************************************/
+
+#define XPAR_CPU_PPC440_CORE_CLOCK_FREQ_HZ 781250
+#define XPAR_CPU_PPC440_DCRBASEADDR 0x000
+
+/******************************************************************/
+
+
+/******************************************************************/
+
+/* Linux Redefines */
+
+/******************************************************************/
+
+#define XPAR_EMAC_0_BASEADDR XPAR_ETHERNET_MAC_BASEADDR
+#define XPAR_EMAC_0_HIGHADDR XPAR_ETHERNET_MAC_HIGHADDR
+#define XPAR_EMAC_0_DMA_PRESENT XPAR_ETHERNET_MAC_DMA_PRESENT
+#define XPAR_EMAC_0_MII_EXIST XPAR_ETHERNET_MAC_MII_EXIST
+#define XPAR_EMAC_0_ERR_COUNT_EXIST XPAR_ETHERNET_MAC_ERR_COUNT_EXIST
+#define XPAR_EMAC_0_CAM_EXIST XPAR_ETHERNET_MAC_CAM_EXIST
+#define XPAR_EMAC_0_JUMBO_EXIST XPAR_ETHERNET_MAC_JUMBO_EXIST
+#define XPAR_EMAC_0_DEVICE_ID XPAR_ETHERNET_MAC_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_UARTNS550_0_BASEADDR (XPAR_RS232_UART_0_BASEADDR+0x1000)
+#define XPAR_UARTNS550_0_HIGHADDR XPAR_RS232_UART_0_HIGHADDR
+#define XPAR_UARTNS550_0_CLOCK_FREQ_HZ XPAR_XUARTNS550_CLOCK_HZ
+#define XPAR_UARTNS550_0_DEVICE_ID XPAR_RS232_UART_0_DEVICE_ID
+#define XPAR_UARTNS550_1_BASEADDR (XPAR_RS232_UART_1_BASEADDR+0x1000)
+#define XPAR_UARTNS550_1_HIGHADDR XPAR_RS232_UART_1_HIGHADDR
+#define XPAR_UARTNS550_1_CLOCK_FREQ_HZ XPAR_XUARTNS550_CLOCK_HZ
+#define XPAR_UARTNS550_1_DEVICE_ID XPAR_RS232_UART_1_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_GPIO_0_BASEADDR XPAR_LCD_OPTIONAL_BASEADDR
+#define XPAR_GPIO_0_HIGHADDR XPAR_LCD_OPTIONAL_HIGHADDR
+#define XPAR_GPIO_0_DEVICE_ID XPAR_LCD_OPTIONAL_DEVICE_ID
+#define XPAR_GPIO_1_BASEADDR XPAR_LEDS_8BIT_BASEADDR
+#define XPAR_GPIO_1_HIGHADDR XPAR_LEDS_8BIT_HIGHADDR
+#define XPAR_GPIO_1_DEVICE_ID XPAR_LEDS_8BIT_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_IIC_0_BASEADDR XPAR_IIC_BUS_BASEADDR
+#define XPAR_IIC_0_HIGHADDR XPAR_IIC_BUS_HIGHADDR
+#define XPAR_IIC_0_TEN_BIT_ADR XPAR_IIC_BUS_TEN_BIT_ADR
+#define XPAR_IIC_0_DEVICE_ID XPAR_IIC_BUS_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_INTC_0_BASEADDR XPAR_OPB_INTC_0_BASEADDR
+#define XPAR_INTC_0_HIGHADDR XPAR_OPB_INTC_0_HIGHADDR
+#define XPAR_INTC_0_KIND_OF_INTR XPAR_OPB_INTC_0_KIND_OF_INTR
+#define XPAR_INTC_0_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_INTC_0_UARTNS550_0_VEC_ID XPAR_OPB_INTC_0_RS232_UART_0_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_UARTNS550_1_VEC_ID XPAR_OPB_INTC_0_RS232_UART_1_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_IIC_0_VEC_ID XPAR_OPB_INTC_0_IIC_BUS_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_SYSACE_0_VEC_ID XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR
+#define XPAR_INTC_0_SPI_0_VEC_ID XPAR_OPB_INTC_0_SPI_EEPROM_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_EMAC_0_VEC_ID XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_PCI_0_VEC_ID_A XPAR_OPB_INTC_0_OPB_PCI_1_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_PCI_0_VEC_ID_B XPAR_OPB_INTC_0_OPB_PCI_1_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_PCI_0_VEC_ID_C XPAR_OPB_INTC_0_OPB_PCI_1_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_PCI_0_VEC_ID_D XPAR_OPB_INTC_0_OPB_PCI_1_IP2INTC_IRPT_INTR
+
+/******************************************************************/
+
+/*#define XPAR_SPI_0_BASEADDR XPAR_SPI_EEPROM_BASEADDR
+#define XPAR_SPI_0_HIGHADDR XPAR_SPI_EEPROM_HIGHADDR
+#define XPAR_SPI_0_DEVICE_ID XPAR_SPI_EEPROM_DEVICE_ID
+#define XPAR_OPB_SPI_0_FIFO_EXIST 0
+#define XPAR_OPB_SPI_0_SPI_SLAVE_ONLY 1
+#define XPAR_OPB_SPI_0_NUM_SS_BITS 1*/
+
+/******************************************************************/
+
+#define XPAR_SYSACE_0_BASEADDR XPAR_SYSACE_COMPACTFLASH_BASEADDR
+#define XPAR_SYSACE_0_HIGHADDR XPAR_SYSACE_COMPACTFLASH_HIGHADDR
+#define XPAR_SYSACE_0_DEVICE_ID XPAR_SYSACE_COMPACTFLASH_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_PLB_CLOCK_FREQ_HZ 100000000
+#define XPAR_CORE_CLOCK_FREQ_HZ XPAR_CPU_PPC440_CORE_CLOCK_FREQ_HZ
+#define XPAR_DDR_0_SIZE 0x04000000
+
+/******************************************************************/
+
+#define XPAR_PERSISTENT_0_IIC_0_BASEADDR 0x00000400
+#define XPAR_PERSISTENT_0_IIC_0_HIGHADDR 0x000007FF
+#define XPAR_PERSISTENT_0_IIC_0_EEPROMADDR 0xA0
+
+/******************************************************************/
+
+#define XPAR_PCI_0_CLOCK_FREQ_HZ 0
+
+/******************************************************************/
+
+/* FIXME:
+ * These are remaps of Xilinx #defines to conform with autoconf generated
+ * #defines. This is a temporary solution until the new MLD output is
+ * available.
+ *
+ * Using the autoconf #defines also makes it easier to "port" code back and
+ * forth between Xilinx's internal and John Williams' uClinux kernel sources.
+ */
+
+/* OPB EMAC */
+#define XPAR_ETHERNET_NUM_INSTANCES XPAR_XEMAC_NUM_INSTANCES
+
+#define XPAR_ETHERNET_0_BASEADDR XPAR_ETHERNET_MAC_BASEADDR
+#define XPAR_ETHERNET_0_HIGHADDR XPAR_ETHERNET_MAC_HIGHADDR
+#define XPAR_ETHERNET_0_ERR_COUNT_EXIST XPAR_ETHERNET_MAC_ERR_COUNT_EXIST
+#define XPAR_ETHERNET_0_DMA_PRESENT XPAR_ETHERNET_MAC_DMA_PRESENT
+#define XPAR_ETHERNET_0_MII_EXIST XPAR_ETHERNET_MAC_MII_EXIST
+#define XPAR_ETHERNET_0_CAM_EXIST XPAR_ETHERNET_MAC_CAM_EXIST
+#define XPAR_ETHERNET_0_JUMBO_EXIST XPAR_ETHERNET_MAC_JUMBO_EXIST
+
+#define XPAR_ETHERNET_0_INSTANCE XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR
+#define XPAR_ETHERNET_0_IRQ XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR
+
+/* MEMORY MAP */
+#define XPAR_ERAM_START XPAR_DDR_SDRAM_32MX64_MEM0_BASEADDR
+#define XPAR_ERAM_SIZE ((XPAR_DDR_SDRAM_32MX64_MEM0_HIGHADDR) - (XPAR_DDR_SDRAM_32MX64_MEM0_BASEADDR) +1)
+
+/* OPB TIMER */
+#define XPAR_TIMER_0_BASEADDR XPAR_OPB_TIMER_1_BASEADDR
+#define XPAR_TIMER_0_IRQ XPAR_OPB_INTC_0_OPB_TIMER_1_INTERRUPT_INTR
+#define XPAR_CPU_CLOCK_FREQ 51609600
+
+/* MICROBLAZE */
+#define XPAR_MICROBLAZE0_USE_ICACHE XPAR_MICROBLAZE_0_USE_ICACHE
+#define XPAR_MICROBLAZE0_CACHE_BYTE_SIZE XPAR_MICROBLAZE_0_CACHE_BYTE_SIZE
+#define XPAR_MICROBLAZE0_ICACHE_USE_FSL XPAR_MICROBLAZE_0_ICACHE_USE_FSL
--- /dev/null
+
+/*******************************************************************
+*
+* CAUTION: This file is automatically generated by libgen.
+* Version: Xilinx EDK 8.2.02 EDK_Im_Sp2.4
+* DO NOT EDIT.
+*
+* Copyright (c) 2005 Xilinx, Inc. All rights reserved.
+*
+* Description: Driver parameters
+*
+*******************************************************************/
+
+/* Definitions for driver PLBARB */
+#define XPAR_XPLBARB_NUM_INSTANCES 1
+
+/* Definitions for peripheral PLB */
+#define XPAR_PLB_BASEADDR 0x00000000
+#define XPAR_PLB_HIGHADDR 0x00000000
+#define XPAR_PLB_DEVICE_ID 0
+#define XPAR_PLB_PLB_NUM_MASTERS 3
+
+
+/******************************************************************/
+
+/* Definitions for driver OPBARB */
+#define XPAR_XOPBARB_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB */
+#define XPAR_OPB_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_HIGHADDR 0x00000000
+#define XPAR_OPB_DEVICE_ID 0
+#define XPAR_OPB_NUM_MASTERS 1
+
+
+/******************************************************************/
+
+
+/* Definitions for peripheral OPB_SOCKET_0 */
+#define XPAR_OPB_SOCKET_0_BASEADDR 0x7D400000
+#define XPAR_OPB_SOCKET_0_HIGHADDR 0x7D4000FF
+#define XPAR_OPB_SOCKET_0_DCR_BASEADDR 0x40700300
+#define XPAR_OPB_SOCKET_0_DCR_HIGHADDR 0x40700307
+
+/******************************************************************/
+
+/* Definitions for driver OPB_ONEWIRE */
+#define XPAR_OPB_ONEWIRE_NUM_INSTANCES 1
+
+/* Definitions for peripheral ONEWIRE_0 */
+#define XPAR_ONEWIRE_0_BASEADDR 0x7A200000
+#define XPAR_ONEWIRE_0_HIGHADDR 0x7A20FFFF
+
+
+/******************************************************************/
+
+/* Definitions for driver UARTNS550 */
+#define XPAR_XUARTNS550_NUM_INSTANCES 1
+#define XPAR_XUARTNS550_CLOCK_HZ 100000000
+
+/* Definitions for peripheral RS232_UART_1 */
+#define XPAR_RS232_UART_1_BASEADDR 0x40400000
+#define XPAR_RS232_UART_1_HIGHADDR 0x4040FFFF
+#define XPAR_RS232_UART_1_DEVICE_ID 0
+
+
+/******************************************************************/
+
+#define XPAR_XSYSACE_MEM_WIDTH 16
+/* Definitions for driver SYSACE */
+#define XPAR_XSYSACE_NUM_INSTANCES 1
+
+/* Definitions for peripheral SYSACE_COMPACTFLASH */
+#define XPAR_SYSACE_COMPACTFLASH_BASEADDR 0x41800000
+#define XPAR_SYSACE_COMPACTFLASH_HIGHADDR 0x4180FFFF
+#define XPAR_SYSACE_COMPACTFLASH_DEVICE_ID 0
+#define XPAR_SYSACE_COMPACTFLASH_MEM_WIDTH 16
+
+
+/******************************************************************/
+
+/* Definitions for driver GPIO */
+#define XPAR_XGPIO_NUM_INSTANCES 3
+
+/* Definitions for peripheral LEDS_4BIT */
+#define XPAR_LEDS_4BIT_BASEADDR 0x40000000
+#define XPAR_LEDS_4BIT_HIGHADDR 0x4000FFFF
+#define XPAR_LEDS_4BIT_DEVICE_ID 0
+#define XPAR_LEDS_4BIT_INTERRUPT_PRESENT 0
+#define XPAR_LEDS_4BIT_IS_DUAL 0
+
+
+/* Definitions for peripheral DIPSWS_4BIT */
+#define XPAR_DIPSWS_4BIT_BASEADDR 0x40020000
+#define XPAR_DIPSWS_4BIT_HIGHADDR 0x4002FFFF
+#define XPAR_DIPSWS_4BIT_DEVICE_ID 1
+#define XPAR_DIPSWS_4BIT_INTERRUPT_PRESENT 0
+#define XPAR_DIPSWS_4BIT_IS_DUAL 0
+
+
+/* Definitions for peripheral PUSHBUTTONS_5BIT */
+#define XPAR_PUSHBUTTONS_5BIT_BASEADDR 0x40040000
+#define XPAR_PUSHBUTTONS_5BIT_HIGHADDR 0x4004FFFF
+#define XPAR_PUSHBUTTONS_5BIT_DEVICE_ID 2
+#define XPAR_PUSHBUTTONS_5BIT_INTERRUPT_PRESENT 0
+#define XPAR_PUSHBUTTONS_5BIT_IS_DUAL 0
+
+
+/******************************************************************/
+
+#define XPAR_XPS2_NUM_INSTANCES 2
+#define XPAR_PS2_PORTS_DEVICE_ID_0 0
+#define XPAR_PS2_PORTS_BASEADDR_0 0x7a400000
+#define XPAR_PS2_PORTS_HIGHADDR_0 (0x7a400000+0x3F)
+#define XPAR_PS2_PORTS_DEVICE_ID_1 1
+#define XPAR_PS2_PORTS_BASEADDR_1 (0x7a400000+0x1000)
+#define XPAR_PS2_PORTS_HIGHADDR_1 (0x7a400000+0x103F)
+
+/******************************************************************/
+
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS 7
+#define XPAR_XINTC_HAS_IPR 1
+#define XPAR_XINTC_USE_DCR 0
+/* Definitions for driver INTC */
+#define XPAR_XINTC_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_INTC_0 */
+#define XPAR_OPB_INTC_0_BASEADDR 0x41200000
+#define XPAR_OPB_INTC_0_HIGHADDR 0x4120FFFF
+#define XPAR_OPB_INTC_0_DEVICE_ID 0
+#define XPAR_OPB_INTC_0_KIND_OF_INTR 0x00000000
+
+
+/******************************************************************/
+
+#define XPAR_INTC_SINGLE_BASEADDR 0x41200000
+#define XPAR_INTC_SINGLE_HIGHADDR 0x4120FFFF
+#define XPAR_INTC_SINGLE_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+#define XPAR_OPB_TIMER_0_INTERRUPT_MASK 0X000001
+#define XPAR_OPB_INTC_0_OPB_TIMER_0_INTERRUPT_INTR 0
+#define XPAR_OPB_SOCKET_IP2INTC_IRPT_MASK 0X000002
+#define XPAR_OPB_INTC_0_OPB_SOCKET_IP2INTC_IRPT_INTR 1
+#define XPAR_ETHERNET_MAC_IP2INTC_IRPT_MASK 0X000004
+#define XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR 2
+#define XPAR_SYSACE_COMPACTFLASH_SYSACE_IRQ_MASK 0X000008
+#define XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR 3
+#define XPAR_RS232_UART_1_IP2INTC_IRPT_MASK 0X000010
+#define XPAR_OPB_INTC_0_RS232_UART_1_IP2INTC_IRPT_INTR 4
+#define XPAR_PS2_PORTS_SYS_INTR2_MASK 0X000020
+#define XPAR_OPB_INTC_0_PS2_PORTS_SYS_INTR2_INTR 5
+#define XPAR_PS2_PORTS_SYS_INTR1_MASK 0X000040
+#define XPAR_OPB_INTC_0_PS2_PORTS_SYS_INTR1_INTR 6
+
+/******************************************************************/
+
+/* Definitions for driver HWICAP */
+#define XPAR_XHWICAP_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_HWICAP_0 */
+#define XPAR_OPB_HWICAP_0_BASEADDR 0x41300000
+#define XPAR_OPB_HWICAP_0_HIGHADDR 0x4130FFFF
+#define XPAR_OPB_HWICAP_0_DEVICE_ID 0
+
+/******************************************************************/
+
+/* Definitions for driver TFT_REF */
+#define XPAR_XTFT_NUM_INSTANCES 1
+
+/* Definitions for peripheral VGA_FRAMEBUFFER */
+#define XPAR_VGA_FRAMEBUFFER_DCR_BASEADDR 0x40700200
+#define XPAR_VGA_FRAMEBUFFER_DCR_HIGHADDR 0x40700207
+#define XPAR_VGA_FRAMEBUFFER_DEVICE_ID 0
+
+
+/******************************************************************/
+
+/* Definitions for driver TMRCTR */
+#define XPAR_XTMRCTR_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_TIMER_0 */
+#define XPAR_OPB_TIMER_0_BASEADDR 0x40800000
+#define XPAR_OPB_TIMER_0_HIGHADDR 0x408000FF
+#define XPAR_OPB_TIMER_0_DEVICE_ID 0
+
+
+/******************************************************************/
+
+/* Definitions for driver EMAC */
+#define XPAR_XEMAC_NUM_INSTANCES 1
+
+/* Definitions for peripheral ETHERNET_MAC */
+#define XPAR_ETHERNET_MAC_BASEADDR 0x80400000
+#define XPAR_ETHERNET_MAC_HIGHADDR 0x8040FFFF
+#define XPAR_ETHERNET_MAC_DEVICE_ID 0
+#define XPAR_ETHERNET_MAC_ERR_COUNT_EXIST 1
+#define XPAR_ETHERNET_MAC_DMA_PRESENT 1
+#define XPAR_ETHERNET_MAC_MII_EXIST 1
+
+
+/******************************************************************/
+
+/* Definitions for driver DDR */
+#define XPAR_XDDR_NUM_INSTANCES 1
+
+/* Definitions for peripheral DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5 */
+#define XPAR_DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5_ECC_BASEADDR 0xFFFFFFFF
+#define XPAR_DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5_ECC_HIGHADDR 0x00000000
+#define XPAR_DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5_DEVICE_ID 0
+#define XPAR_DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5_INCLUDE_ECC_INTR 0
+
+
+/******************************************************************/
+
+/* Definitions for peripheral DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5 */
+#define XPAR_DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5_MEM0_BASEADDR 0x00000000
+#define XPAR_DDR_256MB_32MX64_RANK1_ROW13_COL10_CL2_5_MEM0_HIGHADDR 0x0FFFFFFF
+
+/******************************************************************/
+
+
+/* Definitions for peripheral PLB_BRAM_IF_CNTLR_1 */
+#define XPAR_PLB_BRAM_IF_CNTLR_1_BASEADDR 0xffffc000
+#define XPAR_PLB_BRAM_IF_CNTLR_1_HIGHADDR 0xffffffff
+
+
+/******************************************************************/
+
+#define XPAR_CPU_PPC405_CORE_CLOCK_FREQ_HZ 300000000
+
+/******************************************************************/
+
+
+/******************************************************************/
+
+/* Canonical Constant Names */
+
+/******************************************************************/
+
+#define XPAR_UARTNS550_0_BASEADDR (XPAR_RS232_UART_1_BASEADDR+0x1000)
+#define XPAR_UARTNS550_0_HIGHADDR XPAR_RS232_UART_1_HIGHADDR
+#define XPAR_UARTNS550_0_CLOCK_FREQ_HZ XPAR_XUARTNS550_CLOCK_HZ
+#define XPAR_UARTNS550_0_DEVICE_ID XPAR_RS232_UART_1_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_EMAC_0_BASEADDR XPAR_ETHERNET_MAC_BASEADDR
+#define XPAR_EMAC_0_HIGHADDR XPAR_ETHERNET_MAC_HIGHADDR
+#define XPAR_EMAC_0_DMA_PRESENT XPAR_ETHERNET_MAC_DMA_PRESENT
+#define XPAR_EMAC_0_MII_EXIST XPAR_ETHERNET_MAC_MII_EXIST
+#define XPAR_EMAC_0_ERR_COUNT_EXIST XPAR_ETHERNET_MAC_ERR_COUNT_EXIST
+#define XPAR_EMAC_0_DEVICE_ID XPAR_ETHERNET_MAC_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_SYSACE_0_BASEADDR XPAR_SYSACE_COMPACTFLASH_BASEADDR
+#define XPAR_SYSACE_0_HIGHADDR XPAR_SYSACE_COMPACTFLASH_HIGHADDR
+#define XPAR_SYSACE_0_DEVICE_ID XPAR_SYSACE_COMPACTFLASH_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_TMRCTR_0_BASEADDR XPAR_OPB_TIMER_0_BASEADDR
+#define XPAR_TMRCTR_0_HIGHADDR XPAR_OPB_TIMER_0_HIGHADDR
+#define XPAR_TMRCTR_0_DEVICE_ID XPAR_OPB_TIMER_0_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_HWICAP_0_BASEADDR XPAR_OPB_HWICAP_0_BASEADDR
+#define XPAR_HWICAP_0_HIGHADDR XPAR_OPB_HWICAP_0_HIGHADDR
+#define XPAR_HWICAP_0_DEVICE_ID XPAR_OPB_HWICAP_0_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_GPIO_0_BASEADDR XPAR_LEDS_4BIT_BASEADDR
+#define XPAR_GPIO_0_HIGHADDR XPAR_LEDS_4BIT_HIGHADDR
+#define XPAR_GPIO_0_IS_DUAL XPAR_LEDS_4BIT_IS_DUAL
+#define XPAR_GPIO_0_DEVICE_ID XPAR_LEDS_4BIT_DEVICE_ID
+#define XPAR_GPIO_1_BASEADDR XPAR_DIPSWS_4BIT_BASEADDR
+#define XPAR_GPIO_1_HIGHADDR XPAR_DIPSWS_4BIT_HIGHADDR
+#define XPAR_GPIO_1_IS_DUAL XPAR_DIPSWS_4BIT_IS_DUAL
+#define XPAR_GPIO_1_DEVICE_ID XPAR_DIPSWS_4BIT_DEVICE_ID
+#define XPAR_GPIO_2_BASEADDR XPAR_PUSHBUTTONS_5BIT_BASEADDR
+#define XPAR_GPIO_2_HIGHADDR XPAR_PUSHBUTTONS_5BIT_HIGHADDR
+#define XPAR_GPIO_2_IS_DUAL XPAR_PUSHBUTTONS_5BIT_IS_DUAL
+#define XPAR_GPIO_2_DEVICE_ID XPAR_PUSHBUTTONS_5BIT_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_PS2_0_BASEADDR XPAR_PS2_PORTS_BASEADDR_0
+#define XPAR_PS2_0_HIGHADDR XPAR_PS2_PORTS_HIGHADDR_0
+#define XPAR_PS2_0_DEVICE_ID XPAR_PS2_PORTS_DEVICE_ID_0
+#define XPAR_PS2_1_BASEADDR XPAR_PS2_PORTS_BASEADDR_1
+#define XPAR_PS2_1_HIGHADDR XPAR_PS2_PORTS_HIGHADDR_1
+#define XPAR_PS2_1_DEVICE_ID XPAR_PS2_PORTS_DEVICE_ID_1
+
+/******************************************************************/
+
+#define XPAR_INTC_0_BASEADDR XPAR_OPB_INTC_0_BASEADDR
+#define XPAR_INTC_0_HIGHADDR XPAR_OPB_INTC_0_HIGHADDR
+#define XPAR_INTC_0_KIND_OF_INTR XPAR_OPB_INTC_0_KIND_OF_INTR
+#define XPAR_INTC_0_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+
+/******************************************************************/
+
+#define XPAR_INTC_0_TMRCTR_0_VEC_ID XPAR_OPB_INTC_0_OPB_TIMER_0_INTERRUPT_INTR
+#define XPAR_INTC_0_OPB_SOCKET_0_VEC_ID XPAR_OPB_INTC_0_OPB_SOCKET_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_EMAC_0_VEC_ID XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_SYSACE_0_VEC_ID XPAR_OPB_INTC_0_SYSACE_COMPACTFLASH_SYSACE_IRQ_INTR
+#define XPAR_INTC_0_UARTNS550_0_VEC_ID XPAR_OPB_INTC_0_RS232_UART_1_IP2INTC_IRPT_INTR
+#define XPAR_INTC_0_PS2_1_VEC_ID XPAR_OPB_INTC_0_PS2_PORTS_SYS_INTR2_INTR
+#define XPAR_INTC_0_PS2_0_VEC_ID XPAR_OPB_INTC_0_PS2_PORTS_SYS_INTR1_INTR
+
+/******************************************************************/
+
+#define XPAR_TFT_0_BASEADDR XPAR_VGA_FRAMEBUFFER_DCR_BASEADDR
+
+/******************************************************************/
+
+#define XPAR_PLB_CLOCK_FREQ_HZ 100000000
+#define XPAR_CORE_CLOCK_FREQ_HZ XPAR_CPU_PPC405_CORE_CLOCK_FREQ_HZ
+#define XPAR_DDR_0_SIZE 0x10000000
+
+/******************************************************************/
+
+#define XPAR_PCI_0_CLOCK_FREQ_HZ 0
+
+/******************************************************************/
+
ifeq ($(CONFIG_4xx),y)
ifeq ($(CONFIG_XILINX_VIRTEX),y)
obj-$(CONFIG_40x) += xilinx_pic.o
+obj-$(CONFIG_44x) += xilinx_pic.o
obj-y += virtex_devices.o
else
+obj-$(CONFIG_44x) += ppc4xx_pic.o
ifeq ($(CONFIG_403),y)
obj-$(CONFIG_40x) += ppc403_pic.o
else
obj-$(CONFIG_40x) += ppc4xx_pic.o
endif
endif
-obj-$(CONFIG_44x) += ppc4xx_pic.o
obj-$(CONFIG_40x) += ppc4xx_setup.o
obj-$(CONFIG_GEN_RTC) += todc_time.o
obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
endif
obj-$(CONFIG_PPC_I8259) += i8259.o
+
#include <linux/serial_8250.h>
#include <syslib/virtex_devices.h>
#include <platforms/4xx/xparameters/xparameters.h>
+#include <linux/xilinx_devices.h>
#include <asm/io.h>
/*
}, \
}
+/*
+ * EMAC: shortcut macro for single instance
+ */
+#define XPAR_EMAC(num) { \
+ .name = "xilinx_emac", \
+ .id = num, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_EMAC_##num##_BASEADDR, \
+ .end = XPAR_EMAC_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = XPAR_INTC_0_EMAC_##num##_VEC_ID, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+ .dev.platform_data = &(struct xemac_platform_data) { \
+ .dma_mode = XPAR_EMAC_##num##_DMA_PRESENT, \
+ .has_mii = XPAR_EMAC_##num##_MII_EXIST, \
+ .has_cam = XPAR_EMAC_##num##_CAM_EXIST, \
+ .has_err_cnt = XPAR_EMAC_##num##_ERR_COUNT_EXIST, \
+ .has_jumbo = XPAR_EMAC_##num##_JUMBO_EXIST, \
+ .tx_dre = XPAR_EMAC_##num##_TX_DRE_TYPE, \
+ .rx_dre = XPAR_EMAC_##num##_RX_DRE_TYPE, \
+ .tx_hw_csum = XPAR_EMAC_##num##_TX_INCLUDE_CSUM, \
+ .rx_hw_csum = XPAR_EMAC_##num##_RX_INCLUDE_CSUM, \
+ /* locally administered default address */ \
+ .mac_addr = {2, 0, 0, 0, 0, num}, \
+ }, \
+}
+
+/*
+ * EMACLITE: shortcut macro for single instance
+ */
+#define XPAR_EMACLITE(num) { \
+ .name = "xilinx_emaclite", \
+ .id = num, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_EMACLITE_##num##_BASEADDR, \
+ .end = XPAR_EMACLITE_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = XPAR_INTC_0_EMACLITE_##num##_VEC_ID, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+ .dev.platform_data = &(struct xemaclite_platform_data) { \
+ .tx_ping_pong = XPAR_EMACLITE_##num##_TX_PING_PONG, \
+ .rx_ping_pong = XPAR_EMACLITE_##num##_RX_PING_PONG, \
+ /* locally administered default address */ \
+ .mac_addr = {2, 0, 0, 0, 0, num}, \
+ }, \
+}
+
+
+/*
+ * Tri-mode EMAC (TEMAC): shortcut macro for single instance
+ */
+#define XPAR_TEMAC_RESOURCES(num) \
+ .name = "xilinx_temac", \
+ .id = XPAR_TEMAC_##num##_DEVICE_ID, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_TEMAC_##num##_BASEADDR, \
+ .end = XPAR_TEMAC_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM \
+ }, \
+ { \
+ .start = XPAR_INTC_0_TEMAC_##num##_VEC_ID, \
+ .end = XPAR_INTC_0_TEMAC_##num##_VEC_ID, \
+ .flags = IORESOURCE_IRQ \
+ } \
+ }
+
+#define XPAR_TEMAC_RX_CSUM(num) { \
+ XPAR_TEMAC_RESOURCES(num), \
+ .dev.platform_data = &(struct xtemac_platform_data) { \
+ .tx_dre = XPAR_TEMAC_##num##_TX_DRE_TYPE, \
+ .rx_dre = XPAR_TEMAC_##num##_RX_DRE_TYPE, \
+ .tx_csum = XPAR_TEMAC_##num##_INCLUDE_TX_CSUM, \
+ .rx_csum = XPAR_TEMAC_##num##_INCLUDE_RX_CSUM, \
+ .phy_type = XPAR_HARD_TEMAC_##num##_PHY_TYPE, \
+ .rx_pkt_fifo_depth = XPAR_TEMAC_##num##_RXFIFO_DEPTH, \
+ .tx_pkt_fifo_depth = XPAR_TEMAC_##num##_TXFIFO_DEPTH, \
+ .dma_mode = XPAR_TEMAC_##num##_DMA_TYPE, \
+ .mac_fifo_depth = XPAR_TEMAC_##num##_MAC_FIFO_DEPTH, \
+ }, \
+}
+
+#define XPAR_TEMAC_NO_RX_CSUM(num) { \
+ XPAR_TEMAC_RESOURCES(num), \
+ .dev.platform_data = &(struct xtemac_platform_data) { \
+ .dcr_host = XPAR_TEMAC_##num##_TEMAC_DCR_HOST, \
+ .dre = XPAR_TEMAC_##num##_INCLUDE_DRE, \
+ .rx_pkt_fifo_depth = XPAR_TEMAC_##num##_IPIF_RDFIFO_DEPTH, \
+ .tx_pkt_fifo_depth = XPAR_TEMAC_##num##_IPIF_WRFIFO_DEPTH, \
+ .dma_mode = XPAR_TEMAC_##num##_DMA_TYPE, \
+ .mac_fifo_depth = XPAR_TEMAC_##num##_MAC_FIFO_DEPTH \
+ }, \
+}
+
+#define XPAR_LLTEMAC_RESOURCES(num) \
+ .name = "xilinx_lltemac", \
+ .id = XPAR_LLTEMAC_##num##_DEVICE_ID, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_LLTEMAC_##num##_BASEADDR, \
+ .end = XPAR_LLTEMAC_##num##_BASEADDR + 0x1000, \
+ .flags = IORESOURCE_MEM \
+ }, \
+ { \
+ .start = XPAR_INTC_0_LLTEMAC_##num##_VEC_ID, \
+ .end = XPAR_INTC_0_LLTEMAC_##num##_VEC_ID, \
+ .flags = IORESOURCE_IRQ \
+ } \
+ }
+
+#ifdef XPAR_XLLDMA_USE_DCR
+#define DCRHOST 0xFF
+#else
+#define DCRHOST 0x00
+#endif
+
+#define XPAR_LLTEMAC(num) { \
+ XPAR_LLTEMAC_RESOURCES(num), \
+ .dev.platform_data = &(struct xlltemac_platform_data) { \
+ .tx_csum = XPAR_LLTEMAC_##num##_TXCSUM, \
+ .rx_csum = XPAR_LLTEMAC_##num##_RXCSUM, \
+ .phy_type = XPAR_LLTEMAC_##num##_PHY_TYPE, \
+ .dcr_host = DCRHOST, \
+ .ll_dev_type = XPAR_LLTEMAC_##num##_LLINK_CONNECTED_TYPE, \
+ .ll_dev_baseaddress = XPAR_LLTEMAC_##num##_LLINK_CONNECTED_BASEADDR, \
+ .ll_dev_dma_rx_irq = XPAR_LLTEMAC_##num##_LLINK_CONNECTED_DMARX_INTR, \
+ .ll_dev_dma_tx_irq = XPAR_LLTEMAC_##num##_LLINK_CONNECTED_DMATX_INTR, \
+ .ll_dev_fifo_irq = XPAR_LLTEMAC_##num##_LLINK_CONNECTED_FIFO_INTR, \
+ /* locally administered default address */ \
+ .mac_addr = {2, 0, 0, 0, 0, num}, \
+ }, \
+}
+
+
+#define XPAR_PS2(num) { \
+ .name = "xilinx_ps2", \
+ .id = num, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_PS2_##num##_BASEADDR, \
+ .end = XPAR_PS2_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = XPAR_INTC_0_PS2_##num##_VEC_ID, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+}
+
+#define XPAR_IIC(num) { \
+ .name = "xilinx_iic", \
+ .id = num, \
+ .num_resources = 2, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_IIC_##num##_BASEADDR, \
+ .end = XPAR_IIC_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = XPAR_INTC_0_IIC_##num##_VEC_ID, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+}
+
+#ifdef CONFIG_XILINX_VIRTEX_II_PRO
+#define XPAR_HWICAP_FAMILY "virtex2p"
+#endif
+#ifdef CONFIG_XILINX_VIRTEX_4_FX
+#define XPAR_HWICAP_FAMILY "virtex4"
+#endif
+#ifdef CONFIG_XILINX_VIRTEX_5
+#define XPAR_HWICAP_FAMILY "virtex5"
+#endif
+#ifndef XPAR_HWICAP_FAMILY
+#define XPAR_HWICAP_FAMILY NULL
+#endif
+
+#define XPAR_HWICAP(num) { \
+ .name = "icap", \
+ .id = num, \
+ .num_resources = 1, \
+ .resource = (struct resource[]) { \
+ { \
+ .start = XPAR_HWICAP_##num##_BASEADDR, \
+ .end = XPAR_HWICAP_##num##_HIGHADDR, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ }, \
+ .dev.platform_data = XPAR_HWICAP_FAMILY, \
+}
+
#define XPAR_AC97_CONTROLLER_REFERENCE(num) { \
- .name = "ml403_ac97cr", \
+ .name = "ml403-ac97cr", \
.id = num, \
.num_resources = 3, \
.resource = (struct resource[]) { \
{ \
- .start = XPAR_OPB_AC97_CONTROLLER_REF_##num##_BASEADDR, \
- .end = XPAR_OPB_AC97_CONTROLLER_REF_##num##_HIGHADDR, \
+ .start = XPAR_AC97_CONTROLLER_REF_##num##_BASEADDR, \
+ .end = XPAR_AC97_CONTROLLER_REF_##num##_HIGHADDR, \
.flags = IORESOURCE_MEM, \
}, \
{ \
- .start = XPAR_INTC_0_AC97_CONTROLLER_REF_##num##_PLAYBACK_VEC_ID, \
- .end = XPAR_INTC_0_AC97_CONTROLLER_REF_##num##_PLAYBACK_VEC_ID, \
+ .start = XPAR_INTC_0_OPB_AC97_CONTROLLER_REF_##num##_PLAYBACK_INTERRUPT_INTR, \
+ .end = XPAR_INTC_0_OPB_AC97_CONTROLLER_REF_##num##_PLAYBACK_INTERRUPT_INTR, \
.flags = IORESOURCE_IRQ, \
}, \
{ \
- .start = XPAR_INTC_0_AC97_CONTROLLER_REF_##num##_RECORD_VEC_ID, \
- .end = XPAR_INTC_0_AC97_CONTROLLER_REF_##num##_RECORD_VEC_ID, \
+ .start = XPAR_INTC_0_OPB_AC97_CONTROLLER_REF_##num##_RECORD_INTERRUPT_INTR, \
+ .end = XPAR_INTC_0_OPB_AC97_CONTROLLER_REF_##num##_RECORD_INTERRUPT_INTR, \
.flags = IORESOURCE_IRQ, \
}, \
}, \
/* Full UART instances */
#if defined(XPAR_UARTNS550_0_BASEADDR)
{
- .name = "serial8250",
- .id = 0,
+ .name = "serial8250",
+ .id = 0,
.dev.platform_data = virtex_serial_platform_data,
},
#endif
XPAR_SYSACE(1),
#endif
+ /* EMAC instances */
+#if defined(XPAR_EMAC_0_BASEADDR)
+ XPAR_EMAC(0),
+#endif
+#if defined(XPAR_EMAC_1_BASEADDR)
+ XPAR_EMAC(1),
+#endif
+#if defined(XPAR_EMAC_2_BASEADDR)
+ XPAR_EMAC(2),
+#endif
+#if defined(XPAR_EMAC_3_BASEADDR)
+ XPAR_EMAC(3),
+#endif
+
+ /* EMACLITE instances */
+#if defined(XPAR_EMACLITE_0_BASEADDR)
+ XPAR_EMACLITE(0),
+#endif
+#if defined(XPAR_EMACLITE_1_BASEADDR)
+ XPAR_EMACLITE(1),
+#endif
+#if defined(XPAR_EMACLITE_2_BASEADDR)
+ XPAR_EMACLITE(2),
+#endif
+#if defined(XPAR_EMACLITE_3_BASEADDR)
+ XPAR_EMACLITE(3),
+#endif
+
+ /* TEMAC instances */
+#if defined(XPAR_TEMAC_0_BASEADDR)
+#if defined(XPAR_TEMAC_0_INCLUDE_RX_CSUM)
+ XPAR_TEMAC_RX_CSUM(0),
+#else
+ XPAR_TEMAC_NO_RX_CSUM(0),
+#endif
+#endif
+
+#if defined(XPAR_TEMAC_1_BASEADDR)
+#if defined(XPAR_TEMAC_1_INCLUDE_RX_CSUM)
+ XPAR_TEMAC_RX_CSUM(1),
+#else
+ XPAR_TEMAC_NO_RX_CSUM(1),
+#endif
+#endif
+
+#if defined(XPAR_TEMAC_2_BASEADDR)
+#if defined(XPAR_TEMAC_2_INCLUDE_RX_CSUM)
+ XPAR_TEMAC_RX_CSUM(2),
+#else
+ XPAR_TEMAC_NO_RX_CSUM(2),
+#endif
+#endif
+
+#if defined(XPAR_TEMAC_3_BASEADDR)
+#if defined(XPAR_TEMAC_3_INCLUDE_RX_CSUM)
+ XPAR_TEMAC_RX_CSUM(3),
+#else
+ XPAR_TEMAC_NO_RX_CSUM(3),
+#endif
+#endif
+
+ /* LLTEMAC instances */
+#if defined(XPAR_LLTEMAC_0_BASEADDR)
+ XPAR_LLTEMAC(0),
+#endif
+#if defined(XPAR_LLTEMAC_1_BASEADDR)
+ XPAR_LLTEMAC(1),
+#endif
+#if defined(XPAR_LLTEMAC_2_BASEADDR)
+ XPAR_LLTEMAC(2),
+#endif
+#if defined(XPAR_LLTEMAC_3_BASEADDR)
+ XPAR_LLTEMAC(3),
+#endif
+
+#if defined(XPAR_PS2_0_BASEADDR)
+ XPAR_PS2(0),
+#endif
+#if defined(XPAR_PS2_1_BASEADDR)
+ XPAR_PS2(1),
+#endif
+#if defined(XPAR_PS2_2_BASEADDR)
+ XPAR_PS2(2),
+#endif
+#if defined(XPAR_PS2_3_BASEADDR)
+ XPAR_PS2(3),
+#endif
+
+#if defined(XPAR_IIC_0_BASEADDR)
+ XPAR_IIC(0),
+#endif
+
+#if defined(XPAR_HWICAP_0_BASEADDR)
+ XPAR_HWICAP(0),
+#endif
+
+ /* ML300/403 reference design framebuffer */
#if defined(XPAR_TFT_0_BASEADDR)
XPAR_TFT(0),
#endif
virtex_early_serial_init(int num, struct plat_serial8250_port *pdata)
{
#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
+ extern void gen550_init(int i, struct uart_port *serial_req);
struct uart_port serial_req;
memset(&serial_req, 0, sizeof(serial_req));
- serial_req.mapbase = pdata->mapbase;
- serial_req.membase = pdata->membase;
- serial_req.irq = pdata->irq;
- serial_req.uartclk = pdata->uartclk;
- serial_req.regshift = pdata->regshift;
- serial_req.iotype = pdata->iotype;
- serial_req.flags = pdata->flags;
+ serial_req.mapbase = pdata->mapbase;
+ serial_req.membase = pdata->membase;
+ serial_req.irq = pdata->irq;
+ serial_req.uartclk = pdata->uartclk;
+ serial_req.regshift = pdata->regshift;
+ serial_req.iotype = pdata->iotype;
+ serial_req.flags = pdata->flags;
gen550_init(num, &serial_req);
#endif
}
if (virtex_device_fixup(index) != 0)
continue;
+ printk(KERN_INFO "Registering device %s:%d\n",
+ index->name, index->id);
if (platform_device_register(index)) {
ret = 1;
printk(KERN_ERR "cannot register dev %s:%d\n",
*/
int virtex_device_fixup(struct platform_device *dev);
-/* SPI Controller IP */
-struct xspi_platform_data {
- s16 bus_num;
- u16 num_chipselect;
- u32 speed_hz;
-};
-
#endif /* __ASM_VIRTEX_DEVICES_H__ */
/*
+ * arch/ppc/syslib/xilinx_pic.c
+ *
* Interrupt controller driver for Xilinx Virtex-II Pro.
*
* Author: MontaVista Software, Inc.
xilinx_intc_end(unsigned int irq)
{
unsigned long mask = (0x00000001 << (irq & 31));
-
pr_debug("end: %d\n", irq);
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- intc_out_be32(intc + SIE, mask);
/* ack level sensitive intr */
if (irq_desc[irq].status & IRQ_LEVEL)
intc_out_be32(intc + IAR, mask);
+ /* unmask the interrupt */
+ intc_out_be32(intc + SIE, mask);
}
}
static struct hw_interrupt_type xilinx_intc = {
- .typename = "Xilinx Interrupt Controller",
- .enable = xilinx_intc_enable,
- .disable = xilinx_intc_disable,
- .ack = xilinx_intc_disable_and_ack,
- .end = xilinx_intc_end,
+ .typename = "Xilinx Interrupt Controller",
+ .enable = xilinx_intc_enable,
+ .disable = xilinx_intc_disable,
+ .ack = xilinx_intc_disable_and_ack,
+ .end = xilinx_intc_end,
};
int
xilinx_pic_get_irq(void)
{
- int irq;
+ u32 irq;
+
+#ifdef CONFIG_XILINX_INTC_IVR_WORKAROUND
+ u32 ipr;
/*
* NOTE: This function is the one that needs to be improved in
* order to handle multiple interrupt controllers. It currently
* is hardcoded to check for interrupts only on the first INTC.
*/
-
+ ipr = intc_in_be32(intc + IPR);
+ irq = 0;
+ while (irq <= XPAR_INTC_MAX_NUM_INTR_INPUTS) {
+ if (ipr & 0x1) {
+ break;
+ }
+ irq++;
+ ipr = ipr >> 1;
+ }
+#else
irq = intc_in_be32(intc + IVR);
- if (irq != -1)
- irq = irq;
- pr_debug("get_irq: %d\n", irq);
+	/* If no interrupt is pending, all bits of the IVR read as 1. Since
+	 * the IVR is only as many bits wide as there are interrupt inputs,
+	 * its value will then be greater than or equal to
+	 * XPAR_INTC_MAX_NUM_INTR_INPUTS, which we report as "no interrupt".
+	 */
+ if (irq >= XPAR_INTC_MAX_NUM_INTR_INPUTS)
+ irq = -1; /* report no pending interrupt. */
+ pr_debug("get_irq: %d\n", irq);
+#endif
return (irq);
}
source "drivers/auxdisplay/Kconfig"
+source "drivers/xilinx_common/Kconfig"
+
source "drivers/uio/Kconfig"
source "drivers/xen/Kconfig"
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
+obj-$(CONFIG_XILINX_EDK) += xilinx_common/
obj-$(CONFIG_SUPERH) += sh/
obj-$(CONFIG_GENERIC_TIME) += clocksource/
obj-$(CONFIG_DMA_ENGINE) += dma/
config XILINX_SYSACE
tristate "Xilinx SystemACE support"
- depends on 4xx
+ depends on XILINX_DRIVERS
+ help
+ Include support for the Xilinx SystemACE CompactFlash interface
+
+config XILINX_SYSACE_OLD
+ tristate "Xilinx SystemACE support (old driver)"
+ depends on XILINX_DRIVERS
help
Include support for the Xilinx SystemACE CompactFlash interface
obj-$(CONFIG_VIODASD) += viodasd.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_BLK_DEV_UB) += ub.o
+obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
+obj-$(CONFIG_XILINX_SYSACE_OLD) += xilinx_sysace/
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
--- /dev/null
+#
+# Makefile for the Xilinx System ACE driver
+#
+
+EXTRA_CFLAGS += -Idrivers/xilinx_common -Iarch/ppc/platforms/4xx/xparameters
+
+# The Linux Version for the Xilinx driver code.
+xilinx_sysace-objs += xsysace_linux.o
+
+# The Xilinx OS independent code.
+xilinx_sysace-objs += xsysace.o xsysace_intr.o xsysace_l.o
+xilinx_sysace-objs += xsysace_compactflash.o xsysace_jtagcfg.o
+
+obj-$(CONFIG_XILINX_SYSACE_OLD) += xilinx_sysace.o
--- /dev/null
+/* $Id: xsysace.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2005 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace.c
+*
+* The Xilinx System ACE driver component. This driver supports the Xilinx
+* System Advanced Configuration Environment (ACE) controller. It currently
+* supports only the CompactFlash solution. See xsysace.h for a detailed
+* description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* 1.00a rmm 05/14/03 Fixed diab compiler warnings relating to asserts
+* 1.01a jvb 12/13/05 I changed Initialize() into CfgInitialize(), and made
+* CfgInitialize() take a pointer to a config structure
+* instead of a device id. I moved Initialize() into
+* xgpio_sinit.c, and had Initialize() call CfgInitialize()
+* after it retrieved the config structure using the device
+* id. I removed include of xparameters.h along with any
+* dependencies on xparameters.h and the _g.c config table.
+* The dependency on XPAR_XSYSACE_MEM_WIDTH still remains.
+*
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace.h"
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void StubEventHandler(void *CallBackRef, int Event);
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Initialize a specific XSysAce instance. The configuration information is
+* passed in as an argument and the driver instance data is initialized
+* appropriately.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param Config is a reference to a structure containing information about a
+* specific SysAce device. This function initializes an InstancePtr object
+* for a specific device specified by the contents of Config. This function
+* can initialize multiple instance objects with the use of multiple calls
+* giving different Config information on each call.
+* @param EffectiveAddr is the device base address in the virtual memory address
+* space. The caller is responsible for keeping the address mapping
+* from EffectiveAddr to the device physical base address unchanged
+* once this function is invoked. Unexpected errors may occur if the
+* address mapping changes after this function is called. If address
+*		translation is not used, use Config->BaseAddress for this parameter,
+* passing the physical address instead.
+*
+* @return
+*
+* XST_SUCCESS if successful.
+*
+* @note
+*
+* We do not want to reset the configuration controller here since this could
+* cause a reconfiguration of the JTAG target chain, depending on how the
+* CFGMODEPIN of the device is wired.
+* <br><br>
+* The Config pointer argument is not used by this function, but is provided
+* to keep the function signature consistent with other drivers.
+*
+******************************************************************************/
+int XSysAce_CfgInitialize(XSysAce * InstancePtr, XSysAce_Config * Config,
+ u32 EffectiveAddr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ InstancePtr->IsReady = 0;
+
+ /*
+ * Set some default values for the instance data
+ */
+ InstancePtr->BaseAddress = EffectiveAddr;
+ InstancePtr->EventHandler = StubEventHandler;
+ InstancePtr->NumRequested = 0;
+ InstancePtr->NumRemaining = 0;
+ InstancePtr->BufferPtr = NULL;
+
+ /*
+ * Put the device into 16-bit mode or 8-bit mode depending on compile-time
+ * parameter
+ */
+#if (XPAR_XSYSACE_MEM_WIDTH == 16)
+ XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_BMR_OFFSET,
+ XSA_BMR_16BIT_MASK);
+#else
+ XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_BMR_OFFSET, 0);
+#endif
+
+ /*
+ * Disable interrupts. Interrupts must be enabled by the user using
+ * XSysAce_EnableInterrupt(). Put the interrupt request line in reset and
+ * clear the interrupt enable bits.
+ */
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress, XSA_CR_RESETIRQ_MASK);
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress,
+ ~(XSA_CR_DATARDYIRQ_MASK | XSA_CR_ERRORIRQ_MASK |
+ XSA_CR_CFGDONEIRQ_MASK));
+
+ /*
+ * Indicate the instance is now ready to use, initialized without error
+ */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Attempt to lock access to the CompactFlash. The CompactFlash may be accessed
+* by the MPU port as well as the JTAG configuration port within the System ACE
+* device. This function requests exclusive access to the CompactFlash for the
+* MPU port. This is a non-blocking request. If access cannot be locked
+* (because the configuration controller has the lock), an appropriate status is
+* returned. In this case, the user should call this function again until
+* successful.
+*
+* If the user requests a forced lock, the JTAG configuration controller will
+* be put into a reset state in case it currently has a lock on the CompactFlash.
+* This effectively aborts any operation the configuration controller had in
+* progress and makes the configuration controller restart its process the
+* next time it is able to get a lock.
+*
+* A lock must be granted to the user before attempting to read or write the
+* CompactFlash device.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param Force is a boolean value that, when set to TRUE, will force the MPU
+* lock to occur in the System ACE. When set to FALSE, the lock is
+* requested and the device arbitrates between the MPU request and
+* JTAG requests. Forcing the MPU lock resets the configuration
+* controller, thus aborting any configuration operations in progress.
+*
+* @return
+*
+* XST_SUCCESS if the lock was granted, or XST_DEVICE_BUSY if the lock was
+* not granted because the configuration controller currently has access to
+* the CompactFlash.
+*
+* @note
+*
+* If the lock is not granted to the MPU immediately, this function removes its
+* request for a lock so that a lock is not later granted at a time when the
+* application is (a) not ready for the lock, or (b) cannot be informed
+* asynchronously about the granted lock since there is no such interrupt event.
+*
+******************************************************************************/
+int XSysAce_Lock(XSysAce * InstancePtr, u32 Force)
+{
+ u32 IsLocked;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Check to see if the configuration controller currently has the lock
+ */
+ IsLocked = (XSysAce_mGetStatusReg(InstancePtr->BaseAddress) &
+ XSA_SR_CFGLOCK_MASK);
+
+ if (Force) {
+ /*
+ * Reset the configuration controller if it has the lock. Per ASIC
+ * designer, this eliminates a potential deadlock if the FORCELOCK and
+ * LOCKREQ bits are both set and the RDYFORCFCMD is not set.
+ */
+ if (IsLocked) {
+ /* Reset the configuration controller */
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+ XSA_CR_CFGRESET_MASK);
+ }
+
+ /* Force the MPU lock. The lock will occur immediately. */
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+ XSA_CR_LOCKREQ_MASK |
+ XSA_CR_FORCELOCK_MASK);
+ }
+ else {
+ /*
+ * Check to see if the configuration controller has the lock. If so,
+ * return a busy status.
+ */
+ if (IsLocked) {
+ return XST_DEVICE_BUSY;
+ }
+
+ /* Request the lock, but do not force it */
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+ XSA_CR_LOCKREQ_MASK);
+ }
+
+ /*
+ * See if the lock was granted. Note that it is guaranteed to occur if
+ * the user forced it.
+ */
+ if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+ /* Lock was not granted, so remove request and return a busy */
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress,
+ ~(XSA_CR_LOCKREQ_MASK |
+ XSA_CR_FORCELOCK_MASK));
+
+ return XST_DEVICE_BUSY;
+ }
+
+ /*
+ * Lock has been granted.
+ *
+ * If the configuration controller had the lock and has been reset,
+ * go ahead and release it from reset as it will not be able to get
+ * the lock again until the MPU lock is released.
+ */
+ if (IsLocked && Force) {
+ /* Release the reset of the configuration controller */
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress,
+ ~XSA_CR_CFGRESET_MASK);
+ }
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Release the MPU lock to the CompactFlash. If a lock is not currently granted
+* to the MPU port, this function has no effect.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_Unlock(XSysAce * InstancePtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Blindly clear the lock and force-lock request bits of the control
+ * register
+ */
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress,
+ ~(XSA_CR_LOCKREQ_MASK | XSA_CR_FORCELOCK_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* Get all outstanding errors. Errors include the inability to read or write
+* CompactFlash and the inability to successfully configure FPGA devices along
+* the target FPGA chain.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* A 32-bit mask of error values. See xsysace_l.h for a description of possible
+* values. The error identifiers are prefixed with XSA_ER_*.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XSysAce_GetErrors(XSysAce * InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ return XSysAce_mGetErrorReg(InstancePtr->BaseAddress);
+}
+
+/*****************************************************************************/
+/**
+*
+* Stub for the asynchronous event callback. The stub is here in case the upper
+* layers forget to set the handler.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+* @param Event is the event that occurs
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void StubEventHandler(void *CallBackRef, int Event)
+{
+ XASSERT_VOID_ALWAYS();
+}
--- /dev/null
+/* $Id: xsysace.h,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2005 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace.h
+*
+* The Xilinx System ACE driver. This driver supports the Xilinx System Advanced
+* Configuration Environment (ACE) controller. It currently supports only the
+* CompactFlash solution. The driver makes use of the Microprocessor (MPU)
+* interface to communicate with the device.
+*
+* The driver provides a user the ability to access the CompactFlash through
+* the System ACE device. The user can read and write CompactFlash sectors,
+* identify the flash device, and reset the flash device. Also, the driver
+* provides a user the ability to configure FPGA devices by selecting a
+* configuration file (.ace file) resident on the CompactFlash, or directly
+* configuring the FPGA devices via the MPU port and the configuration JTAG
+* port of the controller.
+*
+* <b>Initialization & Configuration</b>
+*
+* The XSysAce_Config structure is used by the driver to configure itself. This
+* configuration structure is typically created by the tool-chain based on HW
+* build properties.
+*
+* To support multiple runtime loading and initialization strategies employed
+* by various operating systems, the driver instance can be initialized in one
+* of the following ways:
+*
+* - XSysAce_Initialize(InstancePtr, DeviceId) - The driver looks up its own
+* configuration structure created by the tool-chain based on an ID provided
+* by the tool-chain.
+*
+* - XSysAce_CfgInitialize(InstancePtr, CfgPtr, EffectiveAddr) - Uses a
+* configuration structure provided by the caller. If running in a system
+* with address translation, the provided virtual memory base address
+* replaces the physical address present in the configuration structure.
+*
+* <b>Bus Mode</b>
+*
+* The System ACE device supports both 8-bit and 16-bit access to its registers.
+* The driver defaults to 8-bit access, but can be changed to use 16-bit access
+* at compile-time. The compile-time constant XPAR_XSYSACE_MEM_WIDTH must be
+* defined equal to 16 to make the driver use 16-bit access. This constant is
+* typically defined in xparameters.h.
+*
+* <b>Endianness</b>
+*
+* The System ACE device is little-endian. If being accessed by a big-endian
+* processor, the endian conversion will be done by the device driver. The
+* endian conversion is encapsulated inside the XSysAce_RegRead/Write functions
+* so that it can be removed if the endian conversion is moved to hardware.
+*
+* <b>Hardware Access</b>
+*
+* The device driver expects the System ACE controller to be a memory-mapped
+* device. Access to the System ACE controller is typically achieved through
+* the External Memory Controller (EMC) IP core. The EMC is simply a pass-through
+* device that allows access to the off-chip System ACE device. There is no
+* software-based setup or configuration necessary for the EMC.
+*
+* The System ACE registers are expected to be byte-addressable. If for some
+* reason this is not possible, the register offsets defined in xsysace_l.h must
+* be changed accordingly.
+*
+* <b>Reading or Writing CompactFlash</b>
+*
+* The smallest unit that can be read from or written to CompactFlash is one
+* sector. A sector is 512 bytes. The functions provided by this driver allow
+* the user to specify a starting sector ID and the number of sectors to be read
+* or written. At most 256 sectors can be read or written in one operation. The
+* user must ensure that the buffer passed to the functions is big enough to
+* hold (512 * NumSectors), where NumSectors is the number of sectors specified.
+*
+* <b>Interrupt Mode</b>
+*
+* By default, the device and driver are in polled mode. The user is required to
+* enable interrupts using XSysAce_EnableInterrupt(). In order to use interrupts,
+* it is necessary for the user to connect the driver's interrupt handler,
+* XSysAce_InterruptHandler(), to the interrupt system of the application. This
+* function does not save and restore the processor context. An event handler
+* must also be set by the user, using XSysAce_SetEventHandler(), for the driver
+* such that the handler is called when interrupt events occur. The handler is
+* called from interrupt context and allows application-specific processing to
+* be performed.
+*
+* In interrupt mode, the only available interrupt is data buffer ready, so
+* the size of a data transfer between interrupts is 32 bytes (the size of the
+* data buffer).
+*
+* <b>Polled Mode</b>
+*
+* The sector read and write functions are blocking when in polled mode. This
+* choice was made over non-blocking since sector transfer rates are high
+* (>20Mbps) and the user can limit the number of sectors transferred in a single
+* operation to 1 when in polled mode, plus the API for non-blocking polled
+* functions was a bit awkward. Below is some more information on the sector
+* transfer rates given the current state of technology (year 2002). Although
+* the seek times for CompactFlash cards are high, this average hit needs to be
+* taken every time a new read/write operation is invoked by the user. So the
+* additional few microseconds to transfer an entire sector along with seeking
+* is minuscule.
+*
+* - Microdrives are slower than CompactFlash cards by a significant factor,
+* especially if the MD is asleep.
+* - Microdrive:
+* - Power-up/wake-up time is approx. 150 to 1000 ms.
+* - Average seek time is approx. 15 to 20 ms.
+* - CompactFlash:
+* - Power-up/reset time is approx. 50 to 400 ms and wake-up time is
+* approx. 3 ms.
+* - "Seek time" here means how long it takes the internal controller
+* to process the command until the sector data is ready for transfer
+* by the ACE controller. This time is approx. 2 ms per sector.
+*
+* - Once the sector data is ready in the CF device buffer (i.e., "seek time" is
+* over) the ACE controller can read 2 bytes from the MD/CF device every 11
+* clock cycles, assuming no wait cycles happen. For instance, if the clock
+* is 33 MHz, then the max. rate that the ACE controller can transfer is
+* 6 MB/sec. However, due to other overhead (e.g., time for data buffer
+* transfers over MPU port, etc.), a better estimate is 3-5 MB/sec.
+*
+* <b>Mutual Exclusion</b>
+*
+* This driver is not thread-safe. The System ACE device has a single data
+* buffer and therefore only one operation can be active at a time. The device
+* driver does not prevent the user from starting an operation while a previous
+* operation is still in progress. It is up to the user to provide this mutual
+* exclusion.
+*
+* <b>Errors</b>
+*
+* Error causes are defined in xsysace_l.h using the prefix XSA_ER_*. The
+* user can use XSysAce_GetErrors() to retrieve all outstanding errors.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* 1.01a jvb 12/14/05 I separated dependency on the static config table and
+* xparameters.h from the driver initialization by moving
+* _Initialize and _LookupConfig to _sinit.c. I also added
+* the new _CfgInitialize routine. (The dependency on
+* XPAR_XSYSACE_MEM_WIDTH still remains.)
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XSYSACE_H /* prevent circular inclusions */
+#define XSYSACE_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/** @name Asynchronous Events
+ *
+ * Asynchronous events passed to the event handler when in interrupt mode.
+ *
+ * Note that when an error event occurs, the only way to clear this condition
+ * is to reset the CompactFlash or the System ACE configuration controller,
+ * depending on where the error occurred. The driver does not reset either
+ * and leaves this task to the user.
+ * @{
+ */
+#define XSA_EVENT_CFG_DONE 1 /**< Configuration of JTAG chain is done */
+#define XSA_EVENT_DATA_DONE 2 /**< Data transfer to/from CompactFlash is done */
+#define XSA_EVENT_ERROR 3 /**< An error occurred. Use XSysAce_GetErrors()
+ * to determine the cause of the error(s).
+ */
+/*@}*/
+
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * Typedef for CompactFlash identify drive parameters. Use XSysAce_IdentifyCF()
+ * to retrieve this information from the CompactFlash storage device.
+ */
+typedef struct {
+	/* Field order mirrors the raw CF identify-drive data block; the
+	 * driver fills this from a 512-byte sector buffer field-by-field
+	 * because the compiler's struct packing is not known. */
+	u16 Signature;		/**< CompactFlash signature is 0x848a */
+	u16 NumCylinders;	/**< Default number of cylinders */
+	u16 Reserved;
+	u16 NumHeads;		/**< Default number of heads */
+	u16 NumBytesPerTrack;
+				/**< Number of unformatted bytes per track */
+	u16 NumBytesPerSector;
+				/**< Number of unformatted bytes per sector */
+	u16 NumSectorsPerTrack;
+				/**< Default number of sectors per track */
+	u32 NumSectorsPerCard;
+				/**< Default number of sectors per card */
+	u16 VendorUnique;	/**< Vendor unique */
+	u8 SerialNo[20];	/**< ASCII serial number */
+	u16 BufferType;		/**< Buffer type */
+	u16 BufferSize;		/**< Buffer size in 512-byte increments */
+	u16 NumEccBytes;	/**< Number of ECC bytes on R/W Long cmds */
+	u8 FwVersion[8];	/**< ASCII firmware version */
+	u8 ModelNo[40];		/**< ASCII model number */
+	u16 MaxSectors;		/**< Max sectors on R/W Multiple cmds */
+	u16 DblWord;		/**< Double Word not supported */
+	u16 Capabilities;	/**< Device capabilities */
+	u16 Reserved2;
+	u16 PioMode;		/**< PIO data transfer cycle timing mode */
+	u16 DmaMode;		/**< DMA data transfer cycle timing mode */
+	u16 TranslationValid;
+				/**< Translation parameters are valid */
+	u16 CurNumCylinders;/**< Current number of cylinders */
+	u16 CurNumHeads;	/**< Current number of heads */
+	u16 CurSectorsPerTrack;
+				/**< Current number of sectors per track */
+	u32 CurSectorsPerCard;
+				/**< Current capacity in sectors */
+	u16 MultipleSectors;/**< Multiple sector setting */
+	u32 LbaSectors;		/**< Number of addressable sectors in LBA mode */
+	u8 Reserved3[132];
+	u16 SecurityStatus;	/**< Security status */
+	u8 VendorUniqueBytes[62];
+				/**< Vendor unique bytes */
+	u16 PowerDesc;		/**< Power requirement description */
+	u8 Reserved4[190];
+
+} XSysAce_CFParameters;
+
+
+/**
+ * Callback when an asynchronous event occurs during interrupt mode.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer
+ * when setting the callback functions, and passed back to the upper
+ * layer when the callback is invoked.
+ * @param Event is the event that occurred. See xsysace.h and the event
+ * identifiers prefixed with XSA_EVENT_* for a description of possible
+ * events.
+ */
+typedef void (*XSysAce_EventHandler) (void *CallBackRef, int Event);	/**< Event is one of XSA_EVENT_* */
+
+/**
+ * This typedef contains configuration information for the device.
+ */
+typedef struct {
+	u16 DeviceId;	/**< Unique ID of device */
+	u32 BaseAddress;/**< Register base address; physical, or replaced by a
+			 *   virtual address via XSysAce_CfgInitialize() */
+
+} XSysAce_Config;
+
+/**
+ * The XSysAce driver instance data. The user is required to allocate a
+ * variable of this type for every System ACE device in the system. A
+ * pointer to a variable of this type is then passed to the driver API
+ * functions.
+ */
+typedef struct {
+	u32 BaseAddress;	/* Base address of ACE device */
+	u32 IsReady;		/* Device is initialized and ready */
+
+	/* Interrupt-related transfer state; driver-internal, maintained by
+	 * the sector read/write functions and the interrupt handler. */
+	int NumRequested;	/* Number of bytes to read/write */
+	int NumRemaining;	/* Number of bytes left to read/write */
+	u8 *BufferPtr;		/* Buffer being read/written */
+	XSysAce_EventHandler EventHandler;	/* Callback for asynchronous events */
+	void *EventRef;		/* Callback reference */
+
+} XSysAce;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization functions in xsysace_sinit.c
+ */
+int XSysAce_Initialize(XSysAce * InstancePtr, u16 DeviceId);
+XSysAce_Config *XSysAce_LookupConfig(u16 DeviceId);
+
+/*
+ * Required functions in xsysace.c
+ */
+int XSysAce_CfgInitialize(XSysAce * InstancePtr, XSysAce_Config * Config,
+ u32 EffectiveAddr);
+int XSysAce_Lock(XSysAce * InstancePtr, u32 Force);
+void XSysAce_Unlock(XSysAce * InstancePtr);
+u32 XSysAce_GetErrors(XSysAce * InstancePtr);
+
+/*
+ * CompactFlash access functions in xsysace_compactflash.c
+ */
+int XSysAce_ResetCF(XSysAce * InstancePtr);
+int XSysAce_AbortCF(XSysAce * InstancePtr);
+int XSysAce_IdentifyCF(XSysAce * InstancePtr, XSysAce_CFParameters * ParamPtr);
+u32 XSysAce_IsCFReady(XSysAce * InstancePtr);
+int XSysAce_SectorRead(XSysAce * InstancePtr, u32 StartSector,
+ int NumSectors, u8 *BufferPtr);
+int XSysAce_SectorWrite(XSysAce * InstancePtr, u32 StartSector,
+ int NumSectors, u8 *BufferPtr);
+u16 XSysAce_GetFatStatus(XSysAce * InstancePtr);
+
+/*
+ * JTAG configuration interface functions in xsysace_jtagcfg.c
+ */
+void XSysAce_ResetCfg(XSysAce * InstancePtr);
+void XSysAce_SetCfgAddr(XSysAce * InstancePtr, unsigned int Address);
+void XSysAce_SetStartMode(XSysAce * InstancePtr, u32 ImmedOnReset,
+ u32 SetStart);
+u32 XSysAce_IsCfgDone(XSysAce * InstancePtr);
+u32 XSysAce_GetCfgSector(XSysAce * InstancePtr);
+int XSysAce_ProgramChain(XSysAce * InstancePtr, u8 *BufferPtr, int NumBytes);
+
+/*
+ * General interrupt-related functions in xsysace_intr.c
+ */
+void XSysAce_EnableInterrupt(XSysAce * InstancePtr);
+void XSysAce_DisableInterrupt(XSysAce * InstancePtr);
+void XSysAce_SetEventHandler(XSysAce * InstancePtr,
+ XSysAce_EventHandler FuncPtr, void *CallBackRef);
+void XSysAce_InterruptHandler(void *InstancePtr); /* interrupt handler */
+
+/*
+ * Diagnostic functions in xsysace_selftest.c
+ */
+int XSysAce_SelfTest(XSysAce * InstancePtr);
+u16 XSysAce_GetVersion(XSysAce * InstancePtr);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xsysace_compactflash.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_compactflash.c
+*
+* Contains functions to reset, read, and write the CompactFlash device via
+* the System ACE controller.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* 1.00a ecm 09/17/04 Fixed the endianism issue with the string copies.
+* Replaced the ByteCopy with WordCopySwap which
+* copies the bytes and swaps to correct the endianism.
+* CR 194182
+*
+* 1.00a ecm 09/27/04 Fixed the lack of reset during read and write in
+* L1 functions.
+* CR 194423
+*
+* 1.00a ecm 12/09/04 Removed the above fix, breaks MVL.
+* CR 200015
+*
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace.h"
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Indices into the parameter information from the CompactFlash. When the
+ * user calls XSysAce_IdentifyCF(), the parameter information is read into a
+ * byte buffer. The byte buffer is then mapped to a XSysAce_CFParameters
+ * structure using these indices into the byte buffer.
+ */
+#define XSA_CFPARM_SIGNATURE 0
+#define XSA_CFPARM_NUMCYLS 2
+#define XSA_CFPARM_RESERVED1 4
+#define XSA_CFPARM_NUMHEADS 6
+#define XSA_CFPARM_BYTES_TRACK 8
+#define XSA_CFPARM_BYTES_SECT 10
+#define XSA_CFPARM_SECTS_TRK 12
+#define XSA_CFPARM_SECTS_HI 14
+#define XSA_CFPARM_SECTS_LO 16
+#define XSA_CFPARM_VENDOR1 18
+#define XSA_CFPARM_SERIAL_NO 20
+#define XSA_CFPARM_BUFFER_TYPE 40
+#define XSA_CFPARM_BUFFER_SIZE 42
+#define XSA_CFPARM_ECC_BYTES 44
+#define XSA_CFPARM_FW_VERSION 46
+#define XSA_CFPARM_MODEL_NO 54
+#define XSA_CFPARM_MAX_SECTORS 94
+#define XSA_CFPARM_DBL_WORD 96
+#define XSA_CFPARM_CAPS 98
+#define XSA_CFPARM_RESERVED2 100
+#define XSA_CFPARM_PIO_MODE 102
+#define XSA_CFPARM_DMA_MODE 104
+#define XSA_CFPARM_TRANSLATE 106
+#define XSA_CFPARM_CURCYLS 108
+#define XSA_CFPARM_CURHEADS 110
+#define XSA_CFPARM_CURSECTS_TRK 112
+#define XSA_CFPARM_CURSECTS 114
+#define XSA_CFPARM_MULTIPLE 118
+#define XSA_CFPARM_LBA_SECTS 120
+#define XSA_CFPARM_RESERVED3 124
+#define XSA_CFPARM_SECURITY 256
+#define XSA_CFPARM_VENDOR2 258
+#define XSA_CFPARM_POWER 320
+#define XSA_CFPARM_RESERVED4 322
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void WordCopySwap(u8 *SourcePtr, u8 *DestPtr, int NumBytes);
+static void FillParam(XSysAce_CFParameters * ParamPtr, u8 *BufPtr);
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Reset the CompactFlash device. This function does not reset the System ACE
+* controller. An ATA soft-reset of the CompactFlash is performed.
+*
+* An MPU lock, obtained using XSysAce_Lock(), must be granted before calling
+* this function. If a lock has not been granted, no action is taken and an
+* error is returned.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if the reset was done successfully
+* - XST_SYSACE_NO_LOCK if no MPU lock has yet been granted
+* - XST_DEVICE_BUSY if the CompactFlash is not ready for a command
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XSysAce_ResetCF(XSysAce * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* If a lock has not been granted, return an error */
+	if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+		return XST_SYSACE_NO_LOCK;
+	}
+
+	/* See if the CF is ready for a command */
+	if (!XSysAce_mIsReadyForCmd(InstancePtr->BaseAddress)) {
+		return XST_DEVICE_BUSY;
+	}
+
+	/*
+	 * If interrupts are enabled, enable the error interrupt. A reset clears
+	 * the error status, so we're going to re-enable the interrupt here so any
+	 * new errors will be caught.
+	 */
+	if (XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+		XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+				      XSA_CR_ERRORIRQ_MASK);
+	}
+
+	/*
+	 * Send the reset command (an ATA soft-reset of the CompactFlash;
+	 * the ACE controller itself is not reset here)
+	 */
+	XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_SCCR_OFFSET,
+			   XSA_SCCR_RESET_MASK);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Abort the CompactFlash operation currently in progress.
+*
+* An MPU lock, obtained using XSysAce_Lock(), must be granted before calling
+* this function. If a lock has not been granted, no action is taken and an
+* error is returned.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if the abort was done successfully
+* - XST_SYSACE_NO_LOCK if no MPU lock has yet been granted
+* - XST_DEVICE_BUSY if the CompactFlash is not ready for a command
+*
+* @note
+*
+* According to the ASIC designer, the abort command has not been well tested.
+*
+******************************************************************************/
+int XSysAce_AbortCF(XSysAce * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* If a lock has not been granted, return an error */
+	if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+		return XST_SYSACE_NO_LOCK;
+	}
+
+	/*
+	 * See if the CF is ready for a command
+	 *
+	 * TODO: make sure this check works, or possibly the abort can be done
+	 * if it is not ready for a command (e.g., that's what we're aborting)?
+	 */
+	if (!XSysAce_mIsReadyForCmd(InstancePtr->BaseAddress)) {
+		return XST_DEVICE_BUSY;
+	}
+
+	/*
+	 * Send the abort command
+	 * NOTE(review): abort is reported as not well tested in hardware
+	 */
+	XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_SCCR_OFFSET,
+			   XSA_SCCR_ABORT_MASK);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Identify the CompactFlash device. Retrieves the parameters for the
+* CompactFlash storage device. Note that this is a polled read of one sector
+* of data. The data is read from the CompactFlash into a byte buffer, which
+* is then copied into the XSysAce_CFParameters structure passed in by the
+* user. The copy is necessary since we don't know how the compiler packs
+* the XSysAce_CFParameters structure.
+*
+* An MPU lock, obtained using XSysAce_Lock(), must be granted before calling
+* this function. If a lock has not been granted, no action is taken and an
+* error is returned.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param ParamPtr is a pointer to a XSysAce_CFParameters structure where the
+* information for the CompactFlash device will be stored. See xsysace.h
+* for details on the XSysAce_CFParameters structure.
+*
+* @return
+*
+* - XST_SUCCESS if the identify was done successfully
+* - XST_FAILURE if an error occurs. Use XSysAce_GetErrors() to determine cause.
+* - XST_SYSACE_NO_LOCK if no MPU lock has yet been granted
+* - XST_DEVICE_BUSY if the CompactFlash is not ready for a command
+*
+* @note
+*
+* None.
+*
+* @internal
+*
+* The identify command has the same protocol as the read sector command
+* according to the CompactFlash specification. However, there is a discrepancy
+* in that same specification on the size of the parameter structure. The word
+* addresses defined in the spec indicate the parameter information is a full
+* 512 bytes, the same size as a sector. The total bytes defined in the spec,
+* however, indicate that the parameter information is only 500 bytes. We
+* defined the parameter structure in xsysace.h assuming the parameters are the
+* full 512 bytes since that makes sense, and therefore ignored the "Total
+* Bytes" column in the spec.
+*
+* The SectorData variable was made static to avoid putting 512 bytes on the
+* stack every time this function is called.
+*
+******************************************************************************/
+int XSysAce_IdentifyCF(XSysAce * InstancePtr, XSysAce_CFParameters * ParamPtr)
+{
+	int NumRead;
+	u32 InterruptsOn;
+	/* static to keep 512 bytes off the stack; note this makes the
+	 * function rely on the driver's documented lack of thread-safety */
+	static u8 SectorData[XSA_CF_SECTOR_SIZE];
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(ParamPtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* If a lock has not been granted, return an error */
+	if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+		return XST_SYSACE_NO_LOCK;
+	}
+
+	/* See if the CF is ready for a command */
+	if (!XSysAce_mIsReadyForCmd(InstancePtr->BaseAddress)) {
+		return XST_DEVICE_BUSY;
+	}
+
+	/*
+	 * If interrupts are enabled, we disable them because we want to do this
+	 * identify in polled mode - due to the buffer endian conversion and copy
+	 * that takes place.
+	 */
+	InterruptsOn = XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress);
+	if (InterruptsOn) {
+		XSysAce_DisableInterrupt(InstancePtr);
+	}
+
+	/*
+	 * Send the identify command
+	 */
+	XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_SCCR_OFFSET,
+			   XSA_SCCR_IDENTIFY_MASK);
+
+	/* Reset configuration controller (be sure to keep the lock) */
+	/* This breaks mvl, beware! */
+	/* XSysAce_mOrControlReg(InstancePtr->BaseAddress, XSA_CR_CFGRESET_MASK); */
+
+	/*
+	 * Read a sector of data from the data buffer. The parameter info is
+	 * the same size as a sector.
+	 */
+	NumRead = XSysAce_ReadDataBuffer(InstancePtr->BaseAddress, SectorData,
+					 XSA_CF_SECTOR_SIZE);
+
+	/* Clear reset of configuration controller */
+	/* This breaks mvl, beware! */
+	/*XSysAce_mAndControlReg(InstancePtr->BaseAddress, ~(XSA_CR_CFGRESET_MASK)); */
+
+	/* If interrupts were on, re-enable interrupts (regardless of error) */
+	if (InterruptsOn) {
+		XSysAce_EnableInterrupt(InstancePtr);
+	}
+
+	if (NumRead == 0) {
+		/* an error occurred; caller can use XSysAce_GetErrors() */
+		return XST_FAILURE;
+	}
+
+	/*
+	 * Copy the byte buffer to the parameter structure (field-by-field,
+	 * since the compiler's packing of the structure is unknown)
+	 */
+	FillParam(ParamPtr, SectorData);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Check to see if the CompactFlash is ready for a command. The CompactFlash
+* may delay after one operation before it is ready for the next. This function
+* helps the user determine when it is ready before invoking a CompactFlash
+* operation such as XSysAce_SectorRead() or XSysAce_SectorWrite().
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* TRUE if the CompactFlash is ready for a command, and FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XSysAce_IsCFReady(XSysAce * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Read the ready-for-command status directly from the device */
+	return XSysAce_mIsReadyForCmd(InstancePtr->BaseAddress);
+}
+
+/*****************************************************************************/
+/**
+*
+* Read at least one sector of data from the CompactFlash. The user specifies
+* the starting sector ID and the number of sectors to be read. The minimum unit
+* that can be read from the CompactFlash is a sector, which is 512 bytes.
+*
+* In polled mode, this read is blocking. If there are other tasks in the system
+* that must run, it is best to keep the number of sectors to be read to a
+* minimum (e.g., 1). In interrupt mode, this read is non-blocking and an event,
+* XSA_EVENT_DATA_DONE, is returned to the user in the asynchronous event
+* handler when the read is complete. The user must call
+* XSysAce_EnableInterrupt() to put the driver/device into interrupt mode.
+*
+* An MPU lock, obtained using XSysAce_Lock(), must be granted before calling
+* this function. If a lock has not been granted, no action is taken and an
+* error is returned.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param StartSector is the starting sector ID from where data will be read.
+* Sector IDs range from 0 (first sector) to 0x10000000.
+* @param NumSectors is the number of sectors to read. The range can be from
+* 1 to 256.
+* @param BufferPtr is a pointer to a buffer where the data will be stored.
+* The user must ensure it is big enough to hold (512 * NumSectors) bytes.
+*
+* @return
+*
+* - XST_SUCCESS if the read was successful. In interrupt mode, this does not
+* mean the read is complete, only that it has begun. An event is returned
+* to the user when the read is complete.
+* - XST_SYSACE_NO_LOCK if no MPU lock has yet been granted
+* - XST_DEVICE_BUSY if the ACE controller is not ready for a command
+* - XST_FAILURE if an error occurred during the read. The user should call
+* XSysAce_GetErrors() to determine the cause of the error.
+*
+* @note
+*
+* None.
+*
+* @internal
+*
+* Polled mode is blocking under the assumption that a single sector can be
+* transferred at a very fast rate (>20 Mbps). So, the user can choose to
+* transfer only single sectors when in polled mode, thus allowing time for
+* other work to be done. The biggest issue is that although data transfer
+* rates are high, seek time for CompactFlash cards is slow (5-20 ms on
+* average, depending on the type of device). We could move to a non-blocking
+* solution that transfers 32 bytes at a time (the entire data buffer) and
+* then returns. The user would then need to increment its buffer pointer
+* appropriately and call the read/write again. The driver would need some way
+* to know not to issue a new command to the CompactFlash, but instead continue
+* with the previous command. This can be done either with a NumSectors argument
+* of zero to indicate that there is already an operation in progress, or by
+* having the driver keep state to know there is an operation in progress. The
+* interface for either seems a bit awkward. Also, the hit for seek time needs
+* to be taken regardless of the blocking or non-blocking nature of the call, so
+* the additional few microseconds to transfer a sector of data seems acceptable.
+*
+******************************************************************************/
+int XSysAce_SectorRead(XSysAce * InstancePtr, u32 StartSector,
+		       int NumSectors, u8 *BufferPtr)
+{
+	u16 SectorCmd;
+	int BytesToRecv;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	/* valid range is 1..256 sectors (XSA_SCCR_COUNT_MASK + 1) */
+	XASSERT_NONVOID(NumSectors > 0 &&
+			NumSectors <= (XSA_SCCR_COUNT_MASK + 1));
+	XASSERT_NONVOID(BufferPtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* If a lock has not been granted, return an error */
+	if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+		return XST_SYSACE_NO_LOCK;
+	}
+
+	/* See if the CF is ready for a command */
+	if (!XSysAce_mIsReadyForCmd(InstancePtr->BaseAddress)) {
+		return XST_DEVICE_BUSY;
+	}
+
+	BytesToRecv = XSA_CF_SECTOR_SIZE * NumSectors;
+
+	/*
+	 * If in interrupt mode, set up the state variables and enable the
+	 * data-buffer-ready interrupt. This needs to be done before the command
+	 * is sent to the ACE, which will cause the interrupt to occur.
+	 */
+	if (XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+		InstancePtr->NumRequested = BytesToRecv;
+		InstancePtr->NumRemaining = BytesToRecv;
+		InstancePtr->BufferPtr = BufferPtr;
+
+		XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+				      XSA_CR_DATARDYIRQ_MASK);
+	}
+
+	/* Write the sector ID (LBA) */
+	XSysAce_RegWrite32(InstancePtr->BaseAddress + XSA_MLR_OFFSET,
+			   StartSector);
+
+	/*
+	 * Send the read command for the number of sectors specified
+	 */
+	SectorCmd = (NumSectors & XSA_SCCR_COUNT_MASK) | XSA_SCCR_READDATA_MASK;
+	XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_SCCR_OFFSET,
+			   SectorCmd);
+
+	/*
+	 * If in polled mode, receive the entire amount requested
+	 * (blocks until all BytesToRecv bytes have been read)
+	 */
+	if (!XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+		int NumRead;
+
+		/* Reset configuration controller (be sure to keep the lock) */
+		/* This breaks mvl, beware! */
+		/*XSysAce_mOrControlReg(InstancePtr->BaseAddress, XSA_CR_CFGRESET_MASK); */
+
+		NumRead =
+		    XSysAce_ReadDataBuffer(InstancePtr->BaseAddress,
+					   BufferPtr, BytesToRecv);
+		/* Clear reset of configuration controller */
+		/* This breaks mvl, beware! */
+		/*XSysAce_mAndControlReg(InstancePtr->BaseAddress, ~(XSA_CR_CFGRESET_MASK)); */
+
+		if (NumRead != BytesToRecv) {
+			/* an error occurred, report this to the user */
+			return XST_FAILURE;
+		}
+	}
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Write data to the CompactFlash. The user specifies the starting sector ID
+* and the number of sectors to be written. The minimum unit that can be written
+* to the CompactFlash is a sector, which is 512 bytes.
+*
+* In polled mode, this write is blocking. If there are other tasks in the
+* system that must run, it is best to keep the number of sectors to be written
+* to a minimum (e.g., 1). In interrupt mode, this write is non-blocking and an
+* event, XSA_EVENT_DATA_DONE, is returned to the user in the asynchronous
+* event handler when the write is complete. The user must call
+* XSysAce_EnableInterrupt() to put the driver/device into interrupt mode.
+*
+* An MPU lock, obtained using XSysAce_Lock(), must be granted before calling
+* this function. If a lock has not been granted, no action is taken and an
+* error is returned.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param StartSector is the starting sector ID from where data will be written.
+* Sector IDs range from 0 (first sector) to 0x10000000.
+* @param NumSectors is the number of sectors to write. The range can be from
+* 1 to 256.
+* @param BufferPtr is a pointer to the data buffer to be written. This buffer
+* must have at least (512 * NumSectors) bytes.
+*
+* @return
+*
+* - XST_SUCCESS if the write was successful. In interrupt mode, this does not
+* mean the write is complete, only that it has begun. An event is returned
+* to the user when the write is complete.
+* - XST_SYSACE_NO_LOCK if no MPU lock has yet been granted
+* - XST_DEVICE_BUSY if the ACE controller is not ready for a command
+* - XST_FAILURE if an error occurred during the write. The user should call
+* XSysAce_GetErrors() to determine the cause of the error.
+*
+* @note
+*
+* None.
+*
+* @internal
+*
+* Polled mode is blocking under the assumption that a single sector can be
+* transferred at a very fast rate (>20 Mbps). So, the user can choose to
+* transfer only single sectors when in polled mode, thus allowing time for
+* other work to be done. The biggest issue is that although data transfer
+* rates are high, seek time for CompactFlash cards is slow (5-20 ms on
+* average, depending on the type of device). We could move to a non-blocking
+* solution that transfers 32 bytes at a time (the entire data buffer) and
+* then returns. The user would then need to increment its buffer pointer
+* appropriately and call the read/write again. The driver would need some way
+* to know not to issue a new command to the CompactFlash, but instead continue
+* with the previous command. This can be done either with a NumSectors argument
+* of zero to indicate that there is already an operation in progress, or by
+* having the driver keep state to know there is an operation in progress. The
+* interface for either seems a bit awkward. Also, the hit for seek time needs
+* to be taken regardless of the blocking or non-blocking nature of the call, so
+* the additional few microseconds to transfer a sector of data seems acceptable.
+*
+******************************************************************************/
+int XSysAce_SectorWrite(XSysAce * InstancePtr, u32 StartSector,
+ int NumSectors, u8 *BufferPtr)
+{
+ u16 SectorCmd;
+ int NumSent;
+ int BytesToSend;
+
+ /* Validate arguments; NumSectors must fit the SCCR count field */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(NumSectors > 0 &&
+ NumSectors <= (XSA_SCCR_COUNT_MASK + 1));
+ XASSERT_NONVOID(BufferPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* If a lock has not been granted, return an error */
+ if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+ return XST_SYSACE_NO_LOCK;
+ }
+
+ /* See if the CF is ready for a command */
+ if (!XSysAce_mIsReadyForCmd(InstancePtr->BaseAddress)) {
+ return XST_DEVICE_BUSY;
+ }
+
+ /* Write the sector ID (LBA) */
+ XSysAce_RegWrite32(InstancePtr->BaseAddress + XSA_MLR_OFFSET,
+ StartSector);
+
+ /*
+ * Send the write command for the number of sectors specified.
+ * NOTE(review): a NumSectors of (XSA_SCCR_COUNT_MASK + 1) encodes as 0
+ * in the count field -- presumably the ACE interprets 0 as the maximum
+ * count; confirm against the System ACE register specification.
+ */
+ SectorCmd =
+ (NumSectors & XSA_SCCR_COUNT_MASK) | XSA_SCCR_WRITEDATA_MASK;
+ XSysAce_RegWrite16(InstancePtr->BaseAddress + XSA_SCCR_OFFSET,
+ SectorCmd);
+
+ /* Total transfer size; each sector is XSA_CF_SECTOR_SIZE bytes */
+ BytesToSend = XSA_CF_SECTOR_SIZE * NumSectors;
+
+ /*
+ * If in interrupt mode, set up the state variables and enable the
+ * data-buffer-ready interrupt. We do this after the write command above
+ * is done in order to guarantee that the interrupt occurs only after the
+ * first data buffer write is done below (an interrupt may or may not occur
+ * after the write command is issued)
+ */
+ if (XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+ /*
+ * Set the state variables. We're going to send one data buffer here in
+ * this routine, so adjust the buffer pointer and number remaining to
+ * reflect this. (A sector is larger than a data buffer, so
+ * NumRemaining stays non-negative; the interrupt handler sends the
+ * remainder one data buffer at a time.)
+ */
+ InstancePtr->NumRequested = BytesToSend;
+ InstancePtr->NumRemaining = BytesToSend - XSA_DATA_BUFFER_SIZE;
+ InstancePtr->BufferPtr = BufferPtr + XSA_DATA_BUFFER_SIZE;
+
+ /* Send only one data buffer in interrupt mode */
+ BytesToSend = XSA_DATA_BUFFER_SIZE;
+
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+ XSA_CR_DATARDYIRQ_MASK);
+ }
+
+ /* In polled mode this sends everything; in interrupt mode only the
+ * first data buffer */
+ NumSent = XSysAce_WriteDataBuffer(InstancePtr->BaseAddress, BufferPtr,
+ BytesToSend);
+ if (NumSent != BytesToSend) {
+ /* an error occurred, report this to the user */
+ return XST_FAILURE;
+ }
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Get the status of the FAT filesystem on the first valid partition of the
+* CompactFlash device such as the boot record and FAT types found.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* A 16-bit mask of status values. These values are defined in xsysace_l.h
+* with the prefix XSA_FAT_*.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u16 XSysAce_GetFatStatus(XSysAce * InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* The FAT filesystem status is reported directly by the FSR register */
+ return XSysAce_RegRead16(InstancePtr->BaseAddress + XSA_FSR_OFFSET);
+}
+
+/*****************************************************************************/
+/**
+*
+* This bit of ugliness allows us to present a structure to the user. The
+* byte buffer which was read from the CompactFlash is converted into the
+* XSysAce_CFParameters structure. The byte buffer is accessed by the indices
+* of the fields as defined at the top of this file. We do not read from
+* CompactFlash directly into the CF Parameter structure because of structure
+* packing problems.
+*
+* Note that we also need to perform endian conversion here since the System
+* ACE device gives us little endian data and we're (possibly) on a big endian
+* processor.
+*
+* @param ParamPtr is the structure to fill
+* @param BufPtr is the byte buffer containing the CF parameter data
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void FillParam(XSysAce_CFParameters * ParamPtr, u8 *BufPtr)
+{
+ u16 HiWord;
+ u16 LoWord;
+
+ /*
+ * NOTE(review): the (u16 *)/(u32 *) casts below assume the XSA_CFPARM_*
+ * offsets are suitably aligned within the byte buffer (or that the CPU
+ * tolerates unaligned loads) -- confirm for the target processor.
+ */
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_SIGNATURE]),
+ &ParamPtr->Signature);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_NUMCYLS]),
+ &ParamPtr->NumCylinders);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_RESERVED1]),
+ &ParamPtr->Reserved);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_NUMHEADS]),
+ &ParamPtr->NumHeads);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_BYTES_TRACK]),
+ &ParamPtr->NumBytesPerTrack);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_BYTES_SECT]),
+ &ParamPtr->NumBytesPerSector);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_SECTS_TRK]),
+ &ParamPtr->NumSectorsPerTrack);
+
+ /* NumSectorsPerCard is stored as two half-words, MSW first */
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_SECTS_HI]),
+ &HiWord);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_SECTS_LO]),
+ &LoWord);
+ ParamPtr->NumSectorsPerCard = ((u32) HiWord << 16) | (u32) LoWord;
+
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_VENDOR1]),
+ &ParamPtr->VendorUnique);
+
+ /* String fields are stored byte-swapped; copy with per-word swap */
+ WordCopySwap(&BufPtr[XSA_CFPARM_SERIAL_NO], ParamPtr->SerialNo, 20);
+
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_BUFFER_TYPE]),
+ &ParamPtr->BufferType);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_BUFFER_SIZE]),
+ &ParamPtr->BufferSize);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_ECC_BYTES]),
+ &ParamPtr->NumEccBytes);
+
+ WordCopySwap(&BufPtr[XSA_CFPARM_FW_VERSION], ParamPtr->FwVersion, 8);
+ WordCopySwap(&BufPtr[XSA_CFPARM_MODEL_NO], ParamPtr->ModelNo, 40);
+
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_MAX_SECTORS]),
+ &ParamPtr->MaxSectors);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_DBL_WORD]),
+ &ParamPtr->DblWord);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_CAPS]),
+ &ParamPtr->Capabilities);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_RESERVED2]),
+ &ParamPtr->Reserved2);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_PIO_MODE]),
+ &ParamPtr->PioMode);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_DMA_MODE]),
+ &ParamPtr->DmaMode);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_TRANSLATE]),
+ &ParamPtr->TranslationValid);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_CURCYLS]),
+ &ParamPtr->CurNumCylinders);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_CURHEADS]),
+ &ParamPtr->CurNumHeads);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_CURSECTS_TRK]),
+ &ParamPtr->CurSectorsPerTrack);
+ XIo_FromLittleEndian32(*((u32 *) &BufPtr[XSA_CFPARM_CURSECTS]),
+ &ParamPtr->CurSectorsPerCard);
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_MULTIPLE]),
+ &ParamPtr->MultipleSectors);
+ XIo_FromLittleEndian32(*((u32 *) &BufPtr[XSA_CFPARM_LBA_SECTS]),
+ &ParamPtr->LbaSectors);
+
+ WordCopySwap(&BufPtr[XSA_CFPARM_RESERVED3], ParamPtr->Reserved3, 132);
+
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_SECURITY]),
+ &ParamPtr->SecurityStatus);
+
+ WordCopySwap(&BufPtr[XSA_CFPARM_VENDOR2], ParamPtr->VendorUniqueBytes,
+ 62);
+
+ XIo_FromLittleEndian16(*((u16 *) &BufPtr[XSA_CFPARM_POWER]),
+ &ParamPtr->PowerDesc);
+
+ WordCopySwap(&BufPtr[XSA_CFPARM_RESERVED4], ParamPtr->Reserved4, 190);
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Utility to copy words and swap the endianism on the fly.
+*
+* @param SourcePtr is a pointer to the source byte buffer
+* @param DestPtr is a pointer to the destination byte buffer
+* @param NumBytes is the number of bytes to copy
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* NumBytes should be even but if it isn't, the function increases by 1
+* to correct.
+*
+******************************************************************************/
+static void WordCopySwap(u8 *SourcePtr, u8 *DestPtr, int NumBytes)
+{
+ int i;
+
+ /* make sure the requested length is even, if not, increase by 1 */
+
+ if ((NumBytes & 0x00000001) != 0) {
+ NumBytes += 1;
+ }
+
+ /*
+ * Copy a 16-bit word at a time, exchanging the two bytes of each word.
+ * NOTE(review): because an odd NumBytes is rounded up above, one byte
+ * beyond the requested length is read and written -- both buffers must
+ * be sized for the rounded-up (even) length.
+ */
+ for (i = 0; i < NumBytes; i += 2) {
+ DestPtr[i + 1] = SourcePtr[i];
+ DestPtr[i] = SourcePtr[i + 1];
+ }
+}
--- /dev/null
+/* $Id: xsysace_g.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/*****************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+*****************************************************************************/
+/****************************************************************************/
+/**
+*
+* @file xsysace_g.c
+*
+* This file contains a configuration table that specifies the configuration of
+* System ACE devices in the system.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace.h"
+#include "xparameters.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Prototypes ******************************/
+
+/**
+ * The configuration table for System ACE devices in the system. Each
+ * device should have an entry in this table.
+ */
+XSysAce_Config XSysAce_ConfigTable[XPAR_XSYSACE_NUM_INSTANCES] = {
+ {
+ XPAR_SYSACE_0_DEVICE_ID, /* unique device ID (from xparameters.h) */
+ XPAR_SYSACE_0_BASEADDR} /* base address of device registers */
+};
--- /dev/null
+/* $Id: xsysace_intr.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_intr.c
+*
+* Contains functions related to System ACE interrupt mode. The driver's
+* interrupt handler, XSysAce_InterruptHandler(), must be connected by the
+* user to the interrupt controller.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace.h"
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void HandleDataBuffer(XSysAce * InstancePtr, u32 StatusReg);
+static void DataComplete(XSysAce * InstancePtr);
+
+
+/*****************************************************************************/
+/**
+*
+* Enable System ACE interrupts. There are three interrupts that can be enabled.
+* The error interrupt enable serves as the driver's means to determine whether
+* interrupts have been enabled or not. The configuration-done interrupt is not
+* enabled here, instead it is enabled during a reset - which can cause a
+* configuration process to start. The data-buffer-ready interrupt is not enabled
+* here either. It is enabled when a read or write operation is started. The
+* reason for not enabling the latter two interrupts is that the status bits
+* may be set as a leftover of an earlier occurrence of the interrupt.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to work on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_EnableInterrupt(XSysAce * InstancePtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * By default, enable only the error interrupt. The error IRQ enable bit
+ * also serves as the driver's indication that interrupt mode is active
+ * (see XSysAce_mIsIntrEnabled usage elsewhere in this driver).
+ */
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress, XSA_CR_ERRORIRQ_MASK);
+
+ /* Clear the reset on the interrupt line if it was in reset */
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress, ~XSA_CR_RESETIRQ_MASK);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Disable all System ACE interrupts and hold the interrupt request line of
+* the device in reset.
+*
+* @param InstancePtr is a pointer to the XSysAce instance that just interrupted.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_DisableInterrupt(XSysAce * InstancePtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Put the interrupt request line in reset (it is left held in reset;
+ * XSysAce_EnableInterrupt releases it) */
+ XSysAce_mOrControlReg(InstancePtr->BaseAddress, XSA_CR_RESETIRQ_MASK);
+
+ /* Clear the interrupt enable bits */
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress,
+ ~(XSA_CR_DATARDYIRQ_MASK | XSA_CR_ERRORIRQ_MASK |
+ XSA_CR_CFGDONEIRQ_MASK));
+}
+
+
+/*****************************************************************************/
+/**
+*
+* The interrupt handler for the System ACE driver. This handler must be
+* connected by the user to an interrupt controller or source. This function
+* does not save or restore context.
+*
+* This function continues reading or writing to the compact flash if such an
+* operation is in progress, and notifies the upper layer software through
+* the event handler once the operation is complete or an error occurs. On an
+* error, any command currently in progress is aborted.
+*
+* @param InstancePtr is a pointer to the XSysAce instance that just interrupted.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_InterruptHandler(void *InstancePtr)
+{
+ u32 StatusReg;
+ XSysAce *AcePtr = (XSysAce *) InstancePtr;
+
+ XASSERT_VOID(InstancePtr != NULL);
+
+ /*
+ * Get the status in order to process each interrupt that has occurred
+ */
+ StatusReg = XSysAce_mGetStatusReg(AcePtr->BaseAddress);
+
+ /*
+ * Reset the interrupt line to effectively clear the interrupt conditions.
+ * We need to set the bit to clear the interrupts, then clear the bit so
+ * that new interrupts can be generated.
+ */
+ XSysAce_mOrControlReg(AcePtr->BaseAddress, XSA_CR_RESETIRQ_MASK);
+ XSysAce_mAndControlReg(AcePtr->BaseAddress, ~XSA_CR_RESETIRQ_MASK);
+
+ /*
+ * Check for data buffer ready, which means an operation (either read or
+ * write) is in progress.
+ */
+ if (StatusReg & XSA_SR_DATABUFRDY_MASK) {
+ /*
+ * Handles the data buffer, and invokes the callback to the user for
+ * data transfer completion.
+ */
+ HandleDataBuffer(AcePtr, StatusReg);
+ }
+
+ /*
+ * Check for completion of JTAG configuration and report the event up.
+ * We only do this if the CFGDONE interrupt is enabled since the CFGDONE
+ * status only gets cleared when the configuration controller is reset,
+ * which we do not do unless requested by the user because it may cause
+ * a configuration process to start. We could have gotten into this
+ * interrupt handler by another interrupt, yet have a leftover CFGDONE
+ * status from an earlier configuration process.
+ */
+ if ((StatusReg & XSA_SR_CFGDONE_MASK) &&
+ (XSysAce_mGetControlReg(AcePtr->BaseAddress) &
+ XSA_CR_CFGDONEIRQ_MASK)) {
+ /*
+ * Clear the bit indicating MPU is the source of configuration data
+ * since we're done configuring from the MPU for now. Also clear the
+ * force CFGMODE bit and the CFGSTART bit, basically undoing what was
+ * done in XSysAce_ProgramChain(). Disable the interrupts since the
+ * CFGDONE status does not get cleared unless a reset occurs - and in
+ * the meantime we may get into this interrupt handler again.
+ */
+ XSysAce_mAndControlReg(AcePtr->BaseAddress,
+ ~(XSA_CR_CFGSEL_MASK |
+ XSA_CR_CFGSTART_MASK |
+ XSA_CR_CFGDONEIRQ_MASK |
+ XSA_CR_DATARDYIRQ_MASK |
+ XSA_CR_FORCECFGMODE_MASK));
+
+ AcePtr->EventHandler(AcePtr->EventRef, XSA_EVENT_CFG_DONE);
+ }
+
+ /*
+ * Check for errors and report the event (the user is responsible for
+ * retrieving and interpreting the errors). We only do this if the error
+ * interrupt is enabled since the error status only gets cleared when the
+ * CompactFlash or configuration controller is reset, which we do not do
+ * because it may cause a configuration process to start. We could have
+ * entered this interrupt handler by another interrupt and have a leftover
+ * error status from a previous error.
+ */
+ if ((StatusReg & (XSA_SR_CFGERROR_MASK | XSA_SR_CFCERROR_MASK)) &&
+ (XSysAce_mGetControlReg(AcePtr->BaseAddress) &
+ XSA_CR_ERRORIRQ_MASK)) {
+ /* Clear the transfer state to effectively abort the operation */
+ AcePtr->NumRequested = 0;
+ AcePtr->NumRemaining = 0;
+ AcePtr->BufferPtr = NULL;
+
+ /*
+ * Disable the error interrupt since the only way to clear the
+ * error status is to reset the CF or the configuration controller,
+ * neither of which we want to do here since the consequences may
+ * be undesirable (i.e., may cause a reconfiguration). The user
+ * will need to perform the reset based on the error event.
+ */
+ XSysAce_mAndControlReg(AcePtr->BaseAddress,
+ ~XSA_CR_ERRORIRQ_MASK);
+
+ AcePtr->EventHandler(AcePtr->EventRef, XSA_EVENT_ERROR);
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the callback function for handling events. The upper layer software
+* should call this function during initialization. The events are passed
+* asynchronously to the upper layer software. The events are described in
+* xsysace.h and are named XSA_EVENT_*.
+*
+* Note that the callback is invoked by the driver within interrupt context, so
+* it needs to do its job quickly. If there are potentially slow operations
+* within the callback, these should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param FuncPtr is the pointer to the callback function.
+* @param CallBackRef is a reference pointer to be passed back to the upper
+* layer.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_SetEventHandler(XSysAce * InstancePtr,
+ XSysAce_EventHandler FuncPtr, void *CallBackRef)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Store the callback and its context for use at interrupt time */
+ InstancePtr->EventHandler = FuncPtr;
+ InstancePtr->EventRef = CallBackRef;
+}
+
+/*****************************************************************************/
+/**
+*
+* Handle a data-buffer-ready interrupt. If we get the interrupt when reading,
+* it means there is still data to read since the interrupt does not occur after
+* reading the last data buffer. If we get the interrupt when writing, there
+* may or may not be data left to write since the interrupt does occur after the
+* last data buffer is written.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param StatusReg is the contents of the status register, read at the start
+* of the interrupt service routine.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void HandleDataBuffer(XSysAce * InstancePtr, u32 StatusReg)
+{
+ /* By default, transfer a whole data buffer */
+ int BytesToTransfer = XSA_DATA_BUFFER_SIZE;
+
+ /*
+ * Check to see if number of bytes remaining is less than the data buffer
+ * size. If it is, we need to adjust the remaining bytes to transfer.
+ */
+ if (InstancePtr->NumRemaining < XSA_DATA_BUFFER_SIZE) {
+ BytesToTransfer = InstancePtr->NumRemaining;
+ }
+
+ /*
+ * Transfer only one data buffer at a time, which is 32 bytes. Note that
+ * errors will be handled by an error interrupt occurring, so no need to
+ * check for them here. The DATABUFMODE status bit distinguishes a write
+ * in progress from a read in progress.
+ */
+ if (StatusReg & XSA_SR_DATABUFMODE_MASK) {
+ /*
+ * A write operation in progress, so if there is data remaining then
+ * write the buffer. If no data is remaining, clean up.
+ */
+ if (InstancePtr->NumRemaining > 0) {
+ (void) XSysAce_WriteDataBuffer(InstancePtr->BaseAddress,
+ InstancePtr->BufferPtr,
+ BytesToTransfer);
+
+ /*
+ * Decrement the number of bytes remaining to be transferred and
+ * adjust the buffer pointer appropriately.
+ */
+ InstancePtr->NumRemaining -= BytesToTransfer;
+ InstancePtr->BufferPtr += BytesToTransfer;
+ }
+ else {
+ /* Done writing data, so clean up */
+ DataComplete(InstancePtr);
+ }
+ }
+ else {
+ /* A read operation in progress, so read the buffer */
+ (void) XSysAce_ReadDataBuffer(InstancePtr->BaseAddress,
+ InstancePtr->BufferPtr,
+ BytesToTransfer);
+
+ /*
+ * Decrement the number of bytes remaining to be transferred and
+ * adjust the buffer pointer appropriately. If it was the last buffer,
+ * we're done and we can cleanup.
+ */
+ InstancePtr->NumRemaining -= BytesToTransfer;
+ InstancePtr->BufferPtr += BytesToTransfer;
+
+ if (InstancePtr->NumRemaining == 0) {
+ /* Done reading data, so clean up */
+ DataComplete(InstancePtr);
+ }
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Handle cleanup when a data transfer is complete. This means initializing the
+* state variables, disabling the data-buffer-ready interrupt, and sending the
+* event to the user.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void DataComplete(XSysAce * InstancePtr)
+{
+ /* Reset the transfer state variables */
+ InstancePtr->NumRequested = 0;
+ InstancePtr->NumRemaining = 0;
+ InstancePtr->BufferPtr = NULL;
+
+ /*
+ * Disable the data-buffer-ready interrupt. This isn't necessary when
+ * reading since the DATABUFRDY status bit is cleared by the ACE after
+ * the last data buffer is read. However, the ACE isn't currently
+ * smart enough to clear the DATABUFRDY status bit after the last data
+ * buffer is written during a write operation. So, we need to use the
+ * enable/disable interrupt bit to control its usefulness.
+ */
+ XSysAce_mAndControlReg(InstancePtr->BaseAddress,
+ ~XSA_CR_DATARDYIRQ_MASK);
+
+ /*
+ * The same code is executed for JTAG configuration as well as CompactFlash
+ * transfers, so we need to distinguish between JTAG config done and CF
+ * data transfer done. We look at the CFGSEL value in the control register
+ * to determine if an MPU JTAG config process has just completed. The
+ * CFG_DONE event is passed up later by the main interrupt handler.
+ */
+ if ((XSysAce_mGetControlReg(InstancePtr->BaseAddress)
+ & XSA_CR_CFGSEL_MASK) == 0) {
+ /* no JTAG configuration in progress */
+ InstancePtr->EventHandler(InstancePtr->EventRef,
+ XSA_EVENT_DATA_DONE);
+ }
+}
--- /dev/null
+/* $Id: xsysace_jtagcfg.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_jtagcfg.c
+*
+* Contains functions to control the configuration of the target FPGA chain on
+* the System ACE via the JTAG configuration port.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace.h"
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Reset the JTAG configuration controller. This comprises a reset of the JTAG
+* configuration controller and the CompactFlash controller (if it is currently
+* being accessed by the configuration controller). Note that the MPU controller
+* is not reset, meaning the MPU registers remain unchanged. The configuration
+* controller is reset then released from reset in this function.
+*
+* The CFGDONE status (and therefore interrupt) is cleared when the configuration
+* controller is reset. If interrupts have been enabled, we go ahead and enable
+* the CFGDONE interrupt here. This means that if and when a configuration
+* process starts as a result of this reset, an interrupt will be received when
+* it is complete.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* This function is not thread-safe.
+*
+******************************************************************************/
+void XSysAce_ResetCfg(XSysAce * InstancePtr)
+{
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Assert reset of the configuration controller (CFGRESET bit) */
+	XSysAce_mOrControlReg(InstancePtr->BaseAddress, XSA_CR_CFGRESET_MASK);
+
+	/*
+	 * If in interrupt mode, enable the CFGDONE and error interrupts.
+	 * A reset clears the CFGDONE and error statuses, so we're going to
+	 * re-enable the interrupts here so any new errors or CFGDONEs will be
+	 * caught. This is done while the controller is still held in reset,
+	 * before the release below.
+	 */
+	if (XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+		XSysAce_mOrControlReg(InstancePtr->BaseAddress,
+				      XSA_CR_CFGDONEIRQ_MASK |
+				      XSA_CR_ERRORIRQ_MASK);
+	}
+
+	/* Release the reset of the configuration controller */
+	XSysAce_mAndControlReg(InstancePtr->BaseAddress, ~XSA_CR_CFGRESET_MASK);
+}
+
+/*****************************************************************************/
+/**
+*
+* Select the configuration address (or file) from the CompactFlash to be used
+* for configuration of the target FPGA chain.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param Address is the address or file number to be used as the bitstream to
+* configure the target FPGA devices. There are 8 possible files, so
+* the value of this parameter can range from 0 to 7.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_SetCfgAddr(XSysAce * InstancePtr, unsigned int Address)
+{
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(Address < 8);	/* only 8 configuration files: 0..7 */
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/*
+	 * Set the control register with the address and the bit that forces
+	 * the use of the control register address bits instead of the address
+	 * pins on the device.
+	 */
+	XSysAce_mSetCfgAddr(InstancePtr->BaseAddress, Address);
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the start mode for configuration of the target FPGA chain from
+* CompactFlash. The configuration process only starts after a reset. The
+* user can indicate that the configuration should start immediately after a
+* reset, or the configuration process can be delayed until the user commands
+* it to start (using this function). The configuration controller can be
+* reset using XSysAce_ResetCfg().
+*
+* The user can select which configuration file on the CompactFlash to use using
+* the XSysAce_SetCfgAddr() function. If the user intends to configure the target
+* FPGA chain directly from the MPU port, this function is not needed. Instead,
+* the user would simply call XSysAce_ProgramChain().
+*
+* The user can use XSysAce_IsCfgDone() when in polled mode to determine if
+* the configuration is complete. If in interrupt mode, the event
+* XSA_EVENT_CFG_DONE will be returned asynchronously to the user when the
+* configuration is complete. The user must call XSysAce_EnableInterrupt() to put
+* the device/driver into interrupt mode.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param ImmedOnReset can be set to TRUE to indicate the configuration process
+* will start immediately after a reset of the ACE configuration
+* controller, or it can be set to FALSE to indicate the configuration
+* process is delayed after a reset until the user starts it (using this
+* function).
+* @param StartCfg is a boolean indicating whether to start the configuration
+* process or not. When ImmedOnReset is set to TRUE, this value is
+* ignored. When ImmedOnReset is set to FALSE, then this value controls
+* when the configuration process is started. When set to TRUE the
+* configuration process starts (assuming a reset of the device has
+* occurred), and when set to FALSE the configuration process does not
+* start.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XSysAce_SetStartMode(XSysAce * InstancePtr, u32 ImmedOnReset, u32 StartCfg)
+{
+	u32 Control;		/* working copy of the control register */
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Get the current contents of the control register */
+	Control = XSysAce_mGetControlReg(InstancePtr->BaseAddress);
+
+	/*
+	 * Since the user has called this function, we want to tell the ACE
+	 * controller to look at the CFGMODE bit of the control register rather
+	 * than the CFGMODE pin of the device to determine when to start a
+	 * configuration process.
+	 */
+	Control |= XSA_CR_FORCECFGMODE_MASK;
+
+	/* Set or clear the CFGMODE bit of the control register */
+	if (ImmedOnReset) {
+		Control |= XSA_CR_CFGMODE_MASK;	/* immediate on reset */
+	}
+	else {
+		Control &= ~XSA_CR_CFGMODE_MASK;	/* wait for start bit */
+	}
+
+
+	/* Set or clear the CFGSTART bit of the control register */
+	if (StartCfg) {
+		Control |= XSA_CR_CFGSTART_MASK;
+	}
+	else {
+		Control &= ~XSA_CR_CFGSTART_MASK;
+	}
+
+	/* All mode/start bits are committed in a single register write */
+	XSysAce_mSetControlReg(InstancePtr->BaseAddress, Control);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Program the target FPGA chain through the configuration JTAG port. This
+* allows the user to program the devices on the target FPGA chain from the MPU
+* port instead of from CompactFlash. The user specifies a buffer and the number
+* of bytes to write. The buffer should be equivalent to an ACE (.ace) file.
+*
+* Note that when loading the ACE file via the MPU port, the first sector of the
+* ACE file is discarded. The CF filesystem controller in the System ACE device
+* knows to skip the first sector when the ACE file comes from the CF, but the
+* CF filesystem controller is bypassed when the ACE file comes from the MPU
+* port. For this reason, this function skips the first sector of the buffer
+* passed in.
+*
+* In polled mode, the write is blocking. In interrupt mode, the write is
+* non-blocking and an event, XSA_EVENT_CFG_DONE, is returned to the user in
+* the asynchronous event handler when the configuration is complete.
+*
+* An MPU lock, obtained using XSysAce_Lock(), must be granted before calling
+* this function. If a lock has not been granted, no action is taken and an
+* error is returned.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param BufferPtr is a pointer to a buffer that will be used to program
+* the configuration JTAG devices.
+* @param NumBytes is the number of bytes in the buffer. We assume that there
+* is at least one sector of data in the .ace file, which is the
+* information sector.
+*
+* @return
+*
+* - XST_SUCCESS if the write was successful. In interrupt mode, this does not
+* mean the write is complete, only that it has begun. An event is returned
+* to the user when the write is complete.
+* - XST_SYSACE_NO_LOCK if no MPU lock has yet been granted
+* - XST_FAILURE if an error occurred during the write. The user should call
+* XSysAce_GetErrors() to determine the cause of the error.
+*
+* @note
+*
+* None.
+*
+* @internal
+*
+* The System ACE controller has a 32-byte buffer which holds data. The entire
+* buffer must be written to ensure that it gets sent to the configuration
+* JTAG port. If the number of bytes specified by the user is not a multiple
+* of 32, the driver will pad the remaining bytes of the System ACE buffer with
+* zeroes in order to write the entire buffer.
+*
+******************************************************************************/
+int XSysAce_ProgramChain(XSysAce * InstancePtr, u8 *BufferPtr, int NumBytes)
+{
+	u32 ControlMask;	/* working copy of the control register */
+	int BytesToSend;	/* bytes to write in this call */
+	int NumSent;		/* bytes actually written */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(BufferPtr != NULL);
+	XASSERT_NONVOID(NumBytes > XSA_CF_SECTOR_SIZE);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* If a lock has not been granted, return an error */
+	if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+		return XST_SYSACE_NO_LOCK;
+	}
+
+	/*
+	 * Set up the configuration controller to use the MPU port as the
+	 * source of configuration data (instead of the CF). The following
+	 * control flow comes directly from the System ACE specification, except
+	 * the reset was moved to after the other control register bits are set.
+	 * Putting it into reset before the bits are set seemed to produce
+	 * configuration errors occasionally.
+	 */
+	ControlMask = XSysAce_mGetControlReg(InstancePtr->BaseAddress);
+
+	/* Select MPU as the source */
+	ControlMask |= XSA_CR_CFGSEL_MASK;
+	XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+
+	/* Tell controller to wait for start bit from MPU */
+	ControlMask |= XSA_CR_FORCECFGMODE_MASK;
+	ControlMask &= ~XSA_CR_CFGMODE_MASK;
+	XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+
+	/* Set the start bit */
+	ControlMask |= XSA_CR_CFGSTART_MASK;
+	XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+
+	/* Put the configuration controller into a reset condition */
+	ControlMask |= XSA_CR_CFGRESET_MASK;
+	XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+
+	/* Clear the reset condition, which starts the process */
+	ControlMask &= ~XSA_CR_CFGRESET_MASK;
+	XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+
+	/*
+	 * Set up number of bytes to send. Default to the entire buffer, which
+	 * will be true in polled mode. In interrupt mode, modify this value to
+	 * send only one data buffer of data. Always skip the first sector per
+	 * the comment above.
+	 */
+	BytesToSend = NumBytes - XSA_CF_SECTOR_SIZE;
+
+	/*
+	 * The number of bytes to write depends on interrupt or polled mode
+	 */
+	if (XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+		/*
+		 * In interrupt mode, so enable the data-buffer-ready and
+		 * configuration-done interrupts. Also, set up the state variables for
+		 * the interrupt handler to transfer the remaining data after the
+		 * initial write below. We need to write one data buffer here in this
+		 * function in order to cause the data-buffer-ready interrupt to occur
+		 * for subsequent writes.
+		 */
+		ControlMask |= XSA_CR_DATARDYIRQ_MASK | XSA_CR_CFGDONEIRQ_MASK;
+		XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+
+		/* Send only one data buffer to begin with (if there is that much) */
+		if (BytesToSend > XSA_DATA_BUFFER_SIZE) {
+			BytesToSend = XSA_DATA_BUFFER_SIZE;
+		}
+
+		/*
+		 * Setup state variables for the interrupt handler. Skip the first
+		 * sector per the comment above, and also skip the first data buffer
+		 * since it is written below.
+		 */
+		InstancePtr->NumRequested = NumBytes - XSA_CF_SECTOR_SIZE;
+		InstancePtr->BufferPtr =
+			BufferPtr + XSA_CF_SECTOR_SIZE + BytesToSend;
+		InstancePtr->NumRemaining =
+			NumBytes - XSA_CF_SECTOR_SIZE - BytesToSend;
+	}
+
+	/*
+	 * NOTE: in polled mode this is a blocking call — WriteDataBuffer
+	 * busy-waits on the data-buffer-ready status with no timeout.
+	 */
+	NumSent = XSysAce_WriteDataBuffer(InstancePtr->BaseAddress,
+					  BufferPtr + XSA_CF_SECTOR_SIZE,
+					  BytesToSend);
+	if (NumSent != BytesToSend) {
+		/* an error occurred, report this to the user */
+		return XST_FAILURE;
+	}
+
+	/*
+	 * If in polled mode, restore the control register to the way it was
+	 */
+	if (!XSysAce_mIsIntrEnabled(InstancePtr->BaseAddress)) {
+		/*
+		 * Unselect MPU as the source, tell controller to use CFGMODE pin,
+		 * and clear the start bit.
+		 */
+		ControlMask &= ~(XSA_CR_CFGSEL_MASK | XSA_CR_FORCECFGMODE_MASK |
+				 XSA_CR_CFGSTART_MASK);
+		XSysAce_mSetControlReg(InstancePtr->BaseAddress, ControlMask);
+	}
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Check to see if the configuration of the target FPGA chain is complete. This
+* function is typically only used in polled mode. In interrupt mode, an event
+* (XSA_EVENT_CFG_DONE) is returned to the user in the asynchronous event
+* handler.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* TRUE if the configuration is complete. FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XSysAce_IsCfgDone(XSysAce * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Use the layer 0 macro by the same name; returns TRUE when done */
+
+	return XSysAce_mIsCfgDone(InstancePtr->BaseAddress);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Get the sector ID of the CompactFlash sector being used for configuration of
+* the target FPGA chain. This sector ID (or logical block address) only affects
+* transfers between the ACE configuration logic and the CompactFlash card.
+* This function is typically used for debug purposes to determine which sector
+* was being accessed when an error occurred.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* The sector ID (logical block address) being used for data transfers between
+* the ACE configuration logic and the CompactFlash. Sector IDs range from 0
+* to 0x10000000.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XSysAce_GetCfgSector(XSysAce * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/*
+	 * NOTE(review): XSA_CLR_OFFSET is presumably the configuration LBA
+	 * register — confirm against xsysace_l.h register definitions.
+	 */
+	return XSysAce_RegRead32(InstancePtr->BaseAddress + XSA_CLR_OFFSET);
+}
--- /dev/null
+/* $Id: xsysace_l.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_l.c
+*
+* This file contains low-level functions to read and write CompactFlash
+* sectors and ACE controller registers. These functions can be used when only
+* the low-level functionality of the driver is desired. The user would
+* typically use the high-level driver functions defined in xsysace.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/14/02 work in progress
+* 1.00a rpm 09/16/03 Added include of xparameters.h in order to get
+* the XPAR_XSYSACE_MEM_WIDTH definition.
+* 1.00a rpm 02/17/04 Fixed WriteSector function command
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Set up the access width of the MPU registers based on compile-time constants.
+ * If hardware requires 32-bit aligned addresses (XSA_ADDR_ALIGN=4) to access
+ * the MPU registers, then access all of them as 32 bits. If hardware allows
+ * 8-bit aligned addresses (XSA_ADDR_ALIGN=1, or not 4) to access the MPU
+ * registers, access them as 8 or 16 bits depending on the bus mode of the ACE
+ * controller.
+ */
+#if (XSA_ADDR_ALIGN == 4)
+
+#define XIo_In XIo_In32
+#define XIo_Out XIo_Out32
+
+#else
+
+#if (XPAR_XSYSACE_MEM_WIDTH == 16)
+#define XIo_In XIo_In16
+#define XIo_Out XIo_Out16
+#else /* XPAR_XSYSACE_MEM_WIDTH */
+#define XIo_In XIo_In8
+#define XIo_Out XIo_Out8
+#endif /* XPAR_XSYSACE_MEM_WIDTH */
+
+#endif /* (XSA_ADDR_ALIGN == 4) */
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Read a 32-bit value from the given address. Based on a compile-time
+* constant, do the read in two 16-bit reads or four 8-bit reads.
+*
+* @param Address is the address to read from.
+*
+* @return The 32-bit value of the address.
+*
+* @note No need for endian conversion in 8-bit mode since this function
+* gets the bytes into their proper lanes in the 32-bit word.
+*
+******************************************************************************/
+#if (XPAR_XSYSACE_MEM_WIDTH == 16)
+u32 XSysAce_RegRead32(u32 Address)
+{
+	u16 Data;
+	u16 ConvertedData;
+	u32 Value = 0;
+
+	/*
+	 * Need to endian convert each 16-bit halfword. The ACE registers are
+	 * little-endian, so we read the two LSBs first, endian convert, then
+	 * put them in the LSB lanes of the 32-bit word. etc...
+	 */
+	Data = (u16) XIo_In(Address);
+	XIo_FromLittleEndian16(Data, &ConvertedData);
+	Value = (u32) ConvertedData;
+
+	Data = (u16) XIo_In(Address + (2 * XSA_ADDR_ALIGN));
+	XIo_FromLittleEndian16(Data, &ConvertedData);
+	Value |= ((u32) ConvertedData << 16);
+
+	return Value;
+}
+#else
+u32 XSysAce_RegRead32(u32 Address)
+{
+	u32 Value = 0;
+
+	/*
+	 * The ACE registers are little-endian always. This code reads each 8-bit
+	 * register value, in order from LSB to MSB, and shifts it to the correct
+	 * byte lane of the 32-bit word. This code should work on both
+	 * little-endian and big-endian processors.
+	 */
+	Value = (u32) XIo_In(Address);
+	Value |= ((u32) XIo_In(Address + (1 * XSA_ADDR_ALIGN)) << 8);
+	Value |= ((u32) XIo_In(Address + (2 * XSA_ADDR_ALIGN)) << 16);
+	Value |= ((u32) XIo_In(Address + (3 * XSA_ADDR_ALIGN)) << 24);
+
+	return Value;
+}
+#endif
+
+
+/*****************************************************************************/
+/**
+*
+* Read a 16-bit value from the given address. Based on a compile-time
+* constant, do the read in one 16-bit read or two 8-bit reads.
+*
+* @param Address is the address to read from.
+*
+* @return The 16-bit value of the address.
+*
+* @note No need for endian conversion in 8-bit mode since this function
+* gets the bytes into their proper lanes in the 16-bit word.
+*
+******************************************************************************/
+#if (XPAR_XSYSACE_MEM_WIDTH == 16)
+u16 XSysAce_RegRead16(u32 Address)
+{
+	u16 Data;
+	u16 ConvertedData;
+
+	/*
+	 * Need to endian convert the 16-bit value. The ACE registers are little-
+	 * endian.
+	 */
+	Data = (u16) XIo_In(Address);
+	XIo_FromLittleEndian16(Data, &ConvertedData);
+	return ConvertedData;
+}
+#else
+u16 XSysAce_RegRead16(u32 Address)
+{
+	u16 Value = 0;
+
+	/*
+	 * The ACE registers are little-endian always. This code reads each 8-bit
+	 * register value, in order from LSB to MSB, and shifts it to the correct
+	 * byte lane of the 16-bit word. This code should work on both
+	 * little-endian and big-endian processors.
+	 */
+	Value = (u16) XIo_In(Address);
+	Value |= ((u16) XIo_In(Address + (1 * XSA_ADDR_ALIGN)) << 8);
+
+	return Value;
+}
+#endif
+
+
+/*****************************************************************************/
+/**
+*
+* Write a 32-bit value to the given address. Based on a compile-time
+* constant, do the write in two 16-bit writes or four 8-bit writes.
+*
+* @param Address is the address to write to.
+* @param Data is the value to write
+*
+* @return None.
+*
+* @note No need for endian conversion in 8-bit mode since this function
+* writes the bytes into their proper lanes based on address.
+*
+******************************************************************************/
+#if (XPAR_XSYSACE_MEM_WIDTH == 16)
+void XSysAce_RegWrite32(u32 Address, u32 Data)
+{
+	u16 Hword;
+	u16 ConvertedData;
+
+	/*
+	 * The ACE registers are little-endian always. This code takes each 16-bit
+	 * value of the incoming 32-bit word and endian converts it, then writes it
+	 * to the ACE register.
+	 */
+	Hword = (u16) Data;
+	XIo_ToLittleEndian16(Hword, &ConvertedData);
+	XIo_Out(Address, ConvertedData);
+
+	Hword = (u16) (Data >> 16);
+	XIo_ToLittleEndian16(Hword, &ConvertedData);
+	XIo_Out(Address + (2 * XSA_ADDR_ALIGN), ConvertedData);
+}
+#else
+void XSysAce_RegWrite32(u32 Address, u32 Data)
+{
+	/*
+	 * The ACE registers are little-endian always. This code writes each 8-bit
+	 * register value, in order from LSB to MSB, taking it from the correct
+	 * byte lane of the 32-bit word. This code should work on both
+	 * little-endian and big-endian processors.
+	 */
+	XIo_Out(Address, (u8) Data);
+	XIo_Out(Address + (1 * XSA_ADDR_ALIGN), (u8) (Data >> 8));
+	XIo_Out(Address + (2 * XSA_ADDR_ALIGN), (u8) (Data >> 16));
+	XIo_Out(Address + (3 * XSA_ADDR_ALIGN), (u8) (Data >> 24));
+}
+#endif
+
+
+/*****************************************************************************/
+/**
+*
+* Write a 16-bit value to the given address. Based on a compile-time
+* constant, do the write in one 16-bit write or two 8-bit writes.
+*
+* @param Address is the address to write to.
+* @param Data is the value to write
+*
+* @return None.
+*
+* @note No need for endian conversion in 8-bit mode since this function
+* writes the bytes into their proper lanes based on address.
+*
+******************************************************************************/
+#if (XPAR_XSYSACE_MEM_WIDTH == 16)
+void XSysAce_RegWrite16(u32 Address, u16 Data)
+{
+	u16 ConvertedData;
+
+	/*
+	 * The ACE registers are little-endian always. This code takes the incoming
+	 * 16-bit and endian converts it, then writes it to the ACE register.
+	 */
+	XIo_ToLittleEndian16(Data, &ConvertedData);
+	XIo_Out(Address, ConvertedData);
+}
+#else
+void XSysAce_RegWrite16(u32 Address, u16 Data)
+{
+	/*
+	 * The ACE registers are little-endian always. This code writes each 8-bit
+	 * register value, in order from LSB to MSB, taking it from the correct
+	 * byte lane of the 16-bit word. This code should work on both
+	 * little-endian and big-endian processors.
+	 */
+	XIo_Out(Address, (u8) Data);
+	XIo_Out(Address + (1 * XSA_ADDR_ALIGN), (u8) (Data >> 8));
+}
+#endif
+
+
+/*****************************************************************************/
+/**
+*
+* Read a CompactFlash sector. This is a blocking, low-level function which
+* does not return until the specified sector is read.
+*
+* @param BaseAddress is the base address of the device
+* @param SectorId is the id of the sector to read
+* @param BufferPtr is a pointer to a buffer where the data will be stored.
+*
+* @return
+*
+* The number of bytes read. If this number is not equal to the sector size,
+* 512 bytes, then an error occurred.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XSysAce_ReadSector(u32 BaseAddress, u32 SectorId, u8 *BufferPtr)
+{
+	int NumRead;		/* bytes actually read from the data buffer */
+
+	/*
+	 * Request and wait for the lock.
+	 * NOTE(review): presumably spins until the MPU lock is granted with no
+	 * timeout — confirm against the XSysAce_mWaitForLock macro definition.
+	 */
+	XSysAce_mWaitForLock(BaseAddress);
+
+	/* See if the CF is ready for a command */
+	if (!XSysAce_mIsReadyForCmd(BaseAddress)) {
+		return 0;
+	}
+
+	/* Write the sector ID (LBA) */
+	XSysAce_RegWrite32(BaseAddress + XSA_MLR_OFFSET, SectorId);
+
+	/* Send a read command of one sector to the controller */
+	XSysAce_RegWrite16(BaseAddress + XSA_SCCR_OFFSET,
+			   XSA_SCCR_READDATA_MASK | 1);
+
+	/* Reset configuration controller (be sure to keep the lock) */
+	XSysAce_mOrControlReg(BaseAddress, XSA_CR_CFGRESET_MASK);
+
+	/* Read a sector of data from the data buffer (blocking) */
+	NumRead = XSysAce_ReadDataBuffer(BaseAddress, BufferPtr,
+					 XSA_CF_SECTOR_SIZE);
+
+	/* Clear reset of configuration controller and locks */
+	XSysAce_mAndControlReg(BaseAddress, ~(XSA_CR_CFGRESET_MASK |
+					      XSA_CR_LOCKREQ_MASK));
+
+	return NumRead;
+}
+
+/*****************************************************************************/
+/**
+*
+* Write a CompactFlash sector. This is a blocking, low-level function which
+* does not return until the specified sector is written in its entirety.
+*
+* @param BaseAddress is the base address of the device
+* @param SectorId is the id of the sector to write
+* @param BufferPtr is a pointer to a buffer used to write the sector.
+*
+* @return
+*
+* The number of bytes written. If this number is not equal to the sector size,
+* 512 bytes, then an error occurred.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XSysAce_WriteSector(u32 BaseAddress, u32 SectorId, u8 *BufferPtr)
+{
+	int NumSent;		/* bytes actually written to the data buffer */
+
+	/*
+	 * Get the lock.
+	 * NOTE(review): presumably spins until the MPU lock is granted with no
+	 * timeout — confirm against the XSysAce_mWaitForLock macro definition.
+	 */
+	XSysAce_mWaitForLock(BaseAddress);
+
+	/* See if the CF is ready for a command */
+	if (!XSysAce_mIsReadyForCmd(BaseAddress)) {
+		return 0;
+	}
+
+	/* Write the sector ID (LBA) */
+	XSysAce_RegWrite32(BaseAddress + XSA_MLR_OFFSET, SectorId);
+
+	/* Send a write command of one sector to the controller */
+	XSysAce_RegWrite16(BaseAddress + XSA_SCCR_OFFSET,
+			   XSA_SCCR_WRITEDATA_MASK | 1);
+
+	/* Reset configuration controller (be sure to keep the lock) */
+	XSysAce_mOrControlReg(BaseAddress, XSA_CR_CFGRESET_MASK);
+
+	/* Write a sector of data to the data buffer (blocking) */
+	NumSent = XSysAce_WriteDataBuffer(BaseAddress, BufferPtr,
+					  XSA_CF_SECTOR_SIZE);
+
+	/* Clear reset of configuration controller and locks */
+	XSysAce_mAndControlReg(BaseAddress, ~(XSA_CR_CFGRESET_MASK |
+					      XSA_CR_LOCKREQ_MASK));
+
+	return NumSent;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Read the specified number of bytes from the data buffer of the ACE
+* controller. The data buffer, which is 32 bytes, can only be read two bytes
+* at a time. Once the data buffer is read, we wait for it to be filled again
+* before reading the next buffer's worth of data.
+*
+* @param BaseAddress is the base address of the device
+* @param BufferPtr is a pointer to a buffer in which to store data.
+* @param Size is the number of bytes to read
+*
+* @return
+*
+* The total number of bytes read, or 0 if an error occurred.
+*
+* @note
+*
+* If Size is not aligned with the size of the data buffer (32 bytes), this
+* function will read the entire data buffer, dropping the extra bytes on the
+* floor since the user did not request them. This is necessary to get the
+* data buffer to be ready again.
+*
+******************************************************************************/
+int XSysAce_ReadDataBuffer(u32 BaseAddress, u8 *BufferPtr, int Size)
+{
+	int DataBytes;		/* number of data bytes written */
+	int BufferBytes;	/* bytes remaining in the current 32-byte buffer */
+	u16 Data;
+
+	/*
+	 * Read data two bytes at a time. We need to wait for the data
+	 * buffer to be ready before reading the buffer.
+	 */
+	BufferBytes = 0;
+	for (DataBytes = 0; DataBytes < Size;) {
+		/*
+		 * If at any point during this read an error occurs, exit early
+		 */
+		if (XSysAce_mGetErrorReg(BaseAddress) != 0) {
+			return 0;
+		}
+
+		if (BufferBytes == 0) {
+			/*
+			 * Wait for CF data buffer to ready, then reset buffer byte count.
+			 * NOTE: this polls DATABUFRDY with no timeout; it will spin
+			 * forever if the controller never becomes ready.
+			 */
+			while ((XSysAce_mGetStatusReg(BaseAddress)
+				& XSA_SR_DATABUFRDY_MASK) == 0);
+
+			BufferBytes = XSA_DATA_BUFFER_SIZE;
+		}
+
+		/*
+		 * Need to read two bytes. Put the first one in the output buffer
+		 * because if we're here we know one more is needed. Put the second one
+		 * in the output buffer if there is still room, or just drop it on the
+		 * floor if the requested number of bytes have already been read.
+		 */
+		Data = XSysAce_RegRead16(BaseAddress + XSA_DBR_OFFSET);
+		*BufferPtr++ = (u8) Data;
+		DataBytes++;
+
+		if (DataBytes < Size) {
+			/* Still more room in the output buffer */
+			*BufferPtr++ = (u8) (Data >> 8);
+			DataBytes++;
+		}
+
+		BufferBytes -= 2;
+	}
+
+	/*
+	 * If a complete data buffer was not read, read and ignore the remaining
+	 * bytes
+	 */
+	while (BufferBytes != 0) {
+		/*
+		 * If at any point during this read an error occurs, exit early
+		 */
+		if (XSysAce_mGetErrorReg(BaseAddress) != 0) {
+			return 0;
+		}
+
+		(void) XSysAce_RegRead16(BaseAddress + XSA_DBR_OFFSET);
+		BufferBytes -= 2;
+	}
+
+	return DataBytes;
+}
+
+/*****************************************************************************/
+/**
+*
+* Write the specified number of bytes to the data buffer of the ACE controller.
+* The data buffer, which is 32 bytes, can only be written two bytes at a time.
+* Once the data buffer is written, we wait for it to be empty again before
+* writing the next buffer's worth of data. If the size of the incoming buffer
+* is not aligned with the System ACE data buffer size (32 bytes), then this
+* routine pads out the data buffer with zeros so the entire data buffer is
+* written. This is necessary for the ACE controller to process the data buffer.
+*
+* @param BaseAddress is the base address of the device
+* @param BufferPtr is a pointer to a buffer used to write to the controller.
+* @param Size is the number of bytes to write
+*
+* @return
+*
+* The total number of bytes written (not including pad bytes), or 0 if an
+* error occurs.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XSysAce_WriteDataBuffer(u32 BaseAddress, u8 *BufferPtr, int Size)
+{
+	int DataBytes;		/* number of data bytes written */
+	int BufferBytes;	/* bytes remaining in the current 32-byte buffer */
+	u16 Data;
+
+	/*
+	 * Write a sector two bytes at a time. We need to wait for the data
+	 * buffer to be ready before writing the buffer.
+	 */
+	BufferBytes = 0;
+	for (DataBytes = 0; DataBytes < Size;) {
+		/*
+		 * If at any point during this write an error occurs, exit early
+		 */
+		if (XSysAce_mGetErrorReg(BaseAddress) != 0) {
+			return 0;
+		}
+
+		if (BufferBytes == 0) {
+			/*
+			 * Wait for CF read data buffer to ready, then reset buffer byte
+			 * count. NOTE: this polls DATABUFRDY with no timeout; it will
+			 * spin forever if the controller never becomes ready.
+			 */
+			while ((XSysAce_mGetStatusReg(BaseAddress)
+				& XSA_SR_DATABUFRDY_MASK) == 0);
+
+			BufferBytes = XSA_DATA_BUFFER_SIZE;
+		}
+
+		/*
+		 * Need to send two bytes. Grab the first one from the incoming buffer
+		 * because if we're here we know one more exists. Grab the second one
+		 * from the incoming buffer if there are still any bytes remaining, or
+		 * send a pad byte if the incoming buffer has been expired.
+		 */
+		Data = *BufferPtr++;
+		DataBytes++;
+
+		if (DataBytes < Size) {
+			/* Still more data in the incoming buffer */
+			Data |= ((u16) *BufferPtr++ << 8);
+			DataBytes++;
+		}
+		else {
+			/* No more data in the incoming buffer, send a pad byte of 0 */
+			Data |= ((u16) 0 << 8);
+		}
+
+		XSysAce_RegWrite16(BaseAddress + XSA_DBR_OFFSET, Data);
+
+		BufferBytes -= 2;
+	}
+
+	/*
+	 * If a complete data buffer was not filled, fill it with pad bytes (zeros)
+	 */
+	while (BufferBytes != 0) {
+		/*
+		 * If at any point during this write an error occurs, exit early
+		 */
+		if (XSysAce_mGetErrorReg(BaseAddress) != 0) {
+			return 0;
+		}
+
+		XSysAce_RegWrite16(BaseAddress + XSA_DBR_OFFSET, 0);
+		BufferBytes -= 2;
+	}
+
+	return DataBytes;
+}
--- /dev/null
+/* $Id: xsysace_l.h,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_l.h
+*
+* Defines identifiers and low-level macros/functions for the XSysAce driver.
+* These identifiers include register offsets and bit masks. A high-level driver
+* interface is defined in xsysace.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/14/02 work in progress
+* 1.01a jvb 02/01/06 Added include of xparameters.h unless EXT_CONFIG is
+* defined at compile time (external configuration), in
+* which case it just defaults to 8-bit wide memory.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XSYSACE_L_H /* prevent circular inclusions */
+#define XSYSACE_L_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#ifdef EXT_CONFIG
+#define XPAR_XSYSACE_MEM_WIDTH 8
+#else
+#include "xparameters.h"
+#endif
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Constant used to align the register offsets to the proper address. This was
+ * used during development to handle both byte-addressable (alignment=1) and
+ * word addressable (alignment=4) registers. The #ifndef allows the user to
+ * modify this at compile-time.
+ */
+#ifndef XSA_ADDR_ALIGN
+#define XSA_ADDR_ALIGN 1
+#endif
+
+/** @name Register Offsets
+ * System ACE register offsets
+ * @{
+ */
+#define XSA_BMR_OFFSET (XSA_ADDR_ALIGN * 0) /**< Bus mode (BUSMODEREG) */
+#define XSA_SR_OFFSET (XSA_ADDR_ALIGN * 4) /**< Status (STATUSREG) */
+#define XSA_ER_OFFSET (XSA_ADDR_ALIGN * 8) /**< Error (ERRORREG) */
+#define XSA_CLR_OFFSET (XSA_ADDR_ALIGN * 12) /**< Config LBA (CFGLBAREG) */
+#define XSA_MLR_OFFSET (XSA_ADDR_ALIGN * 16) /**< MPU LBA (MPULBAREG) */
+#define XSA_SCCR_OFFSET (XSA_ADDR_ALIGN * 20) /**< Sector cnt (SECCNTCMDREG) */
+#define XSA_VR_OFFSET (XSA_ADDR_ALIGN * 22) /**< Version (VERSIONREG) */
+#define XSA_CR_OFFSET (XSA_ADDR_ALIGN * 24) /**< Control (CONTROLREG) */
+#define XSA_FSR_OFFSET (XSA_ADDR_ALIGN * 28) /**< FAT status (FATSTATREG) */
+#define XSA_DBR_OFFSET (XSA_ADDR_ALIGN * 64) /**< Data buffer (DATABUFREG) */
+/*@}*/
+
+/*
+ * Bus Mode Register masks
+ */
+#define XSA_BMR_16BIT_MASK 0x0101 /**< 16-bit access to ACE controller */
+
+
+/** @name Status Values
+ * Status Register masks
+ * @{
+ */
+#define XSA_SR_CFGLOCK_MASK 0x00000001 /**< Config port lock status */
+#define XSA_SR_MPULOCK_MASK 0x00000002 /**< MPU port lock status */
+#define XSA_SR_CFGERROR_MASK 0x00000004 /**< Config port error status */
+#define XSA_SR_CFCERROR_MASK 0x00000008 /**< CF error status */
+#define XSA_SR_CFDETECT_MASK 0x00000010 /**< CF detect flag */
+#define XSA_SR_DATABUFRDY_MASK 0x00000020 /**< Data buffer ready status */
+#define XSA_SR_DATABUFMODE_MASK 0x00000040 /**< Data buffer mode status */
+#define XSA_SR_CFGDONE_MASK 0x00000080 /**< Configuration done status */
+#define XSA_SR_RDYFORCMD_MASK 0x00000100 /**< Ready for CF command */
+#define XSA_SR_CFGMODE_MASK 0x00000200 /**< Configuration mode status */
+#define XSA_SR_CFGADDR_MASK 0x0000E000 /**< Configuration address */
+#define XSA_SR_CFBSY_MASK 0x00020000 /**< CF busy (BSY bit) */
+#define XSA_SR_CFRDY_MASK 0x00040000 /**< CF ready (RDY bit) */
+#define XSA_SR_CFDWF_MASK 0x00080000 /**< CF data write fault (DWF bit) */
+#define XSA_SR_CFDSC_MASK 0x00100000 /**< CF seek complete (DSC bit) */
+#define XSA_SR_CFDRQ_MASK 0x00200000 /**< CF data request (DRQ) */
+#define XSA_SR_CFCORR_MASK 0x00400000 /**< CF correctable error (CORR bit) */
+#define XSA_SR_CFERR_MASK 0x00800000 /**< CF error (ERR bit) */
+/*@}*/
+
+
+/** @name Error Values
+ * Error Register masks.
+ * @{
+ */
+#define XSA_ER_CARD_RESET 0x00000001 /**< CF card failed to reset */
+#define XSA_ER_CARD_READY 0x00000002 /**< CF card failed to ready */
+#define XSA_ER_CARD_READ 0x00000004 /**< CF read command failed */
+#define XSA_ER_CARD_WRITE 0x00000008 /**< CF write command failed */
+#define XSA_ER_SECTOR_READY 0x00000010 /**< CF sector failed to ready */
+#define XSA_ER_CFG_ADDR 0x00000020 /**< Cfg address is invalid */
+#define XSA_ER_CFG_FAIL 0x00000040 /**< Failed to configure a device */
+#define XSA_ER_CFG_READ 0x00000080 /**< Cfg read of CF failed */
+#define XSA_ER_CFG_INSTR 0x00000100 /**< Invalid instruction during cfg */
+#define XSA_ER_CFG_INIT 0x00000200 /**< CFGINIT pin error - did not
+ * go high within 500ms of start */
+#define XSA_ER_RESERVED 0x00000400 /**< reserved */
+#define XSA_ER_BAD_BLOCK 0x00000800 /**< CF bad block detected */
+#define XSA_ER_UNCORRECTABLE 0x00001000 /**< CF uncorrectable error */
+#define XSA_ER_SECTOR_ID 0x00002000 /**< CF sector ID not found */
+#define XSA_ER_ABORT 0x00004000 /**< CF command aborted */
+#define XSA_ER_GENERAL 0x00008000 /**< CF general error */
+/*@}*/
+
+
+/**
+ * Config LBA Register - address mask
+ */
+#define XSA_CLR_LBA_MASK 0x0FFFFFFF /* Logical Block Address mask */
+
+/**
+ * MPU LBA Register - address mask
+ */
+#define XSA_MLR_LBA_MASK 0x0FFFFFFF /* Logical Block Address mask */
+
+
+/** @name Sector Count/Command Values
+ * Sector Count Command Register masks
+ * @{
+ */
+#define XSA_SCCR_COUNT_MASK 0x00FF /**< Sector count mask */
+#define XSA_SCCR_RESET_MASK 0x0100 /**< Reset CF card command */
+#define XSA_SCCR_IDENTIFY_MASK 0x0200 /**< Identify CF card command */
+#define XSA_SCCR_READDATA_MASK 0x0300 /**< Read CF card command */
+#define XSA_SCCR_WRITEDATA_MASK 0x0400 /**< Write CF card command */
+#define XSA_SCCR_ABORT_MASK 0x0600 /**< Abort CF command */
+#define XSA_SCCR_CMD_MASK 0x0700 /**< Command mask */
+/*@}*/
+
+
+/*
+ * Version Register masks
+ */
+#define XSA_VR_BUILD_MASK 0x00FF /* Revision/build number */
+#define XSA_VR_MINOR_MASK 0x0F00 /* Minor version number */
+#define XSA_VR_MAJOR_MASK 0xF000 /* Major version number */
+
+
+/** @name Control Values
+ * Control Register masks
+ * @{
+ */
+#define XSA_CR_FORCELOCK_MASK 0x00000001 /**< Force lock request */
+#define XSA_CR_LOCKREQ_MASK 0x00000002 /**< MPU lock request */
+#define XSA_CR_FORCECFGADDR_MASK 0x00000004 /**< Force CFG address */
+#define XSA_CR_FORCECFGMODE_MASK 0x00000008 /**< Force CFG mode */
+#define XSA_CR_CFGMODE_MASK 0x00000010 /**< CFG mode */
+#define XSA_CR_CFGSTART_MASK 0x00000020 /**< CFG start */
+#define XSA_CR_CFGSEL_MASK 0x00000040 /**< CFG select */
+#define XSA_CR_CFGRESET_MASK 0x00000080 /**< CFG reset */
+#define XSA_CR_DATARDYIRQ_MASK 0x00000100 /**< Enable data ready IRQ */
+#define XSA_CR_ERRORIRQ_MASK 0x00000200 /**< Enable error IRQ */
+#define XSA_CR_CFGDONEIRQ_MASK 0x00000400 /**< Enable CFG done IRQ */
+#define XSA_CR_RESETIRQ_MASK 0x00000800 /**< Reset IRQ line */
+#define XSA_CR_CFGPROG_MASK 0x00001000 /**< Inverted CFGPROG pin */
+#define XSA_CR_CFGADDR_MASK 0x0000E000 /**< Config address mask */
+#define XSA_CR_CFGADDR_SHIFT 13 /**< Config address shift */
+/*@}*/
+
+
+/** @name FAT Status
+ *
+ * FAT filesystem status masks. The first valid partition of the CF
+ * is a FAT partition.
+ * @{
+ */
+#define XSA_FAT_VALID_BOOT_REC 0x0001 /**< Valid master boot record */
+#define XSA_FAT_VALID_PART_REC 0x0002 /**< Valid partition boot record */
+#define XSA_FAT_12_BOOT_REC 0x0004 /**< FAT12 in master boot rec */
+#define XSA_FAT_12_PART_REC 0x0008 /**< FAT12 in partition boot rec */
+#define XSA_FAT_16_BOOT_REC 0x0010 /**< FAT16 in master boot rec */
+#define XSA_FAT_16_PART_REC 0x0020 /**< FAT16 in partition boot rec */
+#define XSA_FAT_12_CALC 0x0040 /**< Calculated FAT12 from clusters */
+#define XSA_FAT_16_CALC 0x0080 /**< Calculated FAT16 from clusters */
+/*@}*/
+
+
+#define XSA_DATA_BUFFER_SIZE 32 /**< Size of System ACE data buffer */
+#define XSA_CF_SECTOR_SIZE 512 /**< Number of bytes in a CF sector */
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************
+*
+* Low-level driver macros and functions. The list below provides signatures
+* to help the user use the macros.
+*
+* u32 XSysAce_mGetControlReg(u32 BaseAddress)
+* void XSysAce_mSetControlReg(u32 BaseAddress, u32 Data)
+* void XSysAce_mOrControlReg(u32 BaseAddress, u32 Data)
+* void XSysAce_mAndControlReg(u32 BaseAddress, u32 Data)
+* u32 XSysAce_mGetErrorReg(u32 BaseAddress)
+* u32 XSysAce_mGetStatusReg(u32 BaseAddress)
+*
+* void XSysAce_mSetCfgAddr(u32 BaseAddress, unsigned int Address)
+* void XSysAce_mWaitForLock(u32 BaseAddress)
+* void XSysAce_mEnableIntr(u32 BaseAddress, u32 Mask)
+* void XSysAce_mDisableIntr(u32 BaseAddress, u32 Mask)
+*
+* u32 XSysAce_mIsReadyForCmd(u32 BaseAddress)
+* u32 XSysAce_mIsCfgDone(u32 BaseAddress)
+* u32 XSysAce_mIsMpuLocked(u32 BaseAddress)
+* u32 XSysAce_mIsIntrEnabled(u32 BaseAddress)
+*
+*****************************************************************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Get the contents of the control register.
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return The 32-bit value of the control register.
+*
+* @note Each use calls XSysAce_RegRead32(); no cached copy is kept.
+*
+******************************************************************************/
+#define XSysAce_mGetControlReg(BaseAddress) \
+ XSysAce_RegRead32((BaseAddress) + XSA_CR_OFFSET)
+
+
+/*****************************************************************************/
+/**
+*
+* Set the contents of the control register.
+*
+* @param BaseAddress is the base address of the device.
+* @param Data is the 32-bit value to write to the register.
+*
+* @return None.
+*
+* @note Each use calls XSysAce_RegWrite32(); the full register is
+* overwritten (no read-modify-write).
+*
+******************************************************************************/
+#define XSysAce_mSetControlReg(BaseAddress, Data) \
+ XSysAce_RegWrite32((BaseAddress) + XSA_CR_OFFSET, (Data))
+
+
+/*****************************************************************************/
+/**
+*
+* Set the contents of the control register to the value specified OR'ed with
+* its current contents.
+*
+* @param BaseAddress is the base address of the device.
+* @param Data is the 32-bit value to OR with the register.
+*
+* @return None.
+*
+* @note Expands to a non-atomic read-modify-write, and BaseAddress is
+* evaluated twice -- avoid arguments with side effects.
+*
+******************************************************************************/
+#define XSysAce_mOrControlReg(BaseAddress, Data) \
+ XSysAce_mSetControlReg((BaseAddress), \
+ XSysAce_mGetControlReg(BaseAddress) | (Data))
+
+
+/*****************************************************************************/
+/**
+*
+* Set the contents of the control register to the value specified AND'ed with
+* its current contents.
+*
+* @param BaseAddress is the base address of the device.
+* @param Data is the 32-bit value to AND with the register.
+*
+* @return None.
+*
+* @note Expands to a non-atomic read-modify-write, and BaseAddress is
+* evaluated twice -- avoid arguments with side effects.
+*
+******************************************************************************/
+#define XSysAce_mAndControlReg(BaseAddress, Data) \
+ XSysAce_mSetControlReg((BaseAddress), \
+ XSysAce_mGetControlReg(BaseAddress) & (Data))
+
+
+/*****************************************************************************/
+/**
+*
+* Get the contents of the error register.
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return The 32-bit value of the register.
+*
+* @note Each use calls XSysAce_RegRead32() on the error register.
+* NOTE(review): whether reading clears the register is hardware-defined
+* -- confirm against the System ACE datasheet.
+*
+******************************************************************************/
+#define XSysAce_mGetErrorReg(BaseAddress) \
+ XSysAce_RegRead32((BaseAddress) + XSA_ER_OFFSET)
+
+
+/*****************************************************************************/
+/**
+*
+* Get the contents of the status register.
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return The 32-bit value of the register.
+*
+* @note Each use calls XSysAce_RegRead32(); polling loops therefore
+* re-read the hardware on every iteration.
+*
+******************************************************************************/
+#define XSysAce_mGetStatusReg(BaseAddress) \
+ XSysAce_RegRead32((BaseAddress) + XSA_SR_OFFSET)
+
+
+/*****************************************************************************/
+/**
+*
+* Set the configuration address, or file, of the CompactFlash. This address
+* indicates which .ace bitstream to use to configure the target FPGA chain.
+*
+* @param BaseAddress is the base address of the device.
+* @param Address ranges from 0 to 7 and represents the eight possible .ace
+* bitstreams that can reside on the CompactFlash.
+*
+* @return None.
+*
+* @note Used cryptic var names to avoid conflict with caller's var names.
+*
+******************************************************************************/
+#define XSysAce_mSetCfgAddr(BaseAddress, Address) \
+{ \
+ u32 A66rMask = ((Address) << XSA_CR_CFGADDR_SHIFT) & XSA_CR_CFGADDR_MASK; \
+ u32 C0ntr0l = XSysAce_mGetControlReg(BaseAddress); \
+ C0ntr0l &= ~XSA_CR_CFGADDR_MASK; /* clear current address */ \
+ C0ntr0l |= (A66rMask | XSA_CR_FORCECFGADDR_MASK); \
+ XSysAce_mSetControlReg((BaseAddress), C0ntr0l); \
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Request then wait for the MPU lock. This is not a forced lock, so we must
+* contend with the configuration controller.
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XSysAce_mWaitForLock(BaseAddress) \
+{ \
+ XSysAce_mOrControlReg((BaseAddress), XSA_CR_LOCKREQ_MASK); \
+ while ((XSysAce_mGetStatusReg(BaseAddress) & XSA_SR_MPULOCK_MASK) == 0); \
+}
+
+/*****************************************************************************/
+/**
+*
+* Enable ACE controller interrupts.
+*
+* @param BaseAddress is the base address of the device.
+* @param Mask is the OR of XSA_CR_*IRQ_MASK bits to set in the control
+* register.
+*
+* @return None.
+*
+* @note The trailing semicolon was removed from the expansion so the
+* macro can be used in any statement context (the caller supplies
+* the semicolon); with it, an if/else around the macro would not
+* compile.
+*
+******************************************************************************/
+#define XSysAce_mEnableIntr(BaseAddress, Mask) \
+ XSysAce_mOrControlReg((BaseAddress), (Mask))
+
+
+/*****************************************************************************/
+/**
+*
+* Disable ACE controller interrupts.
+*
+* @param BaseAddress is the base address of the device.
+* @param Mask is the OR of XSA_CR_*IRQ_MASK bits to clear in the control
+* register.
+*
+* @return None.
+*
+* @note The trailing semicolon was removed from the expansion so the
+* macro can be used in any statement context (the caller supplies
+* the semicolon); with it, an if/else around the macro would not
+* compile.
+*
+******************************************************************************/
+#define XSysAce_mDisableIntr(BaseAddress, Mask) \
+ XSysAce_mAndControlReg((BaseAddress), ~(Mask))
+
+
+/*****************************************************************************/
+/**
+*
+* Is the CompactFlash ready for a command?
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return Nonzero (XSA_SR_RDYFORCMD_MASK) if ready, 0 otherwise. Note the
+* result is a mask value, not necessarily 1 -- test for truth, not == 1.
+*
+* @note None.
+*
+******************************************************************************/
+#define XSysAce_mIsReadyForCmd(BaseAddress) \
+ (XSysAce_mGetStatusReg(BaseAddress) & XSA_SR_RDYFORCMD_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Is the ACE controller locked for MPU access?
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return Nonzero (XSA_SR_MPULOCK_MASK) if locked, 0 otherwise. Note the
+* result is a mask value, not necessarily 1 -- test for truth, not == 1.
+*
+* @note None.
+*
+******************************************************************************/
+#define XSysAce_mIsMpuLocked(BaseAddress) \
+ (XSysAce_mGetStatusReg(BaseAddress) & XSA_SR_MPULOCK_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Is the CompactFlash configuration of the target FPGA chain complete?
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return Nonzero (XSA_SR_CFGDONE_MASK) if done, 0 otherwise. Note the
+* result is a mask value, not necessarily 1 -- test for truth, not == 1.
+*
+* @note None.
+*
+******************************************************************************/
+#define XSysAce_mIsCfgDone(BaseAddress) \
+ (XSysAce_mGetStatusReg(BaseAddress) & XSA_SR_CFGDONE_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Have interrupts been enabled by the user? We look for the interrupt reset
+* bit to be clear (meaning interrupts are armed, even though none may be
+* individually enabled).
+*
+* @param BaseAddress is the base address of the device.
+*
+* @return TRUE (1) if enabled, FALSE (0) otherwise -- unlike the other
+* predicates here, the == comparison yields a genuine 0/1.
+*
+* @note Tests that the XSA_CR_RESETIRQ_MASK bit is clear in the control
+* register.
+*
+******************************************************************************/
+#define XSysAce_mIsIntrEnabled(BaseAddress) \
+ ((XSysAce_mGetControlReg(BaseAddress) & XSA_CR_RESETIRQ_MASK) == 0)
+
+
+/************************** Function Prototypes ******************************/
+
+int XSysAce_ReadSector(u32 BaseAddress, u32 SectorId, u8 *BufferPtr);
+int XSysAce_WriteSector(u32 BaseAddress, u32 SectorId, u8 *BufferPtr);
+
+/*
+ * Utility functions to read and write registers and data buffer
+ */
+u32 XSysAce_RegRead32(u32 Address);
+u16 XSysAce_RegRead16(u32 Address);
+void XSysAce_RegWrite32(u32 Address, u32 Data);
+void XSysAce_RegWrite16(u32 Address, u16 Data);
+
+/* Size: number of bytes to transfer (renamed from NumBytes to match the
+ * parameter name used by the function definitions) */
+int XSysAce_ReadDataBuffer(u32 BaseAddress, u8 *BufferPtr, int Size);
+int XSysAce_WriteDataBuffer(u32 BaseAddress, u8 *BufferPtr, int Size);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* end of protection macro */
--- /dev/null
+/*
+ * drivers/block/xilinx_sysace/xsysace_linux.c
+ *
+ * Xilinx System ACE xsysace component to interface System ACE to Linux
+ *
+ * Authors: Dmitry Chigirev <chigirev@ru.mvista.com>
+ * Sergey Podstavin <spodstavin@ru.mvista.com>
+ *
+ * 2002-2005 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+/*
+ * Through System ACE, the processor can access the CompactFlash and the
+ * JTAG chain. In addition, the System ACE controls system reset and
+ * which configuration will be loaded into the JTAG chain at that time.
+ * This driver provides two different interfaces. The first is handling
+ * reset by tying into the system's reset code as well as providing a
+ * /proc interface to read and write which configuration should be used
+ * when the system is reset. The second is to expose a block interface
+ * to the CompactFlash.
+ *
+ * This driver is a bit unusual in that it is composed of two logical
+ * parts where one part is the OS independent code and the other part is
+ * the OS dependent code. Xilinx provides their drivers split in this
+ * fashion. This file represents the Linux OS dependent part known as
+ * the Linux xsysace. The other files in this directory are the OS
+ * independent files as provided by Xilinx with no changes made to them.
+ * The names exported by those files begin with XSysAce_. All functions
+ * in this file that are called by Linux have names that begin with
+ * xsysace_. Any other functions are static helper functions.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/hdreg.h>
+#include <linux/slab.h>
+#include <linux/blkpg.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/xilinx_devices.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/machdep.h>
+#include "xbasic_types.h"
+#include "xsysace.h"
+
+static DEFINE_SPINLOCK(xsysace_lock); /* protects xsysace_req and queue access */
+
+/* Use xsa_major to support non-devfs configuration */
+static int xsa_major = 125;
+
+#define MAJOR_NAME "xsa"
+#define DEVICE_NAME "System ACE"
+
+static u32 xsa_phys_addr; /* Saved physical base address */
+static unsigned long xsa_remap_size;
+static int xsa_irq;
+
+static void (*old_restart) (char *cmd) = NULL; /* old ppc_md.restart */
+
+/* CF geometry reported through the HDIO_GETGEO ioctl */
+static unsigned char heads;
+static unsigned char sectors;
+static unsigned short cylinders;
+
+struct gendisk *xsa_gendisk;
+
+static struct request *xsysace_req; /* in-flight request; guarded by xsysace_lock */
+static struct request_queue *xsysace_queue; /* current queue */
+
+static void do_read_write(struct work_struct *work);
+static DECLARE_WORK(xsysace_read_write_work, do_read_write);
+
+/*
+ * The underlying OS independent code needs space as well. A pointer to
+ * the following XSysAce structure will be passed to any XSysAce_
+ * function that requires it.
+ */
+static XSysAce SysAce;
+
+static void xsa_complete_request(int get_uptodate);
+
+/* req_fnc will be XSysAce_SectorRead or XSysAce_SectorWrite. */
+static int (*req_fnc) (XSysAce * InstancePtr, u32 StartSector,
+ int NumSectors, u8 *BufferPtr);
+
+/* req_str will be used for errors and will be either "reading" or "writing" */
+static char *req_str;
+
+/*******************************************************************************
+ * This configuration stuff should become unnecessary after EDK version 8.x is
+ * released.
+ ******************************************************************************/
+
+static DECLARE_MUTEX(cfg_sem); /* serializes XSysAce_CfgInitialize() in probe */
+
+/*
+ * The following block of code implements the reset handling. The first
+ * part implements /proc/xsysace/cfgaddr. When read, it will yield a
+ * number from 0 to 7 that represents which configuration will be used
+ * next (the configuration address). Writing a number to it will change
+ * the configuration address. After that is the function that is hooked
+ * into the system's reset handler.
+ */
+#ifndef CONFIG_PROC_FS
+#define proc_init() 0
+#define proc_cleanup()
+#else
+#define CFGADDR_NAME "cfgaddr"
+static struct proc_dir_entry *xsysace_dir = NULL;
+static struct proc_dir_entry *cfgaddr_file = NULL;
+
+/*
+ * Return the configuration address (0-7) that selects which .ace bitstream
+ * is used at the next reset. If the "force config address" bit is set in
+ * the control register, the address comes from the control register;
+ * otherwise it is read from the status register.
+ */
+static unsigned int XSysAce_GetCfgAddr(XSysAce * InstancePtr)
+{
+ u32 Status;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ Status = XSysAce_mGetControlReg(InstancePtr->BaseAddress);
+ if (!(Status & XSA_CR_FORCECFGADDR_MASK))
+ Status = XSysAce_mGetStatusReg(InstancePtr->BaseAddress);
+
+ /*
+ * XSA_SR_CFGADDR_MASK and XSA_CR_CFGADDR_MASK are the same bits
+ * (0x0000E000), so this mask/shift pair is valid for either register.
+ */
+ return (unsigned int) ((Status & XSA_SR_CFGADDR_MASK) >>
+ XSA_CR_CFGADDR_SHIFT);
+}
+
+/*
+ * /proc read handler for cfgaddr: prints the current configuration address
+ * (0-7) followed by a newline, and sets *eof so the proc core makes a
+ * single call per read.
+ *
+ * NOTE(review): writing at page + off is unusual for a *eof-style
+ * read_proc handler -- confirm off is always 0 when this is called.
+ */
+static int cfgaddr_read(char *page, char **start,
+ off_t off, int count, int *eof, void *data)
+{
+ unsigned int cfgaddr;
+
+ /* Make sure we have room for a digit (0-7), a newline and a NULL */
+ if (count < 3)
+ return -EINVAL;
+
+ cfgaddr = XSysAce_GetCfgAddr(&SysAce);
+ count = sprintf(page + off, "%d\n", cfgaddr);
+ *eof = 1;
+ return count;
+}
+
+/*
+ * /proc write handler for cfgaddr: accepts a single digit '0'-'7'
+ * (optionally followed by a newline or NUL) and programs it as the
+ * configuration address to use at the next reset.
+ */
+static int cfgaddr_write(struct file *file,
+ const char *buffer, unsigned long count, void *data)
+{
+ char val[2];
+
+ if (count < 1 || count > 2)
+ return -EINVAL;
+
+ if (copy_from_user(val, buffer, count)) {
+ return -EFAULT;
+ }
+
+ /* Only "N" or "N\n"/"N\0" with N in 0..7 is accepted */
+ if (val[0] < '0' || val[0] > '7' || (count == 2 && !(val[1] == '\n' ||
+ val[1] == '\0'))) {
+ return -EINVAL;
+ }
+
+ XSysAce_SetCfgAddr(&SysAce, val[0] - '0');
+ return count;
+}
+
+/*
+ * Create /proc/xsa/cfgaddr. Returns 0 on success, or -ENOMEM if either
+ * proc entry cannot be created; a partially created directory is removed
+ * before returning the error.
+ */
+static int proc_init(void)
+{
+ xsysace_dir = proc_mkdir(MAJOR_NAME, NULL);
+ if (!xsysace_dir)
+ return -ENOMEM;
+ xsysace_dir->owner = THIS_MODULE;
+
+ cfgaddr_file = create_proc_entry(CFGADDR_NAME, 0644, xsysace_dir);
+ if (!cfgaddr_file) {
+ remove_proc_entry(MAJOR_NAME, NULL);
+ return -ENOMEM;
+ }
+ cfgaddr_file->read_proc = cfgaddr_read;
+ cfgaddr_file->write_proc = cfgaddr_write;
+ cfgaddr_file->owner = THIS_MODULE;
+ return 0;
+}
+
+/* Remove the proc entries created by proc_init(); safe if only partially set up. */
+static void proc_cleanup(void)
+{
+ if (cfgaddr_file)
+ remove_proc_entry(CFGADDR_NAME, xsysace_dir);
+ if (xsysace_dir)
+ remove_proc_entry(MAJOR_NAME, NULL);
+}
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Board restart hook: tell the System ACE to reconfigure the FPGA chain,
+ * then spin forever until the hardware reset takes effect.
+ */
+static void xsysace_restart(char *cmd)
+{
+ XSysAce_ResetCfg(&SysAce);
+
+ /* Wait for reset. */
+ for (;;);
+}
+
+/* IRQ handler (registered in probe): simply hands the interrupt to the
+ * OS-independent Xilinx code, which dispatches events to EventHandler(). */
+static irqreturn_t xsysace_interrupt(int irq, void *dev_id)
+{
+ XSysAce_InterruptHandler(&SysAce);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Finish (part of) a request. Both call sites in this file hold
+ * xsysace_lock when calling this. When all sectors of the request have
+ * been accounted for, the request is dequeued and finally completed.
+ *
+ * NOTE(review): end_that_request_last() is passed 1 (success) even when
+ * uptodate is 0 -- confirm this matches the intended error reporting.
+ */
+void xsysace_end_request(struct request *req, int uptodate)
+{
+ if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, 1);
+ }
+}
+
+/*
+ * Complete the in-flight request (uptodate nonzero = success): release
+ * the CompactFlash lock, finish the request under xsysace_lock, clear
+ * xsysace_req, and kick the worker to start the next queued request.
+ */
+static void xsa_complete_request(int uptodate)
+{
+ XSysAce_Unlock(&SysAce);
+ spin_lock_irq(&xsysace_lock);
+ xsysace_end_request(xsysace_req, uptodate);
+ xsysace_req = 0; /* no request in flight now */
+ spin_unlock_irq(&xsysace_lock);
+ schedule_work(&xsysace_read_write_work);
+}
+
+/*
+ * Worker: take the next filesystem request off the queue and start it on
+ * the System ACE. Completion (or error) is reported asynchronously via
+ * EventHandler(), which finishes the request and re-schedules this work.
+ */
+static void do_read_write(struct work_struct *work)
+{
+ int stat;
+ struct request *req;
+ request_queue_t *q;
+
+ q = xsysace_queue;
+ spin_lock_irq(&xsysace_lock);
+
+ if (blk_queue_plugged(q)) {
+ printk(KERN_ERR "XSysAce: Queue is plugged\n");
+ spin_unlock_irq(&xsysace_lock);
+ return;
+ }
+
+ /* Pick the next fs request; anything else is failed immediately. */
+ while ((req = elv_next_request(q)) != NULL) {
+ if (!blk_fs_request(req)) {
+ printk(KERN_NOTICE "Skip non-fs request\n");
+ xsysace_end_request(req, 0);
+ continue;
+ }
+ if (rq_data_dir(req) == WRITE) {
+ req_str = "writing";
+ req_fnc = XSysAce_SectorWrite;
+ }
+ else {
+ req_str = "reading";
+ req_fnc = XSysAce_SectorRead;
+ }
+ xsysace_req = req;
+ break;
+ }
+ spin_unlock_irq(&xsysace_lock);
+
+ if (!req)
+ return;
+
+ /* We have a request. */
+ while ((stat = XSysAce_Lock(&SysAce, 0)) == XST_DEVICE_BUSY) {
+ msleep_interruptible(1);
+ }
+ if (stat != XST_SUCCESS) {
+ printk(KERN_ERR "%s: Error %d when locking.\n",
+ DEVICE_NAME, stat);
+ xsa_complete_request(0); /* Request failed. */
+ /*
+ * Bug fix: xsa_complete_request() has cleared xsysace_req,
+ * so falling through would dereference a NULL request below.
+ */
+ return;
+ }
+
+ while ((stat = req_fnc(&SysAce, xsysace_req->sector,
+ xsysace_req->current_nr_sectors,
+ xsysace_req->buffer)) == XST_DEVICE_BUSY) {
+ msleep_interruptible(1);
+ }
+ /*
+ * If the stat is XST_SUCCESS, we have successfully
+ * gotten the request started on the hardware. The
+ * completion (or error) interrupt will unlock the
+ * CompactFlash and complete the request, so we don't
+ * need to do anything except just loop around and wait
+ * for the next request. If the status is not
+ * XST_SUCCESS, we need to finish the request with an
+ * error before waiting for the next request.
+ */
+ if (stat != XST_SUCCESS) {
+ printk(KERN_ERR "%s: Error %d when %s sector %lu.\n",
+ DEVICE_NAME, stat, req_str, xsysace_req->sector);
+ xsa_complete_request(0); /* Request failed. */
+ }
+}
+
+/*
+ * Block-layer request callback. The real work happens in process context
+ * in do_read_write(); here we only schedule the work item, and only when
+ * no request is already being handled (the completion path re-schedules).
+ */
+static void xsysace_do_request(request_queue_t * q)
+{
+ /* We're already handling a request. Don't accept another. */
+ if (xsysace_req)
+ return;
+ schedule_work(&xsysace_read_write_work);
+}
+
+/*
+ * Called by the Xilinx interrupt handler to give us an event, so this runs
+ * in interrupt context. CallbackRef is unused (registered as NULL in the
+ * probe routine). Data-done completes the request; errors are logged,
+ * may reset the CompactFlash, and fail the request.
+ */
+static void EventHandler(void *CallbackRef, int Event)
+{
+ u32 ErrorMask;
+
+ switch (Event) {
+ case XSA_EVENT_DATA_DONE:
+ xsa_complete_request(1); /* The request succeeded. */
+ break;
+
+ case XSA_EVENT_ERROR:
+ ErrorMask = XSysAce_GetErrors(&SysAce);
+ /* Print out what went wrong. */
+ if (ErrorMask & XSA_ER_CARD_RESET)
+ printk(KERN_ERR "CompactFlash failed to reset\n");
+ if (ErrorMask & XSA_ER_CARD_READY)
+ printk(KERN_ERR "CompactFlash failed to ready\n");
+ if (ErrorMask & XSA_ER_CARD_READ)
+ printk(KERN_ERR "CompactFlash read command failed\n");
+ if (ErrorMask & XSA_ER_CARD_WRITE)
+ printk(KERN_ERR "CompactFlash write command failed\n");
+ if (ErrorMask & XSA_ER_SECTOR_READY)
+ printk(KERN_ERR
+ "CompactFlash sector failed to ready\n");
+ if (ErrorMask & XSA_ER_BAD_BLOCK)
+ printk(KERN_ERR "CompactFlash bad block detected\n");
+ if (ErrorMask & XSA_ER_UNCORRECTABLE)
+ printk(KERN_ERR "CompactFlash uncorrectable error\n");
+ if (ErrorMask & XSA_ER_SECTOR_ID)
+ printk(KERN_ERR "CompactFlash sector ID not found\n");
+ if (ErrorMask & XSA_ER_ABORT)
+ printk(KERN_ERR "CompactFlash command aborted\n");
+ if (ErrorMask & XSA_ER_GENERAL)
+ printk(KERN_ERR "CompactFlash general error\n");
+
+ if (ErrorMask & XSA_ER_CFG_READ)
+ printk(KERN_ERR
+ "JTAG controller couldn't read configuration from the CompactFlash\n");
+ if (ErrorMask & XSA_ER_CFG_ADDR)
+ printk(KERN_ERR
+ "Invalid address given to JTAG controller\n");
+ if (ErrorMask & XSA_ER_CFG_FAIL)
+ printk(KERN_ERR
+ "JTAG controller failed to configure a device\n");
+ if (ErrorMask & XSA_ER_CFG_INSTR)
+ printk(KERN_ERR
+ "Invalid instruction during JTAG configuration\n");
+ if (ErrorMask & XSA_ER_CFG_INIT)
+ printk(KERN_ERR "JTAG CFGINIT pin error\n");
+
+ /* Check for errors that should reset the CompactFlash */
+ if (ErrorMask & (XSA_ER_CARD_RESET |
+ XSA_ER_CARD_READY |
+ XSA_ER_CARD_READ |
+ XSA_ER_CARD_WRITE |
+ XSA_ER_SECTOR_READY |
+ XSA_ER_BAD_BLOCK |
+ XSA_ER_UNCORRECTABLE |
+ XSA_ER_SECTOR_ID | XSA_ER_ABORT |
+ XSA_ER_GENERAL)) {
+ if (XSysAce_ResetCF(&SysAce) != XST_SUCCESS)
+ printk(KERN_ERR
+ "Could not reset CompactFlash\n");
+ xsa_complete_request(0); /* The request failed. */
+ }
+ break;
+ case XSA_EVENT_CFG_DONE:
+ printk(KERN_WARNING "XSA_EVENT_CFG_DONE not handled yet.\n");
+ break;
+ default:
+ printk(KERN_ERR "%s: unrecognized event %d\n",
+ DEVICE_NAME, Event);
+ break;
+ }
+}
+
+/*
+ * Block device ioctl: only HDIO_GETGEO is supported, reporting the
+ * geometry held in the file-scope heads/sectors/cylinders variables.
+ * All other commands return -ENOTTY.
+ */
+static int
+xsysace_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
+ struct hd_geometry g;
+
+ switch (cmd) {
+ case HDIO_GETGEO:
+ {
+ g.heads = heads;
+ g.sectors = sectors;
+ g.cylinders = cylinders;
+ g.start = 0;
+ return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
+ }
+ default:
+ return -ENOTTY;
+ }
+}
+
+/* Block device operations: only ioctl (HDIO_GETGEO) is implemented. */
+static struct block_device_operations xsysace_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = xsysace_ioctl,
+};
+
+/******************************
+ * The platform device driver *
+ ******************************/
+
+/*
+ * Currently the driver supports just one System ACE device.
+ * Most of the code below could be easily extended to handle
+ * several devices except for proc_init()/proc_cleanup() and
+ * ppc_md.restart handling.
+ */
+
+#define DRIVER_NAME "xsysace"
+
+static int xsysace_probe(struct device *dev)
+{
+ XSysAce_Config xsysace_cfg;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *irq_res, *regs_res;
+ unsigned long remap_size;
+ int stat;
+ long size;
+ XSysAce_CFParameters ident;
+ int retval;
+
+ if (!dev)
+ return -EINVAL;
+
+ /* Find irq number, map the control registers in */
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs_res || !irq_res) {
+ printk(KERN_ERR "%s #%d: IO resource(s) not found\n",
+ DRIVER_NAME, pdev->id);
+ retval = -EFAULT;
+ goto failed1;
+ }
+ xsa_irq = irq_res->start;
+ xsa_phys_addr = regs_res->start;
+ remap_size = regs_res->end - regs_res->start + 1;
+ if (!request_mem_region(regs_res->start, remap_size, DRIVER_NAME)) {
+ printk(KERN_ERR
+ "%s #%d: Couldn't lock memory region at 0x%08X\n",
+ DRIVER_NAME, pdev->id, regs_res->start);
+ retval = -EBUSY;
+ goto failed1;
+ }
+
+ /* Fill in cfg data and add them to the list */
+ xsa_remap_size = remap_size;
+ xsysace_cfg.DeviceId = pdev->id;
+ xsysace_cfg.BaseAddress = (u32) ioremap(regs_res->start, remap_size);
+ if (xsysace_cfg.BaseAddress == 0) {
+ printk(KERN_ERR
+ "%s #%d: Couldn't ioremap memory at 0x%08X\n",
+ DRIVER_NAME, pdev->id, regs_res->start);
+ retval = -EFAULT;
+ goto failed2;
+ }
+
+ /* Tell the Xilinx code to bring this SystemACE interface up. */
+ down(&cfg_sem);
+ if (XSysAce_CfgInitialize
+ (&SysAce, &xsysace_cfg, xsysace_cfg.BaseAddress) != XST_SUCCESS) {
+ up(&cfg_sem);
+ printk(KERN_ERR
+ "%s #%d: Could not initialize device.\n",
+ DRIVER_NAME, pdev->id);
+ retval = -ENODEV;
+ goto failed3;
+ }
+ up(&cfg_sem);
+
+ retval = request_irq(xsa_irq, xsysace_interrupt, 0, DEVICE_NAME, NULL);
+ if (retval) {
+ printk(KERN_ERR
+ "%s #%d: Couldn't allocate interrupt %d.\n",
+ DRIVER_NAME, pdev->id, xsa_irq);
+ goto failed3;
+ }
+
+ XSysAce_SetEventHandler(&SysAce, EventHandler, (void *) NULL);
+ XSysAce_EnableInterrupt(&SysAce);
+
+ /* Time to identify the drive. */
+ while (XSysAce_Lock(&SysAce, 0) == XST_DEVICE_BUSY);
+ while ((stat = XSysAce_IdentifyCF(&SysAce, &ident)) == XST_DEVICE_BUSY);
+ XSysAce_Unlock(&SysAce);
+ if (stat != XST_SUCCESS) {
+ printk(KERN_ERR "%s: Could not send identify command.\n",
+ DEVICE_NAME);
+ retval = -ENODEV;
+ goto failed4;
+ }
+
+ /* Fill in what we learned. */
+ heads = ident.NumHeads;
+ sectors = ident.NumSectorsPerTrack;
+ cylinders = ident.NumCylinders;
+ size = (long) cylinders *(long) heads *(long) sectors;
+
+ xsysace_queue = blk_init_queue(xsysace_do_request, &xsysace_lock);
+ if (!xsysace_queue) {
+ retval = -ENODEV;
+ goto failed4;
+ }
+
+ if (register_blkdev(xsa_major, MAJOR_NAME)) {
+ retval = -EBUSY;
+ goto failed5;
+ }
+
+ xsa_gendisk = alloc_disk(16);
+ if (!xsa_gendisk) {
+ retval = -ENODEV;
+ goto failed6;
+ }
+
+ strcpy(xsa_gendisk->disk_name, MAJOR_NAME);
+ xsa_gendisk->fops = &xsysace_fops;
+ xsa_gendisk->major = xsa_major;
+ xsa_gendisk->first_minor = 0;
+ xsa_gendisk->minors = 16;
+ xsa_gendisk->queue = xsysace_queue;
+
+ set_capacity(xsa_gendisk, size);
+
+ printk(KERN_INFO
+ "%s at 0x%08X mapped to 0x%08X, irq=%d, %ldKB\n",
+ DEVICE_NAME, xsa_phys_addr, SysAce.BaseAddress, xsa_irq,
+ size / 2);
+
+ /* Hook our reset function into system's restart code. */
+ if (old_restart == NULL) {
+ old_restart = ppc_md.restart;
+ ppc_md.restart = xsysace_restart;
+ }
+
+ if (proc_init())
+ printk(KERN_WARNING "%s: could not register /proc interface.\n",
+ DEVICE_NAME);
+
+ add_disk(xsa_gendisk);
+
+ return 0; /* success */
+
+ failed6:
+ unregister_blkdev(xsa_major, MAJOR_NAME);
+
+ failed5:
+ blk_cleanup_queue(xsysace_queue);
+
+ failed4:
+ XSysAce_DisableInterrupt(&SysAce);
+ free_irq(xsa_irq, NULL);
+
+ failed3:
+ iounmap((void *) (xsysace_cfg.BaseAddress));
+
+ failed2:
+ release_mem_region(regs_res->start, remap_size);
+
+ failed1:
+ return retval;
+}
+
+static int xsysace_remove(struct device *dev)
+{
+ if (!dev)
+ return -EINVAL;
+
+ proc_cleanup();
+
+ if (old_restart)
+ ppc_md.restart = old_restart;
+
+ unregister_blkdev(xsa_major, MAJOR_NAME);
+ del_gendisk(xsa_gendisk);
+ blk_cleanup_queue(xsysace_queue);
+ XSysAce_DisableInterrupt(&SysAce);
+ free_irq(xsa_irq, NULL);
+ iounmap((void *) (SysAce.BaseAddress));
+ release_mem_region(xsa_phys_addr, xsa_remap_size);
+
+ return 0; /* success */
+}
+
+static struct device_driver xsysace_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+ .probe = xsysace_probe,
+ .remove = xsysace_remove
+};
+
+static int __init xsysace_init(void)
+{
+ return driver_register(&xsysace_driver);
+}
+
+static void __exit xsysace_cleanup(void)
+{
+ driver_unregister(&xsysace_driver);
+}
+
+module_init(xsysace_init);
+module_exit(xsysace_cleanup);
+
+MODULE_AUTHOR
+ ("Dmitry Chigirev <chigirev@ru.mvista.com>, Sergey Podstavin <spodstavin@ru.mvista.com>");
+MODULE_DESCRIPTION("Xilinx System ACE block driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/* $Id: xsysace_selftest.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_selftest.c
+*
+* Contains diagnostic functions for the System ACE device and driver. This
+* includes a self-test to make sure communication to the device is possible
+* and the ability to retrieve the ACE controller version.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 06/17/02 work in progress
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xsysace.h"
+#include "xsysace_l.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* A self-test that simply proves communication to the ACE controller from the
+* device driver by obtaining an MPU lock, verifying it, then releasing it.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* XST_SUCCESS if self-test passes, or XST_FAILURE if an error occurs.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XSysAce_SelfTest(XSysAce * InstancePtr)
+{
+ int Result;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Grab a lock (expect immediate success)
+ */
+ Result = XSysAce_Lock(InstancePtr, TRUE);
+ if (Result != XST_SUCCESS) {
+ return Result;
+ }
+
+ /*
+ * Verify the lock was retrieved
+ */
+ if (!XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+ return XST_FAILURE;
+ }
+
+ /*
+ * Release the lock
+ */
+ XSysAce_Unlock(InstancePtr);
+
+ /*
+ * Verify the lock was released
+ */
+ if (XSysAce_mIsMpuLocked(InstancePtr->BaseAddress)) {
+ return XST_FAILURE;
+ }
+
+ /*
+ * If there are currently any errors on the device, fail self-test
+ */
+ if (XSysAce_mGetErrorReg(InstancePtr->BaseAddress) != 0) {
+ return XST_FAILURE;
+ }
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Retrieve the version of the System ACE device.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+*
+* @return
+*
+* A 16-bit version where the 4 most significant bits are the major version
+* number, the next four bits are the minor version number, and the least
+* significant 8 bits are the revision or build number.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u16 XSysAce_GetVersion(XSysAce * InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ return XSysAce_RegRead16(InstancePtr->BaseAddress + XSA_VR_OFFSET);
+}
--- /dev/null
+/* $Id: xsysace_sinit.c,v 1.1 2006/02/17 21:52:36 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xsysace_sinit.c
+*
+* The implementation of the XSysAce component's static initialization
+* functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.01a jvb 10/13/05 First release
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xstatus.h"
+#include "xparameters.h"
+#include "xsysace.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Looks up the device configuration based on the unique device ID. The table
+* XSysAce_ConfigTable contains the configuration info for each device in the
+* system.
+*
+* @param DeviceId is the unique device ID to look for.
+*
+* @return
+*
+* A pointer to the configuration data for the device, or NULL if no match is
+* found.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+XSysAce_Config *XSysAce_LookupConfig(u16 DeviceId)
+{
+ extern XSysAce_Config XSysAce_ConfigTable[];
+ XSysAce_Config *CfgPtr = NULL;
+ int i;
+
+ for (i = 0; i < XPAR_XSYSACE_NUM_INSTANCES; i++) {
+ if (XSysAce_ConfigTable[i].DeviceId == DeviceId) {
+ CfgPtr = &XSysAce_ConfigTable[i];
+ break;
+ }
+ }
+
+ return CfgPtr;
+}
+
+/*****************************************************************************/
+/**
+*
+* Initialize a specific XSysAce instance. The configuration information for
+* the given device ID is found and the driver instance data is initialized
+* appropriately.
+*
+* @param InstancePtr is a pointer to the XSysAce instance to be worked on.
+* @param DeviceId is the unique id of the device controlled by this XSysAce
+* instance.
+*
+* @return
+*
+* XST_SUCCESS if successful, or XST_DEVICE_NOT_FOUND if the device was not
+* found in the configuration table in xsysace_g.c.
+*
+* @note
+*
+* We do not want to reset the configuration controller here since this could
+* cause a reconfiguration of the JTAG target chain, depending on how the
+* CFGMODEPIN of the device is wired.
+*
+******************************************************************************/
+int XSysAce_Initialize(XSysAce * InstancePtr, u16 DeviceId)
+{
+ XSysAce_Config *ConfigPtr;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ InstancePtr->IsReady = 0;
+
+ /*
+ * Lookup configuration data in the device configuration table.
+ * Use this configuration info down below when initializing this component.
+ */
+ ConfigPtr = XSysAce_LookupConfig(DeviceId);
+
+ if (ConfigPtr == (XSysAce_Config *) NULL) {
+ return XST_DEVICE_NOT_FOUND;
+ }
+
+ return XSysAce_CfgInitialize(InstancePtr, ConfigPtr,
+ ConfigPtr->BaseAddress);
+}
To compile this driver as a module, choose M here: the
module will be called dtlk.
+config XILINX_GPIO
+ tristate "Xilinx OPB GPIO Support"
+ depends on XILINX_DRIVERS
+ select XILINX_EDK
+ help
+ This option enables support for Xilinx GPIO.
+
config XILINX_HWICAP
tristate "Xilinx HWICAP Support"
- depends on XILINX_VIRTEX
+ depends on XILINX_DRIVERS
help
This option enables support for Xilinx Internal Configuration
Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
obj-$(CONFIG_SGI_DS1286) += ds1286.o
obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
obj-$(CONFIG_DS1302) += ds1302.o
+obj-$(CONFIG_XILINX_GPIO) += xilinx_gpio/
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
ifeq ($(CONFIG_GENERIC_NVRAM),y)
obj-$(CONFIG_NVRAM) += generic_nvram.o
--- /dev/null
+#
+# Makefile for the Xilinx OPB GPIO driver
+#
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/xilinx_common
+
+obj-$(CONFIG_XILINX_GPIO) := xilinx_gpio.o
+
+# The Linux adapter for the Xilinx driver code.
+xilinx_gpio-objs := adapter.o
+
+# The Xilinx OS independent code.
+xilinx_gpio-objs += xgpio.o
--- /dev/null
+/*
+ * adapter.c
+ *
+ * Xilinx GPIO Adapter component to interface GPIO component to Linux
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002-2006 (c)MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+/*
+ * This driver is a bit unusual in that it is composed of two logical
+ * parts where one part is the OS independent code and the other part is
+ * the OS dependent code. Xilinx provides their drivers split in this
+ * fashion. This file represents the Linux OS dependent part known as
+ * the Linux adapter. The other files in this directory are the OS
+ * independent files as provided by Xilinx with no changes made to them.
+ * The names exported by those files begin with XGpio_. All functions
+ * in this file that are called by Linux have names that begin with
+ * xgpio_. Any other functions are static helper functions.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/xilinx_devices.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "xgpio.h"
+#include "xgpio_ioctl.h"
+
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+
+#define BUFSIZE 200
+#define MIN(x,y) (x < y ? x : y)
+
+
+struct xgpio_instance {
+ struct list_head link;
+ unsigned long base_phys; /* GPIO base address - physical */
+ unsigned long remap_size;
+ u32 device_id;
+ wait_queue_head_t wait;
+ unsigned int head, tail, count;
+ __u64 buf[BUFSIZE]; /* 32xChan1, 32xChan2 */
+ struct miscdevice *miscdev;
+ /*
+ * The underlying OS independent code needs space as well. A
+ * pointer to the following XGpio structure will be passed to
+ * any XGpio_ function that requires it. However, we try to treat the
+ * data as an opaque object in this file (meaning that we never
+ * reference any of the fields inside of the structure).
+ */
+ XGpio gpio;
+};
+
+/* SAATODO: This function will be moved into the Xilinx code. */
+/****************************************************************************/
+/**
+* Get the input/output direction of all discrete signals.
+*
+* @param InstancePtr is a pointer to an XGpio instance to be worked on.
+*
+* @return Current copy of the tristate (direction) register.
+*
+* @note
+*
+* None
+*
+*****************************************************************************/
+u32 XGpio_GetDataDirection(XGpio *InstancePtr, unsigned Channel)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ return XGpio_mReadReg(InstancePtr->BaseAddress,
+ (Channel - 1) * XGPIO_CHAN_OFFSET +
+ XGPIO_TRI_OFFSET);
+}
+
+inline int XGpio_IsReady(XGpio *InstancePtr)
+{
+ return InstancePtr->IsReady == XCOMPONENT_IS_READY;
+}
+
+static LIST_HEAD(inst_list);
+static DECLARE_RWSEM(inst_list_sem);
+
+/*******************
+ * The misc device *
+ *******************/
+
+static int xgpio_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int xgpio_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static struct xgpio_instance *xgpio_getinst(unsigned int minor)
+{
+ struct list_head *entry;
+ struct xgpio_instance *inst;
+
+ down_read(&inst_list_sem);
+
+ list_for_each(entry, &inst_list) {
+ inst = list_entry(entry, struct xgpio_instance, link);
+
+ if (minor == inst->miscdev->minor) {
+ up_read(&inst_list_sem);
+ if (XGpio_IsReady(&inst->gpio)) {
+ return inst;
+ }
+ else {
+ return NULL;
+ }
+ }
+ }
+
+ up_read(&inst_list_sem);
+ return NULL;
+}
+
+static int xgpio_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ struct xgpio_ioctl_data ioctl_data;
+ struct xgpio_instance *inst = xgpio_getinst(minor);
+ u32 r;
+
+ if (copy_from_user(&ioctl_data, (void *) arg, sizeof(ioctl_data)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case XGPIO_IN:
+ /*
+ * Ensure that the GPIO bits in the mask are tristated.
+ * Unlike IBM OCP GPIO, one needs to set the bits in the
+ * Tristate (direction) Register to make the corresponding
+ * GPIOs to be inputs.
+ */
+ r = XGpio_GetDataDirection(&inst->gpio, ioctl_data.chan);
+ XGpio_SetDataDirection(&inst->gpio, ioctl_data.chan,
+ r | ioctl_data.mask);
+
+ ioctl_data.data =
+ (XGpio_DiscreteRead(&inst->gpio, ioctl_data.chan)
+ & ioctl_data.mask);
+ if (copy_to_user((struct xgpio_ioctl_data *) arg,
+ &ioctl_data, sizeof(ioctl_data))) {
+ return -EFAULT;
+ }
+ break;
+
+ case XGPIO_OUT:
+ /* Get the prior value. */
+ r = XGpio_DiscreteRead(&inst->gpio, ioctl_data.chan);
+ /* Clear the bits that we're going to put in. */
+ r &= ~ioctl_data.mask;
+ /* Set the bits that were provided. */
+ r |= (ioctl_data.mask & ioctl_data.data);
+
+ XGpio_DiscreteWrite(&inst->gpio, ioctl_data.chan, r);
+
+ /*
+ * Ensure that the GPIO bits in the mask are not tristated.
+ * Unlike IBM OCP GPIO, one needs to clear the bits in the
+ * Tristate (direction) Register to make the corresponding
+ * GPIOs to be outputs.
+ */
+ r = XGpio_GetDataDirection(&inst->gpio, ioctl_data.chan);
+ XGpio_SetDataDirection(&inst->gpio, ioctl_data.chan,
+ r & ~ioctl_data.mask);
+
+ break;
+
+ case XGPIO_TRISTATE:
+ /* Get the prior value. */
+ r = XGpio_GetDataDirection(&inst->gpio, ioctl_data.chan);
+ /* Clear the bits that we're going to put in. */
+ r &= ~ioctl_data.mask;
+ /*
+ * Set the bits that were provided.
+ * Note that "1" makes the corresponding GPIO pin to tristate.
+ * To keep the interface the same as for IBM OCP GPIO
+ * we invert ioctl_data.data before writing them to the
+ * Tristate Register.
+ */
+ r |= (ioctl_data.mask & ~ioctl_data.data);
+
+ XGpio_SetDataDirection(&inst->gpio, ioctl_data.chan, r);
+ break;
+
+ case XGPIO_OPEN_DRAIN:
+ /* This can be implemented by configuring a pin as
+ * output when it is "0", and tristating a pin when
+ * it is "1". Now just fall through. */
+
+ default:
+ return -ENOIOCTLCMD;
+
+ }
+ return 0;
+}
+
+static unsigned int next(unsigned int ptr)
+{
+ ptr++;
+ if (ptr >= BUFSIZE)
+ ptr = 0;
+ return ptr;
+}
+
+static unsigned int prev(unsigned int ptr)
+{
+ if (ptr == 0)
+ ptr = (BUFSIZE - 1);
+ else
+ ptr--;
+
+ return ptr;
+}
+
+
+static ssize_t xgpio_read(struct file *file, char *buf,
+ size_t count, loff_t * ppos)
+{
+ int retval = 0;
+ unsigned long flags, min, cur = 0;
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ struct xgpio_instance *inst = xgpio_getinst(minor);
+ __u64 *ubuf = (__u64 *) buf;
+
+ if (inst == 0)
+ return -EAGAIN;
+ if (count % 8)
+ return -EAGAIN;
+
+ if (count >= 8) {
+ if (!inst->count && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ if (!inst->count) {
+ retval = wait_event_interruptible(inst->wait,
+ inst->count);
+ if (retval)
+ return retval;
+ }
+
+ local_irq_save(flags);
+ min = MIN(count / 8, inst->count);
+ while (min) {
+ ubuf[cur] = inst->buf[inst->head];
+ inst->head = next(inst->head);
+ inst->count--;
+ min--;
+ cur++;
+ }
+ local_irq_restore(flags);
+ return cur * 8;
+ }
+ return 0;
+}
+
+
+static irqreturn_t xgpio_interrupt(int irq, void *dev_id)
+{
+ struct xgpio_instance *inst = dev_id;
+ __u32 val_1 = XIo_In32(inst->gpio.BaseAddress);
+ __u32 val_2 =
+ inst->gpio.IsDual ? XIo_In32(inst->gpio.BaseAddress + 0x08) : 0;
+ __u32 int_status = XIo_In32(inst->gpio.BaseAddress + 0x120);
+
+
+ if (inst->buf[prev(inst->tail)] !=
+ (((__u64) val_1) | ((__u64) (val_2) << 32)))
+ if (next(inst->tail) != inst->head) {
+ inst->buf[inst->tail] = (__u64) val_1;
+ inst->buf[inst->tail] |= (__u64) (val_2) << 32;
+ inst->tail = next(inst->tail);
+ inst->count++;
+ wake_up_interruptible(&inst->wait);
+ }
+
+ XIo_Out32(inst->gpio.BaseAddress + 0x120, int_status); /* IP ISR (acknowledge sources) */
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * We get to all of the GPIOs through one minor number. Here's the
+ * miscdevice that gets registered for that minor number.
+ */
+
+static struct file_operations xgpio_fops = {
+ owner:THIS_MODULE,
+ ioctl:xgpio_ioctl,
+ open:xgpio_open,
+ read:xgpio_read,
+ release:xgpio_release
+};
+
+char *names[] = {
+ "xgpio0",
+ "xgpio1",
+ "xgpio2",
+ "xgpio3",
+ "xgpio4",
+ "xgpio5",
+ "xgpio6"
+};
+
+/******************************
+ * The platform device driver *
+ ******************************/
+
+#define DRIVER_NAME "xilinx_gpio"
+
+static int minor = XGPIO_MINOR;
+
+static int xgpio_probe(struct device *dev)
+{
+ XGpio_Config xgpio_config;
+ struct xgpio_instance *xgpio_inst;
+ struct miscdevice *miscdev = 0;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *irq_res, *regs_res;
+ void *v_addr;
+ int retval;
+
+ if (!dev)
+ return -EINVAL;
+
+ memset(&xgpio_config, 0, sizeof(XGpio_Config));
+ xgpio_inst = kmalloc(sizeof(struct xgpio_instance), GFP_KERNEL);
+ if (!xgpio_inst) {
+ printk(KERN_ERR
+ "%s #%d: Couldn't allocate device private record\n",
+ miscdev->name, pdev->id);
+ return -ENOMEM;
+ }
+ memset(xgpio_inst, 0, sizeof(struct xgpio_instance));
+
+ /* Map the control registers in */
+ regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs_res || (regs_res->end - regs_res->start + 1 < 8)) {
+ printk(KERN_ERR "%s #%d: Couldn't get registers resource\n",
+ miscdev->name, pdev->id);
+ retval = -EFAULT;
+ goto failed1;
+ }
+
+ xgpio_inst->remap_size = regs_res->end - regs_res->start + 1;
+ if (!request_mem_region(regs_res->start, xgpio_inst->remap_size,
+ DRIVER_NAME)) {
+ printk(KERN_ERR "Couldn't lock memory region at 0x%08lX\n",
+ (unsigned long) regs_res->start);
+ retval = -EBUSY;
+ goto failed2;
+ }
+
+ v_addr = ioremap(regs_res->start, xgpio_inst->remap_size);
+ if (!v_addr) {
+ printk(KERN_ERR "Couldn't ioremap memory at 0x%08lX\n",
+ (unsigned long) regs_res->start);
+ retval = -EFAULT;
+ goto failed3;
+ }
+
+ xgpio_inst->base_phys = regs_res->start;
+ /* The 1st GPIO channel uses... (comment truncated in original; confirm intent) */
+ xgpio_inst->device_id = pdev->id;
+ xgpio_config.DeviceId = pdev->id;
+ xgpio_config.IsDual =
+ ((unsigned) (dev->platform_data) & XGPIO_IS_DUAL) ? 1 : 0;
+
+ /* Tell the Xilinx code to bring this GPIO interface up. */
+ if (XGpio_CfgInitialize(&xgpio_inst->gpio, &xgpio_config,
+ (u32) v_addr) != XST_SUCCESS) {
+ printk(KERN_ERR "%s #%d: Could not initialize instance.\n",
+ miscdev->name, pdev->id);
+ retval = -ENODEV;
+ goto failed3;
+ }
+
+ /* Add XGpio instance to the list */
+ down_write(&inst_list_sem);
+
+ miscdev = kmalloc(sizeof(struct miscdevice), GFP_KERNEL);
+ if (!miscdev) {
+ printk(KERN_ERR
+ "%s #%d: Couldn't allocate device private record\n",
+ "xgpio", pdev->id);
+ return -ENOMEM;
+ }
+
+ memset(miscdev, 0, sizeof(struct miscdevice));
+ miscdev->minor = minor;
+ miscdev->name = names[minor - XGPIO_MINOR];
+ miscdev->fops = &xgpio_fops;
+ retval = misc_register(miscdev);
+ if (retval != 0) {
+ up_write(&inst_list_sem);
+ printk(KERN_ERR "%s #%d: Could not register miscdev.\n",
+ miscdev->name, pdev->id);
+ goto failed3;
+ }
+
+ xgpio_inst->miscdev = miscdev;
+
+ minor++;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_res) {
+ if (request_irq(irq_res->start,
+ xgpio_interrupt, 0, "XGPIO", xgpio_inst))
+ goto failed4;
+
+ XIo_Out32(v_addr + 0x11C, 0x80000000); /* OPB GIE (enable interrupts globally) */
+ XIo_Out32(v_addr + 0x128, 0x00000003); /* IP IER (enable for both channels) */
+ }
+
+ init_waitqueue_head(&xgpio_inst->wait);
+
+ list_add_tail(&xgpio_inst->link, &inst_list);
+ up_write(&inst_list_sem);
+
+ printk(KERN_INFO "%s #%d at 0x%08lX mapped to 0x%08X device: %u,%u",
+ miscdev->name, xgpio_inst->device_id,
+ xgpio_inst->base_phys,
+ (unsigned int) xgpio_inst->gpio.BaseAddress, MISC_MAJOR,
+ miscdev->minor);
+
+ if (irq_res)
+ printk(" using IRQ#%lu\r\n", (unsigned long) irq_res->start);
+ else
+ printk(" not using IRQ\r\n");
+
+ return 0; /* success */
+
+ failed4:
+ misc_deregister(miscdev);
+ minor--;
+
+ failed3:
+ iounmap((void *) (xgpio_config.BaseAddress));
+
+ failed2:
+ release_mem_region(regs_res->start, xgpio_inst->remap_size);
+
+ failed1:
+ kfree(xgpio_inst);
+
+ return retval;
+}
+
+static int xgpio_remove(struct device *dev)
+{
+ struct list_head *entry;
+ struct xgpio_instance *xgpio_inst = NULL;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ /* Set xgpio_inst based on pdev->id match */
+
+ down_read(&inst_list_sem);
+ list_for_each(entry, &inst_list) {
+ xgpio_inst = list_entry(entry, struct xgpio_instance, link);
+ if (pdev->id == xgpio_inst->device_id) {
+ break;
+ }
+ else {
+ xgpio_inst = NULL;
+ }
+ }
+ up_read(&inst_list_sem);
+
+ if (xgpio_inst == NULL)
+ return -ENODEV;
+
+ /* Remove the private data from the list */
+ down_write(&inst_list_sem);
+ list_del(&xgpio_inst->link);
+ if (list_empty(&inst_list)) {
+ misc_deregister(xgpio_inst->miscdev);
+ minor--;
+ }
+ up_write(&inst_list_sem);
+
+ iounmap((void *) (xgpio_inst->gpio.BaseAddress));
+
+ release_mem_region(xgpio_inst->base_phys, xgpio_inst->remap_size);
+
+ kfree(xgpio_inst);
+
+ return 0; /* success */
+}
+
+static struct device_driver xgpio_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+
+ .probe = xgpio_probe,
+ .remove = xgpio_remove
+};
+
+static int __init xgpio_init(void)
+{
+ /*
+ * No kernel boot options used,
+ * so we just need to register the driver
+ */
+ return driver_register(&xgpio_driver);
+}
+
+static void __exit xgpio_cleanup(void)
+{
+ driver_unregister(&xgpio_driver);
+}
+
+module_init(xgpio_init);
+module_exit(xgpio_cleanup);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
--- /dev/null
+/* $Id: xgpio.c,v 1.1.2.1 2007/02/16 10:03:28 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 - 2005 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/**
+* @file xgpio.c
+*
+* The implementation of the XGpio component's basic functionality. See xgpio.h
+* for more information about the component.
+*
+* @note
+*
+* None
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rmm 02/04/02 First release
+* 2.00a jhl 12/16/02 Update for dual channel and interrupt support
+* 2.01a jvb 12/13/05 I changed Initialize() into CfgInitialize(), and made
+* CfgInitialize() take a pointer to a config structure
+* instead of a device id. I moved Initialize() into
+* xgpio_sinit.c, and had Initialize() call CfgInitialize()
+* after it retrieved the config structure using the device
+* id. I removed include of xparameters.h along with any
+* dependencies on xparameters.h and the _g.c config table.
+*
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files ********************************/
+
+#include "xgpio.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions ******************************/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/************************** Variable Definitions ****************************/
+
+
+/************************** Function Prototypes *****************************/
+
+
+/****************************************************************************/
+/**
+* Initialize the XGpio instance provided by the caller based on the
+* given configuration data.
+*
+* Nothing is done except to initialize the InstancePtr.
+*
+* @param InstancePtr is a pointer to an XGpio instance. The memory the pointer
+* references must be pre-allocated by the caller. Further calls to
+* manipulate the component through the XGpio API must be made with this
+* pointer.
+*
+* @param Config is a reference to a structure containing information about
+* a specific GPIO device. This function initializes an InstancePtr object
+* for a specific device specified by the contents of Config. This
+* function can initialize multiple instance objects with the use of
+* multiple calls giving different Config information on each call.
+*
+* @param EffectiveAddr is the device base address in the virtual memory address
+* space. The caller is responsible for keeping the address mapping
+* from EffectiveAddr to the device physical base address unchanged
+* once this function is invoked. Unexpected errors may occur if the
+* address mapping changes after this function is called. If address
+* translation is not used, use Config->BaseAddress for this parameter,
+* passing the physical address instead.
+*
+* @return
+*
+* - XST_SUCCESS Initialization was successful.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+int XGpio_CfgInitialize(XGpio *InstancePtr, XGpio_Config *Config,
+ u32 EffectiveAddr)
+{
+ /*
+ * Assert arguments. Config is dereferenced below, so it must be
+ * validated along with the instance pointer.
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(Config != NULL);
+
+ /*
+ * Set some default values.
+ */
+ InstancePtr->BaseAddress = EffectiveAddr;
+ InstancePtr->InterruptPresent = Config->InterruptPresent;
+ InstancePtr->IsDual = Config->IsDual;
+
+ /*
+ * Indicate the instance is now ready to use, initialized without error
+ */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+ return (XST_SUCCESS);
+}
+
+
+/****************************************************************************/
+/**
+* Set the input/output direction of all discrete signals for the specified
+* GPIO channel.
+*
+* @param InstancePtr is a pointer to an XGpio instance to be worked on.
+* @param Channel contains the channel of the GPIO (1 or 2) to operate on.
+* @param DirectionMask is a bitmask specifying which discretes are input and
+* which are output. Bits set to 0 are output and bits set to 1 are input.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The hardware must be built for dual channels if this function is used
+* with any channel other than 1. If it is not, this function will assert.
+*
+*****************************************************************************/
+void XGpio_SetDataDirection(XGpio *InstancePtr, unsigned Channel,
+ u32 DirectionMask)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID((Channel == 1) ||
+ ((Channel == 2) && (InstancePtr->IsDual == TRUE)));
+
+ /* Channel register banks are XGPIO_CHAN_OFFSET bytes apart;
+ * write this channel's tri-state (direction) register.
+ */
+ XGpio_mWriteReg(InstancePtr->BaseAddress,
+ ((Channel - 1) * XGPIO_CHAN_OFFSET) + XGPIO_TRI_OFFSET,
+ DirectionMask);
+}
+
+
+/****************************************************************************/
+/**
+* Read state of discretes for the specified GPIO channel.
+*
+* @param InstancePtr is a pointer to an XGpio instance to be worked on.
+* @param Channel contains the channel of the GPIO (1 or 2) to operate on.
+*
+* @return Current copy of the discretes register.
+*
+* @note
+*
+* The hardware must be built for dual channels if this function is used
+* with any channel other than 1. If it is not, this function will assert.
+*
+*****************************************************************************/
+u32 XGpio_DiscreteRead(XGpio *InstancePtr, unsigned Channel)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_NONVOID((Channel == 1) ||
+ ((Channel == 2) && (InstancePtr->IsDual == TRUE)));
+
+ /* Read this channel's data register (banks are
+ * XGPIO_CHAN_OFFSET bytes apart).
+ */
+ return XGpio_mReadReg(InstancePtr->BaseAddress,
+ ((Channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET);
+}
+
+/****************************************************************************/
+/**
+* Write to discretes register for the specified GPIO channel.
+*
+* @param InstancePtr is a pointer to an XGpio instance to be worked on.
+* @param Channel contains the channel of the GPIO (1 or 2) to operate on.
+* @param Data is the value to be written to the discretes register.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The hardware must be built for dual channels if this function is used
+* with any channel other than 1. If it is not, this function will assert.
+* See also XGpio_DiscreteSet() and XGpio_DiscreteClear().
+*
+*****************************************************************************/
+void XGpio_DiscreteWrite(XGpio *InstancePtr, unsigned Channel, u32 Data)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID((Channel == 1) ||
+ ((Channel == 2) && (InstancePtr->IsDual == TRUE)));
+
+ /* Overwrite this channel's entire data register; for
+ * read-modify-write of individual bits see XGpio_DiscreteSet()
+ * and XGpio_DiscreteClear().
+ */
+ XGpio_mWriteReg(InstancePtr->BaseAddress,
+ ((Channel - 1) * XGPIO_CHAN_OFFSET) + XGPIO_DATA_OFFSET,
+ Data);
+}
--- /dev/null
+/* $Id: xgpio.h,v 1.1.2.1 2007/02/16 10:03:28 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 - 2005 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+* @file xgpio.h
+*
+* This file contains the software API definition of the Xilinx General Purpose
+* I/O (XGpio) device driver component.
+*
+* The Xilinx GPIO controller is a soft IP core designed for Xilinx FPGAs on
+* the OPB or PLB bus and contains the following general features:
+* - Support for up to 32 I/O discretes for each channel (64 bits total).
+* - Each of the discretes can be configured for input or output.
+* - Configurable support for dual channels and interrupt generation.
+*
+* The driver provides interrupt management functions. Implementation of
+* interrupt handlers is left to the user. Refer to the provided interrupt
+* example in the examples directory for details.
+*
+* This driver is intended to be RTOS and processor independent. Any needs for
+* dynamic memory management, threads or thread mutual exclusion, virtual
+* memory, or cache control must be satisfied by the layer above this driver.
+*
+* <b>Initialization & Configuration</b>
+*
+* The XGpio_Config structure is used by the driver to configure itself. This
+* configuration structure is typically created by the tool-chain based on HW
+* build properties.
+*
+* To support multiple runtime loading and initialization strategies employed
+* by various operating systems, the driver instance can be initialized in one
+* of the following ways:
+*
+* - XGpio_Initialize(InstancePtr, DeviceId) - The driver looks up its own
+* configuration structure created by the tool-chain based on an ID provided
+* by the tool-chain.
+*
+* - XGpio_CfgInitialize(InstancePtr, CfgPtr, EffectiveAddr) - Uses a
+* configuration structure provided by the caller. If running in a system
+* with address translation, the provided virtual memory base address
+* replaces the physical address present in the configuration structure.
+*
+* @note
+*
+* This API utilizes 32 bit I/O to the GPIO registers. With less than 32 bits,
+* the unused bits from registers are read as zero and written as don't cares.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rmm 03/13/02 First release
+* 2.00a jhl 11/26/03 Added support for dual channels and interrupts
+* 2.01a jvb 12/14/05 I separated dependency on the static config table and
+* xparameters.h from the driver initialization by moving
+* _Initialize and _LookupConfig to _sinit.c. I also added
+* the new _CfgInitialize routine.
+* </pre>
+*****************************************************************************/
+
+#ifndef XGPIO_H /* prevent circular inclusions */
+#define XGPIO_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xgpio_l.h"
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions ******************************/
+
+/**
+ * This typedef contains configuration information for the device.
+ */
+typedef struct {
+ u16 DeviceId; /* Unique ID of device */
+ u32 BaseAddress; /* Device base address (physical) */
+ int InterruptPresent; /* Boolean: interrupts supported in h/w */
+ int IsDual; /* Boolean: 2 channels supported in h/w */
+} XGpio_Config;
+
+/**
+ * The XGpio driver instance data. The user is required to allocate a
+ * variable of this type for every GPIO device in the system. A pointer
+ * to a variable of this type is then passed to the driver API functions.
+ */
+typedef struct {
+ u32 BaseAddress; /* Device base address (effective/virtual) */
+ u32 IsReady; /* XCOMPONENT_IS_READY after CfgInitialize */
+ int InterruptPresent; /* Boolean: interrupts supported in h/w */
+ int IsDual; /* Boolean: 2 channels supported in h/w */
+} XGpio;
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+
+/************************** Function Prototypes *****************************/
+
+/*
+ * Initialization functions in xgpio_sinit.c
+ */
+int XGpio_Initialize(XGpio *InstancePtr, u16 DeviceId);
+XGpio_Config *XGpio_LookupConfig(u16 DeviceId);
+
+/*
+ * API Basic functions implemented in xgpio.c
+ */
+int XGpio_CfgInitialize(XGpio *InstancePtr, XGpio_Config *Config,
+ u32 EffectiveAddr);
+void XGpio_SetDataDirection(XGpio *InstancePtr, unsigned Channel,
+ u32 DirectionMask);
+u32 XGpio_DiscreteRead(XGpio *InstancePtr, unsigned Channel);
+void XGpio_DiscreteWrite(XGpio *InstancePtr, unsigned Channel, u32 Mask);
+
+
+/*
+ * API Functions implemented in xgpio_extra.c
+ */
+void XGpio_DiscreteSet(XGpio *InstancePtr, unsigned Channel, u32 Mask);
+void XGpio_DiscreteClear(XGpio *InstancePtr, unsigned Channel, u32 Mask);
+
+/*
+ * API Functions implemented in xgpio_selftest.c
+ */
+int XGpio_SelfTest(XGpio *InstancePtr);
+
+/*
+ * API Functions implemented in xgpio_intr.c
+ */
+void XGpio_InterruptGlobalEnable(XGpio *InstancePtr);
+void XGpio_InterruptGlobalDisable(XGpio *InstancePtr);
+void XGpio_InterruptEnable(XGpio *InstancePtr, u32 Mask);
+void XGpio_InterruptDisable(XGpio *InstancePtr, u32 Mask);
+void XGpio_InterruptClear(XGpio *InstancePtr, u32 Mask);
+u32 XGpio_InterruptGetEnabled(XGpio *InstancePtr);
+u32 XGpio_InterruptGetStatus(XGpio *InstancePtr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/*
+ * xgpio_ioctl.h
+ *
+ * ioctl numbers and data structure for Xilinx GPIO driver.
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2005 (c)MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Copied from ibm_ocp_gpio.h written by
+ *
+ * Armin Kuster akuster@pacbell.net
+ * Sept, 2001
+ *
+ * Original driver
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ * Frank Rowand <frank_rowand@mvista.com>
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ */
+
+#ifndef __XGPIO_IOCTL_H
+#define __XGPIO_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define XGPIO_IOCTL_BASE 'Z'
+
+struct xgpio_ioctl_data {
+ __u32 chan; /* GPIO channel to operate on (presumably 1 or 2,
+ matching the XGpio driver channels — confirm) */
+ __u32 mask; /* bitmask selecting the discretes affected */
+ __u32 data; /* data value read from / written to the channel */
+};
+
+#define XGPIO_MINOR 185
+#define XGPIO_IN _IOWR(XGPIO_IOCTL_BASE, 0, struct xgpio_ioctl_data)
+#define XGPIO_OUT _IOW (XGPIO_IOCTL_BASE, 1, struct xgpio_ioctl_data)
+#define XGPIO_OPEN_DRAIN _IOW (XGPIO_IOCTL_BASE, 2, struct xgpio_ioctl_data)
+#define XGPIO_TRISTATE _IOW (XGPIO_IOCTL_BASE, 3, struct xgpio_ioctl_data)
+
+#endif /* __XGPIO_IOCTL_H */
--- /dev/null
+/* $Id: xgpio_l.h,v 1.1.2.1 2007/02/16 10:03:29 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 - 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xgpio_l.h
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the device. The user should refer to the
+* hardware device specification for more details of the device operation.
+* High-level driver functions are defined in xgpio.h.
+*
+* The macros that are available in this file use a multiply to calculate the
+* addresses of registers. The user can control whether that multiply is done
+* at run time or at compile time. A constant passed as the channel parameter
+* will cause the multiply to be done at compile time. A variable passed as the
+* channel parameter will cause it to occur at run time.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a jhl 04/24/02 First release of low level driver
+* 2.00a jhl 11/26/03 Added support for dual channels and interrupts. This
+* change required the functions to be changed such that
+* the interface is not compatible with previous versions.
+* See the examples in the example directory for macros
+* to help compile an application that was designed for
+* previous versions of the driver. The interrupt registers
+* are accessible using the ReadReg and WriteReg macros and
+* a channel parameter was added to the other macros.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XGPIO_L_H /* prevent circular inclusions */
+#define XGPIO_L_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+/** @name Registers
+ *
+ * Register offsets for this device. This device utilizes IPIF interrupt
+ * registers.
+ * @{
+ */
+#define XGPIO_DATA_OFFSET 0x0 /**< Data register for 1st channel */
+#define XGPIO_TRI_OFFSET 0x4 /**< I/O direction register for 1st channel */
+#define XGPIO_DATA2_OFFSET 0x8 /**< Data register for 2nd channel */
+#define XGPIO_TRI2_OFFSET 0xC /**< I/O direction register for 2nd channel */
+
+#define XGPIO_GIER_OFFSET 0x11C /**< Global interrupt enable register */
+#define XGPIO_ISR_OFFSET 0x120 /**< Interrupt status register */
+#define XGPIO_IER_OFFSET 0x128 /**< Interrupt enable register */
+
+/* @} */
+
+/* The following constant describes the offset of each channels data and
+ * tristate register from the base address.
+ */
+#define XGPIO_CHAN_OFFSET 8
+
+/** @name Interrupt Status and Enable Register bitmaps and masks
+ *
+ * Bit definitions for the interrupt status register and interrupt enable
+ * registers.
+ * @{
+ */
+#define XGPIO_IR_MASK 0x3 /**< Mask of all bits */
+#define XGPIO_IR_CH1_MASK 0x1 /**< Mask for the 1st channel */
+#define XGPIO_IR_CH2_MASK 0x2 /**< Mask for the 2nd channel */
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/****************************************************************************/
+/**
+*
+* Write a value to a GPIO register. A 32 bit write is performed. If the
+* GPIO component is implemented in a smaller width, only the least
+* significant data is written.
+*
+* @param BaseAddress is the base address of the GPIO device.
+* @param RegOffset is the register offset from the base to write to.
+* @param Data is the data written to the register.
+*
+* @return None.
+*
+* @note None.
+*
+* C-style signature:
+* void XGpio_mWriteReg(u32 BaseAddress, unsigned RegOffset,
+* u32 Data)
+*
+****************************************************************************/
+#define XGpio_mWriteReg(BaseAddress, RegOffset, Data) \
+ XIo_Out32((BaseAddress) + (RegOffset), (u32)(Data))
+
+/****************************************************************************/
+/**
+*
+* Read a value from a GPIO register. A 32 bit read is performed. If the
+* GPIO component is implemented in a smaller width, only the least
+* significant data is read from the register. The most significant data
+* will be read as 0.
+*
+* @param BaseAddress is the base address of the GPIO device.
+* @param RegOffset is the register offset from the base to read from.
+*
+* @return The 32-bit value read from the register.
+*
+* @note None.
+*
+* C-style signature:
+* u32 XGpio_mReadReg(u32 BaseAddress, unsigned RegOffset)
+*
+****************************************************************************/
+#define XGpio_mReadReg(BaseAddress, RegOffset) \
+ XIo_In32((BaseAddress) + (RegOffset))
+
+/*****************************************************************************
+*
+* Set the input/output direction of the signals of the specified GPIO channel.
+*
+* @param BaseAddress contains the base address of the GPIO device.
+* @param Channel contains the channel (1 or 2) to operate on.
+* @param DirectionMask is a bitmask specifying which discretes are input and
+* which are output. Bits set to 0 are output and bits set to 1 are
+* input.
+*
+* @return None.
+*
+* @note None.
+*
+* C-style signature:
+* void XGpio_mSetDataDirection(u32 BaseAddress, unsigned Channel,
+* u32 DirectionMask)
+*
+******************************************************************************/
+#define XGpio_mSetDataDirection(BaseAddress, Channel, DirectionMask) \
+ XGpio_mWriteReg((BaseAddress), \
+ (((Channel) - 1) * XGPIO_CHAN_OFFSET) + XGPIO_TRI_OFFSET, \
+ (DirectionMask))
+
+/****************************************************************************/
+/**
+* Get the data register of the specified GPIO channel.
+*
+* @param BaseAddress contains the base address of the GPIO device.
+* @param Channel contains the channel (1 or 2) to operate on.
+*
+* @return The contents of the data register.
+*
+* @note None.
+*
+* C-style signature:
+* u32 XGpio_mGetDataReg(u32 BaseAddress, unsigned Channel)
+*
+*****************************************************************************/
+#define XGpio_mGetDataReg(BaseAddress, Channel) \
+ XGpio_mReadReg((BaseAddress), \
+ (((Channel) - 1) * XGPIO_CHAN_OFFSET) + XGPIO_DATA_OFFSET)
+
+/****************************************************************************/
+/**
+* Set the data register of the specified GPIO channel.
+*
+* @param BaseAddress contains the base address of the GPIO device.
+* @param Channel contains the channel (1 or 2) to operate on.
+* @param Data is the value to be written to the data register.
+*
+* @return None.
+*
+* @note None.
+*
+* C-style signature:
+* void XGpio_mSetDataReg(u32 BaseAddress, unsigned Channel,
+* u32 Data)
+*
+*****************************************************************************/
+#define XGpio_mSetDataReg(BaseAddress, Channel, Data) \
+ XGpio_mWriteReg((BaseAddress), \
+ (((Channel) - 1) * XGPIO_CHAN_OFFSET) + XGPIO_DATA_OFFSET,\
+ (Data))
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
#include "buffer_icap.h"
+/* Number of times to poll the done register. This has to be large
+ enough to allow an entire configuration to complete. If an entire
+ page (4kb) is configured at once, that could take up to 4k cycles
+ with a byte-wide icap interface. */
+#define XHI_MAX_RETRIES 5000
+
/* Indicates how many bytes will fit in a buffer. (1 BRAM) */
#define XHI_MAX_BUFFER_BYTES 2048
#define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2)
while (buffer_icap_busy(base_address)) {
retries++;
if (retries > XHI_MAX_RETRIES)
- return -EBUSY;
+ return -EIO;
}
return 0;
while (buffer_icap_busy(base_address)) {
retries++;
if (retries > XHI_MAX_RETRIES)
- return -EBUSY;
+ return -EIO;
}
return 0;
#include "fifo_icap.h"
+/* Number of times to poll the done register. This has to be large
+ * enough to allow an entire configuration to complete. If an entire
+ * page (4kb) is configured at once, that could take up to 4k cycles
+ * with a byte-wide icap interface. In most cases, this driver is
+ * used with a much smaller fifo, but this should be sufficient in the
+ * worst case.
+ */
+#define XHI_MAX_RETRIES 5000
+
/* Register offsets for the XHwIcap device. */
#define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */
#define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */
void (*reset)(struct hwicap_drvdata *drvdata);
};
-/* Number of times to poll the done regsiter */
-#define XHI_MAX_RETRIES 10
-
/************ Constant Definitions *************/
#define XHI_PAD_FRAMES 0x1
config I2C_ALGO_SGI
tristate
depends on SGI_IP22 || SGI_IP32 || X86_VISWS
+
+config XILINX_IIC
+ tristate "Xilinx IIC interface"
+ depends on I2C && XILINX_DRIVERS
+ help
+ Supports the Xilinx IIC interface.
obj-$(CONFIG_I2C_ALGOPCF) += i2c-algo-pcf.o
obj-$(CONFIG_I2C_ALGOPCA) += i2c-algo-pca.o
obj-$(CONFIG_I2C_ALGO_SGI) += i2c-algo-sgi.o
+obj-$(CONFIG_XILINX_IIC) += xilinx_iic/
ifeq ($(CONFIG_I2C_DEBUG_ALGO),y)
EXTRA_CFLAGS += -DDEBUG
--- /dev/null
+#
+# Makefile for the Xilinx IIC driver
+#
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/xilinx_common
+
+obj-$(CONFIG_XILINX_IIC) := xilinx_iic.o
+
+# The Linux adapter for the Xilinx driver code.
+xilinx_iic-objs := i2c-algo-xilinx.o
+
+# The Xilinx OS independent code.
+xilinx_iic-objs += xiic.o xiic_options.o xiic_master.o \
+ xiic_intr.o xiic_l.o
--- /dev/null
+/*
+ * i2c-algo-xilinx.c
+ *
+ * Xilinx IIC Adapter component to interface IIC component to Linux
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under the terms
+ * of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+/*
+ * I2C drivers are split into two pieces: the adapter and the algorithm.
+ * The adapter is responsible for actually manipulating the hardware and
+ * the algorithm is the layer above that that handles the higher level
+ * tasks such as transmitting or receiving a buffer. The best example
+ * (in my opinion) of this is the bit banging algorithm has a number of
+ * different adapters that can plug in under it to actually wiggle the
+ * SDA and SCL.
+ *
+ * The interesting part is that the drivers Xilinx provides with their
+ * IP are also split into two pieces where one part is the OS
+ * independent code and the other part is the OS dependent code. All of
+ * the other sources in this directory are the OS independent files as
+ * provided by Xilinx with no changes made to them.
+ *
+ * As it turns out, this maps quite well into the I2C driver philosophy.
+ * This file is the I2C algorithm that communicates with the Xilinx OS
+ * independent function that will serve as our I2C adapter. The
+ * unfortunate part is that the term "adapter" is overloaded in our
+ * context. Xilinx refers to the OS dependent part of a driver as an
+ * adapter. So from an I2C driver perspective, this file is not an
+ * adapter; that role is filled by the Xilinx OS independent files.
+ * From a Xilinx perspective, this file is an adapter; it adapts their
+ * OS independent code to Linux.
+ *
+ * Another thing to consider is that the Xilinx OS dependent code knows
+ * nothing about Linux I2C adapters, so even though this file is billed
+ * as the I2C algorithm, it takes care of the i2c_adapter structure.
+ *
+ * Fortunately, naming conventions will give you a clue as to what comes
+ * from where. Functions beginning with XIic_ are provided by the
+ * Xilinx OS independent files. Functions beginning with i2c_ are
+ * provided by the I2C Linux core. All functions in this file that are
+ * called by Linux have names that begin with xiic_. The functions in
+ * this file that have Handler in their name are registered as callbacks
+ * with the underlying Xilinx OS independent layer. Any other functions
+ * are static helper functions.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/xilinx_devices.h>
+
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "xbasic_types.h"
+#include "xiic.h"
+#include "xiic_i.h"
+
+#if defined(CONFIG_OF)
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx IIC driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(scan, "Scan for active chips on the bus");
+static int scan = 0; /* have a look at what's hanging 'round */
+
+/* SAATODO: actually use these? */
+#define XIIC_TIMEOUT 100
+#define XIIC_RETRY 3
+
+#define XILINX_IIC "xilinx_iic"
+
+static int __devinit xilinx_iic_probe(struct device *device);
+static int __devexit xilinx_iic_remove(struct device *device);
+
+static struct device_driver xilinx_iic_driver = {
+ .bus = &platform_bus_type,
+ .name = XILINX_IIC,
+ .probe = xilinx_iic_probe,
+ .remove = xilinx_iic_remove,
+};
+
+/* Our private per device data. */
+struct xiic_data {
+ struct i2c_adapter adap; /* The Linux I2C core data */
+ int index; /* index taken from platform_device */
+ struct completion complete; /* for waiting for interrupts */
+ u32 base; /* base memory address */
+ unsigned int irq; /* device IRQ number */
+ volatile u32 transmit_intr_flag; /* semaphore across task and interrupt - ECM */
+ volatile u32 receive_intr_flag; /* semaphore across task and interrupt - ECM */
+ volatile u32 status_intr_flag; /* semaphore across task and interrupt - ECM */
+ /*
+ * The underlying OS independent code needs space as well. A
+ * pointer to the following XIic structure will be passed to
+ * any XIic_ function that requires it. However, we treat the
+ * data as an opaque object in this file (meaning that we never
+ * reference any of the fields inside of the structure).
+ */
+ XIic Iic;
+
+ /*
+ * The following bit fields are used to keep track of what
+ * all has been done to initialize the xiic_dev to make
+ * error handling out of probe() easier.
+ */
+ unsigned int reqirq:1; /* Has request_irq() been called? */
+ unsigned int remapped:1; /* Has ioremap() been called? */
+ unsigned int started:1; /* Has XIic_Start() been called? */
+ unsigned int added:1; /* Has i2c_add_adapter() been called? */
+};
+
+/*******************************************************************************
+ * This configuration stuff should become unnecessary after EDK version 8.x is
+ * released.
+ ******************************************************************************/
+
+static DECLARE_MUTEX(cfg_sem);
+/*
+ * I2C master transfer entry point (struct i2c_algorithm.master_xfer).
+ * Processes each message in msgs[] in order: waits for the bus to go
+ * idle (stopping/resetting/restarting the controller if it never
+ * does), programs addressing options and the slave address, then
+ * starts a master send or receive and polls the interrupt-updated
+ * flags until the transfer completes or the retry budget runs out.
+ *
+ * Returns num on success, or a negative errno if the bus could not be
+ * claimed or a transfer could not be started.
+ */
+static int
+xiic_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
+{
+ /* Valid only because adap is the FIRST member of struct
+ * xiic_data, so the adapter and the private data share an
+ * address.
+ */
+ struct xiic_data *dev = (struct xiic_data *) i2c_adap;
+ struct i2c_msg *pmsg;
+ u32 options;
+ int i, retries;
+ u32 Status;
+ u32 writeop;
+
+ for (i = 0; i < num; i++)
+ {
+ pmsg = &msgs[i];
+
+ if (!pmsg->len) /* If length is zero */
+ continue; /* on to the next request. */
+
+ /*
+ * Poll for the bus-busy condition to clear, sleeping
+ * briefly between checks (up to 5 checks with retries = 4).
+ */
+ retries = 4;
+ while((XIic_IsIicBusy(&dev->Iic) == TRUE) &&
+ (retries-- != 0))
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ/250);
+ }
+
+
+ /* If bus is still busy, bail */
+ if (XIic_IsIicBusy(&dev->Iic) == TRUE)
+ {
+ printk(KERN_WARNING
+ "%s #%d: Could not talk to device 0x%2x (%d), bus always busy, trying to reset\n",
+ dev->adap.name, dev->index, pmsg->addr,
+ dev->status_intr_flag);
+
+ /* Try stopping, resetting and starting device to clear condition
+ */
+ if (XIic_Stop(&dev->Iic) != XST_SUCCESS)
+ {
+ /* The bus was in use.. */
+ printk(KERN_WARNING
+ "%s #%d: Could not stop device. Restart from higher layer.\n",
+ dev->adap.name, dev->index);
+ return -ENXIO;
+ }
+ else
+ {
+ XIic_Reset(&dev->Iic);
+ if (XIic_Start(&dev->Iic) != XST_SUCCESS)
+ {
+ printk(KERN_ERR "%s #%d: Could not start device.\n",
+ dev->adap.name, dev->index);
+ return -ENODEV;
+ }
+
+ return -ENXIO;
+ }
+ }
+
+ /* 7- or 10-bit addressing, per message flags. */
+ options = 0;
+ if (pmsg->flags & I2C_M_TEN)
+ options |= XII_SEND_10_BIT_OPTION;
+ XIic_SetOptions(&dev->Iic, options);
+
+ if (XIic_SetAddress(&dev->Iic, XII_ADDR_TO_SEND_TYPE,
+ pmsg->addr) != XST_SUCCESS)
+ {
+ printk(KERN_WARNING
+ "%s #%d: Could not set address to 0x%2x.\n",
+ dev->adap.name, dev->index, pmsg->addr);
+ return -EIO;
+ }
+
+
+ /* Arm the completion flags; the interrupt callbacks
+ * (Send/Recv/StatusHandler) clear them on completion.
+ */
+ dev->transmit_intr_flag = 0xFFFFFFFF;
+ dev->receive_intr_flag = 0xFFFFFFFF;
+ dev->status_intr_flag = 0xFFFFFFFF;
+
+ /* set the writeop flag to 0 so the adapter does not wait
+ * at bottom of loop
+ * NOTE(review): writeop is assigned but never read in this
+ * function.
+ */
+ writeop = 0;
+
+ dev->Iic.Stats.TxErrors = 0;
+
+ if (pmsg->flags & I2C_M_RD)
+ {
+ Status = XIic_MasterRecv(&dev->Iic, pmsg->buf, pmsg->len);
+ }
+ else
+ {
+ Status = XIic_MasterSend(&dev->Iic, pmsg->buf, pmsg->len);
+ }
+
+ if (Status != XST_SUCCESS)
+ {
+ printk(KERN_WARNING
+ "%s #%d: Unexpected error %d.\n",
+ dev->adap.name, dev->index, (int)Status);
+ return -EIO;
+ }
+
+ /*
+ * Wait till the data is transmitted or received. If there is an error
+ * retry for 160 times. The volatile cast forces a fresh read
+ * of the flag (updated from interrupt context) each iteration.
+ */
+ retries = 160;
+
+ if(pmsg->flags & I2C_M_RD)
+ {
+ while((((volatile int)(dev->receive_intr_flag)) != 0) && (retries != 0))
+ {
+ if ( dev->Iic.Stats.TxErrors != 0)
+ {
+ /* Error seen: restart the receive and retry. */
+ udelay(25);
+ Status = XIic_MasterRecv(&dev->Iic, pmsg->buf, pmsg->len);
+ dev->Iic.Stats.TxErrors = 0;
+ retries--;
+ }
+
+ udelay(25);
+ }
+ }
+ else
+ {
+ while((((volatile int)(dev->transmit_intr_flag)) != 0) && (retries != 0))
+ {
+ if ( dev->Iic.Stats.TxErrors != 0)
+ {
+ /* Error seen: restart the send and retry. */
+ udelay(25);
+ Status = XIic_MasterSend(&dev->Iic, pmsg->buf, pmsg->len);
+ dev->Iic.Stats.TxErrors = 0;
+ retries--;
+ }
+
+ udelay(25);
+ }
+ }
+
+
+ /* Retry budget exhausted: report but fall through to the
+ * next message (no error return here).
+ */
+ if(retries == 0)
+ {
+ printk("Unable to talk to Device\n");
+ printk("Wrong Slave address or Slave device Busy\n");
+ }
+ }
+ return num;
+}
+
+/*
+ * Report adapter capabilities to the i2c core: SMBus emulation over
+ * master_xfer, 10-bit addressing, and I2C_M_* protocol mangling.
+ */
+static u32 xiic_bit_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
+	       I2C_FUNC_PROTOCOL_MANGLING;
+}
+
+/* Algorithm hooks handed to the i2c core; no native SMBus path, the core
+ * emulates SMBus on top of master_xfer. */
+static struct i2c_algorithm xiic_algo = {
+	.master_xfer = xiic_xfer,	/* master_xfer */
+	.smbus_xfer = NULL,	/* smbus_xfer */
+	.functionality = xiic_bit_func,	/* functionality */
+};
+
+/*
+ * This routine is registered with the OS as the function to call when
+ * the IIC interrupts.  It in turn, calls the Xilinx OS independent
+ * interrupt function.  The Xilinx OS independent interrupt function
+ * will in turn call any callbacks that we have registered for various
+ * conditions (RecvHandler/SendHandler/StatusHandler below).
+ */
+static irqreturn_t xiic_interrupt(int irq, void *dev_id)
+{
+	struct xiic_data *dev = dev_id;
+
+	XIic_InterruptHandler(&dev->Iic);
+	return IRQ_HANDLED;
+}
+
+/* Receive callback, runs in interrupt context.  ByteCount is the number
+ * of bytes still outstanding; 0 means the transfer finished. */
+static void RecvHandler(void *CallbackRef, int ByteCount)
+{
+	struct xiic_data *dev = (struct xiic_data *)CallbackRef;
+
+	if (ByteCount == 0) {
+		/* flag is polled by xiic_xfer; completion woken for do_scan */
+		(dev->receive_intr_flag) = XST_SUCCESS;
+		complete(&dev->complete);
+	}
+}
+
+/* Send callback, runs in interrupt context.  ByteCount is the number of
+ * bytes still outstanding; 0 means the transfer finished. */
+static void SendHandler(void *CallbackRef, int ByteCount)
+{
+	struct xiic_data *dev = (struct xiic_data *)CallbackRef;
+
+	if (ByteCount == 0) {
+		/* flag is polled by xiic_xfer; completion woken for waiters */
+		(dev->transmit_intr_flag) = XST_SUCCESS;
+		complete(&dev->complete);
+	}
+}
+
+/* Bus status callback, runs in interrupt context: record the reported
+ * status code and wake any waiter. */
+static void StatusHandler(void *CallbackRef, int Status)
+{
+	struct xiic_data *dev = (struct xiic_data *)CallbackRef;
+
+	(dev->status_intr_flag) = Status;
+	complete(&dev->complete);
+}
+
+/*
+ * Scan the bus by attempting a one-byte master read from every valid
+ * 7-bit address (0x08..0x77) and build a textual report.
+ * Returns a kmalloc'ed buffer the caller must kfree, or NULL if the
+ * allocation failed.
+ */
+static char *xilinx_iic_do_scan(struct xiic_data *dev)
+{
+	int i;
+	char *page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	char *cptr = page;
+	u8 data;
+	u32 status;
+
+	/* cptr is NULL when kmalloc failed, so the loop is skipped */
+	for (i = 0x08; i < 0x78 && cptr; i++) {
+
+		snprintf(cptr, PAGE_SIZE - (cptr - page), "%02X: ", i);
+		cptr += strlen(cptr);
+
+		init_completion(&dev->complete);
+		if (XIic_SetAddress(&dev->Iic, XII_ADDR_TO_SEND_TYPE,
+				    i) != XST_SUCCESS) {
+
+			snprintf(cptr, PAGE_SIZE - (cptr - page),
+				 "can't set address\n");
+			cptr += strlen(cptr);
+			continue;
+		}
+
+		dev->receive_intr_flag = ~0;
+		status = XIic_MasterRecv(&dev->Iic, &data, sizeof(data));
+		if (status != XST_SUCCESS) {
+			snprintf(cptr, PAGE_SIZE - (cptr - page),
+				 "unexpected error\n");
+			cptr += strlen(cptr);
+			continue;
+		}
+
+		/* RecvHandler sets receive_intr_flag and completes us */
+		wait_for_completion(&dev->complete);
+
+		/* FIX: typo "respoding" -> "responding" */
+		snprintf(cptr, PAGE_SIZE - (cptr - page),
+			 dev->receive_intr_flag == XST_SUCCESS ?
+			 "OK\n" : "not responding\n");
+		cptr += strlen(cptr);
+	}
+
+	return page;
+}
+
+/*
+ * sysfs show() for the "scan" attribute: run a bus scan and copy the
+ * report into the PAGE_SIZE sysfs buffer.  Returns 0 (empty attribute)
+ * when the scan page could not be allocated.
+ */
+static ssize_t scan_show(struct device *d, struct device_attribute *attr,
+			 char *text)
+{
+	int len = 0;
+	char *scan_text = xilinx_iic_do_scan(dev_get_drvdata(d));
+
+	if (scan_text) {
+		len = strlen(scan_text);
+		memcpy(text, scan_text, len);
+		kfree(scan_text);
+	}
+	return len;
+}
+
+/* Read-only sysfs attribute "scan", backed by scan_show() above */
+static DEVICE_ATTR(scan, S_IRUGO, scan_show, NULL);
+
+/*
+ * Tear down one adapter instance.  Each step is guarded by a flag set in
+ * xilinx_iic_setup() so this is also safe to call from setup's error
+ * path on a partially-initialized device.
+ */
+static int __devexit xilinx_iic_remove(struct device *device)
+{
+	struct xiic_data *dev;
+
+	dev = dev_get_drvdata(device);
+
+	/*
+	 * If we've told the core I2C code about this dev, tell
+	 * the core I2C code to forget the dev.
+	 */
+	if (dev->added) {
+		/*
+		 * If an error is returned, there's not a whole lot we can
+		 * do.  An error has already been printed out so we'll
+		 * just keep trundling along.
+		 */
+		(void)i2c_del_adapter(&dev->adap);
+	}
+
+	/* Tell the Xilinx code to take this IIC interface down. */
+	if (dev->started) {
+		/* XIic_Stop() fails while the bus is busy; sleep and retry */
+		while (XIic_Stop(&dev->Iic) != XST_SUCCESS) {
+			/* The bus was busy.  Retry. */
+			printk(KERN_WARNING
+			       "%s #%d: Could not stop device.  Will retry.\n",
+			       dev->adap.name, dev->index);
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ / 2);
+		}
+	}
+
+	/*
+	 * Now that the Xilinx code isn't using the IRQ or registers,
+	 * unmap the registers and free the IRQ.
+	 */
+	if (dev->remapped) {
+		iounmap((void *)dev->Iic.BaseAddress);
+	}
+
+	if (dev->reqirq) {
+		disable_irq(dev->irq);
+		free_irq(dev->irq, dev);
+	}
+
+	device_remove_file(device, &dev_attr_scan);
+	kfree(dev);
+
+	return 0;
+}
+
+/** Shared device initialization code, common to the platform-bus and OF
+ * probe paths.  Maps the registers, initializes the Xilinx layer, grabs
+ * the IRQ, starts the core and registers the i2c adapter.  On any error
+ * the partially-initialized device is torn down via xilinx_iic_remove().
+ * Returns 0 on success, negative errno on failure. */
+static int __devinit xilinx_iic_setup(
+			struct device *device,
+			struct resource *r_mem,
+			struct resource *r_irq,
+			u32 ten_bit_addr,
+			u32 gpo_width) {
+
+	XIic_Config xiic_cfg;
+	struct xiic_data *dev;
+	char *scan_results;
+	int error;
+
+	/* Allocate the dev and zero it out. */
+	dev = kmalloc(sizeof(struct xiic_data), GFP_KERNEL);
+	if (!dev) {
+		dev_err(device, "Cannot allocate struct xiic_data\n");
+		error = -ENOMEM;
+		goto out2;
+	}
+	memset(dev, 0, sizeof(struct xiic_data));
+
+	dev_set_drvdata(device, dev);
+
+	dev->irq = r_irq->start;
+
+	/* initialize fields to satisfy i2c */
+	strcpy(dev->adap.name, device->bus_id);
+	dev->index = 0;
+
+	init_completion(&dev->complete);
+
+	memset(&xiic_cfg, 0, sizeof(XIic_Config));
+	xiic_cfg.DeviceId = 0;
+
+	/* Change the addresses to be virtual; save the old ones to restore. */
+	dev->base = r_mem->start;
+	xiic_cfg.BaseAddress =
+	    (u32) ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+	/* FIX: the ioremap() result was previously used unchecked */
+	if (!xiic_cfg.BaseAddress) {
+		dev_err(device, "could not ioremap registers.\n");
+		error = -ENOMEM;
+		goto out;
+	}
+
+	dev->remapped = 1;
+	down(&cfg_sem);
+
+	xiic_cfg.Has10BitAddr = (int)ten_bit_addr;
+	xiic_cfg.GpOutWidth = (u8)gpo_width;
+
+	/* Tell the Xilinx code to bring this IIC interface up. */
+	if (XIic_CfgInitialize(&dev->Iic, &xiic_cfg, xiic_cfg.BaseAddress) !=
+	    XST_SUCCESS) {
+		up(&cfg_sem);
+		dev_err(device, "could not initialize device.\n");
+		error = -ENODEV;
+		goto out;
+	}
+	up(&cfg_sem);
+	XIic_SetRecvHandler(&dev->Iic, (void *)dev, RecvHandler);
+	XIic_SetSendHandler(&dev->Iic, (void *)dev, SendHandler);
+	XIic_SetStatusHandler(&dev->Iic, (void *)dev, StatusHandler);
+
+	/* Grab the IRQ */
+	error = request_irq(dev->irq, xiic_interrupt, 0, dev->adap.name, dev);
+	if (error) {
+		dev_err(device, "could not allocate interrupt %d.\n", dev->irq);
+		goto out;
+	}
+	dev->reqirq = 1;
+
+	if (XIic_Start(&dev->Iic) != XST_SUCCESS) {
+		dev_err(device, "could not start device\n");
+		error = -ENODEV;
+		goto out;
+	}
+	dev->started = 1;
+
+	/* Now tell the core I2C code about our new device. */
+	/*
+	 * SAATODO: Get a real ID (perhaps I2C_HW_XILINX) after
+	 * initial release.  Will need to email lm78@stimpy.netroedge.com
+	 * per http://www2.lm-sensors.nu/~lm78/support.html
+	 */
+	dev->adap.id = 0;
+	dev->adap.algo = &xiic_algo;
+	dev->adap.algo_data = NULL;
+	dev->adap.timeout = XIIC_TIMEOUT;
+	dev->adap.retries = XIIC_RETRY;
+	error = i2c_add_adapter(&dev->adap);
+
+	if (error) {
+		dev_err(device, "could not add i2c adapter\n");
+		goto out;
+	}
+	dev->added = 1;
+
+	/* FIX: added explicit log level */
+	printk(KERN_INFO "%s #%d at 0x%08X mapped to 0x%08X, irq=%d\n",
+	       dev->adap.name, dev->index,
+	       dev->base, (unsigned int)dev->Iic.BaseAddress, dev->irq);
+
+	if (scan) {
+		scan_results = xilinx_iic_do_scan(dev);
+		if (scan_results) {
+			/* FIX: never pass a data buffer as a printk format
+			 * string -- any '%' in it would be interpreted */
+			printk("%s", scan_results);
+			kfree(scan_results);
+		}
+	}
+
+	error = device_create_file(device, &dev_attr_scan);
+      out:
+	if (error)
+		xilinx_iic_remove(device);
+      out2:
+	return error;
+}
+/* Platform-bus probe: collect the MEM and IRQ resources from the
+ * platform device and hand off to the shared setup code.  ten_bit_addr
+ * and gpo_width default to 0 on this path. */
+static int __devinit xilinx_iic_probe(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct resource *r_irq = NULL;	/* Interrupt resources */
+	struct resource *r_mem = NULL;	/* IO mem resources */
+
+	/* param check */
+	if (!pdev) {
+		dev_err(device, "Probe called with NULL param.\n");
+		return -ENODEV;
+	}
+
+	/* Get iospace and an irq for the device */
+	r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r_irq || !r_mem) {
+		dev_err(device, "IO resource(s) not found.\n");
+		return -ENODEV;
+	}
+
+	return xilinx_iic_setup(device, r_mem, r_irq, 0, 0);
+}
+
+#ifdef CONFIG_OF
+
+/* Match table for of_platform binding */
+static struct of_device_id __devinitdata xilinx_iic_of_match[] = {
+	{ .compatible = "xlnx,xps-iic-2.00.a", },
+	{},	/* sentinel */
+};
+MODULE_DEVICE_TABLE(of, xilinx_iic_of_match);
+
+/* Read a u32 property from the device-tree node; warn and return 0 when
+ * the property is absent. */
+static u32 get_u32(struct of_device *ofdev, const char *s) {
+	u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+	if(p) {
+		return *p;
+	} else {
+		dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to 0.\n", s);
+		return 0;
+	}
+}
+
+/* OF probe: translate the device-tree reg/interrupts properties into
+ * resources, fetch the xlnx,* parameters, and hand off to the shared
+ * setup code.  Returns 0 on success, negative errno on failure. */
+static int __devinit xilinx_iic_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+	u32 ten_bit_addr, gpo_width;
+	struct resource r_irq_struct;
+	struct resource r_mem_struct;
+
+	struct resource *r_irq = &r_irq_struct;	/* Interrupt resources */
+	struct resource *r_mem = &r_mem_struct;	/* IO mem resources */
+	int rc = 0;
+
+	printk(KERN_INFO "Device Tree Probing \'%s\'\n",
+	       ofdev->node->name);
+
+	/* Get iospace for the device */
+	rc = of_address_to_resource(ofdev->node, 0, r_mem);
+	if(rc) {
+		dev_warn(&ofdev->dev, "invalid address\n");
+		return rc;
+	}
+
+	/* Get IRQ for the device */
+	rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+	if(rc == NO_IRQ) {
+		dev_warn(&ofdev->dev, "no IRQ found.\n");
+		/* FIX: returning rc here returned NO_IRQ, which can be 0
+		 * and would be treated as probe success by the caller */
+		return -ENODEV;
+	}
+
+	ten_bit_addr = get_u32(ofdev, "xlnx,ten-bit-adr");
+	gpo_width = get_u32(ofdev, "xlnx,gpo-width");
+
+	return xilinx_iic_setup(&ofdev->dev, r_mem, r_irq, ten_bit_addr, gpo_width);
+}
+
+/* OF remove: forward to the shared teardown code. */
+static int __devexit xilinx_iic_of_remove(struct of_device *ofdev)
+{
+	return xilinx_iic_remove(&ofdev->dev);
+}
+
+/* of_platform driver glue; probe/remove forward to the shared code */
+static struct of_platform_driver xilinx_iic_of_driver = {
+	.name = "iic",
+	.match_table = xilinx_iic_of_match,
+	.probe = xilinx_iic_of_probe,
+	.remove = __devexit_p(xilinx_iic_of_remove), };
+
+/* Registration helpers to keep the number of #ifdefs to a minimum */
+static inline int __init xilinx_iic_of_register(void)
+{
+	return of_register_platform_driver(&xilinx_iic_of_driver);
+}
+
+/* Counterpart of xilinx_iic_of_register() for module unload */
+static inline void __exit xilinx_iic_of_unregister(void)
+{
+	of_unregister_platform_driver(&xilinx_iic_of_driver);
+}
+
+#else /* CONFIG_OF */
+
+/* CONFIG_OF not enabled; do nothing helpers so callers need no #ifdefs */
+static inline int __init xilinx_iic_of_register(void) { return 0; }
+static inline void __exit xilinx_iic_of_unregister(void) { }
+
+#endif /* CONFIG_OF */
+
+/* Module init: register the legacy driver first, then the OF binding
+ * (a no-op stub when CONFIG_OF is off).  Unwinds in reverse on error.
+ * Returns 0 on success, negative errno on failure. */
+static int __init xiic_init(void)
+{
+	int ret;
+
+	ret = driver_register(&xilinx_iic_driver);
+	if (ret)
+		goto err_driver;
+
+	ret = xilinx_iic_of_register();
+	if (ret)
+		goto err_of;
+
+	return 0;
+
+err_of:
+	driver_unregister(&xilinx_iic_driver);
+
+err_driver:
+	/* FIX: message was missing its trailing newline */
+	printk(KERN_ERR "registering iic driver failed: err=%i\n", ret);
+	return ret;
+}
+
+/* Module exit: tear down in the reverse order of registration in
+ * xiic_init() (OF binding first, then the legacy driver). */
+static void __exit xiic_cleanup(void)
+{
+	/* FIX: previously unregistered in the same order as registration */
+	xilinx_iic_of_unregister();
+	driver_unregister(&xilinx_iic_driver);
+}
+
+module_init(xiic_init);	/* register drivers on module load */
+module_exit(xiic_cleanup);	/* unregister drivers on module unload */
--- /dev/null
+/* $Id: xiic.c,v 1.1 2007/12/03 15:44:58 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic.c
+*
+* Contains required functions for the XIic component. See xiic.h for more
+* information on the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- --- ------- -----------------------------------------------
+* 1.01a rfp 10/19/01 release
+* 1.01c ecm 12/05/02 new rev
+* 1.01c rmm 05/14/03 Fixed diab compiler warnings relating to asserts.
+* 1.01d jhl 10/08/03 Added general purpose output feature
+* 1.02a jvb 12/13/05 I changed Initialize() into CfgInitialize(), and made
+* CfgInitialize() take a pointer to a config structure
+* instead of a device id. I moved Initialize() into
+* xgpio_sinit.c, and had Initialize() call CfgInitialize()
+* after it retrieved the config structure using the device
+* id. I removed include of xparameters.h along with any
+* dependencies on xparameters.h and the _g.c config table.
+* 1.02a mta 03/09/06 Added a new function XIic_IsIicBusy() which returns
+* whether IIC Bus is Busy or Free.
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+****************************************************************************/
+
+/***************************** Include Files *******************************/
+
+#include "xiic.h"
+#include "xiic_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions ***************************/
+
+
+/**************************** Type Definitions *****************************/
+
+
+/***************** Macros (Inline Functions) Definitions *******************/
+
+
+/************************** Function Prototypes ****************************/
+
+static void XIic_StubStatusHandler(void *CallBackRef, int ErrorCode);
+
+static void XIic_StubHandler(void *CallBackRef, int ByteCount);
+
+/************************** Variable Definitions **************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Initializes a specific XIic instance. The initialization entails:
+*
+* - Check the device has an entry in the configuration table.
+* - Initialize the driver to allow access to the device registers and
+* initialize other subcomponents necessary for the operation of the device.
+* - Default options to:
+* - 7-bit slave addressing
+* - Send messages as a slave device
+* - Repeated start off
+* - General call recognition disabled
+*   - Clear messaging and error statistics
+*
+* The XIic_Start() function must be called after this function before the device
+* is ready to send and receive data on the IIC bus.
+*
+* Before XIic_Start() is called, the interrupt control must connect the ISR
+* routine to the interrupt handler. This is done by the user, and not
+* XIic_Start() to allow the user to use an interrupt controller of their choice.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+* @param Config is a reference to a structure containing information about
+* a specific IIC device. This function initializes an InstancePtr object
+* for a specific device specified by the contents of Config. This
+* function can initialize multiple instance objects with the use of
+* multiple calls giving different Config information on each call.
+* @param EffectiveAddr is the device base address in the virtual memory address
+* space. The caller is responsible for keeping the address mapping
+* from EffectiveAddr to the device physical base address unchanged
+* once this function is invoked. Unexpected errors may occur if the
+* address mapping changes after this function is called. If address
+* translation is not used, use Config->BaseAddress for this parameter,
+* passing the physical address instead.
+*
+* @return
+*
+* - XST_SUCCESS when successful
+* - XST_DEVICE_IS_STARTED indicates the device is started (i.e. interrupts
+* enabled and messaging is possible). Must stop before re-initialization
+* is allowed.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+int XIic_CfgInitialize(XIic * InstancePtr, XIic_Config * Config,
+		       u32 EffectiveAddr)
+{
+	/*
+	 * Asserts test the validity of selected input arguments.
+	 */
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* Mark not-ready first so a rejected init leaves the instance unusable */
+	InstancePtr->IsReady = 0;
+
+	/*
+	 * If the device is started, disallow the initialize and return a Status
+	 * indicating it is started.  This allows the user to stop the device
+	 * and reinitialize, but prevents a user from inadvertently initializing
+	 */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return XST_DEVICE_IS_STARTED;
+	}
+
+	/*
+	 * Set default values and configuration data, including setting the
+	 * callback handlers to stubs so the system will not crash should the
+	 * application not assign its own callbacks.
+	 */
+	InstancePtr->IsStarted = 0;
+	InstancePtr->BaseAddress = EffectiveAddr;
+	InstancePtr->RecvHandler = XIic_StubHandler;
+	InstancePtr->RecvBufferPtr = NULL;
+	InstancePtr->SendHandler = XIic_StubHandler;
+	InstancePtr->SendBufferPtr = NULL;
+	InstancePtr->StatusHandler = XIic_StubStatusHandler;
+	InstancePtr->Has10BitAddr = Config->Has10BitAddr;
+	InstancePtr->IsReady = XCOMPONENT_IS_READY;
+	InstancePtr->Options = 0;
+	InstancePtr->BNBOnly = FALSE;
+	InstancePtr->GpOutWidth = Config->GpOutWidth;
+	InstancePtr->IsDynamic = FALSE;
+
+	/*
+	 * Reset the device so it's in the reset state, this must be after the
+	 * IPIF is initialized since it resets through the IPIF, and clear the
+	 * statistics
+	 */
+	XIic_Reset(InstancePtr);
+
+	XIIC_CLEAR_STATS(InstancePtr);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function starts the IIC device and driver by enabling the proper
+* interrupts such that data may be sent and received on the IIC bus.
+* This function must be called before the functions to send and receive data.
+*
+* Before XIic_Start() is called, the interrupt control must connect the ISR
+* routine to the interrupt handler. This is done by the user, and not
+* XIic_Start() to allow the user to use an interrupt controller of their choice.
+*
+* Start enables:
+* - IIC device
+* - Interrupts:
+* - Addressed as slave to allow messages from another master
+* - Arbitration Lost to detect Tx arbitration errors
+* - Global IIC interrupt within the IPIF interface
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* XST_SUCCESS always
+*
+* @note
+*
+* The device interrupt is connected to the interrupt controller, but no
+* "messaging" interrupts are enabled. Addressed as Slave is enabled to
+* allow reception of messages when this device's address is written to the bus.
+* The correct messaging interrupts are enabled when sending or receiving
+* via the IicSend() and IicRecv() functions. No action is required
+* by the user to control any IIC interrupts as the driver completely
+* manages all 8 interrupts. Start and Stop control the ability
+* to use the device. Stopping the device completely stops all device
+* interrupts from the processor.
+*
+****************************************************************************/
+int XIic_Start(XIic * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/*
+	 * Mask off all interrupts, each is enabled when needed.
+	 */
+	XIIC_WRITE_IIER(InstancePtr->BaseAddress, 0);
+
+	/*
+	 * Clear all interrupts by reading and rewriting exact value back.
+	 * Only those bits set will get written as 1 (writing 1 clears intr)
+	 */
+	XIic_mClearIntr(InstancePtr->BaseAddress, 0xFFFFFFFF);
+
+	/*
+	 * Enable the device
+	 */
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+		 XIIC_CR_ENABLE_DEVICE_MASK);
+	/*
+	 * Set Rx FIFO Occupancy depth to throttle at first byte(after reset = 0)
+	 */
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_RFD_REG_OFFSET, 0);
+
+	/*
+	 * Clear and enable the interrupts needed: addressed-as-slave and
+	 * arbitration-lost; messaging interrupts come later per transfer
+	 */
+	XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+			      XIIC_INTR_AAS_MASK | XIIC_INTR_ARB_LOST_MASK);
+
+	InstancePtr->IsStarted = XCOMPONENT_IS_STARTED;
+	InstancePtr->IsDynamic = FALSE;
+
+	/* Enable all interrupts by the global enable in the IPIF */
+
+	XIIC_GINTR_ENABLE(InstancePtr->BaseAddress);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function stops the IIC device and driver such that data is no longer
+* sent or received on the IIC bus. This function stops the device by
+* disabling interrupts. This function only disables interrupts within the
+* device such that the caller is responsible for disconnecting the interrupt
+* handler of the device from the interrupt source and disabling interrupts
+* at other levels.
+*
+* Due to bus throttling that could hold the bus between messages when using
+* repeated start option, stop will not occur when the device is actively
+* sending or receiving data from the IIC bus or the bus is being throttled
+* by this device, but instead return XST_IIC_BUS_BUSY.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS indicates all IIC interrupts are disabled. No messages can
+* be received or transmitted until XIic_Start() is called.
+* - XST_IIC_BUS_BUSY indicates this device is currently engaged in message
+* traffic and cannot be stopped.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+int XIic_Stop(XIic * InstancePtr)
+{
+	u8 Status;
+	u8 CntlReg;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/*
+	 * Disable all interrupts globally using the IPIF
+	 */
+	XIIC_GINTR_DISABLE(InstancePtr->BaseAddress);
+
+	CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+	Status = XIo_In8(InstancePtr->BaseAddress + XIIC_SR_REG_OFFSET);
+
+	/* MSMS set = acting as master; addressed-as-slave = inbound traffic */
+	if ((CntlReg & XIIC_CR_MSMS_MASK) ||
+	    (Status & XIIC_SR_ADDR_AS_SLAVE_MASK)) {
+		/* when this device is using the bus
+		 * - re-enable interrupts to finish current messaging
+		 * - return bus busy
+		 */
+		XIIC_GINTR_ENABLE(InstancePtr->BaseAddress);
+
+		return XST_IIC_BUS_BUSY;
+	}
+
+	InstancePtr->IsStarted = 0;
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Resets the IIC device. Reset must only be called after the driver has been
+* initialized. The configuration after this reset is as follows:
+* - Repeated start is disabled
+* - General call is disabled
+*
+* The upper layer software is responsible for initializing and re-configuring
+* (if necessary) and restarting the IIC device after the reset.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+* @internal
+*
+* The reset is accomplished by setting the IPIF reset register. This takes
+* care of resetting all IPIF hardware blocks, including the IIC device.
+*
+****************************************************************************/
+void XIic_Reset(XIic * InstancePtr)
+{
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Writing the IPIF reset register resets all IPIF blocks incl. IIC */
+	XIIC_RESET(InstancePtr->BaseAddress);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets the bus addresses. The addresses include the device
+* address that the device responds to as a slave, or the slave address
+* to communicate with on the bus. The IIC device hardware is built to
+* allow either 7 or 10 bit slave addressing only at build time rather
+* than at run time. When this device is a master, slave addressing can
+* be selected at run time to match addressing modes for other bus devices.
+*
+* Addresses are represented as hex values with no adjustment for the data
+* direction bit as the software manages address bit placement.
+* Example: For a 7-bit address written to the device of 1010 011X where X is
+* the transfer direction (send/recv), the address parameter for this function
+* needs to be 01010011 or 0x53 where the correct bit alignment will be
+* handled for 7 as well as 10 bit devices. This is especially important as
+* the bit placement is not handled the same depending on which options are
+* used such as repeated start.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+* @param AddressType indicates which address is being modified; the address
+* which this device responds to on the IIC bus as a slave, or the
+* slave address to communicate with when this device is a master. One
+* of the following values must be contained in this argument.
+* <pre>
+* XII_ADDRESS_TO_SEND Slave being addressed by a this master
+* XII_ADDRESS_TO_RESPOND Address to respond to as a slave device
+* </pre>
+* @param Address contains the address to be set; 7 bit or 10 bit address.
+* A ten bit address must be within the range: 0 - 1023 and a 7 bit
+* address must be within the range 0 - 127.
+*
+* @return
+*
+* XST_SUCCESS is returned if the address was successfully set, otherwise one
+* of the following errors is returned.
+* - XST_IIC_NO_10_BIT_ADDRESSING indicates only 7 bit addressing supported.
+* - XST_INVALID_PARAM indicates an invalid parameter was specified.
+*
+* @note
+*
+* Upper bits of 10-bit address is written only when current device is built
+* as a ten bit device.
+*
+****************************************************************************/
+int XIic_SetAddress(XIic * InstancePtr, int AddressType, int Address)
+{
+	u8 SendAddr;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	/* FIX: the documented valid range is 0 - 1023 inclusive; the old
+	 * assert (Address < 1023) rejected the maximum 10-bit address */
+	XASSERT_NONVOID(Address <= 1023);
+
+	/* Set address to respond to for this device into address registers */
+
+	if (AddressType == XII_ADDR_TO_RESPOND_TYPE) {
+		SendAddr = (u8) ((Address & 0x007F) << 1);	/* Addr in upper 7 bits */
+		XIo_Out8(InstancePtr->BaseAddress + XIIC_ADR_REG_OFFSET,
+			 SendAddr);
+
+		if (InstancePtr->Has10BitAddr == TRUE) {
+			/* Write upper 3 bits of addr to DTR only when 10 bit option
+			 * included in design i.e. register exists
+			 */
+			SendAddr = (u8) ((Address & 0x0380) >> 7);
+			XIo_Out8(InstancePtr->BaseAddress + XIIC_TBA_REG_OFFSET,
+				 SendAddr);
+		}
+
+		return XST_SUCCESS;
+	}
+
+	/* Store address of slave device being read from */
+
+	if (AddressType == XII_ADDR_TO_SEND_TYPE) {
+		InstancePtr->AddrOfSlave = Address;
+		return XST_SUCCESS;
+	}
+
+	return XST_INVALID_PARAM;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the addresses for the IIC device driver. The addresses
+* include the device address that the device responds to as a slave, or the
+* slave address to communicate with on the bus. The address returned has the
+* same format whether 7 or 10 bits.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+* @param AddressType indicates which address, the address which this
+* responds to on the IIC bus as a slave, or the slave address to
+* communicate with when this device is a master. One of the following
+* values must be contained in this argument.
+* <pre>
+* XII_ADDRESS_TO_SEND_TYPE slave being addressed as a master
+* XII_ADDRESS_TO_RESPOND_TYPE slave address to respond to as a slave
+* </pre>
+* If neither of the two valid arguments are used, the function returns
+* the address of the slave device
+*
+* @return
+*
+* The address retrieved.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+u16 XIic_GetAddress(XIic * InstancePtr, int AddressType)
+{
+	u8 LowAddr;
+	u16 HighAddr = 0;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* return this devices address */
+
+	if (AddressType == XII_ADDR_TO_RESPOND_TYPE) {
+
+		LowAddr =
+		    XIo_In8(InstancePtr->BaseAddress + XIIC_ADR_REG_OFFSET);
+
+		if (InstancePtr->Has10BitAddr == TRUE) {
+			HighAddr = (u16) XIo_In8(InstancePtr->BaseAddress +
+						 XIIC_TBA_REG_OFFSET);
+		}
+		/* FIX: combine the two register halves with OR; the old
+		 * bitwise AND zeroed almost every address */
+		return ((HighAddr << 8) | (u16) LowAddr);
+	}
+
+	/* Otherwise return address of slave device on the IIC bus */
+
+	return InstancePtr->AddrOfSlave;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets the contents of the General Purpose Output register
+* for the IIC device driver. Note that the number of bits in this register is
+* parameterizable in the hardware such that it may not exist. This function
+* checks to ensure that it does exist to prevent bus errors, but does not
+* ensure that the number of bits in the register are sufficient for the
+* value being written (won't cause a bus error).
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @param OutputValue contains the value to be written to the register.
+*
+* @return
+*
+* A value indicating success, XST_SUCCESS, or XST_NO_FEATURE if the hardware
+* is configured such that this register does not contain any bits to read
+* or write.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+int XIic_SetGpOutput(XIic * InstancePtr, u8 OutputValue)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* If the general purpose output register is implemented by the hardware
+	 * then write the specified value to it, otherwise indicate an error
+	 * (the register width is a hardware build-time parameter)
+	 */
+	if (InstancePtr->GpOutWidth > 0) {
+		XIic_mWriteReg(InstancePtr->BaseAddress, XIIC_GPO_REG_OFFSET,
+			       OutputValue);
+		return XST_SUCCESS;
+	}
+	else {
+		return XST_NO_FEATURE;
+	}
+}
+
+
+/*****************************************************************************/
+/**
+*
+* This function gets the contents of the General Purpose Output register
+* for the IIC device driver. Note that the number of bits in this register is
+* parameterizable in the hardware such that it may not exist. This function
+* checks to ensure that it does exist to prevent bus errors.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @param OutputValuePtr contains the value which was read from the
+* register.
+*
+* @return
+*
+* A value indicating success, XST_SUCCESS, or XST_NO_FEATURE if the hardware
+* is configured such that this register does not contain any bits to read
+* or write.
+*
+* The OutputValuePtr is also an output as it contains the value read.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+int XIic_GetGpOutput(XIic * InstancePtr, u8 *OutputValuePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(OutputValuePtr != NULL);
+
+	/* If the general purpose output register is implemented by the hardware
+	 * then read the value from it, otherwise indicate an error
+	 * (the register width is a hardware build-time parameter)
+	 */
+	if (InstancePtr->GpOutWidth > 0) {
+		*OutputValuePtr = XIic_mReadReg(InstancePtr->BaseAddress,
+						XIIC_GPO_REG_OFFSET);
+		return XST_SUCCESS;
+	}
+	else {
+		return XST_NO_FEATURE;
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* A function to determine if the device is currently addressed as a slave
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* TRUE if the device is addressed as slave, and FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+u32 XIic_IsSlave(XIic * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* Addressed-as-slave bit in the status register is the sole source */
+	if ((XIo_In8(InstancePtr->BaseAddress + XIIC_SR_REG_OFFSET) &
+	     XIIC_SR_ADDR_AS_SLAVE_MASK) == 0) {
+		return FALSE;
+	}
+	return TRUE;
+}
+
+/*****************************************************************************/
+/**
+*
+* Sets the receive callback function, the receive handler, which the driver
+* calls when it finishes receiving data. The number of bytes used to signal
+* when the receive is complete is the number of bytes set in the XIic_Recv
+* function.
+*
+* The handler executes in an interrupt context such that it must minimize
+* the amount of processing performed such as transferring data to a thread
+* context.
+*
+* The number of bytes received is passed to the handler as an argument.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+* @param CallBackRef is the upper layer callback reference passed back when
+* the callback function is invoked.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The handler is called within interrupt context ...
+*
+****************************************************************************/
+void XIic_SetRecvHandler(XIic * InstancePtr, void *CallBackRef,
+ XIic_Handler FuncPtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(FuncPtr != NULL);
+
+ InstancePtr->RecvHandler = FuncPtr;
+ InstancePtr->RecvCallBackRef = CallBackRef;
+}
+
+/*****************************************************************************/
+/**
+*
+* Sets the send callback function, the send handler, which the driver calls when
+* it receives confirmation of sent data. The handler executes in an interrupt
+* context such that it must minimize the amount of processing performed such
+* as transferring data to a thread context.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+* @param CallBackRef is the upper layer callback reference passed back when
+* the callback function is invoked.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The handler is called within interrupt context ...
+*
+****************************************************************************/
+void XIic_SetSendHandler(XIic * InstancePtr, void *CallBackRef,
+ XIic_Handler FuncPtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(FuncPtr != NULL);
+
+ InstancePtr->SendHandler = FuncPtr;
+ InstancePtr->SendCallBackRef = CallBackRef;
+}
+
+/*****************************************************************************/
+/**
+*
+* Sets the status callback function, the status handler, which the driver calls
+* when it encounters conditions which are not data related. The handler
+* executes in an interrupt context such that it must minimize the amount of
+* processing performed such as transferring data to a thread context. The
+* status events that can be returned are described in xiic.h.
+*
+* @param InstancePtr points to the XIic instance to be worked on.
+* @param CallBackRef is the upper layer callback reference passed back when
+* the callback function is invoked.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The handler is called within interrupt context, so keep processing minimal.
+*
+****************************************************************************/
+void XIic_SetStatusHandler(XIic * InstancePtr, void *CallBackRef,
+ XIic_StatusHandler FuncPtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(FuncPtr != NULL);
+
+ InstancePtr->StatusHandler = FuncPtr;
+ InstancePtr->StatusCallBackRef = CallBackRef;
+}
+
+/*****************************************************************************
+*
+* Stub for the send and receive callbacks. It asserts unconditionally so
+* that a missing upper-layer handler is caught rather than silently ignored.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+* @param ByteCount is the number of bytes sent or received
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void XIic_StubHandler(void *CallBackRef, int ByteCount)
+{
+ XASSERT_VOID_ALWAYS();
+}
+
+/*****************************************************************************
+*
+* Stub for the asynchronous status/error callback. It asserts unconditionally
+* so that a missing upper-layer handler is caught rather than silently ignored.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+* @param ErrorCode is the Xilinx error code, indicating the cause of the error
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void XIic_StubStatusHandler(void *CallBackRef, int ErrorCode)
+{
+ XASSERT_VOID_ALWAYS();
+}
+
+/*****************************************************************************
+*
+* Checks the Status Register Bus Busy bit to tell whether the IIC bus is busy.
+*
+* @param InstancePtr points to the XIic instance to be worked on.
+*
+* @return TRUE if Bus is Busy else FALSE
+*
+* @note None.
+*
+******************************************************************************/
+u32 XIic_IsIicBusy(XIic * InstancePtr)
+{
+ u8 StatusReg;
+
+ StatusReg = XIic_mReadReg(InstancePtr->BaseAddress, XIIC_SR_REG_OFFSET);
+ if (StatusReg & XIIC_SR_BUS_BUSY_MASK) {
+ return TRUE;
+ }
+ else {
+ return FALSE;
+ }
+}
--- /dev/null
+/* $Id: xiic.h,v 1.3 2007/12/17 19:15:38 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2007 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic.h
+*
+* XIic is the driver for an IIC master or slave device.
+*
+* In order to reduce the memory requirements of the driver the driver is
+* partitioned such that there are optional parts of the driver.
+* Slave, master, and multimaster features are optional such that all these files
+* are not required at the same time.
+* In order to use the slave and multimaster features of the driver, the user
+* must call functions (XIic_SlaveInclude and XIic_MultiMasterInclude)
+* to dynamically include the code. These functions may be called at any time.
+*
+* Two sets of higher level API's are available in the XIic driver that can
+* be used for Transmission/Reception in Master mode :
+* - XIic_MasterSend()/ XIic_MasterRecv() which is used in normal mode.
+* - XIic_DynMasterSend()/XIic_DynMasterRecv() which is used in Dynamic mode.
+*
+* Similarly two sets of lower level API's are available in XIic driver that
+* can be used for Transmission/Reception in Master mode:
+* - XIic_Send()/XIic_Recv() which is used in normal mode
+* - XIic_DynSend()/XIic_DynRecv() which is used in Dynamic mode.
+*
+* The user should use a single set of APIs as per his requirement and
+* should not intermix them.
+*
+* All the driver APIs can be used for read, write and combined mode of
+* operations on the IIC bus.
+*
+* In the normal mode the IIC core supports both 7-bit and 10-bit addressing;
+* the dynamic mode supports only 7-bit addressing.
+*
+* <b>Initialization & Configuration</b>
+*
+* The XIic_Config structure is used by the driver to configure itself. This
+* configuration structure is typically created by the tool-chain based on HW
+* build properties.
+*
+* To support multiple runtime loading and initialization strategies employed
+* by various operating systems, the driver instance can be initialized in one
+* of the following ways:
+*
+* - XIic_Initialize(InstancePtr, DeviceId) - The driver looks up its own
+* configuration structure created by the tool-chain based on an ID provided
+* by the tool-chain.
+*
+* - XIic_CfgInitialize(InstancePtr, CfgPtr, EffectiveAddr) - Uses a
+* configuration structure provided by the caller. If running in a system
+* with address translation, the provided virtual memory base address
+* replaces the physical address present in the configuration structure.
+*
+* <b>General Purpose Output</b>
+* The IIC hardware provides a General Purpose Output Register that allows the
+* user to connect general purpose outputs to devices, such as a write protect,
+* for an EEPROM. This register is parameterizable in the hardware such that
+* there could be zero bits in this register and in this case it will cause
+* a bus error if read or written.
+*
+* <b>Bus Throttling</b>
+*
+* The IIC hardware provides bus throttling which allows either the device, as
+* either a master or a slave, to stop the clock on the IIC bus. This feature
+* allows the software to perform the appropriate processing for each interrupt
+* without an unreasonable response restriction. With this design, it is
+* important for the user to understand the implications of bus throttling.
+*
+* <b>Repeated Start</b>
+*
+* An application can send multiple messages, as a master, to a slave device
+* and re-acquire the IIC bus each time a message is sent. The repeated start
+* option allows the application to send multiple messages without re-acquiring
+* the IIC bus for each message. The transactions involving repeated start
+* are also called combined transfers if there is Read and Write in the
+* same transaction.
+*
+* The repeated start feature works with all the API's in XIic driver.
+*
+* The Repeated Start feature also could cause the application to lock up, or
+* monopolize the IIC bus, should repeated start option be enabled and sequences
+* of messages never end(periodic data collection).
+* Also, if repeated start is not disabled before the last master message is
+* sent or received, the bus is left captive to the master, but unused.
+*
+* <b>Addressing</b>
+*
+* The IIC hardware is parameterized such that it can be built for 7 or 10
+* bit addresses. The driver provides the ability to control which address
+* size is sent in messages as a master to a slave device. The address size
+* which the hardware responds to as a slave is parameterized as 7 or 10 bits
+* but fixed by the hardware build.
+*
+* Addresses are represented as hex values with no adjustment for the data
+* direction bit as the software manages address bit placement. This is
+* especially important as the bit placement is not handled the same depending
+* on which options are used such as repeated start and 7 vs 10 bit addressing.
+*
+* <b>Data Rates</b>
+*
+* The IIC hardware is parameterized such that it can be built to support
+* data rates from DC to 400KBit. The frequency of the interrupts which
+* occur is proportional to the data rate.
+*
+* <b>Polled Mode Operation</b>
+*
+* This driver does not provide a polled mode of operation primarily because
+* polled mode which is non-blocking is difficult with the amount of
+* interaction with the hardware that is necessary.
+*
+* <b>Interrupts</b>
+*
+* The device has many interrupts which allow IIC data transactions as well
+* as bus status processing to occur.
+*
+* The interrupts are divided into two types, data and status. Data interrupts
+* indicate data has been received or transmitted while the status interrupts
+* indicate the status of the IIC bus. Some of the interrupts, such as Not
+* Addressed As Slave and Bus Not Busy, are only used when these specific
+* events must be recognized as opposed to being enabled at all times.
+*
+* Many of the interrupts are not a single event in that they are continuously
+* present such that they must be disabled after recognition or when undesired.
+* Some of these interrupts, which are data related, may be acknowledged by the
+* software by reading or writing data to the appropriate register, or must
+* be disabled. The following interrupts can be continuous rather than single
+* events.
+* - Data Transmit Register Empty/Transmit FIFO Empty
+* - Data Receive Register Full/Receive FIFO
+* - Transmit FIFO Half Empty
+* - Bus Not Busy
+* - Addressed As Slave
+* - Not Addressed As Slave
+*
+* The following interrupts are not passed directly to the application thru the
+* status callback. These are only used internally for the driver processing
+* and may result in the receive and send handlers being called to indicate
+* completion of an operation. The following interrupts are data related
+* rather than status.
+* - Data Transmit Register Empty/Transmit FIFO Empty
+* - Data Receive Register Full/Receive FIFO
+* - Transmit FIFO Half Empty
+* - Slave Transmit Complete
+*
+* <b>Interrupt To Event Mapping</b>
+*
+* The following table provides a mapping of the interrupts to the events which
+* are passed to the status handler and the intended role (master or slave) for
+* the event. Some interrupts can cause multiple events which are combined
+* together into a single status event such as XII_MASTER_WRITE_EVENT and
+* XII_GENERAL_CALL_EVENT
+* <pre>
+* Interrupt Event(s) Role
+*
+* Arbitration Lost Interrupt XII_ARB_LOST_EVENT Master
+* Transmit Error XII_SLAVE_NO_ACK_EVENT Master
+* IIC Bus Not Busy XII_BUS_NOT_BUSY_EVENT Master
+* Addressed As Slave XII_MASTER_READ_EVENT, Slave
+* XII_MASTER_WRITE_EVENT, Slave
+* XII_GENERAL_CALL_EVENT Slave
+* </pre>
+* <b>Not Addressed As Slave Interrupt</b>
+*
+* The Not Addressed As Slave interrupt is not passed directly to the
+* application thru the status callback. It is used to determine the end of
+* a message being received by a slave when there was no stop condition
+* (repeated start). It will cause the receive handler to be called to
+* indicate completion of the operation.
+*
+* <b>RTOS Independence</b>
+*
+* This driver is intended to be RTOS and processor independent. It works
+* with physical addresses only. Any needs for dynamic memory management,
+* threads or thread mutual exclusion, virtual memory, or cache control must
+* be satisfied by the layer above this driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.01a rfp 10/19/01 release
+* 1.01c ecm 12/05/02 new rev
+* 1.01d jhl 10/08/03 Added general purpose output feature
+* 1.01d sv 05/09/05 Changed the data being written to the Address/Control
+* Register and removed the code for testing the
+* Receive Data Register in XIic_SelfTest function of
+* xiic_selftest.c source file
+* 1.02a jvb 12/14/05 I separated dependency on the static config table and
+* xparameters.h from the driver initialization by moving
+* _Initialize and _LookupConfig to _sinit.c. I also added
+* the new _CfgInitialize routine.
+* 1.02a mta 03/09/06 Added a new function XIic_IsIicBusy() which returns
+* whether IIC Bus is Busy or Free.
+* 1.02a mta 03/09/06 Implemented Repeated Start in the Low Level Driver.
+* 1.03a mta 07/17/06 Added files to support Dynamic IIC controller in High
+* level driver. Added xiic_dyn_master.c. Added support
+* for IIC Dynamic controller in Low level driver in xiic_l.c
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* 1.13b ecm 11/29/07 added BB polling loops to the DynSend and DynRecv
+* routines to handle the race condition with BNB in IISR.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XIIC_H /* prevent circular inclusions */
+#define XIIC_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xiic_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/** @name Configuration options
+ *
+ * The following options may be specified or retrieved for the device and
+ * enable/disable additional features of the IIC bus. Each of the options
+ * are bit fields such that more than one may be specified.
+ * @{
+ */
+/**
+ * <pre>
+ * XII_GENERAL_CALL_OPTION The general call option allows an IIC slave to
+ * recognize the general call address. The status
+ * handler is called as usual indicating the device
+ * has been addressed as a slave with a general
+ * call. It is the application's responsibility to
+ * perform any special processing for the general
+ * call.
+ *
+ * XII_REPEATED_START_OPTION The repeated start option allows multiple
+ * messages to be sent/received on the IIC bus
+ * without rearbitrating for the bus. The messages
+ * are sent as a series of messages such that the
+ * option must be enabled before the 1st message of
+ * the series, to prevent a stop condition from
+ * being generated on the bus, and disabled before
+ * the last message of the series, to allow the
+ * stop condition to be generated.
+ *
+ * XII_SEND_10_BIT_OPTION The send 10 bit option allows 10 bit addresses
+ * to be sent on the bus when the device is a
+ * master. The device can be configured to respond
+ * to 7 bit addresses even though it may be
+ * communicating with other devices that support 10
+ * bit addresses. When this option is not enabled,
+ * only 7 bit addresses are sent on the bus.
+ *
+ * </pre>
+ */
+#define XII_GENERAL_CALL_OPTION 0x00000001
+#define XII_REPEATED_START_OPTION 0x00000002
+#define XII_SEND_10_BIT_OPTION 0x00000004
+
+/*@}*/
+
+/** @name Status events
+ *
+ * The following status events occur during IIC bus processing and are passed
+ * to the status callback. Each event is only valid during the appropriate
+ * processing of the IIC bus. Each of these events are bit fields such that
+ * more than one may be specified.
+ * @{
+ */
+/**
+ * <pre>
+ * XII_BUS_NOT_BUSY_EVENT bus transitioned to not busy
+ * XII_ARB_LOST_EVENT arbitration was lost
+ * XII_SLAVE_NO_ACK_EVENT slave did not acknowledge data (had error)
+ * XII_MASTER_READ_EVENT master reading from slave
+ * XII_MASTER_WRITE_EVENT master writing to slave
+ * XII_GENERAL_CALL_EVENT general call to all slaves
+ * </pre>
+ */
+#define XII_BUS_NOT_BUSY_EVENT 0x00000001
+#define XII_ARB_LOST_EVENT 0x00000002
+#define XII_SLAVE_NO_ACK_EVENT 0x00000004
+#define XII_MASTER_READ_EVENT 0x00000008
+#define XII_MASTER_WRITE_EVENT 0x00000010
+#define XII_GENERAL_CALL_EVENT 0x00000020
+/*@}*/
+
+
+/* The following address types are used when setting and getting the addresses
+ * of the driver. These are mutually exclusive such that only one or the other
+ * may be specified.
+ */
+/** bus address of slave device */
+#define XII_ADDR_TO_SEND_TYPE 1
+/** this device's bus address when slave */
+#define XII_ADDR_TO_RESPOND_TYPE 2
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * This typedef contains configuration information for the device.
+ */
+typedef struct {
+ u16 DeviceId; /**< Unique ID of device */
+ u32 BaseAddress;/**< Device base address */
+ int Has10BitAddr;
+ /**< Whether the device has 10 bit address decoding */
+ u8 GpOutWidth; /**< Width in bits of general purpose output (0 = none) */
+} XIic_Config;
+
+/**
+ * This callback function data type is defined to handle the asynchronous
+ * processing of sent and received data of the IIC driver. The application
+ * using this driver is expected to define a handler of this type to support
+ * interrupt driven mode. The handlers are called in an interrupt context such
+ * that minimal processing should be performed. The handler data type is
+ * utilized for both send and receive handlers.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer when
+ * setting the callback functions, and passed back to the upper layer
+ * when the callback is invoked. Its type is unimportant to the driver
+ * component, so it is a void pointer.
+ *
+ * @param ByteCount indicates the number of bytes remaining to be sent or
+ * received. A value of zero indicates that the requested number of
+ * bytes were sent or received.
+ */
+typedef void (*XIic_Handler) (void *CallBackRef, int ByteCount);
+
+/**
+ * This callback function data type is defined to handle the asynchronous
+ * processing of status events of the IIC driver. The application using
+ * this driver is expected to define a handler of this type to support
+ * interrupt driven mode. The handler is called in an interrupt context such
+ * that minimal processing should be performed.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer when
+ * setting the callback functions, and passed back to the upper layer
+ * when the callback is invoked. Its type is unimportant to the driver
+ * component, so it is a void pointer.
+ *
+ * @param StatusEvent indicates one or more status events that occurred. See
+ * the definition of the status events above.
+ */
+typedef void (*XIic_StatusHandler) (void *CallBackRef, int StatusEvent);
+
+/**
+ * XIic statistics
+ */
+typedef struct {
+ u8 ArbitrationLost;/**< Number of times arbitration was lost */
+ u8 RepeatedStarts; /**< Number of repeated starts */
+ u8 BusBusy; /**< Number of times bus busy status returned */
+ u8 RecvBytes; /**< Number of bytes received */
+ u8 RecvInterrupts; /**< Number of receive interrupts */
+ u8 SendBytes; /**< Number of bytes transmitted */
+ u8 SendInterrupts; /**< Number of transmit interrupts */
+ u8 TxErrors; /**< Number of transmit errors (no ack) */
+ u8 IicInterrupts; /**< Number of IIC (device) interrupts */
+} XIicStats;
+
+
+/**
+ * The XIic driver instance data. The user is required to allocate a
+ * variable of this type for every IIC device in the system. A pointer
+ * to a variable of this type is then passed to the driver API functions.
+ */
+typedef struct {
+ XIicStats Stats; /* Statistics */
+ u32 BaseAddress; /* Device base address */
+ int Has10BitAddr; /* TRUE when 10 bit addressing in design */
+ int IsReady; /* Device is initialized and ready */
+ int IsStarted; /* Device has been started */
+ int AddrOfSlave; /* Address of the slave being written to */
+
+ u32 Options; /* current operating options */
+ u8 *SendBufferPtr; /* Buffer to send (state) */
+ u8 *RecvBufferPtr; /* Buffer to receive (state) */
+ u8 TxAddrMode; /* State of Tx Address transmission */
+ int SendByteCount; /* Number of data bytes in buffer (state) */
+ int RecvByteCount; /* Number of empty bytes in buffer (state) */
+
+ u32 BNBOnly; /* TRUE when BNB interrupt needs to */
+ /* call callback */
+ u8 GpOutWidth; /* General purpose output width */
+
+ XIic_StatusHandler StatusHandler;
+ void *StatusCallBackRef; /* Callback reference for status handler */
+ XIic_Handler RecvHandler;
+ void *RecvCallBackRef; /* Callback reference for recv handler */
+ XIic_Handler SendHandler;
+ void *SendCallBackRef; /* Callback reference for send handler */
+ int IsDynamic; /* Flag for dynamic mode operation */
+
+} XIic;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization functions in xiic_sinit.c
+ */
+int XIic_Initialize(XIic * InstancePtr, u16 DeviceId);
+XIic_Config *XIic_LookupConfig(u16 DeviceId);
+
+/*
+ * Required functions in xiic.c
+ */
+int XIic_CfgInitialize(XIic * InstancePtr, XIic_Config * Config,
+ u32 EffectiveAddr);
+
+int XIic_Start(XIic * InstancePtr);
+int XIic_Stop(XIic * InstancePtr);
+
+void XIic_Reset(XIic * InstancePtr);
+
+int XIic_SetAddress(XIic * InstancePtr, int AddressType, int Address);
+u16 XIic_GetAddress(XIic * InstancePtr, int AddressType);
+
+int XIic_SetGpOutput(XIic * InstancePtr, u8 OutputValue);
+int XIic_GetGpOutput(XIic * InstancePtr, u8 *OutputValuePtr);
+
+u32 XIic_IsSlave(XIic * InstancePtr);
+
+void XIic_SetRecvHandler(XIic * InstancePtr, void *CallBackRef,
+ XIic_Handler FuncPtr);
+void XIic_SetSendHandler(XIic * InstancePtr, void *CallBackRef,
+ XIic_Handler FuncPtr);
+void XIic_SetStatusHandler(XIic * InstancePtr, void *CallBackRef,
+ XIic_StatusHandler FuncPtr);
+
+
+/*
+ * Interrupt functions in xiic_intr.c
+ */
+void XIic_InterruptHandler(void *InstancePtr);
+
+/*
+ * Master send and receive functions in normal mode in xiic_master.c
+ */
+int XIic_MasterRecv(XIic * InstancePtr, u8 *RxMsgPtr, int ByteCount);
+int XIic_MasterSend(XIic * InstancePtr, u8 *TxMsgPtr, int ByteCount);
+
+/*
+ * Master send and receive functions in dynamic mode in xiic_master.c
+ */
+int XIic_DynMasterRecv(XIic * InstancePtr, u8 *RxMsgPtr, u8 ByteCount);
+int XIic_DynMasterSend(XIic * InstancePtr, u8 *TxMsgPtr, u8 ByteCount);
+
+/*
+ * Dynamic IIC Core Initialization.
+ */
+int XIic_DynamicInitialize(XIic * InstancePtr);
+
+/*
+ * Slave send and receive functions in xiic_slave.c
+ */
+void XIic_SlaveInclude(void);
+int XIic_SlaveRecv(XIic * InstancePtr, u8 *RxMsgPtr, int ByteCount);
+int XIic_SlaveSend(XIic * InstancePtr, u8 *TxMsgPtr, int ByteCount);
+
+/*
+ * Statistics functions in xiic_stats.c
+ */
+void XIic_GetStats(XIic * InstancePtr, XIicStats * StatsPtr);
+void XIic_ClearStats(XIic * InstancePtr);
+
+/*
+ * Self test functions in xiic_selftest.c
+ */
+int XIic_SelfTest(XIic * InstancePtr);
+
+/*
+ * Bus busy Function in xiic.c
+ */
+u32 XIic_IsIicBusy(XIic * InstancePtr);
+
+/*
+ * Options functions in xiic_options.c
+ */
+void XIic_SetOptions(XIic * InstancePtr, u32 Options);
+u32 XIic_GetOptions(XIic * InstancePtr);
+
+/*
+ * Multi-master functions in xiic_multi_master.c
+ */
+void XIic_MultiMasterInclude(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xiic_i.h,v 1.1 2007/12/03 15:44:58 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-07 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic_i.h
+*
+* This header file contains internal identifiers, which are those shared
+* between XIic components. The identifiers in this file are not intended for
+* use external to the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.01a rfp 10/19/01 release
+* 1.01c ecm 12/05/02 new rev
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XIIC_I_H /* prevent circular inclusions */
+#define XIIC_I_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xiic.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/******************************************************************************
+*
+* This macro sends the first byte of the address for a 10 bit address during
+* both read and write operations. It takes care of the details to format the
+* address correctly.
+*
+* address = 1111_0xxD xx = address MSBits
+* D = Tx direction = 0 = write
+*
+* Note: relies on a local variable "InstancePtr" being in scope at the caller.
+* @param SlaveAddress contains the address of the slave to send to.
+* @param Operation indicates XIIC_READ_OPERATION or XIIC_WRITE_OPERATION
+*
+* @return None.
+*
+* @note Signature:
+* void XIic_mSend10BitAddrByte1(u16 SlaveAddress, u8 Operation);
+*
+******************************************************************************/
+#define XIic_mSend10BitAddrByte1(SlaveAddress, Operation) \
+{ \
+ u8 LocalAddr = (u8)((SlaveAddress) >> 7); \
+ LocalAddr = (LocalAddr & 0xF6) | 0xF0 | (Operation); \
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_DTR_REG_OFFSET, LocalAddr); \
+}
+
+/******************************************************************************
+*
+* This macro sends the second byte of the address for a 10 bit address during
+* both read and write operations. It takes care of the details to format the
+* address correctly.
+*
+* @param SlaveAddress contains the address of the slave to send to.
+*
+* @return None.
+*
+* @note Signature:
+* void XIic_mSend10BitAddrByte2(u16 SlaveAddress);
+*
+******************************************************************************/
+#define XIic_mSend10BitAddrByte2(SlaveAddress) \
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_DTR_REG_OFFSET, \
+ (u8)(SlaveAddress));
+
+/******************************************************************************
+*
+* This macro sends the address for a 7 bit address during both read and write
+* operations. It takes care of the details to format the address correctly.
+*
+* Note: relies on a local variable "InstancePtr" being in scope at the caller.
+* @param SlaveAddress contains the address of the slave to send to.
+* @param Operation indicates XIIC_READ_OPERATION or XIIC_WRITE_OPERATION
+*
+* @return None.
+*
+* @note Signature:
+* void XIic_mSend7BitAddr(u16 SlaveAddress, u8 Operation);
+*
+******************************************************************************/
+#define XIic_mSend7BitAddr(SlaveAddress, Operation) \
+{ \
+ u8 LocalAddr = (u8)(SlaveAddress << 1); \
+ LocalAddr = (LocalAddr & 0xFE) | (Operation); \
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_DTR_REG_OFFSET, LocalAddr); \
+}
+
+/******************************************************************************
+*
+* This macro disables the specified interrupts in the Interrupt enable
+* register. It is non-destructive in that the register is read and only the
+* interrupts specified are changed.
+*
+* @param BaseAddress is the base address of the IIC device.
+* @param InterruptMask contains the interrupts to be disabled
+*
+* @return None.
+*
+* @note Signature:
+* void XIic_mDisableIntr(u32 BaseAddress, u32 InterruptMask);
+*
+******************************************************************************/
+#define XIic_mDisableIntr(BaseAddress, InterruptMask) \
+ XIIC_WRITE_IIER((BaseAddress), \
+ XIIC_READ_IIER(BaseAddress) & ~(InterruptMask))
+
+/******************************************************************************
+*
+* This macro enables the specified interrupts in the Interrupt enable
+* register. It is non-destructive in that the register is read and only the
+* interrupts specified are changed.
+*
+* @param BaseAddress is the base address of the IIC device.
+* @param InterruptMask contains the interrupts to be enabled
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mEnableIntr(u32 BaseAddress, u32 InterruptMask);
+*
+******************************************************************************/
+#define XIic_mEnableIntr(BaseAddress, InterruptMask) \
+	XIIC_WRITE_IIER((BaseAddress), \
+		XIIC_READ_IIER(BaseAddress) | (InterruptMask))
+
+/******************************************************************************
+*
+* This macro clears the specified interrupt in the Interrupt status
+* register. It is non-destructive in that the register is read and only the
+* interrupt specified is cleared. Clearing an interrupt acknowledges it.
+*
+* @param BaseAddress is the base address of the IIC device.
+* @param InterruptMask contains the interrupts to be cleared
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mClearIntr(u32 BaseAddress, u32 InterruptMask);
+*
+******************************************************************************/
+#define XIic_mClearIntr(BaseAddress, InterruptMask) \
+	XIIC_WRITE_IISR((BaseAddress), \
+		XIIC_READ_IISR(BaseAddress) & (InterruptMask))
+
+/******************************************************************************
+*
+* This macro clears and enables the specified interrupt in the Interrupt
+* status and enable registers. It is non-destructive in that the registers are
+* read and only the interrupt specified is modified.
+* Clearing an interrupt acknowledges it.
+*
+* @param BaseAddress is the base address of the IIC device.
+* @param InterruptMask contains the interrupts to be cleared and enabled
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mClearEnableIntr(u32 BaseAddress, u32 InterruptMask);
+*
+* @note The interrupt is cleared (acknowledged) in the status register
+*	first, then enabled in the enable register.
+*
+******************************************************************************/
+#define XIic_mClearEnableIntr(BaseAddress, InterruptMask) \
+{ \
+	XIIC_WRITE_IISR(BaseAddress, \
+		(XIIC_READ_IISR(BaseAddress) & (InterruptMask))); \
+	\
+	XIIC_WRITE_IIER(BaseAddress, \
+		(XIIC_READ_IIER(BaseAddress) | (InterruptMask))); \
+}
+
+/******************************************************************************
+*
+* This macro flushes the receive FIFO such that all bytes contained within it
+* are discarded.
+*
+* @param InstancePtr is a pointer to the IIC instance containing the FIFO
+*	to be flushed.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mFlushRxFifo(XIic *InstancePtr);
+*
+* @note The receive FIFO occupancy register is zero based, hence the +1
+*	when computing the number of bytes to drain.
+*
+******************************************************************************/
+#define XIic_mFlushRxFifo(InstancePtr) \
+{ \
+	int LoopCnt; \
+	u8 Temp; \
+	u8 BytesToRead = XIo_In8(InstancePtr->BaseAddress + \
+				XIIC_RFO_REG_OFFSET) + 1; \
+	for(LoopCnt = 0; LoopCnt < BytesToRead; LoopCnt++) \
+	{ \
+		Temp = XIo_In8(InstancePtr->BaseAddress + XIIC_DRR_REG_OFFSET); \
+	} \
+}
+
+/******************************************************************************
+*
+* This macro flushes the transmit FIFO such that all bytes contained within it
+* are discarded.
+*
+* @param InstancePtr is a pointer to the IIC instance containing the FIFO
+*	to be flushed.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mFlushTxFifo(XIic *InstancePtr);
+*
+* @note The flush is performed by pulsing the TX FIFO reset bit in the
+*	control register and then restoring the original register value.
+*
+******************************************************************************/
+#define XIic_mFlushTxFifo(InstancePtr) \
+{ \
+	u8 CntlReg = XIo_In8(InstancePtr->BaseAddress + \
+				XIIC_CR_REG_OFFSET); \
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET, \
+		 CntlReg | XIIC_CR_TX_FIFO_RESET_MASK); \
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET, CntlReg); \
+}
+
+/******************************************************************************
+*
+* This macro reads the next available received byte from the receive FIFO
+* and updates all the data structures to reflect it.
+*
+* @param InstancePtr is a pointer to the IIC instance to be operated on.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mReadRecvByte(XIic *InstancePtr);
+*
+* @note Advances RecvBufferPtr, decrements the remaining RecvByteCount
+*	and updates the receive statistics.
+*
+******************************************************************************/
+#define XIic_mReadRecvByte(InstancePtr) \
+{ \
+	*InstancePtr->RecvBufferPtr++ = \
+		XIo_In8(InstancePtr->BaseAddress + XIIC_DRR_REG_OFFSET); \
+	InstancePtr->RecvByteCount--; \
+	InstancePtr->Stats.RecvBytes++; \
+}
+
+/******************************************************************************
+*
+* This macro writes the next byte to be sent to the transmit FIFO
+* and updates all the data structures to reflect it.
+*
+* @param InstancePtr is a pointer to the IIC instance to be operated on.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mWriteSendByte(XIic *InstancePtr);
+*
+* @note Advances SendBufferPtr, decrements the remaining SendByteCount
+*	and updates the send statistics.
+*
+******************************************************************************/
+#define XIic_mWriteSendByte(InstancePtr) \
+{ \
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_DTR_REG_OFFSET, \
+		*InstancePtr->SendBufferPtr++); \
+	InstancePtr->SendByteCount--; \
+	InstancePtr->Stats.SendBytes++; \
+}
+
+/******************************************************************************
+*
+* This macro sets up the control register for a master receive operation.
+* A write is necessary if a 10 bit operation is being performed.
+*
+* @param InstancePtr is a pointer to the IIC instance to be operated on.
+* @param ControlRegister contains the contents of the IIC device control
+*	register
+* @param ByteCount contains the number of bytes to be received for the
+*	master receive operation
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mSetControlRegister(XIic *InstancePtr,
+*					u8 ControlRegister,
+*					int ByteCount);
+*
+* @note Clears the NO_ACK and direction bits first. With the 10 bit
+*	option the direction is set to transmit; otherwise NO_ACK is set
+*	when only one byte is to be received so the last byte is not
+*	acknowledged.
+*
+******************************************************************************/
+#define XIic_mSetControlRegister(InstancePtr, ControlRegister, ByteCount) \
+{ \
+	(ControlRegister) &= ~(XIIC_CR_NO_ACK_MASK | XIIC_CR_DIR_IS_TX_MASK); \
+	if (InstancePtr->Options & XII_SEND_10_BIT_OPTION) \
+	{ \
+		(ControlRegister) |= XIIC_CR_DIR_IS_TX_MASK; \
+	} \
+	else \
+	{ \
+		if ((ByteCount) == 1) \
+		{ \
+			(ControlRegister) |= XIIC_CR_NO_ACK_MASK; \
+		} \
+	} \
+}
+
+/******************************************************************************
+*
+* This macro enters a critical region by disabling the global interrupt bit
+* in the Global interrupt register.
+*
+* @param BaseAddress is the base address of the IIC device.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mEnterCriticalRegion(u32 BaseAddress)
+*
+* @note Only the IIC device's global interrupt enable is affected.
+*
+******************************************************************************/
+#define XIic_mEnterCriticalRegion(BaseAddress) \
+	XIIC_GINTR_DISABLE(BaseAddress)
+
+/******************************************************************************
+*
+* This macro exits a critical region by enabling the global interrupt bit
+* in the Global interrupt register.
+*
+* @param BaseAddress is the base address of the IIC device.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIic_mExitCriticalRegion(u32 BaseAddress)
+*
+* @note Only the IIC device's global interrupt enable is affected.
+*
+******************************************************************************/
+#define XIic_mExitCriticalRegion(BaseAddress) \
+	XIIC_GINTR_ENABLE(BaseAddress)
+
+/******************************************************************************
+*
+* This macro clears the statistics of an instance. It is implemented as a
+* macro so that it can be shared by the optional parts of the driver.
+*
+* @param InstancePtr is a pointer to the IIC instance to be operated on.
+*
+* @return None.
+*
+* @note Signature:
+*	void XIIC_CLEAR_STATS(XIic *InstancePtr)
+*
+* @note Zeroes the statistics structure one byte at a time (no dependency
+*	on memset).
+*
+******************************************************************************/
+#define XIIC_CLEAR_STATS(InstancePtr) \
+{ \
+	u8 NumBytes; \
+	u8 *DestPtr; \
+	\
+	DestPtr = (u8 *)&InstancePtr->Stats; \
+	for (NumBytes = 0; NumBytes < sizeof(XIicStats); NumBytes++) \
+	{ \
+		*DestPtr++ = 0; \
+	} \
+}
+
+/************************** Function Prototypes ******************************/
+
+extern XIic_Config XIic_ConfigTable[];
+
+/* The following variables are shared across files of the driver and
+ * are function pointers that are necessary to break dependencies allowing
+ * optional parts of the driver to be used without condition compilation
+ */
+extern void (*XIic_AddrAsSlaveFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_NotAddrAsSlaveFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_RecvSlaveFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_SendSlaveFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_RecvMasterFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_SendMasterFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_ArbLostFuncPtr) (XIic * InstancePtr);
+extern void (*XIic_BusNotBusyFuncPtr) (XIic * InstancePtr);
+
+void XIic_TransmitFifoFill(XIic * InstancePtr, int Role);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xiic_intr.c,v 1.1 2007/12/03 15:44:58 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic_intr.c
+*
+* Contains interrupt functions of the XIic driver. This file is required
+* for the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.01a rfp 10/19/01 release
+* 1.01c ecm 12/05/02 new rev
+* 1.01c rmm 05/14/03 Fixed diab compiler warnings relating to asserts.
+* 1.03a ecm 06/22/06 Added a call to the status handler in the TxErrorHandler
+* even if the Rx buffer pointer is not set. This fix is as
+* a result of a Sony use model which did not set the RX
+* pointer while in Master mode so it checks if MSMS == 1.
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+
+/***************************** Include Files *********************************/
+
+#include "xiic.h"
+#include "xiic_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions ******************/
+
+
+/*************** Macros (Inline Functions) Definitions ********************/
+
+
+/************************** Function Prototypes ****************************/
+
+static void StubFunction(XIic * InstancePtr);
+static void TxErrorHandler(XIic * InstancePtr);
+
+/************************** Variable Definitions *****************************/
+
+/* The following function pointers are used to help allow finer partitioning
+ * of the driver such that some parts of it are optional. These pointers are
+ * setup by functions in the optional parts of the driver.
+ *
+ * Each pointer defaults to StubFunction (which asserts if invoked) until
+ * the optional module that handles the corresponding event installs its
+ * own handler.
+ */
+void (*XIic_AddrAsSlaveFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_NotAddrAsSlaveFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_RecvSlaveFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_SendSlaveFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_RecvMasterFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_SendMasterFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_ArbLostFuncPtr) (XIic * InstancePtr) = StubFunction;
+void (*XIic_BusNotBusyFuncPtr) (XIic * InstancePtr) = StubFunction;
+
+/*****************************************************************************/
+/**
+*
+* This function is the interrupt handler for the XIic driver. This function
+* should be connected to the interrupt system.
+*
+* Only one interrupt source is handled for each interrupt allowing
+* higher priority system interrupts quicker response time.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @internal
+*
+* The XIIC_INTR_ARB_LOST_MASK and XIIC_INTR_TX_ERROR_MASK interrupts must have
+* higher priority than the other device interrupts so that the IIC device does
+* not get into a potentially confused state. The remaining interrupts may be
+* rearranged with no harm.
+*
+* All XIic device interrupts are ORed into one device interrupt. This routine
+* reads the pending interrupts via the IpIf interface and masks that with the
+* interrupt mask to evaluate only the interrupts enabled.
+*
+******************************************************************************/
+void XIic_InterruptHandler(void *InstancePtr)
+{
+	u8 Status;
+	u32 IntrStatus;
+	u32 IntrPending;
+	u32 IntrEnable;
+	XIic *IicPtr = NULL;
+	u32 Clear = 0;
+
+	/*
+	 * Verify that each of the inputs are valid.
+	 */
+	XASSERT_VOID(InstancePtr != NULL);
+
+	/*
+	 * Convert the non-typed pointer to an IIC instance pointer
+	 */
+	IicPtr = (XIic *) InstancePtr;
+
+	/* Get the interrupt Status from the IPIF. There is no clearing of
+	 * interrupts in the IPIF. Interrupts must be cleared at the source.
+	 * To find which interrupts are pending; AND interrupts pending with
+	 * interrupts masked.
+	 */
+	IntrPending = XIIC_READ_IISR(IicPtr->BaseAddress);
+	IntrEnable = XIIC_READ_IIER(IicPtr->BaseAddress);
+	IntrStatus = IntrPending & IntrEnable;
+
+	/* Do not process a device's interrupts if the device has no
+	 * interrupts pending or the global interrupts have been disabled.
+	 * Logical || is used so the global-enable register is only read
+	 * when an interrupt is actually pending.
+	 */
+
+	if ((IntrStatus == 0) ||
+	    (XIIC_IS_GINTR_ENABLED(IicPtr->BaseAddress) == FALSE)) {
+		return;
+	}
+
+	/* Update interrupt stats and get the contents of the status register
+	 */
+	IicPtr->Stats.IicInterrupts++;
+	Status = XIo_In8(IicPtr->BaseAddress + XIIC_SR_REG_OFFSET);
+
+	/* Service requesting interrupt
+	 */
+	if (IntrStatus & XIIC_INTR_ARB_LOST_MASK) {
+		/* Bus Arbitration Lost */
+
+		IicPtr->Stats.ArbitrationLost++;
+		XIic_ArbLostFuncPtr(IicPtr);
+
+		Clear = XIIC_INTR_ARB_LOST_MASK;
+	}
+
+	else if (IntrStatus & XIIC_INTR_TX_ERROR_MASK) {
+		/* Transmit errors (no acknowledge) received */
+
+		IicPtr->Stats.TxErrors++;
+		TxErrorHandler(IicPtr);
+
+		Clear = XIIC_INTR_TX_ERROR_MASK;
+	}
+
+	else if (IntrStatus & XIIC_INTR_NAAS_MASK) {
+		/* Not Addressed As Slave */
+
+		XIic_NotAddrAsSlaveFuncPtr(IicPtr);
+		Clear = XIIC_INTR_NAAS_MASK;
+	}
+
+	else if (IntrStatus & XIIC_INTR_RX_FULL_MASK) {
+		/* Receive register/FIFO is full */
+
+		IicPtr->Stats.RecvInterrupts++;
+
+		if (Status & XIIC_SR_ADDR_AS_SLAVE_MASK) {
+			XIic_RecvSlaveFuncPtr(IicPtr);
+		}
+		else {
+			XIic_RecvMasterFuncPtr(IicPtr);
+		}
+
+		Clear = XIIC_INTR_RX_FULL_MASK;
+	}
+
+	else if (IntrStatus & XIIC_INTR_AAS_MASK) {
+		/* Addressed As Slave */
+
+		XIic_AddrAsSlaveFuncPtr(IicPtr);
+		Clear = XIIC_INTR_AAS_MASK;
+	}
+
+	else if (IntrStatus & XIIC_INTR_BNB_MASK) {
+		/* IIC bus has transitioned to not busy */
+
+		/* check if send callback needs to run */
+		if (IicPtr->BNBOnly == TRUE) {
+			XIic_BusNotBusyFuncPtr(IicPtr);
+			IicPtr->BNBOnly = FALSE;
+		}
+		else {
+			IicPtr->SendHandler(IicPtr->SendCallBackRef, 0);
+		}
+
+
+		Clear = XIIC_INTR_BNB_MASK;
+
+		/* The bus is not busy, disable BusNotBusy interrupt */
+		XIic_mDisableIntr(IicPtr->BaseAddress, XIIC_INTR_BNB_MASK);
+
+	}
+
+	else if ((IntrStatus & XIIC_INTR_TX_EMPTY_MASK) ||
+		 (IntrStatus & XIIC_INTR_TX_HALF_MASK)) {
+		/* Transmit register/FIFO is empty or half empty
+		 */
+		IicPtr->Stats.SendInterrupts++;
+
+		if (Status & XIIC_SR_ADDR_AS_SLAVE_MASK) {
+			XIic_SendSlaveFuncPtr(IicPtr);
+		}
+		else {
+			XIic_SendMasterFuncPtr(IicPtr);
+		}
+
+		/* Clear Interrupts
+		 */
+		IntrStatus = XIIC_READ_IISR(IicPtr->BaseAddress);
+		Clear = IntrStatus & (XIIC_INTR_TX_EMPTY_MASK |
+				XIIC_INTR_TX_HALF_MASK);
+	}
+
+	XIIC_WRITE_IISR(IicPtr->BaseAddress, Clear);
+}
+
+/******************************************************************************
+*
+* This function fills the FIFO using the occupancy register to determine the
+* available space to be filled. When the repeated start option is on, the last
+* byte is withheld to allow the control register to be properly set on the last
+* byte.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @param Role indicates the role of this IIC device, a slave or a master, on
+*	the IIC bus (XIIC_SLAVE_ROLE or XIIC_MASTER_ROLE)
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XIic_TransmitFifoFill(XIic * InstancePtr, int Role)
+{
+	u8 AvailBytes;
+	int LoopCnt;
+	int NumBytesToSend;
+
+	/* Determine number of bytes to write to FIFO. Number of bytes that can be
+	 * put into the FIFO is (FIFO depth) - (current occupancy + 1)
+	 * When more room in FIFO than msg bytes put all of message in the FIFO.
+	 */
+	AvailBytes = IIC_TX_FIFO_DEPTH -
+		(XIo_In8(InstancePtr->BaseAddress + XIIC_TFO_REG_OFFSET) + 1);
+
+	if (InstancePtr->SendByteCount > AvailBytes) {
+		NumBytesToSend = AvailBytes;
+	}
+	else {
+		/* More space in FIFO than bytes in message
+		 */
+		if ((InstancePtr->Options & XII_REPEATED_START_OPTION) ||
+		    (Role == XIIC_SLAVE_ROLE)) {
+			NumBytesToSend = InstancePtr->SendByteCount;
+		}
+		else {
+			/* NOTE(review): here the last byte is withheld when
+			 * repeated start is OFF and the role is master, so the
+			 * control register can be changed before the final
+			 * byte goes out; the function header states the
+			 * opposite condition - confirm which is intended.
+			 */
+			NumBytesToSend = InstancePtr->SendByteCount - 1;
+		}
+	}
+
+	/* fill FIFO with amount determined above */
+
+	for (LoopCnt = 0; LoopCnt < NumBytesToSend; LoopCnt++) {
+		XIic_mWriteSendByte(InstancePtr);
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* This interrupt occurs four different ways: Two as master and two as slave.
+* Master:
+* <pre>
+* (1) Transmitter (IMPLIES AN ERROR)
+* The slave receiver did not acknowledge properly.
+* (2) Receiver (Implies tx complete)
+* Interrupt caused by setting TxAck high in the IIC to indicate to the
+* the last byte has been transmitted.
+* </pre>
+*
+* Slave:
+* <pre>
+* (3) Transmitter (Implies tx complete)
+* Interrupt caused by master device indicating last byte of the message
+* has been transmitted.
+* (4) Receiver (IMPLIES AN ERROR)
+* Interrupt caused by setting TxAck high in the IIC to indicate Rx
+* IIC had a problem - set by this device and condition already known
+* and interrupt is not enabled.
+* </pre>
+*
+* This interrupt is enabled during Master send and receive and disabled
+* when this device knows it is going to send a negative acknowledge (Ack = No).
+*
+* Signals user of Tx error via status callback sending: XII_TX_ERROR_EVENT
+*
+* When MasterRecv has no message to send and only receives one byte of data
+* from the slave device, the TxError must be enabled to catch addressing
+* errors, yet there is no opportunity to disable TxError when there is no
+* data to send allowing disabling on last byte. When the slave sends the
+* only byte the NOAck causes a Tx Error. To disregard this as no real error,
+* when there is data in the Receive FIFO/register then the error was not
+* a device address write error, but a NOACK read error - to be ignored.
+* To work with or without FIFO's, the Rx Data interrupt is used to indicate
+* data is in the rx register.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* No action is required to clear this interrupt in the device as it is a
+* pulse. The interrupt need only be cleared in the IpIf interface.
+*
+******************************************************************************/
+static void TxErrorHandler(XIic * InstancePtr)
+{
+	u32 IntrStatus;
+	u8 CntlReg;
+
+	/* When Sending as a slave, Tx error signals end of msg. Not Addressed As
+	 * Slave will handle the callbacks. this is used to only flush the Tx fifo.
+	 * The addressed as slave bit is gone as soon as the bus has been released
+	 * such that the buffer pointers are used to determine the direction of
+	 * transfer (send or receive).
+	 */
+	if (InstancePtr->RecvBufferPtr == NULL) {
+		/* Master Receiver finished reading message. Flush Tx fifo to remove a
+		 * 0xFF that was written to prevent bus throttling, and disable all
+		 * transmit and receive interrupts
+		 */
+		XIic_mFlushTxFifo(InstancePtr);
+		XIic_mDisableIntr(InstancePtr->BaseAddress,
+				  XIIC_TX_RX_INTERRUPTS);
+
+
+		/* If operating in Master mode, call status handler to indicate
+		 * NOACK occurred
+		 */
+		CntlReg =
+			XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+		if ((CntlReg & XIIC_CR_MSMS_MASK) != 0) {
+			InstancePtr->StatusHandler(InstancePtr->
+						   StatusCallBackRef,
+						   XII_SLAVE_NO_ACK_EVENT);
+		}
+		return;
+	}
+
+	/* Data in the receive register from either master or slave receive
+	 * When:slave, indicates master sent last byte, message completed.
+	 * When:master, indicates a master Receive with one byte received. When a
+	 * byte is in Rx reg then the Tx error indicates the Rx data was recovered
+	 * normally Tx errors are not enabled such that this should not occur.
+	 */
+	IntrStatus = XIIC_READ_IISR(InstancePtr->BaseAddress);
+	if (IntrStatus & XIIC_INTR_RX_FULL_MASK) {
+		/* Rx Reg/FIFO has data, Disable tx error interrupts */
+
+		XIic_mDisableIntr(InstancePtr->BaseAddress,
+				  XIIC_INTR_TX_ERROR_MASK);
+		return;
+	}
+
+	XIic_mFlushTxFifo(InstancePtr);
+
+	/* Disable and clear tx empty, half empty, Rx Full or tx error interrupts
+	 */
+	XIic_mDisableIntr(InstancePtr->BaseAddress, XIIC_TX_RX_INTERRUPTS);
+	XIic_mClearIntr(InstancePtr->BaseAddress, XIIC_TX_RX_INTERRUPTS);
+
+	/* Clear MSMS as on TX error when Rxing, the bus will be
+	 * stopped but MSMS bit is still set. Reset to proper state
+	 */
+	CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+	CntlReg &= ~XIIC_CR_MSMS_MASK;
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET, CntlReg);
+
+
+	/* set FIFO occupancy depth = 1 so that the first byte will throttle
+	 * next receive msg
+	 */
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_RFD_REG_OFFSET, 0);
+
+	/* make event callback */
+
+	InstancePtr->StatusHandler(InstancePtr->StatusCallBackRef,
+				   XII_SLAVE_NO_ACK_EVENT);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function is a stub function that is used for the default function for
+* events that are handled optionally only when the appropriate modules are
+* linked in. Function pointers are used to handle some events to allow
+* some events to be optionally handled.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @note Asserts unconditionally; reaching this stub means an event fired
+*	whose optional handler module was not linked in.
+*
+******************************************************************************/
+static void StubFunction(XIic * InstancePtr)
+{
+	XASSERT_VOID_ALWAYS();
+}
--- /dev/null
+/* $Id: xiic_l.c,v 1.3 2007/12/17 19:15:38 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2007 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic_l.c
+*
+* This file contains low-level driver functions that can be used to access the
+* device in normal and dynamic controller mode. The user should refer to the
+* hardware device specification for more details of the device operation.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- --- ------- -----------------------------------------------
+* 1.01b jhl 05/13/02 First release
+* 1.01b jhl 10/14/02 Corrected bug in the receive function, the setup of the
+* interrupt status mask was not being done in the loop such
+* that a read would sometimes fail on the last byte because
+* the transmit error which should have been ignored was
+* being used. This would leave an extra byte in the FIFO
+* and the bus throttled such that the next operation would
+* also fail. Also updated the receive function to not
+* disable the device after the last byte until after the
+* bus transitions to not busy which is more consistent
+* with the expected behavior.
+* 1.01c ecm 12/05/02 new rev
+* 1.02a mta 03/09/06 Implemented Repeated Start in the Low Level Driver.
+* 1.03a mta 04/04/06 Implemented Dynamic IIC core routines.
+* 1.03a ecm 06/15/06 Fixed the hang in low_level_eeprom_test with -O0
+* Added polling loops for BNB to allow the slave to
+* respond correctly. Also added polling loop prior
+* to reset in _Recv.
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* 1.13b ecm 11/29/07 added BB polling loops to the DynSend and DynRecv
+* routines to handle the race condition with BNB in IISR.
+* </pre>
+*
+****************************************************************************/
+
+/***************************** Include Files *******************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xiic_l.h"
+
+/************************** Constant Definitions ***************************/
+
+/**************************** Type Definitions *****************************/
+
+/***************** Macros (Inline Functions) Definitions *******************/
+
+/************************** Function Prototypes ****************************/
+
+static unsigned RecvData(u32 BaseAddress, u8 *BufferPtr,
+ unsigned ByteCount, u8 Option);
+static unsigned SendData(u32 BaseAddress, u8 *BufferPtr,
+ unsigned ByteCount, u8 Option);
+
+static unsigned DynRecvData(u32 BaseAddress, u8 *BufferPtr, u8 ByteCount);
+static unsigned DynSendData(u32 BaseAddress, u8 *BufferPtr,
+ u8 ByteCount, u8 Option);
+
+/************************** Variable Definitions **************************/
+
+/****************************************************************************/
+/**
+* Receive data as a master on the IIC bus. This function receives the data
+* using polled I/O and blocks until the data has been received. It only
+* supports 7 bit addressing mode of operation. The user is responsible for
+* ensuring the bus is not busy if multiple masters are present on the bus.
+*
+* @param BaseAddress contains the base address of the IIC device.
+* @param Address contains the 7 bit IIC address of the device to receive
+*	the data from.
+* @param BufferPtr points to the buffer to hold the data that is received.
+* @param ByteCount is the number of bytes to be received.
+* @param Option indicates whether to hold or free the bus after reception
+*	of data, XIIC_STOP = end with STOP condition, XIIC_REPEATED_START
+*	= don't end with STOP condition.
+*
+* @return
+*
+* The number of bytes received.
+*
+* @note
+*
+* None
+*
+******************************************************************************/
+unsigned XIic_Recv(u32 BaseAddress, u8 Address,
+		   u8 *BufferPtr, unsigned ByteCount, u8 Option)
+{
+	u8 CntlReg;
+	unsigned RemainingByteCount;
+	volatile u8 StatusReg;
+
+	/* Tx error is enabled in case the address (7 or 10) has no device to answer
+	 * with Ack. When only one byte of data, must set NO ACK before address goes
+	 * out therefore Tx error must not be enabled as it will go off immediately
+	 * and the Rx full interrupt will be checked. If full, then the one byte
+	 * was received and the Tx error will be disabled without sending an error
+	 * callback msg.
+	 */
+	XIic_mClearIisr(BaseAddress,
+			XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK |
+			XIIC_INTR_ARB_LOST_MASK);
+
+	/* Set receive FIFO occupancy depth for 1 byte (zero based)
+	 */
+	XIo_Out8(BaseAddress + XIIC_RFD_REG_OFFSET, 0);
+
+
+	/* Check to see if already Master on the Bus.
+	 * If Repeated Start bit is not set send Start bit by setting MSMS bit else
+	 * Send the address.
+	 */
+	CntlReg = XIo_In8(BaseAddress + XIIC_CR_REG_OFFSET);
+	if ((CntlReg & XIIC_CR_REPEATED_START_MASK) == 0) {
+		/* 7 bit slave address, send the address for a read operation
+		 * and set the state to indicate the address has been sent
+		 */
+		XIic_mSend7BitAddress(BaseAddress, Address,
+				      XIIC_READ_OPERATION);
+
+
+		/* MSMS gets set after putting data in FIFO. Start the master receive
+		 * operation by setting CR Bits MSMS to Master, if the buffer is only one
+		 * byte, then it should not be acknowledged to indicate the end of data
+		 */
+		CntlReg = XIIC_CR_MSMS_MASK | XIIC_CR_ENABLE_DEVICE_MASK;
+		if (ByteCount == 1) {
+			CntlReg |= XIIC_CR_NO_ACK_MASK;
+		}
+
+		/* Write out the control register to start receiving data and call the
+		 * function to receive each byte into the buffer
+		 */
+		XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET, CntlReg);
+
+		/* Clear the latched interrupt status for the bus not busy bit which must
+		 * be done while the bus is busy
+		 */
+		StatusReg = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+
+		while ((StatusReg & XIIC_SR_BUS_BUSY_MASK) == 0) {
+			StatusReg = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+
+		}
+
+		XIic_mClearIisr(BaseAddress, XIIC_INTR_BNB_MASK);
+	}
+	else {
+		/* Already owns the Bus indicating that it's a Repeated Start call.
+		 * 7 bit slave address, send the address for a read operation
+		 * and set the state to indicate the address has been sent
+		 */
+		XIic_mSend7BitAddress(BaseAddress, Address,
+				      XIIC_READ_OPERATION);
+	}
+	/* Try to receive the data from the IIC bus */
+
+	RemainingByteCount =
+		RecvData(BaseAddress, BufferPtr, ByteCount, Option);
+
+	CntlReg = XIo_In8(BaseAddress + XIIC_CR_REG_OFFSET);
+	if ((CntlReg & XIIC_CR_REPEATED_START_MASK) == 0) {
+		/* The receive is complete, disable the IIC device if the Option is
+		 * to release the Bus after Reception of data and return the number of
+		 * bytes that was received
+		 */
+		XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET, 0);
+	}
+
+	/* Return the number of bytes that was received */
+
+	return ByteCount - RemainingByteCount;
+}
+
+/******************************************************************************
+*
+* Receive the specified data from the device that has been previously addressed
+* on the IIC bus. This function assumes that the 7 bit address has been sent
+* and it should wait for the transmit of the address to complete.
+*
+* @param BaseAddress contains the base address of the IIC device.
+* @param BufferPtr points to the buffer to hold the data that is received.
+* @param ByteCount is the number of bytes to be received.
+* @param Option indicates whether to hold or free the bus after reception
+* of data, XIIC_STOP = end with STOP condition, XIIC_REPEATED_START
+* = don't end with STOP condition.
+*
+* @return
+*
+* The number of bytes remaining to be received.
+*
+* @note
+*
+* This function does not take advantage of the receive FIFO because it is
+* designed for minimal code space and complexity. It contains loops
+* that could cause the function not to return if the hardware is not working.
+*
+* This function assumes that the calling function will disable the IIC device
+* after this function returns.
+*
+******************************************************************************/
+static unsigned RecvData(u32 BaseAddress, u8 *BufferPtr,
+ unsigned ByteCount, u8 Option)
+{
+ u8 CntlReg;
+ u32 IntrStatusMask;
+ u32 IntrStatus;
+
+ /* Attempt to receive the specified number of bytes on the IIC bus */
+
+ while (ByteCount > 0) {
+ /* Setup the mask to use for checking errors because when receiving one
+ * byte OR the last byte of a multibyte message an error naturally
+ * occurs when the no ack is done to tell the slave the last byte
+ */
+ if (ByteCount == 1) {
+ IntrStatusMask =
+ XIIC_INTR_ARB_LOST_MASK | XIIC_INTR_BNB_MASK;
+ }
+ else {
+ IntrStatusMask =
+ XIIC_INTR_ARB_LOST_MASK |
+ XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_BNB_MASK;
+ }
+
+ /* Wait for the previous transmit and the 1st receive to complete
+ * by checking the interrupt status register of the IPIF
+ */
+ while (1) {
+ IntrStatus = XIIC_READ_IISR(BaseAddress);
+ if (IntrStatus & XIIC_INTR_RX_FULL_MASK) {
+ break;
+ }
+ /* Check the transmit error after the receive full because when
+ * sending only one byte transmit error will occur because of the
+ * no ack to indicate the end of the data
+ */
+ if (IntrStatus & IntrStatusMask) {
+ return ByteCount;
+ }
+ }
+
+ CntlReg = XIo_In8(BaseAddress + XIIC_CR_REG_OFFSET);
+
+ /* Special conditions exist for the last two bytes so check for them
+ * Note that the control register must be setup for these conditions
+ * before the data byte which was already received is read from the
+ * receive FIFO (while the bus is throttled).
+ */
+ if (ByteCount == 1) {
+ if (Option == XIIC_STOP) {
+
+ /* If the Option is to release the bus after the last data
+ * byte, it has already been read and no ack has been done, so
+ * clear MSMS while leaving the device enabled so it can get off
+ * the IIC bus appropriately with a stop.
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ XIIC_CR_ENABLE_DEVICE_MASK);
+ }
+ }
+
+ /* Before the last byte is received, set NOACK to tell the slave IIC
+ * device that it is the end, this must be done before reading the byte
+ * from the FIFO
+ */
+ if (ByteCount == 2) {
+ /* Write control reg with NO ACK allowing last byte to
+ * have the No ack set to indicate to slave last byte read.
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ CntlReg | XIIC_CR_NO_ACK_MASK);
+ }
+
+ /* Read in data from the FIFO and unthrottle the bus such that the
+ * next byte is read from the IIC bus
+ */
+ *BufferPtr++ = XIo_In8(BaseAddress + XIIC_DRR_REG_OFFSET);
+
+ if ((ByteCount == 1) && (Option == XIIC_REPEATED_START)) {
+
+ /* RSTA bit should be set only when the FIFO is completely Empty.
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ XIIC_CR_ENABLE_DEVICE_MASK | XIIC_CR_MSMS_MASK
+ | XIIC_CR_REPEATED_START_MASK);
+
+ }
+
+ /* Clear the latched interrupt status so that it will be updated with
+ * the new state when it changes, this must be done after the receive
+ * register is read
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_ARB_LOST_MASK);
+ ByteCount--;
+ }
+
+
+ if (Option == XIIC_STOP) {
+
+ /* If the Option is to release the bus after Reception of data, wait
+ * for the bus to transition to not busy before returning, the IIC
+ * device cannot be disabled until this occurs. It should transition as
+ * the MSMS bit of the control register was cleared before the last byte
+ * was read from the FIFO.
+ */
+ while (1) {
+ if (XIIC_READ_IISR(BaseAddress) &
+ XIIC_INTR_BNB_MASK) {
+ break;
+ }
+ }
+ }
+
+ return ByteCount;
+}
+
+/****************************************************************************/
+/**
+* Send data as a master on the IIC bus. This function sends the data
+* using polled I/O and blocks until the data has been sent. It only supports
+* 7 bit addressing mode of operation. The user is responsible for ensuring
+* the bus is not busy if multiple masters are present on the bus.
+*
+* @param BaseAddress contains the base address of the IIC device.
+* @param Address contains the 7 bit IIC address of the device to send the
+* specified data to.
+* @param BufferPtr points to the data to be sent.
+* @param ByteCount is the number of bytes to be sent.
+* @param Option indicates whether to hold or free the bus after
+* transmitting the data.
+*
+* @return
+*
+* The number of bytes sent.
+*
+* @note
+*
+* None
+*
+******************************************************************************/
+unsigned XIic_Send(u32 BaseAddress, u8 Address,
+ u8 *BufferPtr, unsigned ByteCount, u8 Option)
+{
+ unsigned RemainingByteCount;
+ u8 ControlReg;
+ volatile u8 StatusReg;
+
+ /* Check to see if already Master on the Bus.
+ * If Repeated Start bit is not set send Start bit by setting MSMS bit else
+ * Send the address.
+ */
+ ControlReg = XIo_In8(BaseAddress + XIIC_CR_REG_OFFSET);
+ if ((ControlReg & XIIC_CR_REPEATED_START_MASK) == 0) {
+ /* Put the address into the FIFO to be sent and indicate that the operation
+ * to be performed on the bus is a write operation
+ */
+ XIic_mSend7BitAddress(BaseAddress, Address,
+ XIIC_WRITE_OPERATION);
+ /* Clear the latched interrupt status so that it will be updated with the
+ * new state when it changes, this must be done after the address is put
+ * in the FIFO
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_ARB_LOST_MASK);
+
+ /* MSMS must be set after putting data into transmit FIFO, indicate the
+ * direction is transmit, this device is master and enable the IIC device
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ XIIC_CR_MSMS_MASK | XIIC_CR_DIR_IS_TX_MASK |
+ XIIC_CR_ENABLE_DEVICE_MASK);
+
+ /* Clear the latched interrupt
+ * status for the bus not busy bit which must be done while the bus is busy
+ */
+ StatusReg = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ while ((StatusReg & XIIC_SR_BUS_BUSY_MASK) == 0) {
+ StatusReg = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ }
+
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_BNB_MASK);
+
+ }
+ else {
+ /* Already owns the Bus indicating that it's a Repeated Start call.
+ * 7 bit slave address, send the address for a write operation
+ * and set the state to indicate the address has been sent
+ */
+ XIic_mSend7BitAddress(BaseAddress, Address,
+ XIIC_WRITE_OPERATION);
+ }
+
+ /* Send the specified data to the device on the IIC bus specified by the
+ * the address
+ */
+ RemainingByteCount =
+ SendData(BaseAddress, BufferPtr, ByteCount, Option);
+
+ ControlReg = XIo_In8(BaseAddress + XIIC_CR_REG_OFFSET);
+ if ((ControlReg & XIIC_CR_REPEATED_START_MASK) == 0) {
+ /* The Transmission is completed, disable the IIC device if the Option
+ * is to release the Bus after transmission of data and return the number
+ * of bytes that was sent. Only wait if master, if addressed as slave
+ * just reset to release the bus.
+ */
+ if ((ControlReg & XIIC_CR_MSMS_MASK) != 0) {
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ (ControlReg & ~XIIC_CR_MSMS_MASK));
+ StatusReg = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ while ((StatusReg & XIIC_SR_BUS_BUSY_MASK) != 0) {
+ StatusReg =
+ XIo_In8(BaseAddress +
+ XIIC_SR_REG_OFFSET);
+ }
+ }
+
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET, 0);
+ }
+
+ return ByteCount - RemainingByteCount;
+}
+
+/******************************************************************************
+*
+* Send the specified buffer to the device that has been previously addressed
+* on the IIC bus. This function assumes that the 7 bit address has been sent
+* and it should wait for the transmit of the address to complete.
+*
+* @param BaseAddress contains the base address of the IIC device.
+* @param BufferPtr points to the data to be sent.
+* @param ByteCount is the number of bytes to be sent.
+* @param Option indicates whether to hold or free the bus after
+* transmitting the data.
+*
+* @return
+*
+* The number of bytes remaining to be sent.
+*
+* @note
+*
+* This function does not take advantage of the transmit FIFO because it is
+* designed for minimal code space and complexity. It contains loops
+* that could cause the function not to return if the hardware is not working.
+*
+******************************************************************************/
+static unsigned SendData(u32 BaseAddress, u8 *BufferPtr,
+ unsigned ByteCount, u8 Option)
+{
+ u32 IntrStatus;
+
+ /* Send the specified number of bytes in the specified buffer by polling
+ * the device registers and blocking until complete
+ */
+ while (ByteCount > 0) {
+ /* Wait for the transmit to be empty before sending any more data
+ * by polling the interrupt status register
+ */
+ while (1) {
+ IntrStatus = XIIC_READ_IISR(BaseAddress);
+
+ if (IntrStatus & (XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_ARB_LOST_MASK |
+ XIIC_INTR_BNB_MASK)) {
+ return ByteCount;
+ }
+
+ if (IntrStatus & XIIC_INTR_TX_EMPTY_MASK) {
+ break;
+ }
+ }
+ /* If there is more than one byte to send then put the next byte to send
+ * into the transmit FIFO
+ */
+ if (ByteCount > 1) {
+ XIo_Out8(BaseAddress + XIIC_DTR_REG_OFFSET,
+ *BufferPtr++);
+ }
+ else {
+ if (Option == XIIC_STOP) {
+ /* If the Option is to release the bus after the last data
+ * byte, Set the stop Option before sending the last byte
+ * of data so that the stop Option will be generated
+ * immediately following the data. This is done by clearing
+ * the MSMS bit in the control register.
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ XIIC_CR_ENABLE_DEVICE_MASK |
+ XIIC_CR_DIR_IS_TX_MASK);
+ }
+
+ /* Put the last byte to send in the transmit FIFO */
+
+ XIo_Out8(BaseAddress + XIIC_DTR_REG_OFFSET,
+ *BufferPtr++);
+
+ if (Option == XIIC_REPEATED_START) {
+ XIic_mClearIisr(BaseAddress,
+ XIIC_INTR_TX_EMPTY_MASK);
+ /* Wait for the transmit to be empty before setting RSTA bit. */
+ while (1) {
+ IntrStatus =
+ XIIC_READ_IISR
+ (BaseAddress);
+ if (IntrStatus &
+ XIIC_INTR_TX_EMPTY_MASK) {
+ /* RSTA bit should be set only when the FIFO is completely Empty.
+ */
+ XIo_Out8(BaseAddress +
+ XIIC_CR_REG_OFFSET,
+ XIIC_CR_REPEATED_START_MASK
+ |
+ XIIC_CR_ENABLE_DEVICE_MASK
+ |
+ XIIC_CR_DIR_IS_TX_MASK
+ | XIIC_CR_MSMS_MASK);
+ break;
+ }
+ }
+ }
+
+ }
+
+ /* Clear the latched interrupt status register and this must be done after
+ * the transmit FIFO has been written to or it won't clear
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_TX_EMPTY_MASK);
+
+ /* Update the byte count to reflect the byte sent and clear the latched
+ * interrupt status so it will be updated for the new state
+ */
+ ByteCount--;
+ }
+
+ if (Option == XIIC_STOP) {
+ /* If the Option is to release the bus after transmission of data,
+ * Wait for the bus to transition to not busy before returning, the IIC
+ * device cannot be disabled until this occurs.
+ * Note that this is different from a receive operation because the stop
+ * Option causes the bus to go not busy.
+ */
+ while (1) {
+ if (XIIC_READ_IISR(BaseAddress) &
+ XIIC_INTR_BNB_MASK) {
+ break;
+ }
+ }
+ }
+
+ return ByteCount;
+}
+
+/*****************************************************************************/
+/**
+* Receive data as a master on the IIC bus. This function receives the data
+* using polled I/O and blocks until the data has been received. It only
+* supports 7 bit addressing. The user is responsible for ensuring the bus is
+* not busy if multiple masters are present on the bus.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param Address contains the 7 bit IIC Device address of the device to send
+* the specified data to.
+* @param BufferPtr points to the buffer to hold the data that is received.
+* @param ByteCount is the number of bytes to be received. This value can't be
+* greater than 255 and needs to be greater than 0.
+*
+* @return The number of bytes received.
+*
+* @note Upon entry to this function, the IIC interface needs to be already
+* enabled in the CR register.
+*
+******************************************************************************/
+unsigned XIic_DynRecv(u32 BaseAddress, u8 Address, u8 *BufferPtr, u8 ByteCount)
+{
+ unsigned RemainingByteCount;
+ u32 StatusRegister;
+
+ /*
+ * Clear the latched interrupt status so that it will be updated with
+ * the new state when it changes.
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_ARB_LOST_MASK);
+
+ /*
+ * Send the 7 bit slave address for a read operation and set the state
+ * to indicate the address has been sent. Upon writing the address, a
+ * start condition is initiated. MSMS is automatically set to master
+ * when the address is written to the Fifo. If MSMS was already set,
+ * then a re-start is sent prior to the address.
+ */
+ XIic_mDynSend7BitAddress(BaseAddress, Address, XIIC_READ_OPERATION);
+
+ /*
+ * Wait for the bus to go busy.
+ */
+ StatusRegister = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+
+ while (( StatusRegister & XIIC_SR_BUS_BUSY_MASK) != XIIC_SR_BUS_BUSY_MASK)
+ {
+ StatusRegister = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ }
+
+ /*
+ * Clear the latched interrupt status for the bus not busy bit which
+ * must be done while the bus is busy.
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_BNB_MASK);
+
+ /*
+ * Write to the Tx Fifo the dynamic stop control bit with the number of
+ * bytes that are to be read over the IIC interface from the presently
+ * addressed device.
+ */
+ XIic_mDynSendStop(BaseAddress, ByteCount);
+
+ /*
+ * Receive the data from the IIC bus.
+ */
+ RemainingByteCount = DynRecvData(BaseAddress, BufferPtr, ByteCount);
+
+ /*
+ * The receive is complete. Return the number of bytes that were
+ * received.
+ */
+ return ByteCount - RemainingByteCount;
+}
+
+/*****************************************************************************/
+/**
+* Receive the specified data from the device that has been previously addressed
+* on the IIC bus. This function assumes the following:
+* - The Rx Fifo occupancy depth has been set to its max.
+* - Upon entry, the Rx Fifo is empty.
+* - The 7 bit address has been sent.
+* - The dynamic stop and number of bytes to receive has been written to Tx
+* Fifo.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param BufferPtr points to the buffer to hold the data that is received.
+* @param ByteCount is the number of bytes to be received. The range of this
+* value is greater than 0 and not higher than 255.
+*
+* @return The number of bytes remaining to be received.
+*
+* @note This function contains loops that could cause the function not
+* to return if the hardware is not working.
+*
+******************************************************************************/
+static unsigned DynRecvData(u32 BaseAddress, u8 *BufferPtr, u8 ByteCount)
+{
+ u8 StatusReg;
+ u32 IntrStatus;
+ u32 IntrStatusMask;
+
+ while (ByteCount > 0) {
+
+ /* Setup the mask to use for checking errors because when
+ * receiving one byte OR the last byte of a multibyte message
+ * an error naturally occurs when the no ack is done to tell
+ * the slave the last byte.
+ */
+ if (ByteCount == 1) {
+ IntrStatusMask =
+ XIIC_INTR_ARB_LOST_MASK | XIIC_INTR_BNB_MASK;
+ }
+ else {
+ IntrStatusMask =
+ XIIC_INTR_ARB_LOST_MASK |
+ XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_BNB_MASK;
+ }
+
+ /*
+ * Wait for a byte to show up in the Rx Fifo.
+ */
+ do {
+ StatusReg = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ IntrStatus = XIIC_READ_IISR(BaseAddress);
+
+ /* Check the transmit error after the receive full
+ * because when sending only one byte transmit error
+ * will occur because of the no ack to indicate the end
+ * of the data.
+ */
+ if (IntrStatus & IntrStatusMask) {
+ return ByteCount;
+ }
+
+ } while ((StatusReg & XIIC_SR_RX_FIFO_EMPTY_MASK) ==
+ XIIC_SR_RX_FIFO_EMPTY_MASK);
+
+ /*
+ * Read in byte from the Rx Fifo. If the Fifo reached the
+ * programmed occupancy depth as programmed in the Rx occupancy
+ * reg, this read access will unthrottle the bus such that
+ * the next byte is read from the IIC bus.
+ */
+ *BufferPtr++ = XIo_In8(BaseAddress + XIIC_DRR_REG_OFFSET);
+ ByteCount--;
+ }
+
+ return ByteCount;
+}
+
+/*****************************************************************************/
+/**
+* Send data as a master on the IIC bus. This function sends the data using
+* polled I/O and blocks until the data has been sent. It only supports 7 bit
+* addressing. The user is responsible for ensuring the bus is not busy if
+* multiple masters are present on the bus.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param Address contains the 7 bit IIC address of the device to send the
+* specified data to. Bits above the 7 bit address may carry the
+* dynamic stop bit (XIIC_TX_DYN_STOP_MASK), which selects the
+* combined start-stop address send path below.
+* @param BufferPtr points to the data to be sent.
+* @param ByteCount is the number of bytes to be sent.
+* @param Option: XIIC_STOP = end with STOP condition, XIIC_REPEATED_START
+* = don't end with STOP condition.
+*
+* @return The number of bytes sent.
+*
+* @note None.
+*
+******************************************************************************/
+unsigned XIic_DynSend(u32 BaseAddress, u16 Address, u8 *BufferPtr,
+ u8 ByteCount, u8 Option)
+{
+ unsigned RemainingByteCount;
+ u32 StatusRegister;
+
+ /*
+ * Clear the latched interrupt status so that it will be updated with
+ * the new state when it changes, this must be done after the address
+ * is put in the FIFO
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_ARB_LOST_MASK);
+
+ /*
+ * Put the address into the Fifo to be sent and indicate that the
+ * operation to be performed on the bus is a write operation. Upon
+ * writing the address, a start condition is initiated. MSMS is
+ * automatically set to master when the address is written to the Fifo.
+ * If MSMS was already set, then a re-start is sent prior to the
+ * address.
+ */
+ if(!(Address & XIIC_TX_DYN_STOP_MASK))
+ {
+
+ XIic_mDynSend7BitAddress(BaseAddress, Address,
+ XIIC_WRITE_OPERATION);
+ }
+ else
+ {
+ XIic_mDynSendStartStopAddress(BaseAddress, Address,
+ XIIC_WRITE_OPERATION);
+ }
+
+ /*
+ * Wait for the bus to go busy.
+ */
+ StatusRegister = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+
+ while (( StatusRegister & XIIC_SR_BUS_BUSY_MASK) != XIIC_SR_BUS_BUSY_MASK)
+ {
+ StatusRegister = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ }
+
+ /*
+ * Clear the latched interrupt status for the bus not busy bit which
+ * must be done while the bus is busy.
+ */
+ XIic_mClearIisr(BaseAddress, XIIC_INTR_BNB_MASK);
+
+ /*
+ * Send the specified data to the device on the IIC bus specified by the
+ * the address.
+ */
+ RemainingByteCount = DynSendData(BaseAddress, BufferPtr, ByteCount,
+ Option);
+
+ /*
+ * The send is complete return the number of bytes that was sent.
+ */
+ return ByteCount - RemainingByteCount;
+}
+
+/******************************************************************************
+*
+* Send the specified buffer to the device that has been previously addressed
+* on the IIC bus. This function assumes that the 7 bit address has been sent.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param BufferPtr points to the data to be sent.
+* @param ByteCount is the number of bytes to be sent.
+* @param Option: XIIC_STOP = end with STOP condition, XIIC_REPEATED_START
+* = don't end with STOP condition.
+*
+* @return The number of bytes remaining to be sent.
+*
+* @note This function does not take advantage of the transmit Fifo because
+* it is designed for minimal code space and complexity.
+*
+******************************************************************************/
+static unsigned DynSendData(u32 BaseAddress, u8 *BufferPtr,
+ u8 ByteCount, u8 Option)
+{
+ u32 IntrStatus;
+
+ while (ByteCount > 0) {
+ /*
+ * Wait for the transmit to be empty before sending any more
+ * data by polling the interrupt status register.
+ */
+ while (1) {
+ IntrStatus = XIIC_READ_IISR(BaseAddress);
+ if (IntrStatus & (XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_ARB_LOST_MASK |
+ XIIC_INTR_BNB_MASK)) {
+ /*
+ * Error condition (NACK or ARB Lost or BNB
+ * Error) has occurred. Clear the Control
+ * register to send a STOP condition on the Bus
+ * and return the number of bytes still to
+ * transmit. (0x03 = TX FIFO reset | device
+ * enable, 0x01 = device enable only.)
+ */
+
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ 0x03);
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET,
+ 0x01);
+
+ return ByteCount;
+ }
+
+ /*
+ * Check for the transmit Fifo to become Empty.
+ */
+ if (IntrStatus & XIIC_INTR_TX_EMPTY_MASK) {
+ break;
+ }
+ }
+
+ /*
+ * Send data to Tx Fifo. If a stop condition is specified and
+ * the last byte is being sent, then set the dynamic stop bit.
+ */
+ if ((ByteCount == 1) && (Option == XIIC_STOP)) {
+ /*
+ * The MSMS will be cleared automatically upon setting
+ * dynamic stop.
+ */
+ XIo_Out16(BaseAddress + XIIC_DTR_REG_OFFSET - 1,
+ XIIC_TX_DYN_STOP_MASK | *BufferPtr++);
+ }
+ else {
+ XIo_Out8(BaseAddress + XIIC_DTR_REG_OFFSET,
+ *BufferPtr++);
+ }
+
+ /*
+ * Update the byte count to reflect the byte sent.
+ */
+ ByteCount--;
+ }
+
+ if (Option == XIIC_STOP) {
+ /*
+ * If the Option is to release the bus after transmission of
+ * data, Wait for the bus to transition to not busy before
+ * returning, the IIC device cannot be disabled until this
+ * occurs.
+ */
+ while (1) {
+ if (XIIC_READ_IISR(BaseAddress) & XIIC_INTR_BNB_MASK) {
+ break;
+ }
+ }
+ }
+
+ return ByteCount;
+}
+
+/******************************************************************************
+*
+* Initialize the IIC core for Dynamic Functionality.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+*
+* @return XST_SUCCESS if the core reports the expected idle state (only the
+* Tx and Rx Fifo empty status bits set), else XST_FAILURE.
+*
+* @note None.
+*
+******************************************************************************/
+int XIic_DynInit(u32 BaseAddress)
+{
+ u8 Status;
+
+ /*
+ * Reset IIC Core.
+ */
+ XIIC_RESET(BaseAddress);
+
+ /*
+ * Set receive Fifo depth to maximum (zero based).
+ */
+ XIo_Out8(BaseAddress + XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);
+
+ /*
+ * Reset Tx Fifo.
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
+
+ /*
+ * Enable IIC Device, remove Tx Fifo reset & disable general call.
+ */
+ XIo_Out8(BaseAddress + XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
+
+ /*
+ * Read status register and verify IIC Device is in initial state. Only the
+ * Tx Fifo and Rx Fifo empty bits should be set.
+ */
+ Status = XIo_In8(BaseAddress + XIIC_SR_REG_OFFSET);
+ if(Status == (XIIC_SR_RX_FIFO_EMPTY_MASK | XIIC_SR_TX_FIFO_EMPTY_MASK))
+ {
+ return XST_SUCCESS;
+ }
+
+ return XST_FAILURE;
+}
+
--- /dev/null
+/* $Id: xiic_l.h,v 1.1 2007/12/03 15:44:58 meinelte Exp $ */
+/*****************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2007 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+*****************************************************************************/
+/****************************************************************************/
+/**
+*
+* @file xiic_l.h
+*
+* This header file contains identifiers and driver functions (or
+* macros) that can be used to access the device in normal and dynamic
+* controller mode. High-level driver functions are defined in xiic.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00b jhl 05/07/02 First release
+* 1.01c ecm 12/05/02 new rev
+* 1.01d jhl 10/08/03 Added general purpose output feature
+* 1.02a mta 03/09/06 Implemented Repeated Start in the Low Level Driver.
+* 1.03a mta 04/04/06 Implemented Dynamic IIC core routines.
+* 1.03a rpm 09/08/06 Added include of xstatus.h for completeness
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+*****************************************************************************/
+
+#ifndef XIIC_L_H /* prevent circular inclusions */
+#define XIIC_L_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions ****************************/
+
+#define XIIC_MSB_OFFSET 3
+
+#define XIIC_REG_OFFSET 0x100 + XIIC_MSB_OFFSET
+
+/*
+ * Register offsets in bytes from RegisterBase. Three is added to the
+ * base offset to access LSB (IBM style) of the word
+ */
+#define XIIC_CR_REG_OFFSET 0x00+XIIC_REG_OFFSET /* Control Register */
+#define XIIC_SR_REG_OFFSET 0x04+XIIC_REG_OFFSET /* Status Register */
+#define XIIC_DTR_REG_OFFSET 0x08+XIIC_REG_OFFSET /* Data Tx Register */
+#define XIIC_DRR_REG_OFFSET 0x0C+XIIC_REG_OFFSET /* Data Rx Register */
+#define XIIC_ADR_REG_OFFSET 0x10+XIIC_REG_OFFSET /* Address Register */
+#define XIIC_TFO_REG_OFFSET 0x14+XIIC_REG_OFFSET /* Tx FIFO Occupancy */
+#define XIIC_RFO_REG_OFFSET 0x18+XIIC_REG_OFFSET /* Rx FIFO Occupancy */
+#define XIIC_TBA_REG_OFFSET 0x1C+XIIC_REG_OFFSET /* 10 Bit Address reg */
+#define XIIC_RFD_REG_OFFSET 0x20+XIIC_REG_OFFSET /* Rx FIFO Depth reg */
+#define XIIC_GPO_REG_OFFSET 0x24+XIIC_REG_OFFSET /* Output Register */
+
+/* Control Register masks */
+
+#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
+#define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */
+#define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */
+#define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */
+#define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */
+#define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */
+#define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */
+
+/* Status Register masks */
+
+#define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */
+#define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */
+#define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */
+#define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */
+#define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */
+#define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */
+#define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */
+#define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */
+
+/* Interrupt Status Register masks Interrupt occurs when... */
+
+#define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */
+#define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */
+#define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */
+#define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */
+#define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */
+#define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */
+#define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */
+#define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */
+
+#define XIIC_TX_ADDR_SENT 0x00
+#define XIIC_TX_ADDR_MSTR_RECV_MASK 0x02
+
+/* The following constants specify the depth of the FIFOs */
+
+#define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */
+#define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */
+
+/* The following constants specify groups of interrupts that are typically
+ * enabled or disabled at the same time
+ */
+#define XIIC_TX_INTERRUPTS \
+ (XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | \
+ XIIC_INTR_TX_HALF_MASK)
+
+#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)
+
+/* The following constants are used with the following macros to specify the
+ * operation, a read or write operation.
+ */
+#define XIIC_READ_OPERATION 1
+#define XIIC_WRITE_OPERATION 0
+
+/* The following constants are used with the transmit FIFO fill function to
+ * specify the role which the IIC device is acting as, a master or a slave.
+ */
+#define XIIC_MASTER_ROLE 1
+#define XIIC_SLAVE_ROLE 0
+
+/*
+ * The following constants are used with Transmit Function (XIic_Send) to
+ * specify whether to STOP after the current transfer of data or own the bus
+ * with a Repeated start.
+ */
+#define XIIC_STOP 0x00
+#define XIIC_REPEATED_START 0x01
+
+ /*
+ * Tx Fifo upper bit masks.
+ */
+
+#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
+#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
+
+
+/**************************** Type Definitions ******************************/
+
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * The following constants define the register offsets for the Interrupt
+ * registers. There are some holes in the memory map for reserved addresses
+ * to allow other registers to be added and still match the memory map of the
+ * interrupt controller registers
+ */
+#define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */
+#define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */
+#define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */
+#define XIIC_RESETR_OFFSET 0x40 /* Reset Register */
+
+
+#define XIIC_RESET_MASK 0xAUL
+
+/*
+ * The following constant is used for the device global interrupt enable
+ * register, to enable all interrupts for the device, this is the only bit
+ * in the register
+ */
+#define XIIC_GINTR_ENABLE_MASK 0x80000000UL
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/******************************************************************************
+*
+* This macro resets the IIC device by writing the reset key value to the
+* Reset Register.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIIC_RESET(u32 RegBaseAddress);
+*
+******************************************************************************/
+#define XIIC_RESET(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XIIC_RESETR_OFFSET, XIIC_RESET_MASK)
+
+/******************************************************************************
+*
+* This macro disables all interrupts for the device by writing to the Global
+* interrupt enable register. This register provides the ability to disable
+* interrupts without any modifications to the interrupt enable register such
+* that it is minimal effort to restore the interrupts to the previous enabled
+* state. The corresponding macro, XIIC_GINTR_ENABLE, is provided to
+* restore the interrupts to the previous enabled state. This macro is
+* designed to be used in critical sections of device drivers such that it is
+* not necessary to disable other device interrupts.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIIC_GINTR_DISABLE(u32 RegBaseAddress);
+*
+******************************************************************************/
+#define XIIC_GINTR_DISABLE(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XIIC_DGIER_OFFSET, 0)
+
+/******************************************************************************
+*
+* This macro writes to the global interrupt enable register to enable
+* interrupts from the device. This register provides the ability to enable
+* interrupts without any modifications to the interrupt enable register such
+* that it is minimal effort to restore the interrupts to the previous enabled
+* state. This macro does not enable individual interrupts as the interrupt
+* enable register must be set appropriately. This macro is designed to be
+* used in critical sections of device drivers such that it is not necessary to
+* disable other device interrupts.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIIC_GINTR_ENABLE(u32 RegBaseAddress);
+*
+******************************************************************************/
+#define XIIC_GINTR_ENABLE(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK)
+
+/******************************************************************************
+*
+* This macro determines if interrupts are enabled at the global level by
+* reading the global interrupt enable register. This register provides the
+* ability to disable interrupts without any modifications to the interrupt
+* enable register such that it is minimal effort to restore the interrupts
+* to the previous enabled state.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+*
+* @return
+* - TRUE if global interrupts are enabled.
+* - FALSE if global interrupts are disabled.
+*
+* @note C-Style signature:
+* int XIIC_IS_GINTR_ENABLED(u32 RegBaseAddress);
+*
+******************************************************************************/
+#define XIIC_IS_GINTR_ENABLED(RegBaseAddress) \
+ (XIo_In32((RegBaseAddress) + XIIC_DGIER_OFFSET) == \
+ XIIC_GINTR_ENABLE_MASK)
+
+/******************************************************************************
+*
+*
+* This macro sets the Interrupt status register to the specified value.
+* This register indicates the status of interrupt sources for the device.
+* The status is independent of whether interrupts are enabled such that
+* the status register may also be polled when interrupts are not enabled.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IIC device. All bits of this register are latched. Setting a bit which is zero
+* within this register causes an interrupt to be generated. The device global
+* interrupt enable register and the device interrupt enable register must be set
+* appropriately to allow an interrupt to be passed out of the device. The
+* interrupt is cleared by writing to this register with the bits to be
+* cleared set to a one and all others to zero. This register implements a
+* toggle on write functionality meaning any bits which are set in the value
+* written cause the bits in the register to change to the opposite state.
+*
+* This macro writes only the specified value to the register such that
+* some status bits may be set and others cleared. It is the caller's
+* responsibility to get the value of the register prior to setting the value
+* to prevent a destructive behavior.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+* @param Status contains the value to be written to the Interrupt
+* status register.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIIC_WRITE_IISR(u32 RegBaseAddress, u32 Status);
+*
+******************************************************************************/
+#define XIIC_WRITE_IISR(RegBaseAddress, Status) \
+ XIo_Out32((RegBaseAddress) + XIIC_IISR_OFFSET, (Status))
+
+/******************************************************************************
+*
+*
+* This macro gets the contents of the Interrupt Status Register.
+* This register indicates the status of interrupt sources for the device.
+* The status is independent of whether interrupts are enabled such
+* that the status register may also be polled when interrupts are not enabled.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device. All bits of this register are latched. Since the status is
+* latched, each status bit must be acknowledged in order for the bit in the
+* status register to be updated.
+*
+* NOTE(review): the acknowledge description here previously contradicted the
+* toggle-on-write behavior documented for XIIC_WRITE_IISR; confirm the exact
+* acknowledge semantics against the IP interrupt-register specification.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+*
+* @return A status which contains the value read from the Interrupt
+* Status Register.
+*
+* @note C-Style signature:
+* u32 XIIC_READ_IISR(u32 RegBaseAddress);
+*
+******************************************************************************/
+#define XIIC_READ_IISR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIC_IISR_OFFSET)
+
+/******************************************************************************
+*
+* This macro sets the contents of the Interrupt Enable Register. This
+* register controls which interrupt sources of the IIC device are allowed to
+* generate an interrupt. The global interrupt enable register and the device
+* interrupt enable register must also be set appropriately for an interrupt to be
+* passed out of the device.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device. Setting a bit in this register enables the interrupt source to generate
+* an interrupt. Clearing a bit in this register disables interrupt generation
+* for that interrupt source.
+*
+* This macro writes only the specified value to the register such that
+* some interrupt sources may be enabled and others disabled. It is the
+* caller's responsibility to get the value of the interrupt enable register
+* prior to setting the value to prevent a destructive behavior.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+* @param Enable contains the value to be written to the Interrupt Enable
+* Register.
+*
+* @return None
+*
+* @note C-Style signature:
+* void XIIC_WRITE_IIER(u32 RegBaseAddress, u32 Enable);
+*
+******************************************************************************/
+#define XIIC_WRITE_IIER(RegBaseAddress, Enable) \
+ XIo_Out32((RegBaseAddress) + XIIC_IIER_OFFSET, (Enable))
+
+/******************************************************************************
+*
+*
+* This macro gets the Interrupt enable register contents. This register
+* controls which interrupt sources of the device are allowed to generate an
+* interrupt. The global interrupt enable register and the device interrupt
+* enable register must also be set appropriately for an interrupt to be
+* passed out of the IIC device.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IIC device. Setting a bit in this register enables the interrupt source to
+* generate an interrupt. Clearing a bit in this register disables interrupt
+* generation for that interrupt source.
+*
+* @param RegBaseAddress is the base address of the IIC device.
+*
+* @return The contents read from the Interrupt Enable Register.
+*
+* @note C-Style signature:
+* u32 XIIC_READ_IIER(u32 RegBaseAddress)
+*
+******************************************************************************/
+#define XIIC_READ_IIER(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIC_IIER_OFFSET)
+
+/************************** Function Prototypes ******************************/
+
+
+/******************************************************************************
+*
+* This macro reads a register in the IIC device using an 8 bit read operation.
+* This macro does not do any checking to ensure that the register exists if the
+* register may be excluded due to parameterization, such as the GPO Register.
+*
+* @param BaseAddress of the IIC device.
+* @param RegisterOffset contains the offset of the register from the
+* device base address.
+*
+* @return The value read from the register.
+*
+* @note C-Style signature:
+* u8 XIic_mReadReg(u32 BaseAddress, int RegisterOffset);
+*
+******************************************************************************/
+#define XIic_mReadReg(BaseAddress, RegisterOffset) \
+ XIo_In8((BaseAddress) + (RegisterOffset))
+
+/******************************************************************************
+*
+* This macro writes a register in the IIC device using an 8 bit write
+* operation. This macro does not do any checking to ensure that the register
+* exists if the register may be excluded due to parameterization, such as the
+* GPO Register.
+*
+* @param BaseAddress of the IIC device.
+* @param RegisterOffset contains the offset of the register from the
+* device base address.
+* @param Data contains the data to be written to the register.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIic_mWriteReg(u32 BaseAddress, int RegisterOffset,
+* u8 Data);
+*
+******************************************************************************/
+#define XIic_mWriteReg(BaseAddress, RegisterOffset, Data) \
+ XIo_Out8((BaseAddress) + (RegisterOffset), (Data))
+
+/******************************************************************************
+*
+* This macro clears the specified interrupt in the Interrupt status
+* register. It is non-destructive in that the register is read and only the
+* interrupt specified is cleared. Clearing an interrupt acknowledges it.
+* Because the IISR is toggle-on-write (see XIIC_WRITE_IISR), writing back
+* the currently-set bits selected by InterruptMask toggles exactly those
+* bits to zero and leaves all other bits untouched.
+*
+* @param BaseAddress contains the IIC registers base address.
+* @param InterruptMask contains the interrupts to be cleared.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIic_mClearIisr(u32 BaseAddress, u32 InterruptMask);
+*
+******************************************************************************/
+#define XIic_mClearIisr(BaseAddress, InterruptMask) \
+ XIIC_WRITE_IISR((BaseAddress), \
+ XIIC_READ_IISR(BaseAddress) & (InterruptMask))
+
+/******************************************************************************
+*
+* This macro sends the address for a 7 bit address during both read and write
+* operations. It takes care of the details to format the address correctly.
+* This macro is designed to be called internally to the drivers.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param SlaveAddress contains the address of the slave to send to.
+* @param Operation indicates XIIC_READ_OPERATION or XIIC_WRITE_OPERATION
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIic_mSend7BitAddress(u32 BaseAddress, u8 SlaveAddress,
+* u8 Operation);
+*
+******************************************************************************/
+#define XIic_mSend7BitAddress(BaseAddress, SlaveAddress, Operation) \
+{ \
+ u8 LocalAddr = (u8)(SlaveAddress << 1); \
+ LocalAddr = (LocalAddr & 0xFE) | (Operation); \
+ XIo_Out8(BaseAddress + XIIC_DTR_REG_OFFSET, LocalAddr); \
+}
+
+/******************************************************************************
+*
+* This macro sends the address for a 7 bit address during both read and write
+* operations, using the dynamic controller logic: the address is written as a
+* 16-bit value with the dynamic START bit set in the upper byte.
+* It takes care of the details to format the address correctly.
+* This macro is designed to be called internally to the drivers.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param SlaveAddress contains the address of the slave to send to.
+* @param Operation indicates XIIC_READ_OPERATION or XIIC_WRITE_OPERATION.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIic_mDynSend7BitAddress(u32 BaseAddress, u8 SlaveAddress,
+* u8 Operation);
+*
+******************************************************************************/
+#define XIic_mDynSend7BitAddress(BaseAddress, SlaveAddress, Operation) \
+{ \
+ u8 LocalAddr = (u8)(SlaveAddress << 1); \
+ LocalAddr = (LocalAddr & 0xFE) | (Operation); \
+ XIo_Out16(BaseAddress + XIIC_DTR_REG_OFFSET - 1, \
+ XIIC_TX_DYN_START_MASK | LocalAddr); \
+}
+
+/******************************************************************************
+*
+* This macro sends the address, start and stop for a 7 bit address during
+* write operations. It takes care of the details to format the address
+* correctly.
+* This macro is designed to be called internally to the drivers.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param SlaveAddress contains the address of the slave to send to.
+* @param Operation indicates XIIC_WRITE_OPERATION.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void XIic_mDynSendStartStopAddress(u32 BaseAddress,
+* u8 SlaveAddress,
+* u8 Operation);
+*
+******************************************************************************/
+#define XIic_mDynSendStartStopAddress(BaseAddress, SlaveAddress, Operation) \
+{ \
+ u8 LocalAddr = (u8)(SlaveAddress << 1); \
+ LocalAddr = (LocalAddr & 0xFE) | (Operation); \
+ XIo_Out16(BaseAddress + XIIC_DTR_REG_OFFSET - 1, \
+ XIIC_TX_DYN_START_MASK | XIIC_TX_DYN_STOP_MASK | LocalAddr); \
+}
+
+/******************************************************************************
+*
+* This macro sends a stop condition on IIC bus for Dynamic logic.
+*
+* @param BaseAddress contains the base address of the IIC Device.
+* @param ByteCount is the number of Rx bytes received before the master
+* doesn't respond with ACK.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XIic_mDynSendStop(BaseAddress, ByteCount) \
+{ \
+ XIo_Out16(BaseAddress + XIIC_DTR_REG_OFFSET-1, XIIC_TX_DYN_STOP_MASK | \
+ ByteCount); \
+}
+
+/************************** Function Prototypes *****************************/
+
+/* NOTE(review): implementations are not in this header; Option is presumably
+ * XIIC_STOP or XIIC_REPEATED_START (defined above) -- confirm against the
+ * implementation file.
+ */
+unsigned XIic_Recv(u32 BaseAddress, u8 Address,
+ u8 *BufferPtr, unsigned ByteCount, u8 Option);
+
+unsigned XIic_Send(u32 BaseAddress, u8 Address,
+ u8 *BufferPtr, unsigned ByteCount, u8 Option);
+
+unsigned XIic_DynRecv(u32 BaseAddress, u8 Address, u8 *BufferPtr, u8 ByteCount);
+
+unsigned XIic_DynSend(u32 BaseAddress, u16 Address, u8 *BufferPtr,
+ u8 ByteCount, u8 Option);
+
+int XIic_DynInit(u32 BaseAddress);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+
--- /dev/null
+/* $Id: xiic_master.c,v 1.1 2007/12/03 15:44:58 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic_master.c
+*
+* Contains master functions for the XIic component. This file is necessary to
+* send or receive as a master on the IIC bus.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- --- ------- -----------------------------------------------
+* 1.01b jhl 3/27/02 Repartitioned the driver
+* 1.01c ecm 12/05/02 new rev
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+****************************************************************************/
+
+/***************************** Include Files *******************************/
+
+#include "xiic.h"
+#include "xiic_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions ***************************/
+
+
+/**************************** Type Definitions *****************************/
+
+
+/***************** Macros (Inline Functions) Definitions *******************/
+
+/*****************************************************************************
+*
+* This macro includes master code such that master operations, sending
+* and receiving data, may be used. This macro hooks the master processing
+* to the driver such that events are handled properly and allows master
+* processing to be optional. It must be invoked before any functions which
+* are contained in this file are called, such as after the driver is
+* initialized.
+*
+* @note
+*
+* None
+*
+******************************************************************************/
+#define XIIC_MASTER_INCLUDE \
+{ \
+ XIic_RecvMasterFuncPtr = RecvMasterData; \
+ XIic_SendMasterFuncPtr = SendMasterData; \
+}
+
+/************************** Function Prototypes ****************************/
+
+static void SendSlaveAddr(XIic * InstancePtr);
+static void RecvMasterData(XIic * InstancePtr);
+static void SendMasterData(XIic * InstancePtr);
+static int IsBusBusy(XIic * InstancePtr);
+
+/************************** Variable Definitions **************************/
+
+/****************************************************************************/
+/**
+* This function sends data as a master on the IIC bus. If the bus is busy, it
+* will indicate so and then enable an interrupt such that the status handler
+* will be called when the bus is no longer busy. The slave address which has
+* been set with the XIic_SetAddress() function is the address to which the
+* specific data is sent. Sending data on the bus performs a write operation.
+*
+* @param InstancePtr points to the Iic instance to be worked on.
+* @param TxMsgPtr points to the data to be transmitted
+* @param ByteCount is the number of message bytes to be sent
+*
+* @return
+*
+* - XST_SUCCESS indicates the message transmission has been initiated.
+* - XST_IIC_BUS_BUSY indicates the bus was in use and that the BusNotBusy
+* interrupt is enabled which will update the EventStatus when the bus is no
+* longer busy.
+*
+* @note
+*
+* None
+*
+******************************************************************************/
+int XIic_MasterSend(XIic * InstancePtr, u8 *TxMsgPtr, int ByteCount)
+{
+ u8 CntlReg;
+
+ XIic_mEnterCriticalRegion(InstancePtr->BaseAddress);
+
+ /* Ensure that the master processing has been included such that events
+ * will be properly handled
+ */
+ XIIC_MASTER_INCLUDE;
+ InstancePtr->IsDynamic = FALSE;
+
+ /*
+ * If the bus is busy, then exit the critical region and wait for the
+ * bus to not be busy, the function enables the bus not busy interrupt
+ */
+ if (IsBusBusy(InstancePtr)) {
+ XIic_mExitCriticalRegion(InstancePtr->BaseAddress);
+
+ return XST_IIC_BUS_BUSY;
+ }
+
+ /* If it is already a master on the bus (repeated start), the direction was
+ * set to tx which is throttling bus. The control register needs to be set
+ * before putting data into the FIFO
+ */
+ CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+
+ if (CntlReg & XIIC_CR_MSMS_MASK) {
+ CntlReg &= ~XIIC_CR_NO_ACK_MASK;
+ CntlReg |=
+ (XIIC_CR_DIR_IS_TX_MASK | XIIC_CR_REPEATED_START_MASK);
+
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+ CntlReg);
+ InstancePtr->Stats.RepeatedStarts++;
+ }
+
+ /* Save message state
+ */
+ InstancePtr->SendByteCount = ByteCount;
+ InstancePtr->SendBufferPtr = TxMsgPtr;
+
+ /* Put the address into the FIFO to be sent and indicate that the operation
+ * to be performed on the bus is a write operation, a general call address
+ * handled the same as a 7 bit address even if 10 bit address is selected
+ * Set the transmit address state to indicate the address has been sent
+ */
+ if ((InstancePtr->Options & XII_SEND_10_BIT_OPTION) &&
+ (InstancePtr->AddrOfSlave != 0)) {
+ XIic_mSend10BitAddrByte1(InstancePtr->AddrOfSlave,
+ XIIC_WRITE_OPERATION);
+ XIic_mSend10BitAddrByte2(InstancePtr->AddrOfSlave);
+ }
+ else {
+ XIic_mSend7BitAddr(InstancePtr->AddrOfSlave,
+ XIIC_WRITE_OPERATION);
+ }
+ /* Set the transmit address state to indicate the address has been sent
+ * for communication with event driven processing
+ */
+ InstancePtr->TxAddrMode = XIIC_TX_ADDR_SENT;
+
+ /* Fill remaining available FIFO with message data
+ */
+ if (InstancePtr->SendByteCount > 1) {
+ XIic_TransmitFifoFill(InstancePtr, XIIC_MASTER_ROLE);
+ }
+
+ /* After filling FIFO, if data yet to send > 1, enable Tx ½ empty interrupt
+ */
+ if (InstancePtr->SendByteCount > 1) {
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+ XIIC_INTR_TX_HALF_MASK);
+ }
+
+ /* Clear any pending Tx empty, Tx Error and then enable them.
+ */
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_TX_EMPTY_MASK);
+
+ /* When repeated start not used, MSMS must be set after putting data into
+ * transmit FIFO, start the transmitter
+ */
+
+ CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+ if ((CntlReg & XIIC_CR_MSMS_MASK) == 0) {
+ CntlReg &= ~XIIC_CR_NO_ACK_MASK;
+ CntlReg |= XIIC_CR_MSMS_MASK | XIIC_CR_DIR_IS_TX_MASK;
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+ CntlReg);
+ }
+
+ XIic_mExitCriticalRegion(InstancePtr->BaseAddress);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+* This function receives data as a master from a slave device on the IIC bus.
+* If the bus is busy, it will indicate so and then enable an interrupt such
+* that the status handler will be called when the bus is no longer busy. The
+* slave address which has been set with the XIic_SetAddress() function is the
+* address from which data is received. Receiving data on the bus performs a
+* read operation.
+*
+* @param InstancePtr is a pointer to the Iic instance to be worked on.
+* @param RxMsgPtr is a pointer to the buffer where received data is stored
+* @param ByteCount is the number of message bytes to be received
+*
+* @return
+*
+* - XST_SUCCESS indicates the message reception processes has been initiated.
+* - XST_IIC_BUS_BUSY indicates the bus was in use and that the BusNotBusy
+* interrupt is enabled which will update the EventStatus when the bus is no
+* longer busy.
+* - XST_IIC_GENERAL_CALL_ADDRESS indicates the slave address is set to the
+* general call address. This is not allowed for Master receive mode.
+*
+* @internal
+*
+* The receive FIFO threshold is a zero based count such that 1 must be
+* subtracted from the desired count to get the correct value. When receiving
+* data it is also necessary to not receive the last byte with the prior bytes
+* because the acknowledge must be setup before the last byte is received.
+*
+******************************************************************************/
+int XIic_MasterRecv(XIic * InstancePtr, u8 *RxMsgPtr, int ByteCount)
+{
+ u8 CntlReg;
+ u8 Temp;
+
+ /* If the slave address is zero (general call) the master can't perform
+ * receive operations, indicate an error
+ */
+ if (InstancePtr->AddrOfSlave == 0) {
+ return XST_IIC_GENERAL_CALL_ADDRESS;
+ }
+
+ XIic_mEnterCriticalRegion(InstancePtr->BaseAddress);
+
+ /* Ensure that the master processing has been included such that events
+ * will be properly handled
+ */
+ XIIC_MASTER_INCLUDE;
+ InstancePtr->IsDynamic = FALSE;
+ /*
+ * If the bus is busy, then exit the critical region and wait for the
+ * bus to not be busy, the function enables the bus not busy interrupt
+ */
+ if (IsBusBusy(InstancePtr)) {
+ XIic_mExitCriticalRegion(InstancePtr->BaseAddress);
+
+ return XST_IIC_BUS_BUSY;
+ }
+
+ /* Save message state for event driven processing
+ */
+ InstancePtr->RecvByteCount = ByteCount;
+ InstancePtr->RecvBufferPtr = RxMsgPtr;
+
+ /* Clear and enable Rx full interrupt if using 7 bit, If 10 bit, wait until
+ * last address byte sent in case arbitration gets lost while sending out
+ * address.
+ */
+ if ((InstancePtr->Options & XII_SEND_10_BIT_OPTION) == 0) {
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+ XIIC_INTR_RX_FULL_MASK);
+ }
+
+ /* If already a master on the bus, the direction was set by Rx Interrupt
+ * routine to tx which is throttling bus because during Rxing, Tx reg is
+ * empty = throttle. CR needs setting before putting data or the address
+ * written will go out as Tx instead of receive. Start Master Rx by setting
+ * CR Bits MSMS to Master and msg direction.
+ */
+ CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+
+ if (CntlReg & XIIC_CR_MSMS_MASK) {
+ CntlReg |= XIIC_CR_REPEATED_START_MASK;
+ XIic_mSetControlRegister(InstancePtr, CntlReg, ByteCount);
+
+ InstancePtr->Stats.RepeatedStarts++; /* increment stats counts */
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+ CntlReg);
+
+ }
+
+ /* Set receive FIFO occupancy depth which must be done prior to writing the
+ * address in the FIFO because the transmitter will immediately start when
+ * in repeated start mode followed by the receiver such that the number of
+ * bytes to receive should be set 1st.
+ */
+ if (ByteCount == 1) {
+ Temp = 0;
+ }
+ else {
+ if (ByteCount <= IIC_RX_FIFO_DEPTH) {
+ Temp = ByteCount - 2;
+ }
+ else {
+ Temp = IIC_RX_FIFO_DEPTH - 1;
+ }
+ }
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_RFD_REG_OFFSET, Temp);
+
+ if (InstancePtr->Options & XII_SEND_10_BIT_OPTION) {
+ /* Send the 1st and 2nd byte of the 10 bit address of a write
+ * operation, write because it's a 10 bit address
+ */
+ XIic_mSend10BitAddrByte1(InstancePtr->AddrOfSlave,
+ XIIC_WRITE_OPERATION);
+ XIic_mSend10BitAddrByte2(InstancePtr->AddrOfSlave);
+
+ /* Set flag to indicate the next byte of the address needs to be
+ * sent, clear and enable tx empty interrupt
+ */
+ InstancePtr->TxAddrMode = XIIC_TX_ADDR_MSTR_RECV_MASK;
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+ XIIC_INTR_TX_EMPTY_MASK);
+ }
+ else {
+ /* 7 bit slave address, send the address for a read operation
+ * and set the state to indicate the address has been sent
+ */
+ XIic_mSend7BitAddr(InstancePtr->AddrOfSlave,
+ XIIC_READ_OPERATION);
+ InstancePtr->TxAddrMode = XIIC_TX_ADDR_SENT;
+ }
+
+ /* Tx error is enabled in case the address (7 or 10) has no device to answer
+ * with Ack. When only one byte of data, must set NO ACK before address goes
+ * out therefore Tx error must not be enabled as it will go off immediately
+ * and the Rx full interrupt will be checked. If full, then the one byte
+ * was received and the Tx error will be disabled without sending an error
+ * callback msg.
+ */
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+ XIIC_INTR_TX_ERROR_MASK);
+
+ /* When repeated start not used, MSMS gets set after putting data
+ * in Tx reg. Start Master Rx by setting CR Bits MSMS to Master and
+ * msg direction.
+ */
+ CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+ if ((CntlReg & XIIC_CR_MSMS_MASK) == 0) {
+ CntlReg |= XIIC_CR_MSMS_MASK;
+ XIic_mSetControlRegister(InstancePtr, CntlReg, ByteCount);
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+ CntlReg);
+ }
+
+ XIic_mExitCriticalRegion(InstancePtr->BaseAddress);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************
+*
+* This function checks to see if the IIC bus is busy. If so, it will enable
+* the bus not busy interrupt such that the driver is notified when the bus
+* is no longer busy.
+*
+* @param InstancePtr points to the Iic instance to be worked on.
+*
+* @return
+*
+* - FALSE indicates the IIC bus is not busy.
+* - TRUE indicates the bus was in use and that the BusNotBusy
+* interrupt is enabled which will update the EventStatus when the bus is no
+* longer busy.
+*
+* @note
+*
+* None
+*
+******************************************************************************/
+static int IsBusBusy(XIic * InstancePtr)
+{
+ u8 ControlReg;
+ u8 StatusReg;
+
+ ControlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+ StatusReg = XIo_In8(InstancePtr->BaseAddress + XIIC_SR_REG_OFFSET);
+
+ /* If this device is already master of the bus as when using the repeated
+ * start and the bus is busy setup to wait for it to not be busy
+ */
+ if (((ControlReg & XIIC_CR_MSMS_MASK) == 0) && /* not master */
+ (StatusReg & XIIC_SR_BUS_BUSY_MASK)) { /* is busy */
+ /* The bus is busy, clear pending BNB interrupt in case previously set
+ * and then enable BusNotBusy interrupt
+ */
+ InstancePtr->BNBOnly = TRUE;
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress,
+ XIIC_INTR_BNB_MASK);
+ InstancePtr->Stats.BusBusy++;
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/******************************************************************************
+*
+* This function sends the proper byte of the address as well as generate the
+* proper address bit fields depending on the address byte required and the
+* direction of the data (write or read).
+*
+* A master receiving has the restriction that the direction must be switched
+* from write to read when the third address byte is transmitted.
+* For the last byte of the 10 bit address, repeated start must be set prior
+* to writing the address. If repeated start options is enabled, the
+* control register is written before the address is written to the tx reg.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* This function does read/modify/write to the device control register. Calling
+* functions must ensure critical sections are used.
+*
+******************************************************************************/
+static void SendSlaveAddr(XIic * InstancePtr)
+{
+ u8 CRreg;
+
+ /* Set the control register for Master Receive, repeated start must be set
+ * before writing the address, MSMS should be already set, don't set here
+ * so if arbitration is lost or some other reason we don't want MSMS set
+ * in case of error
+ */
+ CRreg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+
+ CRreg |= XIIC_CR_REPEATED_START_MASK;
+ CRreg &= ~XIIC_CR_DIR_IS_TX_MASK;
+
+ XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET, CRreg);
+
+ /* Send the 1st byte of the 10 bit address as a read operation, enable the
+ * receive interrupt to know when data is received, assuming that the
+ * receive FIFO threshold has been previously set
+ */
+ XIic_mSend10BitAddrByte1(InstancePtr->AddrOfSlave, XIIC_READ_OPERATION);
+
+ XIic_mClearEnableIntr(InstancePtr->BaseAddress, XIIC_INTR_RX_FULL_MASK);
+}
+
+/******************************************************************************
+*
+* When the IIC Tx FIFO/register goes empty, this routine is called by the
+* interrupt service routine to fill the transmit FIFO with data to be sent.
+*
+* This function also is called by the Tx ½ empty interrupt as the data handling
+* is identical when you don't assume the FIFO is empty but use the Tx_FIFO_OCY
+* register to indicate the available free FIFO bytes.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void SendMasterData(XIic * InstancePtr)
+{
+	u8 CntlReg;
+
+	/* The device is a master on the bus. If there are still more address
+	 * bytes to send when in master receive operation and the slave device
+	 * is 10 bit addressed, the lower 7 bits of the address must be resent
+	 * when the mode switches to Read instead of write (while sending
+	 * addresses).
+	 */
+	if (InstancePtr->TxAddrMode & XIIC_TX_ADDR_MSTR_RECV_MASK) {
+		/* Send the 1st byte of the slave address in the read operation
+		 * and change the state to indicate this has been done
+		 */
+		SendSlaveAddr(InstancePtr);
+		InstancePtr->TxAddrMode = XIIC_TX_ADDR_SENT;
+	}
+
+	/* In between 1st and last byte of message, fill the FIFO with more
+	 * data to send, disable the 1/2 empty interrupt based upon data left
+	 * to send
+	 */
+	else if (InstancePtr->SendByteCount > 1) {
+		XIic_TransmitFifoFill(InstancePtr, XIIC_MASTER_ROLE);
+
+		/* NOTE(review): this re-check only has effect if
+		 * XIic_TransmitFifoFill() updates SendByteCount - confirm
+		 */
+		if (InstancePtr->SendByteCount < 2) {
+			XIic_mDisableIntr(InstancePtr->BaseAddress,
+					  XIIC_INTR_TX_HALF_MASK);
+		}
+	}
+	/*
+	 * If there is only one byte left to send, processing differs between
+	 * repeated start and normal messages
+	 */
+	else if (InstancePtr->SendByteCount == 1) {
+		/* When using repeated start, another interrupt is expected
+		 * after the last byte has been sent, so the message is not
+		 * done yet
+		 */
+		if (InstancePtr->Options & XII_REPEATED_START_OPTION) {
+			XIic_mWriteSendByte(InstancePtr);
+		}
+
+		/* When not using repeated start, the stop condition must be
+		 * generated after the last byte is written. The bus is
+		 * throttled waiting for the last byte.
+		 */
+		else {
+			/* Set the stop condition before sending the last byte
+			 * of data so that the stop condition will be generated
+			 * immediately following the data; another transmit
+			 * interrupt is not expected so the message is done
+			 */
+
+			CntlReg =
+				XIo_In8(InstancePtr->BaseAddress +
+					XIIC_CR_REG_OFFSET);
+			CntlReg &= ~XIIC_CR_MSMS_MASK;	/* request stop */
+			XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+				 CntlReg);
+
+			XIic_mWriteSendByte(InstancePtr);
+
+			/* Wait for bus to not be busy before declaring the
+			 * message has been sent for the no repeated start
+			 * operation. The callback will be called from the
+			 * BusNotBusy part of the interrupt handler to ensure
+			 * that the message is completely sent. Disable the TX
+			 * interrupts and enable the BNB interrupt
+			 */
+
+			InstancePtr->BNBOnly = FALSE;
+			XIic_mDisableIntr(InstancePtr->BaseAddress,
+					  XIIC_TX_INTERRUPTS);
+			XIic_mEnableIntr(InstancePtr->BaseAddress,
+					 XIIC_INTR_BNB_MASK);
+
+		}
+	}
+	else {
+		if (InstancePtr->Options & XII_REPEATED_START_OPTION) {
+			/* The message being sent has completed. When using
+			 * repeated start with no more bytes to send, repeated
+			 * start needs to be set in the control register so
+			 * that the bus will still be held by this master
+			 */
+
+			CntlReg =
+				XIo_In8(InstancePtr->BaseAddress +
+					XIIC_CR_REG_OFFSET);
+			CntlReg |= XIIC_CR_REPEATED_START_MASK;
+			XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+				 CntlReg);
+
+			/* If the message that was being sent has finished,
+			 * disable all transmit interrupts and call the
+			 * callback that was setup to indicate the message was
+			 * sent, with 0 bytes remaining
+			 */
+
+			XIic_mDisableIntr(InstancePtr->BaseAddress,
+					  XIIC_TX_INTERRUPTS);
+			InstancePtr->SendHandler(InstancePtr->SendCallBackRef,
+						 0);
+		}
+	}
+
+	return;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function is called when the receive register is full. The number
+* of bytes received to cause the interrupt is adjustable using the Receive FIFO
+* Depth register. The number of bytes in the register is read in the Receive
+* FIFO occupancy register. Both these registers are zero based values (0-15)
+* such that a value of zero indicates 1 byte.
+*
+* For a Master Receiver to properly signal the end of a message, the data must
+* be read in up to the message length - 1, where control register bits will be
+* set for bus controls to occur on reading of the last byte.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void RecvMasterData(XIic * InstancePtr)
+{
+	u8 LoopCnt;
+	int BytesInFifo;
+	int BytesToRead;
+	u8 CntlReg;
+
+	/* Device is a master receiving, get the contents of the control
+	 * register and determine the number of bytes in fifo to be read out
+	 * (the occupancy register is zero based, so add 1)
+	 */
+	CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+	BytesInFifo = XIo_In8(InstancePtr->BaseAddress + XIIC_RFO_REG_OFFSET)
+		+ 1;
+
+	/* If data in FIFO holds all data to be retrieved - 1, set NOACK and
+	 * disable the tx error
+	 */
+	if ((InstancePtr->RecvByteCount - BytesInFifo) == 1) {
+		/* Disable tx error interrupt to prevent an interrupt
+		 * as this device will cause it when it sets NO ACK next
+		 */
+		XIic_mDisableIntr(InstancePtr->BaseAddress,
+				  XIIC_INTR_TX_ERROR_MASK);
+		XIic_mClearIntr(InstancePtr->BaseAddress,
+				XIIC_INTR_TX_ERROR_MASK);
+
+		/* Write control reg with NO ACK allowing last byte to
+		 * have the No ack set to indicate to slave last byte read.
+		 */
+		XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+			 (CntlReg | XIIC_CR_NO_ACK_MASK));
+
+		/* Read one byte to clear a place for the last byte to be read
+		 * which will set the NO ACK
+		 */
+		XIic_mReadRecvByte(InstancePtr);
+	}
+
+	/* If data in FIFO is all the data to be received then get the data
+	 * and also leave the device in a good state for the next transaction
+	 */
+	else if ((InstancePtr->RecvByteCount - BytesInFifo) == 0) {
+		/* If repeated start option is off then the master should stop
+		 * using the bus, otherwise hold the bus, setting repeated start
+		 * stops the slave from transmitting data when the FIFO is read
+		 */
+		if ((InstancePtr->Options & XII_REPEATED_START_OPTION) == 0) {
+			CntlReg &= ~XIIC_CR_MSMS_MASK;	/* release the bus */
+		}
+		else {
+			CntlReg |= XIIC_CR_REPEATED_START_MASK;	/* hold bus */
+		}
+		XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+			 CntlReg);
+
+		/* Read data from the FIFO then set zero based FIFO read depth
+		 * for a byte
+		 */
+		for (LoopCnt = 0; LoopCnt < BytesInFifo; LoopCnt++) {
+			XIic_mReadRecvByte(InstancePtr);
+		}
+		XIo_Out8(InstancePtr->BaseAddress + XIIC_RFD_REG_OFFSET, 0);
+
+		/* Disable Rx full interrupt and write the control reg with ACK
+		 * allowing next byte sent to be acknowledged automatically
+		 */
+		XIic_mDisableIntr(InstancePtr->BaseAddress,
+				  XIIC_INTR_RX_FULL_MASK);
+
+		XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET,
+			 (CntlReg & ~XIIC_CR_NO_ACK_MASK));
+
+		/* Send notification of msg Rx complete in RecvHandler callback
+		 */
+		InstancePtr->RecvHandler(InstancePtr->RecvCallBackRef, 0);
+	}
+	else {
+		/* Fifo data not at n-1, read all but the last byte of data
+		 * from the slave, if more than a FIFO full yet to receive
+		 * read a FIFO full
+		 */
+		BytesToRead = InstancePtr->RecvByteCount - BytesInFifo - 1;
+		if (BytesToRead > IIC_RX_FIFO_DEPTH) {
+			BytesToRead = IIC_RX_FIFO_DEPTH;
+		}
+
+		/* Read in data from the FIFO */
+
+		for (LoopCnt = 0; LoopCnt < BytesToRead; LoopCnt++) {
+			XIic_mReadRecvByte(InstancePtr);
+		}
+	}
+}
--- /dev/null
+/* $Id: xiic_options.c,v 1.1 2007/12/03 15:44:58 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xiic_options.c
+*
+* Contains options functions for the XIic component. This file is not required
+* unless the functions in this file are called.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- --- ------- -----------------------------------------------
+* 1.01b jhl 3/26/02 repartioned the driver
+* 1.01c ecm 12/05/02 new rev
+* 1.13a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+****************************************************************************/
+
+/***************************** Include Files *******************************/
+
+#include "xiic.h"
+#include "xiic_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions ***************************/
+
+
+/**************************** Type Definitions *****************************/
+
+
+/***************** Macros (Inline Functions) Definitions *******************/
+
+
+/************************** Function Prototypes ****************************/
+
+
+/************************** Variable Definitions **************************/
+
+
+/*****************************************************************************/
+/**
+*
+* This function sets the options for the IIC device driver. The options control
+* how the device behaves relative to the IIC bus. If an option applies to
+* how messages are sent or received on the IIC bus, it must be set prior to
+* calling functions which send or receive data.
+*
+* To set multiple options, the values must be ORed together. To not change
+* existing options, read/modify/write with the current options using
+* XIic_GetOptions().
+*
+* <b>USAGE EXAMPLE:</b>
+*
+* Read/modify/write to enable repeated start:
+* <pre>
+* u8 Options;
+* Options = XIic_GetOptions(&Iic);
+* XIic_SetOptions(&Iic, Options | XII_REPEATED_START_OPTION);
+* </pre>
+*
+* Disabling General Call:
+* <pre>
+* Options = XIic_GetOptions(&Iic);
+* XIic_SetOptions(&Iic, Options &= ~XII_GENERAL_CALL_OPTION);
+* </pre>
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @param NewOptions are the options to be set. See xiic.h for a list of
+* the available options.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Sending or receiving messages with repeated start enabled, and then
+* disabling repeated start, will not take effect until another master
+* transaction is completed. i.e. After using repeated start, the bus will
+* continue to be throttled after repeated start is disabled until a master
+* transaction occurs allowing the IIC to release the bus.
+* <br><br>
+* Options enabled will have a 1 in its appropriate bit position.
+*
+****************************************************************************/
+void XIic_SetOptions(XIic * InstancePtr, u32 NewOptions)
+{
+	u8 CntlReg;
+
+	XASSERT_VOID(InstancePtr != NULL);
+
+	XIic_mEnterCriticalRegion(InstancePtr->BaseAddress);
+
+	/* Update the options in the instance and get the contents of the
+	 * control register such that the general call option can be modified
+	 */
+	InstancePtr->Options = NewOptions;
+	CntlReg = XIo_In8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET);
+
+	/* The general call option is the only option that maps directly to
+	 * a hardware register feature; all other options are only cached in
+	 * InstancePtr->Options
+	 */
+	if (NewOptions & XII_GENERAL_CALL_OPTION) {
+		CntlReg |= XIIC_CR_GENERAL_CALL_MASK;
+	}
+	else {
+		CntlReg &= ~XIIC_CR_GENERAL_CALL_MASK;
+	}
+
+	/* Write the new control register value to the register */
+
+	XIo_Out8(InstancePtr->BaseAddress + XIIC_CR_REG_OFFSET, CntlReg);
+
+	XIic_mExitCriticalRegion(InstancePtr->BaseAddress);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the current options for the IIC device. Options control
+* how the device behaves on the IIC bus. See XIic_SetOptions() for more
+* information on options.
+*
+* @param InstancePtr is a pointer to the XIic instance to be worked on.
+*
+* @return
+*
+* The options of the IIC device. See xiic.h for a list of available options.
+*
+* @note
+*
+* Options enabled will have a 1 in its appropriate bit position.
+*
+****************************************************************************/
+u32 XIic_GetOptions(XIic * InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* Return the software copy maintained by XIic_SetOptions() */
+	return InstancePtr->Options;
+}
To compile this driver as a module, choose M here: the
module will be called atakbd.
+config KEYBOARD_XILINX_FSL_PS2_AT
+ tristate "AT keyboard connected to XILINX PS2 FSL"
+ depends on HAVE_XILINX_FSL_PS2
+ help
+ Say Y here if you are running Linux on SPARTAN3E and have a keyboard
+ attached.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xil_fsl_ps2_kbd.
+
config KEYBOARD_HIL_OLD
tristate "HP HIL keyboard support (simple driver)"
depends on GSC || HP300
obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
+obj-$(CONFIG_KEYBOARD_XILINX_FSL_PS2_AT)+= xil_fsl_ps2_kbd.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
--- /dev/null
+/*
+ * XILINX FSL PS2 IP core keyboard driver for Linux
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/xparameters.h>
+#include <asm/mb_interface.h>
+#include <asm/irq.h>
+
+MODULE_AUTHOR("LynxWorks");
+MODULE_DESCRIPTION("XILINX fsl_ps2 keyboard driver");
+MODULE_LICENSE("GPL");
+
+
+/* Scancode-to-Linux-keycode translation table. It is indexed with the low
+ * byte of the word returned by inword() (see xilkbd_interrupt: the lookup
+ * uses scancode & 0xFF). A zero entry means the scancode is unmapped and is
+ * skipped when registering key capabilities in xilkbd_init().
+ */
+static unsigned char xilkbd_keycode[512] = {
+
+	0, 67, 65, 63, 61, 59, 60, 88, 0, 68, 66, 64, 62, 15, 41,117,
+	0, 56, 42, 93, 29, 16, 2, 0, 0, 0, 44, 31, 30, 17, 3, 0,
+	0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
+	0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
+	0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
+	0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85,
+	0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
+	82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
+
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	217,100,255, 0, 97,165, 0, 0,156, 0, 0, 0, 0, 0, 0,125,
+	173,114, 0,113, 0, 0, 0,126,128, 0, 0,140, 0, 0, 0,127,
+	159, 0,115, 0,164, 0, 0,116,158, 0,150,166, 0, 0, 0,142,
+	157, 0, 0, 0, 0, 0, 0, 0,155, 0, 98, 0, 0,163, 0, 0,
+	226, 0, 0, 0, 0, 0, 0, 0, 0,255, 96, 0, 0, 0,143, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0,107, 0,105,102, 0, 0,112,
+	110,111,108,112,106,103, 0,119, 0,118,109, 0, 99,104,119, 0,
+
+	0, 0, 0, 65, 99,
+};
+
+
+static unsigned char inbyte(void);
+static unsigned int inword(void);
+/*
+unsigned int cur_scancode = 0;
+unsigned int mark_scancode = 0 ;
+
+
+static unsigned char inbyte(void)
+{
+ int break_scancode = 0, done = 0 ;
+ unsigned int data = 1 ;
+
+
+ while(!done){
+ microblaze_nbread_datafsl(data,0);
+ data = data >> 24;
+ if( !mark_scancode && (data != cur_scancode) ) {
+ mark_scancode = 1 ;
+ cur_scancode = data ;
+ }else if( mark_scancode && (data == 0xF0) ) {
+ break_scancode = 1 ;
+ }else if( break_scancode ) {
+ if( data == cur_scancode )
+ mark_scancode = 0 ;
+ else
+ cur_scancode = data ;
+ done = 1 ;
+ }
+ }
+
+ return data;
+}
+*/
+
+
+/*
+ * inword - read one PS/2 scancode word from FSL channel 0.
+ *
+ * The raw byte arrives in the most significant byte of the 32-bit FSL word.
+ * The PS/2 0xF0 "break" prefix is folded into the return value:
+ *   - returns 0 for the 0xF0 prefix itself (caller ignores it),
+ *   - returns 0x00F00000 | code for the byte following 0xF0 (key release),
+ *   - returns the plain scancode otherwise (key press).
+ *
+ * Fix: removed the unused local 'msr' and the dead commented-out mfmsr()
+ * code that referenced it.
+ */
+static unsigned int inword(void)
+{
+	unsigned int data = 0;
+	static unsigned int prev_data = 0;	/* last raw byte seen */
+
+	microblaze_nbread_datafsl(data, 0);
+	data = data >> 24;	/* scancode is in the top byte */
+
+	if (data == 0xF0) {
+		prev_data = data;
+		return 0;
+	}
+
+	if (prev_data == 0xF0) {
+		prev_data = data;
+		return 0x00F00000 | data;	/* mark as key release */
+	}
+
+	prev_data = data;
+	return data;
+}
+
+static struct input_dev xilkbd_dev;
+
+static char *xilkbd_name = "PS2 keyboard on XILINX FSL PS2";
+static char *xilkbd_phys = "xilkbd/input0";
+
+
+/*
+ * xilkbd_interrupt - FSL PS/2 IRQ handler.
+ *
+ * Reads one decoded scancode word via inword(), reports the key press or
+ * release to the input layer, and for non-modifier keys synthesizes an
+ * immediate release (the original code repeated the same table lookup ten
+ * times; it is now hoisted into 'keycode').
+ */
+static irqreturn_t xilkbd_interrupt(int irq, void *dummy, struct pt_regs *fp)
+{
+	unsigned int scancode;
+	unsigned char keycode;
+
+	scancode = inword();
+	if (!scancode)		/* 0xF0 break prefix - nothing to report yet */
+		return IRQ_HANDLED;
+
+	keycode = xilkbd_keycode[scancode & 0xFF];
+
+	input_regs(&xilkbd_dev, fp);
+
+	/* inword() sets the 0x00F00000 marker for a key release */
+	input_report_key(&xilkbd_dev, keycode,
+			 (scancode & 0x00F00000) != 0x00F00000);
+	input_sync(&xilkbd_dev);
+
+	/* Non-modifier keys get an immediate synthesized release */
+	switch (keycode) {
+	case KEY_LEFTSHIFT:
+	case KEY_RIGHTSHIFT:
+	case KEY_CAPSLOCK:
+	case KEY_LEFTCTRL:
+	case KEY_LEFTALT:
+	case KEY_RIGHTALT:
+	case KEY_LEFTMETA:
+	case KEY_RIGHTMETA:
+		break;
+	default:
+		input_report_key(&xilkbd_dev, keycode, 0);
+		input_sync(&xilkbd_dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+static irqreturn_t xilkbd_interrupt(int irq, void *dummy, struct pt_regs *fp)
+{
+ unsigned int scancode;
+
+ scancode = inbyte();
+
+ input_regs(&xilkbd_dev, fp);
+
+ input_report_key(&xilkbd_dev, xilkbd_keycode[scancode], 1);
+ input_sync(&xilkbd_dev);
+ input_report_key(&xilkbd_dev, xilkbd_keycode[scancode], 0);
+ input_sync(&xilkbd_dev);
+
+
+ return IRQ_HANDLED;
+}
+*/
+
+/*
+ * xilkbd_init - register the FSL PS/2 keyboard with the input layer.
+ *
+ * Fix: the request_irq() return value was ignored; the device was registered
+ * even when the IRQ could not be obtained. The hard-coded 512 is replaced
+ * with ARRAY_SIZE for consistency with keycodemax above.
+ */
+static int __init xilkbd_init(void)
+{
+	int i;
+	int err;
+
+	init_input_dev(&xilkbd_dev);
+
+	xilkbd_dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
+	xilkbd_dev.keycode = xilkbd_keycode;
+	xilkbd_dev.keycodesize = sizeof(unsigned char);
+	xilkbd_dev.keycodemax = ARRAY_SIZE(xilkbd_keycode);
+
+	/* Advertise every mapped (non-zero) keycode */
+	for (i = 0; i < ARRAY_SIZE(xilkbd_keycode); i++)
+		if (xilkbd_keycode[i])
+			set_bit(xilkbd_keycode[i], xilkbd_dev.keybit);
+
+	err = request_irq(XPAR_FSL_PS2_IRQ, xilkbd_interrupt, 0, "xilkbd",
+			  xilkbd_interrupt);
+	if (err) {
+		printk(KERN_ERR "xilkbd: unable to get IRQ %d\n",
+		       XPAR_FSL_PS2_IRQ);
+		return err;
+	}
+
+	xilkbd_dev.name = xilkbd_name;
+	xilkbd_dev.phys = xilkbd_phys;
+	xilkbd_dev.id.bustype = 0;
+	xilkbd_dev.id.vendor = 0x0001;
+	xilkbd_dev.id.product = 0x0001;
+	xilkbd_dev.id.version = 0x0100;
+
+	input_register_device(&xilkbd_dev);
+
+	printk(KERN_INFO "input: %s\n", xilkbd_name);
+
+	return 0;
+}
+
+static void __exit xilkbd_exit(void)
+{
+	/* Drop the input device, then release the FSL PS/2 IRQ. The dev_id
+	 * cookie matches the one passed to request_irq() in xilkbd_init().
+	 */
+	input_unregister_device(&xilkbd_dev);
+	free_irq(XPAR_FSL_PS2_IRQ, xilkbd_interrupt);
+}
+
+module_init(xilkbd_init);
+module_exit(xilkbd_exit);
To compile this driver as a module, choose M here: the
module will be called libps2.
+config SERIO_XILINXPS2
+ tristate "Xilinx OPB PS/2 Controller Support"
+ depends on XILINX_DRIVERS && SERIO
+ help
+ This driver supports OPB PS/2 IP from Xilinx EDK.
+
+config SERIO_XILINX_XPS_PS2
+ tristate "Xilinx XPS PS/2 Controller Support"
+ depends on XILINX_DRIVERS && SERIO
+ help
+ This driver supports XPS PS/2 IP from Xilinx EDK.
+
config SERIO_RAW
tristate "Raw access to serio ports"
help
obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
+obj-$(CONFIG_SERIO_XILINXPS2) += xilinx_ps2/
+obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
--- /dev/null
+/*
+ * xilinx_ps2.c
+ *
+ * Xilinx PS/2 driver to interface PS/2 component to Linux
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * (c) 2005 MontaVista Software, Inc.
+ * (c) 2008 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/module.h>
+#include <linux/serio.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/xilinx_devices.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_OF /* For open firmware */
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_platform.h>
+#endif /* CONFIG_OF */
+
+#include "xilinx_ps2.h"
+
+#define DRIVER_NAME "xilinx_ps2"
+#define DRIVER_DESCRIPTION "Xilinx XPS PS/2 driver"
+
+#define XPS2_NAME_DESC "Xilinx XPS PS/2 Port #%d"
+#define XPS2_PHYS_DESC "xilinxps2/serio%d"
+
+
+static DECLARE_MUTEX(cfg_sem);
+
+/*********************/
+/* Interrupt handler */
+/*********************/
+/*
+ * xps2_interrupt() services the XPS PS/2 interrupt sources: it reads and
+ * acknowledges the pending bits, records parity/timeout conditions in
+ * drvdata->dfl, forwards a received byte to the serio layer, and disables
+ * the TX interrupts once a transmission completes.
+ */
+static irqreturn_t xps2_interrupt(int irq, void *dev_id)
+{
+	struct xps2data *drvdata = (struct xps2data *)dev_id;
+	u32 intr_sr;	/* snapshot of pending interrupt bits */
+	u32 ier;
+	u8 c;
+	u8 retval;
+
+	/* Get the PS/2 interrupts and clear them (the pending bits are
+	 * written back to the status register to acknowledge them) */
+	intr_sr = in_be32(drvdata->base_address + XPS2_IPISR_OFFSET);
+	out_be32(drvdata->base_address + XPS2_IPISR_OFFSET, intr_sr);
+
+	/* Check which interrupt is active */
+	if (intr_sr & XPS2_IPIXR_RX_OVF) {
+		printk(KERN_ERR "%s: receive overrun error\n",
+		       drvdata->serio.name);
+	}
+
+	/* Flag a parity error for the serio layer */
+	if (intr_sr & XPS2_IPIXR_RX_ERR) {
+		drvdata->dfl |= SERIO_PARITY;
+	}
+
+	/* Device did not ACK, or the watchdog timed out */
+	if (intr_sr & (XPS2_IPIXR_TX_NOACK | XPS2_IPIXR_WDT_TOUT)) {
+		drvdata->dfl |= SERIO_TIMEOUT;
+	}
+
+	if (intr_sr & XPS2_IPIXR_RX_FULL) {
+		retval = xps2_recv(drvdata, &drvdata->rxb);
+
+		/* Error, if 1 byte is not received.
+		 * NOTE(review): on a short read the previous contents of
+		 * drvdata->rxb are still forwarded to serio_interrupt()
+		 * below - confirm this is intended.
+		 */
+		if (retval != 1) {
+			printk(KERN_ERR
+			       "%s: wrong rcvd byte count (%d)\n",
+			       drvdata->serio.name, retval);
+		}
+		c = drvdata->rxb;
+		serio_interrupt(&drvdata->serio, c, drvdata->dfl);
+		drvdata->dfl = 0;
+	}
+
+	if (intr_sr & XPS2_IPIXR_TX_ACK) {
+
+		/* Disable the TX interrupts after the transmission is
+		 * complete */
+		ier = in_be32(drvdata->base_address + XPS2_IPIER_OFFSET);
+		ier &= (~(XPS2_IPIXR_TX_ACK & XPS2_IPIXR_ALL ));
+		out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, ier);
+		drvdata->dfl = 0;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*******************/
+/* serio callbacks */
+/*******************/
+
+/*
+ * sxps2_write() sends a byte out through the PS/2 interface.
+ *
+ * The sole purpose of drvdata->tx_end is to prevent the driver
+ * from locking up in the do {} while; loop when nothing is connected
+ * to the given PS/2 port. That's why we do not try to recover
+ * from the transmission failure.
+ * drvdata->tx_end needs not to be initialized to some "far in the
+ * future" value, as the very first attempt to xps2_send() a byte
+ * is always successful, and drvdata->tx_end will be set to a proper
+ * value at that moment - before the 1st use in the comparison.
+ */
+static int sxps2_write(struct serio *pserio, unsigned char c)
+{
+	struct xps2data *drvdata = pserio->port_data;
+	unsigned long flags;
+	int retval;
+
+	/* Retry until the byte is accepted or the tx_end deadline (set one
+	 * HZ after the last successful send) expires - see the block comment
+	 * above for why this cannot spin forever on an empty port.
+	 */
+	do {
+		spin_lock_irqsave(&drvdata->lock, flags);
+		retval = xps2_send(drvdata, &c);
+		spin_unlock_irqrestore(&drvdata->lock, flags);
+
+		if (retval == 1) {
+			drvdata->tx_end = jiffies + HZ;	/* ~1s budget */
+			return 0;	/* success */
+		}
+	} while (!time_after(jiffies, drvdata->tx_end));
+
+	return 1;	/* transmission is frozen */
+}
+
+/*
+ * sxps2_open() is called when a port is open by the higher layer.
+ */
+static int sxps2_open(struct serio *pserio)
+{
+	struct xps2data *drvdata = pserio->port_data;
+	int retval;
+
+	retval = request_irq(drvdata->irq, &xps2_interrupt, 0,
+			     DRIVER_NAME, drvdata);
+	if (retval) {
+		printk(KERN_ERR
+		       "%s: Couldn't allocate interrupt %d\n",
+		       drvdata->serio.name, drvdata->irq);
+		return retval;	/* propagate request_irq error */
+	}
+
+	/* start reception by enabling the interrupts */
+	out_be32(drvdata->base_address + XPS2_GIER_OFFSET, XPS2_GIER_GIE_MASK);
+	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, XPS2_IPIXR_RX_ALL);
+	/* Read out any byte already latched; presumably clears a stale RX
+	 * condition so the next arrival raises an interrupt - TODO confirm */
+	(void)xps2_recv(drvdata, &drvdata->rxb);
+
+	return 0;	/* success */
+}
+
+/*
+ * sxps2_close() frees the interrupt.
+ */
+static void sxps2_close(struct serio *pserio)
+{
+	struct xps2data *drvdata = pserio->port_data;
+
+	/* Disable the PS2 interrupts (global enable and per-source enables),
+	 * then release the IRQ acquired in sxps2_open() */
+	out_be32(drvdata->base_address + XPS2_GIER_OFFSET, 0x00);
+	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0x00);
+	free_irq(drvdata->irq, drvdata);
+}
+
+/*************************/
+/* XPS PS/2 driver calls */
+/*************************/
+
+/*
+ * xps2_initialize() initializes the Xilinx PS/2 device.
+ */
+static int xps2_initialize(struct xps2data *drvdata)
+{
+	/* Disable all the interrupts just in case */
+	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0);
+
+	/* Reset the PS2 device and abort any current transaction, to make
+	 * sure we have the PS2 in a good state */
+	out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET);
+
+	return 0;	/* currently cannot fail */
+}
+
+/*
+ * xps2_send() sends the specified byte of data to the PS/2 port in interrupt
+ * mode.
+ */
+static u8 xps2_send(struct xps2data *drvdata, u8 *byte)
+{
+	u32 sr;
+	u32 ier;
+	u8 retval = 0;	/* number of bytes actually queued (0 or 1) */
+
+	/* Enter a critical region by disabling the PS/2 transmit interrupts
+	 * to allow this call to stop a previous operation that may be
+	 * interrupt driven. Only stop the transmit interrupt since this
+	 * critical region is not really exited in the normal manner */
+	ier = in_be32(drvdata->base_address + XPS2_IPIER_OFFSET);
+	ier &= (~(XPS2_IPIXR_TX_ALL & XPS2_IPIXR_ALL ));
+	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, ier);
+
+	/* If the PS/2 transmitter is empty send a byte of data */
+	sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET);
+	if ((sr & XPS2_STATUS_TX_FULL) == 0) {
+		out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, *byte);
+		retval = 1;
+	}
+
+	/* Enable the TX interrupts to track the status of the transmission;
+	 * completion is then handled in xps2_interrupt() */
+	ier = in_be32(drvdata->base_address + XPS2_IPIER_OFFSET);
+	ier |= ((XPS2_IPIXR_TX_ALL | XPS2_IPIXR_WDT_TOUT ));
+	out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, ier);
+
+	return retval;	/* no. of bytes sent */
+}
+
+/*
+ * xps2_recv() will attempt to receive a byte of data from the PS/2 port.
+ */
+static u8 xps2_recv(struct xps2data *drvdata, u8 *byte)
+{
+	u32 sr;
+	u8 retval = 0;	/* number of bytes read (0 or 1) */
+
+	/* If there is data available in the PS/2 receiver, read it;
+	 * otherwise *byte is left untouched */
+	sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET);
+	if (sr & XPS2_STATUS_RX_FULL) {
+		*byte = in_be32(drvdata->base_address + XPS2_RX_DATA_OFFSET);
+		retval = 1;
+	}
+
+	return retval;	/* no. of bytes received */
+}
+
+/******************************/
+/* The platform device driver */
+/******************************/
+
+/* Defined below; declared here so the probe callback can call it.
+ * (The original called xps2_setup() before any declaration was in scope.) */
+static int xps2_setup(struct device *dev, int id, struct resource *regs_res,
+		      struct resource *irq_res);
+
+/*
+ * xps2_probe - platform-bus probe: look up the MEM and IRQ resources and
+ * hand them to the shared xps2_setup().
+ *
+ * Fix: the NULL check on 'dev' now happens before 'dev' is used; the
+ * original passed a NULL 'dev' to dev_err(), which would itself oops.
+ */
+static int xps2_probe(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct resource *irq_res;	/* Interrupt resource */
+	struct resource *regs_res;	/* IO mem resource */
+
+	if (!dev)
+		return -EINVAL;
+
+	pdev = to_platform_device(dev);
+
+	/* Find irq number, map the control registers in */
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs_res || !irq_res) {
+		dev_err(dev, "IO resource(s) not found\n");
+		return -ENODEV;
+	}
+	return xps2_setup(dev, pdev->id, regs_res, irq_res);
+}
+
+/*
+ * Shared device initialization code.
+ */
+static int xps2_setup(struct device *dev, int id, struct resource *regs_res,
+		      struct resource *irq_res)
+{
+	struct xps2data *drvdata;
+	unsigned long remap_size;
+	int retval;
+
+	if (!dev)
+		return -EINVAL;
+
+	drvdata = kzalloc(sizeof(struct xps2data), GFP_KERNEL);
+	if (!drvdata) {
+		dev_err(dev, "Couldn't allocate device private record\n");
+		return -ENOMEM;
+	}
+	spin_lock_init(&drvdata->lock);
+	dev_set_drvdata(dev, (void *)drvdata);
+
+#ifdef CONFIG_OF
+	/* OF callers pass stack-allocated resources; guard against a caller
+	 * handing in NULL pointers */
+	if (!regs_res || !irq_res) {
+		dev_err(dev, "IO resource(s) not found\n");
+		retval = -EFAULT;
+		goto failed1;
+	}
+#endif
+
+	drvdata->irq = irq_res->start;
+	remap_size = regs_res->end - regs_res->start + 1;
+	if (!request_mem_region(regs_res->start, remap_size, DRIVER_NAME)) {
+
+		dev_err(dev,"Couldn't lock memory region at 0x%08X\n",
+			regs_res->start);
+		retval = -EBUSY;
+		goto failed1;
+	}
+
+	/* Fill in configuration data and add them to the list */
+	drvdata->phys_addr = regs_res->start;
+	drvdata->remap_size = remap_size;
+	drvdata->device_id = id;
+	drvdata->base_address= ioremap(regs_res->start, remap_size);
+	if (drvdata->base_address == NULL) {
+
+		dev_err(dev,"Couldn't ioremap memory at 0x%08X\n",
+			regs_res->start);
+		retval = -EFAULT;
+		goto failed2;
+	}
+
+	/* Initialize the PS/2 interface; cfg_sem serializes hardware init
+	 * across ports */
+	down(&cfg_sem);
+	if (xps2_initialize(drvdata)) {
+		up(&cfg_sem);
+		dev_err(dev,"Could not initialize device\n");
+		retval = -ENODEV;
+		goto failed3;
+	}
+	up(&cfg_sem);
+
+	dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%08X, irq=%d\n",
+		 drvdata->phys_addr, (u32)drvdata->base_address, drvdata->irq);
+
+	/* Register with the serio core; the IRQ is only requested later,
+	 * from sxps2_open(), when a child driver binds */
+	drvdata->serio.id.type = SERIO_8042;
+	drvdata->serio.write = sxps2_write;
+	drvdata->serio.open = sxps2_open;
+	drvdata->serio.close = sxps2_close;
+	drvdata->serio.port_data = drvdata;
+	drvdata->serio.dev.parent = dev;
+	snprintf(drvdata->serio.name, sizeof(drvdata->serio.name),
+		 XPS2_NAME_DESC, id);
+	snprintf(drvdata->serio.phys, sizeof(drvdata->serio.phys),
+		 XPS2_PHYS_DESC, id);
+	serio_register_port(&drvdata->serio);
+
+	return 0;	/* success */
+
+/* Error unwinding: each label undoes the steps completed before its goto */
+failed3:
+	iounmap(drvdata->base_address);
+
+failed2:
+	release_mem_region(regs_res->start, remap_size);
+
+failed1:
+	kfree(drvdata);
+	dev_set_drvdata(dev, NULL);
+
+	return retval;
+}
+
+/*
+ * xps2_remove() dissociates the driver with the Xilinx PS/2 device.
+ */
+static int xps2_remove(struct device *dev)
+{
+	struct xps2data *drvdata;
+
+	if (!dev)
+		return -EINVAL;
+
+	/* NOTE(review): drvdata is not checked for NULL - assumes a
+	 * successful earlier xps2_setup() left it set */
+	drvdata = (struct xps2data *)dev_get_drvdata(dev);
+
+	/* Tear down in reverse order of xps2_setup() */
+	serio_unregister_port(&drvdata->serio);
+
+	iounmap(drvdata->base_address);
+
+	release_mem_region(drvdata->phys_addr, drvdata->remap_size);
+
+	kfree(drvdata);
+	dev_set_drvdata(dev, NULL);
+
+	return 0;	/* success */
+}
+
+/* Legacy platform-bus binding (used alongside the OF binding below) */
+static struct device_driver xps2_driver = {
+	.name = DRIVER_NAME,
+	.bus = &platform_bus_type,
+	.probe = xps2_probe,
+	.remove = xps2_remove
+};
+
+#ifdef CONFIG_OF
+/*
+ * xps2_of_probe - device-tree probe: translate the node's address and IRQ
+ * into resources and hand them to the shared xps2_setup().
+ *
+ * Fix: on a missing IRQ the original returned NO_IRQ, which is not a
+ * -errno value and could report success to the driver core; return
+ * -ENODEV instead.
+ */
+static int __devinit xps2_of_probe(struct of_device *ofdev, const struct
+				   of_device_id *match)
+{
+	struct resource r_irq_struct;
+	struct resource r_mem_struct;
+	struct resource *r_irq = &r_irq_struct;	/* Interrupt resources */
+	struct resource *r_mem = &r_mem_struct;	/* IO mem resources */
+	int rc = 0;
+	const unsigned int *id;
+
+	printk(KERN_INFO "Device Tree Probing \'%s\'\n",
+	       ofdev->node->name);
+
+	/* Get iospace for the device */
+	rc = of_address_to_resource(ofdev->node, 0, r_mem);
+	if (rc) {
+		dev_warn(&ofdev->dev, "invalid address\n");
+		return rc;
+	}
+
+	/* Get IRQ for the device */
+	rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+	if (rc == NO_IRQ) {
+		dev_warn(&ofdev->dev, "no IRQ found\n");
+		return -ENODEV;
+	}
+
+	/* Optional "port-number" property gives the port ordinal, -1 if
+	 * absent */
+	id = of_get_property(ofdev->node, "port-number", NULL);
+	return xps2_setup(&ofdev->dev, id ? *id : -1, r_mem, r_irq);
+}
+
+static int __devexit xps2_of_remove(struct of_device *dev)
+{
+ return xps2_remove(&dev->dev);
+}
+
+static struct of_device_id xps2_of_match[] = {
+ { .compatible = "xlnx,xps-ps2-1.00.a", },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(of, xps2_of_match);
+
+static struct of_platform_driver xps2_of_driver = {
+ .name = DRIVER_NAME,
+ .match_table = xps2_of_match,
+ .probe = xps2_of_probe,
+ .remove = __devexit_p(xps2_of_remove),
+};
+#endif /* CONFIG_OF */
+
+static int __init xps2_init(void)
+{
+ int status = driver_register(&xps2_driver);
+#ifdef CONFIG_OF
+ status |= of_register_platform_driver(&xps2_of_driver);
+#endif /* CONFIG_OF */
+ return status;
+}
+
+static void __exit xps2_cleanup(void)
+{
+ driver_unregister(&xps2_driver);
+#ifdef CONFIG_OF
+ of_unregister_platform_driver(&xps2_of_driver);
+#endif /* CONFIG_OF */
+}
+
+module_init(xps2_init);
+module_exit(xps2_cleanup);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
+
--- /dev/null
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_PS2_H_ /* prevent circular inclusions */
+#define XILINX_PS2_H_ /* by using protection macros */
+
+/* Register offsets for the xps2 device */
+#define XPS2_SRST_OFFSET 0x00000000 /* Software Reset register */
+#define XPS2_STATUS_OFFSET 0x00000004 /* Status register */
+#define XPS2_RX_DATA_OFFSET 0x00000008 /* Receive Data register */
+#define XPS2_TX_DATA_OFFSET 0x0000000C /* Transmit Data register */
+#define XPS2_GIER_OFFSET 0x0000002C /* Global Interrupt Enable reg */
+#define XPS2_IPISR_OFFSET 0x00000030 /* Interrupt Status register */
+#define XPS2_IPIER_OFFSET 0x00000038 /* Interrupt Enable register */
+
+/* Reset Register Bit Definitions */
+#define XPS2_SRST_RESET 0x0000000A /* Software Reset */
+
+/* Status Register Bit Positions */
+#define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */
+#define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */
+
+/* Bit definitions for ISR/IER registers. Both the registers have the same bit
+ * definitions and are only defined once. */
+#define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */
+#define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */
+#define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */
+#define XPS2_IPIXR_RX_OVF 0x00000008 /* Receive Overflow Interrupt */
+#define XPS2_IPIXR_RX_ERR 0x00000010 /* Receive Error Interrupt */
+#define XPS2_IPIXR_RX_FULL 0x00000020 /* Receive Data Interrupt */
+
+/* Mask for all the Transmit Interrupts */
+#define XPS2_IPIXR_TX_ALL (XPS2_IPIXR_TX_NOACK | XPS2_IPIXR_TX_ACK)
+
+/* Mask for all the Receive Interrupts */
+#define XPS2_IPIXR_RX_ALL (XPS2_IPIXR_RX_OVF | XPS2_IPIXR_RX_ERR | \
+ XPS2_IPIXR_RX_FULL)
+
+/* Mask for all the Interrupts */
+#define XPS2_IPIXR_ALL (XPS2_IPIXR_TX_ALL | XPS2_IPIXR_RX_ALL | \
+ XPS2_IPIXR_WDT_TOUT)
+
+/* Global Interrupt Enable mask */
+#define XPS2_GIER_GIE_MASK 0x80000000
+
+struct xps2data {
+ int irq;
+ u32 device_id;
+ u32 phys_addr;
+ u32 remap_size;
+ spinlock_t lock;
+ u8 rxb; /* Rx buffer */
+ void __iomem *base_address; /* virt. address of control registers */
+ unsigned long tx_end;
+ unsigned int dfl;
+ struct serio serio; /* serio */
+};
+
+static u8 xps2_send(struct xps2data *drvdata, u8 *buffer_ptr);
+static u8 xps2_recv(struct xps2data *drvdata, u8 *buffer_ptr);
+static int xps2_initialize(struct xps2data *drvdata);
+static int xps2_setup(struct device *dev, int id, struct resource *regs_res,
+ struct resource *irq_res);
+
+#endif /* end of protection macro */
+
--- /dev/null
+#
+# Makefile for the Xilinx PS/2 driver
+#
+
+EXTRA_CFLAGS += -Idrivers/xilinx_common
+
+# The Linux adapter for the Xilinx driver code.
+xilinx_ps2-objs := xps2_linux.o
+
+# The Xilinx OS independent code.
+xilinx_ps2-objs += xps2.o xps2_intr.o xps2_l.o
+
+obj-$(CONFIG_SERIO_XILINXPS2) += xilinx_ps2.o
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002-2005 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/****************************************************************************/
+/**
+*
+* @file xps2.c
+*
+* This file contains the required functions for the PS/2 driver.
+* Refer to the header file xps2.h for more detailed information.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ch 06/18/02 First release
+* 1.00a rmm 05/14/03 Fixed diab compiler warnings relating to asserts.
+* 1.01a jvb 12/13/05 I changed Initialize() into CfgInitialize(), and made
+* CfgInitialize() take a pointer to a config structure
+* instead of a device id. I moved Initialize() into
+* xgpio_sinit.c, and had Initialize() call CfgInitialize()
+* after it retrieved the config structure using the device
+* id. I removed include of xparameters.h along with any
+* dependencies on xparameters.h and the _g.c config table.
+*
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files ********************************/
+
+#include "xstatus.h"
+#include "xps2.h"
+#include "xps2_i.h"
+#include "xps2_l.h"
+#include "xio.h"
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions ******************************/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/************************** Variable Definitions ****************************/
+
+/************************** Function Prototypes *****************************/
+
+static void XPs2_StubHandler(void *CallBackRef, u32 Event,
+ unsigned int ByteCount);
+
+/****************************************************************************/
+/**
+*
+* Initializes a specific PS/2 instance such that it is ready to be used.
+* The default operating mode of the driver is polled mode.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+* @param Config is a reference to a structure containing information about
+* a specific PS2 device. This function initializes an InstancePtr object
+* for a specific device specified by the contents of Config. This
+* function can initialize multiple instance objects with the use of
+* multiple calls giving different Config information on each call.
+* @param EffectiveAddr is the device base address in the virtual memory address
+* space. The caller is responsible for keeping the address mapping
+* from EffectiveAddr to the device physical base address unchanged
+* once this function is invoked. Unexpected errors may occur if the
+* address mapping changes after this function is called. If address
* translation is not used, use Config->BaseAddress for this parameter,
+* passing the physical address instead.
+*
+* @return
+*
+* - XST_SUCCESS if initialization was successful
+*
+* @note
+*
+* The Config pointer argument is not used by this function, but is provided
+* to keep the function signature consistent with other drivers.
+*
+*****************************************************************************/
+int XPs2_CfgInitialize(XPs2 * InstancePtr, XPs2_Config * Config,
+ u32 EffectiveAddr)
+{
+ /*
+ * Assert validates the input arguments
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /*
+ * Setup the data that is from the configuration information
+ */
+ InstancePtr->BaseAddress = EffectiveAddr;
+
+ /*
+ * Initialize the instance data to some default values and setup a default
+ * handler
+ */
+ InstancePtr->Handler = XPs2_StubHandler;
+
+ InstancePtr->SendBuffer.NextBytePtr = NULL;
+ InstancePtr->SendBuffer.RemainingBytes = 0;
+ InstancePtr->SendBuffer.RequestedBytes = 0;
+
+ InstancePtr->ReceiveBuffer.NextBytePtr = NULL;
+ InstancePtr->ReceiveBuffer.RemainingBytes = 0;
+ InstancePtr->ReceiveBuffer.RequestedBytes = 0;
+
+ /*
+ * Reset the PS/2 Hardware
+ */
+ XPs2_mReset(InstancePtr->BaseAddress);
+
+ /*
+ * Disable all PS/2 interrupts
+ */
+ XPs2_mDisableIntr(InstancePtr->BaseAddress, XPS2_INT_ALL);
+
+ /*
+ * Indicate the instance is now ready to use, initialized without error
+ */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This functions sends the specified buffer of data to the PS/2 port in either
+* polled or interrupt driven modes. This function is non-blocking such that it
* will return before the data has been sent through PS/2. If the port is busy
+* sending data, it will return and indicate zero bytes were sent.
+*
+* In a polled mode, this function will only send 1 byte which is as much data
+* as the transmitter can buffer. The application may need to call it
+* repeatedly to send a buffer.
+*
+* In interrupt mode, this function will start sending the specified buffer and
+* then the interrupt handler of the driver will continue sending data until the
+* buffer has been sent. A callback function, as specified by the application,
+* will be called to indicate the completion of sending the buffer.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+* @param BufferPtr is pointer to a buffer of data to be sent.
+* @param NumBytes contains the number of bytes to be sent. A value of zero
+* will stop a previous send operation that is in progress in interrupt
+* mode. Any data that was already put into the transmit FIFO will be
+* sent.
+*
+* @return
+*
+* The number of bytes actually sent.
+*
+* @note
+*
+* The number of bytes is not asserted so that this function may be called with
+* a value of zero to stop an operation that is already in progress.
+* <br><br>
+* This function modifies shared data such that there may be a need for mutual
+* exclusion in a multithreaded environment
+*
+*****************************************************************************/
+unsigned int XPs2_Send(XPs2 * InstancePtr, u8 * BufferPtr,
+ unsigned int NumBytes)
+{
+ unsigned int BytesSent;
+
+ /*
+ * Assert validates the input arguments
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufferPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Enter a critical region by disabling the PS/2 transmit interrupts to
+ * allow this call to stop a previous operation that may be interrupt
+ * driven, only stop the transmit interrupt since this critical region is
+ * not really exited in the normal manner
+ */
+ XPs2_mDisableIntr(InstancePtr->BaseAddress, XPS2_INT_TX_ALL);
+
+ /*
+ * Setup the specified buffer to be sent by setting the instance
+ * variables so it can be sent with polled or interrupt mode
+ */
+ InstancePtr->SendBuffer.RequestedBytes = NumBytes;
+ InstancePtr->SendBuffer.RemainingBytes = NumBytes;
+ InstancePtr->SendBuffer.NextBytePtr = BufferPtr;
+
+ /*
+ * Send the buffer and return the number of bytes sent
+ */
+ BytesSent = XPs2_SendBuffer(InstancePtr);
+
+ /*
+ * The critical region is not exited in this function because of the way
+ * the transmit interrupts work. The other function called enables the
+ * transmit interrupt such that this function can't restore a value to the
+ * interrupt enable register and does not need to exit the critical region
+ */
+ return BytesSent;
+}
+
+/****************************************************************************/
+/**
+*
+* This function will attempt to receive a specified number of bytes of data
+* from PS/2 and store it into the specified buffer. This function is
+* designed for either polled or interrupt driven modes. It is non-blocking
* such that it will return if no data has already been received by the PS/2 port.
+*
+* In a polled mode, this function will only receive 1 byte which is as much
+* data as the receiver can buffer. The application may need to call it
+* repeatedly to receive a buffer. Polled mode is the default mode of
+* operation for the driver.
+*
+* In interrupt mode, this function will start receiving and then the interrupt
+* handler of the driver will continue receiving data until the buffer has been
+* received. A callback function, as specified by the application, will be called
+* to indicate the completion of receiving the buffer or when any receive errors
+* or timeouts occur. Interrupt mode must be enabled.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+* @param BufferPtr is pointer to buffer for data to be received into
+* @param NumBytes is the number of bytes to be received. A value of zero will
+* stop a previous receive operation that is in progress in interrupt mode.
+*
+* @return
+*
+* The number of bytes received.
+*
+* @note
+*
+* The number of bytes is not asserted so that this function may be called with
+* a value of zero to stop an operation that is already in progress.
+*
+*****************************************************************************/
+unsigned int XPs2_Recv(XPs2 * InstancePtr, u8 * BufferPtr,
+ unsigned int NumBytes)
+{
+ unsigned int ReceivedCount;
+
+ /*
+ * Assert validates the input arguments
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufferPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Setup the specified buffer to be sent by setting the instance
+ * variables so it can be sent with polled or interrupt mode
+ */
+ InstancePtr->ReceiveBuffer.RequestedBytes = NumBytes;
+ InstancePtr->ReceiveBuffer.RemainingBytes = NumBytes;
+ InstancePtr->ReceiveBuffer.NextBytePtr = BufferPtr;
+
+ /*
+ * Receive the data from PS/2 and return the number of bytes
+ * received
+ */
+ ReceivedCount = XPs2_ReceiveBuffer(InstancePtr);
+
+ return ReceivedCount;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sends a buffer that has been previously specified by setting
+* up the instance variables of the instance. This function is designed to be
+* an internal function for the XPs2 component such that it may be called
+* from a shell function that sets up the buffer or from an interrupt handler.
+*
+* This function sends the specified buffer of data to the PS/2 port in either
+* polled or interrupt driven modes. This function is non-blocking such that
+* it will return before the data has been sent.
+*
+* In a polled mode, this function will only send 1 byte which is as much data
* as the transmitter can buffer. The application may need to call it repeatedly to
+* send a buffer.
+*
+* In interrupt mode, this function will start sending the specified buffer and
+* then the interrupt handler of the driver will continue until the buffer
+* has been sent. A callback function, as specified by the application, will
+* be called to indicate the completion of sending the buffer.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* NumBytes is the number of bytes actually sent
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+unsigned int XPs2_SendBuffer(XPs2 * InstancePtr)
+{
+ unsigned int SentCount = 0;
+
+ /*
+ * If the transmitter is empty send one byte of data
+ */
+ if (!XPs2_mIsTransmitFull(InstancePtr->BaseAddress)) {
+ XPs2_SendByte(InstancePtr->BaseAddress,
+ InstancePtr->SendBuffer.NextBytePtr[SentCount]);
+
+ SentCount = 1;
+ }
+ /*
+ * Update the buffer to reflect the bytes that were sent
+ * from it
+ */
+ InstancePtr->SendBuffer.NextBytePtr += SentCount;
+ InstancePtr->SendBuffer.RemainingBytes -= SentCount;
+
+ /*
+ * If interrupts are enabled as indicated by the receive interrupt, then
+ * enable the transmit interrupt
+ */
+ if (XPs2_mIsIntrEnabled((InstancePtr->BaseAddress), XPS2_INT_RX_FULL)) {
+ XPs2_mEnableIntr(InstancePtr->BaseAddress, XPS2_INT_TX_ALL |
+ XPS2_INT_WDT_TOUT);
+ }
+
+ return SentCount;
+}
+
+/****************************************************************************/
+/**
+*
+* This function receives a buffer that has been previously specified by setting
+* up the instance variables of the instance. This function is designed to be
+* an internal function for the XPs2 component such that it may be called
+* from a shell function that sets up the buffer or from an interrupt handler.
+*
+* This function will attempt to receive a specified number of bytes of data
+* from PS/2 and store it into the specified buffer. This function is
+* designed for either polled or interrupt driven modes. It is non-blocking
* such that it will return if no data has already been received.
+*
+* In a polled mode, this function will only receive 1 byte which is as much
+* data as the receiver can buffer. The application may need to call it
+* repeatedly to receive a buffer. Polled mode is the default mode of operation
+* for the driver.
+*
+* In interrupt mode, this function will start receiving and then the interrupt
+* handler of the driver will continue until the buffer has been received. A
+* callback function, as specified by the application, will be called to indicate
+* the completion of receiving the buffer or when any receive errors or timeouts
+* occur. Interrupt mode must be enabled using the SetOptions function.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* The number of bytes received.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+unsigned int XPs2_ReceiveBuffer(XPs2 * InstancePtr)
+{
+ unsigned int ReceivedCount = 0;
+
+ /*
	 * Loop until there is no more data buffered by the PS/2 receiver or the
+ * specified number of bytes has been received
+ */
+ while (ReceivedCount < InstancePtr->ReceiveBuffer.RemainingBytes) {
+ /*
		 * If there is data ready to be read, then put the next byte
+ * read into the specified buffer
+ */
+ if (!XPs2_mIsReceiveEmpty(InstancePtr->BaseAddress)) {
+ InstancePtr->ReceiveBuffer.
+ NextBytePtr[ReceivedCount++] =
+ XPs2_RecvByte(InstancePtr->BaseAddress);
+ }
+
+ /*
+ * There is no more data buffered, so exit such that this function does
+ * not block waiting for data
+ */
+ else {
+ break;
+ }
+ }
+
+ /*
+ * Update the receive buffer to reflect the number of bytes that was
+ * received
+ */
+ InstancePtr->ReceiveBuffer.NextBytePtr += ReceivedCount;
+ InstancePtr->ReceiveBuffer.RemainingBytes -= ReceivedCount;
+
+ return ReceivedCount;
+}
+
+/****************************************************************************/
+/**
+*
+* This function is a stub handler that is the default handler such that if the
+* application has not set the handler when interrupts are enabled, this
+* function will be called. The function interface has to match the interface
+* specified for a handler even though none of the arguments are used.
+*
+* @param CallBackRef is unused by this function.
+* @param Event is unused by this function.
+* @param ByteCount is unused by this function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void XPs2_StubHandler(void *CallBackRef, u32 Event,
+ unsigned int ByteCount)
+{
+ /*
	 * Assert always occurs since this is a stub and should never be called
+ */
+ XASSERT_VOID_ALWAYS();
+}
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002-2005 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xps2.h
+*
+* This driver supports the following features:
+*
+* - Polled mode
+* - Interrupt driven mode
+*
+* <b>Interrupts</b>
+*
+* The device does not have any way to disable the receiver such that the
* receiver may contain unwanted data. The IP is reset when the driver is initialized.
+*
+* The driver defaults to no interrupts at initialization such that interrupts
+* must be enabled if desired. An interrupt is generated for any of the following
+* conditions.
+*
+* - Data in the receiver
+* - Any receive status error detected
+* - Data byte transmitted
+* - Any transmit status error detected
+*
+* The application can control which interrupts are enabled using the SetOptions
+* function.
+*
+* In order to use interrupts, it is necessary for the user to connect the
+* driver interrupt handler, XPs2_InterruptHandler(), to the interrupt system of
+* the application. This function does not save and restore the processor
+* context such that the user must provide it. A handler must be set for the
+* driver such that the handler is called when interrupt events occur. The
+* handler is called from interrupt context and is designed to allow application
+* specific processing to be performed.
+*
* The functions, XPs2_Send() and XPs2_Recv(), are provided in the driver to
+* allow data to be sent and received. They are designed to be used in polled
+* or interrupt modes.
+*
+* <b>Initialization & Configuration</b>
+*
+* The XPs2_Config structure is used by the driver to configure itself. This
+* configuration structure is typically created by the tool-chain based on HW
+* build properties.
+*
+* To support multiple runtime loading and initialization strategies employed
+* by various operating systems, the driver instance can be initialized in one
+* of the following ways:
+*
+* - XPs2_Initialize(InstancePtr, DeviceId) - The driver looks up its own
+* configuration structure created by the tool-chain based on an ID provided
+* by the tool-chain.
+*
+* - XPs2_CfgInitialize(InstancePtr, CfgPtr, EffectiveAddr) - Uses a
+* configuration structure provided by the caller. If running in a system
+* with address translation, the provided virtual memory base address
+* replaces the physical address present in the configuration structure.
+*
+* @note
+*
+* None.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ch 06/18/02 First release
+* 1.01a jvb 12/14/05 I separated dependency on the static config table and
+* xparameters.h from the driver initialization by moving
+* _Initialize and _LookupConfig to _sinit.c. I also added
+* the new _CfgInitialize routine.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XPS2_H /* prevent circular inclusions */
+#define XPS2_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xps2_l.h"
+
+/************************** Constant Definitions ****************************/
+
+/*
+ * These constants specify the handler events that are passed to
 * a handler from the driver. These constants are not bit masks such that
+ * only one will be passed at a time to the handler
+ */
+#define XPS2_EVENT_RECV_DATA 1
+#define XPS2_EVENT_RECV_ERROR 2
+#define XPS2_EVENT_RECV_OVF 3
+#define XPS2_EVENT_SENT_DATA 4
+#define XPS2_EVENT_SENT_NOACK 5
+#define XPS2_EVENT_TIMEOUT 6
+
+/*
+ * These constants specify the errors that may be retrieved from the driver
+ * using the XPs2_GetLastErrors function. All of them are bit masks, except
+ * no error, such that multiple errors may be specified.
+ */
+#define XPS2_ERROR_NONE 0x00
+#define XPS2_ERROR_WDT_TOUT_MASK 0x01
+#define XPS2_ERROR_TX_NOACK_MASK 0x02
+#define XPS2_ERROR_RX_OVF_MASK 0x08
+#define XPS2_ERROR_RX_ERR_MASK 0x10
+
+/**************************** Type Definitions ******************************/
+
+/*
+ * This typedef contains configuration information for the device
+ */
+ typedef struct {
+ u16 DeviceId; /* Unique ID of device */
+ u32 BaseAddress; /* Base address of device */
+ } XPs2_Config;
+
+/*
+ * The following data type is used to manage the buffers that are handled
+ * when sending and receiving data in the interrupt mode
+ */
+ typedef struct {
+ u8 *NextBytePtr;
+ unsigned int RequestedBytes;
+ unsigned int RemainingBytes;
+ } XPs2Buffer;
+
+/*
+ * This data type defines a handler which the application must define
+ * when using interrupt mode. The handler will be called from the driver in an
+ * interrupt context to handle application specific processing.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer
+ * when setting the handler, and is passed back to the upper layer when
+ * the handler is called.
+ * @param Event contains one of the event constants indicating why the handler
+ * is being called.
+ * @param EventData contains the number of bytes sent or received at the time
+* of the call.
+*/
+ typedef void (*XPs2_Handler) (void *CallBackRef, u32 Event,
+ unsigned int EventData);
+/*
+ * PS/2 statistics
+ */
+ typedef struct {
+ u16 TransmitInterrupts;
+ u16 ReceiveInterrupts;
+ u16 CharactersTransmitted;
+ u16 CharactersReceived;
+ u16 ReceiveErrors;
+ u16 ReceiveOverflowErrors;
+ u16 TransmitErrors;
+ } XPs2Stats;
+
+/*
+ * The PS/2 driver instance data. The user is required to allocate a
+ * variable of this type for every PS/2 device in the system.
+ * If the last byte of a message was received then call the application
+ * handler, this code should not use an else from the previous check of
+ * the number of bytes to receive because the call to receive the buffer
+ * updates the bytes to receive
+ * A pointer to a variable of this type is then passed to the driver API
+ * functions
+ */
+ typedef struct {
+ XPs2Stats Stats; /* Component Statistics */
+ u32 BaseAddress; /* Base address of device (IPIF) */
+ u32 IsReady; /* Device is initialized and ready */
+ u8 LastErrors; /* the accumulated errors */
+
+ XPs2Buffer SendBuffer;
+ XPs2Buffer ReceiveBuffer;
+
+ XPs2_Handler Handler;
+ void *CallBackRef; /* Callback reference for control handler */
+ } XPs2;
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/************************** Function Prototypes *****************************/
+
+/*
+ * Initialization functions in xps2_sinit.c
+ */
+ int XPs2_Initialize(XPs2 * InstancePtr, u16 DeviceId);
+ XPs2_Config *XPs2_LookupConfig(u16 DeviceId);
+
+/*
 * required functions in xps2.c
+ */
+ int XPs2_CfgInitialize(XPs2 * InstancePtr, XPs2_Config * Config,
+ u32 EffectiveAddr);
+ unsigned int XPs2_Send(XPs2 * InstancePtr, u8 * BufferPtr,
+ unsigned int NumBytes);
+ unsigned int XPs2_Recv(XPs2 * InstancePtr, u8 * BufferPtr,
+ unsigned int NumBytes);
+
+/*
+ * options functions in xps2_options.c
+ */
+ u8 XPs2_GetLastErrors(XPs2 * InstancePtr);
+ u32 XPs2_IsSending(XPs2 * InstancePtr);
+
+/*
+ * interrupt functions in xps2_intr.c
+ */
+ void XPs2_SetHandler(XPs2 * InstancePtr, XPs2_Handler FuncPtr,
+ void *CallBackRef);
+ void XPs2_InterruptHandler(XPs2 * InstancePtr);
+ void XPs2_EnableInterrupt(XPs2 * InstancePtr);
+ void XPs2_DisableInterrupt(XPs2 * InstancePtr);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* end of protection macro */
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/****************************************************************************/
+/**
+*
+* @file xps2_i.h
+*
+* This header file contains internal identifiers, which are those shared
+* between the files of the driver. It is intended for internal use only.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ch 06/18/02 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XPS2_I_H /* prevent circular inclusions */
+#define XPS2_I_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+
+#include "xps2.h"
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions ******************************/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/****************************************************************************
+*
+* This macro clears the statistics of the component instance. The purpose of
+* this macro is to allow common processing between the modules of the
+* component with less overhead than a function in the required module.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Signature: void XPs2_mClearStats(XPs2 *InstancePtr)
+*
+*****************************************************************************/
+#define XPs2_mClearStats(InstancePtr) \
+{ \
+ InstancePtr->Stats.TransmitInterrupts = 0UL; \
+ InstancePtr->Stats.ReceiveInterrupts = 0UL; \
+ InstancePtr->Stats.CharactersTransmitted = 0UL; \
+ InstancePtr->Stats.CharactersReceived = 0UL; \
+ InstancePtr->Stats.ReceiveErrors = 0UL; \
+ InstancePtr->Stats.ReceiveOverflowErrors = 0UL; \
+ InstancePtr->Stats.TransmitErrors = 0UL; \
+}
+
+/************************** Variable Definitions ****************************/
+
+ extern XPs2_Config XPs2_ConfigTable[];
+
+/************************** Function Prototypes *****************************/
+
+ unsigned int XPs2_SendBuffer(XPs2 * InstancePtr);
+ unsigned int XPs2_ReceiveBuffer(XPs2 * InstancePtr);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/****************************************************************************/
+/**
+*
+* @file xps2_intr.c
+*
+* This file contains the functions that are related to interrupt processing
+* for the PS/2 driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ch 06/18/02 First release
+* </pre>
+*
+*****************************************************************************/
+/***************************** Include Files ********************************/
+
+#include "xps2.h"
+#include "xps2_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions ******************************/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/************************** Variable Definitions ****************************/
+
+typedef void (*Handler) (XPs2 * InstancePtr);
+
+/************************** Function Prototypes *****************************/
+
+static void ReceiveDataHandler(XPs2 * InstancePtr);
+static void ReceiveErrorHandler(XPs2 * InstancePtr);
+static void ReceiveOverflowHandler(XPs2 * InstancePtr);
+static void SendDataHandler(XPs2 * InstancePtr);
+static void SendErrorHandler(XPs2 * InstancePtr);
+static void TimeoutHandler(XPs2 * InstancePtr);
+
+/****************************************************************************/
+/**
+*
+* This function sets the handler that will be called when an event (interrupt)
+* occurs in the driver. The purpose of the handler is to allow application
+* specific processing to be performed.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+* @param FuncPtr is the pointer to the callback function.
+* @param CallBackRef is the upper layer callback reference passed back when
+* the callback function is invoked.
+*
+* @return
+*
+* None.
+*
+* @notes
+*
+* There is no assert on the CallBackRef since the driver doesn't know what it
+* is (nor should it)
+*
+*****************************************************************************/
+void XPs2_SetHandler(XPs2 * InstancePtr, XPs2_Handler FuncPtr,
+ void *CallBackRef)
+{
+ /*
+ * Assert validates the input arguments
+ * CallBackRef not checked, no way to know what is valid
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Store the callback and its context for later event delivery */
+ InstancePtr->Handler = FuncPtr;
+ InstancePtr->CallBackRef = CallBackRef;
+}
+
+/****************************************************************************/
+/**
+*
+* This function is the interrupt handler for the PS/2 driver.
+* It must be connected to an interrupt system by the user such that it is
+* called when an interrupt for any PS/2 port occurs. This function does
+* not save or restore the processor context such that the user must
+* ensure this occurs.
+*
+* @param InstancePtr contains a pointer to the instance of the PS/2 port
+* that the interrupt is for.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XPs2_InterruptHandler(XPs2 * InstancePtr)
+{
+ u8 IntrStatus;
+
+ XASSERT_VOID(InstancePtr != NULL);
+
+ /*
+ * Read the interrupt status register to determine which
+ * interrupt is active
+ */
+ IntrStatus = XPs2_mGetIntrStatus(InstancePtr->BaseAddress);
+
+ /*
+ * Each status bit is tested independently (no else-if) so that
+ * every condition active in this one interrupt gets serviced.
+ */
+ if (IntrStatus & XPS2_INT_WDT_TOUT) {
+ TimeoutHandler(InstancePtr);
+ }
+
+ if (IntrStatus & XPS2_INT_RX_ERR) {
+ ReceiveErrorHandler(InstancePtr);
+ }
+
+ if (IntrStatus & XPS2_INT_RX_OVF) {
+ ReceiveOverflowHandler(InstancePtr);
+ }
+
+ if (IntrStatus & XPS2_INT_TX_NOACK) {
+ SendErrorHandler(InstancePtr);
+ }
+
+ if (IntrStatus & XPS2_INT_RX_FULL) {
+ ReceiveDataHandler(InstancePtr);
+ }
+
+ if (IntrStatus & XPS2_INT_TX_ACK) {
+ SendDataHandler(InstancePtr);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function enables the PS/2 interrupts.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+void XPs2_EnableInterrupt(XPs2 * InstancePtr)
+{
+ /*
+ * ASSERT the arguments
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Enable all receiver interrupts (RX_FULL, RX_ERR, RX_OVF)
+ * transmitter interrupts are enabled when sending data.
+ */
+ /* Sets bits in the interrupt mask-set register */
+ XPs2_mEnableIntr(InstancePtr->BaseAddress, XPS2_INT_RX_ALL);
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the PS/2 interrupts.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+void XPs2_DisableInterrupt(XPs2 * InstancePtr)
+{
+ /*
+ * ASSERT the arguments
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Disable all interrupts.
+ */
+ /* Writes the mask-clear register to mask every source at once */
+ XPs2_mDisableIntr(InstancePtr->BaseAddress, XPS2_INT_ALL);
+}
+
+/****************************************************************************/
+/**
+*
+* This function handles the interrupt when data is received.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void ReceiveDataHandler(XPs2 * InstancePtr)
+{
+ /* Acknowledge the RX_FULL interrupt before draining the data */
+ XPs2_mClearIntr(InstancePtr->BaseAddress, XPS2_INT_RX_FULL);
+
+ /*
+ * If there are bytes still to be received in the specified buffer
+ * go ahead and receive them
+ */
+ if (InstancePtr->ReceiveBuffer.RemainingBytes != 0) {
+ XPs2_ReceiveBuffer(InstancePtr);
+ }
+
+ /*
+ * If the last byte of a message was received then call the application
+ * handler, this code should not use an else from the previous check of
+ * the number of bytes to receive because the call to receive the buffer
+ * updates the bytes to receive
+ */
+ if (InstancePtr->ReceiveBuffer.RemainingBytes == 0) {
+ InstancePtr->Handler(InstancePtr->CallBackRef,
+ XPS2_EVENT_RECV_DATA,
+ InstancePtr->ReceiveBuffer.RequestedBytes -
+ InstancePtr->ReceiveBuffer.RemainingBytes);
+ }
+
+ /*
+ * Update the receive stats to reflect the receive interrupt
+ */
+ InstancePtr->Stats.ReceiveInterrupts++;
+}
+
+/****************************************************************************/
+/**
+*
+* This function handles the receive error interrupt.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void ReceiveErrorHandler(XPs2 * InstancePtr)
+{
+ /* Acknowledge the RX_ERR interrupt */
+ XPs2_mClearIntr(InstancePtr->BaseAddress, XPS2_INT_RX_ERR);
+
+ /*
+ * Call the application handler with an error code
+ */
+ InstancePtr->Handler(InstancePtr->CallBackRef, XPS2_EVENT_RECV_ERROR,
+ InstancePtr->ReceiveBuffer.RequestedBytes -
+ InstancePtr->ReceiveBuffer.RemainingBytes);
+
+ /*
+ * Update the LastError variable
+ */
+ InstancePtr->LastErrors |= XPS2_ERROR_RX_ERR_MASK;
+
+ /*
+ * Update the receive stats to reflect the receive error interrupt
+ */
+ InstancePtr->Stats.ReceiveErrors++;
+}
+
+/****************************************************************************/
+/**
+*
+* This function handles the receive overflow interrupt.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void ReceiveOverflowHandler(XPs2 * InstancePtr)
+{
+ /* Acknowledge the RX_OVF interrupt */
+ XPs2_mClearIntr(InstancePtr->BaseAddress, XPS2_INT_RX_OVF);
+
+ /*
+ * Call the application handler with an error code
+ */
+ InstancePtr->Handler(InstancePtr->CallBackRef, XPS2_EVENT_RECV_OVF,
+ InstancePtr->ReceiveBuffer.RequestedBytes -
+ InstancePtr->ReceiveBuffer.RemainingBytes);
+
+ /*
+ * Update the LastError variable
+ */
+ InstancePtr->LastErrors |= XPS2_ERROR_RX_OVF_MASK;
+
+ /*
+ * Update the receive stats to reflect the receive overflow interrupt
+ */
+ InstancePtr->Stats.ReceiveOverflowErrors++;
+}
+
+/****************************************************************************/
+/**
+*
+* This function handles the interrupt when data has been sent and the
+* transmitter holding register is empty.
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void SendDataHandler(XPs2 * InstancePtr)
+{
+ /* Acknowledge the TX_ACK interrupt */
+ XPs2_mClearIntr(InstancePtr->BaseAddress, XPS2_INT_TX_ACK);
+
+ /*
+ * If there are no bytes to be sent from the specified buffer then disable
+ * the transmit interrupt
+ */
+ if (InstancePtr->SendBuffer.RemainingBytes == 0) {
+ XPs2_mDisableIntr(InstancePtr->BaseAddress, XPS2_INT_TX_ALL);
+
+ /*
+ * Call the application handler to indicate the data has been sent
+ */
+ InstancePtr->Handler(InstancePtr->CallBackRef,
+ XPS2_EVENT_SENT_DATA,
+ InstancePtr->SendBuffer.RequestedBytes -
+ InstancePtr->SendBuffer.RemainingBytes);
+ }
+
+ /*
+ * Otherwise there is still more data to send in the specified buffer
+ * so go ahead and send it
+ */
+ else {
+ XPs2_SendBuffer(InstancePtr);
+ }
+
+ /*
+ * Update the transmit stats to reflect the transmit interrupt
+ */
+ InstancePtr->Stats.TransmitInterrupts++;
+}
+
+/****************************************************************************/
+/**
+*
+* This function handles the interrupt when a transmit is not acknowledged
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void SendErrorHandler(XPs2 * InstancePtr)
+{
+ /* Acknowledge the TX_NOACK interrupt */
+ XPs2_mClearIntr(InstancePtr->BaseAddress, XPS2_INT_TX_NOACK);
+
+ /*
+ * Call the application handler
+ */
+ InstancePtr->Handler(InstancePtr->CallBackRef, XPS2_EVENT_SENT_NOACK,
+ InstancePtr->SendBuffer.RequestedBytes -
+ InstancePtr->SendBuffer.RemainingBytes);
+
+ /*
+ * Update the LastError variable
+ */
+ InstancePtr->LastErrors |= XPS2_ERROR_TX_NOACK_MASK;
+
+ /*
+ * Update the transmit stats to reflect the transmit error interrupt
+ */
+ InstancePtr->Stats.TransmitErrors++;
+}
+
+/****************************************************************************/
+/**
+*
+* This function handles the interrupt when a timeout occurs
+*
+* @param InstancePtr is a pointer to the XPs2 instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+*****************************************************************************/
+static void TimeoutHandler(XPs2 * InstancePtr)
+{
+ /* Acknowledge the watchdog timeout interrupt */
+ XPs2_mClearIntr(InstancePtr->BaseAddress, XPS2_INT_WDT_TOUT);
+
+ /*
+ * Call the application handler
+ */
+ InstancePtr->Handler(InstancePtr->CallBackRef, XPS2_EVENT_TIMEOUT,
+ InstancePtr->SendBuffer.RequestedBytes -
+ InstancePtr->SendBuffer.RemainingBytes);
+
+ /*
+ * Update the LastError variable
+ */
+ InstancePtr->LastErrors |= XPS2_ERROR_WDT_TOUT_MASK;
+
+ /*
+ * Update the stats; watchdog timeouts are counted as transmit
+ * errors
+ */
+ InstancePtr->Stats.TransmitErrors++;
+}
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xps2_l.c
+*
+* This file contains low-level driver functions that can be used to access the
+* device. The user should refer to the hardware device specification for more
+* details of the device operation.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ch 06/18/02 First release
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xps2_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+
+/****************************************************************************/
+/**
+*
+* This function sends a data byte to PS/2. This function operates in the
+* polling mode and blocks until the data has been put into the transmit
+* holding register.
+*
+* @param BaseAddress contains the base address of the PS/2 port.
+* @param Data contains the data byte to be sent.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+
+void XPs2_SendByte(u32 BaseAddress, u8 Data)
+{
+ /* Busy-wait (no timeout) until the transmitter can accept a byte */
+ while (XPs2_mIsTransmitFull(BaseAddress)) {
+ }
+
+ XIo_Out8(BaseAddress + XPS2_TX_REG_OFFSET, Data);
+}
+
+/****************************************************************************/
+/**
+*
+* This function receives a byte from PS/2. It operates in the polling mode
+* and blocks until a byte of data is received.
+*
+* @param BaseAddress contains the base address of the PS/2 port.
+*
+* @return The data byte received by PS/2.
+*
+* @note None.
+*
+*****************************************************************************/
+u8 XPs2_RecvByte(u32 BaseAddress)
+{
+ /* Busy-wait (no timeout) until a received byte is available */
+ while (XPs2_mIsReceiveEmpty(BaseAddress)) {
+ }
+
+ return (u8) XIo_In8(BaseAddress + XPS2_RX_REG_OFFSET);
+}
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xps2_l.h
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the device. The user should refer to the
+* hardware device specification for more details of the device operation.
+* High-level driver functions are defined in xps2.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ch 06/18/02 First release
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XPS2_L_H /* prevent circular inclusions */
+#define XPS2_L_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions ****************************/
+
+/* PS/2 register offsets */
+#define XPS2_RESET_OFFSET 0 /* reset register, write only */
+#define XPS2_STATUS_OFFSET 4 /* status register, read only */
+#define XPS2_RX_REG_OFFSET 8 /* receive register, read only */
+#define XPS2_TX_REG_OFFSET 12 /* transmit register, write only */
+#define XPS2_INTSTA_REG_OFFSET 16 /* int status register, read only */
+#define XPS2_INTCLR_REG_OFFSET 20 /* int clear register, write only */
+#define XPS2_INTMSET_REG_OFFSET 24 /* mask set register, read/write */
+#define XPS2_INTMCLR_REG_OFFSET 28 /* mask clear register, write only */
+
+/* reset register bit positions */
+#define XPS2_CLEAR_RESET 0x00
+#define XPS2_RESET 0x01
+
+/* status register bit positions */
+#define XPS2_ST_RX_FULL 0x01
+#define XPS2_ST_TX_FULL 0x02
+
+/* interrupt register bit positions */
+/* used for the INTSTA, INTCLR, INTMSET, INTMCLR register */
+#define XPS2_INT_WDT_TOUT 0x01
+#define XPS2_INT_TX_NOACK 0x02
+#define XPS2_INT_TX_ACK 0x04
+#define XPS2_INT_TX_ALL 0x06
+#define XPS2_INT_RX_OVF 0x08
+#define XPS2_INT_RX_ERR 0x10
+#define XPS2_INT_RX_FULL 0x20
+#define XPS2_INT_RX_ALL 0x38
+#define XPS2_INT_ALL 0x3f
+
+/**************************** Type Definitions ******************************/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/*****************************************************************************
+*
+* Low-level driver macros. The list below provides signatures to help the
+* user use the macros.
+*
+* void XPs2_mReset(u32 BaseAddress)
+* u8 XPs2_mGetStatus(u32 BaseAddress)
+*
+* u8 XPs2_mGetIntrStatus(u32 BaseAddress)
+* void XPs2_mClearIntr(u32 BaseAddress, u8 ClearMask)
+* u32 XPs2_mIsIntrEnabled(u32 BaseAddress, u8 EnabledMask)
+* void XPs2_mEnableIntr(u32 BaseAddress, u8 EnableMask)
+* void XPs2_mDisableIntr(u32 BaseAddress, u8 DisableMask)
+*
+* u32 XPs2_mIsReceiveEmpty(u32 BaseAddress)
+* u32 XPs2_mIsTransmitFull(u32 BaseAddress)
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/**
+* Reset the PS/2 port.
+*
+* @param BaseAddress contains the base address of the device.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mReset(BaseAddress) \
+ XIo_Out8(((BaseAddress) + XPS2_RESET_OFFSET), XPS2_RESET); \
+ XIo_Out8(((BaseAddress) + XPS2_RESET_OFFSET), XPS2_CLEAR_RESET)
+
+/****************************************************************************/
+/**
+* Read the PS/2 status register.
+*
+* @param BaseAddress contains the base address of the device.
+*
+* @return The value read from the register.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mGetStatus(BaseAddress) \
+ (XIo_In8((BaseAddress) + XPS2_STATUS_OFFSET))
+
+/****************************************************************************/
+/**
+* Read the interrupt status register.
+*
+* @param BaseAddress contains the base address of the device.
+*
+* @return The value read from the register.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mGetIntrStatus(BaseAddress) \
+ (XIo_In8((BaseAddress) + XPS2_INTSTA_REG_OFFSET))
+
+/****************************************************************************/
+/**
+* Clear pending interrupts.
+*
+* @param BaseAddress contains the base address of the device.
+* Bitmask for interrupts to be cleared. A "1" clears the interrupt.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mClearIntr(BaseAddress, ClearMask) \
+ XIo_Out8((BaseAddress) + XPS2_INTCLR_REG_OFFSET, (ClearMask))
+
+/****************************************************************************/
+/**
+* Check for enabled interrupts.
+*
+* @param BaseAddress contains the base address of the device.
+* Bitmask for interrupts to be checked.
+*
+* @return TRUE if the interrupt is enabled, FALSE otherwise.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mIsIntrEnabled(BaseAddress, EnabledMask) \
+ (XIo_In8((BaseAddress) + XPS2_INTMSET_REG_OFFSET) & (EnabledMask))
+
+/****************************************************************************/
+/**
+* Enable Interrupts.
+*
+* @param BaseAddress contains the base address of the device.
+* Bitmask for interrupts to be enabled.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mEnableIntr(BaseAddress, EnableMask) \
+ XIo_Out8((BaseAddress) + XPS2_INTMSET_REG_OFFSET, (EnableMask))
+
+/****************************************************************************/
+/**
+* Disable Interrupts.
+*
+* @param BaseAddress contains the base address of the device.
+* Bitmask for interrupts to be disabled.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mDisableIntr(BaseAddress, DisableMask) \
+ XIo_Out8((BaseAddress) + XPS2_INTMCLR_REG_OFFSET, (DisableMask))
+
+/****************************************************************************/
+/**
+* Determine if the receiver is empty (no received data available).
+*
+* @param BaseAddress contains the base address of the device.
+*
+* @return TRUE if the receiver is empty, FALSE if a data byte is available.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mIsReceiveEmpty(BaseAddress) \
+ (!(XPs2_mGetStatus(BaseAddress) & XPS2_ST_RX_FULL))
+
+/****************************************************************************/
+/**
+* Determine if the transmitter is full (a byte cannot be sent yet).
+*
+* @param BaseAddress contains the base address of the device.
+*
+* @return TRUE if the transmitter is full, FALSE if a byte can be sent.
+*
+* @note None.
+*
+******************************************************************************/
+#define XPs2_mIsTransmitFull(BaseAddress) \
+ (XPs2_mGetStatus(BaseAddress) & XPS2_ST_TX_FULL)
+
+/************************** Variable Definitions ****************************/
+
+/************************** Function Prototypes *****************************/
+
+ void XPs2_SendByte(u32 BaseAddress, u8 Data);
+ u8 XPs2_RecvByte(u32 BaseAddress);
+
+/****************************************************************************/
+
+#ifdef __cplusplus
+}
+#endif
+#endif
--- /dev/null
+/*
+ * xps2_linux.c
+ *
+ * Xilinx PS/2 driver to interface PS/2 component to Linux
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2005 (c)MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+/*
+ * This driver is a bit unusual in that it is composed of two logical
+ * parts where one part is the OS independent code and the other part is
+ * the OS dependent code. Xilinx provides their drivers split in this
+ * fashion. This file represents the Linux OS dependent part known as
+ * the Linux adapter. The other files in this directory are the OS
+ * independent files as provided by Xilinx with no changes made to them.
+ * The names exported by those files begin with XPs2_. All functions
+ * in this file that are called by Linux have names that begin with
+ * xps2_. Any other functions are static helper functions.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/serio.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/xilinx_devices.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_OF
+// For open firmware.
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xps2.h"
+
+#define DRIVER_NAME "xilinx_ps2"
+#define DRIVER_DESCRIPTION "Xilinx PS/2 driver"
+
+#define XPS2_NAME_DESC "Xilinx PS/2 Port #%d"
+#define XPS2_PHYS_DESC "xilinxps2/serio%d"
+
+/* Per-port driver state; one instance per probed PS/2 interface. */
+struct xps2data {
+ int irq; /* interrupt line, from the IRQ resource */
+ u32 phys_addr; /* physical base of the register window */
+ u32 remap_size; /* size of the requested/ioremapped region */
+ struct pt_regs *saved_regs;
+ spinlock_t lock; /* serializes XPs2_Send() in sxps2_write() */
+ u8 rxb; /* Rx buffer */
+ unsigned long tx_end; /* jiffies deadline for transmit retries */
+ unsigned int dfl; /* accumulated SERIO_* flags for next byte */
+ /*
+ * The underlying OS independent code needs space as well. A
+ * pointer to the following XPs2 structure will be passed to
+ * any XPs2_ function that requires it. However, we treat the
+ * data as an opaque object in this file (meaning that we never
+ * reference any of the fields inside of the structure).
+ */
+ XPs2 ps2;
+ /*
+ * serio
+ */
+ struct serio serio;
+};
+
+/*******************************************************************************
+ * This configuration stuff should become unnecessary after EDK version 8.x is
+ * released.
+ ******************************************************************************/
+
+static DECLARE_MUTEX(cfg_sem);
+
+/*********************/
+/* Interrupt handler */
+/*********************/
+
+/*
+ * Top-level IRQ handler registered in sxps2_open(); dev_id is the port's
+ * xps2data. All status decoding is delegated to the Xilinx
+ * OS-independent handler, which calls back into sxps2_handler().
+ */
+static irqreturn_t xps2_interrupt(int irq, void *dev_id)
+{
+ struct xps2data *drvdata = (struct xps2data *)dev_id;
+
+ /* Call EDK handler */
+ XPs2_InterruptHandler(&drvdata->ps2);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Event callback invoked by the EDK layer from interrupt context.
+ * Error events accumulate SERIO_* flags in drvdata->dfl; the flags are
+ * delivered together with the next received byte and then cleared.
+ */
+static void sxps2_handler(void *CallbackRef, u32 Event, unsigned int EventData)
+{
+ struct xps2data *drvdata = (struct xps2data *)CallbackRef;
+ u8 c;
+
+ switch (Event) {
+ case XPS2_EVENT_RECV_OVF:
+ printk(KERN_ERR "%s: receive overrun error.\n",
+ drvdata->serio.name);
+ /* no break: overrun also falls through and flags the byte as
+ * bad — presumably intentional; TODO confirm */
+ case XPS2_EVENT_RECV_ERROR:
+ drvdata->dfl |= SERIO_PARITY;
+ break;
+ case XPS2_EVENT_SENT_NOACK:
+ case XPS2_EVENT_TIMEOUT:
+ drvdata->dfl |= SERIO_TIMEOUT;
+ break;
+ case XPS2_EVENT_RECV_DATA:
+ if (EventData > 0) {
+ if (EventData != 1) {
+ printk(KERN_ERR
+ "%s: wrong rcvd byte count (%d).\n",
+ drvdata->serio.name, EventData);
+ }
+ /* consume the byte, re-arm the 1-byte receive, then
+ * push it (plus any pending flags) up to serio */
+ c = drvdata->rxb;
+
+ XPs2_Recv(&drvdata->ps2, &drvdata->rxb, 1);
+ serio_interrupt(&drvdata->serio, c, drvdata->dfl);
+ drvdata->dfl = 0;
+ }
+ break;
+ case XPS2_EVENT_SENT_DATA:
+ break;
+ default:
+ printk(KERN_ERR "%s: unrecognized event %u.\n",
+ drvdata->serio.name, Event);
+ }
+}
+
+/*******************/
+/* serio callbacks */
+/*******************/
+
+/*
+ * sxps2_write() sends a byte out through the PS/2 interface.
+ *
+ * The sole purpose of drvdata->tx_end is to prevent the driver
+ * from locking up in the do {} while; loop when nothing is connected
+ * to the given PS/2 port. That's why we do not try to recover
+ * from the transmission failure.
+ * drvdata->tx_end needs not to be initialized to some "far in the
+ * future" value, as the very first attempt to XPs2_Send() a byte
+ * is always successful, and drvdata->tx_end will be set to a proper
+ * value at that moment - before the 1st use in the comparison.
+ */
+/*
+ * serio .write callback: retry XPs2_Send() under the port lock until it
+ * accepts the byte or the tx_end deadline passes (see comment above).
+ * Returns 0 on success, 1 if transmission is frozen.
+ */
+static int sxps2_write(struct serio *pserio, unsigned char c)
+{
+ struct xps2data *drvdata = pserio->port_data;
+ unsigned long flags;
+ int retval;
+
+ do {
+ spin_lock_irqsave(&drvdata->lock, flags);
+ retval = XPs2_Send(&drvdata->ps2, &c, 1);
+ spin_unlock_irqrestore(&drvdata->lock, flags);
+
+ if (retval == 1) {
+ /* byte accepted: refresh the deadline for the next one */
+ drvdata->tx_end = jiffies + HZ;
+ return 0; /* success */
+ }
+ } while (!time_after(jiffies, drvdata->tx_end));
+
+ return 1; /* transmission is frozen */
+}
+
+/*
+ * sxps2_open() is called when a port is open by the higher layer.
+ */
+
+/*
+ * serio .open callback: request the IRQ, enable device interrupts and
+ * arm the first 1-byte receive. Returns 0 on success or the negative
+ * errno from request_irq().
+ */
+static int sxps2_open(struct serio *pserio)
+{
+ struct xps2data *drvdata = pserio->port_data;
+ int retval;
+
+ retval = request_irq(drvdata->irq, &xps2_interrupt, 0,
+ "xilinx_ps2", drvdata);
+ if (retval) {
+ printk(KERN_ERR
+ "%s: Couldn't allocate interrupt %d.\n",
+ drvdata->serio.name, drvdata->irq);
+ return retval;
+ }
+
+ /* start reception */
+ XPs2_EnableInterrupt(&drvdata->ps2);
+ XPs2_Recv(&drvdata->ps2, &drvdata->rxb, 1);
+
+ return 0; /* success */
+}
+
+/*
+ * sxps2_close() frees the interrupt.
+ */
+
+/*
+ * serio .close callback: mirror of sxps2_open() — mask device
+ * interrupts, then release the IRQ line.
+ */
+static void sxps2_close(struct serio *pserio)
+{
+ struct xps2data *drvdata = pserio->port_data;
+
+ XPs2_DisableInterrupt(&drvdata->ps2);
+ free_irq(drvdata->irq, drvdata);
+}
+
+/******************************
+ * The platform device driver *
+ ******************************/
+
+/** Shared device initialization code */
+static int xps2_setup(
+ struct device *dev,
+ int id,
+ struct resource *r_mem,
+ struct resource *r_irq) {
+ XPs2_Config xps2_cfg;
+ struct xps2data *drvdata;
+ unsigned long remap_size;
+ int retval;
+
+ if (!dev)
+ return -EINVAL;
+
+ drvdata = kzalloc(sizeof(struct xps2data), GFP_KERNEL);
+ if (!drvdata) {
+ dev_err(dev, "Couldn't allocate device private record\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&drvdata->lock);
+ dev_set_drvdata(dev, (void *)drvdata);
+
+ if (!r_mem || !r_irq) {
+ dev_err(dev, "IO resource(s) not found\n");
+ retval = -EFAULT;
+ goto failed1;
+ }
+ drvdata->irq = r_irq->start;
+
+ remap_size = r_mem->end - r_mem->start + 1;
+ if (!request_mem_region(r_mem->start, remap_size, DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at 0x%08x\n",
+ r_mem->start);
+ retval = -EBUSY;
+ goto failed1;
+ }
+
+ /* Fill in cfg data and add them to the list */
+ drvdata->phys_addr = r_mem->start;
+ drvdata->remap_size = remap_size;
+ xps2_cfg.DeviceId = id;
+ xps2_cfg.BaseAddress = (u32) ioremap(r_mem->start, remap_size);
+ if (xps2_cfg.BaseAddress == 0) {
+ dev_err(dev, "Couldn't ioremap memory at 0x%08x\n",
+ r_mem->start);
+ retval = -EFAULT;
+ goto failed2;
+ }
+
+ /* Tell the Xilinx code to bring this PS/2 interface up. */
+ down(&cfg_sem);
+ if (XPs2_CfgInitialize(&drvdata->ps2, &xps2_cfg, xps2_cfg.BaseAddress)
+ != XST_SUCCESS) {
+ up(&cfg_sem);
+ dev_err(dev, "Could not initialize device.\n");
+ retval = -ENODEV;
+ goto failed3;
+ }
+ up(&cfg_sem);
+
+ /* Set up the interrupt handler. */
+ XPs2_SetHandler(&drvdata->ps2, sxps2_handler, drvdata);
+
+ dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%08X, irq=%d\n",
+ drvdata->phys_addr,
+ drvdata->ps2.BaseAddress,
+ drvdata->irq);
+
+ drvdata->serio.id.type = SERIO_8042;
+ drvdata->serio.write = sxps2_write;
+ drvdata->serio.open = sxps2_open;
+ drvdata->serio.close = sxps2_close;
+ drvdata->serio.port_data = drvdata;
+ drvdata->serio.dev.parent = dev;
+ snprintf(drvdata->serio.name, sizeof(drvdata->serio.name),
+ XPS2_NAME_DESC, id);
+ snprintf(drvdata->serio.phys, sizeof(drvdata->serio.phys),
+ XPS2_PHYS_DESC, id);
+ serio_register_port(&drvdata->serio);
+
+ return 0; /* success */
+
+ failed3:
+ iounmap((void *)(xps2_cfg.BaseAddress));
+
+ failed2:
+ release_mem_region(r_mem->start, remap_size);
+
+ failed1:
+ kfree(drvdata);
+ dev_set_drvdata(dev, NULL);
+
+ return retval;
+}
+
+/*
+ * Platform-bus probe: collect the MEM and IRQ resources from the
+ * platform device and defer to the shared xps2_setup().
+ */
+static int xps2_probe(struct device *dev)
+{
+ struct resource *r_irq = NULL; /* Interrupt resources */
+ struct resource *r_mem = NULL; /* IO mem resources */
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* param check */
+ if (!pdev) {
+ dev_err(dev, "Probe called with NULL param.\n");
+ return -ENODEV;
+ }
+
+ /* Find irq number, map the control registers in */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_irq || !r_mem) {
+ dev_err(dev, "IO resource(s) not found.\n");
+ return -ENODEV;
+ }
+
+ /* pdev->id doubles as the serio port number */
+ return xps2_setup(dev, pdev->id, r_mem, r_irq);
+}
+
+/*
+ * Teardown shared by both bus types: unregister the serio port, then
+ * release resources in the reverse order xps2_setup() acquired them.
+ */
+static int xps2_remove(struct device *dev)
+{
+ struct xps2data *drvdata;
+
+ if (!dev)
+ return -EINVAL;
+
+ drvdata = (struct xps2data *)dev_get_drvdata(dev);
+
+ serio_unregister_port(&drvdata->serio);
+
+ iounmap((void *)(drvdata->ps2.BaseAddress));
+
+ release_mem_region(drvdata->phys_addr, drvdata->remap_size);
+
+ kfree(drvdata);
+ dev_set_drvdata(dev, NULL);
+
+ return 0; /* success */
+}
+
+/* Legacy platform-bus binding (non-device-tree systems). */
+static struct device_driver xps2_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+ .probe = xps2_probe,
+ .remove = xps2_remove
+};
+
+#ifdef CONFIG_OF
+/*
+ * OF (device tree) probe: extract the register window, the interrupt
+ * and the optional "port-number" property from the node, then defer to
+ * the shared xps2_setup(). Returns 0 on success or a negative errno.
+ */
+static int __devinit xps2_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+ struct resource r_irq_struct;
+ struct resource r_mem_struct;
+ struct resource *r_irq = &r_irq_struct; /* Interrupt resources */
+ struct resource *r_mem = &r_mem_struct; /* IO mem resources */
+ int rc = 0;
+ const unsigned int *id;
+
+ printk(KERN_INFO "Device Tree Probing \'%s\'\n",
+ ofdev->node->name);
+
+ /* Get iospace for the device */
+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
+ if(rc) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ return rc;
+ }
+
+ /* Get IRQ for the device */
+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+ if(rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "no IRQ found.\n");
+ /*
+ * NO_IRQ is 0 on some platforms, so returning rc here
+ * could report success for a failed probe. Return a
+ * real error code instead.
+ */
+ return -ENODEV;
+ }
+
+ /* Use "port-number" as the serio port id, -1 if absent */
+ id = of_get_property(ofdev->node, "port-number", NULL);
+ return xps2_setup(&ofdev->dev, id ? *id : -1, r_mem, r_irq);
+}
+
+/* OF remove: thin wrapper around the shared xps2_remove(). */
+static int __devexit xps2_of_remove(struct of_device *dev)
+{
+ return xps2_remove(&dev->dev);
+}
+
+/* Device-tree compatible strings this driver binds to. */
+static struct of_device_id xps2_of_match[] = {
+ { .compatible = "xlnx,opb-ps2-dual-ref-1.00.a", },
+ { .compatible = "xlnx,xps-ps2-1.00.a", },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(of, xps2_of_match);
+
+/* OF platform binding (device-tree systems). */
+static struct of_platform_driver xps2_of_driver = {
+ .name = DRIVER_NAME,
+ .match_table = xps2_of_match,
+ .probe = xps2_of_probe,
+ .remove = __devexit_p(xps2_of_remove),
+};
+#endif
+
+/*
+ * Module init: register on both the platform bus and, when CONFIG_OF,
+ * the OF platform bus.
+ * NOTE(review): OR-ing two status codes can yield a meaningless errno
+ * when both registrations fail — TODO confirm intended semantics.
+ */
+static int __init xps2_init(void)
+{
+ int status = driver_register(&xps2_driver);
+#ifdef CONFIG_OF
+ status |= of_register_platform_driver(&xps2_of_driver);
+#endif
+ return status;
+}
+
+/* Module exit: unregister from both buses (mirror of xps2_init). */
+static void __exit xps2_cleanup(void)
+{
+ driver_unregister(&xps2_driver);
+#ifdef CONFIG_OF
+ of_unregister_platform_driver(&xps2_of_driver);
+#endif
+}
+
+module_init(xps2_init);
+module_exit(xps2_cleanup);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
based on a network adapter and DMA messaging.
endif # MISC_DEVICES
+
+
+#
+# Xilinx devices and common device driver infrastructure
+#
+
+config XILINX_DRIVERS
+ bool
+ depends on PPC32 || MICROBLAZE
+ default y
+ ---help---
+ This option is used to enable all of the Xilinx drivers on
+ supported architectures. This is often useful if you have a
+ Xilinx FPGA in a system, either using embedded processors
+ internal to the FPGA or external processors.
+
+config NEED_XILINX_DMAV3
+ bool
+
+config NEED_XILINX_LLDMA
+ bool
+
+config NEED_XILINX_IPIF
+ bool
+
+config NEED_XILINX_PPC_DCR
+ bool
+
This provides support for the embedded root file system
on PMC MSP devices. This memory is mapped as a MTD block device.
+config MTD_XILINX_OPB
+ bool "OPB CFI Flash device mapped on Xilinx boards"
+ depends on MTD_CFI_INTELEXT && SP3E && HAVE_XILINX_EMC
+ help
+ OPB Flash mapping support for Xilinx boards
+
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
# Chip mappings
obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
+obj-$(CONFIG_MTD_XILINX_OPB) += xilinx-opb-flash.o
obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
obj-$(CONFIG_MTD_BAST) += bast-flash.o
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
/****************************************************************************/
+/*
+ * Probe the filesystem image at @addr and return its length in bytes:
+ * big-endian word 2 for romfs ("-rom1fs-" magic), little-endian word 1
+ * for cramfs (0x28cd3d45 magic), or 0 when neither magic matches.
+ */
+inline unsigned get_rootfs_len(unsigned *addr)
+{
+ if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */
+ return be32_to_cpu(addr[2]);
+ else if(addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */
+ return le32_to_cpu(addr[1]);
+ return 0;
+}
+
int __init uclinux_mtd_init(void)
{
struct mtd_info *mtd;
mapp = &uclinux_ram_map;
mapp->phys = addr;
- mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ mapp->size = PAGE_ALIGN(get_rootfs_len((unsigned *)addr));
mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
--- /dev/null
+/*
+ * drivers/mtd/maps/xilinx-opb-flash.c
+ *
+ * MTD mapping driver for the OPB Flash device on Xilinx boards.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * Copyright 2007 Xilinx, Inc.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/xparameters.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+/* Flash window described entirely by compile-time XPAR_* constants
+ * from xparameters.h (one fixed bank; .virt filled in at init). */
+static struct map_info map_bank = {
+ .name = "OPB Flash on Xilinx board",
+ .size = XPAR_FLASH_HIGHADDR - XPAR_FLASH_BASEADDR + 1,
+ .bankwidth = XPAR_FLASH_BUSWIDTH,
+ .phys = XPAR_FLASH_BASEADDR,
+};
+
+static struct mtd_info *mtd_bank;
+
+/*
+ * Map the fixed flash window, CFI-probe it and register the resulting
+ * MTD device. Returns 0 on success, -EIO/-ENXIO on failure.
+ * NOTE(review): printk calls lack KERN_* levels, and the result of
+ * add_mtd_device() is ignored — TODO confirm acceptable for this tree.
+ */
+static int __init init_opb_mtd(void) {
+
+ map_bank.virt = ioremap(map_bank.phys, map_bank.size);
+ if (!map_bank.virt) {
+ printk("OPB Flash: failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&map_bank);
+
+ /* CFI probe; on failure undo the mapping before bailing out */
+ mtd_bank = do_map_probe("cfi_probe", &map_bank);
+ if (!mtd_bank) {
+ printk("OPB Flash: failed to find a mapping\n");
+ iounmap(map_bank.virt);
+ map_bank.virt = 0;
+ return -ENXIO;
+ }
+
+ mtd_bank->owner = THIS_MODULE;
+
+ printk("Registering a %ldMB OPB Flash at 0x%lX\n",
+ map_bank.size >> 20, map_bank.phys);
+
+ add_mtd_device(mtd_bank);
+
+ return 0;
+}
+
+/*
+ * Module exit: unregister/destroy the MTD device and unmap the window.
+ * Both steps are guarded so a partially-failed init is safe to unwind.
+ */
+static void __exit cleanup_opb_mtd(void) {
+ if (mtd_bank) {
+ del_mtd_device(mtd_bank);
+ map_destroy(mtd_bank);
+ }
+ if (map_bank.virt) {
+ iounmap((void *)map_bank.virt);
+ map_bank.virt = 0;
+ }
+}
+
+module_init(init_opb_mtd);
+module_exit(cleanup_opb_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("MTD map driver for OPB Flash on Xilinx boards");
Say Y here if you want to use the NE2000 compatible
controller on the Renesas H8/300 processor.
+config XILINX_EMAC
+ tristate "Xilinx 10/100 OPB EMAC support"
+ depends on XILINX_DRIVERS
+ select XILINX_EDK
+ select NEED_XILINX_IPIF
+ help
+ This driver supports the Xilinx 10/100 opb_emac
+
+config XILINX_EMACLITE
+ tristate "Xilinx 10/100 OPB EMACLITE support"
+ depends on XILINX_DRIVERS
+ select XILINX_EDK
+ select NEED_XILINX_IPIF
+ help
+ This driver supports the 10/100 OPB EMACLITE.
+
+
source "drivers/net/fec_8xx/Kconfig"
source "drivers/net/fs_enet/Kconfig"
To compile this driver as a module, choose M here. The module
will be called atl1.
+config XILINX_TEMAC
+ tristate "Xilinx TEMAC 10/100/1000 Ethernet MAC driver"
+ depends on XILINX_DRIVERS
+ select XILINX_EDK
+ select NEED_XILINX_DMAV3
+ select NEED_XILINX_IPIF
+ help
+ This driver supports the Xilinx TEMAC found in Virtex 4 FPGAs
+ when the plb_temac adapter interface is used.
+
+config XILINX_LLTEMAC
+ tristate "Xilinx LLTEMAC 10/100/1000 Ethernet MAC driver"
+ depends on XILINX_DRIVERS
+ select XILINX_EDK
+ select NEED_XILINX_LLDMA
+ help
+ This driver supports the 10/100/1000 LLTEMAC.
+
+choice
+ prompt "Xilinx LLTEMAC PHY Support"
+ default XILINX_LLTEMAC_MARVELL_88E1111_GMII
+
+config XILINX_LLTEMAC_MARVELL_88E1111_RGMII
+ bool "MARVELL 88E1111 using RGMII"
+ help
+ This phy is used by many Xilinx boards. This option includes
+ code for enabling RGMII over copper.
+
+config XILINX_LLTEMAC_MARVELL_88E1111_GMII
+ bool "MARVELL 88E1111 using GMII"
+ help
+ This phy is used by many Xilinx boards. This option includes
+ code for enabling GMII over copper, and for setting the correct
+ speed based on whatever the phy is able to autonegotiate. This is
+ usually the best option to use on ML40x and ML50x boards.
+
+config XILINX_LLTEMAC_MARVELL_88E1111_MII
+ bool "MARVELL 88E1111 using MII or other PHY"
+ help
+ If your physical interface is not covered by the other
+ selections, then choose this option. This option includes generic
+ speed autonegotiation code.
+
+endchoice
+
endif # NETDEV_1000
#
obj-$(CONFIG_ETRAX_ETHERNET) += cris/
obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
+obj-$(CONFIG_XILINX_EMAC) += xilinx_emac/
+obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite/
+obj-$(CONFIG_XILINX_LLTEMAC) += xilinx_lltemac/
+obj-$(CONFIG_XILINX_TEMAC) += xilinx_temac/
+
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_FS_ENET) += fs_enet/
extern struct net_device *mc32_probe(int unit);
extern struct net_device *cops_probe(int unit);
extern struct net_device *ltpc_probe(void);
+// -wgr- extern struct net_device *xemac_probe(int unit);
/* Detachable devices ("pocket adaptors") */
extern struct net_device *de620_probe(int unit);
#ifdef CONFIG_CS89x0
{cs89x0_probe, 0},
#endif
+// -wgr- #ifdef CONFIG_XILINX_EMAC
+// -wgr- {xemac_probe, 0},
+// -wgr- #endif
#ifdef CONFIG_AT1700
{at1700_probe, 0},
#endif
--- /dev/null
+#
+# Makefile for the Xilinx 10/100 OPB EMAC driver
+#
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/xilinx_common
+
+obj-$(CONFIG_XILINX_EMAC) := xilinx_emac.o
+
+# The Linux driver for the Xilinx EMAC core.
+xilinx_emac-objs := xemac_linux.o
+
+# The Xilinx OS independent code.
+xilinx_emac-objs += xemac.o xemac_intr.o xemac_intr_dma.o \
+ xemac_options.o xemac_phy.o xemac_intr_fifo.o
--- /dev/null
+/* $Id: xemac.c,v 1.1 2006/11/01 17:30:12 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac.c
+*
+* The XEmac driver. Functions in this file are the minimum required functions
+* for this driver. See xemac.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00b rpm 07/23/02 Removed the PHY reset from Initialize()
+* 1.00b rmm 09/23/02 Removed commented code in Initialize(). Recycled as
+* XEmac_mPhyReset macro in xemac_l.h.
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00c rpm 12/12/02 Changed location of IsStarted assignment in XEmac_Start
+* to be sure the flag is set before the device and
+* interrupts are enabled.
+* 1.00c rpm 02/03/03 SelfTest was not clearing polled mode. Take driver out
+* of polled mode in XEmac_Reset() to fix this problem.
+* 1.00c rmm 05/13/03 Fixed diab compiler warnings relating to asserts.
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.00e rmm 04/06/04 Changed XEmac_Initialize() to clear the instance data.
+* Added XEM_NO_SGEND_INT_OPTION processing to XEmac_Start().
+* 1.01a ecm 09/26/05 Changed XEmac_Initialize() to create instance variable
+* which has the default DMA control words for the hardware.
+* Added the Checksum offload initialization to control word.
+* 1.01a wgr 09/14/06 Ported to Linux 2.6
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+#include "xbasic_types.h"
+#include "xemac_i.h"
+#include "xio.h"
+#include "xbuf_descriptor.h"
+#include "xdma_channel.h"
+#include "xipif_v1_23_b.h" /* Uses v1.23b of the IPIF */
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+
+/************************** Function Prototypes ******************************/
+static int ConfigureDma(XEmac * InstancePtr);
+static int ConfigureFifo(XEmac * InstancePtr);
+static void StubFifoHandler(void *CallBackRef);
+static void StubErrorHandler(void *CallBackRef, int ErrorCode);
+static void StubSgHandler(void *CallBackRef, XBufDescriptor * BdPtr,
+ u32 NumBds);
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Initialize a specific XEmac instance/driver.
+*
+* Copies the provided configuration into the instance, sets up the
+* instance data as determined by the configuration table, and resets
+* the device to its initial state.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param ConfigPtr is a pointer to the configuration for the device
+* controlled by this XEmac instance. Passing in a configuration
+* associates the generic XEmac instance with a specific device, as
+* chosen by the caller or application developer.
+* @param VirtualAddress is the address for the base address in the instance.
+* This method is specific to Linux usage.
+*
+* @return
+*
+* - XST_SUCCESS if initialization was successful
+* - XST_DEVICE_NOT_FOUND if device configuration information was not found for
+* a device with the supplied device ID.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmac_CfgInitialize(XEmac * InstancePtr, XEmac_Config * ConfigPtr,
+ u32 VirtualAddress)
+{
+ int Result;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(ConfigPtr != NULL);
+
+ /* Clear instance memory */
+ memset(InstancePtr, 0, sizeof(XEmac));
+
+ /*
+ * Assign the provided ConfigPtr to the instance.
+ */
+ InstancePtr->Config = *ConfigPtr;
+
+ /*
+ * Save the baseaddresses for faster access. A caller-supplied
+ * VirtualAddress (Linux ioremap cookie) overrides the physical
+ * base from the config; PhysAddress always keeps the physical one.
+ */
+ if (0 != VirtualAddress) {
+ InstancePtr->BaseAddress = VirtualAddress;
+ }
+ else {
+ InstancePtr->BaseAddress = ConfigPtr->BaseAddress;
+ }
+ InstancePtr->PhysAddress = ConfigPtr->BaseAddress;
+
+ /*
+ * Set some default values
+ */
+ InstancePtr->IsReady = 0;
+ InstancePtr->IsStarted = 0;
+ InstancePtr->HasMulticastHash = FALSE;
+
+ /* Always default polled to false, let user configure this mode */
+ InstancePtr->IsPolled = FALSE;
+ /* Install stub callbacks so unregistered handlers are detectable
+ * later (XEmac_Start() refuses to start with stubs in place). */
+ InstancePtr->FifoRecvHandler = StubFifoHandler;
+ InstancePtr->FifoSendHandler = StubFifoHandler;
+ InstancePtr->ErrorHandler = StubErrorHandler;
+ InstancePtr->SgRecvHandler = StubSgHandler;
+ InstancePtr->SgSendHandler = StubSgHandler;
+
+
+ /*
+ * Configure the send and receive FIFOs in the MAC
+ */
+ Result = ConfigureFifo(InstancePtr);
+ if (Result != XST_SUCCESS) {
+ return Result;
+ }
+
+ /*
+ * If the device is configured for DMA, configure the send and receive DMA
+ * channels in the MAC.
+ */
+ if (XEmac_mIsDma(InstancePtr)) {
+ Result = ConfigureDma(InstancePtr);
+ if (Result != XST_SUCCESS) {
+ return Result;
+ }
+
+ /* Build default per-direction DMA control words; DRE
+ * (data realignment) is ORed in only when the HW has it. */
+ if (XEmac_mIsTxDre(InstancePtr) == TRUE) {
+ InstancePtr->TxDmaControlWord = XEM_DFT_SEND_BD_MASK |
+ XDC_DMACR_DRE_MODE_MASK;
+ }
+ else {
+ InstancePtr->TxDmaControlWord = XEM_DFT_SEND_BD_MASK;
+ }
+
+ if (XEmac_mIsRxDre(InstancePtr) == TRUE) {
+ InstancePtr->RxDmaControlWord = XEM_DFT_RECV_BD_MASK |
+ XDC_DMACR_DRE_MODE_MASK;
+ }
+ else {
+ InstancePtr->RxDmaControlWord = XEM_DFT_RECV_BD_MASK;
+ }
+
+ /*
+ * TX Checksum offload is dynamic and needs to be set for every
+ * BD that uses it. It is not applicable to all data types so the
+ * adapter needs to handle each call individually
+ */
+
+ if (XEmac_mIsRxHwCsum(InstancePtr) == TRUE) {
+ InstancePtr->RxDmaControlWord |=
+ XDC_DMACR_CS_OFFLOAD_MASK;
+ }
+ }
+
+ /*
+ * Indicate the component is now ready to use. Note that this is done before
+ * we reset the device and the PHY below, which may seem a bit odd. The
+ * choice was made to move it here rather than remove the asserts in various
+ * functions (e.g., Reset() and all functions that it calls). Applications
+ * that use multiple threads, one to initialize the XEmac driver and one
+ * waiting on the IsReady condition could have a problem with this sequence.
+ */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ /*
+ * Reset the MAC to get it into its initial state. It is expected that
+ * device configuration by the user will take place after this
+ * initialization is done, but before the device is started.
+ */
+ XEmac_Reset(InstancePtr);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Start the Ethernet controller as follows:
+* - If not in polled mode
+* - Set the internal interrupt enable registers appropriately
+* - Enable interrupts within the device itself. Note that connection of
+* the driver's interrupt handler to the interrupt source (typically
+* done using the interrupt controller component) is done by the higher
+* layer software.
+* - If the device is configured with scatter-gather DMA, start the DMA
+* channels if the descriptor lists are not empty
+* - Enable the transmitter
+* - Enable the receiver
+*
+* The PHY is enabled after driver initialization. We assume the upper layer
+* software has configured it and the EMAC appropriately before this function
+* is called.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if the device was started successfully
+* - XST_NO_CALLBACK if a callback function has not yet been registered using
+* the SetxxxHandler function. This is required if in interrupt mode.
+* - XST_DEVICE_IS_STARTED if the device is already started
+* - XST_DMA_SG_NO_LIST if configured for scatter-gather DMA and a descriptor
+* list has not yet been created for the send or receive channel.
+*
+* @note
+*
+* The driver tries to match the hardware configuration. So if the hardware
+* is configured with scatter-gather DMA, the driver expects to start the
+* scatter-gather channels and expects that the user has set up the buffer
+* descriptor lists already. If the user expects to use the driver in a mode
+* different than how the hardware is configured, the user should modify the
+* configuration table to reflect the mode to be used. Modifying the config
+* table is a workaround for now until we get some experience with how users
+* are intending to use the hardware in its different configurations. For
+* example, if the hardware is built with scatter-gather DMA but the user is
+* intending to use only simple DMA, the user either needs to modify the config
+* table as a workaround or rebuild the hardware with only simple DMA.
+*
+* This function makes use of internal resources that are shared between the
+* Start, Stop, and SetOptions functions. So if one task might be setting device
+* options while another is trying to start the device, the user is required to
+* provide protection of this shared data (typically using a semaphore).
+*
+******************************************************************************/
+int XEmac_Start(XEmac * InstancePtr)
+{
+ u32 ControlReg;
+ int Result;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * If it is already started, return a status indicating so
+ */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STARTED;
+ }
+
+ /*
+ * If not polled, enable interrupts
+ */
+ if (!InstancePtr->IsPolled) {
+ /*
+ * Verify that the callbacks have been registered, then enable
+ * interrupts. A handler still equal to its init-time stub
+ * means the caller never registered one.
+ */
+ if (XEmac_mIsSgDma(InstancePtr)) {
+ if ((InstancePtr->SgRecvHandler == StubSgHandler) ||
+ (InstancePtr->SgSendHandler == StubSgHandler)) {
+ return XST_NO_CALLBACK;
+ }
+
+ /* Enable IPIF interrupts */
+ XIIF_V123B_WRITE_DIER(InstancePtr->BaseAddress,
+ XEM_IPIF_DMA_DFT_MASK |
+ XIIF_V123B_ERROR_MASK);
+ XIIF_V123B_WRITE_IIER(InstancePtr->BaseAddress,
+ XEM_EIR_DFT_SG_MASK);
+
+ /* Enable scatter-gather DMA interrupts */
+ ControlReg = XEM_DMA_SG_INTR_MASK; /* Default mask */
+ if (InstancePtr->IsSgEndDisable) {
+ ControlReg &= ~XDC_IXR_SG_END_MASK; /* Don't enable SGEND */
+ }
+
+ XDmaChannel_SetIntrEnable(&InstancePtr->RecvChannel,
+ ControlReg);
+ XDmaChannel_SetIntrEnable(&InstancePtr->SendChannel,
+ ControlReg);
+ }
+ else {
+ if ((InstancePtr->FifoRecvHandler == StubFifoHandler) ||
+ (InstancePtr->FifoSendHandler == StubFifoHandler)) {
+ return XST_NO_CALLBACK;
+ }
+
+ /* Enable IPIF interrupts (used by simple DMA also) */
+ XIIF_V123B_WRITE_DIER(InstancePtr->BaseAddress,
+ XEM_IPIF_FIFO_DFT_MASK |
+ XIIF_V123B_ERROR_MASK);
+ XIIF_V123B_WRITE_IIER(InstancePtr->BaseAddress,
+ XEM_EIR_DFT_FIFO_MASK);
+ }
+
+ /* Enable the global IPIF interrupt output */
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ }
+
+ /*
+ * Indicate that the device is started before we enable the transmitter
+ * or receiver. This needs to be done before because as soon as the
+ * receiver is enabled we may get an interrupt, and there are functions
+ * in the interrupt handling path that rely on the IsStarted flag.
+ */
+ InstancePtr->IsStarted = XCOMPONENT_IS_STARTED;
+
+ /*
+ * Enable the transmitter, and receiver (do a read/modify/write to preserve
+ * current settings). There is no critical section here since this register
+ * is not modified during interrupt context.
+ */
+ ControlReg = XIo_In32(InstancePtr->BaseAddress + XEM_ECR_OFFSET);
+ ControlReg &= ~(XEM_ECR_XMIT_RESET_MASK | XEM_ECR_RECV_RESET_MASK);
+ ControlReg |= (XEM_ECR_XMIT_ENABLE_MASK | XEM_ECR_RECV_ENABLE_MASK);
+
+ XIo_Out32(InstancePtr->BaseAddress + XEM_ECR_OFFSET, ControlReg);
+
+ /*
+ * If configured with scatter-gather DMA and not polled, restart the
+ * DMA channels in case there are buffers ready to be sent or received into.
+ * The DMA SgStart function uses data that can be modified during interrupt
+ * context, so a critical section is required here.
+ */
+ if ((XEmac_mIsSgDma(InstancePtr)) && (!InstancePtr->IsPolled)) {
+ XIIF_V123B_GINTR_DISABLE(InstancePtr->BaseAddress);
+
+ /*
+ * The only error we care about is if the list has not yet been
+ * created, or on receive, if no buffer descriptors have been
+ * added yet (the list is empty). Other errors are benign at this point.
+ */
+ Result = XDmaChannel_SgStart(&InstancePtr->RecvChannel);
+ if ((Result == XST_DMA_SG_NO_LIST) ||
+ (Result == XST_DMA_SG_LIST_EMPTY)) {
+ /* re-enable global interrupts before bailing out */
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ return Result;
+ }
+
+ Result = XDmaChannel_SgStart(&InstancePtr->SendChannel);
+ if (Result == XST_DMA_SG_NO_LIST) {
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ return Result;
+ }
+
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ }
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Stop the Ethernet MAC as follows:
+* - If the device is configured with scatter-gather DMA, stop the DMA
+* channels (wait for acknowledgment of stop)
+* - Disable the transmitter and receiver
+* - Disable interrupts if not in polled mode (the higher layer software is
+* responsible for disabling interrupts at the interrupt controller)
+*
+* The PHY is left enabled after a Stop is called.
+*
+* If the device is configured for scatter-gather DMA, the DMA engine stops at
+* the next buffer descriptor in its list. The remaining descriptors in the list
+* are not removed, so anything in the list will be transmitted or received when
+* the device is restarted. The side effect of doing this is that the last
+* buffer descriptor processed by the DMA engine before stopping may not be the
+* last descriptor in the Ethernet frame. So when the device is restarted, a
+* partial frame (i.e., a bad frame) may be transmitted/received. This is only a
+* concern if a frame can span multiple buffer descriptors, which is dependent
+* on the size of the network buffers.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if the device was stopped successfully
+* - XST_DEVICE_IS_STOPPED if the device is already stopped
+*
+* @note
+*
+* This function makes use of internal resources that are shared between the
+* Start, Stop, and SetOptions functions. So if one task might be setting device
+* options while another is trying to start the device, the user is required to
+* provide protection of this shared data (typically using a semaphore).
+*
+******************************************************************************/
+int XEmac_Stop(XEmac * InstancePtr)
+{
+ u32 ControlReg;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * If the device is already stopped, do nothing but return a status
+ * indicating so
+ */
+ if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STOPPED;
+ }
+
+ /*
+ * If configured for scatter-gather DMA, stop the DMA channels. Ignore
+ * the XST_DMA_SG_IS_STOPPED return code. There is a critical section
+ * here between SgStart and SgStop, and SgStart can be called in interrupt
+ * context, so disable interrupts while calling SgStop.
+ */
+ if (XEmac_mIsSgDma(InstancePtr)) {
+ XBufDescriptor *BdTemp; /* temporary descriptor pointer */
+
+ XIIF_V123B_GINTR_DISABLE(InstancePtr->BaseAddress);
+
+ (void) XDmaChannel_SgStop(&InstancePtr->SendChannel, &BdTemp);
+ (void) XDmaChannel_SgStop(&InstancePtr->RecvChannel, &BdTemp);
+
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ }
+
+ /*
+ * Disable the transmitter and receiver. There is no critical section
+ * here since this register is not modified during interrupt context.
+ */
+ ControlReg = XIo_In32(InstancePtr->BaseAddress + XEM_ECR_OFFSET);
+ ControlReg &= ~(XEM_ECR_XMIT_ENABLE_MASK | XEM_ECR_RECV_ENABLE_MASK);
+ XIo_Out32(InstancePtr->BaseAddress + XEM_ECR_OFFSET, ControlReg);
+
+ /*
+ * If not in polled mode, disable interrupts for IPIF (includes MAC and
+ * DMAs)
+ */
+ if (!InstancePtr->IsPolled) {
+ XIIF_V123B_GINTR_DISABLE(InstancePtr->BaseAddress);
+ }
+
+ InstancePtr->IsStarted = 0;
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Reset the Ethernet MAC. This is a graceful reset in that the device is stopped
+* first. Resets the DMA channels, the FIFOs, the transmitter, and the receiver.
+* The PHY is not reset. Any frames in the scatter-gather descriptor lists will
+* remain in the lists. The side effect of doing this is that after a reset and
+* following a restart of the device, frames that were in the list before the
+* reset may be transmitted or received. Reset must only be called after the
+* driver has been initialized.
+*
+* The driver is also taken out of polled mode if polled mode was set. The user
+* is responsible for re-configuring the driver into polled mode after the
+* reset if desired.
+*
+* The configuration after this reset is as follows:
+* - Half duplex
+* - Disabled transmitter and receiver
+* - Enabled PHY (the PHY is not reset)
+* - MAC transmitter does pad insertion, FCS insertion, and source address
+* overwrite.
+* - MAC receiver does not strip padding or FCS
+* - Interframe Gap as recommended by IEEE Std. 802.3 (96 bit times)
+* - Unicast addressing enabled
+* - Broadcast addressing enabled
+* - Multicast addressing disabled (addresses are preserved)
+* - Promiscuous addressing disabled
+* - Default packet threshold and packet wait bound register values for
+* scatter-gather DMA operation
+* - MAC address of all zeros
+* - Non-polled mode
+*
+* The upper layer software is responsible for re-configuring (if necessary)
+* and restarting the MAC after the reset. Note that the PHY is not reset. PHY
+* control is left to the upper layer software. Note also that driver statistics
+* are not cleared on reset. It is up to the upper layer software to clear the
+* statistics if needed.
+*
+* When a reset is required due to an internal error, the driver notifies the
+* upper layer software of this need through the ErrorHandler callback and
+* specific status codes. The upper layer software is responsible for calling
+* this Reset function and then re-configuring the device.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+* @internal
+*
+* The reset is accomplished by setting the IPIF reset register. This takes
+* care of resetting all hardware blocks, including the MAC.
+*
+******************************************************************************/
+void XEmac_Reset(XEmac * InstancePtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Stop the device first
+ */
+ (void) XEmac_Stop(InstancePtr);
+
+ /*
+ * Take the driver out of polled mode
+ */
+ InstancePtr->IsPolled = FALSE;
+
+ /*
+ * Reset the entire IPIF at once. If we choose someday to reset each
+ * hardware block separately, the reset should occur in the direction of
+ * data flow. For example, for the send direction the reset order is DMA
+ * first, then FIFO, then the MAC transmitter.
+ */
+ XIIF_V123B_RESET(InstancePtr->BaseAddress);
+
+ if (XEmac_mIsSgDma(InstancePtr)) {
+ /*
+ * After reset, configure the scatter-gather DMA packet threshold and
+ * packet wait bound registers to default values. Ignore the return
+ * values of these functions since they only return error if the device
+ * is not stopped.
+ */
+ (void) XEmac_SetPktThreshold(InstancePtr, XEM_SEND,
+ XEM_SGDMA_DFT_THRESHOLD);
+ (void) XEmac_SetPktThreshold(InstancePtr, XEM_RECV,
+ XEM_SGDMA_DFT_THRESHOLD);
+ (void) XEmac_SetPktWaitBound(InstancePtr, XEM_SEND,
+ XEM_SGDMA_DFT_WAITBOUND);
+ (void) XEmac_SetPktWaitBound(InstancePtr, XEM_RECV,
+ XEM_SGDMA_DFT_WAITBOUND);
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the MAC address for this driver/device. The address is a 48-bit value.
+* The device must be stopped before calling this function.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param AddressPtr is a pointer to a 6-byte MAC address.
+*
+* @return
+*
+* - XST_SUCCESS if the MAC address was set successfully
+* - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmac_SetMacAddress(XEmac * InstancePtr, u8 *AddressPtr)
+{
+ u32 MacAddr = 0;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(AddressPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * The device must be stopped before setting the MAC address
+ */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STARTED;
+ }
+
+ /*
+ * Set the device station address high and low registers
+ */
+ MacAddr = (AddressPtr[0] << 8) | AddressPtr[1];
+ XIo_Out32(InstancePtr->BaseAddress + XEM_SAH_OFFSET, MacAddr);
+
+ MacAddr = (AddressPtr[2] << 24) | (AddressPtr[3] << 16) |
+ (AddressPtr[4] << 8) | AddressPtr[5];
+
+ XIo_Out32(InstancePtr->BaseAddress + XEM_SAL_OFFSET, MacAddr);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Get the MAC address for this driver/device.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param BufferPtr is an output parameter, and is a pointer to a buffer into
+* which the current MAC address will be copied. The buffer must be at
+* least 6 bytes.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_GetMacAddress(XEmac * InstancePtr, u8 *BufferPtr)
+{
+ u32 MacAddrHi;
+ u32 MacAddrLo;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(BufferPtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ MacAddrHi = XIo_In32(InstancePtr->BaseAddress + XEM_SAH_OFFSET);
+ MacAddrLo = XIo_In32(InstancePtr->BaseAddress + XEM_SAL_OFFSET);
+
+ BufferPtr[0] = (u8) (MacAddrHi >> 8);
+ BufferPtr[1] = (u8) MacAddrHi;
+ BufferPtr[2] = (u8) (MacAddrLo >> 24);
+ BufferPtr[3] = (u8) (MacAddrLo >> 16);
+ BufferPtr[4] = (u8) (MacAddrLo >> 8);
+ BufferPtr[5] = (u8) MacAddrLo;
+}
+
+/******************************************************************************/
+/**
+*
+* Configure DMA capabilities.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if successful initialization of DMA
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static int ConfigureDma(XEmac * InstancePtr)
+{
+ int Result;
+
+ /*
+ * Initialize the DMA channels with their base addresses. We assume
+ * scatter-gather DMA is the only possible configuration. Descriptor space
+ * will need to be set later by the upper layer.
+ */
+ Result = XDmaChannel_Initialize(&InstancePtr->RecvChannel,
+ InstancePtr->BaseAddress +
+ XEM_DMA_RECV_OFFSET);
+ if (Result != XST_SUCCESS) {
+ return Result;
+ }
+
+ Result = XDmaChannel_Initialize(&InstancePtr->SendChannel,
+ InstancePtr->BaseAddress +
+ XEM_DMA_SEND_OFFSET);
+
+ return Result;
+}
+
+/******************************************************************************/
+/**
+*
+* Configure the send and receive FIFO components with their base addresses
+* and interrupt masks. Currently the base addresses are defined constants.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* XST_SUCCESS if successful initialization of the packet FIFOs
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static int ConfigureFifo(XEmac * InstancePtr)
+{
+ int Result;
+
+ /*
+ * Return status from the packet FIFOs initialization is ignored since
+ * they always return success.
+ */
+ Result = XPacketFifoV200a_Initialize(&InstancePtr->RecvFifo,
+ InstancePtr->BaseAddress +
+ XEM_PFIFO_RXREG_OFFSET,
+ InstancePtr->BaseAddress +
+ XEM_PFIFO_RXDATA_OFFSET);
+ if (Result != XST_SUCCESS) {
+ return Result;
+ }
+
+ Result = XPacketFifoV200a_Initialize(&InstancePtr->SendFifo,
+ InstancePtr->BaseAddress +
+ XEM_PFIFO_TXREG_OFFSET,
+ InstancePtr->BaseAddress +
+ XEM_PFIFO_TXDATA_OFFSET);
+ return Result;
+}
+
+/******************************************************************************/
+/**
+*
+* This is a stub for the scatter-gather send and recv callbacks. The stub
+* is here in case the upper layers forget to set the handlers.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+* @param BdPtr is a pointer to the first buffer descriptor in a list
+* @param NumBds is the number of descriptors in the list.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void StubSgHandler(void *CallBackRef, XBufDescriptor * BdPtr, u32 NumBds)
+{
+ XASSERT_VOID_ALWAYS();
+}
+
+/******************************************************************************/
+/**
+*
+* This is a stub for the non-DMA send and recv callbacks. The stub is here in
+* case the upper layers forget to set the handlers.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void StubFifoHandler(void *CallBackRef)
+{
+ XASSERT_VOID_ALWAYS();
+}
+
+/******************************************************************************/
+/**
+*
+* This is a stub for the asynchronous error callback. The stub is here in
+* case the upper layers forget to set the handler.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+* @param ErrorCode is the Xilinx error code, indicating the cause of the error
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void StubErrorHandler(void *CallBackRef, int ErrorCode)
+{
+ XASSERT_VOID_ALWAYS();
+}
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac.h
+*
+* The Xilinx Ethernet driver component. This component supports the Xilinx
+* Ethernet 10/100 MAC (EMAC).
+*
+* The Xilinx Ethernet 10/100 MAC supports the following features:
+* - Simple and scatter-gather DMA operations, as well as simple memory
+* mapped direct I/O interface (FIFOs)
+* - Media Independent Interface (MII) for connection to external
+* 10/100 Mbps PHY transceivers
+* - MII management control reads and writes with MII PHYs
+* - Independent internal transmit and receive FIFOs
+* - CSMA/CD compliant operations for half-duplex modes
+* - Programmable PHY reset signal
+* - Unicast, broadcast, multicast, and promiscuous address filtering
+* - Reception of any address that matches a CAM entry.
+* - Internal loopback
+* - Automatic source address insertion or overwrite (programmable)
+* - Automatic FCS insertion and stripping (programmable)
+* - Automatic pad insertion and stripping (programmable)
+* - Pause frame (flow control) detection in full-duplex mode
+* - Programmable interframe gap
+* - VLAN frame support
+* - Pause frame support
+* - Jumbo frame support
+* - Dynamic Re-alignment Engine (DRE) support handled automatically
+*
+* The device driver supports all the features listed above.
+*
+* <b>Driver Description</b>
+*
+* The device driver enables higher layer software (e.g., an application) to
+* communicate to the EMAC. The driver handles transmission and reception of
+* Ethernet frames, as well as configuration of the controller. It does not
+* handle protocol stack functionality such as Link Layer Control (LLC) or the
+* Address Resolution Protocol (ARP). The protocol stack that makes use of the
+* driver handles this functionality. This implies that the driver is simply a
+* pass-through mechanism between a protocol stack and the EMAC. A single device
+* driver can support multiple EMACs.
+*
+* The driver is designed for a zero-copy buffer scheme. That is, the driver will
+* not copy buffers. This avoids potential throughput bottlenecks within the
+* driver.
+*
+* Since the driver is a simple pass-through mechanism between a protocol stack
+* and the EMAC, no assembly or disassembly of Ethernet frames is done at the
+* driver-level. This assumes that the protocol stack passes a correctly
+* formatted Ethernet frame to the driver for transmission, and that the driver
+* does not validate the contents of an incoming frame.
+*
+* <b>Buffer Alignment</b>
+*
+* It is important to note that when using direct FIFO communication (either
+* polled or interrupt-driven), packet buffers must be 32-bit aligned. When
+* using DMA without DRE and the OPB 10/100 Ethernet core, packet buffers
+* must be 32-bit aligned. When using DMA without DRE and the PLB 10/100
+* Ethernet core, packet buffers must be 64-bit aligned.
+*
+* When using scatter-gather DMA, the buffer descriptors must be 32-bit
+* aligned (for either the OPB or the PLB core). The driver may not enforce
+* this alignment so it is up to the user to guarantee the proper alignment.
+*
+* When DRE is available in the DMA engine, only the buffer descriptors must
+* be aligned, the actual buffers do not need to be aligned to any particular
+* addressing convention, the DRE takes care of that in hardware.
+*
+* <b>Receive Address Filtering</b>
+*
+* The device can be set to accept frames whose destination MAC address:
+*
+* - Match the station MAC address (see XEmac_SetMacAddress())
+* - Match the broadcast MAC address (see XEM_BROADCAST_OPTION)
+* - Match any multicast MAC address (see XEM_MULTICAST_OPTION)
+* - Match any one of the 64 possible CAM addresses (see XEmac_MulticastAdd()
+* and XEM_MULTICAST_CAM_OPTION). The CAM is optional.
+* - Match any MAC address (see XEM_PROMISC_OPTION)
+*
+* <b>PHY Communication</b>
+*
+* The driver provides rudimentary read and write functions to allow the higher
+* layer software to access the PHY. The EMAC provides MII registers for the
+* driver to access. This management interface can be parameterized away in the
+* FPGA implementation process. If this is the case, the PHY read and write
+* functions of the driver return XST_NO_FEATURE.
+*
+* External loopback is usually supported at the PHY. It is up to the user to
+* turn external loopback on or off at the PHY. The driver simply provides pass-
+* through functions for configuring the PHY. The driver does not read, write,
+* or reset the PHY on its own. All control of the PHY must be done by the user.
+*
+* <b>Asynchronous Callbacks</b>
+*
+* The driver services interrupts and passes Ethernet frames to the higher layer
+* software through asynchronous callback functions. When using the driver
+* directly (i.e., not with the RTOS protocol stack), the higher layer
+* software must register its callback functions during initialization. The
+* driver requires callback functions for received frames, for confirmation of
+* transmitted frames, and for asynchronous errors.
+*
+* <b>Interrupts</b>
+*
+* The driver has no dependencies on the interrupt controller. The driver
+* provides two interrupt handlers. XEmac_IntrHandlerDma() handles interrupts
+* when the EMAC is configured with scatter-gather DMA. XEmac_IntrHandlerFifo()
+* handles interrupts when the EMAC is configured for direct FIFO I/O or simple
+* DMA. Either of these routines can be connected to the system interrupt
+* controller by the user.
+*
+* <b>Interrupt Frequency</b>
+*
+* When the EMAC is configured with scatter-gather DMA, the frequency of
+* interrupts can be controlled with the interrupt coalescing features of the
+* scatter-gather DMA engine. The frequency of interrupts can be adjusted using
+* the driver API functions for setting the packet count threshold and the packet
+* wait bound values.
+*
+* The scatter-gather DMA engine only interrupts when the packet count threshold
+* is reached, instead of interrupting for each packet. A packet is a generic
+* term used by the scatter-gather DMA engine, and is equivalent to an Ethernet
+* frame in our case.
+*
+* The packet wait bound is a timer value used during interrupt coalescing to
+* trigger an interrupt when not enough packets have been received to reach the
+* packet count threshold.
+*
+* These values can be tuned by the user to meet their needs. If there appear to
+* be interrupt latency problems or delays in packet arrival that are longer than
+* might be expected, the user should verify that the packet count threshold is
+* set low enough to receive interrupts before the wait bound timer goes off.
+*
+* <b>Device Reset</b>
+*
+* Some errors that can occur in the device require a device reset. These errors
+* are listed in the XEmac_SetErrorHandler() function header. The user's error
+* handler is responsible for resetting the device and re-configuring it based on
+* its needs (the driver does not save the current configuration). When
+* integrating into an RTOS, these reset and re-configure obligations are
+* taken care of by the Xilinx adapter software if it exists for that RTOS.
+*
+* <b>Device Configuration</b>
+*
+* The device can be configured in various ways during the FPGA implementation
+* process. Configuration parameters are stored in the xemac_g.c files.
+* A table is defined where each entry contains configuration information
+* for an EMAC device. This information includes such things as the base address
+* of the memory-mapped device, the base addresses of IPIF, DMA, and FIFO modules
+* within the device, and whether the device has DMA, counter registers,
+* multicast support, MII support, and flow control.
+*
+* The driver tries to use the features built into the device. So if, for
+* example, the hardware is configured with scatter-gather DMA, the driver
+* expects to start the scatter-gather channels and expects that the user has set
+* up the buffer descriptor lists already. If the user expects to use the driver
+* in a mode different than how the hardware is configured, the user should
+* modify the configuration table to reflect the mode to be used. Modifying the
+* configuration table is a workaround for now until we get some experience with
+* how users are intending to use the hardware in its different configurations.
+* For example, if the hardware is built with scatter-gather DMA but the user is
+* intending to use only simple DMA, the user either needs to modify the config
+* table as a workaround or rebuild the hardware with only simple DMA. The
+* recommendation at this point is to build the hardware with the features you
+* intend to use. If you're inclined to modify the table, do so before the call
+* to XEmac_Initialize(). Here is a snippet of code that changes a device to
+* simple DMA (the hardware needs to have DMA for this to work of course):
+* <pre>
+* XEmac_Config *ConfigPtr;
+*
+* ConfigPtr = XEmac_LookupConfig(DeviceId);
+* ConfigPtr->IpIfDmaConfig = XEM_CFG_SIMPLE_DMA;
+* </pre>
+*
+* <b>Simple DMA</b>
+*
+* Simple DMA is supported through the FIFO functions, FifoSend and FifoRecv, of
+* the driver (i.e., there is no separate interface for it). The driver makes use
+* of the DMA engine for a simple DMA transfer if the device is configured with
+* DMA, otherwise it uses the FIFOs directly. While the simple DMA interface is
+* therefore transparent to the user, the caching of network buffers is not.
+* If the device is configured with DMA and the FIFO interface is used, the user
+* must ensure that the network buffers are not cached or are cache coherent,
+* since DMA will be used to transfer to and from the Emac device. If the device
+* is configured with DMA and the user really wants to use the FIFOs directly,
+* the user should rebuild the hardware without DMA. If unable to do this, there
+* is a workaround (described above in Device Configuration) to modify the
+* configuration table of the driver to fake the driver into thinking the device
+* has no DMA. A code snippet follows:
+* <pre>
+* XEmac_Config *ConfigPtr;
+*
+* ConfigPtr = XEmac_LookupConfig(DeviceId);
+* ConfigPtr->IpIfDmaConfig = XEM_CFG_NO_DMA;
+* </pre>
+*
+* <b>Asserts</b>
+*
+* Asserts are used within all Xilinx drivers to enforce constraints on argument
+* values. Asserts can be turned off on a system-wide basis by defining, at
+* compile time, the NDEBUG identifier. By default, asserts are turned on and it
+* is recommended that users leave asserts on during development.
+*
+* <b>Building the driver</b>
+*
+* The XEmac driver is composed of several source files. Why so many? This
+* allows the user to build and link only those parts of the driver that are
+* necessary. Since the EMAC hardware can be configured in various ways (e.g.,
+* with or without DMA), the driver too can be built with varying features.
+* For the most part, this means that besides always linking in xemac.c, you
+* link in only the driver functionality you want. Some of the choices you have
+* are polled vs. interrupt, interrupt with FIFOs only vs. interrupt with DMA,
+* self-test diagnostics, and driver statistics. Note that currently the DMA code
+* must be linked in, even if you don't have DMA in the device.
+*
+* @note
+*
+* Xilinx drivers are typically composed of two components, one is the driver
+* and the other is the adapter. The driver is independent of OS and processor
+* and is intended to be highly portable. The adapter is OS-specific and
+* facilitates communication between the driver and an OS.
+* <br><br>
+* This driver is intended to be RTOS and processor independent. It works
+* with physical addresses only. Any needs for dynamic memory management,
+* threads or thread mutual exclusion, virtual memory, or cache control must
+* be satisfied by the layer above this driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00b rpm 10/08/02 Replaced HasSgDma boolean with IpifDmaConfig enumerated
+* configuration parameter
+* 1.00c rpm 12/05/02 New version includes support for simple DMA and the delay
+* argument to SgSend
+* 1.00c rpm 02/03/03 The XST_DMA_SG_COUNT_EXCEEDED return code was removed
+* from SetPktThreshold in the internal DMA driver. Also
+* avoided compiler warnings by initializing Result in the
+* DMA interrupt service routines.
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver. Also supports multicast option.
+* 1.00e rmm 04/06/04 SGEND option added, Zero instance memory on init. Changed
+* SG DMA callback invocation from once per packet to once
+* for all packets received for an interrupt event. Added
+* XEmac_GetSgRecvFreeDesc() and GetSgSendFreeDesc()
+* functions. Moved some IFG and PHY constants to xemac_l.h.
+* 1.00f rmm 10/19/04 Added programmable CAM address filtering. Added jumbo
+* frame support. Added XEmac_PhyReset() function.
+* 1.01a ecm 09/01/05 Added DRE support through Control words in instance which
+* are set at initialization.
+* Added the config structure items to support separate
+* Tx and Rx capabilities..
+* 1.01a wgr 09/14/06 Ported to Linux 2.6
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XEMAC_H /* prevent circular inclusions */
+#define XEMAC_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xpacket_fifo_v2_00_a.h" /* Uses v2.00a of Packet Fifo */
+#include "xdma_channel.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Device information
+ */
+#define XEM_DEVICE_NAME "xemac"
+#define XEM_DEVICE_DESC "Xilinx Ethernet 10/100 MAC"
+
+/** @name Configuration options
+ *
+ * Device configuration options (see the XEmac_SetOptions() and
+ * XEmac_GetOptions() for information on how to use these options)
+ * @{
+ */
+#define XEM_UNICAST_OPTION 0x00000001UL /**< Unicast addressing
+ (defaults on) */
+#define XEM_BROADCAST_OPTION 0x00000002UL /**< Broadcast addressing
+ (defaults on) */
+#define XEM_PROMISC_OPTION 0x00000004UL /**< Promiscuous addressing
+ (defaults off) */
+#define XEM_FDUPLEX_OPTION 0x00000008UL /**< Full duplex mode
+ (defaults off) */
+#define XEM_POLLED_OPTION 0x00000010UL /**< Polled mode (defaults off) */
+#define XEM_LOOPBACK_OPTION 0x00000020UL /**< Internal loopback mode
+ (defaults off) */
+#define XEM_MULTICAST_OPTION 0x00000040UL /**< Multicast address reception
+ (defaults off) */
+#define XEM_FLOW_CONTROL_OPTION 0x00000080UL /**< Interpret pause frames in
+ full duplex mode (defaults
+ off) */
+#define XEM_INSERT_PAD_OPTION 0x00000100UL /**< Pad short frames on transmit
+ (defaults on) */
+#define XEM_INSERT_FCS_OPTION 0x00000200UL /**< Insert FCS (CRC) on transmit
+ (defaults on) */
+#define XEM_INSERT_ADDR_OPTION 0x00000400UL /**< Insert source address on
+ transmit (defaults on) */
+#define XEM_OVWRT_ADDR_OPTION 0x00000800UL /**< Overwrite source address on
+ transmit. This is only used
+ if source address
+ insertion is on (defaults on) */
+#define XEM_NO_SGEND_INT_OPTION 0x00001000UL /**< Disables the SGEND interrupt
+ with SG DMA. Setting this
+ option to ON may help bulk
+ data transfer performance
+ when utilizing higher packet
+ threshold counts on slower
+ systems (default is off) */
+#define XEM_STRIP_PAD_FCS_OPTION 0x00002000UL /**< Strip FCS and padding from
+ received frames (defaults off) */
+#define XEM_JUMBO_OPTION 0x00004000UL /**< Allow reception of Jumbo frames,
+ transmission of Jumbo frames is
+ always enabled.
+ (default is off) */
+#define XEM_MULTICAST_CAM_OPTION 0x00008000UL /**< Allow Rx address filtering
+ for multicast CAM entries
+ (default is off) */
+/*@}*/
+
+/*
+ * Some default values for interrupt coalescing within the scatter-gather
+ * DMA engine.
+ */
+#define XEM_SGDMA_DFT_THRESHOLD 1 /* Default pkt threshold */
+#define XEM_SGDMA_MAX_THRESHOLD 255 /* Maximum pkt threshold */
+#define XEM_SGDMA_DFT_WAITBOUND 5 /* Default pkt wait bound (msec) */
+#define XEM_SGDMA_MAX_WAITBOUND 1023 /* Maximum pkt wait bound (msec) */
+
+/*
+ * Direction identifiers. These are used for setting values like packet
+ * thresholds and wait bound for specific channels
+ */
+#define XEM_SEND 1
+#define XEM_RECV 2
+
+/*
+ * Arguments to SgSend function to indicate whether to hold off starting
+ * the scatter-gather engine.
+ */
+#define XEM_SGDMA_NODELAY 0 /* start SG DMA immediately */
+#define XEM_SGDMA_DELAY 1 /* do not start SG DMA */
+
+/*
+ * Constants to determine the configuration of the hardware device. They are
+ * used to allow the driver to verify it can operate with the hardware.
+ */
+#define XEM_CFG_NO_IPIF 0 /* Not supported by the driver */
+#define XEM_CFG_NO_DMA 1 /* No DMA */
+#define XEM_CFG_SIMPLE_DMA 2 /* Simple DMA */
+#define XEM_CFG_DMA_SG 3 /* DMA scatter gather */
+
+#define XEM_MULTI_CAM_ENTRIES 64 /* Number of storable addresses in
+ the CAM */
+
+/*
+ * The next few constants help upper layers determine the size of memory
+ * pools used for Ethernet buffers and descriptor lists.
+ */
+#define XEM_MAC_ADDR_SIZE 6 /* six-byte MAC address */
+#define XEM_MTU 1500 /* max size of Ethernet frame */
+#define XEM_JUMBO_MTU 8982 /* max payload size of jumbo frame */
+#define XEM_HDR_SIZE 14 /* size of Ethernet header */
+#define XEM_HDR_VLAN_SIZE 18 /* size of Ethernet header with VLAN */
+#define XEM_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
+#define XEM_MAX_FRAME_SIZE (XEM_MTU + XEM_HDR_SIZE + XEM_TRL_SIZE)
+#define XEM_MAX_VLAN_FRAME_SIZE (XEM_MTU + XEM_HDR_VLAN_SIZE + XEM_TRL_SIZE)
+#define XEM_MAX_JUMBO_FRAME_SIZE (XEM_JUMBO_MTU + XEM_HDR_SIZE + XEM_TRL_SIZE)
+
+/*
+ * Define a default number of send and receive buffers
+ */
+#define XEM_MIN_RECV_BUFS 32 /* minimum # of recv buffers */
+#define XEM_DFT_RECV_BUFS 64 /* default # of recv buffers */
+
+#define XEM_MIN_SEND_BUFS 16 /* minimum # of send buffers */
+#define XEM_DFT_SEND_BUFS 32 /* default # of send buffers */
+
+#define XEM_MIN_BUFFERS (XEM_MIN_RECV_BUFS + XEM_MIN_SEND_BUFS)
+#define XEM_DFT_BUFFERS (XEM_DFT_RECV_BUFS + XEM_DFT_SEND_BUFS)
+
+/*
+ * Define the number of send and receive buffer descriptors, used for
+ * scatter-gather DMA
+ */
+#define XEM_MIN_RECV_DESC 16 /* minimum # of recv descriptors */
+#define XEM_DFT_RECV_DESC 32 /* default # of recv descriptors */
+
+#define XEM_MIN_SEND_DESC 8 /* minimum # of send descriptors */
+#define XEM_DFT_SEND_DESC 16 /* default # of send descriptors */
+
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * Ethernet statistics (see XEmac_GetStats() and XEmac_ClearStats())
+ */
+typedef struct {
+ u32 XmitFrames; /**< Number of frames transmitted */
+ u32 XmitBytes; /**< Number of bytes transmitted */
+ u32 XmitLateCollisionErrors;
+ /**< Number of transmission failures
+ due to late collisions */
+ u32 XmitExcessDeferral; /**< Number of transmission failures
+ due to excess collision deferrals */
+ u32 XmitOverrunErrors; /**< Number of transmit overrun errors */
+ u32 XmitUnderrunErrors; /**< Number of transmit underrun errors */
+ u32 RecvFrames; /**< Number of frames received */
+ u32 RecvBytes; /**< Number of bytes received */
+ u32 RecvFcsErrors; /**< Number of frames discarded due
+ to FCS errors */
+ u32 RecvAlignmentErrors; /**< Number of frames received with
+ alignment errors */
+ u32 RecvOverrunErrors; /**< Number of frames discarded due
+ to overrun errors */
+ u32 RecvUnderrunErrors; /**< Number of recv underrun errors */
+ u32 RecvMissedFrameErrors;
+ /**< Number of frames missed by MAC */
+ u32 RecvCollisionErrors; /**< Number of frames discarded due
+ to collisions */
+ u32 RecvLengthFieldErrors;
+ /**< Number of frames discarded with
+ invalid length field */
+ u32 RecvShortErrors; /**< Number of short frames discarded */
+ u32 RecvLongErrors; /**< Number of long frames discarded */
+ u32 DmaErrors; /**< Number of DMA errors since init */
+ u32 FifoErrors; /**< Number of FIFO errors since init */
+ u32 RecvInterrupts; /**< Number of receive interrupts */
+ u32 XmitInterrupts; /**< Number of transmit interrupts */
+ u32 EmacInterrupts; /**< Number of MAC (device) interrupts */
+ u32 TotalIntrs; /**< Total interrupts */
+} XEmac_Stats;
+
+/**
+ * This typedef contains configuration information for a device.
+ */
+typedef struct {
+ u16 DeviceId; /**< Unique ID of device */
+ u32 BaseAddress; /**< Register base address */
+ u32 HasCounters; /**< Does device have counters? */
+ u8 IpIfDmaConfig; /**< IPIF/DMA hardware configuration */
+ u32 HasMii; /**< Does device support MII? */
+ u32 HasCam; /**< Does device have multicast CAM */
+ u32 HasJumbo; /**< Can device transfer jumbo frames */
+ u32 TxDre; /**< Has data realignment engine on TX channel */
+ u32 RxDre; /**< Has data realignment engine on RX channel */
+ u32 TxHwCsum; /**< Has checksum offload on TX channel */
+ u32 RxHwCsum; /**< Has checksum offload on RX channel */
+} XEmac_Config;
+
+
+/** @name Typedefs for callbacks
+ * Callback functions.
+ * @{
+ */
+/**
+ * Callback when data is sent or received with scatter-gather DMA.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer
+ * when setting the callback functions, and passed back to the upper
+ * layer when the callback is invoked.
+ * @param BdPtr is a pointer to the first buffer descriptor in a list of
+ * buffer descriptors.
+ * @param NumBds is the number of buffer descriptors in the list pointed
+ * to by BdPtr.
+ */
+typedef void (*XEmac_SgHandler) (void *CallBackRef, XBufDescriptor * BdPtr,
+ u32 NumBds);
+
+/**
+ * Callback when data is sent or received with direct FIFO communication or
+ * simple DMA. The user typically defines two callbacks, one for send and one
+ * for receive.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer
+ * when setting the callback functions, and passed back to the upper
+ * layer when the callback is invoked.
+ */
+typedef void (*XEmac_FifoHandler) (void *CallBackRef);
+
+/**
+ * Callback when an asynchronous error occurs.
+ *
+ * @param CallBackRef is a callback reference passed in by the upper layer
+ * when setting the callback functions, and passed back to the upper
+ * layer when the callback is invoked.
+ * @param ErrorCode is a Xilinx error code defined in xstatus.h. Also see
+ * XEmac_SetErrorHandler() for a description of possible errors.
+ */
+typedef void (*XEmac_ErrorHandler) (void *CallBackRef, int ErrorCode);
+
+/*@}*/
+
+/**
+ * The XEmac driver instance data. The user is required to allocate a
+ * variable of this type for every EMAC device in the system. A pointer
+ * to a variable of this type is then passed to the driver API functions.
+ */
+typedef struct {
+ XEmac_Config Config; /* Configuration table entry */
+
+ u32 BaseAddress; /* Base address (of IPIF) */
+ u32 PhysAddress; /* Base address, physical (of IPIF) */
+ u32 IsStarted; /* Device is currently started */
+ u32 IsReady; /* Device is initialized and ready */
+ u32 TxDmaControlWord; /* TX SGDMA channel control word */
+ u32 RxDmaControlWord; /* RX SGDMA channel control word */
+ u32 IsPolled; /* Device is in polled mode */
+ u32 HasMulticastHash; /* Does device support multicast hash table? */
+
+ XEmac_Stats Stats;
+ XPacketFifoV200a RecvFifo; /* FIFO used to receive frames */
+ XPacketFifoV200a SendFifo; /* FIFO used to send frames */
+
+ /*
+ * Callbacks
+ */
+ XEmac_FifoHandler FifoRecvHandler; /* for non-DMA/simple DMA interrupts */
+ void *FifoRecvRef;
+ XEmac_FifoHandler FifoSendHandler; /* for non-DMA/simple DMA interrupts */
+ void *FifoSendRef;
+ XEmac_ErrorHandler ErrorHandler; /* for asynchronous errors */
+ void *ErrorRef;
+
+ XDmaChannel RecvChannel; /* DMA receive channel driver */
+ XDmaChannel SendChannel; /* DMA send channel driver */
+ u32 IsSgEndDisable; /* Does SG DMA enable SGEND interrupt */
+
+ XEmac_SgHandler SgRecvHandler; /* callback for scatter-gather DMA */
+ void *SgRecvRef;
+ XEmac_SgHandler SgSendHandler; /* callback for scatter-gather DMA */
+ void *SgSendRef;
+} XEmac;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is currently configured for
+* scatter-gather DMA.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured for scatter-gather DMA, or FALSE
+* if it is not.
+*
+* @note
+*
+* Signature: u32 XEmac_mIsSgDma(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsSgDma(InstancePtr) \
+ ((InstancePtr)->Config.IpIfDmaConfig == XEM_CFG_DMA_SG)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is currently configured for simple DMA.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured for simple DMA, or FALSE otherwise
+*
+* @note
+*
+* Signature: u32 XEmac_mIsSimpleDma(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsSimpleDma(InstancePtr) \
+ ((InstancePtr)->Config.IpIfDmaConfig == XEM_CFG_SIMPLE_DMA)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is currently configured with DMA (either
+* simple DMA or scatter-gather DMA)
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with DMA, or FALSE otherwise
+*
+* @note
+*
+* Signature: u32 XEmac_mIsDma(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsDma(InstancePtr) \
+ (XEmac_mIsSimpleDma(InstancePtr) || XEmac_mIsSgDma(InstancePtr))
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device has CAM option for storing additional
+* receive filters for multicast or unicast addresses.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with the CAM, or FALSE otherwise
+*
+* @note
+*
+* Signature: u32 XEmac_mHasCam(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mHasCam(InstancePtr) \
+ (((InstancePtr)->Config.HasCam == 1) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device has the MII option for communications
+* with a PHY.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with MII, or FALSE otherwise
+*
+* @note
+*
+* Signature: u32 XEmac_mHasMii(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mHasMii(InstancePtr) \
+ (((InstancePtr)->Config.HasMii == 1) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device has the option to transfer jumbo sized
+* frames.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with jumbo frame capability, or
+* FALSE otherwise
+*
+* @note
+*
+* Signature: u32 XEmac_mHasJumbo(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mHasJumbo(InstancePtr) \
+ (((InstancePtr)->Config.HasJumbo == 1) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with the Data Realignment
+* Engine (DRE)
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with TX DRE, or FALSE otherwise.
+* Note that earlier versions do not have DRE capability so this macro always
+* returns FALSE.
+*
+* @note
+*
+* Signature: u32 XEmac_mIsTxDre(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsTxDre(InstancePtr) \
+ (((InstancePtr)->Config.TxDre != 0) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with the Data Realignment
+* Engine (DRE)
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with RX DRE, or FALSE otherwise.
+* Note that earlier versions do not have DRE capability so this macro always
+* returns FALSE.
+*
+* @note
+*
+* Signature: u32 XEmac_mIsRxDre(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsRxDre(InstancePtr) \
+ (((InstancePtr)->Config.RxDre != 0) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with the Checksum offload
+* functionality
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with TX CSum, or FALSE otherwise.
+* Note that earlier versions do not have CSum capability so this macro always
+* returns FALSE.
+*
+* @note
+*
+* Signature: u32 XEmac_mIsTxHwCsum(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsTxHwCsum(InstancePtr) \
+ (((InstancePtr)->Config.TxHwCsum == 1) ? TRUE : FALSE)
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with the Checksum offload
+* functionality
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with RX CSum, or FALSE otherwise.
+* Note that earlier versions do not have CSum capability so this macro always
+* returns FALSE.
+*
+* @note
+*
+* Signature: u32 XEmac_mIsRxHwCsum(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mIsRxHwCsum(InstancePtr) \
+ (((InstancePtr)->Config.RxHwCsum == 1) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro enables the TxHwCsum for the EMAC
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* none
+*
+* @note
+*
+* Signature: void XEmac_mEnableTxHwCsum(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mEnableTxHwCsum(InstancePtr) \
+ ((InstancePtr)->TxDmaControlWord |= XDC_DMACR_CS_OFFLOAD_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This macro disables the TxHwCsum for the EMAC
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* none
+*
+* @note
+*
+* Signature: void XEmac_mDisableTxHwCsum(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mDisableTxHwCsum(InstancePtr) \
+ ((InstancePtr)->TxDmaControlWord &= ~XDC_DMACR_CS_OFFLOAD_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This macro disables the Global interrupt for the EMAC
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* none
+*
+* @note
+*
+* Signature: void XEmac_mDisableGIE(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mDisableGIE(InstancePtr) \
+ XIIF_V123B_GINTR_DISABLE((InstancePtr)->BaseAddress)
+
+/*****************************************************************************/
+/**
+*
+* This macro enables the Global interrupt for the EMAC
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* none
+*
+* @note
+*
+* Signature: void XEmac_mEnableGIE(XEmac *InstancePtr)
+*
+******************************************************************************/
+#define XEmac_mEnableGIE(InstancePtr) \
+ XIIF_V123B_GINTR_ENABLE((InstancePtr)->BaseAddress)
+
+/************************** Function Prototypes ******************************/
+
+int XEmac_CfgInitialize(XEmac * InstancePtr, XEmac_Config * CfgPtr,
+ u32 VirtualAddress);
+int XEmac_Start(XEmac * InstancePtr);
+int XEmac_Stop(XEmac * InstancePtr);
+void XEmac_Reset(XEmac * InstancePtr);
+
+/*
+ * Diagnostic functions in xemac_selftest.c
+ */
+int XEmac_SelfTest(XEmac * InstancePtr);
+
+/*
+ * Polled functions in xemac_polled.c
+ */
+int XEmac_PollSend(XEmac * InstancePtr, u8 *BufPtr, u32 ByteCount);
+int XEmac_PollRecv(XEmac * InstancePtr, u8 *BufPtr, u32 *ByteCountPtr);
+
+/*
+ * Interrupts with scatter-gather DMA functions in xemac_intr_dma.c
+ */
+int XEmac_SgSend(XEmac * InstancePtr, XBufDescriptor * BdPtr, int Delay);
+int XEmac_SgRecv(XEmac * InstancePtr, XBufDescriptor * BdPtr);
+int XEmac_SetPktThreshold(XEmac * InstancePtr, u32 Direction, u8 Threshold);
+int XEmac_GetPktThreshold(XEmac * InstancePtr, u32 Direction, u8 *ThreshPtr);
+int XEmac_SetPktWaitBound(XEmac * InstancePtr, u32 Direction, u32 TimerValue);
+int XEmac_GetPktWaitBound(XEmac * InstancePtr, u32 Direction, u32 *WaitPtr);
+int XEmac_SetSgRecvSpace(XEmac * InstancePtr, u32 *MemoryPtr,
+ u32 ByteCount, void *PhyPtr);
+int XEmac_SetSgSendSpace(XEmac * InstancePtr, u32 *MemoryPtr,
+ u32 ByteCount, void *PhyPtr);
+void XEmac_SetSgRecvHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_SgHandler FuncPtr);
+void XEmac_SetSgSendHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_SgHandler FuncPtr);
+unsigned XEmac_GetSgSendFreeDesc(XEmac * InstancePtr);
+unsigned XEmac_GetSgRecvFreeDesc(XEmac * InstancePtr);
+
+void XEmac_IntrHandlerDma(void *InstancePtr); /* interrupt handler */
+
+/*
+ * Interrupts with direct FIFO functions in xemac_intr_fifo.c. Also used
+ * for simple DMA.
+ */
+int XEmac_FifoSend(XEmac * InstancePtr, u8 *BufPtr, u32 ByteCount);
+int XEmac_FifoRecv(XEmac * InstancePtr, u8 *BufPtr, u32 *ByteCountPtr);
+void XEmac_SetFifoRecvHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_FifoHandler FuncPtr);
+void XEmac_SetFifoSendHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_FifoHandler FuncPtr);
+
+void XEmac_IntrHandlerFifo(void *InstancePtr); /* interrupt handler */
+
+/*
+ * General interrupt-related functions in xemac_intr.c
+ */
+void XEmac_SetErrorHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_ErrorHandler FuncPtr);
+
+/*
+ * MAC configuration in xemac_options.c
+ */
+int XEmac_SetOptions(XEmac * InstancePtr, u32 OptionFlag);
+u32 XEmac_GetOptions(XEmac * InstancePtr);
+int XEmac_SetMacAddress(XEmac * InstancePtr, u8 *AddressPtr);
+void XEmac_GetMacAddress(XEmac * InstancePtr, u8 *BufferPtr);
+int XEmac_SetInterframeGap(XEmac * InstancePtr, u8 Part1, u8 Part2);
+void XEmac_GetInterframeGap(XEmac * InstancePtr, u8 *Part1Ptr, u8 *Part2Ptr);
+
+/*
+ * Multicast functions in xemac_multicast.c
+ */
+int XEmac_MulticastAdd(XEmac * InstancePtr, u8 *AddressPtr, int Entry);
+int XEmac_MulticastClear(XEmac * InstancePtr, int Entry);
+
+/*
+ * PHY configuration in xemac_phy.c
+ */
+void XEmac_PhyReset(XEmac * InstancePtr);
+int XEmac_PhyRead(XEmac * InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 *PhyDataPtr);
+int XEmac_PhyWrite(XEmac * InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 PhyData);
+
+/*
+ * Statistics in xemac_stats.c
+ */
+void XEmac_GetStats(XEmac * InstancePtr, XEmac_Stats * StatsPtr);
+void XEmac_ClearStats(XEmac * InstancePtr);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xemac_hw.h,v 1.1 2007/04/04 18:27:45 wre Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_hw.h
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the device. High-level driver functions
+* are defined in xemac.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00b rpm 04/26/02 First release
+* 1.00b rmm 09/23/02 Added XEmac_mPhyReset macro
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.00e rmm 04/06/04 Relocated IFG and MGT max values from 'c' files to this
+* one.
+* 1.00f rmm 10/19/04 Added constants for CAM address filtering & jumbo frame
+* support.
+* 1.01a rpm 03/08/06 Fixed EMIR TYPE mask
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XEMAC_HW_H /* prevent circular inclusions */
+#define XEMAC_HW_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+/* Offset of the MAC registers from the IPIF base address */
+#define XEM_REG_OFFSET 0x1100UL
+
+/*
+ * Register offsets for the Ethernet MAC. Each register is 32 bits.
+ */
+#define XEM_EMIR_OFFSET (XEM_REG_OFFSET + 0x0) /* EMAC Module ID */
+#define XEM_ECR_OFFSET (XEM_REG_OFFSET + 0x4) /* MAC Control */
+#define XEM_IFGP_OFFSET (XEM_REG_OFFSET + 0x8) /* Interframe Gap */
+#define XEM_SAH_OFFSET (XEM_REG_OFFSET + 0xC) /* Station addr, high */
+#define XEM_SAL_OFFSET (XEM_REG_OFFSET + 0x10) /* Station addr, low */
+#define XEM_MGTCR_OFFSET (XEM_REG_OFFSET + 0x14) /* MII mgmt control */
+#define XEM_MGTDR_OFFSET (XEM_REG_OFFSET + 0x18) /* MII mgmt data */
+#define XEM_RPLR_OFFSET (XEM_REG_OFFSET + 0x1C) /* Rx packet length */
+#define XEM_TPLR_OFFSET (XEM_REG_OFFSET + 0x20) /* Tx packet length */
+#define XEM_TSR_OFFSET (XEM_REG_OFFSET + 0x24) /* Tx status */
+#define XEM_RMFC_OFFSET (XEM_REG_OFFSET + 0x28) /* Rx missed frames */
+#define XEM_RCC_OFFSET (XEM_REG_OFFSET + 0x2C) /* Rx collisions */
+#define XEM_RFCSEC_OFFSET (XEM_REG_OFFSET + 0x30) /* Rx FCS errors */
+#define XEM_RAEC_OFFSET (XEM_REG_OFFSET + 0x34) /* Rx alignment errors */
+#define XEM_TEDC_OFFSET (XEM_REG_OFFSET + 0x38) /* Transmit excess
+ * deferral cnt */
+#define XEM_CAMH_OFFSET (XEM_REG_OFFSET + 0x40) /* CAM address, high */
+#define XEM_CAML_OFFSET (XEM_REG_OFFSET + 0x44) /* CAM address, low */
+
+/*
+ * Register offsets for the IPIF components
+ */
+#define XEM_ISR_OFFSET 0x20UL /* Interrupt status */
+
+#define XEM_DMA_OFFSET 0x2300UL
+#define XEM_DMA_SEND_OFFSET (XEM_DMA_OFFSET + 0x0) /* DMA send channel */
+#define XEM_DMA_RECV_OFFSET (XEM_DMA_OFFSET + 0x40) /* DMA recv channel */
+
+#define XEM_PFIFO_OFFSET 0x2000UL
+#define XEM_PFIFO_TXREG_OFFSET (XEM_PFIFO_OFFSET + 0x0) /* Tx registers */
+#define XEM_PFIFO_RXREG_OFFSET (XEM_PFIFO_OFFSET + 0x10) /* Rx registers */
+#define XEM_PFIFO_TXDATA_OFFSET (XEM_PFIFO_OFFSET + 0x100) /* Tx keyhole */
+#define XEM_PFIFO_RXDATA_OFFSET (XEM_PFIFO_OFFSET + 0x200) /* Rx keyhole */
+
+/*
+ * EMAC Module Identification Register (EMIR)
+ */
+#define XEM_EMIR_VERSION_MASK 0xFFFF0000UL /* Device version */
+#define XEM_EMIR_ID_MASK 0x0000FF00UL /* Device ID */
+#define XEM_EMIR_TYPE_MASK 0x000000FFUL /* Device type */
+
+/*
+ * EMAC Control Register (ECR)
+ */
+#define XEM_ECR_FULL_DUPLEX_MASK 0x80000000UL /* Full duplex mode */
+#define XEM_ECR_XMIT_RESET_MASK 0x40000000UL /* Reset transmitter */
+#define XEM_ECR_XMIT_ENABLE_MASK 0x20000000UL /* Enable transmitter */
+#define XEM_ECR_RECV_RESET_MASK 0x10000000UL /* Reset receiver */
+#define XEM_ECR_RECV_ENABLE_MASK 0x08000000UL /* Enable receiver */
+#define XEM_ECR_PHY_ENABLE_MASK 0x04000000UL /* Enable PHY */
+#define XEM_ECR_XMIT_PAD_ENABLE_MASK 0x02000000UL /* Enable xmit pad insert */
+#define XEM_ECR_XMIT_FCS_ENABLE_MASK 0x01000000UL /* Enable xmit FCS insert */
+#define XEM_ECR_XMIT_ADDR_INSERT_MASK 0x00800000UL /* Enable xmit source addr
+ * insertion */
+#define XEM_ECR_XMIT_ERROR_INSERT_MASK 0x00400000UL /* Insert xmit error */
+#define XEM_ECR_XMIT_ADDR_OVWRT_MASK 0x00200000UL /* Enable xmit source addr
+ * overwrite */
+#define XEM_ECR_LOOPBACK_MASK 0x00100000UL /* Enable internal
+ * loopback */
+#define XEM_ECR_RECV_STRIP_ENABLE_MASK 0x00080000UL /* Enable recv pad/fcs strip */
+#define XEM_ECR_UNICAST_ENABLE_MASK 0x00020000UL /* Enable unicast addr */
+#define XEM_ECR_MULTI_ENABLE_MASK 0x00010000UL /* Enable multicast addr */
+#define XEM_ECR_BROAD_ENABLE_MASK 0x00008000UL /* Enable broadcast addr */
+#define XEM_ECR_PROMISC_ENABLE_MASK 0x00004000UL /* Enable promiscuous mode */
+#define XEM_ECR_RECV_ALL_MASK 0x00002000UL /* Receive all frames */
+#define XEM_ECR_RECV_JUMBO_ENABLE_MASK 0x00001000UL /* Enable jumbo frame
+ reception */
+#define XEM_ECR_CAM_ENABLE_MASK 0x00000800UL /* Enable multicast CAM
+ filtering */
+#define XEM_ECR_PAUSE_FRAME_MASK 0x00000400UL /* Interpret pause frames */
+#define XEM_ECR_WRITE_CAM_MASK 0x00000100UL /* Add address to multicast
+ CAM */
+
+/*
+ * Interframe Gap Register (IFGR)
+ */
+#define XEM_IFGP_PART1_MASK 0xF8000000UL /* Interframe Gap Part1 */
+#define XEM_IFGP_PART1_SHIFT 27
+#define XEM_IFGP_PART2_MASK 0x07C00000UL /* Interframe Gap Part2 */
+#define XEM_IFGP_PART2_SHIFT 22
+#define XEM_IFGP_PART1_MAX 31 /* Max IFG 1 value */
+#define XEM_IFGP_PART2_MAX 31 /* Max IFG 2 value */
+
+/*
+ * Station Address High Register (SAH)
+ */
+#define XEM_SAH_ADDR_MASK 0x0000FFFFUL /* Station address high bytes */
+
+/*
+ * Station Address Low Register (SAL)
+ */
+#define XEM_SAL_ADDR_MASK 0xFFFFFFFFUL /* Station address low bytes */
+
+/*
+ * MII Management Control Register (MGTCR)
+ */
+#define XEM_MGTCR_START_MASK 0x80000000UL /* Start/Busy */
+#define XEM_MGTCR_RW_NOT_MASK 0x40000000UL /* Read/Write Not (direction) */
+#define XEM_MGTCR_PHY_ADDR_MASK 0x3E000000UL /* PHY address */
+#define XEM_MGTCR_PHY_ADDR_SHIFT 25 /* PHY address shift */
+#define XEM_MGTCR_REG_ADDR_MASK 0x01F00000UL /* Register address */
+#define XEM_MGTCR_REG_ADDR_SHIFT 20 /* Register addr shift */
+#define XEM_MGTCR_MII_ENABLE_MASK 0x00080000UL /* Enable MII from EMAC */
+#define XEM_MGTCR_RD_ERROR_MASK 0x00040000UL /* MII mgmt read error */
+#define XEM_MGTCR_MAX_PHY_ADDR 31 /* Maximum PHY address */
+#define XEM_MGTCR_MAX_PHY_REG 31 /* Maximum PHY register number */
+
+
+/*
+ * MII Management Data Register (MGTDR)
+ */
+#define XEM_MGTDR_DATA_MASK 0x0000FFFFUL /* MII data */
+
+/*
+ * Receive Packet Length Register (RPLR)
+ */
+#define XEM_RPLR_LENGTH_MASK 0x0000FFFFUL /* Receive packet length */
+
+/*
+ * Transmit Packet Length Register (TPLR)
+ */
+#define XEM_TPLR_LENGTH_MASK 0x0000FFFFUL /* Transmit packet length */
+
+/*
+ * Transmit Status Register (TSR)
+ */
+#define XEM_TSR_EXCESS_DEFERRAL_MASK 0x80000000UL /* Transmit excess deferral */
+#define XEM_TSR_FIFO_UNDERRUN_MASK 0x40000000UL /* Packet FIFO underrun */
+#define XEM_TSR_ATTEMPTS_MASK 0x3E000000UL /* Transmission attempts */
+#define XEM_TSR_LATE_COLLISION_MASK 0x01000000UL /* Transmit late collision */
+
+/*
+ * Receive Missed Frame Count (RMFC)
+ */
+#define XEM_RMFC_DATA_MASK 0x0000FFFFUL
+
+/*
+ * Receive Collision Count (RCC)
+ */
+#define XEM_RCC_DATA_MASK 0x0000FFFFUL
+
+/*
+ * Receive FCS Error Count (RFCSEC)
+ */
+#define XEM_RFCSEC_DATA_MASK 0x0000FFFFUL
+
+/*
+ * Receive Alignment Error Count (RALN)
+ */
+#define XEM_RAEC_DATA_MASK 0x0000FFFFUL
+
+/*
+ * Transmit Excess Deferral Count (TEDC)
+ */
+#define XEM_TEDC_DATA_MASK 0x0000FFFFUL
+
+/*
+ * CAM high (CAMH)
+ */
+#define XEM_CAMH_CAM_MASK 0x0000FFFFUL
+#define XEM_CAMH_SLOT_MASK 0x003F0000UL
+#define XEM_CAMH_SLOT_SHIFT 16
+
+/*
+ * EMAC Interrupt Registers (Status and Enable) masks. These registers are
+ * part of the IPIF IP Interrupt registers
+ */
+#define XEM_EIR_XMIT_DONE_MASK 0x00000001UL /* Xmit complete */
+#define XEM_EIR_RECV_DONE_MASK 0x00000002UL /* Recv complete */
+#define XEM_EIR_XMIT_ERROR_MASK 0x00000004UL /* Xmit error */
+#define XEM_EIR_RECV_ERROR_MASK 0x00000008UL /* Recv error */
+#define XEM_EIR_XMIT_SFIFO_EMPTY_MASK 0x00000010UL /* Xmit status fifo empty */
+#define XEM_EIR_RECV_LFIFO_EMPTY_MASK 0x00000020UL /* Recv length fifo empty */
+#define XEM_EIR_XMIT_LFIFO_FULL_MASK 0x00000040UL /* Xmit length fifo full */
+#define XEM_EIR_RECV_LFIFO_OVER_MASK 0x00000080UL /* Recv length fifo
+ * overrun */
+#define XEM_EIR_RECV_LFIFO_UNDER_MASK 0x00000100UL /* Recv length fifo
+ * underrun */
+#define XEM_EIR_XMIT_SFIFO_OVER_MASK 0x00000200UL /* Xmit status fifo
+ * overrun */
+#define XEM_EIR_XMIT_SFIFO_UNDER_MASK 0x00000400UL /* Transmit status fifo
+ * underrun */
+#define XEM_EIR_XMIT_LFIFO_OVER_MASK 0x00000800UL /* Transmit length fifo
+ * overrun */
+#define XEM_EIR_XMIT_LFIFO_UNDER_MASK 0x00001000UL /* Transmit length fifo
+ * underrun */
+#define XEM_EIR_XMIT_PAUSE_MASK 0x00002000UL /* Transmit pause pkt
+ * received */
+#define XEM_EIR_RECV_DFIFO_OVER_MASK 0x00004000UL /* Receive data fifo
+ * overrun */
+#define XEM_EIR_RECV_MISSED_FRAME_MASK 0x00008000UL /* Receive missed frame
+ * error */
+#define XEM_EIR_RECV_COLLISION_MASK 0x00010000UL /* Receive collision
+ * error */
+#define XEM_EIR_RECV_FCS_ERROR_MASK 0x00020000UL /* Receive FCS error */
+#define XEM_EIR_RECV_LEN_ERROR_MASK 0x00040000UL /* Receive length field
+ * error */
+#define XEM_EIR_RECV_SHORT_ERROR_MASK 0x00080000UL /* Receive short frame
+ * error */
+#define XEM_EIR_RECV_LONG_ERROR_MASK 0x00100000UL /* Receive long frame
+ * error */
+#define XEM_EIR_RECV_ALIGN_ERROR_MASK 0x00200000UL /* Receive alignment
+ * error */
+
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************
+*
+* Low-level driver macros and functions. The list below provides signatures
+* to help the user use the macros.
+*
+* u32 XEmac_mReadReg(u32 BaseAddress, int RegOffset)
+* void XEmac_mWriteReg(u32 BaseAddress, int RegOffset, u32 Mask)
+*
+* void XEmac_mSetControlReg(u32 BaseAddress, u32 Mask)
+* void XEmac_mSetMacAddress(u32 BaseAddress, u8 *AddressPtr)
+*
+* void XEmac_mEnable(u32 BaseAddress)
+* void XEmac_mDisable(u32 BaseAddress)
+*
+* u32 XEmac_mIsTxDone(u32 BaseAddress)
+* u32 XEmac_mIsRxEmpty(u32 BaseAddress)
+*
+* void XEmac_SendFrame(u32 BaseAddress, u8 *FramePtr, int Size)
+* int XEmac_RecvFrame(u32 BaseAddress, u8 *FramePtr)
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/**
+*
+* Read the given register.
+*
+* @param BaseAddress is the base address of the device
+* @param RegOffset is the register offset to be read
+*
+* @return The 32-bit value of the register
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mReadReg(BaseAddress, RegOffset) \
+ XIo_In32((BaseAddress) + (RegOffset))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given register.
+*
+* @param BaseAddress is the base address of the device
+* @param RegOffset is the register offset to be written
+* @param Data is the 32-bit value to write to the register
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mWriteReg(BaseAddress, RegOffset, Data) \
+ XIo_Out32((BaseAddress) + (RegOffset), (Data))
+
+
+/****************************************************************************/
+/**
+*
+* Set the contents of the control register. Use the XEM_ECR_* constants
+* defined above to create the bit-mask to be written to the register.
+*
+* @param BaseAddress is the base address of the device
+* @param Mask is the 32-bit value to write to the control register
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mSetControlReg(BaseAddress, Mask) \
+ XIo_Out32((BaseAddress) + XEM_ECR_OFFSET, (Mask))
+
+
+/****************************************************************************/
+/**
+*
+* Set the station address of the EMAC device.
+*
+* @param BaseAddress is the base address of the device
+* @param AddressPtr is a pointer to a 6-byte MAC address
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mSetMacAddress(BaseAddress, AddressPtr) \
+{ \
+ u32 MacAddr; \
+ \
+ MacAddr = ((AddressPtr)[0] << 8) | (AddressPtr)[1]; \
+ XIo_Out32((BaseAddress) + XEM_SAH_OFFSET, MacAddr); \
+ \
+ MacAddr = ((AddressPtr)[2] << 24) | ((AddressPtr)[3] << 16) | \
+ ((AddressPtr)[4] << 8) | (AddressPtr)[5]; \
+ \
+ XIo_Out32((BaseAddress) + XEM_SAL_OFFSET, MacAddr); \
+}
+
+
+/****************************************************************************/
+/**
+*
+* Enable the transmitter and receiver. Preserve the contents of the control
+* register.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mEnable(BaseAddress) \
+{ \
+ u32 Control; \
+ Control = XIo_In32((BaseAddress) + XEM_ECR_OFFSET); \
+ Control &= ~(XEM_ECR_XMIT_RESET_MASK | XEM_ECR_RECV_RESET_MASK); \
+ Control |= (XEM_ECR_XMIT_ENABLE_MASK | XEM_ECR_RECV_ENABLE_MASK); \
+ XIo_Out32((BaseAddress) + XEM_ECR_OFFSET, Control); \
+}
+
+
+/****************************************************************************/
+/**
+*
+* Disable the transmitter and receiver. Preserve the contents of the control
+* register.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mDisable(BaseAddress) \
+ XIo_Out32((BaseAddress) + XEM_ECR_OFFSET, \
+ XIo_In32((BaseAddress) + XEM_ECR_OFFSET) & \
+ ~(XEM_ECR_XMIT_ENABLE_MASK | XEM_ECR_RECV_ENABLE_MASK))
+
+
+/****************************************************************************/
+/**
+*
+* Check to see if the transmission is complete.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return Nonzero if it is done, or 0 if it is not.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mIsTxDone(BaseAddress) \
+ (XIo_In32((BaseAddress) + XEM_ISR_OFFSET) & XEM_EIR_XMIT_DONE_MASK)
+
+
+/****************************************************************************/
+/**
+*
+* Check to see if the receive FIFO is empty.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return TRUE if it is empty, or FALSE if it is not.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mIsRxEmpty(BaseAddress) \
+ (!(XIo_In32((BaseAddress) + XEM_ISR_OFFSET) & XEM_EIR_RECV_DONE_MASK))
+
+
+/****************************************************************************/
+/**
+*
+* Reset MII compliant PHY
+*
+* @param BaseAddress is the base address of the device
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+#define XEmac_mPhyReset(BaseAddress) \
+{ \
+ u32 Control; \
+ Control = XIo_In32((BaseAddress) + XEM_ECR_OFFSET); \
+ Control &= ~XEM_ECR_PHY_ENABLE_MASK; \
+ XIo_Out32((BaseAddress) + XEM_ECR_OFFSET, Control); \
+ Control |= XEM_ECR_PHY_ENABLE_MASK; \
+ XIo_Out32((BaseAddress) + XEM_ECR_OFFSET, Control); \
+}
+
+
+/************************** Function Prototypes ******************************/
+
+void XEmac_SendFrame(u32 BaseAddress, u8 *FramePtr, int Size);
+int XEmac_RecvFrame(u32 BaseAddress, u8 *FramePtr);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xemac_i.h,v 1.2 2007/05/15 00:52:28 wre Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_i.h
+*
+* This header file contains internal identifiers, which are those shared
+* between XEmac components. The identifiers in this file are not intended for
+* use external to the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00b rpm 04/29/02 Moved register definitions to xemac_hw.h
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.01a ecm 09/01/05 Added DRE support through separate SgSendDRE specific
+* define, XEM_DRE_SEND_BD_MASK.
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XEMAC_I_H /* prevent circular inclusions */
+#define XEMAC_I_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xemac.h"
+#include "xemac_hw.h"
+
+/******************************************************************************
+ *
+ * Definitions transferred from the IPIF library header file.
+ *
+ ******************************************************************************/
+/** @name Register Offsets
+ *
+ * The following constants define the register offsets for the registers of the
+ * IPIF, there are some holes in the memory map for reserved addresses to allow
+ * other registers to be added and still match the memory map of the interrupt
+ * controller registers
+ * @{
+ */
+#define XEMAC_DISR_OFFSET 0UL /**< device interrupt status register */
+#define XEMAC_DIPR_OFFSET 4UL /**< device interrupt pending register */
+#define XEMAC_DIER_OFFSET 8UL /**< device interrupt enable register */
+#define XEMAC_DIIR_OFFSET 24UL /**< device interrupt ID register */
+#define XEMAC_DGIER_OFFSET 28UL /**< device global interrupt enable register */
+#define XEMAC_IISR_OFFSET 32UL /**< IP interrupt status register */
+#define XEMAC_IIER_OFFSET 40UL /**< IP interrupt enable register */
+#define XEMAC_RESETR_OFFSET 64UL /**< reset register */
+/* @} */
+
+/**
+ * The value used for the reset register to reset the IPIF
+ */
+#define XEMAC_RESET_MASK 0xAUL
+
+/**
+ * The following constant is used for the device global interrupt enable
+ * register, to enable all interrupts for the device, this is the only bit
+ * in the register
+ */
+#define XEMAC_GINTR_ENABLE_MASK 0x80000000UL
+
+/**
+ * The mask to identify each internal IPIF error condition in the device
+ * registers of the IPIF. Interrupts are assigned in the register from LSB
+ * to the MSB
+ */
+#define XEMAC_ERROR_MASK 1UL /**< LSB of the register */
+
+/** @name Interrupt IDs
+ *
+ * The interrupt IDs which identify each internal IPIF condition, this value
+ * must correlate with the mask constant for the error
+ * @{
+ */
+#define XEMAC_ERROR_INTERRUPT_ID 0 /**< interrupt bit #, (LSB = 0) */
+#define XEMAC_NO_INTERRUPT_ID 128 /**< no interrupts are pending */
+/* @} */
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/*****************************************************************************/
+/**
+*
+* Reset the IPIF component and hardware. This is a destructive operation that
+* could cause the loss of data since resetting the IPIF of a device also
+* resets the device using the IPIF and any blocks, such as FIFOs or DMA
+* channels, within the IPIF. All registers of the IPIF will contain their
+* reset value when this function returns.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+#define XEMAC_RESET(RegBaseAddress) \
+ XIo_Out32(RegBaseAddress + XEMAC_RESETR_OFFSET, XEMAC_RESET_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This macro sets the device interrupt status register to the value.
+* This register indicates the status of interrupt sources for a device
+* which contains the IPIF. The status is independent of whether interrupts
+* are enabled and could be used for polling a device at a higher level rather
+* than a more detailed level.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. With the exception of some internal IPIF
+* conditions, the contents of this register are not latched but indicate
+* the live status of the interrupt sources within the device. Writing any of
+* the non-latched bits of the register will have no effect on the register.
+*
+* For the latched bits of this register only, setting a bit which is zero
+* within this register causes an interrupt to be generated. The device global
+* interrupt enable register and the device interrupt enable register must be set
+* appropriately to allow an interrupt to be passed out of the device. The
+* interrupt is cleared by writing to this register with the bits to be
+* cleared set to a one and all others to zero. This register implements a
+* toggle on write functionality meaning any bits which are set in the value
+* written cause the bits in the register to change to the opposite state.
+*
+* This function writes the specified value to the register such that
+* some bits may be set and others cleared. It is the caller's responsibility
+* to get the value of the register prior to setting the value to prevent a
+* destructive behavior.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Status contains the value to be written to the interrupt status
+* register of the device. The only bits which can be written are
+* the latched bits which contain the internal IPIF conditions. The
+* following values may be used to set the status register or clear an
+* interrupt condition.
+* - XEMAC_ERROR_MASK Indicates a device error in the IPIF
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XEMAC_WRITE_DISR(RegBaseAddress, Status) \
+ XIo_Out32((RegBaseAddress) + XEMAC_DISR_OFFSET, (Status))
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the device interrupt status register contents.
+* This register indicates the status of interrupt sources for a device
+* which contains the IPIF. The status is independent of whether interrupts
+* are enabled and could be used for polling a device at a higher level.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. With the exception of some internal IPIF
+* conditions, the contents of this register are not latched but indicate
+* the live status of the interrupt sources within the device.
+*
+* For only the latched bits of this register, the interrupt may be cleared by
+* writing to these bits in the status register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* A status which contains the value read from the interrupt status register of
+* the device. The bit definitions are specific to the device with
+* the exception of the latched internal IPIF condition bits. The following
+* values may be used to detect internal IPIF conditions in the status.
+* <br><br>
+* - XEMAC_ERROR_MASK Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_READ_DISR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XEMAC_DISR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This function sets the device interrupt enable register contents.
+* This register controls which interrupt sources of the device are allowed to
+* generate an interrupt. The device global interrupt enable register must also
+* be set appropriately for an interrupt to be passed out of the device.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. Setting a bit in this register enables that
+* interrupt source to generate an interrupt. Clearing a bit in this register
+* disables interrupt generation for that interrupt source.
+*
+* This function writes only the specified value to the register such that
+* some interrupts source may be enabled and others disabled. It is the
+* caller's responsibility to get the value of the interrupt enable register
+* prior to setting the value to prevent a destructive behavior.
+*
+* An interrupt source may not be enabled to generate an interrupt, but can
+* still be polled in the interrupt status register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Enable contains the value to be written to the interrupt enable
+* register of the device. The bit definitions are specific to the
+* device with the exception of the internal IPIF conditions. The
+* following values may be used to enable the internal IPIF
+* conditions interrupts.
+*
+* - XEMAC_ERROR_MASK Indicates a device error in the IPIF
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Signature: void XEMAC_WRITE_DIER(u32 RegBaseAddress,
+* u32 Enable)
+*
+******************************************************************************/
+#define XEMAC_WRITE_DIER(RegBaseAddress, Enable) \
+ XIo_Out32((RegBaseAddress) + XEMAC_DIER_OFFSET, (Enable))
+
+/*****************************************************************************/
+/**
+*
+* This function gets the device interrupt enable register contents.
+* This register controls which interrupt sources of the device
+* are allowed to generate an interrupt. The device global interrupt enable
+* register and the device interrupt enable register must also be set
+* appropriately for an interrupt to be passed out of the device.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. Setting a bit in this register enables that
+* interrupt source to generate an interrupt if the global enable is set
+* appropriately. Clearing a bit in this register disables interrupt generation
+* for that interrupt source regardless of the global interrupt enable.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* The value read from the interrupt enable register of the device. The bit
+* definitions are specific to the device with the exception of the internal
+* IPIF conditions. The following values may be used to determine from the
+* value if the internal IPIF conditions interrupts are enabled.
+* <br><br>
+* - XEMAC_ERROR_MASK Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_READ_DIER(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XEMAC_DIER_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the device interrupt pending register contents.
+* This register indicates the pending interrupt sources, those that are waiting
+* to be serviced by the software, for a device which contains the IPIF.
+* An interrupt must be enabled in the interrupt enable register of the IPIF to
+* be pending.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. With the exception of some internal IPIF
+* conditions, the contents of this register are not latched since the condition
+* is latched in the IP interrupt status register, by an internal block of the
+* IPIF such as a FIFO or DMA channel, or by the IP of the device. This register
+* is read only and is not latched, but it is necessary to acknowledge (clear)
+* the interrupt condition by performing the appropriate processing for the IP
+* or block within the IPIF.
+*
+* This register can be thought of as the contents of the interrupt status
+* register ANDed with the contents of the interrupt enable register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* The value read from the interrupt pending register of the device. The bit
+* definitions are specific to the device with the exception of the latched
+* internal IPIF condition bits. The following values may be used to detect
+* internal IPIF conditions in the value.
+* <br><br>
+* - XEMAC_ERROR_MASK Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_READ_DIPR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XEMAC_DIPR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the device interrupt ID for the highest priority interrupt
+* which is pending from the interrupt ID register. This function provides
+* priority resolution such that faster interrupt processing is possible.
+* Without priority resolution, it is necessary for the software to read the
+* interrupt pending register and then check each interrupt source to determine
+* if an interrupt is pending. Priority resolution becomes more important as the
+* number of interrupt sources becomes larger.
+*
+* Interrupt priorities are based upon the bit position of the interrupt in the
+* interrupt pending register with bit 0 being the highest priority. The
+* interrupt ID is the priority of the interrupt, 0 - 31, with 0 being the
+* highest priority. The interrupt ID register is live rather than latched such
+* that multiple calls to this function may not yield the same results. A
+* special value, outside of the interrupt priority range of 0 - 31, is
+* contained in the register which indicates that no interrupt is pending. This
+* may be useful for allowing software to continue processing interrupts in a
+* loop until there are no longer any interrupts pending.
+*
+* The interrupt ID is designed to allow a function pointer table to be used
+* in the software such that the interrupt ID is used as an index into that
+* table. The function pointer table could contain an instance pointer, such
+* as to DMA channel, and a function pointer to the function which handles
+* that interrupt. This design requires the interrupt processing of the device
+* driver to be partitioned into smaller more granular pieces based upon
+* hardware used by the device, such as DMA channels and FIFOs.
+*
+* It is not mandatory that this function be used by the device driver software.
+* It may choose to read the pending register and resolve the pending interrupt
+* priorities on its own.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* An interrupt ID, 0 - 31, which identifies the highest priority interrupt
+* which is pending. A value of XEMAC_NO_INTERRUPT_ID indicates that there is
+* no interrupt pending. The following values may be used to identify the
+* interrupt ID for the internal IPIF interrupts.
+* <br><br>
+* - XEMAC_ERROR_INTERRUPT_ID Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_READ_DIIR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XEMAC_DIIR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This function disables all interrupts for the device by writing to the global
+* interrupt enable register. This register provides the ability to disable
+* interrupts without any modifications to the interrupt enable register such
+* that it is minimal effort to restore the interrupts to the previous enabled
+* state. The corresponding macro, XEMAC_GINTR_ENABLE, is provided to
+* restore the interrupts to the previous enabled state. This function is
+* designed to be used in critical sections of device drivers such that it is
+* not necessary to disable other device interrupts.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_GINTR_DISABLE(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XEMAC_DGIER_OFFSET, 0)
+
+/*****************************************************************************/
+/**
+*
+* This function writes to the global interrupt enable register to enable
+* interrupts from the device. This register provides the ability to enable
+* interrupts without any modifications to the interrupt enable register such
+* that it is minimal effort to restore the interrupts to the previous enabled
+* state. This function does not enable individual interrupts as the interrupt
+* enable register must be set appropriately. This function is designed to be
+* used in critical sections of device drivers such that it is not necessary to
+* disable other device interrupts.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_GINTR_ENABLE(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XEMAC_DGIER_OFFSET, \
+ XEMAC_GINTR_ENABLE_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function determines if interrupts are enabled at the global level by
+* reading the global interrupt register. This register provides the ability to
+* disable interrupts without any modifications to the interrupt enable register
+* such that it is minimal effort to restore the interrupts to the previous
+* enabled state.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* TRUE if interrupts are enabled for the IPIF, FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_IS_GINTR_ENABLED(RegBaseAddress) \
+ (XIo_In32((RegBaseAddress) + XEMAC_DGIER_OFFSET) == \
+ XEMAC_GINTR_ENABLE_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function sets the IP interrupt status register to the specified value.
+* This register indicates the status of interrupt sources for the IP of the
+* device. The IP is defined as the part of the device that connects to the
+* IPIF. The status is independent of whether interrupts are enabled such that
+* the status register may also be polled when interrupts are not enabled.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IP. All bits of this register are latched. Setting a bit which is zero
+* within this register causes an interrupt to be generated. The device global
+* interrupt enable register and the device interrupt enable register must be set
+* appropriately to allow an interrupt to be passed out of the device. The
+* interrupt is cleared by writing to this register with the bits to be
+* cleared set to a one and all others to zero. This register implements a
+* toggle on write functionality meaning any bits which are set in the value
+* written cause the bits in the register to change to the opposite state.
+*
+* This function writes only the specified value to the register such that
+* some status bits may be set and others cleared. It is the caller's
+* responsibility to get the value of the register prior to setting the value
+* to prevent a destructive behavior.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Status contains the value to be written to the IP interrupt status
+* register. The bit definitions are specific to the device IP.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_WRITE_IISR(RegBaseAddress, Status) \
+ XIo_Out32((RegBaseAddress) + XEMAC_IISR_OFFSET, (Status))
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the contents of the IP interrupt status register.
+* This register indicates the status of interrupt sources for the IP of the
+* device. The IP is defined as the part of the device that connects to the
+* IPIF. The status is independent of whether interrupts are enabled such
+* that the status register may also be polled when interrupts are not enabled.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device. All bits of this register are latched. Writing a 1 to a bit within
+* this register causes an interrupt to be generated if enabled in the interrupt
+* enable register and the global interrupt enable is set. Since the status is
+* latched, each status bit must be acknowledged in order for the bit in the
+* status register to be updated. Each bit can be acknowledged by writing a
+* 1 to the bit in the status register (the register toggles on write).
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* A status which contains the value read from the IP interrupt status register.
+* The bit definitions are specific to the device IP.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_READ_IISR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XEMAC_IISR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This macro sets the IP interrupt enable register contents. This register
+* controls which interrupt sources of the IP are allowed to generate an
+* interrupt. The global interrupt enable register and the device interrupt
+* enable register must also be set appropriately for an interrupt to be
+* passed out of the device containing the IPIF and the IP.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IP. Setting a bit in this register enables the interrupt source to generate
+* an interrupt. Clearing a bit in this register disables interrupt generation
+* for that interrupt source.
+*
+* This function writes only the specified value to the register such that
+* some interrupt sources may be enabled and others disabled. It is the
+* caller's responsibility to get the value of the interrupt enable register
+* prior to setting the value to prevent a destructive behavior.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Enable contains the value to be written to the IP interrupt enable
+* register. The bit definitions are specific to the device IP.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XEMAC_WRITE_IIER(RegBaseAddress, Enable) \
+ XIo_Out32((RegBaseAddress) + XEMAC_IIER_OFFSET, (Enable))
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the IP interrupt enable register contents. This register
+* controls which interrupt sources of the IP are allowed to generate an
+* interrupt. The global interrupt enable register and the device interrupt
+* enable register must also be set appropriately for an interrupt to be
+* passed out of the device containing the IPIF and the IP.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IP. Setting a bit in this register enables the interrupt source to generate
+* an interrupt. Clearing a bit in this register disables interrupt generation
+* for that interrupt source.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* The contents read from the IP interrupt enable register. The bit definitions
+* are specific to the device IP.
+*
+* @note
+*
+* Signature: u32 XEMAC_READ_IIER(u32 RegBaseAddress)
+*
+******************************************************************************/
+#define XEMAC_READ_IIER(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XEMAC_IIER_OFFSET)
+
+
+/******************************************************************************
+ *
+ * End of transferred IPIF definitions.
+ *
+ ******************************************************************************/
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Default buffer descriptor control word masks. The default send BD control
+ * is set for incrementing the source address by one for each byte transferred,
+ * and specify that the destination address (FIFO) is local to the device. The
+ * default receive BD control is set for incrementing the destination address
+ * by one for each byte transferred, and specify that the source address is
+ * local to the device.
+ */
+#define XEM_DFT_SEND_BD_MASK (XDC_DMACR_SOURCE_INCR_MASK | \
+ XDC_DMACR_DEST_LOCAL_MASK)
+#define XEM_DFT_RECV_BD_MASK (XDC_DMACR_DEST_INCR_MASK | \
+ XDC_DMACR_SOURCE_LOCAL_MASK)
+
+/*
+ * Masks for the IPIF Device Interrupt enable and status registers.
+ */
+#define XEM_IPIF_EMAC_MASK 0x00000004UL /* MAC interrupt */
+#define XEM_IPIF_SEND_DMA_MASK 0x00000008UL /* Send DMA interrupt */
+#define XEM_IPIF_RECV_DMA_MASK 0x00000010UL /* Receive DMA interrupt */
+#define XEM_IPIF_RECV_FIFO_MASK 0x00000020UL /* Receive FIFO interrupt */
+#define XEM_IPIF_SEND_FIFO_MASK 0x00000040UL /* Send FIFO interrupt */
+
+/*
+ * Default IPIF Device Interrupt mask when configured for DMA
+ */
+#define XEM_IPIF_DMA_DFT_MASK (XEM_IPIF_SEND_DMA_MASK | \
+ XEM_IPIF_RECV_DMA_MASK | \
+ XEM_IPIF_EMAC_MASK | \
+ XEM_IPIF_SEND_FIFO_MASK | \
+ XEM_IPIF_RECV_FIFO_MASK)
+
+/*
+ * Default IPIF Device Interrupt mask when configured without DMA
+ */
+#define XEM_IPIF_FIFO_DFT_MASK (XEM_IPIF_EMAC_MASK | \
+ XEM_IPIF_SEND_FIFO_MASK | \
+ XEM_IPIF_RECV_FIFO_MASK)
+
+#define XEM_IPIF_DMA_DEV_INTR_COUNT 7 /* Number of interrupt sources */
+#define XEM_IPIF_FIFO_DEV_INTR_COUNT 5 /* Number of interrupt sources */
+#define XEM_IPIF_DEVICE_INTR_COUNT 7 /* Number of interrupt sources */
+#define XEM_IPIF_IP_INTR_COUNT 22 /* Number of MAC interrupts */
+
+
+/* a mask for all transmit interrupts, used in polled mode */
+#define XEM_EIR_XMIT_ALL_MASK (XEM_EIR_XMIT_DONE_MASK | \
+ XEM_EIR_XMIT_ERROR_MASK | \
+ XEM_EIR_XMIT_SFIFO_EMPTY_MASK | \
+ XEM_EIR_XMIT_LFIFO_FULL_MASK)
+
+/* a mask for all receive interrupts, used in polled mode */
+#define XEM_EIR_RECV_ALL_MASK (XEM_EIR_RECV_DONE_MASK | \
+ XEM_EIR_RECV_ERROR_MASK | \
+ XEM_EIR_RECV_LFIFO_EMPTY_MASK | \
+ XEM_EIR_RECV_LFIFO_OVER_MASK | \
+ XEM_EIR_RECV_LFIFO_UNDER_MASK | \
+ XEM_EIR_RECV_DFIFO_OVER_MASK | \
+ XEM_EIR_RECV_MISSED_FRAME_MASK | \
+ XEM_EIR_RECV_COLLISION_MASK | \
+ XEM_EIR_RECV_FCS_ERROR_MASK | \
+ XEM_EIR_RECV_LEN_ERROR_MASK | \
+ XEM_EIR_RECV_SHORT_ERROR_MASK | \
+ XEM_EIR_RECV_LONG_ERROR_MASK | \
+ XEM_EIR_RECV_ALIGN_ERROR_MASK)
+
+/* a default interrupt mask for scatter-gather DMA operation */
+#define XEM_EIR_DFT_SG_MASK (XEM_EIR_RECV_ERROR_MASK | \
+ XEM_EIR_RECV_LFIFO_OVER_MASK | \
+ XEM_EIR_RECV_LFIFO_UNDER_MASK | \
+ XEM_EIR_XMIT_SFIFO_OVER_MASK | \
+ XEM_EIR_XMIT_SFIFO_UNDER_MASK | \
+ XEM_EIR_XMIT_LFIFO_OVER_MASK | \
+ XEM_EIR_XMIT_LFIFO_UNDER_MASK | \
+ XEM_EIR_RECV_DFIFO_OVER_MASK | \
+ XEM_EIR_RECV_MISSED_FRAME_MASK | \
+ XEM_EIR_RECV_COLLISION_MASK | \
+ XEM_EIR_RECV_FCS_ERROR_MASK | \
+ XEM_EIR_RECV_LEN_ERROR_MASK | \
+ XEM_EIR_RECV_SHORT_ERROR_MASK | \
+ XEM_EIR_RECV_LONG_ERROR_MASK | \
+ XEM_EIR_RECV_ALIGN_ERROR_MASK)
+
+/* a default interrupt mask for non-DMA operation (direct FIFOs) */
+#define XEM_EIR_DFT_FIFO_MASK (XEM_EIR_XMIT_DONE_MASK | \
+ XEM_EIR_RECV_DONE_MASK | \
+ XEM_EIR_DFT_SG_MASK)
+
+
+/*
+ * Mask for the DMA interrupt enable and status registers when configured
+ * for scatter-gather DMA.
+ */
+#define XEM_DMA_SG_INTR_MASK (XDC_IXR_DMA_ERROR_MASK | \
+ XDC_IXR_PKT_THRESHOLD_MASK | \
+ XDC_IXR_PKT_WAIT_BOUND_MASK | \
+ XDC_IXR_SG_END_MASK)
+
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/*****************************************************************************/
+/*
+*
+* Clears a structure of given size, in bytes, by setting each byte to 0.
+*
+* @param StructPtr is a pointer to the structure to be cleared.
+* @param NumBytes is the number of bytes in the structure.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Signature: void XEmac_mClearStruct(u8 *StructPtr, unsigned int NumBytes)
+*
+******************************************************************************/
+/* Zeroes NumBytes bytes starting at StructPtr, one byte at a time.
+ * NOTE(review): the body is a bare brace block, not do { ... } while (0),
+ * so expanding it as the sole statement of an unbraced if/else changes the
+ * surrounding control flow; it also declares a local 'i' that could shadow
+ * a caller variable. Confirm with upstream before altering the patch. */
+#define XEmac_mClearStruct(StructPtr, NumBytes) \
+{ \
+ u32 i; \
+ u8 *BytePtr = (u8 *)(StructPtr); \
+ for (i=0; i < (unsigned int)(NumBytes); i++) \
+ { \
+ *BytePtr++ = 0; \
+ } \
+}
+
+/************************** Variable Definitions *****************************/
+
+extern XEmac_Config XEmac_ConfigTable[];
+
+/************************** Function Prototypes ******************************/
+
+void XEmac_CheckEmacError(XEmac * InstancePtr, u32 IntrStatus);
+void XEmac_CheckFifoRecvError(XEmac * InstancePtr);
+void XEmac_CheckFifoSendError(XEmac * InstancePtr);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xemac_intr.c,v 1.2 2007/05/15 00:52:28 wre Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_intr.c
+*
+* This file contains general interrupt-related functions of the XEmac driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00c rpm 03/31/03 Added comment to indicate that no Receive Length FIFO
+* overrun interrupts occur in v1.00l and later of the EMAC
+* device. This avoids the need to reset the device on
+* receive overruns.
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xemac_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Set the callback function for handling asynchronous errors. The upper layer
+* software should call this function during initialization.
+*
+* The error callback is invoked by the driver within interrupt context, so it
+* needs to do its job quickly. If there are potentially slow operations within
+* the callback, these should be done at task-level.
+*
+* The Xilinx errors that must be handled by the callback are:
+* - XST_DMA_ERROR indicates an unrecoverable DMA error occurred. This is
+* typically a bus error or bus timeout. The handler must reset and
+* re-configure the device.
+* - XST_FIFO_ERROR indicates an unrecoverable FIFO error occurred. This is a
+* deadlock condition in the packet FIFO. The handler must reset and
+* re-configure the device.
+* - XST_RESET_ERROR indicates an unrecoverable MAC error occurred, usually an
+* overrun or underrun. The handler must reset and re-configure the device.
+* - XST_DMA_SG_NO_LIST indicates an attempt was made to access a scatter-gather
+* DMA list that has not yet been created.
+* - XST_DMA_SG_LIST_EMPTY indicates the driver tried to get a descriptor from
+* the receive descriptor list, but the list was empty.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the driver in
+* the callback. This helps the driver correlate the callback to a
+* particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_SetErrorHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_ErrorHandler FuncPtr)
+{
+ /* Reject null arguments and uninitialized instances before storing */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Record the callback and its opaque reference; they are invoked from
+ * interrupt context by the error-check routines in this file */
+ InstancePtr->ErrorHandler = FuncPtr;
+ InstancePtr->ErrorRef = CallBackRef;
+}
+
+/****************************************************************************/
+/*
+*
+* Check the interrupt status bits of the Ethernet MAC for errors. Errors
+* currently handled are:
+* - Receive length FIFO overrun. Indicates data was lost due to the receive
+* length FIFO becoming full during the reception of a packet. Only a device
+* reset clears this condition.
+* - Receive length FIFO underrun. An attempt to read an empty FIFO. Only a
+* device reset clears this condition.
+* - Transmit status FIFO overrun. Indicates data was lost due to the transmit
+* status FIFO becoming full following the transmission of a packet. Only a
+* device reset clears this condition.
+* - Transmit status FIFO underrun. An attempt to read an empty FIFO. Only a
+* device reset clears this condition.
+* - Transmit length FIFO overrun. Indicates data was lost due to the transmit
+* length FIFO becoming full following the transmission of a packet. Only a
+* device reset clears this condition.
+* - Transmit length FIFO underrun. An attempt to read an empty FIFO. Only a
+* device reset clears this condition.
+* - Receive data FIFO overrun. Indicates data was lost due to the receive data
+* FIFO becoming full during the reception of a packet.
+* - Receive data errors:
+* - Receive missed frame error. Valid data was lost by the MAC.
+* - Receive collision error. Data was lost by the MAC due to a collision.
+* - Receive FCS error. Data was discarded by the MAC due to FCS error.
+* - Receive length field error. Data was discarded by the MAC due to an invalid
+* length field in the packet.
+* - Receive short error. Data was discarded by the MAC because a packet was
+* shorter than allowed.
+* - Receive long error. Data was discarded by the MAC because a packet was
+* longer than allowed.
+* - Receive alignment error. Data was truncated by the MAC because its length
+* was not byte-aligned.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param IntrStatus is the contents of the interrupt status register to be checked
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* This function is intended for internal use only.
+*
+******************************************************************************/
+void XEmac_CheckEmacError(XEmac * InstancePtr, u32 IntrStatus)
+{
+ /* Set TRUE once any latched overrun/underrun is seen; acted on at the
+ * bottom of the function (interrupts masked, error handler invoked) */
+ u32 ResetError = FALSE;
+
+ /*
+ * First check for receive fifo overrun/underrun errors. Most require a
+ * reset by the user to clear, but the data FIFO overrun error does not.
+ */
+ if (IntrStatus & XEM_EIR_RECV_DFIFO_OVER_MASK) {
+ InstancePtr->Stats.RecvOverrunErrors++;
+ InstancePtr->Stats.FifoErrors++;
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_LFIFO_OVER_MASK) {
+ /*
+ * Receive Length FIFO overrun interrupts no longer occur in v1.00l
+ * and later of the EMAC device. Frames are just dropped by the EMAC
+ * if the length FIFO is full. The user would notice the Receive Missed
+ * Frame count incrementing without any other errors being reported.
+ * This code is left here for backward compatibility with v1.00k and
+ * older EMAC devices.
+ */
+ InstancePtr->Stats.RecvOverrunErrors++;
+ InstancePtr->Stats.FifoErrors++;
+ ResetError = TRUE; /* requires a reset */
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_LFIFO_UNDER_MASK) {
+ InstancePtr->Stats.RecvUnderrunErrors++;
+ InstancePtr->Stats.FifoErrors++;
+ ResetError = TRUE; /* requires a reset */
+ }
+
+ /*
+ * Now check for general receive errors. Get the latest count where
+ * available, otherwise just bump the statistic so we know the interrupt
+ * occurred.
+ */
+ if (IntrStatus & XEM_EIR_RECV_ERROR_MASK) {
+ if (IntrStatus & XEM_EIR_RECV_MISSED_FRAME_MASK) {
+ /*
+ * Caused by length FIFO or data FIFO overruns on receive side
+ */
+ /* Hardware keeps the running count; read it rather than increment */
+ InstancePtr->Stats.RecvMissedFrameErrors =
+ XIo_In32(InstancePtr->BaseAddress +
+ XEM_RMFC_OFFSET);
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_COLLISION_MASK) {
+ InstancePtr->Stats.RecvCollisionErrors =
+ XIo_In32(InstancePtr->BaseAddress +
+ XEM_RCC_OFFSET);
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_FCS_ERROR_MASK) {
+ InstancePtr->Stats.RecvFcsErrors =
+ XIo_In32(InstancePtr->BaseAddress +
+ XEM_RFCSEC_OFFSET);
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_LEN_ERROR_MASK) {
+ InstancePtr->Stats.RecvLengthFieldErrors++;
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_SHORT_ERROR_MASK) {
+ InstancePtr->Stats.RecvShortErrors++;
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_LONG_ERROR_MASK) {
+ InstancePtr->Stats.RecvLongErrors++;
+ }
+
+ if (IntrStatus & XEM_EIR_RECV_ALIGN_ERROR_MASK) {
+ InstancePtr->Stats.RecvAlignmentErrors =
+ XIo_In32(InstancePtr->BaseAddress +
+ XEM_RAEC_OFFSET);
+ }
+
+ /*
+ * Bump recv interrupts stats only if not scatter-gather DMA (this
+ * stat gets bumped elsewhere in that case)
+ */
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ InstancePtr->Stats.RecvInterrupts++; /* TODO: double bump? */
+ }
+
+ }
+
+ /*
+ * Check for transmit errors. These apply to both DMA and non-DMA modes
+ * of operation. The entire device should be reset after overruns or
+ * underruns.
+ */
+ if (IntrStatus & (XEM_EIR_XMIT_SFIFO_OVER_MASK |
+ XEM_EIR_XMIT_LFIFO_OVER_MASK)) {
+ InstancePtr->Stats.XmitOverrunErrors++;
+ InstancePtr->Stats.FifoErrors++;
+ ResetError = TRUE;
+ }
+
+ if (IntrStatus & (XEM_EIR_XMIT_SFIFO_UNDER_MASK |
+ XEM_EIR_XMIT_LFIFO_UNDER_MASK)) {
+ InstancePtr->Stats.XmitUnderrunErrors++;
+ InstancePtr->Stats.FifoErrors++;
+ ResetError = TRUE;
+ }
+
+ if (ResetError) {
+ /*
+ * If a reset error occurred, disable the EMAC interrupts since the
+ * reset-causing interrupt(s) is latched in the EMAC - meaning it will
+ * keep occurring until the device is reset. In order to give the higher
+ * layer software time to reset the device, we have to disable the
+ * overrun/underrun interrupts until that happens. We trust that the
+ * higher layer resets the device. We are able to get away with disabling
+ * all EMAC interrupts since the only interrupts it generates are for
+ * error conditions, and we don't care about any more errors right now.
+ */
+ XEMAC_WRITE_IIER(InstancePtr->BaseAddress, 0);
+
+ /*
+ * Invoke the error handler callback, which should result in a reset
+ * of the device by the upper layer software.
+ */
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef,
+ XST_RESET_ERROR);
+ }
+}
+
+/*****************************************************************************/
+/*
+*
+* Check the receive packet FIFO for errors. FIFO error interrupts are:
+* - Deadlock. See the XPacketFifo component for a description of deadlock on a
+* FIFO.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Although the function returns void, it can return an asynchronous error to the
+* application through the error handler. It can return XST_FIFO_ERROR if a FIFO
+* error occurred.
+*
+* @note
+*
+* This function is intended for internal use only.
+*
+******************************************************************************/
+void XEmac_CheckFifoRecvError(XEmac * InstancePtr)
+{
+ /*
+ * Although the deadlock is currently the only interrupt from a packet
+ * FIFO, make sure it is deadlocked before taking action. There is no
+ * need to clear this interrupt since it requires a reset of the device.
+ */
+ if (XPF_V200A_IS_DEADLOCKED(&InstancePtr->RecvFifo)) {
+ u32 IntrEnable;
+
+ InstancePtr->Stats.FifoErrors++;
+
+ /*
+ * Invoke the error callback function, which should result in a reset
+ * of the device by the upper layer software. We first need to disable
+ * the FIFO interrupt, since otherwise the upper layer thread that
+ * handles the reset may never run because this interrupt condition
+ * doesn't go away until a reset occurs (there is no way to ack it).
+ */
+ /* Read-modify-write: mask only the receive-FIFO source, leaving the
+ * other device-interrupt enables untouched */
+ IntrEnable = XEMAC_READ_DIER(InstancePtr->BaseAddress);
+ XEMAC_WRITE_DIER(InstancePtr->BaseAddress,
+ IntrEnable & ~XEM_IPIF_RECV_FIFO_MASK);
+
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef,
+ XST_FIFO_ERROR);
+ }
+}
+
+/*****************************************************************************/
+/*
+*
+* Check the send packet FIFO for errors. FIFO error interrupts are:
+* - Deadlock. See the XPacketFifo component for a description of deadlock on a
+* FIFO.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Although the function returns void, it can return an asynchronous error to the
+* application through the error handler. It can return XST_FIFO_ERROR if a FIFO
+* error occurred.
+*
+* @note
+*
+* This function is intended for internal use only.
+*
+******************************************************************************/
+void XEmac_CheckFifoSendError(XEmac * InstancePtr)
+{
+ /*
+ * Although the deadlock is currently the only interrupt from a packet
+ * FIFO, make sure it is deadlocked before taking action. There is no
+ * need to clear this interrupt since it requires a reset of the device.
+ */
+ if (XPF_V200A_IS_DEADLOCKED(&InstancePtr->SendFifo)) {
+ u32 IntrEnable;
+
+ InstancePtr->Stats.FifoErrors++;
+
+ /*
+ * Invoke the error callback function, which should result in a reset
+ * of the device by the upper layer software. We first need to disable
+ * the FIFO interrupt, since otherwise the upper layer thread that
+ * handles the reset may never run because this interrupt condition
+ * doesn't go away until a reset occurs (there is no way to ack it).
+ */
+ /* Read-modify-write: mask only the send-FIFO source, leaving the
+ * other device-interrupt enables untouched */
+ IntrEnable = XEMAC_READ_DIER(InstancePtr->BaseAddress);
+ XEMAC_WRITE_DIER(InstancePtr->BaseAddress,
+ IntrEnable & ~XEM_IPIF_SEND_FIFO_MASK);
+
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef,
+ XST_FIFO_ERROR);
+ }
+}
--- /dev/null
+/* $Id: xemac_intr_dma.c,v 1.1 2006/11/01 17:30:12 moleres Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_intr_dma.c
+*
+* Contains functions used in interrupt mode when configured with scatter-gather
+* DMA.
+*
+* The interrupt handler, XEmac_IntrHandlerDma(), must be connected by the user
+* to the interrupt controller.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ---------------------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00c rpm 12/05/02 New version includes support for simple DMA and the delay
+* argument to SgSend
+* 1.00c rpm 02/03/03 The XST_DMA_SG_COUNT_EXCEEDED return code was removed
+* from SetPktThreshold in the internal DMA driver. Also
+* avoided compiler warnings by initializing Result in the
+* interrupt service routines.
+* 1.00c rpm 03/26/03 Fixed a problem in the interrupt service routines where
+* the interrupt status was toggled clear after a call to
+* ErrorHandler, but if ErrorHandler reset the device the
+* toggle actually asserted the interrupt because the
+* reset had cleared it.
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.00e rmm 04/06/04 Instead of invoking once for each packet received,
+* send/recv callbacks are invoked once for all packets.
+* Added functions XEmac_GetSgRecvFreeDesc() and XEmac_Get-
+* SgSendFreeDesc().
+* 1.01a ecm 09/01/05 Added DRE support through Control words in instance which
+* are set at initialization.
+* 1.01a wgr 09/14/06 Ported to Linux 2.6
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xemac_i.h"
+#include "xio.h"
+#include "xbuf_descriptor.h"
+#include "xdma_channel.h"
+#include "xipif_v1_23_b.h" /* Uses v1.23b of the IPIF */
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void HandleDmaRecvIntr(XEmac * InstancePtr);
+static void HandleDmaSendIntr(XEmac * InstancePtr);
+static void HandleEmacDmaIntr(XEmac * InstancePtr);
+
+/*****************************************************************************/
+/**
+*
+* Send an Ethernet frame using scatter-gather DMA. The caller attaches the
+* frame to one or more buffer descriptors, then calls this function once for
+* each descriptor. The caller is responsible for allocating and setting up the
+* descriptor. An entire Ethernet frame may or may not be contained within one
+* descriptor. This function simply inserts the descriptor into the scatter-
+* gather engine's transmit list. The caller is responsible for providing mutual
+* exclusion to guarantee that a frame is contiguous in the transmit list. The
+* buffer attached to the descriptor must be 32-bit aligned if using the OPB
+* Ethernet core and 64-bit aligned if using the PLB Ethernet core.
+*
+* The driver updates the descriptor with the device control register before
+* being inserted into the transmit list. If this is the last descriptor in
+* the frame, the inserts are committed, which means the descriptors for this
+* frame are now available for transmission.
+*
+* It is assumed that the upper layer software supplies a correctly formatted
+* Ethernet frame, including the destination and source addresses, the
+* type/length field, and the data field. It is also assumed that upper layer
+* software does not append FCS at the end of the frame.
+*
+* This call is non-blocking. Notification of error or successful transmission
+* is done asynchronously through the send or error callback function.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param BdPtr is the address of a descriptor to be inserted into the transmit
+* ring.
+* @param Delay indicates whether to start the scatter-gather DMA channel
+* immediately, or whether to wait. This allows the user to build up a
+* list of more than one descriptor before starting the transmission of
+* the packets, which allows the application to keep up with DMA and have
+* a constant stream of frames being transmitted. Use XEM_SGDMA_NODELAY or
+* XEM_SGDMA_DELAY, defined in xemac.h, as the value of this argument. If
+* the user chooses to delay and build a list, the user must call this
+* function with the XEM_SGDMA_NODELAY option or call XEmac_Start() to
+* kick off the transmissions.
+*
+* @return
+*
+* - XST_SUCCESS if the buffer was successfully sent
+* - XST_DEVICE_IS_STOPPED if the Ethernet MAC has not been started yet
+* - XST_NOT_SGDMA if the device is not in scatter-gather DMA mode
+* - XST_DMA_SG_LIST_FULL if the descriptor list for the DMA channel is full
+* - XST_DMA_SG_BD_LOCKED if the DMA channel cannot insert the descriptor into
+* the list because a locked descriptor exists at the insert point
+* - XST_DMA_SG_NOTHING_TO_COMMIT if even after inserting a descriptor into the
+* list, the DMA channel believes there are no new descriptors to commit. If
+* this is ever encountered, there is likely a thread mutual exclusion problem
+* on transmit.
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+*
+* @internal
+*
+* A status that should never be returned from this function, although
+* the code is set up to handle it, is XST_DMA_SG_NO_LIST. Starting the device
+* requires a list to be created, and this function requires the device to be
+* started.
+*
+******************************************************************************/
+int XEmac_SgSend(XEmac * InstancePtr, XBufDescriptor * BdPtr, int Delay)
+{
+ int Result;
+ u32 BdControl;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BdPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Be sure the device is configured for scatter-gather DMA, then be sure
+ * it is started.
+ */
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ /*
+ * Set some descriptor control word defaults (source address increment
+ * and local destination address) and the destination address
+ * (the FIFO). These are the same for every transmit descriptor.
+ */
+ /* OR the per-instance TX control defaults into the caller's control word
+ * rather than replacing it */
+ BdControl = XBufDescriptor_GetControl(BdPtr);
+ XBufDescriptor_SetControl(BdPtr,
+ BdControl | (InstancePtr->TxDmaControlWord));
+
+ /*
+ * If the TX channel has Checksum offload enabled, do not overwrite
+ * the Data in the destination address location, it is offload related
+ * not the actual address for the data to be written to
+ */
+ if ((InstancePtr->TxDmaControlWord & XDC_DMACR_CS_OFFLOAD_MASK) == 0) {
+ XBufDescriptor_SetDestAddress(BdPtr,
+ InstancePtr->PhysAddress +
+ XEM_PFIFO_TXDATA_OFFSET);
+ }
+
+ /*
+ * Put the descriptor in the send list. The DMA component accesses data
+ * here that can also be modified in interrupt context, so a critical
+ * section is required.
+ */
+ /* NOTE: every exit path below must re-enable this global interrupt gate */
+ XIIF_V123B_GINTR_DISABLE(InstancePtr->BaseAddress);
+
+ Result = XDmaChannel_PutDescriptor(&InstancePtr->SendChannel, BdPtr);
+ if (Result != XST_SUCCESS) {
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ return Result;
+ }
+
+ /*
+ * If this is the last buffer in the frame, commit the inserts and start
+ * the DMA engine if necessary
+ */
+ if (XBufDescriptor_IsLastControl(BdPtr)) {
+ Result = XDmaChannel_CommitPuts(&InstancePtr->SendChannel);
+ if (Result != XST_SUCCESS) {
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ return Result;
+ }
+
+ if (Delay == XEM_SGDMA_NODELAY) {
+ /*
+ * Start the DMA channel. Ignore the return status since we know the
+ * list exists and has at least one entry and we don't care if the
+ * channel is already started. The DMA component accesses data here
+ * that can be modified at interrupt or task levels, so a critical
+ * section is required.
+ */
+ (void) XDmaChannel_SgStart(&InstancePtr->SendChannel);
+ }
+ }
+
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Add a descriptor, with an attached empty buffer, into the receive descriptor
+* list. The buffer attached to the descriptor must be 32-bit aligned if using
+* the OPB Ethernet core and 64-bit aligned if using the PLB Ethernet core.
+* This function is used by the upper layer software during initialization when
+* first setting up the receive descriptors, and also during reception of frames
+* to replace filled buffers with empty buffers. This function can be called
+* when the device is started or stopped. Note that it does start the scatter-
+* gather DMA engine. Although this is not necessary during initialization, it
+* is not a problem during initialization because the MAC receiver is not yet
+* started.
+*
+* The buffer attached to the descriptor must be aligned on both the front end
+* and the back end.
+*
+* Notification of received frames are done asynchronously through the receive
+* callback function.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param BdPtr is a pointer to the buffer descriptor that will be added to the
+* descriptor list.
+*
+* @return
+*
+* - XST_SUCCESS if a descriptor was successfully returned to the driver
+* - XST_NOT_SGDMA if the device is not in scatter-gather DMA mode
+* - XST_DMA_SG_LIST_FULL if the receive descriptor list is full
+* - XST_DMA_SG_BD_LOCKED if the DMA channel cannot insert the descriptor into
+* the list because a locked descriptor exists at the insert point.
+* - XST_DMA_SG_NOTHING_TO_COMMIT if even after inserting a descriptor into the
+* list, the DMA channel believes there are no new descriptors to commit.
+*
+* @internal
+*
+* A status that should never be returned from this function, although
+* the code is set up to handle it, is XST_DMA_SG_NO_LIST. Starting the device
+* requires a list to be created, and this function requires the device to be
+* started.
+*
+******************************************************************************/
+int XEmac_SgRecv(XEmac * InstancePtr, XBufDescriptor * BdPtr)
+{
+ int Result;
+ u32 BdControl;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BdPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Be sure the device is configured for scatter-gather DMA
+ */
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ /*
+ * Set some descriptor control word defaults (destination address increment
+ * and local source address) and the source address (the FIFO). These are
+ * the same for every receive descriptor.
+ */
+ /* OR the per-instance RX control defaults into the caller's control word;
+ * the source is always the receive packet FIFO */
+ BdControl = XBufDescriptor_GetControl(BdPtr);
+ XBufDescriptor_SetControl(BdPtr,
+ BdControl | (InstancePtr->RxDmaControlWord));
+ XBufDescriptor_SetSrcAddress(BdPtr,
+ InstancePtr->PhysAddress +
+ XEM_PFIFO_RXDATA_OFFSET);
+
+ /*
+ * Put the descriptor into the channel's descriptor list and commit.
+ * Although this function is likely called within interrupt context, there
+ * is the possibility that the upper layer software queues it to a task.
+ * In this case, a critical section is needed here to protect shared data
+ * in the DMA component.
+ */
+ /* NOTE: every exit path below must re-enable this global interrupt gate */
+ XIIF_V123B_GINTR_DISABLE(InstancePtr->BaseAddress);
+
+ Result = XDmaChannel_PutDescriptor(&InstancePtr->RecvChannel, BdPtr);
+ if (Result != XST_SUCCESS) {
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ return Result;
+ }
+
+ Result = XDmaChannel_CommitPuts(&InstancePtr->RecvChannel);
+ if (Result != XST_SUCCESS) {
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+ return Result;
+ }
+
+ /*
+ * Start the DMA channel. Ignore the return status since we know the list
+ * exists and has at least one entry and we don't care if the channel is
+ * already started. The DMA component accesses data here that can be
+ * modified at interrupt or task levels, so a critical section is required.
+ */
+ (void) XDmaChannel_SgStart(&InstancePtr->RecvChannel);
+
+ XIIF_V123B_GINTR_ENABLE(InstancePtr->BaseAddress);
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* The interrupt handler for the Ethernet driver when configured with scatter-
+* gather DMA.
+*
+* Get the interrupt status from the IpIf to determine the source of the
+* interrupt. The source can be: MAC, Recv Packet FIFO, Send Packet FIFO, Recv
+* DMA channel, or Send DMA channel. The packet FIFOs only interrupt during
+* "deadlock" conditions.
+*
+* @param InstancePtr is a pointer to the XEmac instance that just interrupted.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_IntrHandlerDma(void *InstancePtr)
+{
+ u32 IntrStatus;
+ /* void* signature matches the generic interrupt-connect prototype */
+ XEmac *EmacPtr = (XEmac *) InstancePtr;
+
+ EmacPtr->Stats.TotalIntrs++;
+
+ /*
+ * Get the interrupt status from the IPIF. There is no clearing of
+ * interrupts in the IPIF. Interrupts must be cleared at the source.
+ */
+ IntrStatus = XIIF_V123B_READ_DIPR(EmacPtr->BaseAddress);
+
+ /*
+ * See which type of interrupt is being requested, and service it
+ */
+ /* More than one source may be pending; each pending source is serviced
+ * in this single invocation */
+ if (IntrStatus & XEM_IPIF_RECV_DMA_MASK) { /* Receive DMA interrupt */
+ EmacPtr->Stats.RecvInterrupts++;
+ HandleDmaRecvIntr(EmacPtr);
+ }
+
+ if (IntrStatus & XEM_IPIF_SEND_DMA_MASK) { /* Send DMA interrupt */
+ EmacPtr->Stats.XmitInterrupts++;
+ HandleDmaSendIntr(EmacPtr);
+ }
+
+ if (IntrStatus & XEM_IPIF_EMAC_MASK) { /* MAC interrupt */
+ EmacPtr->Stats.EmacInterrupts++;
+ HandleEmacDmaIntr(EmacPtr);
+ }
+
+ if (IntrStatus & XEM_IPIF_RECV_FIFO_MASK) { /* Receive FIFO interrupt */
+ EmacPtr->Stats.RecvInterrupts++;
+ XEmac_CheckFifoRecvError(EmacPtr);
+ }
+
+ if (IntrStatus & XEM_IPIF_SEND_FIFO_MASK) { /* Send FIFO interrupt */
+ EmacPtr->Stats.XmitInterrupts++;
+ XEmac_CheckFifoSendError(EmacPtr);
+ }
+
+ if (IntrStatus & XIIF_V123B_ERROR_MASK) {
+ /*
+ * An error occurred internal to the IPIF. This is more of a debug and
+ * integration issue rather than a production error. Don't do anything
+ * other than clear it, which provides a spot for software to trap
+ * on the interrupt and begin debugging.
+ */
+ XIIF_V123B_WRITE_DISR(EmacPtr->BaseAddress,
+ XIIF_V123B_ERROR_MASK);
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the packet count threshold for this device. The device must be stopped
+* before setting the threshold. The packet count threshold is used for interrupt
+* coalescing, which reduces the frequency of interrupts from the device to the
+* processor. In this case, the scatter-gather DMA engine only interrupts when
+* the packet count threshold is reached, instead of interrupting for each packet.
+* A packet is a generic term used by the scatter-gather DMA engine, and is
+* equivalent to an Ethernet frame in our case.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param Direction indicates the channel, send or receive, from which the
+* threshold register is read.
+* @param Threshold is the value of the packet threshold count used during
+* interrupt coalescing. A value of 0 disables the use of packet threshold
+* by the hardware.
+*
+* @return
+*
+* - XST_SUCCESS if the threshold was successfully set
+* - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA
+* - XST_DEVICE_IS_STARTED if the device has not been stopped
+* - XST_INVALID_PARAM if the Direction parameter is invalid. Turning on
+* asserts would also catch this error.
+*
+* @note
+*
+* The packet threshold could be set to larger than the number of descriptors
+* allocated to the DMA channel. In this case, the wait bound will take over
+* and always indicate data arrival. There was a check in this function that
+* returned an error if the threshold was larger than the number of descriptors,
+* but that was removed because users would then have to set the threshold
+* only after they set descriptor space, which is an order dependency that
+* caused confusion.
+*
+******************************************************************************/
+int XEmac_SetPktThreshold(XEmac * InstancePtr, u32 Direction, u8 Threshold)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(Direction == XEM_SEND || Direction == XEM_RECV);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Be sure device is configured for scatter-gather DMA and has been stopped
+ */
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STARTED;
+ }
+
+ /*
+ * Based on the direction, set the packet threshold in the
+	 * corresponding DMA channel component. An invalid Direction
+	 * results in XST_INVALID_PARAM being returned.
+ */
+ switch (Direction) {
+ case XEM_SEND:
+ return XDmaChannel_SetPktThreshold(&InstancePtr->SendChannel,
+ Threshold);
+
+ case XEM_RECV:
+ return XDmaChannel_SetPktThreshold(&InstancePtr->RecvChannel,
+ Threshold);
+
+ default:
+ return XST_INVALID_PARAM;
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Get the value of the packet count threshold for this driver/device. The packet
+* count threshold is used for interrupt coalescing, which reduces the frequency
+* of interrupts from the device to the processor. In this case, the
+* scatter-gather DMA engine only interrupts when the packet count threshold is
+* reached, instead of interrupting for each packet. A packet is a generic term
+* used by the scatter-gather DMA engine, and is equivalent to an Ethernet frame
+* in our case.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param Direction indicates the channel, send or receive, from which the
+* threshold register is read.
+* @param ThreshPtr is a pointer to the byte into which the current value of the
+* packet threshold register will be copied. An output parameter. A value
+* of 0 indicates the use of packet threshold by the hardware is disabled.
+*
+* @return
+*
+* - XST_SUCCESS if the packet threshold was retrieved successfully
+* - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA
+* - XST_INVALID_PARAM if the Direction parameter is invalid. Turning on
+* asserts would also catch this error.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmac_GetPktThreshold(XEmac * InstancePtr, u32 Direction, u8 *ThreshPtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(Direction == XEM_SEND || Direction == XEM_RECV);
+ XASSERT_NONVOID(ThreshPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ /*
+ * Based on the direction, return the packet threshold set in the
+	 * corresponding DMA channel component. An invalid Direction
+	 * results in XST_INVALID_PARAM being returned instead of a
+	 * threshold value.
+ */
+ switch (Direction) {
+ case XEM_SEND:
+ *ThreshPtr =
+ XDmaChannel_GetPktThreshold(&InstancePtr->SendChannel);
+ break;
+
+ case XEM_RECV:
+ *ThreshPtr =
+ XDmaChannel_GetPktThreshold(&InstancePtr->RecvChannel);
+ break;
+
+ default:
+ return XST_INVALID_PARAM;
+ }
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the packet wait bound timer for this driver/device. The device must be
+* stopped before setting the timer value. The packet wait bound is used during
+* interrupt coalescing to trigger an interrupt when not enough packets have been
+* received to reach the packet count threshold. A packet is a generic term used
+* by the scatter-gather DMA engine, and is equivalent to an Ethernet frame in
+* our case. The timer is in milliseconds.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param Direction indicates the channel, send or receive, from which the
+* threshold register is read.
+* @param TimerValue is the value of the packet wait bound used during interrupt
+* coalescing. It is in milliseconds in the range 0 - 1023. A value of 0
+* disables the packet wait bound timer.
+*
+* @return
+*
+* - XST_SUCCESS if the packet wait bound was set successfully
+* - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA
+* - XST_DEVICE_IS_STARTED if the device has not been stopped
+* - XST_INVALID_PARAM if the Direction parameter is invalid. Turning on
+* asserts would also catch this error.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmac_SetPktWaitBound(XEmac * InstancePtr, u32 Direction, u32 TimerValue)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(Direction == XEM_SEND || Direction == XEM_RECV);
+ XASSERT_NONVOID(TimerValue <= XEM_SGDMA_MAX_WAITBOUND);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Be sure device is configured for scatter-gather DMA and has been stopped
+ */
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STARTED;
+ }
+
+ /*
+ * Based on the direction, set the packet wait bound in the
+	 * corresponding DMA channel component. An invalid Direction
+	 * results in XST_INVALID_PARAM being returned.
+ */
+ switch (Direction) {
+ case XEM_SEND:
+ XDmaChannel_SetPktWaitBound(&InstancePtr->SendChannel,
+ TimerValue);
+ break;
+
+ case XEM_RECV:
+ XDmaChannel_SetPktWaitBound(&InstancePtr->RecvChannel,
+ TimerValue);
+ break;
+
+ default:
+ return XST_INVALID_PARAM;
+ }
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Get the packet wait bound timer for this driver/device. The packet wait bound
+* is used during interrupt coalescing to trigger an interrupt when not enough
+* packets have been received to reach the packet count threshold. A packet is a
+* generic term used by the scatter-gather DMA engine, and is equivalent to an
+* Ethernet frame in our case. The timer is in milliseconds.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param Direction indicates the channel, send or receive, from which the
+* threshold register is read.
+* @param WaitPtr is a pointer to the byte into which the current value of the
+* packet wait bound register will be copied. An output parameter. Units
+* are in milliseconds in the range 0 - 1023. A value of 0 indicates the
+* packet wait bound timer is disabled.
+*
+* @return
+*
+* - XST_SUCCESS if the packet wait bound was retrieved successfully
+* - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA
+* - XST_INVALID_PARAM if the Direction parameter is invalid. Turning on
+* asserts would also catch this error.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmac_GetPktWaitBound(XEmac * InstancePtr, u32 Direction, u32 *WaitPtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(Direction == XEM_SEND || Direction == XEM_RECV);
+ XASSERT_NONVOID(WaitPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ /*
+ * Based on the direction, return the packet wait bound set in the
+	 * corresponding DMA channel component. An invalid Direction
+	 * results in XST_INVALID_PARAM being returned instead of a
+	 * wait bound value.
+ */
+ switch (Direction) {
+ case XEM_SEND:
+ *WaitPtr =
+ XDmaChannel_GetPktWaitBound(&InstancePtr->SendChannel);
+ break;
+
+ case XEM_RECV:
+ *WaitPtr =
+ XDmaChannel_GetPktWaitBound(&InstancePtr->RecvChannel);
+ break;
+
+ default:
+ return XST_INVALID_PARAM;
+ }
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Give the driver the memory space to be used for the scatter-gather DMA
+* receive descriptor list. This function should only be called once, during
+* initialization of the Ethernet driver. The memory space must be big enough
+* to hold some number of descriptors, depending on the needs of the system.
+* The xemac.h file defines minimum and default numbers of descriptors
+* which can be used to allocate this memory space.
+*
+* The memory space must be 32-bit aligned. An assert will occur if asserts
+* are turned on and the memory is not aligned.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param MemoryPtr is a pointer to the aligned memory.
+* @param ByteCount is the length, in bytes, of the memory space. PhyPtr is the corresponding physical address of the memory (passed through to the DMA channel).
+*
+* @return
+*
+* - XST_SUCCESS if the space was initialized successfully
+* - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA
+* - XST_DMA_SG_LIST_EXISTS if this list space has already been created
+*
+* @note
+*
+* If the device is configured for scatter-gather DMA, this function must be
+* called AFTER the XEmac_Initialize() function because the DMA channel
+* components must be initialized before the memory space is set.
+*
+******************************************************************************/
+int XEmac_SetSgRecvSpace(XEmac * InstancePtr, u32 *MemoryPtr,
+ u32 ByteCount, void *PhyPtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(MemoryPtr != NULL);
+ XASSERT_NONVOID(ByteCount != 0);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ return XDmaChannel_CreateSgList(&InstancePtr->RecvChannel, MemoryPtr,
+ ByteCount, PhyPtr);
+}
+
+/*****************************************************************************/
+/**
+*
+* Give the driver the memory space to be used for the scatter-gather DMA
+* transmit descriptor list. This function should only be called once, during
+* initialization of the Ethernet driver. The memory space must be big enough
+* to hold some number of descriptors, depending on the needs of the system.
+* The xemac.h file defines minimum and default numbers of descriptors
+* which can be used to allocate this memory space.
+*
+* The memory space must be 32-bit aligned. An assert will occur if asserts
+* are turned on and the memory is not aligned.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param MemoryPtr is a pointer to the aligned memory.
+* @param ByteCount is the length, in bytes, of the memory space. PhyPtr is the corresponding physical address of the memory (passed through to the DMA channel).
+*
+* @return
+*
+* - XST_SUCCESS if the space was initialized successfully
+* - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA
+* - XST_DMA_SG_LIST_EXISTS if this list space has already been created
+*
+* @note
+*
+* If the device is configured for scatter-gather DMA, this function must be
+* called AFTER the XEmac_Initialize() function because the DMA channel
+* components must be initialized before the memory space is set.
+*
+******************************************************************************/
+int XEmac_SetSgSendSpace(XEmac * InstancePtr, u32 *MemoryPtr,
+ u32 ByteCount, void *PhyPtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(MemoryPtr != NULL);
+ XASSERT_NONVOID(ByteCount != 0);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ if (!XEmac_mIsSgDma(InstancePtr)) {
+ return XST_NOT_SGDMA;
+ }
+
+ return XDmaChannel_CreateSgList(&InstancePtr->SendChannel, MemoryPtr,
+ ByteCount, PhyPtr);
+}
+
+/*****************************************************************************/
+/**
+*
+* Return the number of free buffer descriptor slots that can be added to the
+* send descriptor ring with XEmac_SgSend() before filling it up.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* - The number of descriptors that can be given to the HW with XEmac_SgSend()
+* - 0 if no room is left or the device is not configured for SG DMA
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+unsigned XEmac_GetSgSendFreeDesc(XEmac * InstancePtr)
+{
+ unsigned Slots;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ Slots = InstancePtr->SendChannel.TotalDescriptorCount -
+ InstancePtr->SendChannel.ActiveDescriptorCount;
+
+ return Slots;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Return the number of free buffer descriptor slots that can be added to the
+* receive descriptor ring with XEmac_SgRecv() before filling it up.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* - The number of descriptors that can be given to the HW with XEmac_SgRecv()
+* - 0 if no room is left or the device is not configured for SG DMA
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+unsigned XEmac_GetSgRecvFreeDesc(XEmac * InstancePtr)
+{
+ unsigned Slots;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ Slots = InstancePtr->RecvChannel.TotalDescriptorCount -
+ InstancePtr->RecvChannel.ActiveDescriptorCount;
+
+ return Slots;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Set the callback function for handling received frames in scatter-gather DMA
+* mode. The upper layer software should call this function during
+* initialization. The callback is called once per frame received. The head of
+* a descriptor list is passed in along with the number of descriptors in the
+* list. Before leaving the callback, the upper layer software should attach a
+* new buffer to each descriptor in the list.
+*
+* The callback is invoked by the driver within interrupt context, so it needs
+* to do its job quickly. Sending the received frame up the protocol stack
+* should be done at task-level. If there are other potentially slow operations
+* within the callback, these too should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the adapter in
+* the callback. This helps the adapter correlate the callback to a
+* particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_SetSgRecvHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_SgHandler FuncPtr)
+{
+ /*
+ * Asserted IsDmaSg here instead of run-time check because there is really
+ * no ill-effects of setting these when not configured for scatter-gather.
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(XEmac_mIsSgDma(InstancePtr));
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ InstancePtr->SgRecvHandler = FuncPtr;
+ InstancePtr->SgRecvRef = CallBackRef;
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the callback function for handling confirmation of transmitted frames in
+* scatter-gather DMA mode. The upper layer software should call this function
+* during initialization. The callback is called once per frame sent. The head
+* of a descriptor list is passed in along with the number of descriptors in
+* the list. The callback is responsible for freeing buffers attached to these
+* descriptors.
+*
+* The callback is invoked by the driver within interrupt context, so it needs
+* to do its job quickly. If there are potentially slow operations within the
+* callback, these should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the adapter in
+* the callback. This helps the adapter correlate the callback to a
+* particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_SetSgSendHandler(XEmac * InstancePtr, void *CallBackRef,
+ XEmac_SgHandler FuncPtr)
+{
+ /*
+ * Asserted IsDmaSg here instead of run-time check because there is really
+ * no ill-effects of setting these when not configured for scatter-gather.
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(XEmac_mIsSgDma(InstancePtr));
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ InstancePtr->SgSendHandler = FuncPtr;
+ InstancePtr->SgSendRef = CallBackRef;
+}
+
+/*****************************************************************************/
+/*
+*
+* Handle an interrupt from the DMA receive channel. DMA interrupts are:
+*
+* - DMA error. DMA encountered a bus error or timeout. This is a fatal error
+* that requires reset of the channel. The driver calls the error handler
+* of the upper layer software with an error code indicating the device should
+* be reset.
+* - Packet count threshold reached. For scatter-gather operations, indicates
+* the threshold for the number of packets not serviced by software has been
+* reached. The driver behaves as follows:
+* - Get the value of the packet counter, which tells us how many packets
+* are ready to be serviced
+* - For each packet
+* - For each descriptor, remove it from the scatter-gather list
+* - Check for the last descriptor in the frame, and if set
+* - Bump frame statistics
+* - Decrement the packet counter by one
+* - Call the scatter-gather receive callback function
+* Note that there are no receive errors reported in the status word of
+* the buffer descriptor. If receive errors occur, the MAC drops the
+* packet, and we only find out about the errors through various error
+* count registers.
+* - Packet wait bound reached. For scatter-gather, indicates the time to wait
+* for the next packet has expired. The driver follows the same logic as when
+* the packet count threshold interrupt is received.
+* - Scatter-gather end acknowledge. Hardware has reached the end of the
+* descriptor list. The driver follows the same logic as when the packet count
+* threshold interrupt is received. In addition, the driver restarts the DMA
+* scatter-gather channel in case there are newly inserted descriptors.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Although the function returns void, there are asynchronous errors that can
+* be generated (by calling the ErrorHandler) from this function. These are:
+* - XST_DMA_SG_LIST_EMPTY indicates we tried to get a buffer descriptor from the
+* DMA channel, but there was not one ready for software.
+* - XST_DMA_ERROR indicates a DMA bus error or timeout occurred. This is a fatal
+* error that requires reset.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void HandleDmaRecvIntr(XEmac * InstancePtr)
+{
+ int Result;
+ u32 IntrStatus;
+ u32 NumBds;
+ u32 PacketsLeft;
+ XBufDescriptor *BdHeadPtr;
+ XBufDescriptor *BdPtr;
+
+ /*
+ * Read the interrupt status
+ */
+ IntrStatus = XDmaChannel_GetIntrStatus(&InstancePtr->RecvChannel);
+
+ /*
+	 * For packet threshold or wait bound interrupts, process descriptors. Also
+ * process descriptors on a SG end acknowledgement, which means the end of
+ * the descriptor list has been reached by the hardware. For receive, this
+ * is potentially trouble since it means the descriptor list is full,
+ * unless software can process enough packets quickly enough so the
+ * hardware has room to put new packets.
+ */
+ if (IntrStatus & (XDC_IXR_PKT_THRESHOLD_MASK |
+ XDC_IXR_PKT_WAIT_BOUND_MASK | XDC_IXR_SG_END_MASK)) {
+ /* Get the number of packets that need processing */
+ PacketsLeft =
+ XDmaChannel_GetPktCount(&InstancePtr->RecvChannel);
+
+ if (PacketsLeft) {
+ /* Get the buffer descriptor at the head of the list */
+ Result = XDmaChannel_GetDescriptor(&InstancePtr->
+ RecvChannel,
+ &BdHeadPtr);
+ BdPtr = BdHeadPtr;
+ NumBds = 0;
+
+ /* Loop until all packets have been pulled or an error occurs */
+ while (1) {
+ NumBds++;
+
+ /*
+ * An error getting a buffer descriptor from the list.
+ * This should not happen, but if it does, report it to
+ * the error callback and break out of the loop to service
+ * other interrupts.
+ */
+ if (Result != XST_SUCCESS) {
+ InstancePtr->ErrorHandler(InstancePtr->
+ ErrorRef,
+ Result);
+ break;
+ }
+
+ /* Bump statistics */
+ InstancePtr->Stats.RecvBytes +=
+ XBufDescriptor_GetLength(BdPtr);
+
+ /* Have all BDs been read for this packet */
+ if (XBufDescriptor_IsLastStatus(BdPtr)) {
+ /*
+ * Decrement the packet count register to reflect the fact
+ * we just processed a packet
+ */
+ XDmaChannel_DecrementPktCount
+ (&InstancePtr->RecvChannel);
+
+ /* Bump statistics */
+ InstancePtr->Stats.RecvFrames++;
+
+ /* Test loop exit condition */
+ if (--PacketsLeft == 0) {
+ break;
+ }
+ }
+
+ /* Get the next buffer descriptor in the list */
+ Result = XDmaChannel_GetDescriptor
+ (&InstancePtr->RecvChannel, &BdPtr);
+ } /* while */
+
+ /*
+			 * Only invoke the callback if no error occurred inside the
+			 * while loop above; on error the callback is skipped so the
+			 * other interrupts can still be serviced.
+ */
+ if (Result == XST_SUCCESS) {
+ /*
+ * Make the callback to the upper layers, passing it the first
+ * descriptor in the first packet and the number of descriptors
+ * in the list.
+ */
+ InstancePtr->SgRecvHandler(InstancePtr->
+ SgRecvRef, BdHeadPtr,
+ NumBds);
+ }
+ } /* if (PacketsLeft) */
+
+ /*
+ * If the interrupt was an end-ack, check the descriptor list again to
+ * see if it is empty. If not, go ahead and restart the scatter-gather
+ * channel. This is to fix a possible race condition where, on receive,
+ * the driver attempted to start a scatter-gather channel that was
+ * already started, which resulted in no action from the XDmaChannel
+ * component. But, just after the XDmaChannel component saw that the
+ * hardware was already started, the hardware stopped because it
+ * reached the end of the list. In that case, this interrupt is
+ * generated and we can restart the hardware here.
+ */
+ if (IntrStatus & XDC_IXR_SG_END_MASK) {
+ /*
+ * Ignore the return status since we know the list exists and we
+ * don't care if the list is empty or the channel is already started.
+ */
+ (void) XDmaChannel_SgStart(&InstancePtr->RecvChannel);
+ }
+ }
+
+ /*
+ * All interrupts are handled (except the error below) so acknowledge
+ * (clear) the interrupts by writing the value read above back to the status
+ * register. The packet count interrupt must be acknowledged after the
+ * decrement, otherwise it will come right back. We clear the interrupts
+ * before we handle the error interrupt because the ErrorHandler should
+ * result in a reset, which clears the interrupt status register. So we
+ * don't want to toggle the interrupt back on by writing the interrupt
+ * status register with an old value after a reset.
+ */
+ XDmaChannel_SetIntrStatus(&InstancePtr->RecvChannel, IntrStatus);
+
+ /*
+ * Check for DMA errors and call the error callback function if an error
+ * occurred (DMA bus or timeout error), which should result in a reset of
+ * the device by the upper layer software.
+ */
+ if (IntrStatus & XDC_IXR_DMA_ERROR_MASK) {
+ InstancePtr->Stats.DmaErrors++;
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef, XST_DMA_ERROR);
+ }
+}
+
+/*****************************************************************************/
+/*
+*
+* Handle an interrupt from the DMA send channel. DMA interrupts are:
+*
+* - DMA error. DMA encountered a bus error or timeout. This is a fatal error
+* that requires reset of the channel. The driver calls the error handler
+* of the upper layer software with an error code indicating the device should
+* be reset.
+* - Packet count threshold reached. For scatter-gather operations, indicates
+* the threshold for the number of packets not serviced by software has been
+* reached. The driver behaves as follows:
+* - Get the value of the packet counter, which tells us how many packets
+* are ready to be serviced
+* - For each packet
+* - For each descriptor, remove it from the scatter-gather list
+* - Check for the last descriptor in the frame, and if set
+* - Bump frame statistics
+* - Decrement the packet counter by one
+* - Call the scatter-gather send callback function
+* Note that transmit errors (excess deferral and late collision) are
+* reported in the device status word of the first buffer descriptor of
+* each frame; these are counted in the driver statistics rather than
+* returned to the caller.
+* - Packet wait bound reached. For scatter-gather, indicates the time to wait
+* for the next packet has expired. The driver follows the same logic as when
+* the packet count threshold interrupt is received.
+* - Scatter-gather end acknowledge. Hardware has reached the end of the
+* descriptor list. The driver follows the same logic as when the packet count
+* threshold interrupt is received. In addition, the driver restarts the DMA
+* scatter-gather channel in case there are newly inserted descriptors.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* Although the function returns void, there are asynchronous errors
+* that can be generated from this function. These are:
+* - XST_DMA_SG_LIST_EMPTY indicates we tried to get a buffer descriptor from
+* the DMA channel, but there was not one ready for software.
+* - XST_DMA_ERROR indicates a DMA bus error or timeout occurred. This is a
+* fatal error that requires reset.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void HandleDmaSendIntr(XEmac * InstancePtr)
+{
+ int Result;
+ u32 IntrStatus;
+ u32 NumBds;
+ u32 PacketsLeft;
+ u32 XmitStatus;
+ int PacketStart;
+ XBufDescriptor *BdHeadPtr;
+ XBufDescriptor *BdPtr;
+
+ /*
+ * Read the interrupt status
+ */
+ IntrStatus = XDmaChannel_GetIntrStatus(&InstancePtr->SendChannel);
+
+ /*
+ * For packet threshold or wait bound interrupt, process descriptors. Also
+ * process descriptors on a SG end acknowledgement, which means the end of
+ * the descriptor list has been reached by the hardware. For transmit,
+ * this is a normal condition during times of light traffic. In fact, the
+ * wait bound interrupt may be masked for transmit since the end-ack would
+ * always occur before the wait bound expires.
+ */
+ if (IntrStatus & (XDC_IXR_PKT_THRESHOLD_MASK |
+ XDC_IXR_PKT_WAIT_BOUND_MASK | XDC_IXR_SG_END_MASK)) {
+ /* Get the number of packets that need processing */
+ PacketsLeft =
+ XDmaChannel_GetPktCount(&InstancePtr->SendChannel);
+
+ if (PacketsLeft) {
+ /* Get the buffer descriptor at the head of the list */
+ Result = XDmaChannel_GetDescriptor(&InstancePtr->
+ SendChannel,
+ &BdHeadPtr);
+ BdPtr = BdHeadPtr;
+ NumBds = 0;
+ PacketStart = 1;
+
+ /* Loop until all packets have been pulled or an error occurs */
+ while (1) {
+ NumBds++;
+
+ /*
+ * An error getting a buffer descriptor from the list.
+ * This should not happen, but if it does, report it to
+ * the error callback and break out of the loop to service
+ * other interrupts.
+ */
+ if (Result != XST_SUCCESS) {
+ InstancePtr->ErrorHandler(InstancePtr->
+ ErrorRef,
+ Result);
+ break;
+ }
+
+ /* Bump statistics */
+ InstancePtr->Stats.XmitBytes +=
+ XBufDescriptor_GetLength(BdPtr);
+
+ /* If 1st BD in a packet, then check xmit status */
+ if (PacketStart) {
+ XmitStatus =
+ XBufDescriptor_GetDeviceStatus
+ (BdPtr);
+ if (XmitStatus &
+ XEM_TSR_EXCESS_DEFERRAL_MASK) {
+ InstancePtr->Stats.
+ XmitExcessDeferral++;
+ }
+
+ if (XmitStatus &
+ XEM_TSR_LATE_COLLISION_MASK) {
+ InstancePtr->Stats.
+ XmitLateCollisionErrors++;
+ }
+
+ PacketStart = 0;
+ }
+
+ /* Have all BDs been read for this packet */
+ if (XBufDescriptor_IsLastStatus(BdPtr)) {
+ /*
+ * Decrement the packet count register to reflect the fact
+ * we just processed a packet
+ */
+ XDmaChannel_DecrementPktCount
+ (&InstancePtr->SendChannel);
+
+ /* Bump statistics */
+ InstancePtr->Stats.XmitFrames++;
+
+ /* Test loop exit condition */
+ if (--PacketsLeft == 0) {
+ break;
+ }
+
+ /* Next BD will mark the beginning of a new packet */
+ PacketStart = 1;
+ }
+
+ /* Get the next buffer descriptor in the list */
+ Result = XDmaChannel_GetDescriptor
+ (&InstancePtr->SendChannel, &BdPtr);
+ } /* while */
+
+ /*
+			 * Only invoke the callback if no error occurred inside the
+			 * while loop above; on error the callback is skipped so the
+			 * other interrupts can still be serviced.
+ */
+ if (Result == XST_SUCCESS) {
+ /*
+ * Make the callback to the upper layers, passing it the first
+ * descriptor in the first packet and the number of descriptors
+ * in the list.
+ */
+ InstancePtr->SgSendHandler(InstancePtr->
+ SgSendRef, BdHeadPtr,
+ NumBds);
+ }
+ } /* if (PacketsLeft) */
+
+ /*
+ * If the interrupt was an end-ack, check the descriptor list again to
+ * see if it is empty. If not, go ahead and restart the scatter-gather
+ * channel. This is to fix a possible race condition where, on transmit,
+ * the driver attempted to start a scatter-gather channel that was
+ * already started, which resulted in no action from the XDmaChannel
+ * component. But, just after the XDmaChannel component saw that the
+ * hardware was already started, the hardware stopped because it
+ * reached the end of the list. In that case, this interrupt is
+ * generated and we can restart the hardware here.
+ */
+ if (IntrStatus & XDC_IXR_SG_END_MASK) {
+ /*
+ * Ignore the return status since we know the list exists and we
+ * don't care if the list is empty or the channel is already started.
+ */
+ (void) XDmaChannel_SgStart(&InstancePtr->SendChannel);
+ }
+ }
+
+ /*
+ * All interrupts are handled (except the error below) so acknowledge
+ * (clear) the interrupts by writing the value read above back to the status
+ * register. The packet count interrupt must be acknowledged after the
+ * decrement, otherwise it will come right back. We clear the interrupts
+ * before we handle the error interrupt because the ErrorHandler should
+ * result in a reset, which clears the interrupt status register. So we
+ * don't want to toggle the interrupt back on by writing the interrupt
+ * status register with an old value after a reset.
+ */
+ XDmaChannel_SetIntrStatus(&InstancePtr->SendChannel, IntrStatus);
+
+ /*
+ * Check for DMA errors and call the error callback function if an error
+ * occurred (DMA bus or timeout error), which should result in a reset of
+ * the device by the upper layer software.
+ */
+ if (IntrStatus & XDC_IXR_DMA_ERROR_MASK) {
+ InstancePtr->Stats.DmaErrors++;
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef, XST_DMA_ERROR);
+ }
+}
+
+/*****************************************************************************/
+/*
+*
+* Handle an interrupt from the Ethernet MAC when configured with scatter-gather
+* DMA. The only interrupts handled in this case are errors.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void HandleEmacDmaIntr(XEmac * InstancePtr)
+{
+	u32 IntrStatus;	/* snapshot of the IPIF interrupt status register */
+
+	/*
+	 * When configured with DMA, the EMAC generates interrupts only when errors
+	 * occur. We clear the interrupts immediately so that any latched status
+	 * interrupt bits will reflect the true status of the device, and so any
+	 * pulsed interrupts (non-status) generated during the Isr will not be lost.
+	 */
+	IntrStatus = XIIF_V123B_READ_IISR(InstancePtr->BaseAddress);
+	XIIF_V123B_WRITE_IISR(InstancePtr->BaseAddress, IntrStatus);
+
+	/*
+	 * Check the MAC for errors (the error handler callback is invoked from
+	 * within XEmac_CheckEmacError if any error bits are set in the snapshot)
+	 */
+	XEmac_CheckEmacError(InstancePtr, IntrStatus);
+}
--- /dev/null
+/* $Id: xemac_intr_fifo.c,v 1.2 2007/05/15 00:52:28 wre Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_intr_fifo.c
+*
+* Contains functions related to interrupt mode using direct FIFO I/O or simple
+* DMA. The driver uses simple DMA if the device is configured with DMA,
+* otherwise it uses direct FIFO access.
+*
+* The interrupt handler, XEmac_IntrHandlerFifo(), must be connected by the user
+* to the interrupt controller.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00c rpm 04/01/03 Added check in FifoSend for room in the data FIFO
+* before starting a simple DMA transfer.
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xemac_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void HandleEmacFifoIntr(XEmac * InstancePtr);
+
+/*****************************************************************************/
+/**
+*
+* Send an Ethernet frame using direct FIFO I/O or simple DMA with interrupts.
+* The caller provides a contiguous-memory buffer and its length. The buffer
+* must be 32-bit aligned. If using simple DMA and the PLB 10/100 Ethernet core,
+* the buffer must be 64-bit aligned. The callback function set by using
+* SetFifoSendHandler is invoked when the transmission is complete.
+*
+* It is assumed that the upper layer software supplies a correctly formatted
+* Ethernet frame, including the destination and source addresses, the
+* type/length field, and the data field.
+*
+* If the device is configured with DMA, simple DMA will be used to transfer
+* the buffer from memory to the Emac. This means that this buffer should not
+* be cached. See the comment section "Simple DMA" in xemac.h for more
+* information.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param BufPtr is a pointer to a aligned buffer containing the Ethernet
+* frame to be sent.
+* @param ByteCount is the size of the Ethernet frame.
+*
+* @return
+*
+* - XST_SUCCESS if the frame was successfully sent. An interrupt is generated
+* when the EMAC transmits the frame and the driver calls the callback set
+* with XEmac_SetFifoSendHandler()
+* - XST_DEVICE_IS_STOPPED if the device has not yet been started
+* - XST_NOT_INTERRUPT if the device is not in interrupt mode
+* - XST_FIFO_NO_ROOM if there is no room in the FIFO for this frame
+* - XST_DEVICE_BUSY if configured for simple DMA and the DMA engine is busy
+* - XST_DMA_ERROR if an error occurred during the DMA transfer (simple DMA).
+* The user should treat this as a fatal error that requires a reset of the
+* EMAC device.
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+*
+* @internal
+*
+* The Ethernet MAC uses FIFOs behind its length and status registers. For this
+* reason, it is important to keep the length, status, and data FIFOs in sync
+* when reading or writing to them.
+*
+******************************************************************************/
+int XEmac_FifoSend(XEmac * InstancePtr, u8 *BufPtr, u32 ByteCount)
+{
+	int Result;
+	volatile u32 StatusReg;	/* volatile: re-read each DMA poll iteration */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(BufPtr != NULL);
+	XASSERT_NONVOID(ByteCount > XEM_HDR_SIZE);	/* send at least 1 byte */
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/*
+	 * Be sure the device is configured for interrupt mode and it is started
+	 */
+	if (InstancePtr->IsPolled) {
+		return XST_NOT_INTERRUPT;
+	}
+
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return XST_DEVICE_IS_STOPPED;
+	}
+
+	/*
+	 * Before writing to the data FIFO, make sure the length FIFO is not
+	 * full. The data FIFO might not be full yet even though the length FIFO
+	 * is. This avoids an overrun condition on the length FIFO and keeps the
+	 * FIFOs in sync.
+	 */
+	StatusReg = XEMAC_READ_IISR(InstancePtr->BaseAddress);
+	if (StatusReg & XEM_EIR_XMIT_LFIFO_FULL_MASK) {
+		return XST_FIFO_NO_ROOM;
+	}
+
+	/*
+	 * Send either by directly writing to the FIFOs or using the DMA engine
+	 */
+	if (!XEmac_mIsDma(InstancePtr)) {
+		/*
+		 * This is a non-blocking write. The packet FIFO returns an error if there
+		 * is not enough room in the FIFO for this frame.
+		 */
+		Result = XPacketFifoV200a_Write(&InstancePtr->SendFifo, BufPtr,
+						ByteCount);
+		if (Result != XST_SUCCESS) {
+			return Result;
+		}
+	}
+	else {
+		u32 Vacancy;
+
+		/*
+		 * Need to make sure there is room in the data FIFO for the packet
+		 * before trying to DMA into it. Get the vacancy count (in words)
+		 * and make sure the packet will fit.
+		 * NOTE(review): assumes XPF_V200A_GET_COUNT yields free space (in
+		 * 32-bit words) for a transmit FIFO -- confirm against the v2.00a
+		 * packet FIFO driver documentation.
+		 */
+		Vacancy = XPF_V200A_GET_COUNT(&InstancePtr->SendFifo);
+		if ((Vacancy * sizeof(u32)) < ByteCount) {
+			return XST_FIFO_NO_ROOM;
+		}
+
+		/*
+		 * Check the DMA engine to make sure it is not already busy
+		 */
+		if (XDmaChannel_GetStatus(&InstancePtr->SendChannel) &
+		    XDC_DMASR_BUSY_MASK) {
+			return XST_DEVICE_BUSY;
+		}
+
+		/*
+		 * Set the DMA control register up properly
+		 */
+		XDmaChannel_SetControl(&InstancePtr->SendChannel,
+				       XDC_DMACR_SOURCE_INCR_MASK |
+				       XDC_DMACR_DEST_LOCAL_MASK |
+				       XDC_DMACR_SG_DISABLE_MASK);
+
+		/*
+		 * Now transfer the data from the buffer to the FIFO
+		 */
+		XDmaChannel_Transfer(&InstancePtr->SendChannel, (u32 *) BufPtr,
+				     (u32 *) (InstancePtr->BaseAddress +
+					      XEM_PFIFO_TXDATA_OFFSET),
+				     ByteCount);
+
+		/*
+		 * Poll here waiting for DMA to be not busy. We think this will
+		 * typically be a single read since DMA should be ahead of the SW.
+		 * NOTE(review): busy-wait with no timeout; a wedged DMA engine
+		 * would hang the caller here.
+		 */
+		do {
+			StatusReg =
+				XDmaChannel_GetStatus(&InstancePtr->
+						      SendChannel);
+		}
+		while (StatusReg & XDC_DMASR_BUSY_MASK);
+
+		/* Return an error if there was a problem with DMA */
+		if ((StatusReg & XDC_DMASR_BUS_ERROR_MASK) ||
+		    (StatusReg & XDC_DMASR_BUS_TIMEOUT_MASK)) {
+			InstancePtr->Stats.DmaErrors++;
+			return XST_DMA_ERROR;
+		}
+	}
+
+	/*
+	 * Set the MAC's transmit packet length register to tell it to transmit
+	 */
+	XIo_Out32(InstancePtr->BaseAddress + XEM_TPLR_OFFSET, ByteCount);
+
+	/*
+	 * Bump stats here instead of the Isr since we know the byte count
+	 * here but would have to save it in the instance in order to know the
+	 * byte count at interrupt time.
+	 */
+	InstancePtr->Stats.XmitFrames++;
+	InstancePtr->Stats.XmitBytes += ByteCount;
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Receive an Ethernet frame into the buffer passed as an argument. This
+* function is called in response to the callback function for received frames
+* being called by the driver. The callback function is set up using
+* SetFifoRecvHandler, and is invoked when the driver receives an interrupt
+* indicating a received frame. The driver expects the upper layer software to
+* call this function, FifoRecv, to receive the frame. The buffer supplied
+* should be large enough to hold a maximum-size Ethernet frame.
+*
+* The buffer into which the frame will be received must be 32-bit aligned. If
+* using simple DMA and the PLB 10/100 Ethernet core, the buffer must be 64-bit
+* aligned.
+*
+* If the device is configured with DMA, simple DMA will be used to transfer
+* the buffer from the Emac to memory. This means that this buffer should not
+* be cached. See the comment section "Simple DMA" in xemac.h for more
+* information.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param BufPtr is a pointer to a aligned buffer into which the received
+* Ethernet frame will be copied.
+* @param ByteCountPtr is both an input and an output parameter. It is a pointer
+* to a 32-bit word that contains the size of the buffer on entry into
+* the function and the size the received frame on return from the
+* function.
+*
+* @return
+*
+* - XST_SUCCESS if the frame was sent successfully
+* - XST_DEVICE_IS_STOPPED if the device has not yet been started
+* - XST_NOT_INTERRUPT if the device is not in interrupt mode
+* - XST_NO_DATA if there is no frame to be received from the FIFO
+* - XST_BUFFER_TOO_SMALL if the buffer to receive the frame is too small for
+* the frame waiting in the FIFO.
+* - XST_DEVICE_BUSY if configured for simple DMA and the DMA engine is busy
+* - XST_DMA_ERROR if an error occurred during the DMA transfer (simple DMA).
+* The user should treat this as a fatal error that requires a reset of the
+* EMAC device.
+*
+* @note
+*
+* The input buffer must be big enough to hold the largest Ethernet frame.
+*
+* @internal
+*
+* The Ethernet MAC uses FIFOs behind its length and status registers. For this
+* reason, it is important to keep the length, status, and data FIFOs in sync
+* when reading or writing to them.
+*
+******************************************************************************/
+int XEmac_FifoRecv(XEmac * InstancePtr, u8 *BufPtr, u32 *ByteCountPtr)
+{
+	int Result;
+	u32 PktLength;	/* frame length popped from the MAC's length FIFO */
+	u32 StatusReg;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(BufPtr != NULL);
+	XASSERT_NONVOID(ByteCountPtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/*
+	 * Be sure the device is not configured for polled mode and it is started
+	 */
+	if (InstancePtr->IsPolled) {
+		return XST_NOT_INTERRUPT;
+	}
+
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return XST_DEVICE_IS_STOPPED;
+	}
+
+	/*
+	 * Make sure the buffer is big enough to hold the maximum frame size.
+	 * We need to do this because as soon as we read the MAC's packet length
+	 * register, which is actually a FIFO, we remove that length from the
+	 * FIFO. We do not want to read the length FIFO without also reading the
+	 * data FIFO since this would get the FIFOs out of sync. So we have to
+	 * make this restriction.
+	 */
+	if (*ByteCountPtr < XEM_MAX_FRAME_SIZE) {
+		return XST_BUFFER_TOO_SMALL;
+	}
+
+	/*
+	 * Before reading from the length FIFO, make sure the length FIFO is not
+	 * empty. We could cause an underrun error if we try to read from an
+	 * empty FIFO.
+	 */
+	StatusReg = XEMAC_READ_IISR(InstancePtr->BaseAddress);
+	if (StatusReg & XEM_EIR_RECV_LFIFO_EMPTY_MASK) {
+		/*
+		 * Clear the empty status so the next time through the current status
+		 * of the hardware is reflected (we have to do this because the status
+		 * is level in the device but latched in the interrupt status register).
+		 */
+		XEMAC_WRITE_IISR(InstancePtr->BaseAddress,
+				 XEM_EIR_RECV_LFIFO_EMPTY_MASK);
+		return XST_NO_DATA;
+	}
+
+	/*
+	 * If configured with DMA, make sure the DMA engine is not busy
+	 */
+	if (XEmac_mIsDma(InstancePtr)) {
+		if (XDmaChannel_GetStatus(&InstancePtr->RecvChannel) &
+		    XDC_DMASR_BUSY_MASK) {
+			return XST_DEVICE_BUSY;
+		}
+	}
+
+	/*
+	 * Determine, from the MAC, the length of the next packet available
+	 * in the data FIFO (there should be a non-zero length here).
+	 * Note this read pops the length FIFO, committing us to also draining
+	 * the matching data from the data FIFO below.
+	 */
+	PktLength = XIo_In32(InstancePtr->BaseAddress + XEM_RPLR_OFFSET);
+	if (PktLength == 0) {
+		return XST_NO_DATA;
+	}
+
+	/*
+	 * We assume that the MAC never has a length bigger than the largest
+	 * Ethernet frame, so no need to make another check here.
+	 *
+	 * Receive either by directly reading the FIFO or using the DMA engine
+	 */
+	if (!XEmac_mIsDma(InstancePtr)) {
+		/*
+		 * This is a non-blocking read. The FIFO returns an error if there is
+		 * not at least the requested amount of data in the FIFO.
+		 */
+		Result = XPacketFifoV200a_Read(&InstancePtr->RecvFifo, BufPtr,
+					       PktLength);
+		if (Result != XST_SUCCESS) {
+			return Result;
+		}
+	}
+	else {
+		/*
+		 * Call on DMA to transfer from the FIFO to the buffer. First set up
+		 * the DMA control register.
+		 */
+		XDmaChannel_SetControl(&InstancePtr->RecvChannel,
+				       XDC_DMACR_DEST_INCR_MASK |
+				       XDC_DMACR_SOURCE_LOCAL_MASK |
+				       XDC_DMACR_SG_DISABLE_MASK);
+
+		/*
+		 * Now transfer the data
+		 */
+		XDmaChannel_Transfer(&InstancePtr->RecvChannel,
+				     (u32 *) (InstancePtr->BaseAddress +
+					      XEM_PFIFO_RXDATA_OFFSET),
+				     (u32 *) BufPtr, PktLength);
+
+		/*
+		 * Poll here waiting for DMA to be not busy. We think this will
+		 * typically be a single read since DMA should be ahead of the SW.
+		 * NOTE(review): busy-wait with no timeout; a wedged DMA engine
+		 * would hang the caller here.
+		 */
+		do {
+			StatusReg =
+				XDmaChannel_GetStatus(&InstancePtr->
+						      RecvChannel);
+		}
+		while (StatusReg & XDC_DMASR_BUSY_MASK);
+
+		/* Return an error if there was a problem with DMA */
+		if ((StatusReg & XDC_DMASR_BUS_ERROR_MASK) ||
+		    (StatusReg & XDC_DMASR_BUS_TIMEOUT_MASK)) {
+			InstancePtr->Stats.DmaErrors++;
+			return XST_DMA_ERROR;
+		}
+	}
+
+	*ByteCountPtr = PktLength;	/* report actual frame size to caller */
+
+	InstancePtr->Stats.RecvFrames++;
+	InstancePtr->Stats.RecvBytes += PktLength;
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* The interrupt handler for the Ethernet driver when configured for direct FIFO
+* communication or simple DMA.
+*
+* Get the interrupt status from the IpIf to determine the source of the
+* interrupt. The source can be: MAC, Recv Packet FIFO, or Send Packet FIFO.
+* The packet FIFOs only interrupt during "deadlock" conditions. All other
+* FIFO-related interrupts are generated by the MAC.
+*
+* @param InstancePtr is a pointer to the XEmac instance that just interrupted.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_IntrHandlerFifo(void *InstancePtr)
+{
+	u32 IntrStatus;
+	XEmac *EmacPtr = (XEmac *) InstancePtr;	/* void* per interrupt-handler signature */
+
+	EmacPtr->Stats.TotalIntrs++;
+
+	/*
+	 * Get the interrupt status from the IPIF. There is no clearing of
+	 * interrupts in the IPIF. Interrupts must be cleared at the source.
+	 * NOTE(review): DIPR is presumably the IPIF device interrupt pending
+	 * register; each set bit is dispatched to a per-source handler below.
+	 */
+	IntrStatus = XEMAC_READ_DIPR(EmacPtr->BaseAddress);
+
+	if (IntrStatus & XEM_IPIF_EMAC_MASK) {	/* MAC interrupt */
+		EmacPtr->Stats.EmacInterrupts++;
+		HandleEmacFifoIntr(EmacPtr);
+	}
+
+	if (IntrStatus & XEM_IPIF_RECV_FIFO_MASK) {	/* Receive FIFO interrupt */
+		EmacPtr->Stats.RecvInterrupts++;
+		XEmac_CheckFifoRecvError(EmacPtr);
+	}
+
+	if (IntrStatus & XEM_IPIF_SEND_FIFO_MASK) {	/* Send FIFO interrupt */
+		EmacPtr->Stats.XmitInterrupts++;
+		XEmac_CheckFifoSendError(EmacPtr);
+	}
+
+	if (IntrStatus & XEMAC_ERROR_MASK) {
+		/*
+		 * An error occurred internal to the IPIF. This is more of a debug and
+		 * integration issue rather than a production error. Don't do anything
+		 * other than clear it, which provides a spot for software to trap
+		 * on the interrupt and begin debugging.
+		 */
+		XEMAC_WRITE_DISR(EmacPtr->BaseAddress,
+				 XEMAC_ERROR_MASK);
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the callback function for handling received frames when configured for
+* direct memory-mapped I/O using FIFOs. The upper layer software should call
+* this function during initialization. The callback is called by the driver
+* once per frame received. During the callback, the upper layer software
+* should call XEmac_FifoRecv to retrieve the received frame.
+*
+* The callback is invoked by the driver within interrupt context, so it needs
+* to do its job quickly. Sending the received frame up the protocol stack
+* should be done at task-level. If there are other potentially slow operations
+* within the callback, these too should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the driver in
+* the callback. This helps the driver correlate the callback to a
+* particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_SetFifoRecvHandler(XEmac * InstancePtr, void *CallBackRef,
+			      XEmac_FifoHandler FuncPtr)
+{
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(FuncPtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	InstancePtr->FifoRecvHandler = FuncPtr;
+	InstancePtr->FifoRecvRef = CallBackRef;
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the callback function for handling confirmation of transmitted frames
+* when configured for direct memory-mapped I/O using FIFOs. The upper layer
+* software should call this function during initialization. The callback is
+* called by the driver once per frame sent. The callback is responsible for
+* freeing the transmitted buffer if necessary.
+*
+* The callback is invoked by the driver within interrupt context, so it needs
+* to do its job quickly. If there are potentially slow operations within the
+* callback, these should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the driver in
+* the callback. This helps the driver correlate the callback to a
+* particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmac_SetFifoSendHandler(XEmac * InstancePtr, void *CallBackRef,
+			      XEmac_FifoHandler FuncPtr)
+{
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(FuncPtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	InstancePtr->FifoSendHandler = FuncPtr;
+	InstancePtr->FifoSendRef = CallBackRef;
+}
+
+/******************************************************************************
+*
+* Handle an interrupt from the Ethernet MAC when configured for direct FIFO
+* communication. The interrupts handled are:
+* - Transmit done (transmit status FIFO is non-empty). Used to determine when
+* a transmission has been completed.
+* - Receive done (receive length FIFO is non-empty). Used to determine when a
+* valid frame has been received.
+*
+* In addition, the interrupt status is checked for errors.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static void HandleEmacFifoIntr(XEmac * InstancePtr)
+{
+	u32 IntrStatus;
+
+	/*
+	 * The EMAC generates interrupts for errors and generates the transmit
+	 * and receive done interrupts for data. We clear the interrupts
+	 * immediately so that any latched status interrupt bits will reflect the
+	 * true status of the device, and so any pulsed interrupts (non-status)
+	 * generated during the Isr will not be lost.
+	 */
+	IntrStatus = XEMAC_READ_IISR(InstancePtr->BaseAddress);
+	XEMAC_WRITE_IISR(InstancePtr->BaseAddress, IntrStatus);
+
+	if (IntrStatus & XEM_EIR_RECV_DONE_MASK) {
+		/*
+		 * Configured for direct memory-mapped I/O using FIFO with interrupts.
+		 * This interrupt means the RPLR is non-empty, indicating a frame has
+		 * arrived.
+		 * NOTE(review): Stats.RecvInterrupts is also incremented by
+		 * XEmac_IntrHandlerFifo for receive-FIFO deadlock interrupts; the
+		 * increment here counts receive-done events specifically.
+		 */
+		InstancePtr->Stats.RecvInterrupts++;
+
+		InstancePtr->FifoRecvHandler(InstancePtr->FifoRecvRef);
+
+		/*
+		 * The upper layer has removed as many frames as it wants to, so we
+		 * need to clear the RECV_DONE bit before leaving the ISR so that it
+		 * reflects the current state of the hardware (because it's a level
+		 * interrupt that is latched in the IPIF interrupt status register).
+		 * Note that if we've reached this point the bit is guaranteed to be
+		 * set because it was cleared at the top of this ISR before any frames
+		 * were serviced, so the bit was set again immediately by hardware
+		 * because the RPLR was not yet emptied by software.
+		 */
+		XEMAC_WRITE_IISR(InstancePtr->BaseAddress,
+				 XEM_EIR_RECV_DONE_MASK);
+	}
+
+	/*
+	 * If configured for direct memory-mapped I/O using FIFO, the xmit status
+	 * FIFO must be read and the callback invoked regardless of success or not.
+	 */
+	if (IntrStatus & XEM_EIR_XMIT_DONE_MASK) {
+		u32 XmitStatus;
+
+		InstancePtr->Stats.XmitInterrupts++;
+
+		XmitStatus =
+			XIo_In32(InstancePtr->BaseAddress + XEM_TSR_OFFSET);
+
+		/*
+		 * Collision errors are stored in the transmit status register
+		 * instead of the interrupt status register
+		 */
+		if (XmitStatus & XEM_TSR_EXCESS_DEFERRAL_MASK) {
+			InstancePtr->Stats.XmitExcessDeferral++;
+		}
+
+		if (XmitStatus & XEM_TSR_LATE_COLLISION_MASK) {
+			InstancePtr->Stats.XmitLateCollisionErrors++;
+		}
+
+		InstancePtr->FifoSendHandler(InstancePtr->FifoSendRef);
+
+		/*
+		 * Only one status is retrieved per interrupt. We need to clear the
+		 * XMIT_DONE bit before leaving the ISR so that it reflects the current
+		 * state of the hardware (because it's a level interrupt that is latched
+		 * in the IPIF interrupt status register). Note that if we've reached
+		 * this point the bit is guaranteed to be set because it was cleared at
+		 * the top of this ISR before any statuses were serviced, so the bit was
+		 * set again immediately by hardware because the TSR was not yet emptied
+		 * by software.
+		 */
+		XEMAC_WRITE_IISR(InstancePtr->BaseAddress,
+				 XEM_EIR_XMIT_DONE_MASK);
+	}
+
+	/*
+	 * Check the MAC for errors
+	 */
+	XEmac_CheckEmacError(InstancePtr, IntrStatus);
+}
--- /dev/null
+/*
+ * emac_linux.c
+ *
+ * Xilinx Ethernet Adapter component to interface XEmac component to Linux
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under the terms
+ * of the GNU General Public License version 2.1. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -----------------------------------------------
+ * 1.01a ecm 12/19/04 Added TX and RX DRE and Checksum offload functionality.
+ * This adapter now only works with v1.01.a of the XEmac
+ *                     driver. Also enables stripping of the PAD and FCS;
+ *                     the adapter requires this feature to function properly.
+ *
+ * 1.01a rpm 06/09/06 Fixed the FCS subtraction above for bridging by using
+ * skb_put when setting the skb length instead of directly
+ * assigning the skb->len field. Directly assigning it
+ * broke IP forwarding.
+ * 1.01a wgr 09/14/06 Ported to Linux 2.6
+ * 1.01a xd 10/13/06 Add support to change MTU on the fly using tools like
+ * ifconfig
+ * 1.01a wgr 10/13/06 Added workaround for PHY detection
+ * 1.11a wgr 03/22/07 Converted to new coding style.
+ */
+
+/*
+ * This driver is a bit unusual in that it is composed of two logical
+ * parts where one part is the OS independent code and the other part is
+ * the OS dependent code. Xilinx provides their drivers split in this
+ * fashion. This file represents the Linux OS dependent part known as
+ * the Linux adapter. The other files in this directory are the OS
+ * independent files as provided by Xilinx with no changes made to them.
+ * The names exported by those files begin with XEmac_. All functions
+ * in this file that are called by Linux have names that begin with
+ * xenet_. The functions in this file that have Handler in their name
+ * are registered as callbacks with the underlying Xilinx OS independent
+ * layer. Any other functions are static helper functions.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/checksum.h>
+#include <linux/ethtool.h>
+
+#include <linux/xilinx_devices.h>
+
+#include <xbasic_types.h>
+#include "xemac.h"
+#include "xemac_i.h"
+#include "xipif_v1_23_b.h"
+
+#ifdef CONFIG_OF
+// For open firmware.
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+/*
+ * Add a delay (in ms) after resetting the EMAC since it
+ * also resets the PHY - which needs a delay before using it. - RPM
+ */
+#define RESET_DELAY 1500
+
+#ifdef RESET_DELAY
+# include <linux/delay.h>
+#endif
+
+#undef XEM_DFT_SEND_DESC
+#define XEM_DFT_SEND_DESC 256
+#define DFT_LOCAL_SEND_DESC 64
+
+#undef XEM_DFT_RECV_DESC
+#define XEM_DFT_RECV_DESC 256
+
+#define DRIVER_NAME "xilinx_emac"
+#define DRIVER_VERSION "1.0"
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx Ethernet MAC driver");
+MODULE_LICENSE("GPL");
+
+#define TX_TIMEOUT (60*HZ) /* Transmission timeout is 60 seconds. */
+
+/* On the OPB, the 10/100 EMAC requires data to be aligned to 4 bytes.
+ * On the PLB, the 10/100 EMAC requires data to be aligned to 8 bytes.
+ * For simplicity (and cache friendliness), we always align to 32 bytes,
+ * which satisfies both requirements.
+ */
+#define ALIGNMENT 32
+
+/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+
+#define ENET_HDR_SIZ 14
+#define ETHERTYPE_IP 0x0800 /* IP protocol */
+
+#define RX_PSEUDO_HEADER_DATA_START 12
+#define RX_PSEUDO_HEADER_DATA_END 18
+
+/* physical to virtual pointer conversion */
+#define P_TO_V(InstancePtr, p) \
+ ((p) ? \
+ ((InstancePtr)->VirtPtr + ((u32)(p) - (u32)(InstancePtr)->PhyPtr)) : \
+ 0)
+
+int bh_entry = 0;
+
+/*
+ * Our private per device data. When a net_device is allocated we will
+ * ask for enough extra space for this.
+ */
+struct net_local {
+	struct list_head rcv;	/* linkage for the BH receivedQueue -- TODO confirm */
+	XBufDescriptor *rcvBdPtr;	/* head BD awaiting receive BH processing -- TODO confirm */
+	int rcvBds;	/* number of BDs referenced by rcvBdPtr -- TODO confirm */
+	struct list_head xmit;	/* linkage for the BH sentQueue -- TODO confirm */
+	XBufDescriptor *xmitBdPtr;	/* head BD awaiting transmit BH processing -- TODO confirm */
+	int xmitBds;	/* number of BDs referenced by xmitBdPtr -- TODO confirm */
+
+	struct net_device_stats stats;	/* Statistics for this device */
+	struct net_device *ndev;	/* this device */
+	struct timer_list phy_timer;	/* PHY monitoring timer */
+	XInterruptHandler Isr;	/* Pointer to the XEmac ISR routine */
+	u8 mii_addr;	/* The MII address of the PHY */
+	/*
+	 * The underlying OS independent code needs space as well. A
+	 * pointer to the following XEmac structure will be passed to
+	 * any XEmac_ function that requires it. However, we treat the
+	 * data as an opaque object in this file (meaning that we never
+	 * reference any of the fields inside of the structure).
+	 */
+	XEmac Emac;
+
+	void *desc_space;	/* Virtual address */
+	dma_addr_t desc_space_handle;	/* Physical address */
+	int desc_space_size;	/* size in bytes of the descriptor space */
+
+	u8 *ddrVirtPtr;	/* buffer memory: kernel virtual address */
+	u32 ddrOffset;	/* buffer memory: offset -- TODO confirm base */
+	u32 ddrSize;	/* buffer memory: size in bytes */
+
+	struct sk_buff *deferred_skb;	/* skb held back when the device was busy */
+
+	atomic_t availSendBds;	/* free send buffer descriptors remaining */
+};
+
+/*
+ * For exclusion of all program flows (processes, ISRs and BHs) that may
+ * share data with the current one. Initialized with DEFINE_SPINLOCK():
+ * the SPIN_LOCK_UNLOCKED static initializer is deprecated in 2.6 kernels
+ * (and removed in later ones) because it defeats lockdep lock-class keys.
+ */
+static DEFINE_SPINLOCK(reset_lock);
+
+/*
+ * Helper function to determine if a given XEmac error status warrants a
+ * reset of the device. Returns non-zero for fatal DMA/FIFO/SG statuses.
+ *
+ * Declared 'static inline' rather than 'extern inline': under GNU89
+ * semantics 'extern inline' emits no out-of-line definition, so if the
+ * compiler declines to inline a call (e.g. at -O0) the build fails at
+ * link time with an undefined reference.
+ */
+static inline int status_requires_reset(int s)
+{
+	return (s == XST_DMA_ERROR || s == XST_FIFO_ERROR ||
+		s == XST_RESET_ERROR || s == XST_DMA_SG_NO_LIST ||
+		s == XST_DMA_SG_LIST_EMPTY);
+}
+
+/* BH statics: queues of packets handed from ISR to bottom-half processing,
+ * each protected by its own spinlock. DEFINE_SPINLOCK() replaces the
+ * deprecated SPIN_LOCK_UNLOCKED static initializer (lockdep-unsafe).
+ */
+static LIST_HEAD(receivedQueue);
+static DEFINE_SPINLOCK(rcvSpin);
+
+static LIST_HEAD(sentQueue);
+static DEFINE_SPINLOCK(xmitSpin);
+
+/*
+ * The following are notes regarding the critical sections in this
+ * driver and how they are protected.
+ *
+ *
+ * XEmac_Start, XEmac_Stop and XEmac_SetOptions are not thread safe.
+ * These functions are called from xenet_open(), xenet_close(), reset(),
+ * and xenet_set_multicast_list(). xenet_open() and xenet_close()
+ * should be safe because when they do start and stop, they don't have
+ * interrupts or timers enabled. The other side is that they won't be
+ * called while a timer or interrupt is being handled.
+ *
+ * XEmac_PhyRead and XEmac_PhyWrite are not thread safe.
+ * These functions are called from get_phy_status(), xenet_ioctl() and
+ * xenet_probe(). xenet_probe() is only called from xenet_init() so it is not
+ * an issue (nothing is really up and running yet). get_phy_status() is called
+ * from both poll_mii() (a timer bottom half) and xenet_open(). These
+ * shouldn't interfere with each other because xenet_open() is what starts the
+ * poll_mii() timer. xenet_open() and xenet_ioctl() should be safe as well
+ * because they will be sequential. That leaves the interaction between
+ * poll_mii() and xenet_ioctl(). While the timer bottom half is executing, a
+ * new ioctl won't come in so that is taken care of. That leaves the one case
+ * of the poll_mii timer popping while handling an ioctl. To take care of that
+ * case, the timer is deleted when the ioctl comes in and then added back in
+ * after the ioctl is finished.
+ */
+
+typedef enum DUPLEX { UNKNOWN_DUPLEX, HALF_DUPLEX, FULL_DUPLEX } DUPLEX;
+
+/*
+ * Reset and restart the EMAC while preserving its configuration.
+ *
+ * Saves the settings that XEmac_Reset() discards (interframe gap,
+ * option flags and, in SG-DMA mode, the packet threshold / wait-bound
+ * values), resets the core, restores them -- optionally forcing the
+ * duplex requested by the caller -- and restarts the device.
+ * Outstanding SG-DMA transmit descriptors are reclaimed and counted
+ * as tx_errors; in FIFO mode any deferred skb is dropped.
+ *
+ * NOTE(review): callers appear to serialize this through reset_lock
+ * (see poll_mii()/xenet_tx_timeout()) -- confirm for any new caller.
+ */
+static void reset(struct net_device *dev, DUPLEX duplex)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        u32 Options;
+        u8 IfgPart1;
+        u8 IfgPart2;
+        u8 SendThreshold;
+        u32 SendWaitBound;
+        u8 RecvThreshold;
+        u32 RecvWaitBound;
+        int dma_works;
+
+        /* Shouldn't really be necessary, but shouldn't hurt. */
+        netif_stop_queue(dev);
+
+        /*
+         * XEmac_Reset puts the device back to the default state. We need
+         * to save all the settings we don't already know, reset, restore
+         * the settings, and then restart the emac.
+         */
+        XEmac_GetInterframeGap(&lp->Emac, &IfgPart1, &IfgPart2);
+        Options = XEmac_GetOptions(&lp->Emac);
+        switch (duplex) {       /* force the duplex the caller determined */
+        case HALF_DUPLEX:
+                Options &= ~XEM_FDUPLEX_OPTION;
+                break;
+        case FULL_DUPLEX:
+                Options |= XEM_FDUPLEX_OPTION;
+                break;
+        case UNKNOWN_DUPLEX:    /* keep whatever is currently configured */
+                break;
+        }
+
+        if (XEmac_mIsSgDma(&lp->Emac)) {
+                /*
+                 * The following four functions will return an error if we are
+                 * not doing scatter-gather DMA. We just checked that so we
+                 * can safely ignore the return values. We cast them to void
+                 * to make that explicit.
+                 */
+                dma_works = 1;
+                (void) XEmac_GetPktThreshold(&lp->Emac, XEM_SEND,
+                                             &SendThreshold);
+                (void) XEmac_GetPktWaitBound(&lp->Emac, XEM_SEND,
+                                             &SendWaitBound);
+                (void) XEmac_GetPktThreshold(&lp->Emac, XEM_RECV,
+                                             &RecvThreshold);
+                (void) XEmac_GetPktWaitBound(&lp->Emac, XEM_RECV,
+                                             &RecvWaitBound);
+        }
+        else
+                dma_works = 0;
+
+        XEmac_Reset(&lp->Emac);
+
+#ifdef RESET_DELAY
+        mdelay(RESET_DELAY);    /* optional settle time after the reset */
+#endif
+
+        /*
+         * The following three functions will return an error if the
+         * EMAC is already started. We just stopped it by calling
+         * XEmac_Reset() so we can safely ignore the return values.
+         * We cast them to void to make that explicit.
+         */
+        (void) XEmac_SetMacAddress(&lp->Emac, dev->dev_addr);
+        (void) XEmac_SetInterframeGap(&lp->Emac, IfgPart1, IfgPart2);
+        (void) XEmac_SetOptions(&lp->Emac, Options);
+        if (XEmac_mIsSgDma(&lp->Emac)) {
+                /*
+                 * The following four functions will return an error if
+                 * we are not doing scatter-gather DMA or if the EMAC is
+                 * already started. We just checked that we are indeed
+                 * doing scatter-gather and we just stopped the EMAC so
+                 * we can safely ignore the return values. We cast them
+                 * to void to make that explicit.
+                 */
+                (void) XEmac_SetPktThreshold(&lp->Emac, XEM_SEND,
+                                             SendThreshold);
+                (void) XEmac_SetPktWaitBound(&lp->Emac, XEM_SEND,
+                                             SendWaitBound);
+                (void) XEmac_SetPktThreshold(&lp->Emac, XEM_RECV,
+                                             RecvThreshold);
+                (void) XEmac_SetPktWaitBound(&lp->Emac, XEM_RECV,
+                                             RecvWaitBound);
+        }
+
+        /*
+         * XEmac_Start returns an error when: it is already started, the send
+         * and receive handlers are not set, or a scatter-gather DMA list is
+         * missing. None of these can happen at this point, so we cast the
+         * return to void to make that explicit.
+         */
+
+        if (dma_works) {
+                int avail_plus = 0;
+
+                /* Drain descriptors still queued on the send channel;
+                 * unmap their buffers and count them as tx errors. */
+                while (!(XDmaChannel_IsSgListEmpty(&(lp->Emac.SendChannel)))) { /* list isn't empty, has to be cleared */
+                        int ret;
+                        XBufDescriptor *BdPtr;
+
+                        if ((ret =
+                             XDmaChannel_GetDescriptor(&(lp->Emac.SendChannel),
+                                                       &BdPtr)) !=
+                            XST_SUCCESS) {
+                                printk(KERN_ERR
+                                       "SgDma ring structure ERROR %d\n", ret);
+                                break;
+                        }
+                        avail_plus++;
+                        XBufDescriptor_Unlock(BdPtr);
+                        pci_unmap_single(NULL,
+                                         (u32)
+                                         XBufDescriptor_GetSrcAddress(BdPtr),
+                                         XBufDescriptor_GetLength(BdPtr),
+                                         DMA_TO_DEVICE);
+                        lp->stats.tx_errors++;
+                }
+                atomic_add(avail_plus, &lp->availSendBds);
+        }
+        else {
+                /* FIFO mode: a deferred skb cannot be retried now; drop it. */
+                if (lp->deferred_skb) {
+                        dev_kfree_skb(lp->deferred_skb);
+                        lp->deferred_skb = NULL;
+                        lp->stats.tx_errors++;
+                }
+        }
+
+        dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT;       /* to exclude tx timeout */
+        (void) XEmac_Start(&lp->Emac);
+        /* We're all ready to go. Start the queue in case it was stopped. */
+        if (!bh_entry)
+                netif_wake_queue(dev);
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* AddCsumRxPseudoHeader
+*
+* DESCRIPTION:
+*
+* Calculate the pseudo header checksum of the provided IP packet and add
+* it to the checksum accumulated so far.
+*
+* ARGUMENTS:
+*
+* skb is the buffer containing the received packet. The entire packet is
+* within this skb.
+*
+* Initial Checksum - Checksum to start with, InitCSum
+*
+* Length of the Data, IpPayloadLen
+*
+* ProtoTTL is the data from the IP header containing the Time To Live (TTL)
+* and the protocol type, 6 = TCP and 17 = UDP
+*
+*
+* RETURN VALUE:
+*
+* Completed 16-bit checksum (caller compares against 0xFFFF)
+*
+******************************************************************************/
+static inline u16 AddCsumRxPseudoHeader(struct sk_buff *skb, u16 InitCSum,
+                                        u16 IpPayloadLen, u16 ProtoTTL)
+{
+        register u32 Csum;
+        int i;
+
+        Csum = InitCSum;
+
+        /*
+         * Add in the pseudoheader source address and destination address info
+         */
+        for (i = RX_PSEUDO_HEADER_DATA_START;
+             i <= RX_PSEUDO_HEADER_DATA_END; i = i + 2) {
+                Csum += (u32) (*(u16 *) (skb->data + i));
+        }
+
+        /* Protocol number: low byte of the TTL/protocol halfword. */
+        Csum += (u32) (ProtoTTL & 0x00FF);
+
+        /* Add in the length of the TCP/UDP data payload */
+        Csum += (u32) (IpPayloadLen);
+
+        /*
+         * Fold the carries back into the low 16 bits.  A single fold can
+         * itself produce a new carry, which the old code lost when the
+         * u32 accumulator was truncated to u16; fold until no carry
+         * remains (RFC 1071 style).
+         */
+        while (Csum & 0xFFFF0000)
+                Csum = (Csum & 0x0000FFFF) + (Csum >> 16);
+
+        return (u16) Csum;
+}
+
+/*
+ * Read duplex and link state from the PHY via MII.
+ *
+ * When auto-negotiation is disabled, *duplex comes from the BMCR
+ * duplex bit; otherwise it is the best mode common to our
+ * advertisement and the link partner's.  *linkup reflects the BMSR
+ * link-status bit.  Returns 0 on success, -1 if any MII read fails.
+ * (Also repairs the mis-encoded "&reg" arguments that appeared as
+ * a registered-trademark glyph and would not compile.)
+ */
+static int get_phy_status(struct net_device *dev, DUPLEX * duplex, int *linkup)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        u16 reg;
+        int xs;
+
+        xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr, MII_BMCR, &reg);
+        if (xs != XST_SUCCESS) {
+                printk(KERN_ERR
+                       "%s: Could not read PHY control register; error %d\n",
+                       dev->name, xs);
+                return -1;
+        }
+
+        if (!(reg & BMCR_ANENABLE)) {
+                /*
+                 * Auto-negotiation is disabled so the full duplex bit in
+                 * the control tells us if the PHY is running
+                 * half or full duplex.
+                 */
+                *duplex = (reg & BMCR_FULLDPLX) ? FULL_DUPLEX : HALF_DUPLEX;
+        }
+        else {
+                /*
+                 * Auto-negotiation is enabled. Figure out what was
+                 * negotiated by looking for the best mode in the union
+                 * of what we and our partner advertise.
+                 */
+                u16 advertise, partner, negotiated;
+
+                xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr,
+                                   MII_ADVERTISE, &advertise);
+                if (xs != XST_SUCCESS) {
+                        printk(KERN_ERR
+                               "%s: Could not read PHY advertisement; error %d\n",
+                               dev->name, xs);
+                        return -1;
+                }
+                xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr, MII_LPA, &partner);
+                if (xs != XST_SUCCESS) {
+                        printk(KERN_ERR
+                               "%s: Could not read PHY LPA; error %d\n",
+                               dev->name, xs);
+                        return -1;
+                }
+
+                /* Prefer 100 over 10, full over half. */
+                negotiated = advertise & partner & ADVERTISE_ALL;
+                if (negotiated & ADVERTISE_100FULL)
+                        *duplex = FULL_DUPLEX;
+                else if (negotiated & ADVERTISE_100HALF)
+                        *duplex = HALF_DUPLEX;
+                else if (negotiated & ADVERTISE_10FULL)
+                        *duplex = FULL_DUPLEX;
+                else
+                        *duplex = HALF_DUPLEX;
+        }
+
+        xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr, MII_BMSR, &reg);
+        if (xs != XST_SUCCESS) {
+                printk(KERN_ERR
+                       "%s: Could not read PHY status register; error %d\n",
+                       dev->name, xs);
+                return -1;
+        }
+
+        *linkup = (reg & BMSR_LSTATUS) != 0;
+
+        return 0;
+}
+
+/*
+ * This routine is used for two purposes. The first is to keep the
+ * EMAC's duplex setting in sync with the PHY's. The second is to keep
+ * the system apprised of the state of the link. Note that this driver
+ * does not configure the PHY. Either the PHY should be configured for
+ * auto-negotiation or it should be handled by something like mii-tool.
+ * Re-arms itself every 2 seconds unless an MII read fails.
+ */
+static void poll_mii(unsigned long data)
+{
+        struct net_device *dev = (struct net_device *) data;
+        struct net_local *lp = (struct net_local *) dev->priv;
+        u32 Options;
+        DUPLEX phy_duplex, mac_duplex;
+        int phy_carrier, netif_carrier;
+        unsigned long flags;
+
+        /* First, find out what's going on with the PHY. */
+        if (get_phy_status(dev, &phy_duplex, &phy_carrier)) {
+                /* MII read failed: stop monitoring (timer not re-armed). */
+                printk(KERN_ERR "%s: Terminating link monitoring.\n",
+                       dev->name);
+                return;
+        }
+
+        /* Second, figure out if we have the EMAC in half or full duplex. */
+        Options = XEmac_GetOptions(&lp->Emac);
+        mac_duplex = (Options & XEM_FDUPLEX_OPTION) ? FULL_DUPLEX : HALF_DUPLEX;
+
+        /* Now see if there is a mismatch. */
+        if (mac_duplex != phy_duplex) {
+                /*
+                 * Make sure that no interrupts come in that could cause
+                 * reentrancy problems in reset.
+                 */
+                spin_lock_irqsave(&reset_lock, flags);
+                reset(dev, phy_duplex); /* the function sets Emac options to match the PHY */
+                spin_unlock_irqrestore(&reset_lock, flags);
+                /* Report the NEW duplex: the opposite of the old MAC setting. */
+                if (mac_duplex == FULL_DUPLEX)
+                        printk(KERN_INFO
+                               "%s: Duplex has been changed: now %s\n",
+                               dev->name, "HALF_DUPLEX");
+                else
+                        printk(KERN_INFO
+                               "%s: Duplex has been changed: now %s\n",
+                               dev->name, "FULL_DUPLEX");
+        }
+        netif_carrier = netif_carrier_ok(dev) != 0;
+
+        /* Propagate carrier changes to the network stack. */
+        if (phy_carrier != netif_carrier) {
+                if (phy_carrier) {
+                        printk(KERN_INFO "%s: Link carrier restored.\n",
+                               dev->name);
+                        netif_carrier_on(dev);
+                }
+                else {
+                        printk(KERN_INFO "%s: Link carrier lost.\n", dev->name);
+                        netif_carrier_off(dev);
+                }
+        }
+
+        /* Set up the timer so we'll get called again in 2 seconds. */
+        lp->phy_timer.expires = jiffies + 2 * HZ;
+        add_timer(&lp->phy_timer);
+}
+
+/*
+ * Top-level interrupt handler registered with the kernel.
+ *
+ * The real work happens in the Xilinx OS-independent layer: at probe
+ * time lp->Isr was pointed at either the FIFO or the scatter-gather
+ * interrupt routine, so this handler simply dispatches to it.  That
+ * routine then invokes whatever condition callbacks we registered.
+ */
+static irqreturn_t xenet_interrupt(int irq, void *dev_id)
+{
+        struct net_device *ndev = dev_id;
+        struct net_local *lp = (struct net_local *) ndev->priv;
+
+        /* Dispatch to the FIFO or SG-DMA ISR chosen at probe time. */
+        lp->Isr(&lp->Emac);
+
+        /* The Xilinx ISRs report no status, so claim the interrupt. */
+        return IRQ_HANDLED;
+}
+
+/*
+ * net_device open(): program the MAC address, hook the interrupt,
+ * restrict PHY advertisement to 10/100, sync the EMAC duplex with the
+ * PHY, start the EMAC and the transmit queue, and arm the PHY poll
+ * timer.  Returns 0 on success or a negative errno.
+ */
+static int xenet_open(struct net_device *dev)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        u32 Options;
+        DUPLEX phy_duplex, mac_duplex;
+        int phy_carrier;
+
+        /*
+         * Just to be safe, stop the device first. If the device is already
+         * stopped, an error will be returned. In this case, we don't really
+         * care, so cast it to void to make it explicit.
+         */
+        (void) XEmac_Stop(&lp->Emac);
+        /* Set the MAC address each time opened. */
+        if (XEmac_SetMacAddress(&lp->Emac, dev->dev_addr) != XST_SUCCESS) {
+                printk(KERN_ERR "%s: Could not set MAC address.\n", dev->name);
+                return -EIO;
+        }
+
+        /*
+         * If the device is not configured for polled mode, connect to the
+         * interrupt controller and enable interrupts. Currently, there
+         * isn't any code to set polled mode, so this check is probably
+         * superfluous.
+         */
+        Options = XEmac_GetOptions(&lp->Emac);
+        if ((Options & XEM_POLLED_OPTION) == 0) {
+                int retval;
+
+                /* Grab the IRQ */
+                retval = request_irq(dev->irq, xenet_interrupt, 0, dev->name, dev);
+                if (retval) {
+                        printk(KERN_ERR
+                               "%s: Could not allocate interrupt %d.\n",
+                               dev->name, dev->irq);
+                        return retval;
+                }
+        }
+
+        /* Only advertise 10/100 modes, since we can't talk to a
+         * Tri-mode PHY if it autonegotiates a gigabit link. (e.g. ML403, ML410)
+         */
+        XEmac_PhyWrite(&lp->Emac, lp->mii_addr, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA);
+        XEmac_PhyWrite(&lp->Emac, lp->mii_addr, MII_CTRL1000, 0);
+
+        /* Give the system enough time to establish a link.
+         * NOTE(review): 2 s busy-wait in open(); a poll on link status
+         * would be friendlier -- confirm before changing. */
+        mdelay(2000);
+
+        /* Set the EMAC's duplex setting based upon what the PHY says. */
+        if (!get_phy_status(dev, &phy_duplex, &phy_carrier)) {
+                /* We successfully got the PHY status. */
+                mac_duplex = ((Options & XEM_FDUPLEX_OPTION)
+                              ? FULL_DUPLEX : HALF_DUPLEX);
+                if (mac_duplex != phy_duplex) {
+                        switch (phy_duplex) {
+                        case HALF_DUPLEX:
+                                Options &= ~XEM_FDUPLEX_OPTION;
+                                break;
+                        case FULL_DUPLEX:
+                                Options |= XEM_FDUPLEX_OPTION;
+                                break;
+                        case UNKNOWN_DUPLEX:
+                                break;
+                        }
+                        /*
+                         * The XEmac_SetOptions call below will return an
+                         * error if the EMAC is already started. We know it
+                         * isn't started so we can safely ignore the
+                         * return value. We cast it to void to make
+                         * that explicit.
+                         */
+                }
+        }
+        Options |= XEM_FLOW_CONTROL_OPTION;
+        (void) XEmac_SetOptions(&lp->Emac, Options);
+
+        /* Reset the per-device BH bookkeeping lists and counters. */
+        INIT_LIST_HEAD(&(lp->rcv));
+        lp->rcvBds = 0;
+        INIT_LIST_HEAD(&(lp->xmit));
+        lp->xmitBds = 0;
+
+        if (XEmac_Start(&lp->Emac) != XST_SUCCESS) {
+                printk(KERN_ERR "%s: Could not start device.\n", dev->name);
+                free_irq(dev->irq, dev);
+                return -EBUSY;
+        }
+
+        /* We're ready to go. */
+        netif_start_queue(dev);
+
+        /* Set up the PHY monitoring timer. */
+        lp->phy_timer.expires = jiffies + 2 * HZ;
+        lp->phy_timer.data = (unsigned long) dev;
+        lp->phy_timer.function = &poll_mii;
+        init_timer(&lp->phy_timer);
+        add_timer(&lp->phy_timer);
+        return 0;
+}
+/*
+ * net_device stop(): kill the PHY poll timer, stop the TX queue,
+ * release the interrupt, unhook this device from the BH work queues
+ * and stop the EMAC.  Returns 0 on success, -EBUSY if the EMAC
+ * refuses to stop.
+ */
+static int xenet_close(struct net_device *dev)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        unsigned long flags;
+
+        /* Shut down the PHY monitoring timer. */
+        del_timer_sync(&lp->phy_timer);
+
+        netif_stop_queue(dev);
+
+        /*
+         * If not in polled mode, free the interrupt. Currently, there
+         * isn't any code to set polled mode, so this check is probably
+         * superfluous.
+         */
+        if ((XEmac_GetOptions(&lp->Emac) & XEM_POLLED_OPTION) == 0)
+                free_irq(dev->irq, dev);
+
+        /* Remove this device from the tasklet work queues under the
+         * corresponding locks (safe if not currently queued). */
+        spin_lock_irqsave(&rcvSpin, flags);
+        list_del(&(lp->rcv));
+        spin_unlock_irqrestore(&rcvSpin, flags);
+        spin_lock_irqsave(&xmitSpin, flags);
+        list_del(&(lp->xmit));
+        spin_unlock_irqrestore(&xmitSpin, flags);
+
+        if (XEmac_Stop(&lp->Emac) != XST_SUCCESS) {
+                printk(KERN_ERR "%s: Could not stop device.\n", dev->name);
+                return -EBUSY;
+        }
+
+        return 0;
+}
+/*
+ * net_device change_mtu(): accept the new MTU only if the resulting
+ * Ethernet frame (header + payload + trailer) stays within the EMAC's
+ * frame-size limits.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int xenet_change_mtu(struct net_device *dev, int new_mtu)
+{
+        int overhead = XEM_HDR_SIZE + XEM_TRL_SIZE;
+        int frame_len = new_mtu + overhead;
+
+        /* Payload must be at least one byte and the whole frame must
+         * fit in the hardware maximum. */
+        if ((frame_len < 1 + overhead) || (frame_len > XEM_MAX_FRAME_SIZE))
+                return -EINVAL;
+
+        dev->mtu = new_mtu;     /* record the accepted MTU */
+        return 0;
+}
+/*
+ * net_device get_stats(): the statistics block lives inside the
+ * adapter's private data; hand the stack a pointer to it.
+ */
+static struct net_device_stats *xenet_get_stats(struct net_device *dev)
+{
+        struct net_local *priv = (struct net_local *) dev->priv;
+        return &priv->stats;
+}
+
+/*
+ * hard_start_xmit for FIFO (non-DMA) mode.
+ *
+ * Copies the packet into a freshly allocated, word-aligned skb (the
+ * packet FIFO requires 32/64-bit aligned buffers) and hands it to the
+ * FIFO driver.  If the FIFO is full the copy is parked in
+ * lp->deferred_skb and the queue is stopped until FifoSendHandler()
+ * retries it.  Always returns 0: once the caller's skb has been
+ * consumed (freed) we must not report BUSY, or the stack would
+ * requeue a freed buffer.
+ */
+static int xenet_FifoSend(struct sk_buff *orig_skb, struct net_device *dev)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        struct sk_buff *new_skb;
+        unsigned int len, align;
+        unsigned long flags;
+
+        len = orig_skb->len;
+
+        /* PR FIXME: what follows can be removed if the asserts in the Xilinx
+         * independent drivers change. There is really no need to align the
+         * buffers in FIFO mode. The story is different for simple DMA.
+         */
+
+        /*
+         * The packet FIFO requires the buffers to be 32/64 bit aligned.
+         * The sk_buff data is not 32/64 bit aligned, so we have to do this
+         * copy. As you probably well know, this is not optimal.
+         */
+        if (!(new_skb = alloc_skb(len + ALIGNMENT, GFP_ATOMIC))) {
+                /*
+                 * Out of memory: drop the packet.  The previous code
+                 * returned -EBUSY here after freeing orig_skb, which
+                 * made the stack requeue an skb we had already freed;
+                 * report success and account the drop instead.
+                 */
+                dev_kfree_skb(orig_skb);
+                lp->stats.tx_dropped++;
+                printk(KERN_ERR "%s: Could not allocate transmit buffer.\n",
+                       dev->name);
+                netif_wake_queue(dev);
+                return 0;
+        }
+        /*
+         * A new skb should have the data word aligned, but this code is
+         * here just in case that isn't true... Calculate how many
+         * bytes we should reserve to get the data to start on a word
+         * boundary. */
+        align = BUFFER_ALIGN(new_skb->data);
+        if (align)
+                skb_reserve(new_skb, align);
+
+        /* Copy the data from the original skb to the new one. */
+        skb_put(new_skb, len);
+        memcpy(new_skb->data, orig_skb->data, len);
+
+        /* Get rid of the original skb. */
+        dev_kfree_skb(orig_skb);
+
+        /* Hold reset_lock so a concurrent reset() cannot tear the FIFO
+         * down underneath us. */
+        spin_lock_irqsave(&reset_lock, flags);
+        if (XEmac_FifoSend(&lp->Emac, (u8 *) new_skb->data, len) != XST_SUCCESS) {
+                /* FIFO full: defer this frame and stop the queue until
+                 * a transmit completes (see FifoSendHandler()). */
+                netif_stop_queue(dev);
+                lp->deferred_skb = new_skb;
+                spin_unlock_irqrestore(&reset_lock, flags);
+                return 0;
+        }
+        spin_unlock_irqrestore(&reset_lock, flags);
+
+        lp->stats.tx_bytes += len;
+        dev_kfree_skb(new_skb);
+        dev->trans_start = jiffies;
+
+        return 0;
+}
+
+/* The callback function for completed frames sent in FIFO mode.
+ *
+ * If a send was deferred because the FIFO was full, retry it now that
+ * a frame has completed; if it still does not fit, leave it deferred
+ * (and the queue stopped) and return without accounting a packet.
+ * Otherwise count the completed transmit.
+ */
+static void FifoSendHandler(void *CallbackRef)
+{
+        struct net_device *dev = (struct net_device *) CallbackRef;
+        struct net_local *lp = (struct net_local *) dev->priv;
+
+        if (lp->deferred_skb) {
+                if (XEmac_FifoSend
+                    (&lp->Emac, (u8 *) lp->deferred_skb->data,
+                     lp->deferred_skb->len) != XST_SUCCESS) {
+                        /* Still no room; try again on the next completion. */
+                        return;
+                }
+                else {
+                        dev_kfree_skb(lp->deferred_skb);
+                        lp->deferred_skb = NULL;
+                        netif_wake_queue(dev);
+                }
+        }
+        lp->stats.tx_packets++;
+}
+
+/* The send function for frames sent in DMA mode.
+ *
+ * Copies the packet into the driver's DDR bounce buffer (computing the
+ * checksum during the copy when the stack requested it), maps the copy
+ * for DMA, builds one locked buffer descriptor and queues it on the SG
+ * send channel.  Stops the queue when the last send descriptor is
+ * consumed; SgSendHandlerBH() replenishes the count and wakes it.
+ */
+static int xenet_SgSend(struct sk_buff *skb, struct net_device *dev)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        unsigned int len;
+        XBufDescriptor bd;      /* NOTE(review): on-stack descriptor; assumes
+                                 * XEmac_SgSend copies it into the ring -- confirm */
+        int result;
+        u32 physAddr;
+        unsigned long flags;
+        u8 *virtAddr;
+
+        len = skb->len;
+        virtAddr = lp->ddrVirtPtr + lp->ddrOffset;
+
+        /* Copy (and optionally checksum) the frame into the bounce buffer. */
+        if (skb->ip_summed == CHECKSUM_NONE)
+#ifdef CONFIG_PPC32
+                cacheable_memcpy(virtAddr, skb->data, len);
+#else
+                memcpy(virtAddr, skb->data, len);
+#endif
+        else
+                skb_copy_and_csum_dev(skb, virtAddr);
+
+        dev_kfree_skb(skb);
+        physAddr = (u32) dma_map_single(NULL, virtAddr, len, DMA_TO_DEVICE);
+
+        /*
+         * lock the buffer descriptor to prevent lower layers from reusing
+         * it before the adapter has a chance to deallocate the buffer
+         * attached to it. The adapter will unlock it in the callback function
+         * that handles confirmation of transmits
+         */
+        XBufDescriptor_Initialize(&bd);
+        XBufDescriptor_Lock(&bd);
+        XBufDescriptor_SetSrcAddress(&bd, physAddr);
+        XBufDescriptor_SetLength(&bd, len);
+        XBufDescriptor_SetLast(&bd);
+
+        /* Advance the bounce-buffer cursor, wrapping when a maximal
+         * frame would no longer fit before the end. */
+        lp->ddrOffset += len + BUFFER_ALIGN(len);
+        if (lp->ddrOffset + XEM_MAX_FRAME_SIZE > lp->ddrSize)
+                lp->ddrOffset = 0;
+
+        spin_lock_irqsave(&reset_lock, flags);
+
+        result = XEmac_SgSend(&lp->Emac, &bd, XEM_SGDMA_NODELAY);
+        if (result != XST_SUCCESS) {
+                lp->stats.tx_dropped++;
+                printk(KERN_ERR
+                       "%s: ERROR, could not send transmit buffer (%d).\n",
+                       dev->name, result);
+                /* we should never get here in the first place, but
+                 * for some reason the kernel doesn't like -EBUSY here,
+                 * so just return 0 and let the stack handle dropped packets.
+                 */
+                /* return -EBUSY; */
+                spin_unlock_irqrestore(&reset_lock, flags);
+                return 0;
+        }
+
+        if (atomic_dec_and_test(&lp->availSendBds)) {
+                netif_stop_queue(dev);  /* ring exhausted; BH will wake us */
+        }
+
+        dev->trans_start = jiffies;
+        spin_unlock_irqrestore(&reset_lock, flags);
+        return 0;
+}
+
+/*
+ * The send function for frames sent in DMA mode using DRE and
+ * Checksum offload in the DMA.
+ */
+
+/* Scratch descriptor chain for one (possibly fragmented) frame: one
+ * descriptor for the linear head plus one per page fragment. */
+static XBufDescriptor bd[10];
+
+static int xenet_SgSendDre(struct sk_buff *skb, struct net_device *dev)
+{
+        struct net_local *lp = (struct net_local *) dev->priv;
+        unsigned int len;
+        int result;
+        u32 physAddr;
+        u8 *virtAddr;
+        u32 i;
+        unsigned long flags;
+        u16 csum_insert_offset;
+        u16 IpHeaderLength;
+        u16 ProtoTTL;
+        volatile u32 num_frag;
+        skb_frag_t *frag;
+        XBufDescriptor *prev_p;
+        XBufDescriptor *cur_p;
+
+        virtAddr = lp->ddrVirtPtr + lp->ddrOffset;
+
+        num_frag = skb_shinfo(skb)->nr_frags;
+        frag = &skb_shinfo(skb)->frags[0];
+
+        cur_p = &bd[0];
+        prev_p = 0;
+
+        /*
+         * The frame needs num_frag + 1 descriptors (head + fragments).
+         * The old code merely printk'd when that exceeded the static
+         * bd[] array and then overran it; drop the frame instead.
+         */
+        if ((num_frag + 1) > (sizeof(bd) / sizeof(bd[0]))) {
+                printk(KERN_ERR "%s: too many fragments (%u), dropping\n",
+                       dev->name, num_frag);
+                dev_kfree_skb(skb);
+                lp->stats.tx_dropped++;
+                return 0;
+        }
+
+        /*
+         * Return to the default configuration for the driver
+         */
+        XEmac_mDisableTxHwCsum(&lp->Emac);
+
+        /*
+         * Queue up the buffer descriptors only if there is space in the ring for this
+         * batch with extra room. The netif_stop_queue will not occur until after the
+         * next call to this function, therefore I need to leave enough for at least
+         * one more call in the descriptor ring.
+         */
+        if ((lp->availSendBds.counter) < (num_frag + 4)) {
+                netif_stop_queue(dev);
+        }
+
+        /* Assign dummy values to 'csum_insert_offset', 'IpHeaderLength' and 'len'.
+         * This will quiet down gcc warnings about unassigned variables in the
+         * "else" case of "if (i==0)" below.
+         *
+         * 'csum_insert_offset', 'IpHeaderLength' and 'len' are assigned in the
+         * iteration for the first fragment (i==0) case and will not change for
+         * subsequent fragments. As we ALWAYS execute the (i==0) case the variables
+         * will never be uninitialized.
+         */
+        csum_insert_offset = 0;
+        IpHeaderLength = 0;
+        len = 0;
+
+        /* Build one descriptor per piece: i == 0 is the linear head,
+         * the rest are page fragments. */
+        for (i = 0; i < (num_frag + 1); ++i) {
+                /*
+                 * Initialize the buffer descriptor and then
+                 * lock the buffer descriptor to prevent lower layers from reusing
+                 * it before the adapter has a chance to deallocate the buffer
+                 * attached to it. The adapter will unlock it in the callback function
+                 * that handles confirmation of transmits
+                 */
+                XBufDescriptor_Initialize(cur_p);
+                XBufDescriptor_Lock(cur_p);
+
+                if (prev_p) {
+                        XBufDescriptor_SetNextPtr(prev_p, cur_p);
+                }
+
+                if (i == 0) {
+                        /*
+                         * Set the ID for the first descriptor to be the
+                         * address of the skbuffer to be freed in the BH
+                         */
+                        XBufDescriptor_SetId(cur_p, skb);
+
+                        /* Grab protocol (TTL/protocol halfword of the IP
+                         * header at a fixed Ethernet+IP offset). */
+                        ProtoTTL = (*(u16 *) (skb->data + 22));
+
+                        /*
+                         * The kernel does not send frames down with
+                         * CHECKSUM_COMPLETE set unless they are TCP, UDP is
+                         * always CHECKSUM_NONE due to the TCP_SENDFILE test
+                         * requirement
+                         */
+                        virtAddr = skb->data;
+                        len = skb_headlen(skb);
+
+                        if ((XEmac_mIsTxHwCsum(&lp->Emac)) &&
+                            ((ProtoTTL & 0x00FF) == 6)) {
+
+                                if (skb->ip_summed == CHECKSUM_COMPLETE) {
+                                        XEmac_mEnableTxHwCsum(&lp->Emac);
+
+                                        /*
+                                         * Determine the length of the IP header which is used
+                                         * for the offset into the data for the protocol field.
+                                         */
+                                        IpHeaderLength =
+                                                ((((*(u16 *) (skb->data + 14)) &
+                                                   0x0F00) >> 8) * 4);
+
+                                        /*
+                                         * Determine the proper offset for the insert
+                                         * TCP offset is 16, UDP offset is 6 but the
+                                         * 2.4 stack does not use this for UDP
+                                         */
+                                        csum_insert_offset =
+                                                IpHeaderLength + 16 +
+                                                ENET_HDR_SIZ;
+
+                                        /*
+                                         * 0 works for the TCP TX checksum offload initial value
+                                         */
+                                        XBufDescriptor_SetCSInit(cur_p, 0);
+                                        XBufDescriptor_SetCSInsertLoc(cur_p,
+                                                                      csum_insert_offset);
+                                        XBufDescriptor_SetCSBegin(cur_p,
+                                                                  IpHeaderLength
+                                                                  +
+                                                                  ENET_HDR_SIZ);
+                                }
+
+                                XBufDescriptor_SetLength(cur_p, len);
+
+                                physAddr =
+                                        (u32) dma_map_single(NULL, virtAddr,
+                                                             len,
+                                                             DMA_TO_DEVICE);
+                                XBufDescriptor_SetSrcAddress(cur_p, physAddr);
+                        }
+                        else {
+                                /*
+                                 * First fragment, no hardware checksum offload or is it not TCP
+                                 */
+                                XBufDescriptor_SetLength(cur_p, len);
+
+                                physAddr =
+                                        (u32) dma_map_single(NULL, virtAddr,
+                                                             len,
+                                                             DMA_TO_DEVICE);
+                                XBufDescriptor_SetSrcAddress(cur_p, physAddr);
+                        }
+                }
+                else {
+                        /*
+                         * Fragment is not number 0
+                         */
+                        virtAddr = ((void *) page_address(frag->page) +
+                                    frag->page_offset);
+
+                        len = frag->size;
+
+                        /* NOTE:
+                         * 'csum_insert_offset', 'IpHeaderLength' and 'len' will be
+                         * initialized in the first iteration of the loop in the (i==0)
+                         * case, so they are valid in this iteration of the loop.
+                         */
+                        physAddr =
+                                (u32) dma_map_single(NULL, virtAddr, len,
+                                                     DMA_TO_DEVICE);
+                        XBufDescriptor_SetSrcAddress(cur_p, physAddr);
+                        XBufDescriptor_SetCSInit(cur_p, 0);
+
+                        XBufDescriptor_SetCSInsertLoc(cur_p,
+                                                      csum_insert_offset);
+                        XBufDescriptor_SetCSBegin(cur_p,
+                                                  IpHeaderLength +
+                                                  ENET_HDR_SIZ);
+                        XBufDescriptor_SetLength(cur_p, len);
+
+                        frag++;
+                }
+
+                if (i == num_frag) {
+                        /*
+                         * This is the last descriptor in the chain
+                         */
+                        XBufDescriptor_SetLast(cur_p);
+                }
+                prev_p = cur_p;
+                cur_p++;
+        }
+
+        /* Hand the whole chain to the SG DMA engine under reset_lock. */
+        spin_lock_irqsave(&reset_lock, flags);
+        for (i = 0; i < (num_frag + 1); ++i) {
+
+                result = XEmac_SgSend(&lp->Emac, &bd[i], XEM_SGDMA_NODELAY);
+                if (result != XST_SUCCESS) {
+                        lp->stats.tx_dropped++;
+                        printk( /*KERN_ERR */
+                               "%s: ERROR, could not send transmit buffer (%d).\n",
+                               dev->name, result);
+                        /* we should never get here in the first place, but
+                         * for some reason the kernel doesn't like -EBUSY here,
+                         * so just return 0 and let the stack handle dropped packets.
+                         */
+                        /* return -EBUSY; */
+                        spin_unlock_irqrestore(&reset_lock, flags);
+                        return 0;
+                }
+        }
+
+        if ((atomic_sub_return((num_frag + 1), &lp->availSendBds)) == 0) {
+                netif_stop_queue(dev);
+        }
+
+        spin_unlock_irqrestore(&reset_lock, flags);
+        dev->trans_start = jiffies;
+
+        return 0;
+}
+
+
+/* The callback function for completed frames sent in DMA mode. */
+static void SgSendHandlerBH(unsigned long p);
+static void SgRecvHandlerBH(unsigned long p);
+
+/* Tasklets that defer TX-completion and RX processing out of the ISR
+ * callbacks (SgSendHandler / SgRecvHandler) into bottom-half context. */
+DECLARE_TASKLET(SgSendBH, SgSendHandlerBH, 0);
+DECLARE_TASKLET(SgRecvBH, SgRecvHandlerBH, 0);
+
+/*
+ * Tasklet bottom half for transmit completions in SG-DMA mode.
+ *
+ * Drains sentQueue (filled by SgSendHandler()): for each batch it
+ * unmaps and accounts every completed descriptor, frees the skb
+ * recorded as the ID of a fragment chain's first descriptor, and
+ * wakes the transmit queue.
+ *
+ * NOTE(review): xmitSpin is held across the whole unmap/accounting
+ * loop, and bh_entry is cleared at the end without the lock --
+ * confirm the interaction with SgSendHandler() is intended.
+ */
+static void SgSendHandlerBH(unsigned long p)
+{
+        struct net_device *dev;
+        struct net_local *lp;
+        XBufDescriptor *BdPtr;
+        u32 NumBds;
+        u32 len;
+        XBufDescriptor *curbd;
+        unsigned long flags;
+        struct sk_buff *skb;
+
+        while (1) {
+                spin_lock_irqsave(&xmitSpin, flags);
+                if (list_empty(&sentQueue)) {
+                        spin_unlock_irqrestore(&xmitSpin, flags);
+                        break;
+                }
+                /* Pop the oldest completion batch. */
+                lp = list_entry(sentQueue.next, struct net_local, xmit);
+
+                list_del_init(&(lp->xmit));
+                NumBds = lp->xmitBds;
+                BdPtr = lp->xmitBdPtr;
+                dev = lp->ndev;
+                atomic_add(NumBds, &lp->availSendBds);
+                while (NumBds != 0) {
+                        NumBds--;
+
+                        len = XBufDescriptor_GetLength(BdPtr);
+                        pci_unmap_single(NULL,
+                                         (u32)
+                                         XBufDescriptor_GetSrcAddress(BdPtr),
+                                         len, DMA_TO_DEVICE);
+
+                        lp->stats.tx_bytes += len;
+                        lp->stats.tx_packets++;
+
+                        /* Advance to the next descriptor before unlocking
+                         * (releasing) the current one. */
+                        curbd = BdPtr;
+                        BdPtr = P_TO_V(&lp->Emac.SendChannel,
+                                       XBufDescriptor_GetNextPtr(BdPtr));
+                        XBufDescriptor_Unlock(curbd);
+                        /*
+                         * If the descriptor was part of a fragment list, the ID is
+                         * the skbuffer which can be freed at this point
+                         */
+                        skb = (struct sk_buff *) XBufDescriptor_GetId(curbd);
+                        if (skb != 0UL) {
+                                dev_kfree_skb(skb);
+                        }
+
+                }
+                spin_unlock_irqrestore(&xmitSpin, flags);
+                netif_wake_queue(dev);  /* send descriptors are free again */
+        }
+        bh_entry = 0;
+}
+
+/*
+ * ISR-context callback for SG-DMA transmit completion.
+ *
+ * Records the completed-descriptor batch on sentQueue and schedules
+ * the SgSendBH tasklet.  If this device is already on the queue only
+ * the descriptor count is bumped; the BH then walks on from the
+ * previously saved xmitBdPtr, which covers the new descriptors too
+ * since they are contiguous in the ring.
+ */
+static void SgSendHandler(void *CallBackRef, XBufDescriptor * BdPtr, u32 NumBds)
+{
+        struct net_device *dev = (struct net_device *) CallBackRef;
+        struct net_local *lp = (struct net_local *) dev->priv;
+        struct list_head *cur_lp = NULL;
+
+        spin_lock(&xmitSpin);
+        /* Already queued?  Then just add to the pending BD count. */
+        list_for_each(cur_lp, &sentQueue) {
+                if (cur_lp == &(lp->xmit)) {
+                        lp->xmitBds += NumBds;
+                        break;
+                }
+        }
+        if (cur_lp != &(lp->xmit)) {
+                /* Not queued: record the batch and kick the tasklet. */
+                lp->xmitBds = NumBds;
+                lp->xmitBdPtr = BdPtr;
+                list_add_tail(&lp->xmit, &sentQueue);
+                bh_entry++;
+                tasklet_schedule(&SgSendBH);
+        }
+        spin_unlock(&xmitSpin);
+}
+
+/*
+ * Tasklet bottom half for SG-DMA receive.
+ *
+ * For each completed receive descriptor: unmap the filled skb, attach
+ * a fresh maximal-size replacement buffer to the descriptor and hand
+ * it back to the DMA channel, then -- if the hardware computed a raw
+ * receive checksum for an IP frame -- adjust and verify it before
+ * passing the packet to the stack with netif_rx().
+ */
+static void SgRecvHandlerBH(unsigned long p)
+{
+        struct net_device *dev;
+        struct net_local *lp;
+        XBufDescriptor *BdPtr;
+        int NumBds;
+        struct sk_buff *skb, *new_skb;
+        u32 len, new_skb_vaddr;
+        dma_addr_t skb_vaddr;
+        u32 align;
+        int result;
+        XBufDescriptor *curbd;
+        unsigned long flags;
+        u16 HwCSum, PhCSum;
+        u16 IpDataLen, IpHeaderLength, ProtoTTL;
+        u32 CalcCSum;
+        u32 EmacFCS;
+        u8 *EmacFCSPtr;
+
+        while (1) {
+                spin_lock_irqsave(&rcvSpin, flags);
+                if (list_empty(&receivedQueue)) {
+                        spin_unlock_irqrestore(&rcvSpin, flags);
+                        break;
+                }
+                /* Pop the oldest receive batch queued by SgRecvHandler(). */
+                lp = list_entry(receivedQueue.next, struct net_local, rcv);
+
+                list_del_init(&(lp->rcv));
+                NumBds = lp->rcvBds;
+                BdPtr = lp->rcvBdPtr;
+                dev = lp->ndev;
+                spin_unlock_irqrestore(&rcvSpin, flags);
+                while (NumBds != 0) {
+                        NumBds--;
+
+                        /* get ptr to skb */
+                        skb = (struct sk_buff *) XBufDescriptor_GetId(BdPtr);
+                        len = XBufDescriptor_GetLength(BdPtr);
+
+                        /*
+                         * Retrieve hardware Checksum regardless, check later if
+                         * valid to use
+                         */
+                        HwCSum = XBufDescriptor_GetCSRaw(BdPtr);
+
+                        /* we have all the information we need - move on */
+                        curbd = BdPtr;
+                        BdPtr = P_TO_V(&lp->Emac.RecvChannel,
+                                       XBufDescriptor_GetNextPtr(curbd));
+
+                        skb_vaddr =
+                                (dma_addr_t)
+                                XBufDescriptor_GetDestAddress(curbd);
+                        pci_unmap_single(NULL, skb_vaddr, len, DMA_FROM_DEVICE);
+
+                        /* replace skb with a new one */
+                        new_skb =
+                                alloc_skb(XEM_MAX_FRAME_SIZE + ALIGNMENT,
+                                          GFP_ATOMIC);
+                        if (new_skb == 0) {
+                                /* NOTE(review): returning here abandons the
+                                 * remaining descriptors of this batch -- the
+                                 * ring slot is not replenished; confirm. */
+                                printk("SgRecvHandler: no mem for new_skb\n");
+                                return;
+                        }
+
+                        if (!(XEmac_mIsRxDre(&lp->Emac))) {
+                                /* make sure we're long-word aligned */
+                                align = BUFFER_ALIGN(new_skb->data);
+                                if (align) {
+                                        skb_reserve(new_skb, align);
+                                }
+                        }
+
+                        new_skb_vaddr =
+                                (u32) dma_map_single(NULL, new_skb->data,
+                                                     XEM_MAX_FRAME_SIZE,
+                                                     DMA_FROM_DEVICE);
+
+                        /* Re-arm the descriptor with the fresh buffer. */
+                        XBufDescriptor_SetDestAddress(curbd, new_skb_vaddr);
+                        XBufDescriptor_SetLength(curbd, XEM_MAX_FRAME_SIZE);
+                        XBufDescriptor_SetId(curbd, new_skb);
+                        XBufDescriptor_Unlock(curbd);
+
+                        /* give the descriptor back to the driver */
+                        result = XEmac_SgRecv(&lp->Emac, curbd);
+                        if (result != XST_SUCCESS) {
+                                printk("SgRecvHandler: SgRecv unsuccessful\n");
+                                return;
+                        }
+
+                        /* back to the original skb
+                         * NOTE:
+                         * The following line should read
+                         * skb_put(skb, len);
+                         * However, doing this causes the driver not to work
+                         * anymore. If you want to fix this and put in the
+                         * skb_pu() again, you need to delete the
+                         * skb->len -= 4;
+                         * line below.
+                         * wgr 09/14/2006
+                         */
+                        skb->len = len;
+                        skb->dev = dev;
+                        skb->protocol = eth_type_trans(skb, dev);
+                        skb->ip_summed = CHECKSUM_NONE;
+
+                        lp->stats.rx_packets++;
+                        lp->stats.rx_bytes += len;
+
+                        /*
+                         * Check if Checksum offload is in the hardware, if so
+                         * verify the checksum here and then send up the stack
+                         */
+                        if ((len > 76 /*64 */ ) &&
+                            (skb->protocol == ETHERTYPE_IP) &&
+                            (XEmac_mIsRxHwCsum(&lp->Emac))) {
+
+                                EmacFCS = 0;
+                                EmacFCSPtr = (u8 *) &EmacFCS;
+
+                                /* IHL field (in 32-bit words) converted to
+                                 * bytes; data here starts after the Ethernet
+                                 * header (eth_type_trans pulled it). */
+                                IpHeaderLength = ((((*(u16 *) (skb->data)) & 0x0F00) >> 8) * 4);
+
+                                /* Grab protocol */
+                                ProtoTTL = (*(u16 *) (skb->data + 8)) & 0x00FF;
+
+                                /*
+                                 * Set the length of the IP payload for the CS calculation
+                                 */
+                                IpDataLen = len - IpHeaderLength - ENET_HDR_SIZ;
+
+                                /*
+                                 * Adjust the hardware checksum due to the fact that it ALWAYS includes
+                                 * the FCS field in the RX data, regardless of whether the
+                                 * XEM_STRIP_PAD_FCS_OPTION is set or not set around 2400.
+                                 */
+                                CalcCSum = HwCSum;
+
+                                /* Extract the trailing FCS bytes in the byte
+                                 * order the hardware summed them, which
+                                 * depends on the payload alignment. */
+                                if (((IpDataLen & 0x0003) == 2) ||
+                                    ((IpDataLen & 0x0003) == 0)) {
+                                        /*
+                                         * 16-bit alignment case
+                                         */
+                                        EmacFCSPtr[0] = skb_mac_header(skb)[len-4];
+                                        EmacFCSPtr[1] = skb_mac_header(skb)[len-3];
+                                        EmacFCSPtr[2] = skb_mac_header(skb)[len-2];
+                                        EmacFCSPtr[3] = skb_mac_header(skb)[len-1];
+                                }
+                                else if ((IpDataLen & 0x0003) == 1) {
+                                        /*
+                                         * 8-bit alignment case one
+                                         */
+                                        EmacFCSPtr[0] = skb_mac_header(skb)[len-3];
+                                        EmacFCSPtr[1] = skb_mac_header(skb)[len-2];
+                                        EmacFCSPtr[2] = skb_mac_header(skb)[len-1];
+                                        EmacFCSPtr[3] = skb_mac_header(skb)[len-4];
+                                }
+                                else if ((IpDataLen & 0x0003) == 3) {
+                                        /*
+                                         * 8-bit alignment case two
+                                         */
+                                        EmacFCSPtr[0] = skb_mac_header(skb)[len-1];
+                                        EmacFCSPtr[1] = skb_mac_header(skb)[len-4];
+                                        EmacFCSPtr[2] = skb_mac_header(skb)[len-3];
+                                        EmacFCSPtr[3] = skb_mac_header(skb)[len-2];
+                                }
+
+                                /* Subtract the FCS words from the sum by
+                                 * adding their one's complements. */
+                                CalcCSum +=
+                                        (u32) ((*(u16 *) (&(EmacFCSPtr[0]))) ^
+                                               0xFFFF);
+                                CalcCSum +=
+                                        (u32) ((*(u16 *) (&(EmacFCSPtr[2]))) ^
+                                               0xFFFF);
+                                CalcCSum += (u32) (0xFFFB);     /* this is the subtraction of 4, trust me */
+
+                                HwCSum = ((CalcCSum >> 16) +
+                                          (CalcCSum & 0x0000FFFF));
+
+                                PhCSum = AddCsumRxPseudoHeader(skb, HwCSum,
+                                                               IpDataLen,
+                                                               ProtoTTL);
+
+                                /*
+                                 * The resulting checksum should be equal to 0xFFFF. If not, the upper
+                                 * layers can calculate where the error is and retransmit if needed.
+                                 */
+                                if (PhCSum == 0xFFFF) {
+
+                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                                        /* NOTE:
+                                         * The following line goes together with the changed
+                                         * skb_put(skb, len);
+                                         * line above. If you change the line above to skb_put(),
+                                         * you need to delete the following line.
+                                         * wgr 09/14/2006
+                                         */
+                                        skb->len -= 4;
+                                        skb->csum = 0xFFFF;
+                                }
+                        }
+
+                        netif_rx(skb);  /* Send the packet upstream. */
+                }
+        }
+}
+
+/*
+ * Scatter-gather receive callback (interrupt context).  Accumulates the
+ * number of ready buffer descriptors for this device and makes sure the
+ * device is queued exactly once on receivedQueue for the tasklet to
+ * drain.
+ */
+static void SgRecvHandler(void *CallBackRef, XBufDescriptor * BdPtr, u32 NumBds)
+{
+	struct net_device *dev = (struct net_device *) CallBackRef;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct list_head *entry = NULL;
+	int already_queued = 0;
+
+	spin_lock(&rcvSpin);
+	/* Already on the queue?  Just add the new descriptor count. */
+	list_for_each(entry, &receivedQueue) {
+		if (entry == &(lp->rcv)) {
+			lp->rcvBds += NumBds;
+			already_queued = 1;
+			break;
+		}
+	}
+	/* First notification since the last drain: queue this device and
+	 * kick the receive bottom half. */
+	if (!already_queued) {
+		lp->rcvBds = NumBds;
+		lp->rcvBdPtr = BdPtr;
+		list_add_tail(&lp->rcv, &receivedQueue);
+		tasklet_schedule(&SgRecvBH);
+	}
+	spin_unlock(&rcvSpin);
+}
+
+/*
+ * net_device watchdog hook: the transmitter has been silent for longer
+ * than watchdog_timeo, so log the event, count it, and reset the EMAC.
+ */
+static void xenet_tx_timeout(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	unsigned long irq_flags;
+
+	printk("%s: Exceeded transmit timeout of %lu ms.\n",
+	       dev->name, TX_TIMEOUT * 1000UL / HZ);
+	lp->stats.tx_errors++;
+
+	/* Serialize against other reset sources (error handler, ethtool). */
+	spin_lock_irqsave(&reset_lock, irq_flags);
+	reset(dev, UNKNOWN_DUPLEX);
+	spin_unlock_irqrestore(&reset_lock, irq_flags);
+}
+
+/* The callback function for frames received when in FIFO mode.
+ * Allocates a maximal-size skb, pulls the frame out of the RX FIFO into
+ * it, crops the FCS, and hands the packet to the network stack.
+ * Runs in interrupt context (GFP_ATOMIC allocation, plain spin_lock).
+ */
+static void FifoRecvHandler(void *CallbackRef)
+{
+	struct net_device *dev = (struct net_device *) CallbackRef;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct sk_buff *skb;
+	unsigned int align;
+	u32 len;
+	int Result;
+
+	/*
+	 * The OS independent Xilinx EMAC code does not provide a
+	 * function to get the length of an incoming packet and a
+	 * separate call to actually get the packet data. It does this
+	 * because they didn't add any code to keep the hardware's
+	 * receive length and data FIFOs in sync. Instead, they require
+	 * that you send a maximal length buffer so that they can read
+	 * the length and data FIFOs in a single chunk of code so that
+	 * they can't get out of sync. So, we need to allocate an skb
+	 * that can hold a maximal sized packet. The OS independent
+	 * code needs to see the data 32/64-bit aligned, so we tack on an
+	 * extra four just in case we need to do an skb_reserve to get
+	 * it that way.
+	 */
+	len = XEM_MAX_FRAME_SIZE;
+	if (!(skb = alloc_skb(len + ALIGNMENT, GFP_ATOMIC))) {
+		/* Couldn't get memory. */
+		lp->stats.rx_dropped++;
+		printk(KERN_ERR "%s: Could not allocate receive buffer.\n",
+		       dev->name);
+		return;
+	}
+
+	/*
+	 * A new skb should have the data word aligned, but this code is
+	 * here just in case that isn't true... Calculate how many
+	 * bytes we should reserve to get the data to start on a word
+	 * boundary. */
+	align = BUFFER_ALIGN(skb->data);
+	if (align)
+		skb_reserve(skb, align);
+
+	/* On return, len holds the actual received frame length. */
+	Result = XEmac_FifoRecv(&lp->Emac, (u8 *) skb->data, &len);
+	if (Result != XST_SUCCESS) {
+		int need_reset = status_requires_reset(Result);
+
+		lp->stats.rx_errors++;
+		dev_kfree_skb(skb);
+		printk(KERN_ERR "%s: Could not receive buffer, error=%d%s.\n",
+		       dev->name, Result,
+		       need_reset ? ", resetting device." : "");
+		if (need_reset) {
+			spin_lock(&reset_lock);
+			reset(dev, UNKNOWN_DUPLEX);
+			spin_unlock(&reset_lock);
+		}
+
+		return;
+	}
+
+	skb_put(skb, len - 4);	/* Tell the skb how much data we got,
+				   crop FCS (the last four bytes). */
+	skb->dev = dev;		/* Fill out required meta-data. */
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	lp->stats.rx_packets++;
+	/* NOTE(review): rx_bytes includes the 4-byte FCS that was cropped
+	 * from the skb above -- confirm whether that is intended. */
+	lp->stats.rx_bytes += len;
+
+	netif_rx(skb);		/* Send the packet upstream. */
+}
+
+/* The callback function for errors reported by the EMAC layer.
+ * DMA errors are deliberately ignored; anything else is logged and, if
+ * the status code demands it, the device is reset. */
+static void ErrorHandler(void *CallbackRef, int Code)
+{
+	struct net_device *dev = (struct net_device *) CallbackRef;
+	int need_reset = status_requires_reset(Code);
+	unsigned long irq_state;
+
+	/* ignore some errors */
+	if (Code == XST_DMA_ERROR)
+		return;
+
+	printk(KERN_ERR "%s: device error %d%s\n",
+	       dev->name, Code, need_reset ? ", resetting device." : "");
+	if (!need_reset)
+		return;
+
+	spin_lock_irqsave(&reset_lock, irq_state);
+	reset(dev, UNKNOWN_DUPLEX);
+	spin_unlock_irqrestore(&reset_lock, irq_state);
+}
+
+/*
+ * Allocate the scatter-gather descriptor pool from coherent memory plus
+ * the local send-buffer area, then pre-load the receive channel with
+ * XEM_DFT_RECV_DESC DMA-mapped socket buffers.
+ *
+ * Returns 0 on success, -1 on failure.  NOTE(review): allocations made
+ * before a failure (desc_space, ddrVirtPtr, previously queued skbs) are
+ * expected to be reclaimed by xenet_remove_ndev()/free_descriptor_skb()
+ * -- confirm the caller's error path does call them.
+ */
+static int descriptor_init(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int i, recvsize, sendsize;
+	int dftsize;
+	u32 *recvpoolptr, *sendpoolptr;
+	void *recvpoolphy, *sendpoolphy;
+
+	/* calc size of descriptor space pool; alloc from non-cached memory */
+	dftsize = (XEM_DFT_RECV_DESC + XEM_DFT_SEND_DESC) *
+		sizeof(XBufDescriptor);
+
+	lp->desc_space = dma_alloc_coherent(NULL, dftsize,
+					    &lp->desc_space_handle, GFP_KERNEL);
+	if (lp->desc_space == 0) {
+		return -1;
+	}
+
+	lp->desc_space_size = dftsize;
+
+	/* Local bounce-buffer area used for sends when TX DRE is absent. */
+	lp->ddrSize = DFT_LOCAL_SEND_DESC * (XEM_MAX_FRAME_SIZE + ALIGNMENT);
+	lp->ddrOffset = 0;
+	lp->ddrVirtPtr = kmalloc(lp->ddrSize, GFP_ATOMIC);
+
+	if (lp->ddrVirtPtr == 0)
+		return -1;
+
+	/* With TX DRE every send descriptor is usable; without it we are
+	 * limited by the number of local bounce buffers. */
+	if (XEmac_mIsTxDre(&lp->Emac)) {
+		atomic_set(&lp->availSendBds, XEM_DFT_SEND_DESC);
+	}
+	else {
+		atomic_set(&lp->availSendBds, DFT_LOCAL_SEND_DESC);
+	}
+
+	/* calc size of send and recv descriptor space */
+	recvsize = XEM_DFT_RECV_DESC * sizeof(XBufDescriptor);
+	sendsize = XEM_DFT_SEND_DESC * sizeof(XBufDescriptor);
+
+	recvpoolptr = lp->desc_space;
+	sendpoolptr = (void *) ((u32) lp->desc_space + recvsize);
+
+	recvpoolphy = (void *) lp->desc_space_handle;
+	sendpoolphy = (void *) ((u32) lp->desc_space_handle + recvsize);
+
+	/* add ptr to descriptor space to the driver */
+	XEmac_SetSgRecvSpace(&lp->Emac, recvpoolptr, recvsize, recvpoolphy);
+	XEmac_SetSgSendSpace(&lp->Emac, sendpoolptr, sendsize, sendpoolphy);
+
+	/* allocate skb's and give them to the dma engine */
+	for (i = 0; i < XEM_DFT_RECV_DESC; i++) {
+		struct sk_buff *skb;
+		XBufDescriptor bd;
+		int result;
+		u32 skb_vaddr, align;
+
+		skb = alloc_skb(XEM_MAX_FRAME_SIZE + ALIGNMENT, GFP_ATOMIC);
+		if (skb == 0) {
+			return -1;
+		}
+
+		align = BUFFER_ALIGN(skb->data);
+		if (align)
+			skb_reserve(skb, align);
+
+		skb_vaddr = (u32) dma_map_single(NULL, skb->data,
+						 XEM_MAX_FRAME_SIZE,
+						 DMA_FROM_DEVICE);
+
+		/*
+		 * initialize descriptors and set buffer address
+		 * buffer length gets max frame size
+		 */
+		XBufDescriptor_Initialize(&bd);
+		XBufDescriptor_Lock(&bd);
+		XBufDescriptor_SetDestAddress(&bd, skb_vaddr);
+		XBufDescriptor_SetLength(&bd, XEM_MAX_FRAME_SIZE);
+		XBufDescriptor_SetId(&bd, skb);
+
+		/*
+		 * descriptor with attached buffer to the driver and
+		 * let it make it ready for frame reception
+		 */
+		result = XEmac_SgRecv(&lp->Emac, &bd);
+		if (result != XST_SUCCESS) {
+			/* The driver did not take ownership of this buffer;
+			 * unmap and free it instead of leaking both the skb
+			 * and its DMA mapping. */
+			dma_unmap_single(NULL, skb_vaddr, XEM_MAX_FRAME_SIZE,
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Walk the receive descriptor ring and release every pre-allocated
+ * socket buffer: undo the streaming DMA mapping created by
+ * descriptor_init() and free the skb itself.
+ */
+static void free_descriptor_skb(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int i;
+	XBufDescriptor *BdPtr;
+	struct sk_buff *skb;
+
+	BdPtr = (XBufDescriptor *) lp->Emac.RecvChannel.VirtPtr;
+	for (i = 0; i < XEM_DFT_RECV_DESC; i++) {
+		skb = (struct sk_buff *) XBufDescriptor_GetId(BdPtr);
+		/* Use dma_unmap_single() to match the dma_map_single() in
+		 * descriptor_init(); the old pci_unmap_single() call mixed
+		 * the PCI DMA API with the generic DMA_FROM_DEVICE flag. */
+		dma_unmap_single(NULL, virt_to_bus(skb->data),
+				 XBufDescriptor_GetLength(BdPtr),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb(skb);
+		BdPtr = P_TO_V(&lp->Emac.RecvChannel,
+			       XBufDescriptor_GetNextPtr(BdPtr));
+	}
+}
+
+/*
+ * net_device->set_multicast_list hook: reprogram the EMAC promiscuous
+ * (and, eventually, multicast) options to match dev->flags.  The EMAC
+ * must be stopped while options change, so the whole stop/set/start
+ * sequence runs under reset_lock with interrupts disabled.
+ */
+static void xenet_set_multicast_list(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u32 Options;
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * XEmac_Start, XEmac_Stop and XEmac_SetOptions are supposed to
+	 * be protected by a semaphore. We do have one area in which
+	 * this is a problem.
+	 *
+	 * xenet_set_multicast_list() is called while the link is up and
+	 * interrupts are enabled, so at any point in time we could get
+	 * an error that causes our reset() to be called. reset() calls
+	 * the aforementioned functions, and we need to call them from
+	 * here as well.
+	 *
+	 * The solution is to make sure that we don't get interrupts or
+	 * timers popping while we are in this function.
+	 */
+	spin_lock_irqsave(&reset_lock, flags);
+
+	if ((ret = XEmac_Stop(&lp->Emac)) == XST_SUCCESS) {
+
+		Options = XEmac_GetOptions(&lp->Emac);
+
+		/* Clear out the bits we may set. */
+		Options &= ~(XEM_PROMISC_OPTION | XEM_MULTICAST_OPTION);
+
+		if (dev->flags & IFF_PROMISC)
+			Options |= XEM_PROMISC_OPTION;
+#if 0
+		else {
+			/*
+			 * SAATODO: Xilinx is going to add multicast support to their
+			 * VxWorks adapter and OS independent layer. After that is done,
+			 * this skeleton code should be fleshed out. Note that
+			 * IFF_MULTICAST is being masked out from dev->flags in
+			 * xenet_probe, so that will need to be removed to actually do
+			 * multidrop.
+			 */
+			if ((dev->flags & IFF_ALLMULTI)
+			    || dev->mc_count > MAX_MULTICAST ? ? ?) {
+				xemac_get_all_multicast ? ? ? ();
+				Options |= XEM_MULTICAST_OPTION;
+			}
+			else if (dev->mc_count != 0) {
+				struct dev_mc_list *mc;
+
+				XEmac_MulticastClear(&lp->Emac);
+				for (mc = dev->mc_list; mc; mc = mc->next)
+					XEmac_MulticastAdd(&lp->Emac,
+							   mc->dmi_addr);
+				Options |= XEM_MULTICAST_OPTION;
+			}
+		}
+#endif
+
+		/*
+		 * The following function will return an error if the EMAC is already
+		 * started. We know it isn't started so we can safely ignore the
+		 * return value. We cast it to void to make that explicit.
+		 */
+		(void) XEmac_SetOptions(&lp->Emac, Options);
+
+		/*
+		 * XEmac_Start returns an error when: it is already started, the send
+		 * and receive handlers are not set, or a scatter-gather DMA list is
+		 * missing. None of these can happen at this point, so we cast the
+		 * return to void to make that explicit.
+		 */
+		(void) XEmac_Start(&lp->Emac);
+	}
+	/* All done, get those interrupts and timers going again. */
+	spin_unlock_irqrestore(&reset_lock, flags);
+}
+
+
+/*
+ * ethtool GSET helper: fill *ecmd from the EMAC options and the PHY's
+ * BMCR/BMSR/ADVERTISE registers.
+ *
+ * Returns 0 on success, -1 if any PHY read fails, -EIO if a packet
+ * threshold cannot be read in SG-DMA mode.
+ */
+static int
+xenet_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	int ret;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u32 mac_options;
+	u8 threshold;
+	u16 mii_cmd;
+	u16 mii_status;
+	u16 mii_advControl;
+	int xs;
+
+	memset(ecmd, 0, sizeof(struct ethtool_cmd));
+	mac_options = XEmac_GetOptions(&(lp->Emac));
+	xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr, MII_BMCR, &mii_cmd);
+	if (xs != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: Could not read mii command register; error %d\n",
+		       dev->name, xs);
+		return -1;
+	}
+	xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr, MII_BMSR, &mii_status);
+	if (xs != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: Could not read mii status register; error %d\n",
+		       dev->name, xs);
+		return -1;
+	}
+	xs = XEmac_PhyRead(&lp->Emac, lp->mii_addr, MII_ADVERTISE,
+			   &mii_advControl);
+	if (xs != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: Could not read mii advertisement control register; error %d\n",
+		       dev->name, xs);
+		return -1;
+	}
+
+	/* Duplex comes from the EMAC option bits, not the PHY. */
+	if (mac_options & XEM_FDUPLEX_OPTION)
+		ecmd->duplex = DUPLEX_FULL;
+	else
+		ecmd->duplex = DUPLEX_HALF;
+	/* Translate BMSR capability bits into ethtool SUPPORTED_* flags. */
+	if (mii_status & BMSR_100FULL)
+		ecmd->supported |= SUPPORTED_100baseT_Full;
+	if (mii_status & BMSR_100HALF)
+		ecmd->supported |= SUPPORTED_100baseT_Half;
+	if (mii_status & BMSR_10FULL)
+		ecmd->supported |= SUPPORTED_10baseT_Full;
+	if (mii_status & BMSR_10HALF)
+		ecmd->supported |= SUPPORTED_10baseT_Half;
+	if (lp->Emac.Config.HasMii)
+		ecmd->supported |= SUPPORTED_MII;
+	else
+		ecmd->supported &= (~SUPPORTED_MII);
+	if (mii_status & BMSR_ANEGCAPABLE)
+		ecmd->supported |= SUPPORTED_Autoneg;
+	/* Speed: from the advertisement if autoneg completed, otherwise
+	 * from the forced-speed bit in BMCR. */
+	if (mii_status & BMSR_ANEGCOMPLETE) {
+		ecmd->autoneg = AUTONEG_ENABLE;
+		ecmd->advertising |= ADVERTISED_Autoneg;
+		if ((mii_advControl & ADVERTISE_100FULL) ||
+		    (mii_advControl & ADVERTISE_100HALF))
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
+	}
+	else {
+		ecmd->autoneg = AUTONEG_DISABLE;
+		if (mii_cmd & BMCR_SPEED100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
+	}
+	if (mii_advControl & ADVERTISE_10FULL)
+		ecmd->advertising |= ADVERTISED_10baseT_Full;
+	if (mii_advControl & ADVERTISE_10HALF)
+		ecmd->advertising |= ADVERTISED_10baseT_Half;
+	if (mii_advControl & ADVERTISE_100FULL)
+		ecmd->advertising |= ADVERTISED_100baseT_Full;
+	if (mii_advControl & ADVERTISE_100HALF)
+		ecmd->advertising |= ADVERTISED_100baseT_Half;
+	ecmd->advertising |= ADVERTISED_MII;
+	ecmd->port = PORT_MII;
+	ecmd->phy_address = lp->Emac.PhysAddress;
+	ecmd->transceiver = XCVR_INTERNAL;
+	/* In SG-DMA mode also report the interrupt-coalescing thresholds. */
+	if (XEmac_mIsSgDma(&lp->Emac)) {
+		if ((ret =
+		     XEmac_GetPktThreshold(&lp->Emac, XEM_SEND,
+					   &threshold)) == XST_SUCCESS) {
+			ecmd->maxtxpkt = threshold;
+		}
+		else
+			return -EIO;
+		if ((ret =
+		     XEmac_GetPktThreshold(&lp->Emac, XEM_RECV,
+					   &threshold)) == XST_SUCCESS) {
+			ecmd->maxrxpkt = threshold;
+		}
+		else
+			return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * ethtool GCOALESCE helper: read the packet-count thresholds and
+ * wait-bound timers for both directions into *ec.
+ * Returns 0 on success or -EIO if any driver-layer query fails.
+ */
+static int
+xenet_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	int ret;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u8 threshold;
+
+	memset(ec, 0, sizeof(struct ethtool_coalesce));
+	if ((ret =
+	     XEmac_GetPktThreshold(&lp->Emac, XEM_RECV,
+				   &threshold)) != XST_SUCCESS) {
+		printk(KERN_INFO "XEmac_GetPktThreshold error %d\n", ret);
+		return -EIO;
+	}
+	ec->rx_max_coalesced_frames = threshold;
+	if ((ret =
+	     XEmac_GetPktWaitBound(&lp->Emac, XEM_RECV,
+				   &(ec->rx_coalesce_usecs))) != XST_SUCCESS) {
+		printk(KERN_INFO "XEmac_GetPktWaitBound error %d\n", ret);
+		return -EIO;
+	}
+	if ((ret =
+	     XEmac_GetPktThreshold(&lp->Emac, XEM_SEND,
+				   &threshold)) != XST_SUCCESS) {
+		printk(KERN_INFO "XEmac_GetPktThreshold send error %d\n", ret);
+		return -EIO;
+	}
+	ec->tx_max_coalesced_frames = threshold;
+	if ((ret =
+	     XEmac_GetPktWaitBound(&lp->Emac, XEM_SEND,
+				   &(ec->tx_coalesce_usecs))) != XST_SUCCESS) {
+		printk(KERN_INFO "XEmac_GetPktWaitBound send error %d\n", ret);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * ethtool SCOALESCE helper: stop the EMAC, program the new packet-count
+ * thresholds and wait-bound timers for both directions, then restart it.
+ * The whole sequence runs under reset_lock with interrupts disabled.
+ *
+ * Returns 0 on success or -EIO on any driver-layer failure.  The lock
+ * is now released on every exit path; the previous version returned
+ * with the spinlock held (and interrupts off) on error.
+ * NOTE(review): on a mid-sequence error the EMAC is left stopped, as
+ * before -- confirm whether a restart attempt is wanted here.
+ */
+static int
+xenet_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	int ret;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&reset_lock, flags);
+	if ((ret = XEmac_Stop(&lp->Emac)) != XST_SUCCESS) {
+		spin_unlock_irqrestore(&reset_lock, flags);
+		return -EIO;
+	}
+	if ((ret =
+	     XEmac_SetPktThreshold(&lp->Emac, XEM_RECV,
+				   ec->rx_max_coalesced_frames)) !=
+	    XST_SUCCESS) {
+		spin_unlock_irqrestore(&reset_lock, flags);
+		printk(KERN_INFO "XEmac_SetPktThreshold error %d\n", ret);
+		return -EIO;
+	}
+	if ((ret =
+	     XEmac_SetPktWaitBound(&lp->Emac, XEM_RECV,
+				   ec->rx_coalesce_usecs)) != XST_SUCCESS) {
+		spin_unlock_irqrestore(&reset_lock, flags);
+		printk(KERN_INFO "XEmac_SetPktWaitBound error %d\n", ret);
+		return -EIO;
+	}
+	if ((ret =
+	     XEmac_SetPktThreshold(&lp->Emac, XEM_SEND,
+				   ec->tx_max_coalesced_frames)) !=
+	    XST_SUCCESS) {
+		spin_unlock_irqrestore(&reset_lock, flags);
+		printk(KERN_INFO "XEmac_SetPktThreshold send error %d\n", ret);
+		return -EIO;
+	}
+	if ((ret =
+	     XEmac_SetPktWaitBound(&lp->Emac, XEM_SEND,
+				   ec->tx_coalesce_usecs)) != XST_SUCCESS) {
+		spin_unlock_irqrestore(&reset_lock, flags);
+		printk(KERN_INFO "XEmac_SetPktWaitBound send error %d\n", ret);
+		return -EIO;
+	}
+	if ((ret = XEmac_Start(&lp->Emac)) != XST_SUCCESS) {
+		spin_unlock_irqrestore(&reset_lock, flags);
+		return -EIO;
+	}
+	spin_unlock_irqrestore(&reset_lock, flags);
+	return 0;
+}
+
+/*
+ * ethtool GDRVINFO helper: report the driver identification strings.
+ * Always succeeds.
+ */
+static int
+xenet_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *ed)
+{
+	memset(ed, 0, sizeof(*ed));
+	strcpy(ed->version, DRIVER_VERSION);
+	strcpy(ed->driver, DRIVER_NAME);
+	return 0;
+}
+
+/*
+ * ethtool GRINGPARAM helper: the descriptor ring sizes are fixed at
+ * build time, so pending always equals max.  Always succeeds.
+ */
+static int
+xenet_ethtool_get_ringparam(struct net_device *dev,
+			    struct ethtool_ringparam *erp)
+{
+	memset(erp, 0, sizeof(*erp));
+	erp->rx_pending = XEM_DFT_RECV_DESC;
+	erp->rx_max_pending = XEM_DFT_RECV_DESC;
+	erp->tx_pending = XEM_DFT_SEND_DESC;
+	erp->tx_max_pending = XEM_DFT_SEND_DESC;
+	return 0;
+}
+
+/* Number of PHY registers dumped by ETHTOOL_GREGS. */
+#define EMAG_REGS_N 32
+/* GREGS transfer buffer: the standard ethtool_regs header followed by
+ * one 16-bit value per PHY register. */
+struct mac_regsDump {
+	struct ethtool_regs hd;
+	u16 data[EMAG_REGS_N];
+};
+
+/*
+ * ethtool GREGS helper: read all EMAG_REGS_N PHY registers into the
+ * caller-provided mac_regsDump.  *(int *)ret is set to 0 on success or
+ * -EIO if any PHY read fails.
+ */
+static void
+xenet_ethtool_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		       void *ret)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct mac_regsDump *dump = (struct mac_regsDump *) regs;
+	int i;
+	int r;
+
+	dump->hd.version = 0;
+	/* Report the payload size in bytes.  The previous code used
+	 * sizeof(dump->data) -- the size of the whole array -- yielding
+	 * EMAG_REGS_N times the real length (2048 instead of 64). */
+	dump->hd.len = EMAG_REGS_N * sizeof(dump->data[0]);
+	for (i = 0; i < EMAG_REGS_N; i++) {
+		if ((r =
+		     XEmac_PhyRead(&(lp->Emac), lp->mii_addr, i,
+				   &(dump->data[i]))) != XST_SUCCESS) {
+			printk(KERN_INFO "PhyRead ERROR %d\n", r);
+			*(int *) ret = -EIO;
+			return;
+		}
+	}
+	*(int *) ret = 0;
+}
+
+/*
+ * SIOCETHTOOL dispatcher.  Reads the ethtool command word from
+ * userspace, performs the requested sub-operation, and copies results
+ * back.  Returns 0 on success, -EFAULT on copy failures, -EIO on
+ * driver/PHY failures, -EOPNOTSUPP for unknown commands.
+ *
+ * Fixes relative to the previous version: the GDRVINFO case assigned
+ * edrv.cmd to itself, and the GREGS/GRINGPARAM cases then copied that
+ * uninitialized edrv.cmd; all three now use ecmd.cmd, which was filled
+ * from userspace above.
+ */
+static int xenet_do_ethtool_ioctl(struct net_device *dev, struct ifreq *rq)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct ethtool_cmd ecmd;
+	struct ethtool_coalesce eco;
+	struct ethtool_drvinfo edrv;
+	struct ethtool_ringparam erp;
+	struct ethtool_pauseparam epp;
+	struct mac_regsDump regs;
+	int ret = -EOPNOTSUPP;
+	int result;
+	u32 Options;
+	u16 mii_reg_sset;
+	u16 mii_reg_spause;
+	u16 mii_reg_autoneg;
+	unsigned long flags;
+
+	/* Only the command word is fetched here; sub-commands that need
+	 * the full structure re-copy it below. */
+	if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd.cmd)))
+		return -EFAULT;
+	switch (ecmd.cmd) {
+	case ETHTOOL_GSET:
+		ret = xenet_ethtool_get_settings(dev, &ecmd);
+		if (ret >= 0) {
+			if (copy_to_user(rq->ifr_data, &ecmd, sizeof(ecmd)))
+				ret = -EFAULT;
+		}
+		break;
+	case ETHTOOL_SSET:
+		if (copy_from_user
+		    (&ecmd, rq->ifr_data, sizeof(struct ethtool_cmd)))
+			return -EFAULT;
+		mii_reg_sset = 0;
+		if (ecmd.speed == SPEED_100)
+			mii_reg_sset |= BMCR_SPEED100;
+		if (ecmd.duplex == DUPLEX_FULL)
+			mii_reg_sset |= BMCR_FULLDPLX;
+		if (ecmd.autoneg == AUTONEG_ENABLE) {
+			/* Enable/restart autoneg, then rewrite the
+			 * advertisement mask to match the requested
+			 * speed/duplex. */
+			mii_reg_sset |= (BMCR_ANENABLE | BMCR_ANRESTART);
+			spin_lock_irqsave(&reset_lock, flags);
+			result = XEmac_PhyWrite(&lp->Emac, lp->mii_addr,
+						MII_BMCR, mii_reg_sset);
+			if (result != XST_SUCCESS) {
+				spin_unlock_irqrestore(&reset_lock, flags);
+				ret = -EIO;
+				break;
+			}
+			result = XEmac_PhyRead(&lp->Emac, lp->mii_addr,
+					       MII_ADVERTISE, &mii_reg_sset);
+			if (result != XST_SUCCESS) {
+				spin_unlock_irqrestore(&reset_lock, flags);
+				ret = -EIO;
+				break;
+			}
+			if (ecmd.speed == SPEED_100) {
+				if (ecmd.duplex == DUPLEX_FULL) {
+					mii_reg_sset |=
+						(ADVERTISE_10FULL |
+						 ADVERTISE_100FULL |
+						 ADVERTISE_10HALF |
+						 ADVERTISE_100HALF);
+				}
+				else {
+					mii_reg_sset |=
+						(ADVERTISE_10HALF |
+						 ADVERTISE_100HALF);
+					mii_reg_sset &=
+						~(ADVERTISE_10FULL |
+						  ADVERTISE_100FULL);
+				}
+			}
+			else {
+				if (ecmd.duplex == DUPLEX_FULL) {
+					mii_reg_sset |=
+						(ADVERTISE_10FULL |
+						 ADVERTISE_10HALF);
+					mii_reg_sset &=
+						~(ADVERTISE_100FULL |
+						  ADVERTISE_100HALF);
+				}
+				else {
+					mii_reg_sset |= (ADVERTISE_10HALF);
+					mii_reg_sset &=
+						~(ADVERTISE_100FULL |
+						  ADVERTISE_100HALF |
+						  ADVERTISE_10FULL);
+				}
+			}
+			result = XEmac_PhyWrite(&lp->Emac, lp->mii_addr,
+						MII_ADVERTISE, mii_reg_sset);
+			spin_unlock_irqrestore(&reset_lock, flags);
+			if (result != XST_SUCCESS) {
+				ret = -EIO;
+				break;
+			}
+		}
+		else {
+			/* Forced mode: program speed/duplex directly. */
+			mii_reg_sset &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+			if (ecmd.duplex == DUPLEX_FULL) {
+				mii_reg_sset |= BMCR_FULLDPLX;
+			}
+			else {
+				mii_reg_sset &= ~BMCR_FULLDPLX;
+			}
+			if (ecmd.speed == SPEED_100) {
+				mii_reg_sset |= BMCR_SPEED100;
+			}
+			else {
+				mii_reg_sset &= ~BMCR_SPEED100;
+			}
+			spin_lock_irqsave(&reset_lock, flags);
+			result = XEmac_PhyWrite(&lp->Emac, lp->mii_addr,
+						MII_BMCR, mii_reg_sset);
+			spin_unlock_irqrestore(&reset_lock, flags);
+			if (result != XST_SUCCESS) {
+				ret = -EIO;
+				break;
+			}
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_GPAUSEPARAM:
+		ret = xenet_ethtool_get_settings(dev, &ecmd);
+		if (ret < 0) {
+			break;
+		}
+		epp.cmd = ecmd.cmd;
+		epp.autoneg = ecmd.autoneg;
+		Options = XEmac_GetOptions(&lp->Emac);
+		if (Options & XEM_INSERT_PAD_OPTION) {
+			epp.rx_pause = 1;
+			epp.tx_pause = 1;
+		}
+		else {
+			epp.rx_pause = 0;
+			epp.tx_pause = 0;
+		}
+		if (copy_to_user
+		    (rq->ifr_data, &epp, sizeof(struct ethtool_pauseparam)))
+			ret = -EFAULT;
+		else
+			ret = 0;
+		break;
+	case ETHTOOL_SPAUSEPARAM:
+		if (copy_from_user
+		    (&epp, rq->ifr_data, sizeof(struct ethtool_pauseparam)))
+			return -EFAULT;
+		ret = xenet_ethtool_get_settings(dev, &ecmd);
+		if (ret < 0) {
+			break;
+		}
+		epp.cmd = ecmd.cmd;
+		mii_reg_spause = 0;
+		if (epp.autoneg == AUTONEG_ENABLE) {
+			mii_reg_spause |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		}
+		else {
+			if (ecmd.speed == SPEED_100)
+				mii_reg_spause |= BMCR_SPEED100;
+			if (ecmd.duplex == DUPLEX_FULL)
+				mii_reg_spause |= BMCR_FULLDPLX;
+		}
+		spin_lock_irqsave(&reset_lock, flags);
+		result = XEmac_PhyWrite(&lp->Emac, lp->mii_addr,
+					MII_BMCR, mii_reg_spause);
+		spin_unlock_irqrestore(&reset_lock, flags);
+		if (result != XST_SUCCESS) {
+			ret = -EIO;
+			break;
+		}
+		/* Pause is only programmable symmetrically here. */
+		if (epp.rx_pause != epp.tx_pause) {
+			ret = 0;
+			break;
+		}
+		else {
+			spin_lock_irqsave(&reset_lock, flags);
+			(void) XEmac_Stop(&(lp->Emac));
+			Options = XEmac_GetOptions(&lp->Emac);
+			if (epp.rx_pause)
+				Options |= XEM_INSERT_PAD_OPTION;
+			else
+				Options &= ~XEM_INSERT_PAD_OPTION;
+			(void) XEmac_SetOptions(&lp->Emac, Options);
+			(void) XEmac_Start(&(lp->Emac));
+			spin_unlock_irqrestore(&reset_lock, flags);
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_GCOALESCE:
+		eco.cmd = ecmd.cmd;
+		ret = xenet_ethtool_get_coalesce(dev, &eco);
+		if (ret >= 0) {
+			if (copy_to_user
+			    (rq->ifr_data, &eco,
+			     sizeof(struct ethtool_coalesce)))
+				ret = -EFAULT;
+		}
+		break;
+	case ETHTOOL_SCOALESCE:
+		if (copy_from_user
+		    (&eco, rq->ifr_data, sizeof(struct ethtool_coalesce)))
+			return -EFAULT;
+		ret = xenet_ethtool_set_coalesce(dev, &eco);
+		break;
+	case ETHTOOL_GDRVINFO:
+		edrv.cmd = ecmd.cmd;	/* was a self-assignment of edrv.cmd */
+		ret = xenet_ethtool_get_drvinfo(dev, &edrv);
+		if (ret >= 0) {
+			if (copy_to_user
+			    (rq->ifr_data, &edrv,
+			     sizeof(struct ethtool_drvinfo)))
+				ret = -EFAULT;
+		}
+		break;
+	case ETHTOOL_GREGS:
+		regs.hd.cmd = ecmd.cmd;	/* was uninitialized edrv.cmd */
+		xenet_ethtool_get_regs(dev, &(regs.hd), &ret);
+		if (ret >= 0) {
+			if (copy_to_user
+			    (rq->ifr_data, &regs, sizeof(struct mac_regsDump)))
+				ret = -EFAULT;
+		}
+		break;
+	case ETHTOOL_GRINGPARAM:
+		erp.cmd = ecmd.cmd;	/* was uninitialized edrv.cmd */
+		ret = xenet_ethtool_get_ringparam(dev, &(erp));
+		if (ret >= 0) {
+			if (copy_to_user
+			    (rq->ifr_data, &erp,
+			     sizeof(struct ethtool_ringparam)))
+				ret = -EFAULT;
+		}
+		break;
+	case ETHTOOL_NWAY_RST:
+		epp.cmd = ecmd.cmd;
+		mii_reg_autoneg = 0;
+		mii_reg_autoneg |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		spin_lock_irqsave(&reset_lock, flags);
+		result = XEmac_PhyWrite(&lp->Emac, lp->mii_addr,
+					MII_BMCR, mii_reg_autoneg);
+		spin_unlock_irqrestore(&reset_lock, flags);
+		if (result != XST_SUCCESS) {
+			ret = -EIO;
+			break;
+		}
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/*
+ * net_device->do_ioctl hook: ethtool pass-through, MII register
+ * read/write, and private threshold/wait-bound tuning ioctls.
+ *
+ * Fix relative to the previous version: the set-THRESHOLD and
+ * set-WAITBOUND cases returned -EIO while still holding reset_lock
+ * (taken with spin_lock_irqsave), leaving interrupts disabled and the
+ * lock held forever; every error path now unlocks first.
+ */
+static int xenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	/* mii_ioctl_data has 4 u16 fields: phy_id, reg_num, val_in & val_out */
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
+	struct {
+		__u8 threshold;
+		__u32 direction;
+	} thr_arg;
+	struct {
+		__u32 waitbound;
+		__u32 direction;
+	} wbnd_arg;
+	int ret;
+	unsigned long flags;
+
+	int Result;
+
+	switch (cmd) {
+	case SIOCETHTOOL:
+		return xenet_do_ethtool_ioctl(dev, rq);
+	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
+	case SIOCDEVPRIVATE:	/* for binary compat, remove in 2.5 */
+		data->phy_id = lp->mii_addr;
+		/* Fall Through */
+
+	case SIOCGMIIREG:	/* Read MII PHY register. */
+	case SIOCDEVPRIVATE + 1:	/* for binary compat, remove in 2.5 */
+		if (data->phy_id > 31 || data->reg_num > 31)
+			return -ENXIO;
+
+		/* Stop the PHY timer to prevent reentrancy. */
+		del_timer_sync(&lp->phy_timer);
+		spin_lock_irqsave(&reset_lock, flags);
+		Result = XEmac_PhyRead(&lp->Emac, data->phy_id,
+				       data->reg_num, &data->val_out);
+		spin_unlock_irqrestore(&reset_lock, flags);
+		/* Start the PHY timer up again. */
+		lp->phy_timer.expires = jiffies + 2 * HZ;
+		add_timer(&lp->phy_timer);
+
+		if (Result != XST_SUCCESS) {
+			printk(KERN_ERR
+			       "%s: Could not read from PHY, error=%d.\n",
+			       dev->name, Result);
+			return (Result == XST_EMAC_MII_BUSY) ? -EBUSY : -EIO;
+		}
+		return 0;
+
+	case SIOCSMIIREG:	/* Write MII PHY register. */
+	case SIOCDEVPRIVATE + 2:	/* for binary compat, remove in 2.5 */
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (data->phy_id > 31 || data->reg_num > 31)
+			return -ENXIO;
+
+		/* Stop the PHY timer to prevent reentrancy. */
+		del_timer_sync(&lp->phy_timer);
+		spin_lock_irqsave(&reset_lock, flags);
+		Result = XEmac_PhyWrite(&lp->Emac, data->phy_id,
+					data->reg_num, data->val_in);
+		spin_unlock_irqrestore(&reset_lock, flags);
+		/* Start the PHY timer up again. */
+		lp->phy_timer.expires = jiffies + 2 * HZ;
+		add_timer(&lp->phy_timer);
+
+		if (Result != XST_SUCCESS) {
+			printk(KERN_ERR
+			       "%s: Could not write to PHY, error=%d.\n",
+			       dev->name, Result);
+			return (Result == XST_EMAC_MII_BUSY) ? -EBUSY : -EIO;
+		}
+		return 0;
+
+	case SIOCDEVPRIVATE + 3:	/* set THRESHOLD */
+		if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg))) {
+			return -EFAULT;
+		}
+		spin_lock_irqsave(&reset_lock, flags);
+		if ((ret = XEmac_Stop(&lp->Emac)) != XST_SUCCESS) {
+			spin_unlock_irqrestore(&reset_lock, flags);
+			return -EIO;
+		}
+		if ((ret =
+		     XEmac_SetPktThreshold(&lp->Emac, thr_arg.direction,
+					   thr_arg.threshold)) != XST_SUCCESS) {
+			spin_unlock_irqrestore(&reset_lock, flags);
+			return -EIO;
+		}
+		if ((ret = XEmac_Start(&lp->Emac)) != XST_SUCCESS) {
+			spin_unlock_irqrestore(&reset_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&reset_lock, flags);
+		return 0;
+
+	case SIOCDEVPRIVATE + 4:	/* set WAITBOUND */
+		if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg))) {
+			return -EFAULT;
+		}
+		spin_lock_irqsave(&reset_lock, flags);
+		if ((ret = XEmac_Stop(&lp->Emac)) != XST_SUCCESS) {
+			spin_unlock_irqrestore(&reset_lock, flags);
+			return -EIO;
+		}
+		if ((ret =
+		     XEmac_SetPktWaitBound(&lp->Emac, wbnd_arg.direction,
+					   wbnd_arg.waitbound)) !=
+		    XST_SUCCESS) {
+			spin_unlock_irqrestore(&reset_lock, flags);
+			return -EIO;
+		}
+		if ((ret = XEmac_Start(&lp->Emac)) != XST_SUCCESS) {
+			spin_unlock_irqrestore(&reset_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&reset_lock, flags);
+		return 0;
+
+	case SIOCDEVPRIVATE + 5:	/* get THRESHOLD */
+		if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg))) {
+			return -EFAULT;
+		}
+		if ((ret =
+		     XEmac_GetPktThreshold(&lp->Emac, thr_arg.direction,
+					   &(thr_arg.threshold))) !=
+		    XST_SUCCESS) {
+			return -EIO;
+		}
+		if (copy_to_user(rq->ifr_data, &thr_arg, sizeof(thr_arg))) {
+			return -EFAULT;
+		}
+		return 0;
+
+	case SIOCDEVPRIVATE + 6:	/* get WAITBOUND */
+		if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg))) {
+			return -EFAULT;
+		}
+		if ((ret =
+		     XEmac_GetPktWaitBound(&lp->Emac, wbnd_arg.direction,
+					   &(wbnd_arg.waitbound))) !=
+		    XST_SUCCESS) {
+			return -EIO;
+		}
+		if (copy_to_user(rq->ifr_data, &wbnd_arg, sizeof(wbnd_arg))) {
+			return -EFAULT;
+		}
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+
+/*
+ * Release all resources owned by the net_device: SG descriptor space
+ * (and its pre-loaded skbs), the local send-buffer area, the register
+ * mapping, and finally the net_device itself.
+ *
+ * NOTE(review): this calls free_netdev(ndev), and xenet_remove() below
+ * calls free_netdev(ndev) again after invoking this function -- that is
+ * a double free (plus a use of ndev->mem_start after free).  One of the
+ * two calls must go.
+ */
+static void xenet_remove_ndev(struct net_device *ndev)
+{
+	if (ndev) {
+		struct net_local *lp = netdev_priv(ndev);
+
+		/* Descriptor space exists only in SG-DMA mode. */
+		if (XEmac_mIsSgDma(&lp->Emac) && (lp->desc_space)) {
+			free_descriptor_skb(ndev);
+			dma_free_coherent(NULL,
+					  lp->desc_space_size,
+					  lp->desc_space,
+					  lp->desc_space_handle);
+		}
+
+		if (lp->ddrVirtPtr) {
+			kfree(lp->ddrVirtPtr);
+		}
+
+		iounmap((void *) (lp->Emac.BaseAddress));
+		free_netdev(ndev);
+	}
+}
+
+/** Shared device initialization code */
+/*
+ * Shared device initialization: allocate the net_device, map the
+ * registers, initialize the Xilinx EMAC layer (FIFO or SG-DMA mode),
+ * probe for the PHY, wire up the netdev operations and register the
+ * interface.  Returns 0 on success or a negative errno.
+ *
+ * Fix relative to the previous version: the first PHY-scan read passed
+ * a mojibake token ("(R)") where "&reg" belongs, which cannot compile.
+ *
+ * NOTE(review): the error paths fall through to a bare "return rc"
+ * without freeing the net_device, releasing the mem region, or
+ * unmapping the registers -- confirm whether the caller cleans these up
+ * (via xenet_remove()) on probe failure; otherwise they leak.
+ */
+static int xenet_setup(
+	struct device *dev,
+	struct resource *r_mem,
+	struct resource *r_irq,
+	struct xemac_platform_data *pdata) {
+
+	u32 virt_baddr;		/* virtual base address of emac */
+
+	XEmac_Config Config;
+
+	struct net_device *ndev = NULL;
+	struct net_local *lp = NULL;
+
+	int rc;
+
+	u32 phy_addr;		/* used for scanning PHY address */
+	u32 hwid;		/* used for informational HW ID output */
+
+	/* Create an ethernet device instance */
+	ndev = alloc_etherdev(sizeof(struct net_local));
+	if (!ndev) {
+		dev_err(dev, "XEmac: Could not allocate net device.\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+	dev_set_drvdata(dev, ndev);
+
+	ndev->irq = r_irq->start;
+	ndev->mem_start = r_mem->start;
+	ndev->mem_end = r_mem->end;
+
+	if (!request_mem_region(ndev->mem_start, ndev->mem_end - ndev->mem_start + 1, DRIVER_NAME)) {
+		dev_err(dev, "Couldn't lock memory region at %p\n",
+			(void *)ndev->mem_start);
+		rc = -EBUSY;
+		goto error;
+	}
+
+	/* Initialize the private netdev structure
+	 */
+	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
+
+	/* Setup the Config structure for the XEmac_CfgInitialize() call. */
+	Config.BaseAddress = r_mem->start;	/* Physical address */
+	Config.IpIfDmaConfig = pdata->dma_mode;
+	Config.HasMii = pdata->has_mii;
+	Config.HasCam = pdata->has_cam;
+	Config.HasJumbo = pdata->has_jumbo;
+	Config.TxDre = pdata->tx_dre;
+	Config.RxDre = pdata->rx_dre;
+	Config.TxHwCsum = pdata->tx_hw_csum;
+	Config.RxHwCsum = pdata->rx_hw_csum;
+
+	/* Get the virtual base address for the device */
+	virt_baddr = (u32) ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+	if (0 == virt_baddr) {
+		dev_err(dev, "XEmac: Could not allocate iomem.\n");
+		rc = -EIO;
+		goto error;
+	}
+
+	if (XEmac_CfgInitialize(&lp->Emac, &Config, virt_baddr) != XST_SUCCESS) {
+		dev_err(dev, "XEmac: Could not initialize device.\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	/* Set the MAC address */
+	memcpy(ndev->dev_addr, pdata->mac_addr, 6);
+	if (XEmac_SetMacAddress(&lp->Emac, ndev->dev_addr) != XST_SUCCESS) {
+		/* should not fail right after an initialize */
+		dev_err(dev, "XEmac: could not set MAC address.\n");
+		rc = -EIO;
+		goto error;
+	}
+	dev_info(dev,
+		 "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
+		 pdata->mac_addr[0], pdata->mac_addr[1],
+		 pdata->mac_addr[2], pdata->mac_addr[3],
+		 pdata->mac_addr[4], pdata->mac_addr[5]);
+
+	if (XEmac_mIsSgDma(&lp->Emac)) {
+		int result;
+
+		printk(KERN_ERR "XEmac: using sgDMA mode.\n");
+		XEmac_SetSgSendHandler(&lp->Emac, ndev, SgSendHandler);
+		XEmac_SetSgRecvHandler(&lp->Emac, ndev, SgRecvHandler);
+
+		/* Pick the transmit routine based on TX DRE capability. */
+		if (XEmac_mIsTxDre(&lp->Emac) == TRUE) {
+			printk(KERN_INFO "XEmac: using TxDRE mode\n");
+			ndev->hard_start_xmit = xenet_SgSendDre;
+		}
+		else {
+			printk(KERN_INFO "XEmac: not using TxDRE mode\n");
+			if (XEmac_mIsTxHwCsum(&lp->Emac)) {
+				printk(KERN_ERR
+				       "XEmac: HW CONFIGURATION ERROR, "
+				       "Checksum offload without TX DRE!\n");
+				rc = -EIO;
+				goto error;
+			}
+			ndev->hard_start_xmit = xenet_SgSend;
+		}
+		if (XEmac_mIsRxDre(&lp->Emac) == TRUE) {
+			printk(KERN_INFO "XEmac: using RxDRE mode\n");
+		}
+		else {
+			printk(KERN_INFO "XEmac: not using RxDRE mode\n");
+		}
+
+		if (XEmac_mIsTxHwCsum(&lp->Emac)) {
+			printk(KERN_ERR
+			       "XEmac: TX Checksum offload Mode enabled.\n");
+		}
+
+		if (XEmac_mIsRxHwCsum(&lp->Emac)) {
+			printk(KERN_ERR
+			       "XEmac: RX Checksum offload Mode enabled.\n");
+		}
+
+		/* Set up Interrupt handler.
+		 */
+		lp->Isr = XEmac_IntrHandlerDma;
+
+		/* Set up SG DMA descriptors.
+		 */
+		result = descriptor_init(ndev);
+		if (result) {
+			rc = -EIO;
+			goto error;
+		}
+
+		/* set the packet threshold and waitbound */
+		XEmac_SetPktThreshold(&lp->Emac, XEM_SEND, 31);
+		XEmac_SetPktThreshold(&lp->Emac, XEM_RECV, 31);
+		(void) XEmac_SetPktWaitBound(&lp->Emac, XEM_SEND, 5);
+		(void) XEmac_SetPktWaitBound(&lp->Emac, XEM_RECV, 5);
+
+		/* disable SGEND interrupt and enable stripping of FCS and PAD */
+		XEmac_SetOptions(&lp->Emac, XEmac_GetOptions(&lp->Emac) |
+				 (XEM_NO_SGEND_INT_OPTION
+				  /*| XEM_STRIP_PAD_FCS_OPTION */ ));
+
+	}
+	else {
+		printk(KERN_ERR "XEmac: using fifo mode.\n");
+		XEmac_SetFifoRecvHandler(&lp->Emac, ndev, FifoRecvHandler);
+		XEmac_SetFifoSendHandler(&lp->Emac, ndev, FifoSendHandler);
+		ndev->hard_start_xmit = xenet_FifoSend;
+		lp->Isr = XEmac_IntrHandlerFifo;
+	}
+	XEmac_SetErrorHandler(&lp->Emac, ndev, ErrorHandler);
+
+	/* Scan for the PHY.
+	 */
+	lp->mii_addr = 0xFF;
+	for (phy_addr = 0; phy_addr < 31; phy_addr++) {
+		int Result;
+		u16 reg;
+		int repeat;
+
+		Result = XEmac_PhyRead(&lp->Emac, phy_addr, MII_PHYSID1, &reg);
+		if (XST_SUCCESS != Result) {
+			continue;
+		}
+
+		/* Even if we get here, we can not be sure that we actually found a PHY
+		 * at this address. Once in a while, XEmac_PhyRead() returns
+		 * XST_SUCCESS for reads at non-exsiting PHY addresses. This seems to
+		 * be a bug in the hardware. Fortunately, we can work around this
+		 * problem by reading the PHY register several times and checking that
+		 * we always get the same value. If we do, it is VERY likely that a PHY
+		 * exists at that address.
+		 */
+		for (repeat = 0; repeat < 4; repeat++) {
+			u16 check_reg;
+
+			Result = XEmac_PhyRead(&lp->Emac, phy_addr, MII_PHYSID1,
+					       &check_reg);
+			if ((XST_SUCCESS != Result) || (check_reg != reg)) {
+				break;
+			}
+		}
+
+		/* If we read the same value 4 times, we can be pretty sure that there
+		 * is a PHY out there at this address.
+		 */
+		if (4 == repeat) {
+			u16 rev;
+
+			/* Reading the revision # after successfully reading the
+			 * manufacturer ID should NEVER fail. If it does, something really
+			 * bad is happening.
+			 */
+			Result = XEmac_PhyRead(&lp->Emac, phy_addr, MII_PHYSID2,
+					       &rev);
+			if (XST_SUCCESS != Result) {
+				printk("XEmac: Error reading PHY revision for PHY at address %d! " "THIS SHOULD NOT HAPPEN! Ignoring this PHY.\n", phy_addr);
+				continue;
+			}
+
+			/* If we get here, we know that we successfully detected a PHY.
+			 */
+			printk("XEmac: Detected PHY at address %d, ManufID 0x%04x, Rev. 0x%04x.\n", phy_addr, reg, rev);
+			lp->mii_addr = phy_addr;
+			break;
+		}
+		/* If we get here, the repeated reads returned inconsistent results ->
+		 * No PHY.
+		 */
+	}
+	if (lp->mii_addr == 0xFF) {
+		printk(KERN_WARNING
+		       "XEmac: No PHY detected. Assuming PHY at address 0.\n");
+		lp->mii_addr = 0;
+	}
+
+	/* initialize the netdev structure */
+	ndev->open = xenet_open;
+	ndev->stop = xenet_close;
+	ndev->change_mtu = xenet_change_mtu;
+	ndev->get_stats = xenet_get_stats;
+	ndev->set_multicast_list = xenet_set_multicast_list;
+	ndev->do_ioctl = xenet_ioctl;
+	ndev->watchdog_timeo = TX_TIMEOUT;
+	ndev->flags &= ~IFF_MULTICAST;
+	ndev->tx_timeout = xenet_tx_timeout;
+	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+
+	/* Advertise HW checksumming only when TX DRE backs it. */
+	if ((XEmac_mIsTxHwCsum(&lp->Emac)) && (XEmac_mIsTxDre(&lp->Emac))) {
+		ndev->features |= NETIF_F_IP_CSUM;
+	}
+
+	/* Finally, register the device.
+	 */
+	rc = register_netdev(ndev);
+	if (rc) {
+		printk(KERN_ERR
+		       "%s: Cannot register net device, aborting.\n",
+		       ndev->name);
+		goto error;	/* rc is already set here... */
+	}
+
+	printk(KERN_INFO
+	       "%s: Xilinx 10/100 EMAC at 0x%08X mapped to 0x%08X, irq=%d\n",
+	       ndev->name,
+	       lp->Emac.PhysAddress,
+	       lp->Emac.BaseAddress, ndev->irq);
+
+	/* print h/w id */
+	hwid = XIo_In32((lp->Emac).BaseAddress + XEM_EMIR_OFFSET);
+
+	printk(KERN_INFO
+	       "%s: XEmac id %d.%d%c, block id %d, type %d\n",
+	       ndev->name, (hwid >> 28) & 0xf, (hwid >> 21) & 0x7f,
+	       ((hwid >> 16) & 0x1f) + 'a', (hwid >> 16) & 0xff,
+	       (hwid >> 0) & 0xff);
+
+	return 0;
+
+error:
+	return rc;
+}
+
+/* Unregister and tear down the net_device bound to this platform device.
+ * Reverses the allocation/registration steps done on the probe path:
+ * unregister from the network stack first, then free driver resources.
+ */
+static int xenet_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ unregister_netdev(ndev);
+ xenet_remove_ndev(ndev);
+
+ /* length is inclusive: mem_end is the last byte of the region */
+ release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start+1);
+
+ free_netdev(ndev);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0; /* success */
+}
+
+
+/* Platform-bus probe entry point.
+ *
+ * Validates the platform device, its platform data and its IO/IRQ
+ * resources, then hands off to the common xenet_setup() path (shared
+ * with the OF probe). Returns 0 on success or -ENODEV when anything
+ * required is missing.
+ */
+static int xenet_probe(struct device *dev)
+{
+ struct resource *r_irq = NULL; /* Interrupt resources */
+ struct resource *r_mem = NULL; /* IO mem resources */
+ struct xemac_platform_data *pdata;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* param check */
+ if (!pdev) {
+ printk(KERN_ERR
+ "XEmac: Internal error. Probe called with NULL param.\n");
+ return -ENODEV;
+ }
+
+ pdata = (struct xemac_platform_data *) pdev->dev.platform_data;
+ if (!pdata) {
+ printk(KERN_ERR "XEmac %d: Couldn't find platform data.\n",
+ pdev->id);
+
+ return -ENODEV;
+ }
+
+ /* Get iospace and an irq for the device */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_irq || !r_mem) {
+ printk(KERN_ERR "XEmac %d: IO resource(s) not found.\n",
+ pdev->id);
+ return -ENODEV;
+ }
+
+ return xenet_setup(dev, r_mem, r_irq, pdata);
+}
+
+/* Legacy platform-bus binding for the EMAC; devices match by name. */
+static struct device_driver xenet_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+
+ .probe = xenet_probe,
+ .remove = xenet_remove
+};
+
+#ifdef CONFIG_OF
+/* Read a 32-bit device-tree property.
+ *
+ * Returns the property value, or 0 (with a warning) when the property
+ * is absent. The warning previously said "defaulting to false" - a
+ * copy/paste from get_bool() that was misleading for numeric
+ * properties such as the DRE types.
+ */
+static u32 get_u32(struct of_device *ofdev, const char *s) {
+ u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+ if(p) {
+ return *p;
+ } else {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to 0.\n", s);
+ return 0;
+ }
+}
+
+/* Fetch a boolean device-tree property; a missing property reads as
+ * false (with a warning), so optional hardware features default off.
+ */
+static bool get_bool(struct of_device *ofdev, const char *s) {
+ u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+
+ if (!p) {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false.\n", s);
+ return FALSE;
+ }
+
+ return (bool)*p;
+}
+
+/* Open-firmware probe entry point.
+ *
+ * Builds a xemac_platform_data structure from device-tree properties
+ * and hands off to the common xenet_setup() path shared with the
+ * platform-bus probe. Returns 0 on success or a negative errno.
+ */
+static int __devinit xenet_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+ struct xemac_platform_data pdata_struct;
+ struct resource r_irq_struct;
+ struct resource r_mem_struct;
+
+ struct resource *r_irq = &r_irq_struct; /* Interrupt resources */
+ struct resource *r_mem = &r_mem_struct; /* IO mem resources */
+ struct xemac_platform_data *pdata = &pdata_struct;
+ void *mac_address;
+ int rc = 0;
+
+ /* informational, not an error - was mistakenly logged at KERN_ERR */
+ printk(KERN_INFO "Device Tree Probing \'%s\'\n",
+ ofdev->node->name);
+
+ /* Get iospace for the device */
+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
+ if(rc) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ return rc;
+ }
+
+ /* Get IRQ for the device. of_irq_to_resource() yields NO_IRQ on
+ * failure; returning that verbatim could read as success (NO_IRQ
+ * may be 0), so translate it to a real error code.
+ */
+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+ if(rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "no IRQ found.\n");
+ return -ENODEV;
+ }
+
+ /* Many of these features do not exist in all versions of the
+ ethernet core. We accept if these attributes are not
+ present and do not attempt to exercise the corresponding
+ feature. */
+ pdata_struct.dma_mode = get_u32(ofdev, "xlnx,dma-present");
+ pdata_struct.has_mii = get_bool(ofdev, "xlnx,mii-exist");
+ pdata_struct.has_cam = get_bool(ofdev, "xlnx,cam-exist");
+ pdata_struct.has_err_cnt = get_bool(ofdev, "xlnx,err-count-exist");
+ pdata_struct.has_jumbo = get_bool(ofdev, "xlnx,jumbo-exist");
+ pdata_struct.tx_dre = get_u32(ofdev, "xlnx,tx-dre-type");
+ pdata_struct.rx_dre = get_u32(ofdev, "xlnx,rx-dre-type");
+ pdata_struct.tx_hw_csum = get_bool(ofdev, "xlnx,tx-include-csum");
+ pdata_struct.rx_hw_csum = get_bool(ofdev, "xlnx,rx-include-csum");
+ mac_address = of_get_mac_address(ofdev->node);
+ if(mac_address) {
+ memcpy(pdata_struct.mac_addr, mac_address, 6);
+ } else {
+ /* NOTE(review): mac_addr is left uninitialized in this case;
+ * presumably xenet_setup() provides a fallback - confirm. */
+ dev_warn(&ofdev->dev, "No MAC address found.\n");
+ }
+
+ return xenet_setup(&ofdev->dev, r_mem, r_irq, pdata);
+}
+
+/* OF-bus remove: delegate to the common device-model remove path. */
+static int __devexit xenet_of_remove(struct of_device *dev)
+{
+ return xenet_remove(&dev->dev);
+}
+
+/* Device-tree compatible strings served by this driver: the OPB and
+ * PLB flavours of the Xilinx 10/100 EMAC core.
+ */
+static struct of_device_id xenet_of_match[] = {
+ { .compatible = "xlnx,opb-ethernet-1.01.a", },
+ { .compatible = "xlnx,opb-ethernet-1.02.a", },
+ { .compatible = "xlnx,opb-ethernet-1.04.a", },
+ { .compatible = "xlnx,plb-ethernet-1.00.a", },
+ { .compatible = "xlnx,plb-ethernet-1.01.a", },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(of, xenet_of_match);
+
+/* OF platform binding; devices are matched via xenet_of_match. */
+static struct of_platform_driver xenet_of_driver = {
+ .name = DRIVER_NAME,
+ .match_table = xenet_of_match,
+ .probe = xenet_of_probe,
+ .remove = __devexit_p(xenet_of_remove),
+};
+#endif
+
+/* Module init: register the platform-bus binding and, when configured,
+ * the OF binding.
+ *
+ * The previous code OR-ed the two return codes together, which both
+ * produced meaningless error values and left the platform driver
+ * registered when the OF registration failed. Register sequentially
+ * and roll back on failure instead.
+ */
+static int __init xenet_init(void)
+{
+ int status;
+
+ /*
+ * No kernel boot options used,
+ * so we just need to register the driver
+ */
+ status = driver_register(&xenet_driver);
+ if (status)
+ return status;
+
+#ifdef CONFIG_OF
+ status = of_register_platform_driver(&xenet_of_driver);
+ if (status)
+ driver_unregister(&xenet_driver);
+#endif
+
+ return status;
+}
+
+/* Module exit: unregister both bus bindings set up by xenet_init(). */
+static void __exit xenet_cleanup(void)
+{
+ driver_unregister(&xenet_driver);
+#ifdef CONFIG_OF
+ of_unregister_platform_driver(&xenet_of_driver);
+#endif
+}
+
+module_init(xenet_init);
+module_exit(xenet_cleanup);
--- /dev/null
+/* $Id: xemac_options.c,v 1.1 2007/04/04 18:27:45 wre Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_options.c
+*
+* Functions in this file handle configuration of the XEmac driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.00e rmm 04/06/04 Added XEM_NO_SGEND_INT_OPTION processing. Relocated
+* XEM_MAX_IFG definition from here to xemac_hw.h as XEM_IFGP_
+* PART1_MAX and XEM_IFGP_PART2_MAX.
+* 1.00f rmm 10/19/04 Added options to control CAM and jumbo frames.
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xemac_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+/*
+ * A table of options and masks. This table maps the user-visible options with
+ * the control register masks. It is used in Set/GetOptions as an alternative
+ * to a series of if/else pairs. Note that the polled options does not have a
+ * corresponding entry in the control register, so it does not exist in the
+ * table.
+ */
+typedef struct {
+ u32 Option; /* user-visible option bit (XEM_*_OPTION) */
+ u32 Mask; /* corresponding ECR control-register mask */
+} OptionMap;
+
+static OptionMap OptionsTable[] = {
+ {XEM_UNICAST_OPTION, XEM_ECR_UNICAST_ENABLE_MASK},
+ {XEM_BROADCAST_OPTION, XEM_ECR_BROAD_ENABLE_MASK},
+ {XEM_PROMISC_OPTION, XEM_ECR_PROMISC_ENABLE_MASK},
+ {XEM_FDUPLEX_OPTION, XEM_ECR_FULL_DUPLEX_MASK},
+ {XEM_LOOPBACK_OPTION, XEM_ECR_LOOPBACK_MASK},
+ {XEM_MULTICAST_OPTION, XEM_ECR_MULTI_ENABLE_MASK},
+ {XEM_FLOW_CONTROL_OPTION, XEM_ECR_PAUSE_FRAME_MASK},
+ {XEM_INSERT_PAD_OPTION, XEM_ECR_XMIT_PAD_ENABLE_MASK},
+ {XEM_INSERT_FCS_OPTION, XEM_ECR_XMIT_FCS_ENABLE_MASK},
+ {XEM_INSERT_ADDR_OPTION, XEM_ECR_XMIT_ADDR_INSERT_MASK},
+ {XEM_OVWRT_ADDR_OPTION, XEM_ECR_XMIT_ADDR_OVWRT_MASK},
+ {XEM_STRIP_PAD_FCS_OPTION, XEM_ECR_RECV_STRIP_ENABLE_MASK},
+ {XEM_MULTICAST_CAM_OPTION, XEM_ECR_CAM_ENABLE_MASK},
+ {XEM_JUMBO_OPTION, XEM_ECR_RECV_JUMBO_ENABLE_MASK}
+};
+
+/* number of entries in OptionsTable */
+#define XEM_NUM_OPTIONS (sizeof(OptionsTable) / sizeof(OptionMap))
+
+/*****************************************************************************/
+/**
+*
+* Set Ethernet driver/device options. The device must be stopped before
+* calling this function. The options are contained within a bit-mask with each
+* bit representing an option (i.e., you can OR the options together). A one (1)
+* in the bit-mask turns an option on, and a zero (0) turns the option off.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param OptionsFlag is a bit-mask representing the Ethernet options to turn on
+* or off. See xemac.h for a description of the available options.
+*
+* @return
+*
+* - XST_SUCCESS if the options were set successfully
+* - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+*
+* @note
+*
+* This function is not thread-safe and makes use of internal resources that are
+* shared between the Start, Stop, and SetOptions functions, so if one task
+* might be setting device options while another is trying to start the device,
+* protection of this shared data (typically using a semaphore) is required.
+*
+******************************************************************************/
+int XEmac_SetOptions(XEmac * InstancePtr, u32 OptionsFlag)
+{
+ u32 ControlReg;
+ u32 Index;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* The read-modify-write below must only happen while stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STARTED;
+ }
+
+ /* Current hardware state; option bits are merged into it below */
+ ControlReg = XIo_In32(InstancePtr->BaseAddress + XEM_ECR_OFFSET);
+
+ /*
+ * Loop through the options table, turning the option on or off
+ * depending on whether the bit is set in the incoming options flag.
+ */
+ for (Index = 0; Index < XEM_NUM_OPTIONS; Index++) {
+ if (OptionsFlag & OptionsTable[Index].Option) {
+ ControlReg |= OptionsTable[Index].Mask; /* turn it on */
+ }
+ else {
+ ControlReg &= ~OptionsTable[Index].Mask; /* turn it off */
+ }
+ }
+
+ /*
+ * TODO: need to validate addr-overwrite only if addr-insert?
+ */
+
+ /*
+ * Now write the control register. Leave it to the upper layers
+ * to restart the device.
+ */
+ XIo_Out32(InstancePtr->BaseAddress + XEM_ECR_OFFSET, ControlReg);
+
+ /*
+ * Check the polled option (software-only state; it has no bit in the
+ * control register - see the note above OptionsTable)
+ */
+ if (OptionsFlag & XEM_POLLED_OPTION) {
+ InstancePtr->IsPolled = TRUE;
+ }
+ else {
+ InstancePtr->IsPolled = FALSE;
+ }
+
+ /*
+ * Check the No SGEND option (also software-only instance state)
+ */
+ if (OptionsFlag & XEM_NO_SGEND_INT_OPTION) {
+ InstancePtr->IsSgEndDisable = TRUE;
+ }
+ else {
+ InstancePtr->IsSgEndDisable = FALSE;
+ }
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Get Ethernet driver/device options. The 32-bit value returned is a bit-mask
+* representing the options. A one (1) in the bit-mask means the option is on,
+* and a zero (0) means the option is off.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @return
+*
+* The 32-bit value of the Ethernet options. The value is a bit-mask
+* representing all options that are currently enabled. See xemac.h for a
+* description of the available options.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XEmac_GetOptions(XEmac * InstancePtr)
+{
+ u32 OptionsFlag = 0;
+ u32 ControlReg;
+ u32 Index;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Get the control register to determine which options are currently set.
+ */
+ ControlReg = XIo_In32(InstancePtr->BaseAddress + XEM_ECR_OFFSET);
+
+ /*
+ * Loop through the options table to determine which options are set
+ */
+ for (Index = 0; Index < XEM_NUM_OPTIONS; Index++) {
+ if (ControlReg & OptionsTable[Index].Mask) {
+ OptionsFlag |= OptionsTable[Index].Option;
+ }
+ }
+
+ /* The polled and no-SGEND options are software state kept in the
+ * instance, not in the control register, so fold them in separately.
+ */
+ if (InstancePtr->IsPolled) {
+ OptionsFlag |= XEM_POLLED_OPTION;
+ }
+
+ if (InstancePtr->IsSgEndDisable) {
+ OptionsFlag |= XEM_NO_SGEND_INT_OPTION;
+ }
+
+ return OptionsFlag;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Set the Interframe Gap (IFG), which is the time the MAC delays between
+* transmitting frames. There are two parts required. The total interframe gap
+* is the total of the two parts. The values provided for the Part1 and Part2
+* parameters are multiplied by 4 to obtain the bit-time interval. The first
+* part should be the first 2/3 of the total interframe gap. The MAC will reset
+* the interframe gap timer if carrier sense becomes true during the period
+* defined by interframe gap Part1. Part1 may be shorter than 2/3 the total and
+* can be as small as zero. The second part should be the last 1/3 of the total
+* interframe gap, but can be as large as the total interframe gap. The MAC
+* will not reset the interframe gap timer if carrier sense becomes true during
+* the period defined by interframe gap Part2.
+*
+* The device must be stopped before setting the interframe gap.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param Part1 is the interframe gap part 1 (which will be multiplied by 4 to
+* get the bit-time interval).
+* @param Part2 is the interframe gap part 2 (which will be multiplied by 4 to
+* get the bit-time interval).
+*
+* @return
+*
+* - XST_SUCCESS if the interframe gap was set successfully
+* - XST_DEVICE_IS_STARTED if the device has not been stopped
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmac_SetInterframeGap(XEmac * InstancePtr, u8 Part1, u8 Part2)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(Part1 <= XEM_IFGP_PART1_MAX);
+ XASSERT_NONVOID(Part2 <= XEM_IFGP_PART2_MAX);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * The interframe gap may only be reprogrammed while stopped
+ */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return XST_DEVICE_IS_STARTED;
+ }
+
+ /* Both parts share the single IFGP register; pack and write once */
+ XIo_Out32(InstancePtr->BaseAddress + XEM_IFGP_OFFSET,
+ ((u32) Part1 << XEM_IFGP_PART1_SHIFT) |
+ ((u32) Part2 << XEM_IFGP_PART2_SHIFT));
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Get the interframe gap, parts 1 and 2. See the description of interframe gap
+* above in XEmac_SetInterframeGap().
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param Part1Ptr is a pointer to an 8-bit buffer into which the interframe gap
+* part 1 value will be copied.
+* @param Part2Ptr is a pointer to an 8-bit buffer into which the interframe gap
+* part 2 value will be copied.
+*
+* @return
+*
+* None. The values of the interframe gap parts are copied into the
+* output parameters.
+*
+******************************************************************************/
+void XEmac_GetInterframeGap(XEmac * InstancePtr, u8 *Part1Ptr, u8 *Part2Ptr)
+{
+ u32 RegValue;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(Part1Ptr != NULL);
+ XASSERT_VOID(Part2Ptr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Both gap parts live in the one IFGP register; unpack each field */
+ RegValue = XIo_In32(InstancePtr->BaseAddress + XEM_IFGP_OFFSET);
+ *Part1Ptr = (RegValue & XEM_IFGP_PART1_MASK) >> XEM_IFGP_PART1_SHIFT;
+ *Part2Ptr = (RegValue & XEM_IFGP_PART2_MASK) >> XEM_IFGP_PART2_SHIFT;
+}
--- /dev/null
+/* $Id: xemac_phy.c,v 1.1 2007/04/04 18:27:45 wre Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemac_phy.c
+*
+* Contains functions to read and write the PHY through the Ethernet MAC MII
+* registers. These assume an MII-compliant PHY.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a rpm 07/31/01 First release
+* 1.00b rpm 02/20/02 Repartitioned files and functions
+* 1.00c rpm 12/05/02 New version includes support for simple DMA
+* 1.00d rpm 09/26/03 New version includes support PLB Ethernet and v2.00a of
+* the packet fifo driver.
+* 1.00e rmm 04/06/04 Moved XEM_MAX_PHY constants to xemac_hw.h
+* 1.00f rmm 10/19/04 Added XEmac_PhyReset() function.
+* 1.11a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xemac_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Assert the PHY reset signal. This function will work only when the external
+* PHY supports the reset_n signal. See EMAC spec for more information.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+*
+* @note
+*
+* This function will always leave the PHY enabled.
+*
+******************************************************************************/
+void XEmac_PhyReset(XEmac * InstancePtr)
+{
+ u32 RegECR;
+
+ XASSERT_VOID(InstancePtr != NULL);
+
+ /* Disable/reset the PHY */
+ RegECR = XIo_In32(InstancePtr->BaseAddress + XEM_ECR_OFFSET);
+ XIo_Out32(InstancePtr->BaseAddress + XEM_ECR_OFFSET,
+ RegECR & ~XEM_ECR_PHY_ENABLE_MASK);
+
+ /* Re-enable the PHY. NOTE(review): there is no delay between the
+ * disable and re-enable writes; presumably bus access latency
+ * satisfies the PHY's minimum reset pulse width - confirm against
+ * the EMAC spec for the target PHY.
+ */
+ XIo_Out32(InstancePtr->BaseAddress + XEM_ECR_OFFSET,
+ RegECR | XEM_ECR_PHY_ENABLE_MASK);
+}
+
+
+
+/*****************************************************************************/
+/**
+*
+* Read the current value of the PHY register indicated by the PhyAddress and
+* the RegisterNum parameters. The MAC provides the driver with the ability to
+* talk to a PHY that adheres to the Media Independent Interface (MII) as
+* defined in the IEEE 802.3 standard.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param PhyAddress is the address of the PHY to be read (supports multiple
+* PHYs)
+* @param RegisterNum is the register number, 0-31, of the specific PHY register
+* to read
+* @param PhyDataPtr is an output parameter, and points to a 16-bit buffer into
+* which the current value of the register will be copied.
+*
+* @return
+*
+* - XST_SUCCESS if the PHY was read from successfully
+* - XST_NO_FEATURE if the device is not configured with MII support
+* - XST_EMAC_MII_BUSY if there is another PHY operation in progress
+* - XST_EMAC_MII_READ_ERROR if a read error occurred between the MAC and the PHY
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+* <br><br>
+* There is the possibility that this function will not return if the hardware
+* is broken (i.e., it never sets the status bit indicating that the read is
+* done). If this is of concern to the user, the user should provide protection
+* from this problem - perhaps by using a different timer thread to monitor the
+* PhyRead thread.
+*
+******************************************************************************/
+int XEmac_PhyRead(XEmac * InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 *PhyDataPtr)
+{
+ u32 MiiControl;
+ u32 MiiData;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(PhyAddress <= XEM_MGTCR_MAX_PHY_ADDR);
+ XASSERT_NONVOID(RegisterNum <= XEM_MGTCR_MAX_PHY_REG);
+ XASSERT_NONVOID(PhyDataPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Make sure the device has the management interface
+ */
+ if (!XEmac_mHasMii(InstancePtr)) {
+ return XST_NO_FEATURE;
+ }
+
+ /*
+ * Verify that there is no operation in progress already
+ */
+ MiiControl = XIo_In32(InstancePtr->BaseAddress + XEM_MGTCR_OFFSET);
+ if (MiiControl & XEM_MGTCR_START_MASK) {
+ /* operation in progress */
+ return XST_EMAC_MII_BUSY;
+ }
+
+ /*
+ * Set up the MII control register first. We set up a control word with
+ * the PHY address and register number, then indicate the direction (read),
+ * then start the operation.
+ */
+ MiiControl = PhyAddress << XEM_MGTCR_PHY_ADDR_SHIFT;
+ MiiControl |= (RegisterNum << XEM_MGTCR_REG_ADDR_SHIFT);
+ MiiControl |= (XEM_MGTCR_RW_NOT_MASK | XEM_MGTCR_START_MASK |
+ XEM_MGTCR_MII_ENABLE_MASK);
+
+ XIo_Out32(InstancePtr->BaseAddress + XEM_MGTCR_OFFSET, MiiControl);
+
+ /*
+ * Wait for the operation to complete. NOTE: unbounded busy-wait on
+ * the hardware clearing the START bit; this never returns if the
+ * hardware is broken (see the function header above).
+ */
+ do {
+ MiiControl =
+ XIo_In32(InstancePtr->BaseAddress + XEM_MGTCR_OFFSET);
+ }
+ while (MiiControl & XEM_MGTCR_START_MASK);
+
+ /*
+ * Now read the resulting MII data register. First check to see if
+ * an error occurred before reading and returning the value in
+ * the MII data register.
+ */
+ if (MiiControl & XEM_MGTCR_RD_ERROR_MASK) {
+ /*
+ * MII read error occurred. Upper layer will need to retry.
+ */
+ return XST_EMAC_MII_READ_ERROR;
+ }
+
+ /*
+ * Retrieve the data from the 32-bit register, then copy it to
+ * the 16-bit output parameter.
+ */
+ MiiData = XIo_In32(InstancePtr->BaseAddress + XEM_MGTDR_OFFSET);
+
+ *PhyDataPtr = (u16) MiiData;
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Write data to the specified PHY register. The Ethernet driver does not
+* require the device to be stopped before writing to the PHY. Although it is
+* probably a good idea to stop the device, it is the responsibility of the
+* application to deem this necessary. The MAC provides the driver with the
+* ability to talk to a PHY that adheres to the Media Independent Interface
+* (MII) as defined in the IEEE 802.3 standard.
+*
+* @param InstancePtr is a pointer to the XEmac instance to be worked on.
+* @param PhyAddress is the address of the PHY to be written (supports multiple
+* PHYs)
+* @param RegisterNum is the register number, 0-31, of the specific PHY register
+* to write
+* @param PhyData is the 16-bit value that will be written to the register
+*
+* @return
+*
+* - XST_SUCCESS if the PHY was written to successfully. Since there is no error
+* status from the MAC on a write, the user should read the PHY to verify the
+* write was successful.
+* - XST_NO_FEATURE if the device is not configured with MII support
+* - XST_EMAC_MII_BUSY if there is another PHY operation in progress
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+* <br><br>
+* There is the possibility that this function will not return if the hardware
+* is broken (i.e., it never sets the status bit indicating that the write is
+* done). If this is of concern to the user, the user should provide protection
+* from this problem - perhaps by using a different timer thread to monitor the
+* PhyWrite thread.
+*
+******************************************************************************/
+int XEmac_PhyWrite(XEmac * InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 PhyData)
+{
+ u32 MiiControl;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(PhyAddress <= XEM_MGTCR_MAX_PHY_ADDR);
+ XASSERT_NONVOID(RegisterNum <= XEM_MGTCR_MAX_PHY_REG);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Make sure the device has the management interface
+ */
+ if (!XEmac_mHasMii(InstancePtr)) {
+ return XST_NO_FEATURE;
+ }
+
+ /*
+ * Verify that there is no operation in progress already
+ */
+ MiiControl = XIo_In32(InstancePtr->BaseAddress + XEM_MGTCR_OFFSET);
+ if (MiiControl & XEM_MGTCR_START_MASK) {
+ /* operation in progress */
+ return XST_EMAC_MII_BUSY;
+ }
+
+ /*
+ * Set up the MII data register first. Write the 16-bit input
+ * value to the 32-bit data register.
+ */
+ XIo_Out32(InstancePtr->BaseAddress + XEM_MGTDR_OFFSET, (u32) PhyData);
+
+ /*
+ * Now set up the MII control register. We set up a control
+ * word with the PHY address and register number, then indicate
+ * the direction (write), then start the operation.
+ * (No RW_NOT_MASK bit here - its absence selects the write
+ * direction, in contrast to XEmac_PhyRead.)
+ */
+ MiiControl = PhyAddress << XEM_MGTCR_PHY_ADDR_SHIFT;
+ MiiControl |= (RegisterNum << XEM_MGTCR_REG_ADDR_SHIFT);
+ MiiControl |= (XEM_MGTCR_START_MASK | XEM_MGTCR_MII_ENABLE_MASK);
+
+ XIo_Out32(InstancePtr->BaseAddress + XEM_MGTCR_OFFSET, MiiControl);
+
+ /*
+ * Wait for the operation to complete. NOTE: unbounded busy-wait;
+ * never returns if the hardware fails to clear the START bit (see
+ * the function header above).
+ */
+ do {
+ MiiControl =
+ XIo_In32(InstancePtr->BaseAddress + XEM_MGTCR_OFFSET);
+ }
+ while (MiiControl & XEM_MGTCR_START_MASK);
+
+ /*
+ * There is no status indicating whether the operation was
+ * successful or not.
+ */
+ return XST_SUCCESS;
+}
--- /dev/null
+#
+# Makefile for the Xilinx ethernet Lite driver
+#
+
+# Headers shared with the other Xilinx OS-independent driver code.
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/xilinx_common
+
+obj-$(CONFIG_XILINX_EMACLITE) := xilinx_emaclite.o
+
+# The Linux adapter for the Xilinx driver code.
+xilinx_emaclite-objs += adapter.o
+
+# The Xilinx OS independent code.
+xilinx_emaclite-objs += xemaclite.o xemaclite_intr.o xemaclite_l.o
+
+
+
--- /dev/null
+/*
+ * adapter.c
+ *
+ * Xilinx Ethernet MAC Lite Adapter component to interface XEmac component
+ * to Linux
+ *
+ * Author: John Williams <john.williams@petalogix.com>
+ *
+ * based on Xilinx enet driver which is by
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under the terms
+ * of the GNU General Public License version 2.1. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ *
+ * (c) Copyright 2007 Xilinx Inc.
+ *
+ */
+
+/*
+ * This driver is a bit unusual in that it is composed of two logical
+ * parts where one part is the OS independent code and the other part is
+ * the OS dependent code. Xilinx provides their drivers split in this
+ * fashion. This file represents the Linux OS dependent part known as
+ * the Linux adapter. The other files in this directory are the OS
+ * independent files as provided by Xilinx with no changes made to them.
+ * The names exported by those files begin with XEmacLite_. All functions
+ * in this file that are called by Linux have names that begin with
+ * xemaclite_. The functions in this file that have Handler in their name
+ * are registered as callbacks with the underlying Xilinx OS independent
+ * layer. Any other functions are static helper functions.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <linux/ethtool.h>
+
+#include <linux/xilinx_devices.h>
+
+#include <xbasic_types.h>
+#include "xemaclite.h"
+#include "xemaclite_i.h"
+#include "xipif_v1_23_b.h"
+
+#ifdef CONFIG_OF
+// For open firmware.
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#define DRIVER_NAME "xilinx_emaclite"
+#define DRIVER_VERSION "1.0"
+
+MODULE_AUTHOR("John Williams <john.williams@petalogix.com>");
+MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
+MODULE_LICENSE("GPL");
+
+#define TX_TIMEOUT (60*HZ) /* Transmission timeout is 60 seconds. */
+
+#define ALIGNMENT 4
+
+/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+
+/* physical to virtual pointer conversion */
+#define P_TO_V(InstancePtr, p) \
+ ((p) ? \
+ ((InstancePtr)->VirtPtr + ((u32)(p) - (u32)(InstancePtr)->PhyPtr)) : \
+ 0)
+
+/*
+ * Our private per device data. When a net_device is allocated we will
+ * ask for enough extra space for this.
+ */
+struct net_local {
+ struct list_head rcv; /* receive-side list linkage */
+ struct list_head xmit; /* transmit-side list linkage */
+
+ struct net_device_stats stats; /* Statistics for this device */
+ struct net_device *ndev; /* this device */
+ u32 index; /* Which interface is this */
+ XInterruptHandler Isr; /* Pointer to the XEmac ISR routine */
+
+ /*
+ * The underlying OS independent code needs space as well. A
+ * pointer to the following XEmacLite structure will be passed to
+ * any XEmacLite_ function that requires it. However, we treat the
+ * data as an opaque object in this file (meaning that we never
+ * reference any of the fields inside of the structure).
+ */
+ XEmacLite EmacLite;
+
+ void *desc_space; /* virtual base; presumably DMA descriptor area - confirm */
+ dma_addr_t desc_space_handle; /* bus/DMA handle for desc_space */
+ int desc_space_size; /* size of desc_space in bytes */
+
+ /* NOTE(review): DDR buffer bookkeeping - usage not visible in this chunk */
+ u8 *ddrVirtPtr;
+ u32 ddrOffset;
+ u32 ddrSize;
+
+ struct sk_buff *deferred_skb; /* skb held over for later handling */
+};
+
+/*
+ * Protects reset/transmit state shared between process context, the
+ * ISR and bottom halves.
+ * Fix: SPIN_LOCK_UNLOCKED is deprecated (and breaks with lockdep);
+ * DEFINE_SPINLOCK() is the supported static initializer.
+ */
+static DEFINE_SPINLOCK(reset_lock);
+
+/*
+ * Helper to determine if a given XEmac status code warrants a reset.
+ * Fix: 'static inline' instead of 'extern inline' -- GNU89 extern
+ * inline emits no out-of-line definition, so the link fails if the
+ * compiler declines to inline.
+ */
+static inline int status_requires_reset(int s)
+{
+	return (s == XST_DMA_ERROR || s == XST_FIFO_ERROR ||
+		s == XST_RESET_ERROR || s == XST_DMA_SG_NO_LIST ||
+		s == XST_DMA_SG_LIST_EMPTY);
+}
+
+/* Locks serializing the rcv/xmit bottom-half lists. */
+static DEFINE_SPINLOCK(rcvSpin);
+static DEFINE_SPINLOCK(xmitSpin);
+
+/*
+ * The following are notes regarding the critical sections in this
+ * driver and how they are protected.
+ *
+ *
+ * XEmacLite_EnableInterrupts, XEmacLite_DisableInterrupts and XEmacLite_SetOptions are not thread safe.
+ * These functions are called from xemaclite_open(), xemaclite_close(), reset(),
+ * and xemaclite_set_multicast_list(). xemaclite_open() and xemaclite_close()
+ * should be safe because when they do start and stop, they don't have
+ * interrupts or timers enabled. The other side is that they won't be
+ * called while a timer or interrupt is being handled.
+ *
+ * XEmacLite_PhyRead and XEmacLite_PhyWrite are not thread safe.
+ * These functions are called from get_phy_status(), xemaclite_ioctl() and
+ * probe(). probe() is only called from xemaclite_init() so it is not an
+ * issue (nothing is really up and running yet). get_phy_status() is
+ * called from both poll_mii() (a timer bottom half) and xemaclite_open().
+ * These shouldn't interfere with each other because xemaclite_open() is
+ * what starts the poll_mii() timer. xemaclite_open() and xemaclite_ioctl()
+ * should be safe as well because they will be sequential. That leaves
+ * the interaction between poll_mii() and xemaclite_ioctl(). While the
+ * timer bottom half is executing, a new ioctl won't come in so that is
+ * taken care of. That leaves the one case of the poll_mii timer
+ * popping while handling an ioctl. To take care of that case, the
+ * timer is deleted when the ioctl comes in and then added back in after
+ * the ioctl is finished.
+ */
+
+/* Duplex settings requested of reset(); UNKNOWN_DUPLEX = don't care. */
+typedef enum DUPLEX { UNKNOWN_DUPLEX, HALF_DUPLEX, FULL_DUPLEX } DUPLEX;
+
+/*
+ * Re-arm the MAC after an error or timeout: toggle the EMAC Lite
+ * interrupt enables, drop any transmit that was deferred waiting for a
+ * buffer, and restart the queue.  Callers take reset_lock around this.
+ * The duplex argument is unused by the visible code -- presumably kept
+ * for interface parity with the full xemac driver; TODO confirm.
+ */
+static void reset(struct net_device *dev, DUPLEX duplex)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	/* Shouldn't really be necessary, but shouldn't hurt. */
+	netif_stop_queue(dev);
+
+	/* Disable/re-enable resets the EMAC Lite interrupt state. */
+	XEmacLite_DisableInterrupts(&lp->EmacLite);
+	XEmacLite_EnableInterrupts(&lp->EmacLite);
+
+	/* Any transmit parked waiting for a buffer is abandoned. */
+	if (lp->deferred_skb) {
+		dev_kfree_skb(lp->deferred_skb);
+		lp->deferred_skb = NULL;
+		lp->stats.tx_errors++;
+	}
+
+	/* Push trans_start far into the past so the TX watchdog does not
+	 * immediately fire again. */
+	dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT;	/* to exclude tx timeout */
+
+	/* We're all ready to go. Start the queue in case it was stopped. */
+	netif_wake_queue(dev);
+}
+
+/*
+ * This routine is registered with the OS as the function to call when
+ * the EMAC interrupts.  It in turn calls the Xilinx OS independent
+ * interrupt handler selected at setup time (lp->Isr), which dispatches
+ * to the Send/Recv callbacks.
+ */
+static irqreturn_t
+xemaclite_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	/* Call it. */
+	(*(lp->Isr)) (&lp->EmacLite);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * net_device open callback: program the MAC address, grab the IRQ,
+ * enable EMAC Lite interrupts and start the transmit queue.
+ * Returns 0 on success or a negative errno.
+ */
+static int xemaclite_open(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int retval;
+
+	/*
+	 * Just to be safe, stop the device first.
+	 */
+	XEmacLite_DisableInterrupts(&lp->EmacLite);
+
+	/* Set the MAC address each time opened. */
+	XEmacLite_SetMacAddress(&lp->EmacLite, dev->dev_addr);
+
+	/* Grab the IRQ */
+	retval = request_irq(dev->irq, &xemaclite_interrupt, 0, dev->name, dev);
+	if (retval) {
+		printk(KERN_ERR
+		       "%s: Could not allocate interrupt %d.\n",
+		       dev->name, dev->irq);
+		return retval;
+	}
+
+	/* Start with empty BH list heads; close() will list_del() them. */
+	INIT_LIST_HEAD(&(lp->rcv));
+	INIT_LIST_HEAD(&(lp->xmit));
+
+	if (XEmacLite_EnableInterrupts(&lp->EmacLite) != XST_SUCCESS) {
+		printk(KERN_ERR "%s: Could not start device.\n", dev->name);
+		free_irq(dev->irq, dev);
+		return -EBUSY;
+	}
+
+	/* We're ready to go. */
+	netif_start_queue(dev);
+
+	return 0;
+}
+/*
+ * net_device stop callback: quiesce the queue, mask device interrupts,
+ * release the IRQ and unlink this device from the BH lists.  Always
+ * returns 0.
+ */
+static int xemaclite_close(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+	XEmacLite_DisableInterrupts(&lp->EmacLite);
+
+	free_irq(dev->irq, dev);
+
+	/* Unlink under the list locks; list_del on a self-initialized
+	 * head (from open) is safe. */
+	spin_lock_irqsave(&rcvSpin, flags);
+	list_del(&(lp->rcv));
+	spin_unlock_irqrestore(&rcvSpin, flags);
+	spin_lock_irqsave(&xmitSpin, flags);
+	list_del(&(lp->xmit));
+	spin_unlock_irqrestore(&xmitSpin, flags);
+
+	return 0;
+}
+/* net_device get_stats callback: return the per-device counters. */
+static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	return &lp->stats;
+}
+
+/*
+ * hard_start_xmit callback: hand one skb to the EMAC Lite.  If no TX
+ * buffer is free, stop the queue and park the skb in lp->deferred_skb;
+ * the SendHandler interrupt callback retries it later.  Always returns
+ * 0 -- the skb is either sent or deferred, never rejected.
+ */
+static int xemaclite_Send(struct sk_buff *orig_skb, struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct sk_buff *new_skb;
+	unsigned int len;
+	unsigned long flags;
+
+	len = orig_skb->len;
+
+	new_skb = orig_skb;
+
+	/* reset_lock keeps the transmit attempt and the defer decision
+	 * atomic with respect to reset(). */
+	spin_lock_irqsave(&reset_lock, flags);
+	if (XEmacLite_Send(&lp->EmacLite, (u8 *) new_skb->data, len) !=
+	    XST_SUCCESS) {
+		/* Both HW buffers busy: defer until SendHandler runs. */
+		netif_stop_queue(dev);
+		lp->deferred_skb = new_skb;
+		spin_unlock_irqrestore(&reset_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&reset_lock, flags);
+
+	lp->stats.tx_bytes += len;
+	dev_kfree_skb(new_skb);
+	dev->trans_start = jiffies;
+
+	return 0;
+}
+
+/*
+ * The callback function for completed frames sent, invoked from the
+ * EMAC Lite TX interrupt path.  Retries any deferred transmit first;
+ * if the hardware buffer is still busy, the skb stays deferred and the
+ * function returns without touching the counters.
+ */
+static void SendHandler(void *CallbackRef)
+{
+	struct net_device *dev = (struct net_device *) CallbackRef;
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	if (lp->deferred_skb) {
+		if (XEmacLite_Send
+		    (&lp->EmacLite, (u8 *) lp->deferred_skb->data,
+		     lp->deferred_skb->len) != XST_SUCCESS) {
+			return;
+		}
+		else {
+			dev_kfree_skb(lp->deferred_skb);
+			lp->deferred_skb = NULL;
+			netif_wake_queue(dev);
+		}
+	}
+	/* NOTE(review): tx_bytes for a deferred skb is never added and
+	 * tx_packets is bumped once per completion callback -- confirm
+	 * the accounting is as intended. */
+	lp->stats.tx_packets++;
+}
+
+/*
+ * Netdev TX watchdog callback: invoked when a transmit has not
+ * completed within TX_TIMEOUT jiffies.  Counts the error and resets
+ * the MAC under reset_lock.
+ */
+static void xemaclite_tx_timeout(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	unsigned long flags;
+
+	/* Fix: give the printk an explicit severity instead of relying
+	 * on the default loglevel. */
+	printk(KERN_ERR "%s: Exceeded transmit timeout of %lu ms.\n",
+	       dev->name, TX_TIMEOUT * 1000UL / HZ);
+
+	lp->stats.tx_errors++;
+	spin_lock_irqsave(&reset_lock, flags);
+	reset(dev, UNKNOWN_DUPLEX);
+	spin_unlock_irqrestore(&reset_lock, flags);
+}
+
+/*
+ * The callback function for frames received, invoked from the EMAC
+ * Lite RX interrupt path.  Allocates an skb, copies the frame out of
+ * the device buffer and hands it to the network stack.
+ */
+static void RecvHandler(void *CallbackRef)
+{
+	struct net_device *dev = (struct net_device *) CallbackRef;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct sk_buff *skb;
+	unsigned int align;
+	u32 len;
+
+	len = XEL_MAX_FRAME_SIZE;
+	/* NOTE(review): len + ALIGNMENT is allocated but up to
+	 * align (0..3) + 2 bytes are reserved below, so a max-size frame
+	 * could exceed the tailroom by one byte -- confirm. */
+	if (!(skb = /*dev_ */ alloc_skb(len + ALIGNMENT, GFP_ATOMIC))) {
+		/* Couldn't get memory. */
+		lp->stats.rx_dropped++;
+		printk(KERN_ERR "%s: Could not allocate receive buffer.\n",
+		       dev->name);
+		return;
+	}
+
+	/*
+	 * A new skb should have the data halfword aligned, but this code is
+	 * here just in case that isn't true... Calculate how many
+	 * bytes we should reserve to get the data to start on a word
+	 * boundary. */
+	align = BUFFER_ALIGN(skb->data);
+	if (align)
+		skb_reserve(skb, align);
+
+	/* Reserve 2 bytes so the IP header lands 4-byte aligned after
+	 * the 14-byte Ethernet header. */
+	skb_reserve(skb, 2);
+
+	len = XEmacLite_Recv(&lp->EmacLite, (u8 *) skb->data);
+
+	if (!len) {
+
+		/* Nothing was waiting (or an out-of-sync buffer). */
+		lp->stats.rx_errors++;
+		dev_kfree_skb(skb);
+		//printk(KERN_ERR "%s: Could not receive buffer\n",dev->name);
+		spin_lock(&reset_lock);
+		//reset(dev, UNKNOWN_DUPLEX);
+		spin_unlock(&reset_lock);
+
+		return;
+	}
+
+	skb_put(skb, len);	/* Tell the skb how much data we got. */
+	skb->dev = dev;		/* Fill out required meta-data. */
+
+
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->ip_summed = CHECKSUM_NONE;	/* hardware does no checksumming */
+
+	lp->stats.rx_packets++;
+	lp->stats.rx_bytes += len;
+
+	netif_rx(skb);		/* Send the packet upstream. */
+}
+
+/*
+ * Device ioctl handler.  Only SIOCSIFHWADDR (set MAC address) is
+ * supported; SIOCETHTOOL is explicitly rejected with -EIO.
+ */
+static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	/* Fix: the declaration must match the cast -- 'struct
+	 * hw_addr_data' is not a declared type. */
+	struct sockaddr *hw_addr = (struct sockaddr *) &rq->ifr_hwaddr;
+
+	switch (cmd) {
+	case SIOCETHTOOL:
+		return -EIO;
+
+	case SIOCSIFHWADDR:
+		{
+			printk(KERN_INFO "%s: SIOCSIFHWADDR\n", dev->name);
+
+			/* Copy MAC address in from user space; fix: fail
+			 * with -EFAULT on a short copy instead of
+			 * silently ignoring it. */
+			if (copy_from_user(dev->dev_addr, (void *) hw_addr,
+					   IFHWADDRLEN))
+				return -EFAULT;
+			XEmacLite_SetMacAddress(&lp->EmacLite, dev->dev_addr);
+			break;
+		}
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/*
+ * Unmap the device registers and free the net_device.  Safe to call
+ * with a NULL ndev.  Note: this frees ndev -- callers must not touch
+ * it afterwards.
+ */
+static void xemaclite_remove_ndev(struct net_device *ndev)
+{
+	if (ndev) {
+		struct net_local *lp = netdev_priv(ndev);
+
+		iounmap((void *) (lp->EmacLite.BaseAddress));
+		free_netdev(ndev);
+	}
+}
+
+/*
+ * Tear down a registered device: unregister from the network layer,
+ * unmap/free the netdev, and release the MMIO region.
+ *
+ * Fix: the original called xemaclite_remove_ndev() (which frees the
+ * netdev), then dereferenced ndev for release_mem_region() and called
+ * free_netdev() a second time -- a use-after-free plus double free.
+ * Capture the region bounds first and free exactly once.
+ */
+static int xemaclite_remove(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	unsigned long mem_start = ndev->mem_start;
+	unsigned long mem_end = ndev->mem_end;
+
+	unregister_netdev(ndev);
+	xemaclite_remove_ndev(ndev);	/* iounmap + free_netdev */
+
+	release_mem_region(mem_start, mem_end - mem_start + 1);
+
+	dev_set_drvdata(dev, NULL);
+
+	return 0;		/* success */
+}
+
+
+/*
+ * Shared device initialization code, used by both the platform-bus and
+ * the device-tree probe paths.  Allocates the net_device, claims and
+ * maps the register region, initializes the Xilinx layer, installs the
+ * netdev callbacks and registers the interface.
+ *
+ * Returns 0 on success or a negative errno; on failure the netdev and
+ * mapping are torn down via xemaclite_remove_ndev().
+ * NOTE(review): the error path does not release_mem_region() once the
+ * region has been claimed -- confirm this leak is addressed elsewhere.
+ */
+static int xemaclite_setup(
+		struct device *dev,
+		struct resource *r_mem,
+		struct resource *r_irq,
+		struct xemaclite_platform_data *pdata) {
+
+	u32 virt_baddr;		/* virtual base address of emac */
+
+	XEmacLite_Config Config;
+
+	struct net_device *ndev = NULL;
+	struct net_local *lp = NULL;
+
+	int rc = 0;
+
+	/* Create an ethernet device instance */
+	ndev = alloc_etherdev(sizeof(struct net_local));
+	if (!ndev) {
+		dev_err(dev, "XEmacLite: Could not allocate net device.\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+	dev_set_drvdata(dev, ndev);
+
+	ndev->irq = r_irq->start;
+	ndev->mem_start = r_mem->start;
+	ndev->mem_end = r_mem->end;
+
+	if (!request_mem_region(ndev->mem_start,ndev->mem_end - ndev->mem_start+1, DRIVER_NAME)) {
+		dev_err(dev, "Couldn't lock memory region at %p\n",
+			(void *)ndev->mem_start);
+		rc = -EBUSY;
+		goto error;
+	}
+
+	/* Initialize the private netdev structure
+	 */
+	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
+
+	/* Setup the Config structure for the XEmacLite_CfgInitialize() call. */
+	Config.BaseAddress = r_mem->start;	/* Physical address */
+	Config.TxPingPong = pdata->tx_ping_pong;
+	Config.RxPingPong = pdata->rx_ping_pong;
+
+
+	/* Get the virtual base address for the device */
+	virt_baddr = (u32) ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+	if (0 == virt_baddr) {
+		dev_err(dev, "XEmacLite: Could not allocate iomem.\n");
+		rc = -EIO;
+		goto error;
+	}
+
+
+	if (XEmacLite_CfgInitialize(&lp->EmacLite, &Config, virt_baddr) != XST_SUCCESS) {
+		dev_err(dev, "XEmacLite: Could not initialize device.\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	/* Set the MAC address */
+	memcpy(ndev->dev_addr, pdata->mac_addr, 6);
+
+	/* Note: in the xemac driver, SetMacAddress returns a success code. */
+	XEmacLite_SetMacAddress(&lp->EmacLite, ndev->dev_addr);
+
+	dev_info(dev,
+		 "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
+		 pdata->mac_addr[0], pdata->mac_addr[1],
+		 pdata->mac_addr[2], pdata->mac_addr[3],
+		 pdata->mac_addr[4], pdata->mac_addr[5]);
+
+	/* NOTE(review): dev_err for a non-error message -- dev_info
+	 * looks intended. */
+	dev_err(dev, "using fifo mode.\n");
+	XEmacLite_SetRecvHandler(&lp->EmacLite, ndev, RecvHandler);
+	XEmacLite_SetSendHandler(&lp->EmacLite, ndev, SendHandler);
+	ndev->hard_start_xmit = xemaclite_Send;
+	lp->Isr = XEmacLite_InterruptHandler;
+
+	/* Install the remaining net_device callbacks. */
+	ndev->open = xemaclite_open;
+	ndev->stop = xemaclite_close;
+	ndev->get_stats = xemaclite_get_stats;
+	ndev->flags &= ~IFF_MULTICAST;	/* hardware has no multicast filtering */
+	ndev->do_ioctl = xemaclite_ioctl;
+	ndev->tx_timeout = xemaclite_tx_timeout;
+	ndev->watchdog_timeo = TX_TIMEOUT;
+
+	/* Finally, register the device.
+	 */
+	rc = register_netdev(ndev);
+	if (rc) {
+		printk(KERN_ERR
+		       "%s: Cannot register net device, aborting.\n",
+		       ndev->name);
+		goto error;	/* rc is already set here... */
+	}
+
+	dev_info(dev,
+		 "Xilinx EMACLite at 0x%08X mapped to 0x%08X, irq=%d\n",
+		 lp->EmacLite.PhysAddress,
+		 lp->EmacLite.BaseAddress, ndev->irq);
+	return 0;
+ error:
+	if (ndev) {
+		xemaclite_remove_ndev(ndev);
+	}
+	return rc;
+}
+
+/*
+ * Platform-bus probe: validate the platform device, fetch its platform
+ * data and IO/IRQ resources, then defer to xemaclite_setup().
+ * Returns 0 on success or a negative errno.
+ */
+static int xemaclite_probe(struct device *dev)
+{
+	struct resource *r_irq = NULL;	/* Interrupt resources */
+	struct resource *r_mem = NULL;	/* IO mem resources */
+	struct xemaclite_platform_data *pdata;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	/* param check */
+	if (!pdev) {
+		printk(KERN_ERR
+		       "XEmac: Internal error. Probe called with NULL param.\n");
+		return -ENODEV;
+	}
+
+	pdata = (struct xemaclite_platform_data *) pdev->dev.platform_data;
+	if (!pdata) {
+		printk(KERN_ERR "XEmac %d: Couldn't find platform data.\n",
+		       pdev->id);
+
+		return -ENODEV;
+	}
+
+	/* Get iospace and an irq for the device */
+	r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r_irq || !r_mem) {
+		printk(KERN_ERR "XEmac %d: IO resource(s) not found.\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	return xemaclite_setup(dev, r_mem, r_irq, pdata);
+}
+
+/* Legacy platform-bus binding (non device-tree probe path). */
+static struct device_driver xemaclite_driver = {
+	.name = DRIVER_NAME,
+	.bus = &platform_bus_type,
+
+	.probe = xemaclite_probe,
+	.remove = xemaclite_remove
+};
+
+#ifdef CONFIG_OF
+
+/*
+ * Read a boolean (stored as a u32) property from the device-tree
+ * node; warn and default to FALSE when the property is absent.
+ */
+static bool get_bool(struct of_device *ofdev, const char *s) {
+	u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+	if(p) {
+		return (bool)*p;
+	} else {
+		dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false.\n", s);
+		return FALSE;
+	}
+}
+
+/*
+ * Device-tree probe: translate the OF node's reg/interrupts/ping-pong
+ * properties and MAC address into the platform_data shape, then defer
+ * to xemaclite_setup().
+ */
+static int __devinit xemaclite_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+	struct xemaclite_platform_data pdata_struct;
+	struct resource r_irq_struct;
+	struct resource r_mem_struct;
+
+	struct resource *r_irq = &r_irq_struct;	/* Interrupt resources */
+	struct resource *r_mem = &r_mem_struct;	/* IO mem resources */
+	struct xemaclite_platform_data *pdata = &pdata_struct;
+	void *mac_address;
+	int rc = 0;
+
+	dev_info(&ofdev->dev, "Device Tree Probing \'%s\'\n",
+		 ofdev->node->name);
+
+	/* Get iospace for the device */
+	rc = of_address_to_resource(ofdev->node, 0, r_mem);
+	if(rc) {
+		dev_warn(&ofdev->dev, "invalid address\n");
+		return rc;
+	}
+
+	/* Get IRQ for the device */
+	rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+	if(rc == NO_IRQ) {
+		dev_warn(&ofdev->dev, "no IRQ found.\n");
+		/* NOTE(review): returning rc here returns NO_IRQ itself,
+		 * which may not be a valid negative errno -- confirm. */
+		return rc;
+	}
+
+	pdata_struct.tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
+	pdata_struct.rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
+	mac_address = of_get_mac_address(ofdev->node);
+	if(mac_address) {
+		memcpy(pdata_struct.mac_addr, mac_address, 6);
+	} else {
+		/* mac_addr left uninitialized in this case -- the MAC
+		 * programmed will be stack garbage; TODO confirm. */
+		dev_warn(&ofdev->dev, "No MAC address found.\n");
+	}
+
+	return xemaclite_setup(&ofdev->dev, r_mem, r_irq, pdata);
+}
+
+/* Device-tree remove: delegate to the common remove path. */
+static int __devexit xemaclite_of_remove(struct of_device *dev)
+{
+	return xemaclite_remove(&dev->dev);
+}
+
+/* Device-tree "compatible" strings this driver binds to. */
+static struct of_device_id xemaclite_of_match[] = {
+	{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
+	{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
+	{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
+	{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(of, xemaclite_of_match);
+
+/* Open-firmware platform driver registration record. */
+static struct of_platform_driver xemaclite_of_driver = {
+	.name = DRIVER_NAME,
+	.match_table = xemaclite_of_match,
+	.probe = xemaclite_of_probe,
+	.remove = __devexit_p(xemaclite_of_remove),
+};
+#endif
+
+/*
+ * Module entry point: register the platform driver and, under
+ * CONFIG_OF, the OF platform driver.
+ * NOTE(review): OR-ing the two status codes can produce a meaningless
+ * mixed errno, and a failed first registration is not rolled back --
+ * confirm intended.
+ */
+static int __init xemaclite_init(void)
+{
+	/*
+	 * No kernel boot options used,
+	 * so we just need to register the driver
+	 */
+	int status = driver_register(&xemaclite_driver);
+#ifdef CONFIG_OF
+	status |= of_register_platform_driver(&xemaclite_of_driver);
+#endif
+	return status;
+}
+
+/* Module exit point: unregister both driver registrations. */
+static void __exit xemaclite_cleanup(void)
+{
+	driver_unregister(&xemaclite_driver);
+#ifdef CONFIG_OF
+	of_unregister_platform_driver(&xemaclite_of_driver);
+#endif
+}
+
+module_init(xemaclite_init);
+module_exit(xemaclite_cleanup);
+
--- /dev/null
+/* $Id: xemaclite.c,v 1.1.2.1 2007/03/13 17:26:07 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemaclite.c
+*
+* Functions in this file are the minimum required functions for the EMAC Lite
+* driver. See xemaclite.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- --------------------------------------------------------
+* 1.01a ecm 01/31/04 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xstatus.h"
+#include "xio.h"
+//#include <asm/delay.h>
+#include "xemaclite.h"
+#include "xemaclite_l.h"
+#include "xemaclite_i.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+/****************************************************************************
+*
+* Return the length of the data in the receive buffer.
+*
+* @param BaseAddress is the base address of the receive buffer to be
+*        examined.  (Note: the macro takes a base address, not an
+*        instance pointer.)
+*
+* @note
+* This macro extracts the length field from the received frame's
+* header.
+*
+*****************************************************************************/
+#define XEmacLite_mGetReceiveDataLength(BaseAddress)			   \
+	((XIo_In32((BaseAddress) + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET) >> \
+	  XEL_HEADER_SHIFT) &						   \
+	 (XEL_RPLR_LENGTH_MASK_HI | XEL_RPLR_LENGTH_MASK_LO))
+
+/************************** Function Prototypes ******************************/
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Initialize a driver instance from the given configuration.  A
+* non-zero VirtualAddress is used as the register base (MMU systems);
+* otherwise the physical base from the config is used directly.
+*
+* @param InstancePtr is the XEmacLite instance to initialize.
+* @param ConfigPtr supplies the physical base and ping-pong settings.
+* @param VirtualAddress is the remapped register base, or 0.
+*
+* @return XST_SUCCESS (there is no failure path in this function).
+*
+******************************************************************************/
+int XEmacLite_CfgInitialize(XEmacLite * InstancePtr, XEmacLite_Config * ConfigPtr,
+			    u32 VirtualAddress)
+{
+	/*
+	 * Set some default values for instance data, don't indicate the device
+	 * is ready to use until everything has been initialized successfully
+	 */
+
+	if (0 != VirtualAddress) {
+		InstancePtr->BaseAddress = VirtualAddress;
+	}
+	else {
+		InstancePtr->BaseAddress = ConfigPtr->BaseAddress;
+	}
+	InstancePtr->PhysAddress = ConfigPtr->BaseAddress;
+	InstancePtr->ConfigPtr = ConfigPtr;
+
+	/* Install stubs so forgotten callbacks fail loudly, not wildly. */
+	InstancePtr->RecvHandler = (XEmacLite_Handler) StubHandler;
+	InstancePtr->SendHandler = (XEmacLite_Handler) StubHandler;
+
+
+	/*
+	 * Clear the TX CSR's in case this is a restart
+	 */
+
+	XIo_Out32(InstancePtr->BaseAddress + XEL_TSR_OFFSET, 0);
+	XIo_Out32(InstancePtr->BaseAddress + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
+		  0);
+
+	/*
+	 * Since there were no failures, indicate the device is ready to use.
+	 */
+
+	InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Send an Ethernet frame. The ByteCount is the total frame size, including
+* header.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+* @param FramePtr is a pointer to frame. For optimal performance, a 32-bit
+* aligned buffer should be used but it is not required, the function
+* will align the data if necessary.
+* @param ByteCount is the size, in bytes, of the frame
+*
+* @return
+*
+* - XST_SUCCESS if data was transmitted.
+* - XST_FAILURE if buffer(s) was (were) full and no valid data was
+* transmitted.
+*
+* @note
+*
+* This function call is not blocking in nature, i.e. it will not wait until the
+* frame is transmitted.
+*
+******************************************************************************/
+int XEmacLite_Send(XEmacLite * InstancePtr, u8 *FramePtr, unsigned ByteCount)
+{
+	u32 Register;
+	u32 BaseAddress;
+
+	/*
+	 * Verify that each of the inputs are valid.
+	 */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/*
+	 * Determine the expected TX buffer address
+	 */
+
+	BaseAddress = XEmacLite_mNextTransmitAddr(InstancePtr);
+
+	/*
+	 * Check the length; if too large, truncate
+	 */
+
+	if (ByteCount > XEL_MAX_FRAME_SIZE) {
+
+		ByteCount = XEL_MAX_FRAME_SIZE;
+	}
+	/*
+	 * Determine if the expected buffer address is empty
+	 */
+
+	Register = XIo_In32(BaseAddress + XEL_TSR_OFFSET);
+
+	/*
+	 * If the expected buffer is available, fill it with the provided data
+	 * Align if necessary.
+	 */
+
+
+	if (((Register & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
+	    ((XEmacLite_mGetTxActive(BaseAddress) & XEL_TSR_XMIT_ACTIVE_MASK) ==
+	     0)) {
+
+		/*
+		 * Switch to next buffer if configured
+		 */
+
+		if (InstancePtr->ConfigPtr->TxPingPong != 0) {
+			InstancePtr->NextTxBufferToUse ^= XEL_BUFFER_OFFSET;
+		}
+
+		/*
+		 * Write the frame to the buffer.
+		 */
+		XEmacLite_AlignedWrite(FramePtr, (u32 *) BaseAddress,
+				       ByteCount);
+
+
+		/*
+		 * The frame is in the buffer, now set the length and
+		 * send it
+		 */
+		XIo_Out32(BaseAddress + XEL_TPLR_OFFSET, (ByteCount &
+							  (XEL_TPLR_LENGTH_MASK_HI
+							   |
+							   XEL_TPLR_LENGTH_MASK_LO)));
+
+		Register = XIo_In32(BaseAddress + XEL_TSR_OFFSET);
+
+		Register |= XEL_TSR_XMIT_BUSY_MASK;
+
+		/* Mirror BUSY into ACTIVE when interrupts are enabled,
+		 * so the ISR can tell this buffer is in flight. */
+		if ((Register & XEL_TSR_XMIT_IE_MASK) != 0) {
+			Register |= XEL_TSR_XMIT_ACTIVE_MASK;
+		}
+
+		XIo_Out32(BaseAddress + XEL_TSR_OFFSET, Register);
+
+		return XST_SUCCESS;
+	}
+
+	/*
+	 * If the expected buffer was full, try the other buffer if configured
+	 */
+
+	if (InstancePtr->ConfigPtr->TxPingPong != 0) {
+
+		BaseAddress ^= XEL_BUFFER_OFFSET;
+
+		/*
+		 * Determine if the expected buffer address is empty
+		 */
+
+		Register = XIo_In32(BaseAddress + XEL_TSR_OFFSET);
+
+		/*
+		 * If the next buffer is available, fill it with the provided data
+		 */
+
+		if (((Register & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
+		    ((XEmacLite_mGetTxActive(BaseAddress) &
+		      XEL_TSR_XMIT_ACTIVE_MASK) == 0)) {
+
+			/*
+			 * Write the frame to the buffer.
+			 */
+			XEmacLite_AlignedWrite(FramePtr, (u32 *) BaseAddress,
+					       ByteCount);
+
+			/*
+			 * The frame is in the buffer, now send it
+			 */
+			XIo_Out32(BaseAddress + XEL_TPLR_OFFSET, (ByteCount &
+								  (XEL_TPLR_LENGTH_MASK_HI
+								   |
+								   XEL_TPLR_LENGTH_MASK_LO)));
+
+			Register = XIo_In32(BaseAddress + XEL_TSR_OFFSET);
+
+			Register |= XEL_TSR_XMIT_BUSY_MASK;
+
+			if ((Register & XEL_TSR_XMIT_IE_MASK) != 0) {
+				Register |= XEL_TSR_XMIT_ACTIVE_MASK;
+			}
+
+			XIo_Out32(BaseAddress + XEL_TSR_OFFSET, Register);
+
+			/*
+			 * Do not switch to next buffer, there is a sync problem and
+			 * the expected buffer should not change.
+			 */
+
+			return XST_SUCCESS;
+		}
+	}
+
+
+	/*
+	 * Buffer(s) was(were) full, return failure to allow for polling usage
+	 */
+
+	return XST_FAILURE;
+}
+
+/*****************************************************************************/
+/**
+*
+* Receive a frame. Intended to be called from the interrupt context or
+* with a wrapper which waits for the receive frame to be available.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+* @param FramePtr is a pointer to a buffer where the frame will
+* be stored. The buffer must be at least XEL_MAX_FRAME_SIZE bytes.
+* For optimal performance, a 32-bit aligned buffer should be used but
+* it is not required, the function will align the data if necessary.
+*
+* @return
+*
+* The type/length field of the frame received. When the type/length field
+* contains the type, XEL_MAX_FRAME_SIZE bytes will be copied out of the
+* buffer and it is up to the higher layers to sort out the frame.
+* Function returns 0 if there is no data waiting in the receive buffer or
+* the pong buffer if configured.
+*
+* @note
+*
+* This function call is not blocking in nature, i.e. it will not wait until
+* a frame arrives.
+*
+******************************************************************************/
+u16 XEmacLite_Recv(XEmacLite * InstancePtr, u8 *FramePtr)
+{
+	u16 LengthType;
+	u16 Length;
+	u32 Register;
+	u32 BaseAddress;
+
+	/*
+	 * Verify that each of the inputs are valid.
+	 */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/*
+	 * Determine the expected buffer address
+	 */
+
+	BaseAddress = XEmacLite_mNextReceiveAddr(InstancePtr);
+
+	/*
+	 * Verify which buffer has valid data
+	 */
+
+	Register = XIo_In32(BaseAddress + XEL_RSR_OFFSET);
+
+	if ((Register & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
+
+		/*
+		 * The driver is in sync, update the next expected buffer if configured
+		 */
+
+		if (InstancePtr->ConfigPtr->RxPingPong != 0) {
+			InstancePtr->NextRxBufferToUse ^= XEL_BUFFER_OFFSET;
+		}
+	}
+	else {
+		/*
+		 * The instance is out of sync, try other buffer if other
+		 * buffer is configured, return 0 otherwise. If the instance is
+		 * out of sync, do not update the 'NextRxBufferToUse' since it
+		 * will be correct on subsequent calls.
+		 */
+		if (InstancePtr->ConfigPtr->RxPingPong != 0) {
+			BaseAddress ^= XEL_BUFFER_OFFSET;
+		}
+		else {
+			return 0;	/* No data was available */
+		}
+		/*
+		 * Verify that buffer has valid data
+		 */
+
+		Register = XIo_In32(BaseAddress + XEL_RSR_OFFSET);
+
+		if ((Register & XEL_RSR_RECV_DONE_MASK) !=
+		    XEL_RSR_RECV_DONE_MASK) {
+			return 0;	/* No data was available */
+		}
+	}
+
+	/*
+	 * Get the length of the frame that arrived
+	 */
+	LengthType = XEmacLite_mGetReceiveDataLength(BaseAddress);
+
+	/* Check if length is valid */
+
+	if (LengthType > XEL_MAX_FRAME_SIZE) {
+		/* Field contains type, use max frame size and let user parse it */
+		Length = XEL_MAX_FRAME_SIZE;
+	}
+	else {
+		/* Use the length in the frame, plus the header and trailer */
+		Length = LengthType + XEL_HEADER_SIZE + XEL_FCS_SIZE;
+	}
+
+	/*
+	 * Read from the EMAC Lite
+	 */
+	XEmacLite_AlignedRead(((u32 *) (BaseAddress + XEL_RXBUFF_OFFSET)),
+			      FramePtr, Length);
+
+	/*
+	 * Acknowledge the frame by clearing the receive-done bit, which
+	 * frees the buffer for the hardware.
+	 */
+
+	Register = XIo_In32(BaseAddress + XEL_RSR_OFFSET);
+
+	Register &= ~XEL_RSR_RECV_DONE_MASK;
+
+	XIo_Out32(BaseAddress + XEL_RSR_OFFSET, Register);
+
+
+	return Length;
+}
+
+/*****************************************************************************/
+/**
+*
+* Set the MAC address for this device. The address is a 48-bit value.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+* @param AddressPtr is a pointer to a 6-byte MAC address.
+* the format of the MAC address is major octet to minor octet
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* TX must be idle and RX should be idle for deterministic results.
+*
+* Function will not return if hardware is absent or not functioning
+* properly.
+*
+******************************************************************************/
+void XEmacLite_SetMacAddress(XEmacLite * InstancePtr, u8 *AddressPtr)
+{
+	u32 BaseAddress;
+
+	/*
+	 * Verify that each of the inputs are valid.
+	 */
+
+	XASSERT_VOID(InstancePtr != NULL);
+
+	/* The MAC-program command is issued through the current TX
+	 * buffer, so TX must be idle (see function header). */
+	BaseAddress =
+		InstancePtr->BaseAddress + InstancePtr->NextTxBufferToUse +
+		XEL_TXBUFF_OFFSET;
+
+	/*
+	 * Copy the MAC address to the Transmit buffer
+	 */
+	XEmacLite_AlignedWrite(AddressPtr, (u32 *) BaseAddress,
+			       XEL_MAC_ADDR_SIZE);
+
+	/*
+	 * Set the length
+	 */
+
+	XIo_Out32(BaseAddress + XEL_TPLR_OFFSET, XEL_MAC_ADDR_SIZE);
+
+	/*
+	 * Update the MAC address in the EMAC Lite
+	 */
+
+	XIo_Out32(BaseAddress + XEL_TSR_OFFSET, XEL_TSR_PROG_MAC_ADDR);
+
+
+	/*
+	 * Wait (busy-poll) for EMAC Lite to finish with the MAC address
+	 * update; this never returns if the hardware is absent.
+	 */
+
+	while ((XIo_In32(BaseAddress + XEL_TSR_OFFSET) &
+		XEL_TSR_PROG_MAC_ADDR) != 0);
+
+	/*
+	 * Switch to next buffer if configured
+	 */
+
+	if (InstancePtr->ConfigPtr->TxPingPong != 0) {
+		InstancePtr->NextTxBufferToUse ^= XEL_BUFFER_OFFSET;
+	}
+
+}
+
+
+/******************************************************************************/
+/**
+*
+* This is a stub for the send and recv callbacks. The stub
+* is here in case the upper layers forget to set the handlers.
+*
+* @param CallBackRef is a pointer to the upper layer callback reference
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Default send/recv callback: asserts unconditionally so a missing
+ * handler registration is caught immediately. */
+void StubHandler(void *CallBackRef)
+{
+	XASSERT_VOID_ALWAYS();
+}
+
+
+/****************************************************************************/
+/**
+*
+* Determine if there is a transmit buffer available.
+*
+* @param InstancePtr is the pointer to the instance of the driver to
+* be worked on
+*
+* @return TRUE if there is a TX buffer available for data to be written into,
+* FALSE otherwise.
+*
+* @note
+*
+*****************************************************************************/
+u32 XEmacLite_TxBufferAvailable(XEmacLite * InstancePtr)
+{
+
+	u32 Register;
+	u32 TxPingBusy;
+	u32 TxPongBusy;
+
+	/*
+	 * Verify that each of the inputs are valid.
+	 */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/*
+	 * Read the current buffer register and determine if the buffer is available
+	 */
+
+	Register = XIo_In32(InstancePtr->BaseAddress +
+			    InstancePtr->NextTxBufferToUse + XEL_TXBUFF_OFFSET);
+
+	TxPingBusy =
+		((Register & XEL_TSR_XMIT_BUSY_MASK) == XEL_TSR_XMIT_BUSY_MASK);
+
+	/*
+	 * Read the other buffer register and determine if the other buffer is available
+	 * NOTE(review): the other buffer is selected with
+	 * (NextTxBufferToUse ^ XEL_TSR_OFFSET); every other ping-pong
+	 * toggle in this file uses XEL_BUFFER_OFFSET -- confirm this
+	 * XOR constant is intended.
+	 */
+
+	Register = XIo_In32(InstancePtr->BaseAddress +
+			    (InstancePtr->NextTxBufferToUse ^ XEL_TSR_OFFSET) +
+			    XEL_TXBUFF_OFFSET);
+
+	TxPongBusy =
+		((Register & XEL_TSR_XMIT_BUSY_MASK) == XEL_TSR_XMIT_BUSY_MASK);
+
+	/* Available unless both ping and pong are busy. */
+	return (!(TxPingBusy && TxPongBusy));
+
+}
+
+/****************************************************************************/
+/**
+*
+*
+* Flush the Receive buffers. All data will be lost.
+*
+* @param InstancePtr is the pointer to the instance of the driver to
+* be worked on
+*
+* @return None.
+*
+* @note
+*
+*****************************************************************************/
+void XEmacLite_FlushReceive(XEmacLite * InstancePtr)
+{
+
+	u32 Register;
+
+	/*
+	 * Verify that each of the inputs are valid.
+	 */
+
+	XASSERT_VOID(InstancePtr != NULL);
+
+	/*
+	 * Read the current receive status register
+	 */
+
+	Register = XIo_In32(InstancePtr->BaseAddress + XEL_RSR_OFFSET);
+
+	/*
+	 * Preserve the IE bit (everything else, including RECV_DONE, is
+	 * cleared, discarding any pending frame)
+	 */
+	Register &= XEL_RSR_RECV_IE_MASK;
+
+	/*
+	 * Write out the value to flush the RX buffer
+	 */
+
+	XIo_Out32(InstancePtr->BaseAddress + XEL_RSR_OFFSET, Register);
+
+	/*
+	 * If the pong buffer is available, flush it also
+	 */
+
+	if (InstancePtr->ConfigPtr->RxPingPong != 0) {
+		/*
+		 * Read the pong buffer's receive status register
+		 */
+
+		Register = XIo_In32(InstancePtr->BaseAddress + XEL_RSR_OFFSET +
+				    XEL_BUFFER_OFFSET);
+
+		/*
+		 * Preserve the IE bit
+		 */
+		Register &= XEL_RSR_RECV_IE_MASK;
+
+		/*
+		 * Write out the value to flush the RX buffer
+		 */
+
+		XIo_Out32(InstancePtr->BaseAddress + XEL_RSR_OFFSET +
+			  XEL_BUFFER_OFFSET, Register);
+
+	}
+
+}
--- /dev/null
+/* $Id: xemaclite.h,v 1.1.2.1 2007/03/13 17:26:07 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemaclite.h
+*
+* The Xilinx Ethernet Lite driver component. This component supports the Xilinx
+* Lite Ethernet 10/100 MAC (EMAC Lite).
+*
+* The Xilinx Ethernet Lite 10/100 MAC supports the following features:
+* - Media Independent Interface (MII) for connection to external
+* 10/100 Mbps PHY transceivers.
+* - Independent internal transmit and receive buffers
+* - CSMA/CD compliant operations for half-duplex modes
+* - Unicast and broadcast
+* - Automatic FCS insertion
+* - Automatic pad insertion on transmit
+* - Configurable ping/pong buffer scheme for either/both transmit and receive
+* buffer areas.
+* - Interrupt driven mode available.
+*
+* The Xilinx Ethernet Lite 10/100 MAC does not support the following features:
+* - multi-frame buffering
+* only 1 transmit frame is allowed into each transmit buffer
+* only 1 receive frame is allowed into each receive buffer.
+* the hardware blocks reception until buffer is emptied
+* - Pause frame (flow control) detection in full-duplex mode
+* - Programmable interframe gap
+* - Multicast and promiscuous address filtering
+* - Internal loopback
+* - Automatic source address insertion or overwrite
+*
+* <b>Driver Description</b>
+*
+* The device driver enables higher layer software (e.g., an application) to
+* communicate to the EMAC Lite. The driver handles transmission and reception
+* of Ethernet frames, as well as configuration of the controller. It does not
+* handle protocol stack functionality such as Link Layer Control (LLC) or the
+* Address Resolution Protocol (ARP). The protocol stack that makes use of the
+* driver handles this functionality. This implies that the driver is simply a
+* pass-through mechanism between a protocol stack and the EMAC Lite.
+*
+* Since the driver is a simple pass-through mechanism between a protocol stack
+* and the EMAC Lite, no assembly or disassembly of Ethernet frames is done at
+* the driver-level. This assumes that the protocol stack passes a correctly
+* formatted Ethernet frame to the driver for transmission, and that the driver
+* does not validate the contents of an incoming frame. A single device driver
+* can support multiple EmacLite devices.
+*
+* The driver supports interrupt driven mode and the default mode of operation
+* is polled mode. If interrupts are desired, XEmacLite_InterruptEnable() must
+* be called.
+*
+* <b>Device Configuration</b>
+*
+* The device can be configured in various ways during the FPGA implementation
+* process. Configuration parameters are stored in the xemaclite_g.c file. A table
+* is defined where each entry contains configuration information for an EmacLite
+* device. This information includes such things as the base address
+* of the memory-mapped device and the number of buffers.
+*
+* <b>Interrupt Processing</b>
+*
+* After _Initialize is called, _InterruptEnable can be called to enable the interrupt
+* driven functionality. If polled operation is desired, just call _Send and check the
+* return code. If XST_FAILURE is returned, call _Send with the same data until
+* XST_SUCCESS is returned. The same idea applies to _Recv. Call _Recv until the
+* returned length is non-zero at which point the received data is in the buffer
+* provided in the function call.
+*
+* The Transmit and Receive interrupts are enabled within the _InterruptEnable
+* function and disabled in the _InterruptDisable function. The _Send and _Recv
+* functions acknowledge the EMACLite generated interrupts associated with each
+* function.
+* It is the application's responsibility to acknowledge any associated Interrupt
+* Controller interrupts if it is used in the system.
+*
+* <b>Memory Buffer Alignment</b>
+*
+* The alignment of the input/output buffers for the _Send and _Recv routine is
+* not required to be 32 bits. If the buffer is not aligned on a 32-bit boundary
+* there will be a performance impact while the driver aligns the data for
+* transmission or upon reception.
+*
+* For optimum performance, the user should provide a 32-bit aligned buffer
+* to the _Send and _Recv routines.
+*
+* <b>Asserts</b>
+*
+* Asserts are used within all Xilinx drivers to enforce constraints on argument
+* values. Asserts can be turned off on a system-wide basis by defining, at compile
+* time, the NDEBUG identifier. By default, asserts are turned on and it is
+* recommended that application developers leave asserts on during development.
+*
+* @note
+*
+* This driver requires EmacLite hardware version 1.01a and higher. It is not
+* compatible with earlier versions of the EmacLite hardware. Use version 1.00a
+* software driver for hardware version 1.00a/b.
+*
+* The RX hardware is enabled from powerup and there is no disable. It is
+* possible that frames have been received prior to the initialization
+* of the driver. If this situation is possible, call XEmacLite_mFlushReceive()
+* to empty the receive buffers after initialization.
+*
+* This driver is intended to be RTOS and processor independent. It works
+* with physical addresses only. Any needs for dynamic memory management,
+* threads or thread mutual exclusion, virtual memory, or cache control must
+* be satisfied by the layer above this driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.01a ecm 01/30/04 First release
+* </pre>
+*
+*
+******************************************************************************/
+
+#ifndef XEMACLITE_H /* prevent circular inclusions */
+#define XEMACLITE_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xemaclite_l.h"
+
+/************************** Constant Definitions *****************************/
+/*
+ * Device information
+ */
+#define XEL_DEVICE_NAME "xemaclite"
+#define XEL_DEVICE_DESC "Xilinx Ethernet Lite 10/100 MAC"
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * This typedef contains configuration information for a device.
+ */
+typedef struct {
+ u16 DeviceId; /**< Unique ID of device */
+ u32 BaseAddress; /**< Device base address */
+ u32 PhysAddress; /**< Physical address (for Linux) */
+ u8 TxPingPong; /**< 1 if TX Pong buffer configured,0 otherwise */
+ u8 RxPingPong; /**< 1 if RX Pong buffer configured,0 otherwise */
+} XEmacLite_Config;
+
+
+/*
+ * Callback when data is sent or received .
+ * @param CallBackRef is a callback reference passed in by the upper layer
+ * when setting the callback functions, and passed back to the upper
+ * layer when the callback is invoked.
+ */
+typedef void (*XEmacLite_Handler) (void *CallBackRef);
+
+/**
+ * The XEmacLite driver instance data. The user is required to allocate a
+ * variable of this type for every EmacLite device in the system. A pointer
+ * to a variable of this type is then passed to the driver API functions.
+ */
+
+typedef struct {
+ u32 PhysAddress; /* Physical base address of the device */
+ u32 BaseAddress; /* Base address for device (IPIF) */
+ u32 IsReady; /* Device is initialized and ready */
+ u32 NextTxBufferToUse; /* Next TX buffer to write to */
+ u32 NextRxBufferToUse; /* Next RX buffer to read from */
+ XEmacLite_Config *ConfigPtr; /* A pointer to the device configuration */
+
+ /*
+ * Callbacks invoked from XEmacLite_InterruptHandler (interrupt context)
+ */
+
+ XEmacLite_Handler RecvHandler; /* Called when a frame has been received */
+ void *RecvRef; /* Opaque reference passed to RecvHandler */
+ XEmacLite_Handler SendHandler; /* Called when a frame has been sent */
+ void *SendRef; /* Opaque reference passed to SendHandler */
+
+} XEmacLite;
+
+/***************** Macros (Inline Functions) Definitions *********************/
+/****************************************************************************/
+/**
+*
+* Return the next expected Transmit Buffer's address .
+*
+* @param InstancePtr is the pointer to the instance of the driver to
+* be worked on
+*
+* @note
+* This macro returns the address of the next transmit buffer to put data into.
+* This is used to determine the destination of the next transmit data frame.
+* The whole expression is parenthesized so the macro expands safely inside
+* larger expressions (the previous form left XEL_TXBUFF_OFFSET outside the
+* parentheses, exposing it to operator-precedence surprises at the use site).
+*
+*****************************************************************************/
+#define XEmacLite_mNextTransmitAddr(InstancePtr) \
+ ((InstancePtr)->BaseAddress + (InstancePtr)->NextTxBufferToUse + \
+ XEL_TXBUFF_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* Return the next expected Receive Buffer's address .
+*
+* @param InstancePtr is the pointer to the instance of the driver to
+* be worked on
+*
+* @note
+* This macro returns the address of the next receive buffer to read data from.
+* This is the expected receive buffer address if the driver is in sync.
+* NOTE(review): unlike XEmacLite_mNextTransmitAddr, no XEL_RXBUFF_OFFSET is
+* added here; NextRxBufferToUse presumably already incorporates that offset --
+* confirm against the initialization code in xemaclite.c.
+*
+*****************************************************************************/
+#define XEmacLite_mNextReceiveAddr(InstancePtr) \
+ ((InstancePtr)->BaseAddress + (InstancePtr)->NextRxBufferToUse)
+
+
+/************************** Variable Definitions *****************************/
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization functions in xemaclite.c
+ */
+int XEmacLite_CfgInitialize(XEmacLite * InstancePtr, XEmacLite_Config * CfgPtr,
+ u32 VirtualAddress);
+void XEmacLite_SetMacAddress(XEmacLite * InstancePtr, u8 *AddressPtr);
+u32 XEmacLite_TxBufferAvailable(XEmacLite * InstancePtr);
+void XEmacLite_FlushReceive(XEmacLite * InstancePtr);
+
+int XEmacLite_Send(XEmacLite * InstancePtr, u8 *FramePtr, unsigned ByteCount);
+u16 XEmacLite_Recv(XEmacLite * InstancePtr, u8 *FramePtr);
+
+/*
+ * Interrupt driven functions in xemaclite_intr.c
+ */
+
+int XEmacLite_EnableInterrupts(XEmacLite * InstancePtr);
+void XEmacLite_DisableInterrupts(XEmacLite * InstancePtr);
+
+void XEmacLite_InterruptHandler(void *InstancePtr);
+
+void XEmacLite_SetRecvHandler(XEmacLite * InstancePtr, void *CallBackRef,
+ XEmacLite_Handler FuncPtr);
+void XEmacLite_SetSendHandler(XEmacLite * InstancePtr, void *CallBackRef,
+ XEmacLite_Handler FuncPtr);
+
+/*
+ * Selftest function in xemaclite_selftest.c
+ */
+int XEmacLite_SelfTest(XEmacLite * InstancePtr);
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xemaclite_i.h,v 1.1.2.1 2007/03/13 17:26:08 akondratenko Exp $: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/******************************************************************************/
+/**
+* @file xemaclite_i.h
+*
+* This header file contains internal identifiers, which are those shared
+* between the files of the driver. It is intended for internal use only.
+*
+* NOTES:
+*
+* None.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.01a ecm 05/21/04 First release
+* </pre>
+******************************************************************************/
+
+#ifndef XEMACLITE_I_H /* prevent circular inclusions */
+#define XEMACLITE_I_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xemaclite.h"
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/****************************************************************************/
+/**
+*
+* Get the TX active location to check status. This is used to check if
+* the TX buffer is currently active. There isn't any way in the hardware
+* to implement this but the register is fully populated so the driver can
+* set the bit in the send routine and the ISR can clear the bit when
+* the handler is complete. This mimics the correct operation of the hardware
+* if it was possible to do this in hardware.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return Full contents of the TX status register; mask with
+* XEL_TSR_XMIT_ACTIVE_MASK to extract the active bit.
+*
+* @note
+* u32 XEmacLite_mGetTxActive(u32 BaseAddress)
+*
+*****************************************************************************/
+#define XEmacLite_mGetTxActive(BaseAddress) \
+ (XIo_In32((BaseAddress) + XEL_TSR_OFFSET))
+
+/****************************************************************************/
+/**
+*
+* Set the TX active location to update status. This is used to set the bit
+* indicating which TX buffer is currently active. There isn't any way in the
+* hardware to implement this but the register is fully populated so the driver
+* can set the bit in the send routine and the ISR can clear the bit when
+* the handler is complete. This mimics the correct operation of the hardware
+* if it was possible to do this in hardware.
+*
+* @param BaseAddress is the base address of the device
+* @param Mask is the data to be written
+*
+* @return None
+*
+* @note
+* void XEmacLite_mSetTxActive(u32 BaseAddress, u32 Mask)
+* Writes the entire TX status register, so the caller must supply a
+* read-modify-write value (see XEmacLite_mGetTxActive) to avoid
+* clobbering the other status bits.
+*
+*****************************************************************************/
+#define XEmacLite_mSetTxActive(BaseAddress, Mask) \
+ (XIo_Out32((BaseAddress) + XEL_TSR_OFFSET, (Mask)))
+
+/************************** Variable Definitions ****************************/
+
+/************************** Function Prototypes ******************************/
+
+void XEmacLite_AlignedWrite(void *SrcPtr, u32 *DestPtr, unsigned ByteCount);
+void XEmacLite_AlignedRead(u32 *SrcPtr, void *DestPtr, unsigned ByteCount);
+
+void StubHandler(void *CallBackRef);
+
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xemaclite_intr.c,v 1.1.2.1 2007/03/13 17:26:08 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemaclite_intr.c
+*
+* Functions in this file are for the interrupt driven processing functionality.
+* See xemaclite.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- --------------------------------------------------------
+* 1.01a ecm 03/31/04 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xstatus.h"
+#include "xemaclite_l.h"
+#include "xemaclite_i.h"
+#include "xio.h"
+#include "xemaclite.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+/*****************************************************************************/
+/**
+*
+* Enable the EmacLite Interrupts.
+*
+* This function must be called before other functions to send or receive data
+* in interrupt driven mode. The user should have connected the
+* interrupt handler of the driver to an interrupt source such as an interrupt
+* controller or the processor interrupt prior to this function being called.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if the device interrupts were enabled successfully.
+* - XST_NO_CALLBACK if the callbacks were not set.
+*
+* @note
+*
+* Both send and receive callbacks must have been registered (via
+* XEmacLite_SetSendHandler / XEmacLite_SetRecvHandler) before calling this.
+*
+******************************************************************************/
+int XEmacLite_EnableInterrupts(XEmacLite * InstancePtr)
+{
+ u32 Register;
+
+ /*
+ * Verify that each of the inputs are valid.
+ */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /*
+ * Verify that the handlers are in place; the stub handler means the
+ * upper layer never registered a real callback.
+ */
+
+ if ((InstancePtr->RecvHandler == (XEmacLite_Handler) StubHandler) ||
+ (InstancePtr->SendHandler == (XEmacLite_Handler) StubHandler)) {
+ return XST_NO_CALLBACK;
+ }
+
+
+ /* Enable TX and RX interrupts in both the ping and pong buffers.
+ * NOTE(review): the value read from the ping register (with IE set) is
+ * also written to the pong register, overwriting any pong status bits --
+ * presumably benign at enable time, but confirm.
+ */
+
+ Register = XIo_In32(InstancePtr->BaseAddress + XEL_TSR_OFFSET);
+ Register |= XEL_TSR_XMIT_IE_MASK;
+ XIo_Out32(InstancePtr->BaseAddress + XEL_TSR_OFFSET, Register);
+ XIo_Out32(InstancePtr->BaseAddress + XEL_TSR_OFFSET +
+ XEL_BUFFER_OFFSET, Register);
+
+ Register = XIo_In32(InstancePtr->BaseAddress + XEL_RSR_OFFSET);
+ Register |= XEL_RSR_RECV_IE_MASK;
+ XIo_Out32(InstancePtr->BaseAddress + XEL_RSR_OFFSET, Register);
+ XIo_Out32(InstancePtr->BaseAddress + XEL_RSR_OFFSET +
+ XEL_BUFFER_OFFSET, Register);
+
+ /* Enable the global interrupt output. */
+
+ XIo_Out32(InstancePtr->BaseAddress + XEL_GIER_OFFSET,
+ XEL_GIER_GIE_MASK);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Disables the interrupts from the device(the higher layer software is
+* responsible for disabling interrupts at the interrupt controller).
+*
+* To start using the device again, _EnableInterrupts must be called.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The global interrupt output is gated off first so no interrupt can fire
+* while the per-buffer IE bits are being cleared.
+*
+******************************************************************************/
+void XEmacLite_DisableInterrupts(XEmacLite * InstancePtr)
+{
+ u32 Register;
+
+ /*
+ * Verify that each of the inputs are valid.
+ */
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+
+ /* Disable the global interrupt output. */
+
+ XIo_Out32(InstancePtr->BaseAddress + XEL_GIER_OFFSET, 0);
+
+ /* Disable TX and RX interrupts in both the ping and pong buffers.
+ * NOTE(review): as in _EnableInterrupts, the ping register value is also
+ * written to the pong register, overwriting pong status bits -- confirm
+ * this is intended.
+ */
+
+ Register = XIo_In32(InstancePtr->BaseAddress + XEL_TSR_OFFSET);
+ Register &= ~XEL_TSR_XMIT_IE_MASK;
+ XIo_Out32(InstancePtr->BaseAddress + XEL_TSR_OFFSET, Register);
+ XIo_Out32(InstancePtr->BaseAddress + XEL_TSR_OFFSET +
+ XEL_BUFFER_OFFSET, Register);
+
+ Register = XIo_In32(InstancePtr->BaseAddress + XEL_RSR_OFFSET);
+ Register &= ~XEL_RSR_RECV_IE_MASK;
+ XIo_Out32(InstancePtr->BaseAddress + XEL_RSR_OFFSET, Register);
+ XIo_Out32(InstancePtr->BaseAddress + XEL_RSR_OFFSET +
+ XEL_BUFFER_OFFSET, Register);
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Interrupt handler for the EMACLite driver. It performs the following
+* processing:
+*
+* - Get the interrupt status from the registers to determine the source of the
+* interrupt.
+*
+* - Call the appropriate handler based on the source of the interrupt.
+*
+* @param InstancePtr contains a pointer to the EMACLite device instance for
+* the interrupt.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Register doubles as a "TX-done was handled" flag: it is only assigned a
+* TSR value (with the ACTIVE bit cleared) inside the TX branches, and the
+* IE bit set by _EnableInterrupts keeps that value non-zero, so a non-zero
+* Register at the end means at least one transmit completed.
+*
+******************************************************************************/
+void XEmacLite_InterruptHandler(void *InstancePtr)
+{
+
+ XEmacLite *EmacLitePtr;
+ u32 Register = 0;
+
+ /*
+ * Verify that each of the inputs are valid.
+ */
+
+ XASSERT_VOID(InstancePtr != NULL);
+
+ /*
+ * Convert the non-typed pointer to an EmacLite instance pointer
+ * such that there is access to the device.
+ */
+ EmacLitePtr = (XEmacLite *) InstancePtr;
+
+ /*
+ * If either the ping or pong RX buffer holds a frame, notify the
+ * upper layer.
+ */
+ if ((XEmacLite_mIsRxEmpty(EmacLitePtr->BaseAddress) != TRUE) ||
+ (XEmacLite_mIsRxEmpty(EmacLitePtr->BaseAddress + XEL_BUFFER_OFFSET)
+ != TRUE)) {
+ /*
+ * Call the RX callback.
+ */
+
+ EmacLitePtr->RecvHandler(EmacLitePtr->RecvRef);
+
+ }
+ /*
+ * Ping TX buffer: done transmitting and marked active by _Send.
+ */
+ if ((XEmacLite_mIsTxDone(EmacLitePtr->BaseAddress) == TRUE) &&
+ ((XEmacLite_mGetTxActive(EmacLitePtr->BaseAddress) &
+ XEL_TSR_XMIT_ACTIVE_MASK) != 0)) {
+
+ /*
+ * Clear the Active bit
+ */
+ Register = XEmacLite_mGetTxActive(EmacLitePtr->BaseAddress);
+ Register &= ~XEL_TSR_XMIT_ACTIVE_MASK;
+ XEmacLite_mSetTxActive(EmacLitePtr->BaseAddress, Register);
+ }
+ /*
+ * Pong TX buffer: same check at the pong offset.
+ */
+ if ((XEmacLite_mIsTxDone(EmacLitePtr->BaseAddress + XEL_BUFFER_OFFSET)
+ == TRUE) &&
+ ((XEmacLite_mGetTxActive
+ (EmacLitePtr->BaseAddress + XEL_BUFFER_OFFSET)
+ & XEL_TSR_XMIT_ACTIVE_MASK) != 0)) {
+
+ /*
+ * Clear the Active bit
+ */
+ Register =
+ XEmacLite_mGetTxActive(EmacLitePtr->BaseAddress +
+ XEL_BUFFER_OFFSET);
+ Register &= ~XEL_TSR_XMIT_ACTIVE_MASK;
+ XEmacLite_mSetTxActive(EmacLitePtr->BaseAddress +
+ XEL_BUFFER_OFFSET, Register);
+ }
+
+ /*
+ * If there was a TX interrupt (Register was assigned above), call the
+ * callback
+ */
+ if (Register != 0) {
+
+ /*
+ * Call the TX callback.
+ */
+
+ EmacLitePtr->SendHandler(EmacLitePtr->SendRef);
+
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Sets the callback function for handling received frames in interrupt mode.
+* The upper layer software should call this function during initialization.
+* The callback is called when a frame is received. The callback function
+* should communicate the data to a thread such that the processing is not
+* performed in an interrupt context.
+*
+* The callback is invoked by the driver within interrupt context, so it needs
+* to do its job quickly. If there are other potentially slow operations
+* within the callback, these should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the
+* application in the callback. This helps the application correlate
+* the callback to a particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Must be called before XEmacLite_EnableInterrupts, which rejects the
+* default stub handler.
+*
+******************************************************************************/
+void XEmacLite_SetRecvHandler(XEmacLite * InstancePtr, void *CallBackRef,
+ XEmacLite_Handler FuncPtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Record the callback and the opaque reference handed back with it */
+ InstancePtr->RecvHandler = FuncPtr;
+ InstancePtr->RecvRef = CallBackRef;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Sets the callback function for handling transmitted frames in interrupt mode.
+* The upper layer software should call this function during initialization.
+* The callback is called when a frame is transmitted. The callback function
+* should communicate the data to a thread such that the processing is not
+* performed in an interrupt context.
+*
+* The callback is invoked by the driver within interrupt context, so it needs
+* to do its job quickly. If there are other potentially slow operations
+* within the callback, these should be done at task-level.
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+* @param CallBackRef is a reference pointer to be passed back to the
+* application in the callback. This helps the application correlate
+* the callback to a particular driver.
+* @param FuncPtr is the pointer to the callback function.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Must be called before XEmacLite_EnableInterrupts, which rejects the
+* default stub handler.
+*
+******************************************************************************/
+void XEmacLite_SetSendHandler(XEmacLite * InstancePtr, void *CallBackRef,
+ XEmacLite_Handler FuncPtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(FuncPtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Record the callback and the opaque reference handed back with it */
+ InstancePtr->SendHandler = FuncPtr;
+ InstancePtr->SendRef = CallBackRef;
+}
--- /dev/null
+/* $Id: xemaclite_l.c,v 1.1.2.1 2007/03/13 17:26:08 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemaclite_l.c
+*
+* This file contains the minimal, polled functions to send and receive Ethernet
+* frames.
+*
+* Refer to xemaclite.h for more details.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ecm 06/01/02 First release
+* 1.01a ecm 03/31/04 Additional functionality and the _AlignedRead and
+* _AlignedWrite functions.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xemaclite_l.h"
+#include "xemaclite_i.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+void XEmacLite_AlignedWrite(void *SrcPtr, u32 *DestPtr, unsigned ByteCount);
+void XEmacLite_AlignedRead(u32 *SrcPtr, void *DestPtr, unsigned ByteCount);
+
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Send an Ethernet frame. The size is the total frame size, including header.
+*
+* @param BaseAddress is the base address of the device
+* @param FramePtr is a pointer to frame
+* @param ByteCount is the size, in bytes, of the frame
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* NOTE(review): the original driver blocked here until transmission
+* completed, but the poll-for-completion loop at the end of this function
+* is commented out in this port. As written, the function returns as soon
+* as the frame, its length, and the busy bit have been written; completion
+* must be detected elsewhere (e.g. via the TX interrupt).
+*
+* If the ping buffer is the destination of the data, the argument should be
+* DeviceAddress + XEL_TXBUFF_OFFSET.
+* If the pong buffer is the destination of the data, the argument should be
+* DeviceAddress + XEL_TXBUFF_OFFSET + XEL_BUFFER_OFFSET.
+* The function does not take the different buffers into consideration.
+******************************************************************************/
+void XEmacLite_SendFrame(u32 BaseAddress, u8 *FramePtr, unsigned ByteCount)
+{
+ u32 Register;
+
+ /*
+ * Write data to the EMAC Lite
+ */
+ XEmacLite_AlignedWrite(FramePtr, (u32 *) (BaseAddress), ByteCount);
+
+ /*
+ * The frame is in the buffer, now send it
+ */
+ XIo_Out32(BaseAddress + XEL_TPLR_OFFSET,
+ (ByteCount &
+ (XEL_TPLR_LENGTH_MASK_HI | XEL_TPLR_LENGTH_MASK_LO)));
+
+
+ Register = XIo_In32(BaseAddress + XEL_TSR_OFFSET);
+ XIo_Out32(BaseAddress + XEL_TSR_OFFSET,
+ (Register | XEL_TSR_XMIT_BUSY_MASK));
+
+ /*
+ * Completion poll deliberately disabled in this port (see @note above);
+ * the original blocking behavior was:
+ */
+// while (!XEmacLite_mIsTxDone(BaseAddress));
+
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Receive a frame. Wait for a frame to arrive.
+*
+* @param BaseAddress is the base address of the device
+* @param FramePtr is a pointer to a buffer where the frame will
+* be stored.
+*
+* @return
+*
+* The type/length field of the frame received. When the type/length field
+* contains a type (a value above XEL_MAX_FRAME_SIZE), XEL_MAX_FRAME_SIZE
+* bytes will be copied out of the buffer and it is up to the higher layers
+* to sort out the frame.
+*
+* @note
+*
+* This function call is blocking in nature, i.e. it will wait until a
+* frame arrives (busy-wait on the RX status register).
+*
+* NOTE(review): the guidance below appears stale -- the code reads the RSR
+* and RPLR relative to BaseAddress and adds XEL_RXBUFF_OFFSET itself for the
+* data copy, so passing DeviceAddress + XEL_RXBUFF_OFFSET as suggested would
+* apply the offset twice. Confirm the intended argument against the callers.
+*
+* If the ping buffer is the source of the data, the argument should be
+* DeviceAddress + XEL_RXBUFF_OFFSET.
+* If the pong buffer is the source of the data, the argument should be
+* DeviceAddress + XEL_RXBUFF_OFFSET + XEL_BUFFER_OFFSET.
+* The function does not take the different buffers into consideration.
+******************************************************************************/
+u16 XEmacLite_RecvFrame(u32 BaseAddress, u8 *FramePtr)
+{
+ u16 LengthType;
+ u16 Length;
+ u32 Register;
+
+ /*
+ * Wait for a frame to arrive - this is a blocking call
+ */
+
+ while (XEmacLite_mIsRxEmpty(BaseAddress));
+
+ /*
+ * Get the length of the frame that arrived; the 32-bit register read
+ * is deliberately truncated to 16 bits before masking
+ */
+ LengthType = XIo_In32(BaseAddress + XEL_RPLR_OFFSET);
+ LengthType &= (XEL_RPLR_LENGTH_MASK_HI | XEL_RPLR_LENGTH_MASK_LO);
+
+ /* check if length is valid */
+
+ if (LengthType > XEL_MAX_FRAME_SIZE) {
+ /* Field contain type, use max frame size and let user parse it */
+ Length = XEL_MAX_FRAME_SIZE;
+ }
+ else {
+ /* Use the length in the frame, plus the header and trailer */
+ Length = LengthType + XEL_HEADER_SIZE + XEL_FCS_SIZE;
+ }
+
+ /*
+ * Read each byte from the EMAC Lite
+ */
+ XEmacLite_AlignedRead((u32 *) (BaseAddress + XEL_RXBUFF_OFFSET),
+ FramePtr, Length);
+
+ /*
+ * Acknowledge the frame: clear the receive-done bit so the hardware
+ * can reuse the buffer
+ */
+
+ Register = XIo_In32(BaseAddress + XEL_RSR_OFFSET);
+ Register &= ~XEL_RSR_RECV_DONE_MASK;
+ XIo_Out32(BaseAddress + XEL_RSR_OFFSET, Register);
+
+ return LengthType;
+}
+
+/******************************************************************************/
+/**
+*
+* This function aligns the incoming data and writes it out to a 32-bit
+* aligned destination address range.
+*
+* @param SrcPtr is a pointer to incoming data of any alignment.
+* @param DestPtr is a pointer to outgoing data of 32-bit alignment.
+* @param ByteCount is the number of bytes to write.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmacLite_AlignedWrite(void *SrcPtr, u32 *DestPtr, unsigned ByteCount)
+{
+ unsigned i;
+ unsigned Length = ByteCount;    /* bytes still to be copied */
+ u32 AlignBuffer;                /* word-sized staging buffer */
+ u32 *To32Ptr;
+ u32 *From32Ptr;
+ u16 *To16Ptr;
+ u16 *From16Ptr;
+ u8 *To8Ptr;
+ u8 *From8Ptr;
+
+ To32Ptr = DestPtr;
+
+ if ((((u32) SrcPtr) & 0x00000003) == 0) {
+
+ /*
+ * Word aligned source buffer, no correction needed.
+ */
+// printk("noaligned\n");
+
+ From32Ptr = (u32 *) SrcPtr;
+
+ while (Length > 3) {
+ /*
+ * Output each whole word to the destination.
+ */
+
+ *To32Ptr++ = *From32Ptr++;
+
+ /*
+ * Adjust length accordingly
+ */
+
+ Length -= 4;
+ }
+
+ /*
+ * Set up to output the remaining data, zero the temp buffer first.
+ */
+
+ AlignBuffer = 0;
+ To8Ptr = (u8 *) &AlignBuffer;
+ From8Ptr = (u8 *) From32Ptr;
+
+ }
+ else if ((((u32) SrcPtr) & 0x00000001) != 0) {
+ /*
+ * Byte aligned source buffer; stage bytes into AlignBuffer and
+ * write it out one word at a time.
+ */
+
+ AlignBuffer = 0;
+ To8Ptr = (u8 *) &AlignBuffer;
+ From8Ptr = (u8 *) SrcPtr;
+
+// printk("aligned8\n");
+
+ while (Length > 3) {
+ /*
+ * Copy each byte into the temporary buffer.
+ */
+
+ for (i = 0; i < 4; i++) {
+ *To8Ptr++ = *From8Ptr++;
+ }
+
+ /*
+ * Output the buffer
+ */
+
+ *To32Ptr++ = AlignBuffer;
+
+ /*
+ * Reset the temporary buffer pointer and adjust length.
+ */
+
+ To8Ptr = (u8 *) &AlignBuffer;
+ Length -= 4;
+ }
+
+ /*
+ * Set up to output the remaining data, zero the temp buffer first.
+ */
+
+ AlignBuffer = 0;
+ To8Ptr = (u8 *) &AlignBuffer;
+
+ }
+ else {
+ /*
+ * Half-word aligned source buffer; stage half-words into
+ * AlignBuffer and write it out one word at a time.
+ */
+
+ AlignBuffer = 0;
+ To16Ptr = (u16 *) &AlignBuffer;
+ From16Ptr = (u16 *) SrcPtr;
+
+// printk("aligned16\n");
+
+ while (Length > 3) {
+ /*
+ * Copy each half word into the temporary buffer.
+ */
+
+ for (i = 0; i < 2; i++) {
+ *To16Ptr++ = *From16Ptr++;
+ }
+
+ /*
+ * Output the buffer.
+ */
+
+ *To32Ptr++ = AlignBuffer;
+
+ /*
+ * Reset the temporary buffer pointer and adjust length.
+ */
+
+ To16Ptr = (u16 *) &AlignBuffer;
+ Length -= 4;
+ }
+
+ /*
+ * Set up to output the remaining data, zero the temp buffer first.
+ */
+
+ AlignBuffer = 0;
+ To8Ptr = (u8 *) &AlignBuffer;
+ From8Ptr = (u8 *) From16Ptr;
+ }
+
+ /*
+ * Output the remaining (0..3) tail bytes, zero-padded to a word.
+ * NOTE(review): a full 32-bit word is written even when Length is
+ * already 0 here, so one zero word past ByteCount is always stored
+ * at the destination - confirm the device buffer tolerates this.
+ */
+ for (i = 0; i < Length; i++) {
+ *To8Ptr++ = *From8Ptr++;
+ }
+
+ *To32Ptr++ = AlignBuffer;
+
+}
+
+/******************************************************************************/
+/**
+*
+* This function reads from a 32-bit aligned source address range and aligns
+* the writes to the provided destination pointer alignment.
+*
+* @param SrcPtr is a pointer to incoming data of 32-bit alignment.
+* @param DestPtr is a pointer to outgoing data of any alignment.
+* @param ByteCount is the number of bytes to read.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XEmacLite_AlignedRead(u32 *SrcPtr, void *DestPtr, unsigned ByteCount)
+{
+ unsigned i;
+ unsigned Length = ByteCount;    /* bytes still to be copied */
+ u32 AlignBuffer;                /* word-sized staging buffer */
+ u32 *To32Ptr;
+ u32 *From32Ptr;
+ u16 *To16Ptr;
+ u16 *From16Ptr;
+ u8 *To8Ptr;
+ u8 *From8Ptr;
+
+ From32Ptr = (u32 *) SrcPtr;
+
+ if ((((u32) DestPtr) & 0x00000003) == 0) {
+
+ /*
+ * Word aligned destination buffer, no correction needed.
+ */
+
+ To32Ptr = (u32 *) DestPtr;
+
+ while (Length > 3) {
+ /*
+ * Output each whole word.
+ */
+
+ *To32Ptr++ = *From32Ptr++;
+
+ /*
+ * Adjust length accordingly.
+ */
+ Length -= 4;
+ }
+
+ /*
+ * Set up to read the remaining tail bytes.
+ */
+
+ To8Ptr = (u8 *) To32Ptr;
+
+ }
+ else if ((((u32) DestPtr) & 0x00000001) != 0) {
+ /*
+ * Byte aligned destination buffer; stage each source word in
+ * AlignBuffer and copy it out byte by byte.
+ */
+
+ To8Ptr = (u8 *) DestPtr;
+
+ while (Length > 3) {
+ /*
+ * Copy each word into the temporary buffer.
+ */
+
+ AlignBuffer = *From32Ptr++;
+ From8Ptr = (u8 *) &AlignBuffer;
+
+ /*
+ * Write data to destination.
+ */
+
+ for (i = 0; i < 4; i++) {
+ *To8Ptr++ = *From8Ptr++;
+ }
+
+ /*
+ * Adjust length
+ */
+
+ Length -= 4;
+ }
+
+ /* To8Ptr already points at the next tail byte here. */
+ }
+ else {
+ /*
+ * Half-word aligned destination buffer; stage each source word
+ * in AlignBuffer and copy it out half-word by half-word.
+ */
+
+ To16Ptr = (u16 *) DestPtr;
+
+ while (Length > 3) {
+ /*
+ * Copy each word into the temporary buffer.
+ */
+
+ AlignBuffer = *From32Ptr++;
+ From16Ptr = (u16 *) &AlignBuffer;
+
+ /*
+ * Write data to destination.
+ */
+
+ for (i = 0; i < 2; i++) {
+ *To16Ptr++ = *From16Ptr++;
+ }
+
+ /*
+ * Adjust length.
+ */
+
+ Length -= 4;
+ }
+
+ /*
+ * Set up to read the remaining tail bytes.
+ */
+
+ To8Ptr = (u8 *) To16Ptr;
+ }
+
+ /*
+ * Read the remaining (0..3) tail bytes.
+ * NOTE(review): one more source word is always read here, even when
+ * Length is 0 - confirm reading past ByteCount is safe for the
+ * source buffer (it is word-sized device RAM in this driver).
+ */
+
+ AlignBuffer = *From32Ptr++;
+ From8Ptr = (u8 *) &AlignBuffer;
+
+ for (i = 0; i < Length; i++) {
+ *To8Ptr++ = *From8Ptr++;
+ }
+}
--- /dev/null
+/* $Id: xemaclite_l.h,v 1.1.2.1 2007/03/13 17:26:08 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemaclite_l.h
+*
+* This header file contains identifiers and low-level driver functions and
+* macros that can be used to access the device.
+*
+* The Xilinx Ethernet Lite driver component. This component supports the Xilinx
+* Lite Ethernet 10/100 MAC (EMAC Lite).
+*
+* Refer to xemaclite.h for more details.
+*
+* @note
+*
+* The functions and macros in this file assume that the proper device address is
+* provided in the argument. If the ping buffer is the source or destination,
+* the argument should be DeviceAddress + XEL_(T/R)XBUFF_OFFSET. If the pong
+* buffer is the source or destination, the argument should be
+* DeviceAddress + XEL_(T/R)XBUFF_OFFSET + XEL_BUFFER_OFFSET. The driver does
+* not take the different buffers into consideration.
+* For more details on the ping/pong buffer configuration please refer to the
+* OPB Ethernet Lite Media Access Controller hardware specification.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ecm 06/01/02 First release
+* 1.01a ecm 03/31/04 Additional functionality and the _AlignedRead and
+* AlignedWrite functions.
+* Moved the bulk of description to xemaclite.h
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XEMAC_LITE_L_H /* prevent circular inclusions */
+#define XEMAC_LITE_L_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+/**
+ * Register offsets for the Ethernet MAC.
+ */
+#define XEL_TXBUFF_OFFSET (0x00000000) /**< Transmit Buffer */
+#define XEL_GIER_OFFSET (XEL_TXBUFF_OFFSET + 0x07F8) /**< Offset for the GIE bit */
+#define XEL_TSR_OFFSET (XEL_TXBUFF_OFFSET + 0x07FC) /**< Tx status */
+#define XEL_TPLR_OFFSET (XEL_TXBUFF_OFFSET + 0x07F4) /**< Tx packet length */
+
+#define XEL_RXBUFF_OFFSET (0x00001000) /**< Receive Buffer */
+#define XEL_RSR_OFFSET (XEL_RXBUFF_OFFSET + 0x07FC) /**< Rx status */
+#define XEL_RPLR_OFFSET (XEL_RXBUFF_OFFSET + 0x0C) /**< Rx packet length */
+
+#define XEL_MAC_HI_OFFSET (XEL_TXBUFF_OFFSET + 0x14) /**< MAC address hi offset */
+#define XEL_MAC_LO_OFFSET (XEL_TXBUFF_OFFSET) /**< MAC address lo offset */
+
+#define XEL_BUFFER_OFFSET (0x00000800) /**< Next buffer's offset
+ same for both TX and RX*/
+
+/**
+ * Global Interrupt Enable Register (GIER)
+ */
+#define XEL_GIER_GIE_MASK 0x80000000UL /**< Global Enable */
+
+/**
+ * Transmit Status Register (TSR)
+ */
+#define XEL_TSR_XMIT_BUSY_MASK 0x00000001UL /**< Xmit complete */
+#define XEL_TSR_PROGRAM_MASK 0x00000002UL /**< Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK 0x00000008UL /**< Xmit interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000UL /**< Buffer is active, SW bit only */
+
+/**
+ * define for programming the MAC address into the EMAC Lite
+ */
+
+#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
+
+/**
+ * Receive Status Register (RSR)
+ */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001UL /**< Recv complete */
+#define XEL_RSR_RECV_IE_MASK 0x00000008UL /**< Recv interrupt enable bit */
+
+/**
+ * Transmit Packet Length Register (TPLR)
+ */
+#define XEL_TPLR_LENGTH_MASK_HI 0x0000FF00UL /**< Transmit packet length upper byte */
+#define XEL_TPLR_LENGTH_MASK_LO 0x000000FFUL /**< Transmit packet length lower byte */
+
+/**
+ * Receive Packet Length Register (RPLR)
+ */
+#define XEL_RPLR_LENGTH_MASK_HI 0x0000FF00UL /**< Receive packet length upper byte */
+#define XEL_RPLR_LENGTH_MASK_LO 0x000000FFUL /**< Receive packet length lower byte */
+
+#define XEL_HEADER_SIZE 14 /**< Size of header in bytes */
+#define XEL_MTU_SIZE 1500 /**< Max size of data in frame */
+#define XEL_FCS_SIZE 4 /**< Size of CRC */
+
+#define XEL_HEADER_OFFSET 12 /**< Offset to length field */
+#define XEL_HEADER_SHIFT 16 /**< Right shift value to align length */
+
+
+#define XEL_MAX_FRAME_SIZE (XEL_HEADER_SIZE+XEL_MTU_SIZE+XEL_FCS_SIZE)
+ /**< Maximum length of rx frame
+ used if length/type field
+ contains the type (> 1500) */
+
+#define XEL_MAC_ADDR_SIZE 6 /**< length of MAC address */
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/****************************************************************************/
+/**
+*
+* Check to see if the transmission is complete.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return TRUE if it is done (the TSR transmit-busy bit is clear), or
+* FALSE if it is not.
+*
+* @note
+* u32 XEmacLite_mIsTxDone(u32 BaseAddress)
+*
+*****************************************************************************/
+#define XEmacLite_mIsTxDone(BaseAddress) \
+ ((XIo_In32((BaseAddress) + XEL_TSR_OFFSET) & \
+ XEL_TSR_XMIT_BUSY_MASK) != XEL_TSR_XMIT_BUSY_MASK)
+
+
+/****************************************************************************/
+/**
+*
+* Check to see if the receive is empty.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return TRUE if it is empty (the RSR receive-done bit is clear), or
+* FALSE if a frame is waiting.
+*
+* @note
+* u32 XEmacLite_mIsRxEmpty(u32 BaseAddress)
+*
+*****************************************************************************/
+#define XEmacLite_mIsRxEmpty(BaseAddress) \
+ ((XIo_In32((BaseAddress) + XEL_RSR_OFFSET) & \
+ XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK)
+
+/************************** Function Prototypes ******************************/
+
+void XEmacLite_SendFrame(u32 BaseAddress, u8 *FramePtr, unsigned ByteCount);
+u16 XEmacLite_RecvFrame(u32 BaseAddress, u8 *FramePtr);
+
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xemaclite_selftest.c,v 1.1.2.1 2007/03/13 17:26:08 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xemaclite_selftest.c
+*
+* Function(s) in this file are the required functions for the EMAC Lite
+* driver selftest for the hardware.
+* See xemaclite.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- --------------------------------------------------------
+* 1.01a ecm 01/31/04 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xstatus.h"
+#include "xemaclite_l.h"
+#include "xio.h"
+#include "xemaclite.h"
+#include "xemaclite_i.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Performs a SelfTest on the EmacLite device as follows:
+* - Writes to the mandatory TX buffer and reads back to verify.
+* - If configured, writes to the secondary TX buffer and reads back to verify.
+* - Writes to the mandatory RX buffer and reads back to verify.
+* - If configured, writes to the secondary RX buffer and reads back to verify.
+*
+*
+* @param InstancePtr is a pointer to the XEmacLite instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS if the device Passed the Self Test.
+* - XST_FAILURE if any of the data read backs fail.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XEmacLite_SelfTest(XEmacLite * InstancePtr)
+{
+ u32 BaseAddress;
+ u8 i;
+ u8 TestString[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
+ u8 ReturnString[4] = { 0x0, 0x0, 0x0, 0x0 };
+
+ /*
+ * Verify that each of the inputs are valid.
+ */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /*
+ * Determine the TX buffer address
+ */
+
+ BaseAddress = InstancePtr->BaseAddress + XEL_TXBUFF_OFFSET;
+
+ /*
+ * Write the TestString to the TX buffer in EMAC Lite, then read it
+ * back from the EMAC Lite and verify
+ */
+ XEmacLite_AlignedWrite(TestString, (u32 *) BaseAddress,
+ sizeof(TestString));
+ XEmacLite_AlignedRead((u32 *) BaseAddress, ReturnString,
+ sizeof(ReturnString));
+
+ for (i = 0; i < 4; i++) {
+
+ if (ReturnString[i] != TestString[i]) {
+ return XST_FAILURE;
+ }
+
+ /*
+ * Zero the return string for the next test
+ */
+ ReturnString[i] = 0;
+ }
+
+ /*
+ * If the second (pong) TX buffer is configured, test it also
+ */
+
+ if (InstancePtr->ConfigPtr->TxPingPong != 0) {
+ BaseAddress += XEL_BUFFER_OFFSET;
+ /*
+ * Write the TestString to the optional TX buffer in EMAC Lite,
+ * then read it back from the EMAC Lite and verify
+ */
+ XEmacLite_AlignedWrite(TestString, (u32 *) BaseAddress,
+ sizeof(TestString));
+ XEmacLite_AlignedRead((u32 *) BaseAddress, ReturnString,
+ sizeof(ReturnString));
+
+ for (i = 0; i < 4; i++) {
+
+ if (ReturnString[i] != TestString[i]) {
+ return XST_FAILURE;
+ }
+
+ /*
+ * Zero the return string for the next test
+ */
+ ReturnString[i] = 0;
+ }
+ }
+
+ /*
+ * Determine the RX buffer address
+ */
+
+ BaseAddress = InstancePtr->BaseAddress + XEL_RXBUFF_OFFSET;
+
+ /*
+ * Write the TestString to the RX buffer in EMAC Lite, then read it
+ * back from the EMAC Lite and verify
+ */
+ XEmacLite_AlignedWrite(TestString, (u32 *) (BaseAddress),
+ sizeof(TestString));
+ XEmacLite_AlignedRead((u32 *) (BaseAddress), ReturnString,
+ sizeof(ReturnString));
+
+ for (i = 0; i < 4; i++) {
+
+ if (ReturnString[i] != TestString[i]) {
+ return XST_FAILURE;
+ }
+
+ /*
+ * Zero the return string for the next test
+ */
+ ReturnString[i] = 0;
+ }
+
+ /*
+ * If the second (pong) RX buffer is configured, test it also
+ */
+
+ if (InstancePtr->ConfigPtr->RxPingPong != 0) {
+ BaseAddress += XEL_BUFFER_OFFSET;
+ /*
+ * Write the TestString to the optional RX buffer in EMAC Lite,
+ * then read it back from the EMAC Lite and verify
+ */
+ XEmacLite_AlignedWrite(TestString, (u32 *) BaseAddress,
+ sizeof(TestString));
+ XEmacLite_AlignedRead((u32 *) BaseAddress, ReturnString,
+ sizeof(ReturnString));
+
+ for (i = 0; i < 4; i++) {
+
+ if (ReturnString[i] != TestString[i]) {
+ return XST_FAILURE;
+ }
+
+ /*
+ * Zero the return string for the next test
+ */
+ ReturnString[i] = 0;
+ }
+ }
+
+ return XST_SUCCESS;
+}
--- /dev/null
+#
+# Makefile for the Xilinx Tri-mode ethernet driver
+#
+
+# Pick up the shared xilinx_common headers used by the driver sources.
+EXTRA_CFLAGS += -Idrivers/xilinx_common
+
+# The Linux adapter for the Xilinx driver code.
+xilinx_temac-objs := xlltemac_main.o xlltemac.o xlltemac_control.o
+
+# Built only when CONFIG_XILINX_LLTEMAC is enabled.
+obj-$(CONFIG_XILINX_LLTEMAC) := xilinx_temac.o
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac.c
+ *
+ * The XLlTemac driver. Functions in this file are the minimum required functions
+ * for this driver. See xlltemac.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * </pre>
+ ******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+#include <linux/delay.h>
+
+#include "xlltemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+static void InitHw(XLlTemac *InstancePtr); /* HW reset */
+
+/************************** Variable Definitions *****************************/
+
+xdbg_stmnt(int indent_on = 0;
+
+ )
+ xdbg_stmnt(u32 _xlltemac_rir_value;
+
+ )
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_CfgInitialize initializes a TEMAC channel along with the
+ * <i>InstancePtr</i> that references it. Each TEMAC channel is treated as a
+ * separate device from the point of view of this driver.
+ *
+ * The PHY is setup independently from the TEMAC. Use the MII or whatever other
+ * interface may be present for setup.
+ *
+ * @param InstancePtr references the memory instance to be associated with
+ * the TEMAC channel upon initialization.
+ * @param CfgPtr references the structure holding the hardware configuration
+ * for the TEMAC channel to initialize.
+ * @param EffectiveAddress is the processor address used to access the
+ * base address of the TEMAC channel. In systems with an MMU and virtual
+ * memory, <i>EffectiveAddress</i> is the virtual address mapped to the
+ * physical in <code>ConfigPtr->Config.BaseAddress</code>. In systems
+ * without an active MMU, <i>EffectiveAddress</i> should be set to the
+ * same value as <code>ConfigPtr->Config.BaseAddress</code>.
+ *
+ * @return XLlTemac_CfgInitialize returns XST_SUCCESS.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ *
+ ******************************************************************************/
+ int XLlTemac_CfgInitialize(XLlTemac *InstancePtr,
+ XLlTemac_Config *CfgPtr, u32 EffectiveAddress)
+{
+ /* Verify arguments */
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /* Clear instance memory and make copy of configuration.
+ * NOTE(review): CfgPtr is dereferenced here but, unlike InstancePtr,
+ * it is not asserted non-NULL - confirm all callers guarantee it. */
+ memset(InstancePtr, 0, sizeof(XLlTemac));
+ memcpy(&InstancePtr->Config, CfgPtr, sizeof(XLlTemac_Config));
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_CfgInitialize\n");
+ /* Override the configured base address with the (possibly virtual)
+ * effective address the processor actually uses */
+ InstancePtr->Config.BaseAddress = EffectiveAddress;
+
+ /* Mark ready, then reset the channel (not the whole hard core) and
+ * set default options */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ XLlTemac_Reset(InstancePtr, XTE_NORESET_HARD);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Temac_CfgInitialize: returning SUCCESS\n");
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_Start starts the TEMAC channel as follows:
+ * - Enable transmitter if XTE_TRANSMIT_ENABLE_OPTION is set
+ * - Enable receiver if XTE_RECEIVER_ENABLE_OPTION is set
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_Start(XLlTemac *InstancePtr)
+{
+ u32 Reg;
+
+ /* Assert bad arguments and conditions */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * The hard TEMAC register interface must be ready; if the mutual
+ * exclusion is enforced properly in the calling code, we should
+ * never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* If already started, then there is nothing to do */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Start\n");
+ /* Enable transmitter (TC.TX) if the option is set and the bit is not
+ * already set in hardware */
+ if (InstancePtr->Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL, "enabling transmitter\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ if (!(Reg & XTE_TC_TX_MASK)) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "transmitter not enabled, enabling now\n");
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.
+ BaseAddress, XTE_TC_OFFSET,
+ Reg | XTE_TC_TX_MASK);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL, "transmitter enabled\n");
+ }
+
+ /* Enable receiver (RCW1.RX) if the option is set and the bit is not
+ * already set in hardware */
+ if (InstancePtr->Options & XTE_RECEIVER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL, "enabling receiver\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ if (!(Reg & XTE_RCW1_RX_MASK)) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "receiver not enabled, enabling now\n");
+
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.
+ BaseAddress, XTE_RCW1_OFFSET,
+ Reg | XTE_RCW1_RX_MASK);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL, "receiver enabled\n");
+ }
+
+ /* Mark as started */
+ InstancePtr->IsStarted = XCOMPONENT_IS_STARTED;
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Start: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_Stop gracefully stops the TEMAC channel as follows:
+ * - Disable all interrupts from this device
+ * - Disable the receiver
+ *
+ * XLlTemac_Stop does not modify any of the current device options.
+ *
+ * Since the transmitter is not disabled, frames currently in internal buffers
+ * or in process by a DMA engine are allowed to be transmitted.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_Stop(XLlTemac *InstancePtr)
+{
+ u32 Reg;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * The hard TEMAC register interface must be ready; if the mutual
+ * exclusion is enforced properly in the calling code, we should
+ * never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* If already stopped, then there is nothing to do */
+ if (InstancePtr->IsStarted == 0) {
+ return;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Stop\n");
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_Stop: disabling interrupts\n");
+ /* Disable all device interrupts (the transmitter is deliberately
+ * left enabled so in-flight frames can drain) */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET, 0);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Stop: disabling receiver\n");
+ /* Disable the receiver by clearing RCW1.RX */
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg &= ~XTE_RCW1_RX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, Reg);
+
+ /* Stopping the receiver in mid-packet causes a dropped packet
+ * indication from HW. Clear it.
+ */
+ /* get the interrupt pending register */
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IP_OFFSET);
+ if (Reg & XTE_INT_RXRJECT_MASK) {
+ /* write the status register to clear the pending interrupt */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_IS_OFFSET, XTE_INT_RXRJECT_MASK);
+ }
+
+ /* Mark as stopped */
+ InstancePtr->IsStarted = 0;
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Stop: done\n");
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_Reset performs a reset of the TEMAC channel, specified by
+ * <i>InstancePtr</i>, or both channels if <i>HardCoreAction</i> is set to
+ * XTE_RESET_HARD.
+ *
+ * XLlTemac_Reset also resets the TEMAC channel's options to their default values.
+ *
+ * The calling software is responsible for re-configuring the TEMAC channel
+ * (if necessary) and restarting the MAC after the reset.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param HardCoreAction describes how XLlTemac_Reset should treat the hard core
+ * block of the TEMAC.<br><br>
+ *
+ * If XTE_RESET_HARD is set to XTE_RESET_HARD, then XLlTemac_Reset asserts
+ * the reset signal to the hard core block which will reset both channels
+ * of the TEMAC. This, of course, will disrupt any activity that may be
+ * occurring on the other channel. So, be careful here.<br><br>
+ *
+ * Otherwise, XLlTemac_Reset resets just the transmitter and receiver of
+ * this TEMAC channel.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_Reset(XLlTemac *InstancePtr, int HardCoreAction)
+{
+ u32 Reg;
+ u32 TimeoutCount = 2;   /* hard-reset ready-poll attempts */
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * The hard TEMAC register interface must be ready; if the mutual
+ * exclusion is enforced properly in the calling code, we should
+ * never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_Reset\n");
+ /* Stop the device and restore the default options */
+ XLlTemac_Stop(InstancePtr);
+ InstancePtr->Options = XTE_DEFAULT_OPTIONS;
+
+ /* Reset the receiver by setting RCW1.RST */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "resetting the receiver\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg |= XTE_RCW1_RST_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, Reg);
+
+ /* Reset the transmitter by setting TC.RST */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "resetting the transmitter\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ Reg |= XTE_TC_RST_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET, Reg);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "waiting until reset is done\n");
+ /* Poll until both self-clearing reset bits drop. The two register
+ * values are OR-ed together so the loop runs while either RST bit is
+ * still set; there is no timeout on this poll. */
+ while (Reg & (XTE_RCW1_RST_MASK | XTE_TC_RST_MASK)) {
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg |= XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ }
+
+ /* Reset hard core if required */
+ /* Resetting hard core will cause both channels to reset :-( */
+ if (HardCoreAction == XTE_RESET_HARD) {
+ xdbg_printf(XDBG_DEBUG_GENERAL, "hard reset\n");
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg | XTE_RAF_HTRST_MASK);
+ /* NOTE(review): if the ready bit never comes back, this loop
+ * gives up silently after TimeoutCount delays and InitHw is
+ * still called - confirm that is the intended behavior. */
+ while (TimeoutCount &&
+ (!(XLlTemac_ReadReg
+ (InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK))) {
+ udelay(XTE_RESET_HARD_DELAY_US);
+ TimeoutCount--;
+ }
+ }
+
+ /* Setup HW */
+ InitHw(InstancePtr);
+}
+
+
+/******************************************************************************
+ * InitHw (internal use only) performs a one-time setup of a TEMAC channel. The
+ * setup performed here only need to occur once after any reset.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutually exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+static void InitHw(XLlTemac *InstancePtr)
+{
+ u32 Reg;
+
+ /*
+ * The hard TEMAC register interface must be ready; if the mutual
+ * exclusion is enforced properly in the calling code, we should
+ * never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac InitHw\n");
+ /* Disable the receiver by clearing RCW1.RX.
+ * NOTE(review): the "XLlTemac InitHw" debug line below duplicates the
+ * one above - looks like a copy/paste leftover. */
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac InitHw\n");
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac InitHw: disabling receiver\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ Reg &= ~XTE_RCW1_RX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, Reg);
+
+ /*
+ * Stopping the receiver in mid-packet causes a dropped packet
+ * indication from HW. Clear it.
+ */
+ /* get the interrupt pending register */
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IP_OFFSET);
+ if (Reg & XTE_INT_RXRJECT_MASK) {
+ /*
+ * set the interrupt status register to clear the pending
+ * interrupt
+ */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_IS_OFFSET, XTE_INT_RXRJECT_MASK);
+ }
+
+ /* Sync default options with HW but leave receiver and transmitter
+ * disabled. They get enabled with XLlTemac_Start() if
+ * XTE_TRANSMITTER_ENABLE_OPTION and XTE_RECEIVER_ENABLE_OPTION are set
+ */
+ XLlTemac_SetOptions(InstancePtr, InstancePtr->Options &
+ ~(XTE_TRANSMITTER_ENABLE_OPTION |
+ XTE_RECEIVER_ENABLE_OPTION));
+
+ XLlTemac_ClearOptions(InstancePtr, ~InstancePtr->Options);
+
+ /* Set default MDIO divisor */
+ XLlTemac_PhySetMdioDivisor(InstancePtr, XTE_MDIO_DIV_DFT);
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac InitHw: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetMacAddress sets the MAC address for the TEMAC channel, specified
+ * by <i>InstancePtr</i> to the MAC address specified by <i>AddressPtr</i>.
+ * The TEMAC channel must be stopped before calling this function.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr is a reference to the 6-byte MAC address to set.
+ *
+ * @return On successful completion, XLlTemac_SetMacAddress returns XST_SUCCESS.
+ * Otherwise, if the TEMAC channel has not stopped,
+ * XLlTemac_SetMacAddress returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SetMacAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ u32 MacAddr;
+ u8 *Aptr = (u8 *) AddressPtr;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_NONVOID(AddressPtr != NULL);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetMacAddress: setting mac address to: 0x%08x%8x%8x%8x%8x%8x\n", /* NOTE(review): '%8x' space-pads; '%02x' was probably intended */
+ Aptr[0], Aptr[1], Aptr[2], Aptr[3], Aptr[4], Aptr[5]);
+ /*
+ * Set the MAC bits [31:0] in UAW0
+ * Having Aptr be unsigned type prevents the following operations from sign extending
+ */
+ MacAddr = Aptr[0];
+ MacAddr |= Aptr[1] << 8;
+ MacAddr |= Aptr[2] << 16;
+ MacAddr |= Aptr[3] << 24;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW0_OFFSET, MacAddr);
+
+ /* There are reserved bits in UAW1 so don't affect them */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW1_OFFSET);
+ MacAddr &= ~XTE_UAW1_UNICASTADDR_MASK;
+
+ /* Set MAC bits [47:32] in UAW1 */
+ MacAddr |= Aptr[4];
+ MacAddr |= Aptr[5] << 8;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW1_OFFSET, MacAddr);
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetMacAddress gets the MAC address for the TEMAC channel, specified
+ * by <i>InstancePtr</i> into the memory buffer specified by <i>AddressPtr</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr references the memory buffer to store the retrieved MAC
+ * address. This memory buffer must be at least 6 bytes in length.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_GetMacAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+ u32 MacAddr;
+ u8 *Aptr = (u8 *) AddressPtr;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* Read MAC bits [31:0] in UAW0 */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW0_OFFSET);
+ Aptr[0] = (u8) MacAddr;
+ Aptr[1] = (u8) (MacAddr >> 8);
+ Aptr[2] = (u8) (MacAddr >> 16);
+ Aptr[3] = (u8) (MacAddr >> 24);
+
+ /* Read MAC bits [47:32] in UAW1 */
+ MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_UAW1_OFFSET);
+ Aptr[4] = (u8) MacAddr;
+ Aptr[5] = (u8) (MacAddr >> 8);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetOptions enables the options, <i>Options</i> for the TEMAC channel,
+ * specified by <i>InstancePtr</i>. The TEMAC channel should be stopped with
+ * XLlTemac_Stop() before changing options.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Options is a bitmask of OR'd XTE_*_OPTION values for options to
+ * set. Options not specified are not affected.
+ *
+ * @return On successful completion, XLlTemac_SetOptions returns XST_SUCCESS.
+ * Otherwise, if the device has not been stopped, XLlTemac_SetOptions
+ * returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ * See xlltemac.h for a description of the available options.
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SetOptions(XLlTemac *InstancePtr, u32 Options)
+{
+ u32 Reg; /* Generic register contents */
+ u32 RegRcw1; /* Reflects original contents of RCW1 */
+ u32 RegTc; /* Reflects original contents of TC */
+ u32 RegNewRcw1; /* Reflects new contents of RCW1 */
+ u32 RegNewTc; /* Reflects new contents of TC */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetOptions\n");
+ /* Many of these options will change the RCW1 or TC registers.
+ * To reduce the amount of IO to the device, group these options here
+ * and change them all at once.
+ */
+
+ /* Grab current register contents */
+ RegRcw1 = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ RegTc = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ RegNewRcw1 = RegRcw1;
+ RegNewTc = RegTc;
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "current control regs: RCW1: 0x%0x; TC: 0x%0x\n", RegRcw1,
+ RegTc);
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Options: 0x%0x; default options: 0x%0x\n", Options,
+ XTE_DEFAULT_OPTIONS);
+
+ /* Turn on jumbo packet support for both Rx and Tx */
+ if (Options & XTE_JUMBO_OPTION) {
+ RegNewTc |= XTE_TC_JUM_MASK;
+ RegNewRcw1 |= XTE_RCW1_JUM_MASK;
+ }
+
+ /* Turn on VLAN packet support for both Rx and Tx */
+ if (Options & XTE_VLAN_OPTION) {
+ RegNewTc |= XTE_TC_VLAN_MASK;
+ RegNewRcw1 |= XTE_RCW1_VLAN_MASK;
+ }
+
+ /* Turn on FCS stripping on receive packets */
+ if (Options & XTE_FCS_STRIP_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: enabling fcs stripping\n");
+ RegNewRcw1 &= ~XTE_RCW1_FCS_MASK;
+ }
+
+ /* Turn on FCS insertion on transmit packets */
+ if (Options & XTE_FCS_INSERT_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: enabling fcs insertion\n");
+ RegNewTc &= ~XTE_TC_FCS_MASK;
+ }
+
+ /* Turn on length/type field checking on receive packets */
+ if (Options & XTE_LENTYPE_ERR_OPTION) {
+ RegNewRcw1 &= ~XTE_RCW1_LT_DIS_MASK;
+ }
+
+ /* Enable transmitter */
+ if (Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ RegNewTc |= XTE_TC_TX_MASK;
+ }
+
+ /* Enable receiver */
+ if (Options & XTE_RECEIVER_ENABLE_OPTION) {
+ RegNewRcw1 |= XTE_RCW1_RX_MASK;
+ }
+
+ /* Change the TC or RCW1 registers if they need to be modified */
+ if (RegTc != RegNewTc) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: writting tc: 0x%0x\n", RegNewTc);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET, RegNewTc);
+ }
+
+ if (RegRcw1 != RegNewRcw1) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: writting rcw1: 0x%0x\n", RegNewRcw1);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, RegNewRcw1);
+ }
+
+ /* Rest of options twiddle bits of other registers. Handle them one at
+ * a time
+ */
+
+ /* Turn on flow control */
+ if (Options & XTE_FLOW_CONTROL_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: endabling flow control\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET);
+ Reg |= XTE_FCC_FCRX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (fcc): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* Turn on promiscuous frame filtering (all frames are received) */
+ if (Options & XTE_PROMISC_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: endabling promiscuous mode\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET);
+ Reg |= XTE_AFM_PM_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (afm): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* Allow broadcast address filtering */
+ if (Options & XTE_BROADCAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg &= ~XTE_RAF_BCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (raf): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* Allow multicast address filtering */
+ if (Options & XTE_MULTICAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg &= ~XTE_RAF_MCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (raf2): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+
+ /* The remaining options not handled here are managed elsewhere in the
+ * driver. No register modifications are needed at this time. Reflecting the
+ * option in InstancePtr->Options is good enough for now.
+ */
+
+ /* Set options word to its new value */
+ InstancePtr->Options |= Options;
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "setOptions: rcw1 is now (end): 0x%0x\n",
+ XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET));
+ xdbg_printf(XDBG_DEBUG_GENERAL, "setOptions: returning SUCCESS\n");
+ return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_ClearOptions clears the options, <i>Options</i> for the TEMAC channel,
+ * specified by <i>InstancePtr</i>. The TEMAC channel should be stopped with
+ * XLlTemac_Stop() before changing options.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Options is a bitmask of OR'd XTE_*_OPTION values for options to
+ * clear. Options not specified are not affected.
+ *
+ * @return On successful completion, XLlTemac_ClearOptions returns XST_SUCCESS.
+ * Otherwise, if the device has not been stopped, XLlTemac_ClearOptions
+ * returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ * See xlltemac.h for a description of the available options.
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_ClearOptions(XLlTemac *InstancePtr, u32 Options)
+{
+ u32 Reg; /* Generic */
+ u32 RegRcw1; /* Reflects original contents of RCW1 */
+ u32 RegTc; /* Reflects original contents of TC */
+ u32 RegNewRcw1; /* Reflects new contents of RCW1 */
+ u32 RegNewTc; /* Reflects new contents of TC */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "Xtemac_ClearOptions: 0x%08x\n",
+ Options);
+ /* Be sure device has been stopped */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Many of these options will change the RCW1 or TC registers.
+ * Group these options here and change them all at once. What we are
+ * trying to accomplish is to reduce the amount of IO to the device
+ */
+
+ /* Grab current register contents */
+ RegRcw1 = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET);
+ RegTc = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET);
+ RegNewRcw1 = RegRcw1;
+ RegNewTc = RegTc;
+
+ /* Turn off jumbo packet support for both Rx and Tx */
+ if (Options & XTE_JUMBO_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling jumbo\n");
+ RegNewTc &= ~XTE_TC_JUM_MASK;
+ RegNewRcw1 &= ~XTE_RCW1_JUM_MASK;
+ }
+
+ /* Turn off VLAN packet support for both Rx and Tx */
+ if (Options & XTE_VLAN_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling vlan\n");
+ RegNewTc &= ~XTE_TC_VLAN_MASK;
+ RegNewRcw1 &= ~XTE_RCW1_VLAN_MASK;
+ }
+
+ /* Turn off FCS stripping on receive packets */
+ if (Options & XTE_FCS_STRIP_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling fcs strip\n");
+ RegNewRcw1 |= XTE_RCW1_FCS_MASK;
+ }
+
+ /* Turn off FCS insertion on transmit packets */
+ if (Options & XTE_FCS_INSERT_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling fcs insert\n");
+ RegNewTc |= XTE_TC_FCS_MASK;
+ }
+
+ /* Turn off length/type field checking on receive packets */
+ if (Options & XTE_LENTYPE_ERR_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling lentype err\n");
+ RegNewRcw1 |= XTE_RCW1_LT_DIS_MASK;
+ }
+
+ /* Disable transmitter */
+ if (Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling transmitter\n");
+ RegNewTc &= ~XTE_TC_TX_MASK;
+ }
+
+ /* Disable receiver */
+ if (Options & XTE_RECEIVER_ENABLE_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling receiver\n");
+ RegNewRcw1 &= ~XTE_RCW1_RX_MASK;
+ }
+
+ /* Change the TC and RCW1 registers if they need to be
+ * modified
+ */
+ if (RegTc != RegNewTc) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: setting TC: 0x%0x\n",
+ RegNewTc);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TC_OFFSET, RegNewTc);
+ }
+
+ if (RegRcw1 != RegNewRcw1) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: setting RCW1: 0x%0x\n",
+ RegNewRcw1);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_RCW1_OFFSET, RegNewRcw1);
+ }
+
+ /* Rest of options twiddle bits of other registers. Handle them one at
+ * a time
+ */
+
+ /* Turn off flow control */
+ if (Options & XTE_FLOW_CONTROL_OPTION) {
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET);
+ Reg &= ~XTE_FCC_FCRX_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_FCC_OFFSET, Reg);
+ }
+
+ /* Turn off promiscuous frame filtering */
+ if (Options & XTE_PROMISC_OPTION) {
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: disabling promiscuous mode\n");
+ Reg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET);
+ Reg &= ~XTE_AFM_PM_MASK;
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "Xtemac_ClearOptions: setting AFM: 0x%0x\n", Reg);
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_AFM_OFFSET, Reg);
+ }
+
+ /* Disable broadcast address filtering */
+ if (Options & XTE_BROADCAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg |= XTE_RAF_BCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+
+ /* Disable multicast address filtering */
+ if (Options & XTE_MULTICAST_OPTION) {
+ Reg = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET);
+ Reg |= XTE_RAF_MCSTREJ_MASK;
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress,
+ XTE_RAF_OFFSET, Reg);
+ }
+
+ /* The remaining options not handled here are managed elsewhere in the
+ * driver. No register modifications are needed at this time. Reflecting the
+ * option in InstancePtr->Options is good enough for now.
+ */
+
+ /* Set options word to its new value */
+ InstancePtr->Options &= ~Options;
+
+ return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetOptions returns the current option settings.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_GetOptions returns a bitmask of XTE_*_OPTION constants,
+ * each bit specifying an option that is currently active.
+ *
+ * @note
+ * See xlltemac.h for a description of the available options.
+ *
+ ******************************************************************************/
+u32 XLlTemac_GetOptions(XLlTemac *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ return (InstancePtr->Options); /* cached copy; no hardware access performed */
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetOperatingSpeed gets the current operating link speed. This may be
+ * the value set by XLlTemac_SetOperatingSpeed() or a hardware default.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_GetOperatingSpeed returns the link speed in units of megabits
+ * per second. A return of 0 means the EMMC LINKSPEED field held an
+ * unrecognized value.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+u16 XLlTemac_GetOperatingSpeed(XLlTemac *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetOperatingSpeed\n");
+ switch (XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_EMMC_OFFSET) &
+ XTE_EMMC_LINKSPEED_MASK) {
+ case XTE_EMMC_LINKSPD_1000:
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetOperatingSpeed: returning 1000\n");
+ return (1000);
+
+ case XTE_EMMC_LINKSPD_100:
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetOperatingSpeed: returning 100\n");
+ return (100);
+
+ case XTE_EMMC_LINKSPD_10:
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_GetOperatingSpeed: returning 10\n");
+ return (10);
+
+ default:
+ return (0);
+ }
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetOperatingSpeed sets the current operating link speed. For any
+ * traffic to be passed, this speed must match the current MII/GMII/SGMII/RGMII
+ * link speed.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Speed is the speed to set in units of Mbps. Valid values are 10, 100,
+ * or 1000. XLlTemac_SetOperatingSpeed ignores invalid values.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_SetOperatingSpeed(XLlTemac *InstancePtr, u16 Speed)
+{
+ u32 EmmcReg;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID((Speed == 10) || (Speed == 100) || (Speed == 1000));
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetOperatingSpeed\n");
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetOperatingSpeed: setting speed to: %d (0x%0x)\n",
+ Speed, Speed);
+ /* Get the current contents of the EMAC config register and zero out
+ * speed bits
+ */
+ EmmcReg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_EMMC_OFFSET) &
+ ~XTE_EMMC_LINKSPEED_MASK;
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetOperatingSpeed: current speed: 0x%0x\n",
+ EmmcReg);
+ switch (Speed) {
+ case 10:
+ break; /* 10 Mbps: LINKSPEED field remains cleared */
+
+ case 100:
+ EmmcReg |= XTE_EMMC_LINKSPD_100;
+ break;
+
+ case 1000:
+ EmmcReg |= XTE_EMMC_LINKSPD_1000;
+ break;
+
+ default:
+ return;
+ }
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_SetOperatingSpeed: new speed: 0x%0x\n", EmmcReg);
+ /* Set register and return */
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_EMMC_OFFSET, EmmcReg);
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetOperatingSpeed: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_PhySetMdioDivisor sets the MDIO clock divisor in the TEMAC channel,
+ * specified by <i>InstancePtr</i> to the value, <i>Divisor</i>. This function
+ * must be called once after each reset prior to accessing MII PHY registers.
+ *
+ * From the Virtex-4 Embedded Tri-Mode Ethernet MAC User's Guide, the
+ * following equation governs the MDIO clock to the PHY:
+ *
+ * <pre>
+ * f[HOSTCLK]
+ * f[MDC] = -----------------
+ * (1 + Divisor) * 2
+ * </pre>
+ *
+ * where f[HOSTCLK] is the bus clock frequency in MHz, and f[MDC] is the
+ * MDIO clock frequency in MHz to the PHY. Typically, f[MDC] should not
+ * exceed 2.5 MHz. Some PHYs can tolerate faster speeds which means faster
+ * access.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Divisor is the divisor value to set within the range of 0 to
+ * XTE_MC_CLK_DVD_MAX.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_PhySetMdioDivisor(XLlTemac *InstancePtr, u8 Divisor)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY) /* NOTE(review): missing ';' — compiles only because XASSERT_VOID expands to a block */
+ XASSERT_VOID(Divisor <= XTE_MC_CLOCK_DIVIDE_MAX);
+
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_PhySetMdioDivisor\n");
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MC_OFFSET,
+ (u32) Divisor | XTE_MC_MDIOEN_MASK);
+}
+
+/*****************************************************************************/
+/*
+ * XLlTemac_PhyRead reads the specified PHY register, <i>RegisterNum</i> on the
+ * PHY specified by <i>PhyAddress</i> into <i>PhyDataPtr</i>. This Ethernet
+ * driver does not require the device to be stopped before reading from the PHY.
+ * It is the responsibility of the calling code to stop the device if it is
+ * deemed necessary.
+ *
+ * Note that the TEMAC hardware provides the ability to talk to a PHY that
+ * adheres to the Media Independent Interface (MII) as defined in the IEEE 802.3
+ * standard.
+ *
+ * <b>It is important that calling code set up the MDIO clock with
+ * XLlTemac_PhySetMdioDivisor() prior to accessing the PHY with this function.</b>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param PhyAddress is the address of the PHY to be read (multiple
+ * PHYs supported).
+ * @param RegisterNum is the register number, 0-31, of the specific PHY register
+ * to read.
+ * @param PhyDataPtr is a reference to the location where the 16-bit result
+ * value is stored.
+ *
+ * @return N/A
+ *
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.<br><br>
+ *
+ * There is the possibility that this function will not return if the hardware
+ * is broken (i.e., it never sets the status bit indicating that the write is
+ * done). If this is of concern, the calling code should provide a mechanism
+ * suitable for recovery.
+ *
+ ******************************************************************************/
+void XLlTemac_PhyRead(XLlTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 *PhyDataPtr)
+{
+ u32 MiiReg;
+ u32 Rdy;
+ u32 Ie;
+ u32 Tis;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_PhyRead: BaseAddress: 0x%08x\n",
+ InstancePtr->Config.BaseAddress);
+ /*
+ * XLlTemac_PhyRead saves the state of the IE register so that it can
+ * clear the HardAcsCmplt bit and later restore the state of the IE
+ * register. Since XLlTemac_PhyRead will poll for the status already, the
+ * HardAcsCmplt bit is cleared in the IE register so that the
+ * application code above doesn't also receive the interrupt.
+ */
+ Ie = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET,
+ Ie & ~XTE_INT_HARDACSCMPLT_MASK);
+
+ /*
+ * This is a double indirect mechanism. We indirectly write the
+ * PHYAD and REGAD so we can read the PHY register back out in
+ * the LSW register.
+ *
+ * In this case, the method of reading the data is a little unusual.
+ * Normally to write to a TEMAC register, one would set the WEN bit
+ * in the CTL register so that the values of the LSW will be written.
+ *
+ * In this case, the WEN bit is not set, and the PHYAD and REGAD
+ * values in the LSW will still get sent to the PHY before actually
+ * reading the result in the LSW.
+ *
+ * What needs to be done, is the following:
+ * 1) Write lsw reg with the phyad, and the regad
+ * 2) write the ctl reg with the miimai value (BUT WEN bit set to 0!!!)
+ * 3) poll the ready bit
+ * 4) get the value out of lsw
+ */
+ MiiReg = RegisterNum & XTE_MIIM_REGAD_MASK;
+ MiiReg |= ((PhyAddress << XTE_MIIM_PHYAD_SHIFT) & XTE_MIIM_PHYAD_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_PhyRead: Mii Reg: 0x%0x; Value written: 0x%0x\n",
+ RegisterNum, MiiReg);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_LSW_OFFSET,
+ MiiReg);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_CTL_OFFSET,
+ XTE_MIIMAI_OFFSET);
+
+ /*
+ * Wait here polling, until the value is ready to be read.
+ */
+ do {
+ Rdy = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET);
+ } while (!(Rdy & XTE_RSE_MIIM_RR_MASK)); /* NOTE(review): RSE-named mask tested against the RDY register — confirm the bit layouts match */
+
+ /* Read data */
+ *PhyDataPtr = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_LSW_OFFSET);
+ xdbg_printf(XDBG_DEBUG_GENERAL,
+ "XLlTemac_PhyRead: Value retrieved: 0x%0x\n", *PhyDataPtr);
+
+ /*
+ * Clear MII status bits. The TIS register in the hard TEMAC doesn't
+ * use the 'write a 1 to clear' method, so we need to read the TIS
+ * register, clear the MIIM RR bit, and then write it back out.
+ */
+ Tis = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET);
+ Tis &= ~XTE_RSE_MIIM_RR_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET, Tis);
+
+ /*
+ * restore the state of the IE reg
+ */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET, Ie);
+}
+
+
+/*****************************************************************************/
+/*
+ * XLlTemac_PhyWrite writes <i>PhyData</i> to the specified PHY register,
+ * <i>RegisterNum</i> on the PHY specified by <i>PhyAddress</i>. This Ethernet
+ * driver does not require the device to be stopped before writing to the PHY.
+ * It is the responsibility of the calling code to stop the device if it is
+ * deemed necessary.
+ *
+ * Note that the TEMAC hardware provides the ability to talk to a PHY that
+ * adheres to the Media Independent Interface (MII) as defined in the IEEE 802.3
+ * standard.
+ *
+ * <b>It is important that calling code set up the MDIO clock with
+ * XLlTemac_PhySetMdioDivisor() prior to accessing the PHY with this function.</b>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param PhyAddress is the address of the PHY to be written (multiple
+ * PHYs supported).
+ * @param RegisterNum is the register number, 0-31, of the specific PHY register
+ * to write.
+ * @param PhyData is the 16-bit value that will be written to the register.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Becuase of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driverr.<br><br>
+ *
+ * There is the possibility that this function will not return if the hardware
+ * is broken (i.e., it never sets the status bit indicating that the write is
+ * done). If this is of concern, the calling code should provide a mechanism
+ * suitable for recovery.
+ *
+ ******************************************************************************/
+void XLlTemac_PhyWrite(XLlTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 PhyData)
+{
+ u32 MiiReg;
+ u32 Rdy;
+ u32 Ie;
+ u32 Tis;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ /*
+ * If the mutual exclusion is enforced properly in the calling code, we
+ * should never get into the following case.
+ */
+ XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET) &
+ XTE_RDY_HARD_ACS_RDY_MASK);
+
+ xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_PhyWrite\n");
+ /*
+ * XLlTemac_PhyWrite saves the state of the IE register so that it can
+ * clear the HardAcsCmplt bit and later restore the state of the IE
+ * register. Since XLlTemac_PhyWrite will poll for the status already, the
+ * HardAcsCmplt bit is cleared in the IE register so that the
+ * application code above doesn't also receive the interrupt.
+ */
+ Ie = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET);
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET,
+ Ie & ~XTE_INT_HARDACSCMPLT_MASK);
+
+ /*
+ * This is a double indirect mechanism. We indirectly write the
+ * PhyData to the MIIMWD register, and then indirectly write PHYAD and
+ * REGAD so the value in MIIMWD will get written to the PHY.
+ */
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MIIMWD_OFFSET, PhyData);
+
+ MiiReg = RegisterNum & XTE_MIIM_REGAD_MASK;
+ MiiReg |= ((PhyAddress << XTE_MIIM_PHYAD_SHIFT) & XTE_MIIM_PHYAD_MASK);
+
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_MIIMAI_OFFSET, MiiReg);
+
+ /*
+ * Wait here polling, until the value is ready to be read.
+ */
+ do {
+ Rdy = XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+ XTE_RDY_OFFSET);
+ } while (!(Rdy & XTE_RSE_MIIM_WR_MASK));
+
+ /*
+ * Clear MII status bits. The TIS register in the hard TEMAC doesn't
+ * use the 'write a 1 to clear' method, so we need to read the TIS
+ * register, clear the MIIM WST bit, and then write it back out.
+ */
+ Tis = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET);
+ Tis &= XTE_RSE_MIIM_WR_MASK;
+ XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+ XTE_TIS_OFFSET, Tis);
+
+ /*
+ * restore the state of the IE reg
+ */
+ XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_IE_OFFSET, Ie);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac.h
+ *
+ * The Xilinx Tri-Mode Ethernet driver component. This driver supports the
+ * Virtex-5(TM) and Virtex-4(TM) 10/100/1000 MAC (TEMAC).
+ *
+ * For a full description of TEMAC features, please see the hardware spec. This driver
+ * supports the following features:
+ * - Memory mapped access to host interface registers
+ * - Virtual memory support
+ * - Unicast, broadcast, and multicast receive address filtering
+ * - Full duplex operation (half duplex not supported)
+ * - Automatic source address insertion or overwrite (programmable)
+ * - Automatic PAD & FCS insertion and stripping (programmable)
+ * - Flow control
+ * - VLAN frame support
+ * - Pause frame support
+ * - Jumbo frame support
+ * - Checksum offload
+ *
+ * <h2>Driver Description</h2>
+ *
+ * The device driver enables higher layer software (e.g., an application) to
+ * configure a TEMAC channel. It is intended that this driver be used in
+ * cooperation with another driver (FIFO or DMA) for data communication. This
+ * device driver can support multiple devices even when those devices have
+ * significantly different configurations.
+ *
+ * <h2>Initialization & Configuration</h2>
+ *
+ * The XLlTemac_Config structure can be used by the driver to configure itself.
+ * This configuration structure is typically created by the tool-chain based on
+ * hardware build properties, although, other methods are allowed and currently
+ * used in some systems.
+ *
+ * To support multiple runtime loading and initialization strategies employed
+ * by various operating systems, the driver instance can be initialized using
+ * the XLlTemac_CfgInitialize() routine.
+ *
+ * <h2>Interrupts and Asynchronous Callbacks</h2>
+ *
+ * The driver has no dependencies on the interrupt controller. It provides
+ * no interrupt handlers. The application/OS software should set up its own
+ * interrupt handlers if required.
+ *
+ * <h2>Device Reset</h2>
+ *
+ * When a TEMAC channel is connected up to a FIFO or DMA core in hardware,
+ * errors may be reported on one of those cores (FIFO or DMA) such that it can
+ * be determined that the TEMAC channel needs to be reset. If a reset is
+ * performed, the calling code should also reconfigure and reapply the proper
+ * settings in the TEMAC channel.
+ *
+ * When a TEMAC channel reset is required, XLlTemac_Reset() should be utilized.
+ *
+ * <h2>Virtual Memory</h2>
+ *
+ * This driver may be used in systems with virtual memory support by passing
+ * the appropriate value for the <i>EffectiveAddress</i> parameter to the
+ * XLlTemac_CfgInitialize() routine.
+ *
+ * <h2>Transferring Data</h2>
+ *
+ * The TEMAC core by itself is not capable of transmitting or receiving data in
+ * any meaningful way. Instead one or both TEMAC channels need to be connected
+ * to a FIFO or DMA core in hardware.
+ *
+ * This TEMAC driver is modeled in a similar fashion where the application code
+ * or O/S adapter driver needs to make use of a separate FIFO or DMA driver in
+ * connection with this driver to establish meaningful communication over
+ * ethernet.
+ *
+ * <h2>Checksum Offloading</h2>
+ *
+ * If configured, the device can compute a 16-bit checksum from frame data. In
+ * most circumstances this can lead to a substantial gain in throughput.
+ *
+ * The checksum offload settings for each frame sent or received are
+ * transmitted through the LocalLink interface in hardware. What this means is
+ * that the checksum offload feature is indirectly controlled in the TEMAC
+ * channel through the driver for the FIFO or DMA core connected to the TEMAC
+ * channel.
+ *
+ * Refer to the documentation for the FIFO or DMA driver used for data
+ * communication on how to set the values for the relevant LocalLink header
+ * words.
+ *
+ * Since this hardware implementation is general purpose in nature, system software must
+ * perform pre and post frame processing to obtain the desired results for the
+ * types of packets being transferred. Most of the time this will be TCP/IP
+ * traffic.
+ *
+ * TCP/IP and UDP/IP frames contain separate checksums for the IP header and
+ * UDP/TCP header+data. With this hardware implementation, the IP header checksum
+ * cannot be offloaded. Many stacks that support offloading will compute the IP
+ * header if required and use hardware to compute the UDP/TCP header+data checksum.
+ * There are other complications concerning the IP pseudo header that must be
+ * taken into consideration. Readers should consult a TCP/IP design reference
+ * for more details.
+ *
+ * There are certain device options that will affect the checksum calculation
+ * performed by hardware for Tx:
+ *
+ * - FCS insertion disabled (XTE_FCS_INSERT_OPTION): software is required to
+ * calculate and insert the FCS value at the end of the frame, but the
+ * checksum must be known ahead of time prior to calculating the FCS.
+ * Therefore checksum offloading cannot be used in this situation.
+ *
+ * And for Rx:
+ *
+ * - FCS/PAD stripping disabled (XTE_FCS_STRIP_OPTION): The 4 byte FCS at the
+ * end of frame will be included in the hardware calculated checksum. software must
+ * subtract out this data.
+ *
+ * - FCS/PAD stripping disabled (XTE_FCS_STRIP_OPTION): For frames smaller
+ * than 64 bytes, padding will be included in the hardware calculated checksum.
+ * software must subtract out this data. It may be better to allow the TCP/IP
+ * stack verify checksums for this type of packet.
+ *
+ * - VLAN enabled (XTE_VLAN_OPTION): The 4 extra bytes in the Ethernet header
+ * affect the hardware calculated checksum. software must subtract out the 1st two
+ * 16-bit words starting at the 15th byte.
+ *
+ * <h3>Transmit Checksum Offloading</h3>
+ *
+ * For transmit, the software can specify where in the frame the checksum
+ * calculation is to start, where the result should be inserted, and a seed
+ * value. The checksum is calculated from the start point through the end of
+ * frame.
+ *
+ * The checksum offloading settings are sent in the transmit LocalLink header
+ * words. The relevant LocalLink header words are described in brief below.
+ * Refer to the XPS_LL_TEMAC v1.00a hardware specification for more details.
+ *
+ * <h4>LocalLink header word 3:</h4>
+ * <pre>
+ * Bits 31 (MSB): Transmit Checksum Enable: 1 - enabled, 0 - disabled
+ * Bits 0-30 (LSB): Reserved
+ * </pre>
+ *
+ * <h4>LocalLink header word 4:</h4>
+ * <pre>
+ * Bits 16-31 (MSB): Transmit Checksum Insertion Point: Frame offset where the
+ * computed checksum value is stored, which should be in the
+ * TCP or UDP header
+ * Bits 0-15 (LSB): Transmit Checksum Calculation Starting Point: Offset
+ * in the frame where checksum calculation should begin
+ * </pre>
+ *
+ * <h4>LocalLink header word 5:</h4>
+ * <pre>
+ * Bits 16-31 (MSB): Transmit Checksum Calculation Initial Value: Checksum
+ * seed value
+ * Bits 0-15 (LSB): Reserved
+ * </pre>
+ *
+ * <h3>Receive Checksum Offloading</h3>
+ *
+ * For Receive, the 15th byte to end of frame is checksummed. This range of
+ * bytes is the entire Ethernet payload (for non-VLAN frames).
+ *
+ * The checksum offloading information is sent in the receive LocalLink header
+ * words. The relevant LocalLink header words are described in brief below.
+ * Refer to the XPS_LL_TEMAC v1.00a hardware specification for more details.
+ *
+ * <h4>LocalLink header word 6:</h4>
+ * <pre>
+ * Bits 16-31 (MSB): Receive Raw Checksum: Computed checksum value
+ * Bits 0-15 (LSB): Reserved
+ * </pre>
+ *
+ * <h2>PHY Communication</h2>
+ *
+ * Prior to PHY access, the MDIO clock must be setup. This driver will set a
+ * safe default that should work with PLB bus speeds of up to 150 MHz and keep
+ * the MDIO clock below 2.5 MHz. If the user wishes faster access to the PHY
+ * then the clock divisor can be set to a different value (see
+ * XLlTemac_PhySetMdioDivisor()).
+ *
+ * MII register access is performed through the functions XLlTemac_PhyRead() and
+ * XLlTemac_PhyWrite().
+ *
+ * <h2>Link Sync</h2>
+ *
+ * When the device is used in a multispeed environment, the link speed must be
+ * explicitly set using XLlTemac_SetOperatingSpeed() and must match the speed the
+ * PHY has negotiated. If the speeds are mismatched, then the MAC will not pass
+ * traffic.
+ *
+ * The application/OS software may use the AutoNegotiation interrupt to be
+ * notified when the PHY has completed auto-negotiation.
+ *
+ * <h2>Asserts</h2>
+ *
+ * Asserts are used within all Xilinx drivers to enforce constraints on argument
+ * values. Asserts can be turned off on a system-wide basis by defining, at
+ * compile time, the NDEBUG identifier. By default, asserts are turned on and it
+ * is recommended that users leave asserts on during development. For deployment
+ * use -DNDEBUG compiler switch to remove assert code.
+ *
+ * <h2>Driver Errata</h2>
+ *
+ * - A dropped receive frame indication may be reported by the driver after
+ * calling XLlTemac_Stop() followed by XLlTemac_Start(). This can occur if a
+ * frame is arriving when stop is called.
+ * - On Rx with checksum offloading enabled and FCS/PAD stripping disabled,
+ * FCS and PAD data will be included in the checksum result.
+ * - On Tx with checksum offloading enabled and auto FCS insertion disabled,
+ * the user calculated FCS will be included in the checksum result.
+ *
+ * @note
+ *
+ * Xilinx drivers are typically composed of two components, one is the driver
+ * and the other is the adapter. The driver is independent of OS and processor
+ * and is intended to be highly portable. The adapter is OS-specific and
+ * facilitates communication between the driver and an OS.
+ * <br><br>
+ * This driver is intended to be RTOS and processor independent. Any needs for
+ * dynamic memory management, threads or thread mutual exclusion, or cache
+ * control must be satisfied by the layer above this driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * 1.00a rpm 06/08/07 Added interrupt IDs to config structure for convenience
+ * </pre>
+ *
+ *****************************************************************************/
+
+#ifndef XTEMAC_H /* prevent circular inclusions */
+#define XTEMAC_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xenv.h"
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xlltemac_hw.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Device information
+ */
+#define XTE_DEVICE_NAME "xlltemac"
+#define XTE_DEVICE_DESC "Xilinx Tri-speed 10/100/1000 MAC"
+
+/* LocalLink TYPE Enumerations */
+#define XPAR_LL_FIFO 1
+#define XPAR_LL_DMA 2
+
+/** @name Configuration options
+ *
+ * The following are device configuration options. See the
+ * <i>XLlTemac_SetOptions</i>, <i>XLlTemac_ClearOptions</i> and
+ * <i>XLlTemac_GetOptions</i> routines for information on how to use options.
+ *
+ * The default state of the options are also noted below.
+ *
+ * @{
+ */
+
+#define XTE_PROMISC_OPTION 0x00000001
+/**< XTE_PROMISC_OPTION specifies the TEMAC channel to accept all incoming
+ * packets.
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_JUMBO_OPTION 0x00000002
+/**< XTE_JUMBO_OPTION specifies the TEMAC channel to accept jumbo frames
+ * for transmit and receive.
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_VLAN_OPTION 0x00000004
+/**< XTE_VLAN_OPTION specifies the TEMAC channel to enable VLAN support for
+ * transmit and receive.
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_FLOW_CONTROL_OPTION 0x00000008
+/**< XTE_FLOW_CONTROL_OPTION specifies the TEMAC channel to recognize
+ * received flow control frames.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_FCS_STRIP_OPTION 0x00000010
+/**< XTE_FCS_STRIP_OPTION specifies the TEMAC channel to strip FCS and PAD
+ * from received frames. Note that PAD from VLAN frames is not stripped.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_FCS_INSERT_OPTION 0x00000020
+/**< XTE_FCS_INSERT_OPTION specifies the TEMAC channel to generate the FCS
+ * field and add PAD automatically for outgoing frames.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_LENTYPE_ERR_OPTION 0x00000040
+/**< XTE_LENTYPE_ERR_OPTION specifies the TEMAC channel to enable
+ * Length/Type error checking (mismatched type/length field) for received
+ * frames.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_TRANSMITTER_ENABLE_OPTION 0x00000080
+/**< XTE_TRANSMITTER_ENABLE_OPTION specifies the TEMAC channel transmitter
+ * to be enabled.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_RECEIVER_ENABLE_OPTION 0x00000100
+/**< XTE_RECEIVER_ENABLE_OPTION specifies the TEMAC channel receiver to be
+ * enabled.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_BROADCAST_OPTION 0x00000200
+/**< XTE_BROADCAST_OPTION specifies the TEMAC channel to receive frames
+ * sent to the broadcast Ethernet address.
+ * This driver sets this option to enabled (set) by default. */
+
+#define XTE_MULTICAST_OPTION 0x00000400
+/**< XTE_MULTICAST_OPTION specifies the TEMAC channel to receive frames
+ * sent to Ethernet addresses that are programmed into the Multicast Address
+ * Table (MAT).
+ * This driver sets this option to disabled (cleared) by default. */
+
+#define XTE_DEFAULT_OPTIONS \
+ (XTE_FLOW_CONTROL_OPTION | \
+ XTE_BROADCAST_OPTION | \
+ XTE_FCS_INSERT_OPTION | \
+ XTE_FCS_STRIP_OPTION | \
+ XTE_LENTYPE_ERR_OPTION | \
+ XTE_TRANSMITTER_ENABLE_OPTION | \
+ XTE_RECEIVER_ENABLE_OPTION)
+/**< XTE_DEFAULT_OPTIONS specify the options set in XLlTemac_Reset() and
+ * XLlTemac_CfgInitialize() */
+
+/*@}*/
+
+/** @name Reset parameters
+ *
+ * These are used by function XLlTemac_Reset().
+ * @{
+ */
+#define XTE_RESET_HARD 1
+#define XTE_NORESET_HARD 0
+/*@}*/
+
+#define XTE_MULTI_MAT_ENTRIES 4 /* Number of storable addresses in
+ the Multicast Address Table */
+
+#define XTE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
+
+/* The next few constants help upper layers determine the size of memory
+ * pools used for Ethernet buffers and descriptor lists.
+ */
+#define XTE_MAC_ADDR_SIZE 6 /* MAC addresses are 6 bytes */
+#define XTE_MTU 1500 /* max MTU size of an Ethernet frame */
+#define XTE_JUMBO_MTU 8982 /* max MTU size of a jumbo Ethernet frame */
+#define XTE_HDR_SIZE 14 /* size of an Ethernet header */
+#define XTE_HDR_VLAN_SIZE 18 /* size of an Ethernet header with VLAN */
+#define XTE_TRL_SIZE 4 /* size of an Ethernet trailer (FCS) */
+#define XTE_MAX_FRAME_SIZE (XTE_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+#define XTE_MAX_VLAN_FRAME_SIZE (XTE_MTU + XTE_HDR_VLAN_SIZE + XTE_TRL_SIZE)
+#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+
+/* Constant values returned by XLlTemac_mGetPhysicalInterface(). Note that these
+ * values match design parameters from the PLB_TEMAC spec
+ */
+#define XTE_PHY_TYPE_MII 0
+#define XTE_PHY_TYPE_GMII 1
+#define XTE_PHY_TYPE_RGMII_1_3 2
+#define XTE_PHY_TYPE_RGMII_2_0 3
+#define XTE_PHY_TYPE_SGMII 4
+#define XTE_PHY_TYPE_1000BASE_X 5
+
+/**************************** Type Definitions *******************************/
+
+
+/**
+ * This typedef contains configuration information for a TEMAC channel.
+ * Each channel is treated as a separate device from the point of view of this
+ * driver.
+ */
+typedef struct {
+ /** u16 DeviceId; < DeviceId is the unique ID of the device */
+ u32 BaseAddress;/**< BaseAddress is the physical base address of the
+ * channel's registers
+ */
+ u8 TxCsum; /**< TxCsum indicates that the channel has checksum
+ * offload on the Tx channel or not.
+ */
+ u8 RxCsum; /**< RxCsum indicates that the channel has checksum
+ * offload on the Rx channel or not.
+ */
+ u8 PhyType; /**< PhyType indicates which type of PHY interface is
+ * used (MII, GMII, RGMII, etc.); one of the
+ * XTE_PHY_TYPE_<x> values
+ */
+ u8 TemacIntr; /**< TEMAC interrupt ID */
+
+ int LLDevType; /**< LLDevType is the type of device attached to the
+ * temac's local link interface (XPAR_LL_FIFO or
+ * XPAR_LL_DMA)
+ */
+ u32 LLDevBaseAddress; /**< LLDevBaseAddress is the base address of the
+ * device attached to the temac's local link
+ * interface.
+ */
+ u8 LLFifoIntr; /**< LL FIFO interrupt ID (unused if DMA) */
+ u8 LLDmaRxIntr; /**< LL DMA RX interrupt ID (unused if FIFO) */
+ u8 LLDmaTxIntr; /**< LL DMA TX interrupt ID (unused if FIFO) */
+
+} XLlTemac_Config;
+
+
+/**
+ * struct XLlTemac is the type for TEMAC driver instance data. The calling code
+ * is required to use a unique instance of this structure for every TEMAC
+ * channel used in the system. Each channel is treated as a separate device
+ * from the point of view of this driver. A reference to a structure of this
+ * type is then passed to the driver API functions.
+ */
+typedef struct XLlTemac {
+ XLlTemac_Config Config; /* hardware configuration for this channel */
+ u32 IsStarted; /* device is currently started (see XLlTemac_IsStarted) */
+ u32 IsReady; /* device is initialized and ready */
+ u32 Options; /* current options word (XTE_*_OPTION flags) */
+ u32 Flags; /* internal driver flags */
+} XLlTemac;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsStarted reports if the device is in the started or stopped state. To
+ * be in the started state, the calling code must have made a successful call to
+ * <i>XLlTemac_Start</i>. To be in the stopped state, <i>XLlTemac_Stop</i> or
+ * <i>XLlTemac_CfgInitialize</i> function must have been called.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsStarted returns TRUE if the device has been started.
+ * Otherwise, XLlTemac_IsStarted returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsStarted(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsStarted(InstancePtr) \
+ (((InstancePtr)->IsStarted == XCOMPONENT_IS_STARTED) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* XLlTemac_IsDma reports if the device is currently connected to DMA.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IsDma returns TRUE if the device is connected DMA. Otherwise,
+* XLlTemac_IsDma returns FALSE.
+*
+* @note
+*
+* Signature: u32 XLlTemac_IsDma(XLlTemac *InstancePtr)
+*
+******************************************************************************/
+#define XLlTemac_IsDma(InstancePtr) \
+ (((InstancePtr)->Config.LLDevType == XPAR_LL_DMA) ? TRUE: FALSE)
+
+/*****************************************************************************/
+/**
+*
+* XLlTemac_IsFifo reports if the device is currently connected to a fifo core.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IsFifo returns TRUE if the device is connected to a fifo core.
+* Otherwise, XLlTemac_IsFifo returns FALSE.
+*
+* @note
+*
+* Signature: u32 XLlTemac_IsFifo(XLlTemac *InstancePtr)
+*
+******************************************************************************/
+#define XLlTemac_IsFifo(InstancePtr) \
+ (((InstancePtr)->Config.LLDevType == XPAR_LL_FIFO) ? TRUE: FALSE)
+
+/*****************************************************************************/
+/**
+*
+* XLlTemac_LlDevBaseAddress reports the base address of the core connected to
+* the TEMAC's local link interface.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_LlDevBaseAddress returns the base address of the core
+* connected to the TEMAC's local link interface.
+*
+* @note
+*
+* Signature: u32 XLlTemac_LlDevBaseAddress(XLlTemac *InstancePtr)
+*
+******************************************************************************/
+#define XLlTemac_LlDevBaseAddress(InstancePtr) \
+ ((InstancePtr)->Config.LLDevBaseAddress)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsRecvFrameDropped determines if the device thinks it has dropped a
+ * receive frame.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsRecvFrameDropped returns TRUE if the device interrupt
+ * status register reports that a frame has been dropped. Otherwise,
+ * XLlTemac_IsRecvFrameDropped returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsRecvFrameDropped(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsRecvFrameDropped(InstancePtr) \
+ ((XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, XTE_IS_OFFSET) \
+ & XTE_INT_RXRJECT_MASK) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsRxCsum determines if the device is configured with checksum
+ * offloading on the receive channel.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsRxCsum returns TRUE if the device is configured with
+ * checksum offloading on the receive channel. Otherwise,
+ * XLlTemac_IsRxCsum returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsRxCsum(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsRxCsum(InstancePtr) (((InstancePtr)->Config.RxCsum) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_IsTxCsum determines if the device is configured with checksum
+ * offloading on the transmit channel.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_IsTxCsum returns TRUE if the device is configured with
+ * checksum offloading on the transmit channel. Otherwise,
+ * XLlTemac_IsTxCsum returns FALSE.
+ *
+ * @note
+ *
+ * Signature: u32 XLlTemac_IsTxCsum(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_IsTxCsum(InstancePtr) (((InstancePtr)->Config.TxCsum) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ *
+ * XLlTemac_GetPhysicalInterface returns the type of PHY interface being used by
+ * the given instance, specified by <i>InstancePtr</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ *
+ * @return XLlTemac_GetPhysicalInterface returns one of XTE_PHY_TYPE_<x> where
+ * <x> is MII, GMII, RGMII_1_3, RGMII_2_0, SGMII, or 1000BASE_X (defined in
+ * xlltemac.h).
+ *
+ * @note
+ *
+ * Signature: int XLlTemac_GetPhysicalInterface(XLlTemac *InstancePtr)
+ *
+ ******************************************************************************/
+#define XLlTemac_GetPhysicalInterface(InstancePtr) \
+ ((InstancePtr)->Config.PhyType)
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_Status returns a bit mask of the interrupt status register (ISR).
+* XLlTemac_Status can be used to query the status without having to have
+* interrupts enabled.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IntStatus returns a bit mask of the status conditions.
+* The mask will be a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* u32 XLlTemac_IntStatus(XLlTemac *InstancePtr)
+*
+*****************************************************************************/
+#define XLlTemac_Status(InstancePtr) \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, XTE_IS_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntEnable enables the interrupts specified in <i>Mask</i>. The
+* corresponding interrupt for each bit set to 1 in <i>Mask</i>, will be
+* enabled.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @param Mask contains a bit mask of the interrupts to enable. The mask
+* can be formed using a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlTemac_IntEnable(XLlTemac *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+/* No trailing ';' or '\' after the body: the caller supplies the ';', and a
+ * stray final line-continuation would silently splice the following source
+ * line into the macro (matches the XLlTemac_IntClear style). */
+#define XLlTemac_IntEnable(InstancePtr, Mask) \
+ XLlTemac_WriteReg((InstancePtr)->Config.BaseAddress, XTE_IE_OFFSET, \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, \
+ XTE_IE_OFFSET) | ((Mask) & XTE_INT_ALL_MASK))
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntDisable disables the interrupts specified in <i>Mask</i>. The
+* corresponding interrupt for each bit set to 1 in <i>Mask</i>, will be
+* disabled. In other words, XLlTemac_IntDisable uses the "set a bit to clear it"
+* scheme.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @param Mask contains a bit mask of the interrupts to disable. The mask
+* can be formed using a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlTemac_IntDisable(XLlTemac *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+/* No trailing ';' or '\' after the body: the caller supplies the ';', and a
+ * stray final line-continuation would silently splice the following source
+ * line into the macro (matches the XLlTemac_IntClear style). */
+#define XLlTemac_IntDisable(InstancePtr, Mask) \
+ XLlTemac_WriteReg((InstancePtr)->Config.BaseAddress, XTE_IE_OFFSET, \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, \
+ XTE_IE_OFFSET) & ~((Mask) & XTE_INT_ALL_MASK))
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntPending returns a bit mask of the pending interrupts. Each bit
+* set to 1 in the return value represents a pending interrupt.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @return XLlTemac_IntPending returns a bit mask of the interrupts that are
+* pending. The mask will be a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* u32 XLlTemac_IntPending(XLlTemac *InstancePtr)
+*
+*****************************************************************************/
+#define XLlTemac_IntPending(InstancePtr) \
+ XLlTemac_ReadReg((InstancePtr)->Config.BaseAddress, XTE_IP_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* XLlTemac_IntClear clears pending interrupts specified in <i>Mask</i>.
+* The corresponding pending interrupt for each bit set to 1 in <i>Mask</i>,
+* will be cleared. In other words, XLlTemac_IntClear uses the "set a bit to
+* clear it" scheme.
+*
+* @param InstancePtr references the TEMAC channel on which to operate.
+*
+* @param Mask contains a bit mask of the pending interrupts to clear. The
+* mask can be formed using a set of bitwise or'd values from the
+* <code>XTE_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* void XLlTemac_IntClear(XLlTemac *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlTemac_IntClear(InstancePtr, Mask) \
+ XLlTemac_WriteReg((InstancePtr)->Config.BaseAddress, XTE_IS_OFFSET, \
+ ((Mask) & XTE_INT_ALL_MASK))
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization functions in xlltemac.c
+ */
+int XLlTemac_CfgInitialize(XLlTemac *InstancePtr, XLlTemac_Config *CfgPtr,
+ u32 VirtualAddress);
+void XLlTemac_Start(XLlTemac *InstancePtr);
+void XLlTemac_Stop(XLlTemac *InstancePtr);
+void XLlTemac_Reset(XLlTemac *InstancePtr, int HardCoreAction);
+
+/*
+ * Initialization functions in xlltemac_sinit.c
+ */
+XLlTemac_Config *XLlTemac_LookupConfig(u16 DeviceId);
+
+/*
+ * MAC configuration/control functions in xlltemac_control.c
+ */
+int XLlTemac_SetOptions(XLlTemac *InstancePtr, u32 Options);
+int XLlTemac_ClearOptions(XLlTemac *InstancePtr, u32 Options);
+u32 XLlTemac_GetOptions(XLlTemac *InstancePtr);
+
+int XLlTemac_SetMacAddress(XLlTemac *InstancePtr, void *AddressPtr);
+void XLlTemac_GetMacAddress(XLlTemac *InstancePtr, void *AddressPtr);
+
+int XLlTemac_SetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr);
+void XLlTemac_GetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr);
+int XLlTemac_SendPausePacket(XLlTemac *InstancePtr, u16 PauseValue);
+
+int XLlTemac_GetSgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr);
+int XLlTemac_GetRgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr,
+ int *IsFullDuplexPtr, int *IsLinkUpPtr);
+u16 XLlTemac_GetOperatingSpeed(XLlTemac *InstancePtr);
+void XLlTemac_SetOperatingSpeed(XLlTemac *InstancePtr, u16 Speed);
+
+void XLlTemac_PhySetMdioDivisor(XLlTemac *InstancePtr, u8 Divisor);
+void XLlTemac_PhyRead(XLlTemac *InstancePtr, u32 PhyAddress, u32 RegisterNum,
+ u16 *PhyDataPtr);
+void XLlTemac_PhyWrite(XLlTemac *InstancePtr, u32 PhyAddress, u32 RegisterNum,
+ u16 PhyData);
+int XLlTemac_MulticastAdd(XLlTemac *InstancePtr, void *AddressPtr, int Entry);
+void XLlTemac_MulticastGet(XLlTemac *InstancePtr, void *AddressPtr, int Entry);
+int XLlTemac_MulticastClear(XLlTemac *InstancePtr, int Entry);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac_control.c
+ *
+ * Functions in this file implement general purpose command and control related
+ * functionality. See xlltemac.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * </pre>
+ *****************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xlltemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_MulticastAdd adds the Ethernet address, <i>AddressPtr</i> to the
+ * TEMAC channel's multicast filter list, at list index <i>Entry</i>. The
+ * address referenced by <i>AddressPtr</i> may be of any unicast, multicast, or
+ * broadcast address form. The hardware for the TEMAC channel can hold up to
+ * XTE_MULTI_MAT_ENTRIES addresses in this filter list.<br><br>
+ *
+ * The device must be stopped to use this function.<br><br>
+ *
+ * Once an Ethernet address is programmed, the TEMAC channel will begin
+ * receiving data sent from that address. The TEMAC hardware does not have a
+ * control bit to disable multicast filtering. The only way to prevent the
+ * TEMAC channel from receiving messages from an Ethernet address in the
+ * Multicast Address Table (MAT) is to clear it with XLlTemac_MulticastClear().
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr is a pointer to the 6-byte Ethernet address to set. The
+ *        previous address at the location <i>Entry</i> (if any) is overwritten
+ *        with the value at <i>AddressPtr</i>.
+ * @param Entry is the hardware storage location to program this address and
+ *        must be between 0..XTE_MULTI_MAT_ENTRIES-1.
+ *
+ * @return On successful completion, XLlTemac_MulticastAdd returns XST_SUCCESS.
+ *         Otherwise, if the TEMAC channel is not stopped, XLlTemac_MulticastAdd
+ *         returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_MulticastAdd(XLlTemac *InstancePtr, void *AddressPtr, int Entry)
+{
+	u32 Maw0Reg;
+	u32 Maw1Reg;
+	u8 *Aptr = (u8 *) AddressPtr;
+	u32 Rdy;
+	int MaxWait = 100;
+	u32 BaseAddress = InstancePtr->Config.BaseAddress;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(AddressPtr != NULL);
+	XASSERT_NONVOID(Entry < XTE_MULTI_MAT_ENTRIES);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastAdd\n");
+
+	/* The device must be stopped before programming the multicast table */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		xdbg_printf(XDBG_DEBUG_GENERAL,
+		"XLlTemac_MulticastAdd: returning DEVICE_IS_STARTED\n");
+
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Set MAC bits [31:0] */
+	Maw0Reg = Aptr[0];
+	Maw0Reg |= Aptr[1] << 8;
+	Maw0Reg |= Aptr[2] << 16;
+	Maw0Reg |= Aptr[3] << 24;
+
+	/* Set MAC bits [47:32] */
+	Maw1Reg = Aptr[4];
+	Maw1Reg |= Aptr[5] << 8;
+
+	/* Add in MAT address */
+	Maw1Reg |= (Entry << XTE_MAW1_MATADDR_SHIFT_MASK);
+
+	/* Program HW: write LSW data, then trigger the indirect write via CTL */
+	xdbg_printf(XDBG_DEBUG_GENERAL, "Setting MAT entry: %d\n", Entry);
+	XLlTemac_WriteReg(BaseAddress, XTE_LSW_OFFSET, Maw0Reg);
+	XLlTemac_WriteReg(BaseAddress, XTE_CTL_OFFSET,
+			XTE_MAW0_OFFSET | XTE_CTL_WEN_MASK);
+	Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+	while (MaxWait && (!(Rdy & XTE_RDY_HARD_ACS_RDY_MASK))) {
+		Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+		xdbg_stmnt(
+			if (MaxWait == 100) {
+				xdbg_printf(XDBG_DEBUG_GENERAL,
+					"RDY reg not initially ready\n");
+			}
+		);
+		MaxWait--;
+		xdbg_stmnt(
+			if (MaxWait == 0) {
+				xdbg_printf(XDBG_DEBUG_GENERAL,
+					"RDY reg never showed ready\n");
+			}
+		)
+	}
+
+	/*
+	 * Restart the timeout budget; the first access may have consumed an
+	 * arbitrary number of iterations and must not starve the second wait.
+	 */
+	MaxWait = 100;
+	XLlTemac_WriteReg(BaseAddress, XTE_LSW_OFFSET, Maw1Reg);
+	XLlTemac_WriteReg(BaseAddress, XTE_CTL_OFFSET,
+			XTE_MAW1_OFFSET | XTE_CTL_WEN_MASK);
+	Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+	while (MaxWait && (!(Rdy & XTE_RDY_HARD_ACS_RDY_MASK))) {
+		Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+		xdbg_stmnt(
+			if (MaxWait == 100) {
+				xdbg_printf(XDBG_DEBUG_GENERAL,
+					"RDY reg not initially ready\n");
+			}
+		);
+		MaxWait--;
+		xdbg_stmnt(
+			if (MaxWait == 0) {
+				xdbg_printf(XDBG_DEBUG_GENERAL,
+					"RDY reg never showed ready\n");
+			}
+		)
+	}
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastAdd: returning SUCCESS\n");
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_MulticastGet gets the Ethernet address stored at index <i>Entry</i>
+ * in the TEMAC channel's multicast filter list.<br><br>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr references the memory buffer to store the retrieved
+ *        Ethernet address. This memory buffer must be at least 6 bytes in
+ *        length.
+ * @param Entry is the hardware storage location from which to retrieve the
+ *        address and must be between 0..XTE_MULTI_MAT_ENTRIES-1.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_MulticastGet(XLlTemac *InstancePtr, void *AddressPtr, int Entry)
+{
+	u32 Maw0Reg;
+	u32 Maw1Reg;
+	u8 *Aptr = (u8 *) AddressPtr;
+	u32 Rdy;
+	int MaxWait = 100;
+	u32 BaseAddress = InstancePtr->Config.BaseAddress;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_VOID(AddressPtr != NULL);
+	XASSERT_VOID(Entry < XTE_MULTI_MAT_ENTRIES);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_VOID(XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET) &
+			XTE_RDY_HARD_ACS_RDY_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastGet\n");
+
+	/*
+	 * Tell HW to provide address stored in given entry.
+	 * In this case, the access is a little weird, because we need to
+	 * write the LSW register first, then initiate a write operation,
+	 * even though it's a read operation.
+	 */
+	xdbg_printf(XDBG_DEBUG_GENERAL, "Getting MAT entry: %d\n", Entry);
+	XLlTemac_WriteReg(BaseAddress, XTE_LSW_OFFSET,
+		Entry << XTE_MAW1_MATADDR_SHIFT_MASK | XTE_MAW1_RNW_MASK);
+	XLlTemac_WriteReg(BaseAddress, XTE_CTL_OFFSET,
+		XTE_MAW1_OFFSET | XTE_CTL_WEN_MASK);
+	Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+	while (MaxWait && (!(Rdy & XTE_RDY_HARD_ACS_RDY_MASK))) {
+		Rdy = XLlTemac_ReadReg(BaseAddress, XTE_RDY_OFFSET);
+		xdbg_stmnt(
+			if (MaxWait == 100) {
+				xdbg_printf(XDBG_DEBUG_GENERAL,
+					"RDY reg not initially ready\n");
+			}
+		);
+		MaxWait--;
+		xdbg_stmnt(
+			if (MaxWait == 0) {
+				xdbg_printf(XDBG_DEBUG_GENERAL,
+					"RDY reg never showed ready\n");
+			}
+		)
+
+	}
+	Maw0Reg = XLlTemac_ReadReg(BaseAddress, XTE_LSW_OFFSET);
+	Maw1Reg = XLlTemac_ReadReg(BaseAddress, XTE_MSW_OFFSET);
+
+	/* Copy the address to the user buffer */
+	Aptr[0] = (u8) Maw0Reg;
+	Aptr[1] = (u8) (Maw0Reg >> 8);
+	Aptr[2] = (u8) (Maw0Reg >> 16);
+	Aptr[3] = (u8) (Maw0Reg >> 24);
+	Aptr[4] = (u8) Maw1Reg;
+	Aptr[5] = (u8) (Maw1Reg >> 8);
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastGet: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_MulticastClear clears the Ethernet address stored at index <i>Entry</i>
+ * in the TEMAC channel's multicast filter list.<br><br>
+ *
+ * The device must be stopped to use this function.<br><br>
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param Entry is the hardware storage location to clear and must be between
+ *        0..XTE_MULTI_MAT_ENTRIES-1. It is the same index used when the
+ *        address was added with XLlTemac_MulticastAdd().
+ *
+ * @return On successful completion, XLlTemac_MulticastClear returns XST_SUCCESS.
+ *         Otherwise, if the TEMAC channel is not stopped, XLlTemac_MulticastClear
+ *         returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_MulticastClear(XLlTemac *InstancePtr, int Entry)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(Entry < XTE_MULTI_MAT_ENTRIES);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_MulticastClear\n");
+
+	/* The device must be stopped before clearing the multicast entry */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		xdbg_printf(XDBG_DEBUG_GENERAL,
+		"XLlTemac_MulticastClear: returning DEVICE_IS_STARTED\n");
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Clear the entry by writing 0:0:0:0:0:0 to it */
+	XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_MAW0_OFFSET, 0);
+	XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_MAW1_OFFSET, Entry << XTE_MAW1_MATADDR_SHIFT_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL,
+	"XLlTemac_MulticastClear: returning SUCCESS\n");
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SetMacPauseAddress sets the MAC address used for pause frames to
+ * <i>AddressPtr</i>. <i>AddressPtr</i> will be the address the TEMAC channel
+ * will recognize as being for pause frames. Pause frames transmitted with
+ * XLlTemac_SendPausePacket() will also use this address.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr is a pointer to the 6-byte Ethernet address to set.
+ *
+ * @return On successful completion, XLlTemac_SetMacPauseAddress returns
+ *         XST_SUCCESS. Otherwise, if the TEMAC channel is not stopped,
+ *         XLlTemac_SetMacPauseAddress returns XST_DEVICE_IS_STARTED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SetMacPauseAddress\n");
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		xdbg_printf(XDBG_DEBUG_GENERAL,
+		"XLlTemac_SetMacPauseAddress: returning DEVICE_IS_STARTED\n");
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Set the MAC bits [31:0] in RCW0 */
+	MacAddr = Aptr[0];
+	MacAddr |= Aptr[1] << 8;
+	MacAddr |= Aptr[2] << 16;
+	MacAddr |= Aptr[3] << 24;
+	XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_RCW0_OFFSET, MacAddr);
+
+	/* RCW1 contains other info that must be preserved */
+	MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_RCW1_OFFSET);
+	MacAddr &= ~XTE_RCW1_PAUSEADDR_MASK;
+
+	/* Set MAC bits [47:32] */
+	MacAddr |= Aptr[4];
+	MacAddr |= Aptr[5] << 8;
+	XLlTemac_WriteIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_RCW1_OFFSET, MacAddr);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL,
+	"XLlTemac_SetMacPauseAddress: returning SUCCESS\n");
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetMacPauseAddress gets the MAC address used for pause frames for the
+ * TEMAC channel specified by <i>InstancePtr</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param AddressPtr references the memory buffer to store the retrieved MAC
+ *        address. This memory buffer must be at least 6 bytes in length.
+ *
+ * @return N/A
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+void XLlTemac_GetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_VOID(AddressPtr != NULL);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_VOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	/* Fixed copy-paste bug: trace previously named SetMacPauseAddress */
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetMacPauseAddress\n");
+
+	/* Read MAC bits [31:0] in RCW0 */
+	MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_RCW0_OFFSET);
+	Aptr[0] = (u8) MacAddr;
+	Aptr[1] = (u8) (MacAddr >> 8);
+	Aptr[2] = (u8) (MacAddr >> 16);
+	Aptr[3] = (u8) (MacAddr >> 24);
+
+	/* Read MAC bits [47:32] in RCW1 */
+	MacAddr = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_RCW1_OFFSET);
+	Aptr[4] = (u8) MacAddr;
+	Aptr[5] = (u8) (MacAddr >> 8);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetMacPauseAddress: done\n");
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_SendPausePacket sends a pause packet with the value of
+ * <i>PauseValue</i>.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param PauseValue is the pause value in units of 512 bit times.
+ *
+ * @return On successful completion, XLlTemac_SendPausePacket returns
+ *         XST_SUCCESS. Otherwise, if the TEMAC channel is not started,
+ *         XLlTemac_SendPausePacket returns XST_DEVICE_IS_STOPPED.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_SendPausePacket(XLlTemac *InstancePtr, u16 PauseValue)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	/* Fixed copy-paste bug: trace previously named SetMacPauseAddress */
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_SendPausePacket\n");
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		xdbg_printf(XDBG_DEBUG_GENERAL,
+		"XLlTemac_SendPausePacket: returning DEVICE_IS_STOPPED\n");
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* Send flow control frame */
+	XLlTemac_WriteReg(InstancePtr->Config.BaseAddress, XTE_TPF_OFFSET,
+		(u32) PauseValue & XTE_TPF_TPFV_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL,
+	"XLlTemac_SendPausePacket: returning SUCCESS\n");
+	return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetSgmiiStatus get the state of the link when using the SGMII media
+ * interface.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param SpeedPtr references the location to store the result, which is the
+ *        autonegotiated link speed in units of Mbits/sec, either 0, 10, 100,
+ *        or 1000.
+ *
+ * @return On successful completion, XLlTemac_GetSgmiiStatus returns XST_SUCCESS.
+ *         Otherwise, if TEMAC channel is not using an SGMII interface,
+ *         XLlTemac_GetSgmiiStatus returns XST_NO_FEATURE.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_GetSgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr)
+{
+	int PhyType;
+	u32 EgmicReg;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetSgmiiStatus\n");
+	/* Make sure PHY is SGMII */
+	PhyType = XLlTemac_GetPhysicalInterface(InstancePtr);
+	if (PhyType != XTE_PHY_TYPE_SGMII) {
+		xdbg_printf(XDBG_DEBUG_GENERAL,
+		"XLlTemac_GetSgmiiStatus: returning NO_FEATURE\n");
+		return (XST_NO_FEATURE);
+	}
+
+	/* Get the current contents of RGMII/SGMII config register */
+	EgmicReg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_PHYC_OFFSET);
+
+	/* Extract speed */
+	switch (EgmicReg & XTE_PHYC_SGMIILINKSPEED_MASK) {
+	case XTE_PHYC_SGLINKSPD_10:
+		*SpeedPtr = 10;
+		break;
+
+	case XTE_PHYC_SGLINKSPD_100:
+		*SpeedPtr = 100;
+		break;
+
+	case XTE_PHYC_SGLINKSPD_1000:
+		*SpeedPtr = 1000;
+		break;
+
+	default:
+		*SpeedPtr = 0;
+	}
+
+	xdbg_printf(XDBG_DEBUG_GENERAL,
+	"XLlTemac_GetSgmiiStatus: returning SUCCESS\n");
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * XLlTemac_GetRgmiiStatus get the state of the link when using the RGMII media
+ * interface.
+ *
+ * @param InstancePtr references the TEMAC channel on which to operate.
+ * @param SpeedPtr references the location to store the result, which is the
+ *        autonegotiated link speed in units of Mbits/sec, either 0, 10, 100,
+ *        or 1000.
+ * @param IsFullDuplexPtr references the value to set to indicate full duplex
+ *        operation. XLlTemac_GetRgmiiStatus sets <i>IsFullDuplexPtr</i> to TRUE
+ *        when the RGMII link is operating in full duplex mode. Otherwise,
+ *        XLlTemac_GetRgmiiStatus sets <i>IsFullDuplexPtr</i> to FALSE.
+ * @param IsLinkUpPtr references the value to set to indicate the link status.
+ *        XLlTemac_GetRgmiiStatus sets <i>IsLinkUpPtr</i> to TRUE when the RGMII
+ *        link up. Otherwise, XLlTemac_GetRgmiiStatus sets <i>IsLinkUpPtr</i> to
+ *        FALSE.
+ *
+ * @return On successful completion, XLlTemac_GetRgmiiStatus returns XST_SUCCESS.
+ *         Otherwise, if TEMAC channel is not using an RGMII interface,
+ *         XLlTemac_GetRgmiiStatus returns XST_NO_FEATURE.
+ *
+ * @note
+ *
+ * This routine accesses the hard TEMAC registers through a shared interface
+ * between both channels of the TEMAC. Because of this, the application/OS code
+ * must provide mutual exclusive access to this routine with any of the other
+ * routines in this TEMAC driver.
+ *
+ ******************************************************************************/
+int XLlTemac_GetRgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr,
+		int *IsFullDuplexPtr, int *IsLinkUpPtr)
+{
+	int PhyType;
+	u32 EgmicReg;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	/*
+	 * If the mutual exclusion is enforced properly in the calling code, we
+	 * should never get into the following case.
+	 */
+	XASSERT_NONVOID(XLlTemac_ReadReg(InstancePtr->Config.BaseAddress,
+			XTE_RDY_OFFSET) & XTE_RDY_HARD_ACS_RDY_MASK);
+
+	xdbg_printf(XDBG_DEBUG_GENERAL, "XLlTemac_GetRgmiiStatus\n");
+	/* Make sure PHY is RGMII */
+	PhyType = XLlTemac_GetPhysicalInterface(InstancePtr);
+	if ((PhyType != XTE_PHY_TYPE_RGMII_1_3) &&
+	    (PhyType != XTE_PHY_TYPE_RGMII_2_0)) {
+		xdbg_printf(XDBG_DEBUG_GENERAL,
+		"XLlTemac_GetRgmiiStatus: returning NO_FEATURE\n");
+		return (XST_NO_FEATURE);
+	}
+
+	/* Get the current contents of RGMII/SGMII config register */
+	EgmicReg = XLlTemac_ReadIndirectReg(InstancePtr->Config.BaseAddress,
+		XTE_PHYC_OFFSET);
+
+	/* Extract speed */
+	switch (EgmicReg & XTE_PHYC_RGMIILINKSPEED_MASK) {
+	case XTE_PHYC_RGLINKSPD_10:
+		*SpeedPtr = 10;
+		break;
+
+	case XTE_PHYC_RGLINKSPD_100:
+		*SpeedPtr = 100;
+		break;
+
+	case XTE_PHYC_RGLINKSPD_1000:
+		*SpeedPtr = 1000;
+		break;
+
+	default:
+		*SpeedPtr = 0;
+	}
+
+	/* Extract duplex and link status; HD bit set means half duplex */
+	if (EgmicReg & XTE_PHYC_RGMIIHD_MASK) {
+		*IsFullDuplexPtr = FALSE;
+	}
+	else {
+		*IsFullDuplexPtr = TRUE;
+	}
+
+	if (EgmicReg & XTE_PHYC_RGMIILINK_MASK) {
+		*IsLinkUpPtr = TRUE;
+	}
+	else {
+		*IsLinkUpPtr = FALSE;
+	}
+
+	xdbg_printf(XDBG_DEBUG_GENERAL,
+	"XLlTemac_GetRgmiiStatus: returning SUCCESS\n");
+	return (XST_SUCCESS);
+}
+
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+ *
+ * @file xlltemac_hw.h
+ *
+ * This header file contains identifiers and low-level driver functions (or
+ * macros) that can be used to access the Tri-Mode MAC Ethernet (TEMAC) device.
+ * High-level driver functions are defined in xlltemac.h.
+ *
+ * @note
+ *
+ * Some registers are not accessible when a HW instance is configured for SGDMA.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 11/10/06 First release
+ * </pre>
+ *
+ ******************************************************************************/
+
+#ifndef XTEMAC_HW_H /* prevent circular inclusions */
+#define XTEMAC_HW_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xdebug.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************** Constant Definitions *****************************/
+
+#define XTE_RESET_HARD_DELAY_US 4 /**< Us to delay for hard core reset */
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit.
+ */
+
+/** @name Direct registers
+ * @{
+ */
+#define XTE_RAF_OFFSET 0x00000000 /**< Reset and address filter */
+#define XTE_TPF_OFFSET 0x00000004 /**< Transmit pause frame */
+#define XTE_IFGP_OFFSET 0x00000008 /**< Transmit inter-frame gap adjustment */
+#define XTE_IS_OFFSET 0x0000000C /**< Interrupt status */
+#define XTE_IP_OFFSET 0x00000010 /**< Interrupt pending */
+#define XTE_IE_OFFSET 0x00000014 /**< Interrupt enable */
+
+#define XTE_MSW_OFFSET 0x00000020 /**< Most significant word data */
+#define XTE_LSW_OFFSET 0x00000024 /**< Least significant word data */
+#define XTE_CTL_OFFSET 0x00000028 /**< Control */
+#define XTE_RDY_OFFSET 0x0000002C /**< Ready status */
+/*@}*/
+
+
+/** @name HARD_TEMAC Core Registers
+ * These are registers defined within the device's hard core located in the
+ * processor block. They are accessed indirectly through the registers, MSW,
+ * LSW, and CTL.
+ *
+ * Access to these registers should go through macros XLlTemac_ReadIndirectReg()
+ * and XLlTemac_WriteIndirectReg() to guarantee proper access.
+ * @{
+ */
+#define XTE_RCW0_OFFSET 0x00000200 /**< Rx configuration word 0 */
+#define XTE_RCW1_OFFSET 0x00000240 /**< Rx configuration word 1 */
+#define XTE_TC_OFFSET 0x00000280 /**< Tx configuration */
+#define XTE_FCC_OFFSET 0x000002C0 /**< Flow control configuration */
+#define XTE_EMMC_OFFSET 0x00000300 /**< EMAC mode configuration */
+#define XTE_PHYC_OFFSET 0x00000320 /**< RGMII/SGMII configuration */
+#define XTE_MC_OFFSET 0x00000340 /**< Management configuration */
+#define XTE_UAW0_OFFSET 0x00000380 /**< Unicast address word 0 */
+#define XTE_UAW1_OFFSET 0x00000384 /**< Unicast address word 1 */
+#define XTE_MAW0_OFFSET 0x00000388 /**< Multicast address word 0 */
+#define XTE_MAW1_OFFSET 0x0000038C /**< Multicast address word 1 */
+#define XTE_AFM_OFFSET 0x00000390 /**< Address Filter (promiscuous) mode */
+#define XTE_TIS_OFFSET 0x000003A0 /**< Interrupt status */
+#define XTE_TIE_OFFSET 0x000003A4 /**< Interrupt enable */
+#define XTE_MIIMWD_OFFSET 0x000003B0 /**< MII management write data */
+#define XTE_MIIMAI_OFFSET 0x000003B4 /**< MII management access initiate */
+/*@}*/
+
+
+/* Register masks. The following constants define bit locations of various
+ * control bits in the registers. Constants are not defined for those registers
+ * that have a single bit field representing all 32 bits. For further
+ * information on the meaning of the various bit masks, refer to the HW spec.
+ */
+
+/** @name Reset and Address Filter bits
+ * These bits are associated with the XTE_RAF_OFFSET register.
+ * @{
+ */
+#define XTE_RAF_HTRST_MASK 0x00000001 /**< Hard TEMAC Reset */
+#define XTE_RAF_MCSTREJ_MASK 0x00000002 /**< Reject receive multicast destination address */
+#define XTE_RAF_BCSTREJ_MASK 0x00000004 /**< Reject receive broadcast destination address */
+/*@}*/
+
+/** @name Transmit Pause Frame Register (TPF)
+ * @{
+ */
+#define XTE_TPF_TPFV_MASK 0x0000FFFF /**< Tx pause frame value */
+/*@}*/
+
+/** @name Transmit Inter-Frame Gap Adjustment Register (TFGP)
+ * @{
+ */
+#define XTE_TFGP_IFGP_MASK 0x0000007F /**< Transmit inter-frame gap adjustment value */
+/*@}*/
+
+/** @name Interrupt bits
+ * These bits are associated with the XTE_IS_OFFSET, XTE_IP_OFFSET, and
+ * XTE_IE_OFFSET registers.
+ * @{
+ */
+#define XTE_INT_HARDACSCMPLT_MASK 0x00000001 /**< Hard register access complete */
+#define XTE_INT_AUTONEG_MASK 0x00000002 /**< Auto negotiation complete */
+#define XTE_INT_RC_MASK 0x00000004 /**< Receive complete */
+#define XTE_INT_RXRJECT_MASK 0x00000008 /**< Receive frame rejected */
+#define XTE_INT_RXFIFOOVR_MASK 0x00000010 /**< Receive fifo overrun */
+#define XTE_INT_TC_MASK 0x00000020 /**< Transmit complete */
+#define XTE_INT_ALL_MASK 0x0000003f /**< All the ints */
+/*@}*/
+
+
+#define XTE_INT_RECV_ERROR_MASK \
+ (XTE_INT_RXRJECT_MASK | XTE_INT_RXFIFOOVR_MASK) /**< INT bits that indicate receive errors */
+/*@}*/
+
+
+/** @name Control Register (CTL)
+ * @{
+ */
+#define XTE_CTL_WEN_MASK 0x00008000 /**< Write Enable */
+/*@}*/
+
+
+/** @name Ready Status, TEMAC Interrupt Status, TEMAC Interrupt Enable Registers
+ * (RDY, TIS, TIE)
+ * @{
+ */
+#define XTE_RSE_FABR_RR_MASK 0x00000001 /**< Fabric read ready */
+#define XTE_RSE_MIIM_RR_MASK 0x00000002 /**< MII management read ready */
+#define XTE_RSE_MIIM_WR_MASK 0x00000004 /**< MII management write ready */
+#define XTE_RSE_AF_RR_MASK 0x00000008 /**< Address filter read ready*/
+#define XTE_RSE_AF_WR_MASK 0x00000010 /**< Address filter write ready*/
+#define XTE_RSE_CFG_RR_MASK 0x00000020 /**< Configuration register read ready*/
+#define XTE_RSE_CFG_WR_MASK 0x00000040 /**< Configuration register write ready*/
+#define XTE_RDY_HARD_ACS_RDY_MASK 0x00010000 /**< Hard register access ready */
+#define XTE_RDY_ALL (XTE_RSE_FABR_RR_MASK | \
+ XTE_RSE_MIIM_RR_MASK | \
+ XTE_RSE_MIIM_WR_MASK | \
+ XTE_RSE_AF_RR_MASK | \
+ XTE_RSE_AF_WR_MASK | \
+ XTE_RSE_CFG_RR_MASK | \
+ XTE_RSE_CFG_WR_MASK | \
+ XTE_RDY_HARD_ACS_RDY_MASK)
+/*@}*/
+
+
+/** @name Receive Configuration Word 1 (RCW1)
+ * @{
+ */
+#define XTE_RCW1_RST_MASK 0x80000000 /**< Reset */
+#define XTE_RCW1_JUM_MASK 0x40000000 /**< Jumbo frame enable */
+#define XTE_RCW1_FCS_MASK 0x20000000 /**< In-Band FCS enable (FCS not stripped) */
+#define XTE_RCW1_RX_MASK 0x10000000 /**< Receiver enable */
+#define XTE_RCW1_VLAN_MASK 0x08000000 /**< VLAN frame enable */
+#define XTE_RCW1_HD_MASK 0x04000000 /**< Half duplex mode */
+#define XTE_RCW1_LT_DIS_MASK 0x02000000 /**< Length/type field valid check disable */
+#define XTE_RCW1_PAUSEADDR_MASK 0x0000FFFF /**< Pause frame source address
+ bits [47:32]. Bits [31:0]
+ are stored in register
+ RCW0 */
+/*@}*/
+
+
+/** @name Transmitter Configuration (TC)
+ * @{
+ */
+#define XTE_TC_RST_MASK 0x80000000 /**< reset */
+#define XTE_TC_JUM_MASK 0x40000000 /**< Jumbo frame enable */
+#define XTE_TC_FCS_MASK 0x20000000 /**< In-Band FCS enable (FCS not generated) */
+#define XTE_TC_TX_MASK 0x10000000 /**< Transmitter enable */
+#define XTE_TC_VLAN_MASK 0x08000000 /**< VLAN frame enable */
+#define XTE_TC_HD_MASK 0x04000000 /**< Half duplex mode */
+#define XTE_TC_IFG_MASK 0x02000000 /**< Inter-frame gap adjustment enable */
+/*@}*/
+
+
+/** @name Flow Control Configuration (FCC)
+ * @{
+ */
+#define XTE_FCC_FCRX_MASK 0x20000000 /**< Rx flow control enable */
+#define XTE_FCC_FCTX_MASK 0x40000000 /**< Tx flow control enable */
+/*@}*/
+
+
+/** @name EMAC Configuration (EMMC)
+ * @{
+ */
+#define XTE_EMMC_LINKSPEED_MASK 0xC0000000 /**< Link speed */
+#define XTE_EMMC_RGMII_MASK 0x20000000 /**< RGMII mode enable */
+#define XTE_EMMC_SGMII_MASK 0x10000000 /**< SGMII mode enable */
+#define XTE_EMMC_GPCS_MASK 0x08000000 /**< 1000BaseX mode enable */
+#define XTE_EMMC_HOST_MASK 0x04000000 /**< Host interface enable */
+#define XTE_EMMC_TX16BIT 0x02000000 /**< 16 bit Tx client enable */
+#define XTE_EMMC_RX16BIT 0x01000000 /**< 16 bit Rx client enable */
+
+#define XTE_EMMC_LINKSPD_10 0x00000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 10 Mbit */
+#define XTE_EMMC_LINKSPD_100 0x40000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 100 Mbit */
+#define XTE_EMMC_LINKSPD_1000 0x80000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 1000 Mbit */
+/*@}*/
+
+
+/** @name EMAC RGMII/SGMII Configuration (PHYC)
+ * @{
+ */
+#define XTE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /**< SGMII link speed */
+#define XTE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /**< RGMII link speed */
+#define XTE_PHYC_RGMIIHD_MASK 0x00000002 /**< RGMII Half-duplex mode */
+#define XTE_PHYC_RGMIILINK_MASK 0x00000001 /**< RGMII link status */
+
+#define XTE_PHYC_RGLINKSPD_10 0x00000000 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 10 Mbit */
+#define XTE_PHYC_RGLINKSPD_100 0x00000004 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 100 Mbit */
+#define XTE_PHYC_RGLINKSPD_1000 0x00000008 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 1000 Mbit */
+#define XTE_PHYC_SGLINKSPD_10 0x00000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 10 Mbit */
+#define XTE_PHYC_SGLINKSPD_100 0x40000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 100 Mbit */
+#define XTE_PHYC_SGLINKSPD_1000 0x80000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 1000 Mbit */
+/*@}*/
+
+
+/** @name EMAC Management Configuration (MC)
+ * @{
+ */
+#define XTE_MC_MDIOEN_MASK 0x00000040 /**< MII management enable */
+#define XTE_MC_CLOCK_DIVIDE_MAX 0x3F /**< Maximum MDIO divisor */
+/*@}*/
+
+
+/** @name EMAC Unicast Address Register Word 1 (UAW1)
+ * @{
+ */
+#define XTE_UAW1_UNICASTADDR_MASK 0x0000FFFF /**< Station address bits [47:32]
+ Station address bits [31:0]
+ are stored in register
+ UAW0 */
+/*@}*/
+
+
+/** @name EMAC Multicast Address Register Word 1 (MAW1)
+ * @{
+ */
+#define XTE_MAW1_RNW_MASK 0x00800000 /**< Multicast address table register read enable */
+#define XTE_MAW1_ADDR_MASK 0x00030000 /**< Multicast address table register address */
+#define XTE_MAW1_MULTICADDR_MASK 0x0000FFFF /**< Multicast address bits [47:32]
+ Multicast address bits [31:0]
+ are stored in register
+ MAW0 */
+#define XTE_MAW1_MATADDR_SHIFT_MASK 16 /**< Number of bits to shift right
+ to align with
+ XTE_MAW1_ADDR_MASK */
+/*@}*/
+
+
+/** @name EMAC Address Filter Mode (AFM)
+ * @{
+ */
+#define XTE_AFM_PM_MASK 0x80000000 /**< Promiscuous mode enable */
+/*@}*/
+
+
+/** @name Media Independent Interface Management (MIIM)
+ * @{
+ */
+#define XTE_MIIM_REGAD_MASK 0x1F /**< MII Phy register address (REGAD) */
+#define XTE_MIIM_PHYAD_MASK 0x03E0 /**< MII Phy address (PHYAD) */
+#define XTE_MIIM_PHYAD_SHIFT 5 /**< MII Shift bits for PHYAD */
+/*@}*/
+
+
+/** @name Checksum offload buffer descriptor extensions
+ * @{
+ */
+/** Byte offset where checksum should begin (16 bit word) */
+#define XTE_BD_TX_CSBEGIN_OFFSET XDMAV3_BD_USR0_OFFSET
+
+/** Offset where checksum should be inserted (16 bit word) */
+#define XTE_BD_TX_CSINSERT_OFFSET (XDMAV3_BD_USR0_OFFSET + 2)
+
+/** Checksum offload control for transmit (16 bit word) */
+#define XTE_BD_TX_CSCNTRL_OFFSET XDMAV3_BD_USR1_OFFSET
+
+/** Seed value for checksum calculation (16 bit word) */
+#define XTE_BD_TX_CSINIT_OFFSET (XDMAV3_BD_USR1_OFFSET + 2)
+
+/** Receive frame checksum calculation (16 bit word) */
+#define XTE_BD_RX_CSRAW_OFFSET (XDMAV3_BD_USR5_OFFSET + 2)
+
+/*@}*/
+
+/** @name TX_CSCNTRL bit mask
+ * @{
+ */
+#define XTE_BD_TX_CSCNTRL_CALC_MASK 0x0001 /**< Enable/disable Tx
+ checksum */
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+xdbg_stmnt(extern int indent_on);
+
+/*
+ * Debug tracing helpers for register accesses (used by the DEBUG variants
+ * of the read/write macros below).
+ */
+
+/* Indent trace lines for indirect (hard TEMAC) accesses: registers in the
+ * XTE_RAF_OFFSET..XTE_RDY_OFFSET window are printed with a leading tab
+ * while indent_on is set. */
+#define XLlTemac_indent(RegOffset) \
+	((indent_on && ((RegOffset) >= XTE_RAF_OFFSET) && ((RegOffset) <= XTE_RDY_OFFSET)) ? "\t" : "")
+
+/* Map a register offset to its symbolic name for trace output. */
+#define XLlTemac_reg_name(RegOffset) \
+	(((RegOffset) == XTE_RAF_OFFSET) ? "XTE_RAF_OFFSET": \
+	((RegOffset) == XTE_TPF_OFFSET) ? "XTE_TPF_OFFSET": \
+	((RegOffset) == XTE_IFGP_OFFSET) ? "XTE_IFGP_OFFSET": \
+	((RegOffset) == XTE_IS_OFFSET) ? "XTE_IS_OFFSET": \
+	((RegOffset) == XTE_IP_OFFSET) ? "XTE_IP_OFFSET": \
+	((RegOffset) == XTE_IE_OFFSET) ? "XTE_IE_OFFSET": \
+	((RegOffset) == XTE_MSW_OFFSET) ? "XTE_MSW_OFFSET": \
+	((RegOffset) == XTE_LSW_OFFSET) ? "XTE_LSW_OFFSET": \
+	((RegOffset) == XTE_CTL_OFFSET) ? "XTE_CTL_OFFSET": \
+	((RegOffset) == XTE_RDY_OFFSET) ? "XTE_RDY_OFFSET": \
+	((RegOffset) == XTE_RCW0_OFFSET) ? "XTE_RCW0_OFFSET": \
+	((RegOffset) == XTE_RCW1_OFFSET) ? "XTE_RCW1_OFFSET": \
+	((RegOffset) == XTE_TC_OFFSET) ? "XTE_TC_OFFSET": \
+	((RegOffset) == XTE_FCC_OFFSET) ? "XTE_FCC_OFFSET": \
+	((RegOffset) == XTE_EMMC_OFFSET) ? "XTE_EMMC_OFFSET": \
+	((RegOffset) == XTE_PHYC_OFFSET) ? "XTE_PHYC_OFFSET": \
+	((RegOffset) == XTE_MC_OFFSET) ? "XTE_MC_OFFSET": \
+	((RegOffset) == XTE_UAW0_OFFSET) ? "XTE_UAW0_OFFSET": \
+	((RegOffset) == XTE_UAW1_OFFSET) ? "XTE_UAW1_OFFSET": \
+	((RegOffset) == XTE_MAW0_OFFSET) ? "XTE_MAW0_OFFSET": \
+	((RegOffset) == XTE_MAW1_OFFSET) ? "XTE_MAW1_OFFSET": \
+	((RegOffset) == XTE_AFM_OFFSET) ? "XTE_AFM_OFFSET": \
+	((RegOffset) == XTE_TIS_OFFSET) ? "XTE_TIS_OFFSET": \
+	((RegOffset) == XTE_TIE_OFFSET) ? "XTE_TIE_OFFSET": \
+	((RegOffset) == XTE_MIIMWD_OFFSET) ? "XTE_MIIMWD_OFFSET": \
+	((RegOffset) == XTE_MIIMAI_OFFSET) ? "XTE_MIIMAI_OFFSET": \
+	"unknown")
+
+/* Trace an outgoing (write) register access: "value -> NAME(offset)". */
+#define XLlTemac_print_reg_o(BaseAddress, RegOffset, Value) \
+	xdbg_printf(XDBG_DEBUG_TEMAC_REG, "%s0x%0x -> %s(0x%0x)\n", \
+			XLlTemac_indent(RegOffset), (Value), \
+			XLlTemac_reg_name(RegOffset), (RegOffset)) \
+
+/* Trace an incoming (read) register access: "NAME(offset) -> value". */
+#define XLlTemac_print_reg_i(BaseAddress, RegOffset, Value) \
+	xdbg_printf(XDBG_DEBUG_TEMAC_REG, "%s%s(0x%0x) -> 0x%0x\n", \
+			XLlTemac_indent(RegOffset), XLlTemac_reg_name(RegOffset), \
+			(RegOffset), (Value)) \
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_ReadReg returns the value read from the register specified by
+ * <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the register to be read.
+ *
+ * @return XLlTemac_ReadReg returns the 32-bit value of the register.
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlTemac_mReadReg(u32 BaseAddress, u32 RegOffset)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+/* DEBUG variant: range-checks the offset (direct registers end at 0x2c)
+ * and traces the access before yielding the value via a GCC statement
+ * expression.
+ * NOTE(review): this path calls printf, which is not defined in kernel
+ * context — presumably only compiled for standalone/debug builds; confirm. */
+#define XLlTemac_ReadReg(BaseAddress, RegOffset) \
+({ \
+	u32 value; \
+	if ((RegOffset) > 0x2c) { \
+		printf ("readreg: Woah! wrong reg addr: 0x%0x\n", (RegOffset)); \
+	} \
+	value = XIo_In32(((BaseAddress) + (RegOffset))); \
+	XLlTemac_print_reg_i((BaseAddress), (RegOffset), value); \
+	value; \
+})
+#else
+/* Plain 32-bit register read at BaseAddress + RegOffset. */
+#define XLlTemac_ReadReg(BaseAddress, RegOffset) \
+	(XIo_In32(((BaseAddress) + (RegOffset))))
+#endif
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_WriteReg, writes <i>Data</i> to the register specified by
+ * <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the register to be written.
+ * @param Data is the 32-bit value to write to the register.
+ *
+ * @return N/A
+ *
+ * @note
+ * C-style signature:
+ * void XLlTemac_mWriteReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+/* DEBUG variant: range-checks the offset (direct registers end at 0x2c)
+ * and traces the access before performing the write.
+ * NOTE(review): this path calls printf, which is not defined in kernel
+ * context — presumably only compiled for standalone/debug builds; confirm. */
+#define XLlTemac_WriteReg(BaseAddress, RegOffset, Data) \
+({ \
+	if ((RegOffset) > 0x2c) { \
+		printf ("writereg: Woah! wrong reg addr: 0x%0x\n", (RegOffset)); \
+	} \
+	XLlTemac_print_reg_o((BaseAddress), (RegOffset), (Data)); \
+	XIo_Out32(((BaseAddress) + (RegOffset)), (Data)); \
+})
+#else
+/* Plain 32-bit register write of Data to BaseAddress + RegOffset. */
+#define XLlTemac_WriteReg(BaseAddress, RegOffset, Data) \
+	XIo_Out32(((BaseAddress) + (RegOffset)), (Data))
+#endif
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_ReadIndirectReg returns the value read from the hard TEMAC register
+ * specified by <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the hard TEMAC register to be read.
+ *
+ * @return XLlTemac_ReadIndirectReg returns the 32-bit value of the register.
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlTemac_mReadIndirectReg(u32 BaseAddress, u32 RegOffset)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+/* Scratch location used to carry the result out of the comma-expression
+ * DEBUG macro below. */
+extern u32 _xlltemac_rir_value;
+
+/*
+ * Indirect read: write RegOffset into the CTL register, then read the
+ * value back from LSW.  The DEBUG variant also range-checks RegOffset
+ * (hard TEMAC registers live in 0x200..0x3b4) and traces the access
+ * with the indirect-access indent enabled.
+ */
+#define XLlTemac_ReadIndirectReg(BaseAddress, RegOffset) \
+( \
+	indent_on = 1, \
+	(((RegOffset) < 0x200) ? \
+		xdbg_printf(XDBG_DEBUG_ERROR, \
+			"readindirect: Woah! wrong reg addr: 0x%0x\n", \
+			(RegOffset)) : 0), \
+	(((RegOffset) > 0x3b4) ? \
+		xdbg_printf(XDBG_DEBUG_ERROR, \
+			"readindirect: Woah! wrong reg addr: 0x%0x\n", \
+			(RegOffset)) : 0), \
+	XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, (RegOffset)), \
+	_xlltemac_rir_value = XLlTemac_ReadReg((BaseAddress), XTE_LSW_OFFSET), \
+	XLlTemac_print_reg_i((BaseAddress), (RegOffset), _xlltemac_rir_value), \
+	indent_on = 0, \
+	_xlltemac_rir_value \
+)
+#else
+/* Indirect read without tracing: select via CTL, fetch from LSW. */
+#define XLlTemac_ReadIndirectReg(BaseAddress, RegOffset) \
+( \
+	XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, (RegOffset)), \
+	XLlTemac_ReadReg((BaseAddress), XTE_LSW_OFFSET) \
+)
+#endif
+
+/****************************************************************************/
+/**
+ *
+ * XLlTemac_WriteIndirectReg, writes <i>Data</i> to the hard TEMAC register
+ * specified by <i>RegOffset</i>.
+ *
+ * @param BaseAddress is the base address of the TEMAC channel.
+ * @param RegOffset is the offset of the hard TEMAC register to be written.
+ * @param Data is the 32-bit value to write to the register.
+ *
+ * @return N/A
+ *
+ * @note
+ * C-style signature:
+ * void XLlTemac_WriteIndirectReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+ *
+ *****************************************************************************/
+#ifdef DEBUG
+/*
+ * Indirect write (DEBUG variant): write Data into LSW, then write
+ * RegOffset | WEN into CTL to commit it to the hard TEMAC register.
+ * Range-checks RegOffset (hard TEMAC registers live in 0x200..0x3b4),
+ * traces the access, and reads the register back to verify the write
+ * (only when the RDY register reports hard access ready).
+ *
+ * Fixed: the two range-check error messages said "readindirect"
+ * (copy/paste from XLlTemac_ReadIndirectReg); they now say
+ * "writeindirect" so the failing macro can be identified from the log.
+ */
+#define XLlTemac_WriteIndirectReg(BaseAddress, RegOffset, Data) \
+( \
+	indent_on = 1, \
+	(((RegOffset) < 0x200) ? \
+		xdbg_printf(XDBG_DEBUG_ERROR, \
+			"writeindirect: Woah! wrong reg addr: 0x%0x\n", \
+			(RegOffset)) : 0), \
+	(((RegOffset) > 0x3b4) ? \
+		xdbg_printf(XDBG_DEBUG_ERROR, \
+			"writeindirect: Woah! wrong reg addr: 0x%0x\n", \
+			(RegOffset)) : 0), \
+	XLlTemac_print_reg_o((BaseAddress), (RegOffset), (Data)), \
+	XLlTemac_WriteReg((BaseAddress), XTE_LSW_OFFSET, (Data)), \
+	XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, \
+			((RegOffset) | XTE_CTL_WEN_MASK)), \
+	((XLlTemac_ReadReg((BaseAddress), XTE_RDY_OFFSET) & \
+	  XTE_RDY_HARD_ACS_RDY_MASK) ? \
+		((XLlTemac_ReadIndirectReg((BaseAddress), (RegOffset)) != (Data)) ? \
+			xdbg_printf(XDBG_DEBUG_ERROR, \
+				"data written is not read back: Reg: 0x%0x\n", \
+				(RegOffset)) \
+			: 0) \
+		: xdbg_printf(XDBG_DEBUG_ERROR, "(temac_wi) RDY reg not initially ready\n")), \
+	indent_on = 0 \
+)
+#else
+/*
+ * Indirect write without tracing: stage Data in LSW, commit via CTL|WEN.
+ * Fixed: the expansion is now wrapped in parentheses so it remains a
+ * single comma expression at the use site; the original expanded to two
+ * statements, which silently breaks under an unbraced if/else.
+ */
+#define XLlTemac_WriteIndirectReg(BaseAddress, RegOffset, Data) \
+( \
+	XLlTemac_WriteReg((BaseAddress), XTE_LSW_OFFSET, (Data)), \
+	XLlTemac_WriteReg((BaseAddress), XTE_CTL_OFFSET, \
+			((RegOffset) | XTE_CTL_WEN_MASK)) \
+)
+#endif
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/*
+ * Xilinx Ethernet: Linux driver for the XPS_LLTEMAC core.
+ *
+ * Author: Xilinx, Inc.
+ *
+ * 2006-2007 (c) Xilinx, Inc. This file is licensed under the terms of the GNU
+ * General Public License version 2.1. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 05/08/05 First release
+ * </pre>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/xilinx_devices.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_OF
+// For open firmware.
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xbasic_types.h"
+#include "xlltemac.h"
+#include "xllfifo.h"
+#include "xlldma.h"
+#include "xlldma_bdring.h"
+
+#define LOCAL_FEATURE_RX_CSUM 0x01
+
+/*
+ * Default SEND and RECV buffer descriptors (BD) numbers.
+ * BD Space needed is (XTE_SEND_BD_CNT+XTE_RECV_BD_CNT)*Sizeof(XLlDma_Bd).
+ * Each XLlDma_Bd instance currently takes 40 bytes.
+ */
+#define XTE_SEND_BD_CNT 256
+#define XTE_RECV_BD_CNT 256
+
+/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
+#define DRIVER_NAME "xilinx_lltemac"
+#define DRIVER_DESCRIPTION "Xilinx Tri-Mode Ethernet MAC driver"
+#define DRIVER_VERSION "1.00a"
+
+#define TX_TIMEOUT (3*HZ) /* Transmission timeout is 3 seconds. */
+
+/*
+ * This version of the Xilinx TEMAC uses external DMA or FIFO cores.
+ * Currently neither the DMA or FIFO cores used require any memory alignment
+ * restrictions.
+ */
+/*
+ * ALIGNMENT_RECV = the alignment required to receive
+ * ALIGNMENT_SEND = the alignment required to send
+ * ALIGNMENT_SEND_PERF = tx alignment for better performance
+ *
+ * ALIGNMENT_SEND is used to see if we *need* to copy the data to re-align.
+ * ALIGNMENT_SEND_PERF is used if we've decided we need to copy anyway, we just
+ * copy to this alignment for better performance.
+ */
+
+#define ALIGNMENT_RECV 32
+#define ALIGNMENT_SEND 8
+#define ALIGNMENT_SEND_PERF 32
+
+#define XTE_SEND 1
+#define XTE_RECV 2
+
+/* SGDMA buffer descriptors must be aligned on a 8-byte boundary. */
+#define ALIGNMENT_BD XLLDMA_BD_MINIMUM_ALIGNMENT
+
+/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
+#define BUFFER_ALIGNSEND(adr) ((ALIGNMENT_SEND - ((u32) adr)) % ALIGNMENT_SEND)
+#define BUFFER_ALIGNSEND_PERF(adr) ((ALIGNMENT_SEND_PERF - ((u32) adr)) % ALIGNMENT_SEND_PERF)
+#define BUFFER_ALIGNRECV(adr) ((ALIGNMENT_RECV - ((u32) adr)) % ALIGNMENT_RECV)
+
+/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+#define DFT_TX_THRESHOLD 24
+#define DFT_TX_WAITBOUND 254
+#define DFT_RX_THRESHOLD 4
+#define DFT_RX_WAITBOUND 254
+
+#define XTE_AUTOSTRIPPING 1
+
+/* Put Buffer Descriptors in BRAM?
+ * NOTE:
+ * Putting BDs in BRAM only works if there is only ONE instance of the TEMAC
+ * in hardware. The code does not handle multiple instances, e.g. it does
+ * not manage the memory in BRAM.
+ */
+#define BD_IN_BRAM 0
+#define BRAM_BASEADDR 0xffff8000
+
+
+/*
+ * Checksum offload macros
+ */
+#define BdCsumEnable(BdPtr) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET, \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)) | 1 )
+
+/* Used for debugging */
+#define BdCsumEnabled(BdPtr) \
+ ((XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)) & 1)
+
+#define BdCsumDisable(BdPtr) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET, \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)) & 0xFFFFFFFE )
+
+#define BdCsumSetup(BdPtr, Start, Insert) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_USR1_OFFSET, ((Start) << 16) | (Insert))
+
+/* Used for debugging */
+#define BdCsumInsert(BdPtr) \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_USR1_OFFSET) & 0xffff)
+
+#define BdCsumSeed(BdPtr, Seed) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_USR2_OFFSET, 0)
+
+#define BdCsumGet(BdPtr) \
+ XLlDma_mBdRead((BdPtr), XLLDMA_BD_USR3_OFFSET)
+
+#define BdGetRxLen(BdPtr) \
+ XLlDma_mBdRead((BdPtr), XLLDMA_BD_USR4_OFFSET)
+
+/*
+ * Our private per device data. When a net_device is allocated we will
+ * ask for enough extra space for this.
+ */
+struct net_local {
+	struct list_head rcv;	/* list node — presumably links this device onto receivedQueue; TODO confirm */
+	struct list_head xmit;	/* list node — presumably links this device onto sentQueue; TODO confirm */
+
+	struct net_device *ndev;	/* this device */
+	struct net_device *next_dev;	/* The next device in dev_list */
+	struct net_device_stats stats;	/* Statistics for this device */
+	struct timer_list phy_timer;	/* PHY monitoring timer */
+
+	u32 index;		/* Which interface is this */
+#if 0
+	XInterruptHandler Isr;	/* Pointer to the XLlTemac ISR routine */
+#endif
+	u8 gmii_addr;		/* The GMII address of the PHY */
+
+	/* The underlying OS independent code needs space as well. A
+	 * pointer to the following XLlTemac structure will be passed to
+	 * any XLlTemac_ function that requires it. However, we treat the
+	 * data as an opaque object in this file (meaning that we never
+	 * reference any of the fields inside of the structure). */
+	XLlFifo Fifo;		/* FIFO-mode hardware state (opaque here) */
+	XLlDma Dma;		/* SGDMA-mode hardware state (opaque here) */
+	XLlTemac Emac;		/* TEMAC hardware state (opaque here) */
+
+	unsigned int fifo_irq;	/* fifo irq */
+	unsigned int dma_irq_s;	/* send irq */
+	unsigned int dma_irq_r;	/* recv irq */
+	unsigned int max_frame_size;
+
+	int cur_speed;		/* current link speed in Mb/s (set by set_mac_speed) */
+
+	/* Buffer Descriptor space for both TX and RX BD ring */
+	void *desc_space;	/* virtual address of BD space */
+	dma_addr_t desc_space_handle;	/* physical address of BD space */
+	int desc_space_size;	/* size of BD space */
+
+	/* buffer for one skb in case no room is available for transmission */
+	struct sk_buff *deferred_skb;
+
+	/* send buffers for non tx-dre hw */
+	void **tx_orig_buffers;	/* Buffer addresses as returned by
+				   dma_alloc_coherent() */
+	void **tx_buffers;	/* Buffers addresses aligned for DMA */
+	dma_addr_t *tx_phys_buffers;	/* Buffer addresses in physical memory */
+	size_t tx_buffers_cur;	/* Index of current buffer used */
+
+	/* stats */
+	int max_frags_in_a_packet;
+	unsigned long realignments;	/* # of tx copies forced by alignment */
+	unsigned long tx_hw_csums;	/* # of tx checksums offloaded to hw */
+	unsigned long rx_hw_csums;	/* # of rx checksums offloaded to hw */
+	unsigned long local_features;	/* LOCAL_FEATURE_* flag bits */
+#if ! XTE_AUTOSTRIPPING
+	unsigned long stripping;
+#endif
+};
+
+u32 dma_rx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK;
+u32 dma_tx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK;
+
+/* for exclusion of all program flows (processes, ISRs and BHs) */
+spinlock_t XTE_spinlock = SPIN_LOCK_UNLOCKED;
+spinlock_t XTE_tx_spinlock = SPIN_LOCK_UNLOCKED;
+spinlock_t XTE_rx_spinlock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * ethtool has a status reporting feature where we can report any sort of
+ * status information we'd like. This is the list of strings used for that
+ * status reporting. ETH_GSTRING_LEN is defined in ethtool.h
+ */
+static char xenet_ethtool_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"txpkts", "txdropped", "txerr", "txfifoerr",
+	"rxpkts", "rxdropped", "rxerr", "rxfifoerr",
+	"rxrejerr", "max_frags", "tx_hw_csums", "rx_hw_csums",
+};
+
+/* Number of entries in the stats string table above.  Parenthesized so
+ * the macro expands safely inside larger expressions (the original
+ * unparenthesized sizeof(...)/ETH_GSTRING_LEN is a precedence hazard). */
+#define XENET_STATS_LEN (sizeof(xenet_ethtool_gstrings_stats) / ETH_GSTRING_LEN)
+
+/* Helper function to determine if a given XLlTemac error warrants a reset. */
+/* Return nonzero when XLlTemac status code s warrants a device reset. */
+extern inline int status_requires_reset(int s)
+{
+	switch (s) {
+	case XST_FIFO_ERROR:
+	case XST_PFIFO_DEADLOCK:
+	case XST_DMA_ERROR:
+	case XST_IPIF_ERROR:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* Queues with locks */
+static LIST_HEAD(receivedQueue);
+static spinlock_t receivedQueueSpin = SPIN_LOCK_UNLOCKED;
+
+static LIST_HEAD(sentQueue);
+static spinlock_t sentQueueSpin = SPIN_LOCK_UNLOCKED;
+
+
+/* from mii.h
+ *
+ * Items in mii.h but not in gmii.h
+ */
+#define ADVERTISE_100FULL 0x0100
+#define ADVERTISE_100HALF 0x0080
+#define ADVERTISE_10FULL 0x0040
+#define ADVERTISE_10HALF 0x0020
+#define ADVERTISE_CSMA 0x0001
+
+#define EX_ADVERTISE_1000FULL 0x0200
+#define EX_ADVERTISE_1000HALF 0x0100
+
+/*
+ * items not in mii.h nor gmii.h but should be
+ */
+#define MII_EXADVERTISE 0x09
+
+/*
+ * Wrap certain temac routines with a lock, so access to the shared hard temac
+ * interface is accessed mutually exclusive for dual channel temac support.
+ */
+
+/* Locked wrapper: start the TEMAC under the shared hard-TEMAC lock. */
+static inline void _XLlTemac_Start(XLlTemac *InstancePtr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_Start(InstancePtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: stop the TEMAC. */
+static inline void _XLlTemac_Stop(XLlTemac *InstancePtr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_Stop(InstancePtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: reset the TEMAC; HardCoreAction is forwarded verbatim. */
+static inline void _XLlTemac_Reset(XLlTemac *InstancePtr, int HardCoreAction)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_Reset(InstancePtr, HardCoreAction);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: set the station MAC address; returns XLlTemac status. */
+static inline int _XLlTemac_SetMacAddress(XLlTemac *InstancePtr,
+					  void *AddressPtr)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_SetMacAddress(InstancePtr, AddressPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+/* Locked wrapper: read the station MAC address into AddressPtr. */
+static inline void _XLlTemac_GetMacAddress(XLlTemac *InstancePtr,
+					   void *AddressPtr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_GetMacAddress(InstancePtr, AddressPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: enable option flags; returns XLlTemac status. */
+static inline int _XLlTemac_SetOptions(XLlTemac *InstancePtr, u32 Options)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_SetOptions(InstancePtr, Options);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+/* Locked wrapper: disable option flags; returns XLlTemac status. */
+static inline int _XLlTemac_ClearOptions(XLlTemac *InstancePtr, u32 Options)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_ClearOptions(InstancePtr, Options);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+/* Locked wrapper: return the current TEMAC operating speed. */
+static inline u16 _XLlTemac_GetOperatingSpeed(XLlTemac *InstancePtr)
+{
+	u16 speed;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	speed = XLlTemac_GetOperatingSpeed(InstancePtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return speed;
+}
+
+/*
+ * Locked wrapper: set the TEMAC operating speed, then wait for the PHY
+ * to settle.  Without the delay the PHY is not ready for the next access.
+ */
+static inline void _XLlTemac_SetOperatingSpeed(XLlTemac *InstancePtr, u16 Speed)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_SetOperatingSpeed(InstancePtr, Speed);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	/* We need a delay after we set the speed. Otherwise the PHY will not be
+	 * ready.  Use mdelay(10) rather than udelay(10000): udelay() is not
+	 * intended for multi-millisecond delays and can overflow on some
+	 * architectures. */
+	mdelay(10);
+}
+
+/* Locked wrapper: program the MDIO clock divisor. */
+static inline void _XLlTemac_PhySetMdioDivisor(XLlTemac *InstancePtr, u8 Divisor)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_PhySetMdioDivisor(InstancePtr, Divisor);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: read PHY register RegisterNum into *PhyDataPtr. */
+static inline void _XLlTemac_PhyRead(XLlTemac *InstancePtr, u32 PhyAddress,
+				     u32 RegisterNum, u16 *PhyDataPtr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_PhyRead(InstancePtr, PhyAddress, RegisterNum, PhyDataPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: write PhyData to PHY register RegisterNum. */
+static inline void _XLlTemac_PhyWrite(XLlTemac *InstancePtr, u32 PhyAddress,
+				      u32 RegisterNum, u16 PhyData)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_PhyWrite(InstancePtr, PhyAddress, RegisterNum, PhyData);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+
+/* Locked wrapper: clear a multicast table entry; returns XLlTemac status. */
+static inline int _XLlTemac_MulticastClear(XLlTemac *InstancePtr, int Entry)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_MulticastClear(InstancePtr, Entry);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+/* Locked wrapper: set the pause-frame source address; returns status. */
+static inline int _XLlTemac_SetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_SetMacPauseAddress(InstancePtr, AddressPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+/* Locked wrapper: read the pause-frame source address into AddressPtr. */
+static inline void _XLlTemac_GetMacPauseAddress(XLlTemac *InstancePtr, void *AddressPtr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	XLlTemac_GetMacPauseAddress(InstancePtr, AddressPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/* Locked wrapper: query SGMII link speed; returns XLlTemac status. */
+static inline int _XLlTemac_GetSgmiiStatus(XLlTemac *InstancePtr, u16 *SpeedPtr)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_GetSgmiiStatus(InstancePtr, SpeedPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+/* Locked wrapper: query RGMII speed/duplex/link; returns XLlTemac status. */
+static inline int _XLlTemac_GetRgmiiStatus(XLlTemac *InstancePtr,
+					   u16 *SpeedPtr,
+					   int *IsFullDuplexPtr,
+					   int *IsLinkUpPtr)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	status = XLlTemac_GetRgmiiStatus(InstancePtr, SpeedPtr, IsFullDuplexPtr, IsLinkUpPtr);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	return status;
+}
+
+
+#ifdef CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII
+#define MARVELL_88E1111_EXTENDED_PHY_CTL_REG_OFFSET 20
+#define MARVELL_88E1111_EXTENDED_PHY_STATUS_REG_OFFSET 27
+#endif
+
+#define DEBUG_ERROR KERN_ERR
+#define DEBUG_LOG(level, ...) printk(level __VA_ARGS__)
+
+/*
+ * Perform any necessary special phy setup. In the gmii case, nothing needs to
+ * be done.
+ */
+static void phy_setup(struct net_local *lp)
+{
+#ifdef CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII
+	/* NOTE(review): the bit-field commentary below is transcribed from
+	 * the Marvell 88E1111 datasheet — verify against the datasheet
+	 * revision in use; some lines (e.g. "Transmitter Disable ...
+	 * 1 = enabled") read inconsistently. */
+	u16 Register;
+
+	/*
+	 * Set up MAC interface
+	 *
+	 * Write 0x0cc3 to reg 20 in PHY
+	 *     5432 1098 7654 3210
+	 *     ---- ---- ---- ----
+	 * 0cc3=0000 1100 1100 0011
+	 * downshift counter (bits 11-9): 110 = 7 times
+	 * downshift enable (bit 8): 0 = enable
+	 * RGMII timing control (bit 7): 1 = add delay to rx clk ro rxd
+	 *                               outputs
+	 * Default Mac interface speed (bits 6-4): 100 = 10mbps 2.5 mhz
+	 *       (between phy and temac - gets renegotiated)
+	 * reserved (bit 3)
+	 * DTE detect (bit 2): 0 disabled
+	 * RGMII transmit timing control (bit 1): 1 = add delay
+	 *                               to tx clk ro txd outputs
+	 * Transmitter Disable (bit 0): 1 = enabled
+	 */
+	_XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MARVELL_88E1111_EXTENDED_PHY_CTL_REG_OFFSET, 0x0cc3);
+
+	/*
+	 * Set RGMII to copper with correct hysterisis and correct mode
+	 * Disable fiber/copper auto sel, choose copper
+	 * RGMII /Modified MII to copper mode
+	 *
+	 * Write 0x848b to reg 27
+	 *     5432 1098 7654 3210
+	 *     ---- ---- ---- ----
+	 * 848b=1000 0100 1000 1011
+	 * Fiber/Copper Auto Selection (bit 15): 1 = disable auto selection
+	 * Interrupt Polarity (bit 10): 1 = int active low
+	 * DTE detect status drop hysteresis (bts 8-5): 0100 = report 20s after DTE power status drop
+	 * HWCFG mode (bits 3-0): 1011 = RGMII/Modified MII to Copper
+	 */
+	_XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MARVELL_88E1111_EXTENDED_PHY_STATUS_REG_OFFSET, 0x848b);
+
+	/*
+	 * Reset the PHY
+	 */
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &Register);
+	Register |= BMCR_RESET;
+	_XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_BMCR, Register);
+
+#endif /* CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_RGMII */
+}
+
+
+typedef enum DUPLEX { UNKNOWN_DUPLEX, HALF_DUPLEX, FULL_DUPLEX } DUPLEX;
+
+/*
+ * Renegotiate the PHY link to the requested speed/duplex by advertising
+ * only that mode and restarting autonegotiation.
+ *
+ * dev:    network device whose PHY is reprogrammed
+ * speed:  10, 100 or 1000 (Mb/s)
+ * duplex: FULL_DUPLEX for full duplex; anything else selects half duplex
+ *
+ * Returns 0 when the link comes up at the requested speed, -1 for an
+ * unsupported speed or when negotiation fails after all retries.
+ *
+ * Fixed: the original returned -1 from inside the retry loop on the
+ * first failed negotiation, making the `retries` loop dead code; a
+ * failed attempt now logs and retries, and -1 is returned only after
+ * all retries are exhausted.
+ */
+int renegotiate_speed(struct net_device *dev, int speed, DUPLEX duplex)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int retries = 2;
+	int wait_count;
+	u16 phy_reg0 = BMCR_ANENABLE | BMCR_ANRESTART;
+	u16 phy_reg1;
+	u16 phy_reg4;
+	u16 phy_reg9 = 0;
+
+	/*
+	 * It appears that the 10baset full and half duplex settings
+	 * are overloaded for gigabit ethernet
+	 */
+	if ((duplex == FULL_DUPLEX) && (speed == 10)) {
+		phy_reg4 = ADVERTISE_10FULL | ADVERTISE_CSMA;
+	}
+	else if ((duplex == FULL_DUPLEX) && (speed == 100)) {
+		phy_reg4 = ADVERTISE_100FULL | ADVERTISE_CSMA;
+	}
+	else if ((duplex == FULL_DUPLEX) && (speed == 1000)) {
+		phy_reg4 = ADVERTISE_CSMA;
+		phy_reg9 = EX_ADVERTISE_1000FULL;
+	}
+	else if (speed == 10) {
+		phy_reg4 = ADVERTISE_10HALF | ADVERTISE_CSMA;
+	}
+	else if (speed == 100) {
+		phy_reg4 = ADVERTISE_100HALF | ADVERTISE_CSMA;
+	}
+	else if (speed == 1000) {
+		phy_reg4 = ADVERTISE_CSMA;
+		phy_reg9 = EX_ADVERTISE_1000HALF;
+	}
+	else {
+		printk(KERN_ERR
+		       "%s: XLlTemac: unsupported speed requested: %d\n",
+		       dev->name, speed);
+		return -1;
+	}
+
+	/*
+	 * link status in register 1 is latched-low, so read it twice:
+	 * first read / second read:
+	 *         0           0           link is down
+	 *         0           1           link is up (but it was down earlier)
+	 *         1           0           link is down (but it was just up)
+	 *         1           1           link is up
+	 */
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_reg1);
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_reg1);
+	_XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_ADVERTISE, phy_reg4);
+	_XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_EXADVERTISE, phy_reg9);
+
+	while (retries--) {
+		/* initiate an autonegotiation of the speed */
+		_XLlTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_BMCR, phy_reg0);
+
+		wait_count = 20;	/* so we don't loop forever */
+		while (wait_count--) {
+			/* wait a bit for the negotiation to complete */
+			mdelay(500);
+			_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR,
+					  &phy_reg1);
+			_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR,
+					  &phy_reg1);
+
+			if ((phy_reg1 & BMSR_LSTATUS) &&
+			    (phy_reg1 & BMSR_ANEGCOMPLETE))
+				break;
+
+		}
+
+		if (phy_reg1 & BMSR_LSTATUS) {
+			printk(KERN_INFO
+			       "%s: XLlTemac: We renegotiated the speed to: %d\n",
+			       dev->name, speed);
+			return 0;
+		}
+
+		/* This attempt failed; log it and let the loop retry. */
+		printk(KERN_ERR
+		       "%s: XLlTemac: Not able to set the speed to %d (status: 0x%0x)\n",
+		       dev->name, speed, phy_reg1);
+	}
+
+	printk(KERN_ERR
+	       "%s: XLlTemac: Not able to set the speed to %d\n", dev->name,
+	       speed);
+	return -1;
+}
+
+/*
+ * This function sets up MAC's speed according to link speed of PHY
+ *
+ * Determines the negotiated PHY link speed (directly from the Marvell
+ * 88E1111 status register when that config option is set, otherwise by
+ * renegotiating downward from the fastest supported rate) and programs
+ * the TEMAC to match.  Caches the result in lp->cur_speed (0 on failure).
+ */
+void set_mac_speed(struct net_local *lp)
+{
+	u16 phylinkspeed;
+	struct net_device *dev = lp->ndev;
+
+#ifdef CONFIG_XILINX_LLTEMAC_MARVELL_88E1111_GMII
+	/*
+	 * This function is specific to MARVELL 88E1111 PHY chip on
+	 * many Xilinx boards and assumes GMII interface is being used
+	 * by the TEMAC.
+	 */
+
+	/* Marvell 88E1111 PHY-specific status register and its link-speed field */
+#define MARVELL_88E1111_PHY_SPECIFIC_STATUS_REG_OFFSET	17
+#define MARVELL_88E1111_LINKSPEED_MARK			0xC000
+#define MARVELL_88E1111_LINKSPEED_SHIFT			14
+#define MARVELL_88E1111_LINKSPEED_1000M			0x0002
+#define MARVELL_88E1111_LINKSPEED_100M			0x0001
+#define MARVELL_88E1111_LINKSPEED_10M			0x0000
+	u16 RegValue;
+
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+			  MARVELL_88E1111_PHY_SPECIFIC_STATUS_REG_OFFSET,
+			  &RegValue);
+	/* Get current link speed */
+	phylinkspeed = (RegValue & MARVELL_88E1111_LINKSPEED_MARK)
+		>> MARVELL_88E1111_LINKSPEED_SHIFT;
+
+	/* Update TEMAC speed accordingly */
+	switch (phylinkspeed) {
+	case (MARVELL_88E1111_LINKSPEED_1000M):
+		_XLlTemac_SetOperatingSpeed(&lp->Emac, 1000);
+		printk(KERN_INFO "%s: XLlTemac: speed set to 1000Mb/s\n",
+		       dev->name);
+		lp->cur_speed = 1000;
+		break;
+	case (MARVELL_88E1111_LINKSPEED_100M):
+		_XLlTemac_SetOperatingSpeed(&lp->Emac, 100);
+		printk(KERN_INFO "%s: XLlTemac: speed set to 100Mb/s\n",
+		       dev->name);
+		lp->cur_speed = 100;
+		break;
+	case (MARVELL_88E1111_LINKSPEED_10M):
+		_XLlTemac_SetOperatingSpeed(&lp->Emac, 10);
+		printk(KERN_INFO "%s: XLlTemac: speed set to 10Mb/s\n",
+		       dev->name);
+		lp->cur_speed = 10;
+		break;
+	default:
+		/* Unrecognized field value: fall back to gigabit. */
+		_XLlTemac_SetOperatingSpeed(&lp->Emac, 1000);
+		printk(KERN_INFO "%s: XLlTemac: speed defaults to 1000Mb/s\n",
+		       dev->name);
+		lp->cur_speed = 1000;
+		break;
+	}
+
+#else	/* generic PHY, there have been issues with 10Mbit with this code */
+	int ret;
+	int retry_count = 1;
+
+	/* MII tops out at 100Mb/s; anything else is assumed gigabit-capable. */
+	if (XLlTemac_GetPhysicalInterface(&lp->Emac) == XTE_PHY_TYPE_MII) {
+		phylinkspeed = 100;
+	}
+	else {
+		phylinkspeed = 1000;
+	}
+
+	/*
+	 * Try to renegotiate the speed until something sticks
+	 */
+	while (phylinkspeed > 1) {
+		ret = renegotiate_speed(dev, phylinkspeed, FULL_DUPLEX);
+		/*
+		 * ret == 1 - try it again
+		 * ret == 0 - it worked
+		 * ret < 0 - there was some failure negotiating the speed
+		 *
+		 * NOTE(review): the visible returns of renegotiate_speed()
+		 * are only 0 and -1, so the "ret == 1" retry path below
+		 * looks unreachable — confirm against its full definition.
+		 */
+		if (ret == 0) {
+			/* it worked, get out of the loop */
+			break;
+		}
+
+		/* it didn't work this time, but it may work if we try again */
+		if ((ret == 1) && (retry_count)) {
+			retry_count--;
+			printk("trying again...\n");
+			continue;
+		}
+		/* reset the retry_count, because we're about to try a lower speed */
+		retry_count = 1;
+		phylinkspeed /= 10;
+	}
+	/* phylinkspeed == 1 means every speed (1000/100/10) was exhausted. */
+	if (phylinkspeed == 1) {
+		printk(KERN_INFO "%s: XLlTemac: could not negotiate speed\n",
+		       dev->name);
+		lp->cur_speed = 0;
+
+		return;
+	}
+
+	_XLlTemac_SetOperatingSpeed(&lp->Emac, phylinkspeed);
+	printk(KERN_INFO "%s: XLlTemac: speed set to %dMb/s\n", dev->name,
+	       phylinkspeed);
+	lp->cur_speed = phylinkspeed;
+#endif
+}
+
+/*
+ * Helper function to reset the underlying hardware. This is called
+ * when we get into such deep trouble that we don't know how to handle
+ * otherwise.
+ *
+ * Sequence: stop the queue and device, capture the settings a reset
+ * would lose (options, DMA coalescing), reset the DMA/FIFO and TEMAC,
+ * restore the settings, drop any deferred skb, restart.  line_num
+ * identifies the call site for the log message.
+ */
+static void reset(struct net_device *dev, u32 line_num)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u32 TxThreshold, TxWaitBound, RxThreshold, RxWaitBound;
+	u32 Options;
+	static u32 reset_cnt = 0;	/* total resets since load, for the log */
+	int status;
+
+	printk(KERN_INFO "%s: XLlTemac: resets (#%u) from adapter code line %d\n",
+	       dev->name, ++reset_cnt, line_num);
+
+	/* Shouldn't really be necessary, but shouldn't hurt. */
+	netif_stop_queue(dev);
+
+	/* Stop device */
+	_XLlTemac_Stop(&lp->Emac);
+
+	/*
+	 * XLlTemac_Reset puts the device back to the default state. We need
+	 * to save all the settings we don't already know, reset, restore
+	 * the settings, and then restart the TEMAC.
+	 */
+	Options = XLlTemac_GetOptions(&lp->Emac);
+
+	/*
+	 * Capture the dma coalesce settings (if needed) and reset the
+	 * connected core, dma or fifo
+	 */
+	if (XLlTemac_IsDma(&lp->Emac)) {
+		XLlDma_BdRingGetCoalesce(&XLlDma_mGetRxRing(&lp->Dma),
+					 &RxThreshold, &RxWaitBound);
+		XLlDma_BdRingGetCoalesce(&XLlDma_mGetTxRing(&lp->Dma),
+					 &TxThreshold, &TxWaitBound);
+
+		XLlDma_Reset(&lp->Dma);
+	} else {
+		XLlFifo_Reset(&lp->Fifo);
+	}
+
+	/* now we can reset the device */
+	_XLlTemac_Reset(&lp->Emac, XTE_NORESET_HARD);
+
+	/* Reset on TEMAC also resets PHY. Give it some time to finish negotiation
+	 * before we move on.  NOTE: this busy-waits 2s with the queue stopped. */
+	mdelay(2000);
+
+	/*
+	 * The following four functions will return an error if the
+	 * EMAC is already started. We just stopped it by calling
+	 * _XLlTemac_Reset() so we can safely ignore the return values.
+	 */
+	(int) _XLlTemac_SetMacAddress(&lp->Emac, dev->dev_addr);
+	(int) _XLlTemac_SetOptions(&lp->Emac, Options);
+	(int) _XLlTemac_ClearOptions(&lp->Emac, ~Options);
+	Options = XLlTemac_GetOptions(&lp->Emac);
+	printk(KERN_INFO "%s: XLlTemac: Options: 0x%x\n", dev->name, Options);
+
+	phy_setup(lp);
+	set_mac_speed(lp);
+
+	if (XLlTemac_IsDma(&lp->Emac)) {	/* SG DMA mode */
+		/* Restore the coalescing values captured before the reset. */
+		status = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing,
+						  RxThreshold, RxWaitBound);
+		status |= XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing,
+						   TxThreshold, TxWaitBound);
+		if (status != XST_SUCCESS) {
+			/* Print the error, but keep on going as it's not a fatal error. */
+			printk(KERN_ERR "%s: XLlTemac: error setting coalesce values (probably out of range). status: %d\n",
+			       dev->name, status);
+		}
+		XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+		XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+	} else {	/* FIFO interrupt mode */
+		XLlFifo_IntEnable(&lp->Fifo, XLLF_INT_TC_MASK |
+				  XLLF_INT_RC_MASK | XLLF_INT_RXERROR_MASK |
+				  XLLF_INT_TXERROR_MASK);
+	}
+	XLlTemac_IntDisable(&lp->Emac, XTE_INT_ALL_MASK);
+
+	/* A transmit that was waiting for room is lost by the reset; count it. */
+	if (lp->deferred_skb) {
+		dev_kfree_skb_any(lp->deferred_skb);
+		lp->deferred_skb = NULL;
+		lp->stats.tx_errors++;
+	}
+
+	/*
+	 * XLlTemac_Start returns an error when: if configured for
+	 * scatter-gather DMA and a descriptor list has not yet been created
+	 * for the send or receive channel, or if no receive buffer descriptors
+	 * have been initialized. Those are not happening. so ignore the returned
+	 * result checking.
+	 */
+	_XLlTemac_Start(&lp->Emac);
+
+	/* We're all ready to go. Start the queue in case it was stopped. */
+	netif_wake_queue(dev);
+}
+
+/*
+ * The PHY registers read here should be standard registers in all PHY chips
+ *
+ * Reports link state via *linkup (non-zero when BMSR reports link up).
+ * *duplex is currently hard-coded to FULL_DUPLEX; the BMCR read result is
+ * not examined.  Always returns 0.
+ *
+ * Fix: both register-read calls passed the mojibake sequence "®" where
+ * "&reg" (address of the local result variable) was intended — an
+ * HTML-entity corruption of the source that could not compile.
+ */
+static int get_phy_status(struct net_device *dev, DUPLEX * duplex, int *linkup)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u16 reg;
+
+	/* NOTE(review): the BMCR value is read but ignored; duplex is not
+	 * actually derived from the PHY — confirm this is intentional. */
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &reg);
+	*duplex = FULL_DUPLEX;
+
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &reg);
+	*linkup = (reg & BMSR_LSTATUS) != 0;
+
+	return 0;
+}
+
+/*
+ * This routine is used for two purposes. The first is to keep the
+ * EMAC's duplex setting in sync with the PHY's. The second is to keep
+ * the system apprised of the state of the link. Note that this driver
+ * does not configure the PHY. Either the PHY should be configured for
+ * auto-negotiation or it should be handled by something like mii-tool.
+ *
+ * Runs as a self-rearming timer callback every 2 seconds; data is the
+ * net_device pointer cast to unsigned long.
+ */
+static void poll_gmii(unsigned long data)
+{
+	struct net_device *dev;
+	struct net_local *lp;
+	DUPLEX phy_duplex;
+	int phy_carrier;
+	int netif_carrier;
+
+	dev = (struct net_device *) data;
+	lp = (struct net_local *) dev->priv;
+
+	/* First, find out what's going on with the PHY. */
+	if (get_phy_status(dev, &phy_duplex, &phy_carrier)) {
+		/* Bail without rearming the timer: monitoring stops for good. */
+		printk(KERN_ERR "%s: XLlTemac: terminating link monitoring.\n",
+		       dev->name);
+		return;
+	}
+	/* Reconcile the stack's notion of carrier with the PHY's. */
+	netif_carrier = netif_carrier_ok(dev) != 0;
+	if (phy_carrier != netif_carrier) {
+		if (phy_carrier) {
+			/* Link came back: re-sync MAC speed before reporting up. */
+			set_mac_speed(lp);
+			printk(KERN_INFO
+			       "%s: XLlTemac: PHY Link carrier restored.\n",
+			       dev->name);
+			netif_carrier_on(dev);
+		}
+		else {
+			printk(KERN_INFO "%s: XLlTemac: PHY Link carrier lost.\n",
+			       dev->name);
+			netif_carrier_off(dev);
+		}
+	}
+
+	/* Set up the timer so we'll get called again in 2 seconds. */
+	lp->phy_timer.expires = jiffies + 2 * HZ;
+	add_timer(&lp->phy_timer);
+}
+
+/*
+ * ISR for the TEMAC core's own interrupt line.  Clears every pending
+ * TEMAC interrupt and accounts the event as a rejected/CRC-errored
+ * receive frame.
+ */
+static irqreturn_t xenet_temac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	/*
+	 * All we care about here is the RxRject interrupts. Explanation below:
+	 *
+	 * Interrupt Usage Description
+	 * --------- -----------------
+	 * TxCmplt: Fifo or DMA will have completion interrupts. We'll use
+	 *        those and not the TEMAC ones.
+	 * RxFifoOvr: if the RX fifo is overflowing, the last thing we need
+	 *        is more interrupts to handle.
+	 * RxRJect: We're keeping stats on rejected packets (we could
+	 *        choose not to).
+	 * RxCmplt: Fifo or DMA will have completion interrupts. We'll use
+	 *        those and not the TEMAC ones.
+	 * AutoNeg: This driver doesn't make use of the autonegotation
+	 *        completion interrupt.
+	 * HardAcsCmplt: This driver just polls the RDY register for this
+	 *        information instead of using an interrupt handler.
+	 * CfgWst, CfgRst,
+	 * AfWst, AfRst,
+	 * MiimWst, MiimRst,
+	 * FabrRst: All of these registers indicate when access (read or
+	 *        write) to one or other of the Hard Temac Core
+	 *        registers is complete. Instead of relying on an
+	 *        interrupt context switch to be notified that the
+	 *        access is complete, this driver instead polls for the
+	 *        status, which, in most cases, should be faster.
+	 */
+	XLlTemac_IntClear(&lp->Emac, XTE_INT_ALL_MASK);
+
+	/* NOTE(review): any interrupt reaching here is booked as an rx
+	 * CRC/reject error; this assumes RxRject is the only enabled
+	 * source — confirm against the interrupt-enable configuration. */
+	lp->stats.rx_errors++;
+	lp->stats.rx_crc_errors++;
+
+
+	return IRQ_HANDLED;
+}
+
+/* FIFO-mode completion handlers: Tx runs inline from the ISR, Rx is
+ * deferred to the FifoRecvBH tasklet (bottom half). */
+static void FifoSendHandler(struct net_device *dev);
+static void FifoRecvHandler(unsigned long p /*struct net_device *dev*/);
+
+DECLARE_TASKLET(FifoRecvBH, FifoRecvHandler, 0);
+
+/*
+ * ISR for FIFO (non-DMA) mode.  Reads and clears the FIFO interrupt
+ * status, then dispatches one event class per loop iteration: receive
+ * completions are queued for the FifoRecvBH tasklet, transmit
+ * completions are handled inline, and either error resets the FIFO.
+ */
+static irqreturn_t xenet_fifo_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u32 irq_status;
+
+	unsigned long flags;
+
+	/*
+	 * Need to:
+	 * 1) Read the FIFO IS register
+	 * 2) clear all bits in the FIFO IS register
+	 * 3) loop on each bit in the IS register, and handle each interrupt event
+	 *
+	 */
+	irq_status = XLlFifo_IntPending(&lp->Fifo);
+	XLlFifo_IntClear(&lp->Fifo, irq_status);
+	while (irq_status) {
+		if (irq_status & XLLF_INT_RC_MASK) {
+			/* handle the receive completion: enqueue this device
+			 * for the tasklet unless it is already queued */
+			struct list_head *cur_lp;
+			spin_lock_irqsave(&receivedQueueSpin, flags);
+			list_for_each(cur_lp, &receivedQueue) {
+				if (cur_lp == &(lp->rcv)) {
+					break;
+				}
+			}
+			if (cur_lp != &(lp->rcv)) {
+				list_add_tail(&lp->rcv, &receivedQueue);
+				/* mask further FIFO irqs until the BH drains */
+				XLlFifo_IntDisable(&lp->Fifo, XLLF_INT_ALL_MASK);
+				tasklet_schedule(&FifoRecvBH);
+			}
+			spin_unlock_irqrestore(&receivedQueueSpin, flags);
+			irq_status &= ~XLLF_INT_RC_MASK;
+		} else if (irq_status & XLLF_INT_TC_MASK) {
+			/* handle the transmit completion */
+			FifoSendHandler(dev);
+			irq_status &= ~XLLF_INT_TC_MASK;
+		} else if (irq_status & XLLF_INT_TXERROR_MASK) {
+			lp->stats.tx_errors++;
+			lp->stats.tx_fifo_errors++;
+			XLlFifo_Reset(&lp->Fifo);
+			irq_status &= ~XLLF_INT_TXERROR_MASK;
+		} else if (irq_status & XLLF_INT_RXERROR_MASK) {
+			lp->stats.rx_errors++;
+			XLlFifo_Reset(&lp->Fifo);
+			irq_status &= ~XLLF_INT_RXERROR_MASK;
+		} else {
+			/* unrecognized bits: loop exits once irq_status is 0 */
+			/* debug
+			 * if (irq_status == 0) printk("Temac: spurious fifo int\n");
+			 */
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* The callback function for completed frames sent in SGDMA mode. */
+/* Both DMA directions defer their real work to tasklets scheduled
+ * from the DMA ISRs below. */
+static void DmaSendHandlerBH(unsigned long p);
+static void DmaRecvHandlerBH(unsigned long p);
+
+DECLARE_TASKLET(DmaSendBH, DmaSendHandlerBH, 0);
+DECLARE_TASKLET(DmaRecvBH, DmaRecvHandlerBH, 0);
+
+/*
+ * RX ISR for SG DMA mode.  Acks pending BD-ring interrupts; on any DMA
+ * error resets the engine, otherwise enqueues this device (once) on
+ * receivedQueue, masks further RX BD interrupts, and schedules the
+ * DmaRecvBH tasklet to do the actual receive processing.
+ */
+static irqreturn_t xenet_dma_rx_interrupt(int irq, void *dev_id)
+{
+	u32 irq_status;
+	struct net_device *dev = dev_id;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct list_head *cur_lp;
+
+	unsigned long flags;
+
+	/* Read pending interrupts */
+	irq_status = XLlDma_mBdRingGetIrq(&lp->Dma.RxBdRing);
+
+	XLlDma_mBdRingAckIrq(&lp->Dma.RxBdRing, irq_status);
+
+	if ((irq_status & XLLDMA_IRQ_ALL_ERR_MASK)) {
+		XLlDma_Reset(&lp->Dma);
+		return IRQ_HANDLED;
+	}
+	if ((irq_status & (XLLDMA_IRQ_DELAY_MASK | XLLDMA_IRQ_COALESCE_MASK))) {
+		/* Only queue the device if it is not already on the list. */
+		spin_lock_irqsave(&receivedQueueSpin, flags);
+		list_for_each(cur_lp, &receivedQueue) {
+			if (cur_lp == &(lp->rcv)) {
+				break;
+			}
+		}
+		if (cur_lp != &(lp->rcv)) {
+			list_add_tail(&lp->rcv, &receivedQueue);
+			/* BH re-enables these once the ring is drained */
+			XLlDma_mBdRingIntDisable(&lp->Dma.RxBdRing,
+						 XLLDMA_CR_IRQ_ALL_EN_MASK);
+			tasklet_schedule(&DmaRecvBH);
+		}
+		spin_unlock_irqrestore(&receivedQueueSpin, flags);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * TX ISR for SG DMA mode.  Mirror of the RX ISR: ack, reset the DMA on
+ * error, otherwise enqueue this device (once) on sentQueue, mask TX BD
+ * interrupts, and schedule the DmaSendBH tasklet to reclaim descriptors.
+ */
+static irqreturn_t xenet_dma_tx_interrupt(int irq, void *dev_id)
+{
+	u32 irq_status;
+	struct net_device *dev = dev_id;
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct list_head *cur_lp;
+
+	unsigned long flags;
+
+	/* Read pending interrupts */
+	irq_status = XLlDma_mBdRingGetIrq(&(lp->Dma.TxBdRing));
+
+	XLlDma_mBdRingAckIrq(&(lp->Dma.TxBdRing), irq_status);
+
+	if ((irq_status & XLLDMA_IRQ_ALL_ERR_MASK)) {
+		XLlDma_Reset(&lp->Dma);
+		return IRQ_HANDLED;
+	}
+
+	if ((irq_status & (XLLDMA_IRQ_DELAY_MASK | XLLDMA_IRQ_COALESCE_MASK))) {
+		/* Only queue the device if it is not already on the list. */
+		spin_lock_irqsave(&sentQueueSpin, flags);
+		list_for_each(cur_lp, &sentQueue) {
+			if (cur_lp == &(lp->xmit)) {
+				break;
+			}
+		}
+		if (cur_lp != &(lp->xmit)) {
+			list_add_tail(&lp->xmit, &sentQueue);
+			/* BH re-enables these once descriptors are reclaimed */
+			XLlDma_mBdRingIntDisable(&lp->Dma.TxBdRing,
+						 XLLDMA_CR_IRQ_ALL_EN_MASK);
+			tasklet_schedule(&DmaSendBH);
+		}
+		spin_unlock_irqrestore(&sentQueueSpin, flags);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Q:
+ * Why doesn't this linux driver use an interrupt handler for the TEMAC itself?
+ *
+ * A:
+ * Let's take a look at all the possible events that could be signaled by the
+ * TEMAC core.
+ *
+ * possible events:
+ * Transmit Complete (TxCmplt) [not handled by this driver]
+ * The TEMAC TxCmplt interrupt status is ignored by software in favor of
+ * paying attention to the transmit complete status in the connected DMA
+ * or FIFO core.
+ * Receive Fifo Overflow (RxFifoOver) [not handled by this driver]
+ * We have discovered that the overhead of an interrupt context switch
+ * to attempt to handle this sort of event actually worsens the
+ * condition, and causes further dropped packets, further increasing the
+ * time spent in this interrupt handler.
+ * Receive Frame Rejected (RxRject) [not handled by this driver]
+ * We could possibly handle this interrupt and gather statistics
+ * information based on these events that occur. However it is not that
+ * critical.
+ * Receive Complete (RxCmplt) [not handled by this driver]
+ * The TEMAC RxCmplt interrupt status is ignored by software in favor of
+ * paying attention to the receive complete status in the connected DMA
+ * or FIFO core.
+ * Autonegotiation Complete (AutoNeg) [not handled by this driver]
+ * Autonegotiation on the TEMAC is a bit complicated, and is handled in
+ * a way that does not require the use of this interrupt event.
+ * Hard Temac Core Access Complete (HardAcsCmplt) [not handled by this driver]
+ * This event really just indicates if there are any events in the TIS
+ * register. As can be seen below, none of the events from the TIS
+ * register are handled, so there is no need to handle this event
+ * either.
+ * Configuration Write Complete (CfgWst) [not handled by this driver]
+ * Configuration Read Complete (CfgRst) [not handled by this driver]
+ * Address Filter Write Complete (AfWst) [not handled by this driver]
+ * Address Filter Read Complete (AfRst) [not handled by this driver]
+ * MII Management Write Complete (MiimWst) [not handled by this driver]
+ * MII Management Read Complete (MiimRst) [not handled by this driver]
+ * Fabric Read Complete (FabrRst) [not handled by this driver]
+ * All of the above registers indicate when access (read or write) to
+ * one or other of the Hard Temac Core registers is complete. Instead of
+ * relying on an interrupt context switch to be notified that the access
+ * is complete, this driver instead polls for the status, which, in most
+ * cases, should be faster.
+ */
+
+/*
+ * net_device open hook.  Programs the MAC address and options, requests
+ * the TEMAC plus either the DMA tx/rx or FIFO interrupt lines, performs
+ * PHY setup/speed negotiation, starts the TEMAC (and DMA rings), and
+ * arms the PHY-monitoring timer.
+ *
+ * Fix: every error path after the first successful request_irq()
+ * previously returned without releasing the IRQs already acquired (and
+ * the BdRingStart failures also left the TEMAC started).  Unwind is now
+ * handled by the goto cleanup ladder at the bottom.
+ */
+static int xenet_open(struct net_device *dev)
+{
+	struct net_local *lp;
+	u32 Options;
+	int irqval = 0;
+
+	/*
+	 * Just to be safe, stop TX queue and the device first. If the device is
+	 * already stopped, an error will be returned. In this case, we don't
+	 * really care.
+	 */
+	netif_stop_queue(dev);
+	lp = (struct net_local *) dev->priv;
+	_XLlTemac_Stop(&lp->Emac);
+
+	INIT_LIST_HEAD(&(lp->rcv));
+	INIT_LIST_HEAD(&(lp->xmit));
+
+	/* Set the MAC address each time opened. */
+	if (_XLlTemac_SetMacAddress(&lp->Emac, dev->dev_addr) != XST_SUCCESS) {
+		printk(KERN_ERR "%s: XLlTemac: could not set MAC address.\n",
+		       dev->name);
+		return -EIO;
+	}
+
+	/*
+	 * If the device is not configured for polled mode, connect to the
+	 * interrupt controller and enable interrupts. Currently, there
+	 * isn't any code to set polled mode, so this check is probably
+	 * superfluous.
+	 */
+	Options = XLlTemac_GetOptions(&lp->Emac);
+	Options |= XTE_FLOW_CONTROL_OPTION;
+	Options |= XTE_JUMBO_OPTION;
+	Options |= XTE_TRANSMITTER_ENABLE_OPTION;
+	Options |= XTE_RECEIVER_ENABLE_OPTION;
+#if XTE_AUTOSTRIPPING
+	Options |= XTE_FCS_STRIP_OPTION;
+#endif
+
+	/* Return values deliberately ignored: the EMAC is stopped, so the
+	 * "already started" failure cannot occur. */
+	(int) _XLlTemac_SetOptions(&lp->Emac, Options);
+	(int) _XLlTemac_ClearOptions(&lp->Emac, ~Options);
+	Options = XLlTemac_GetOptions(&lp->Emac);
+	printk(KERN_INFO "%s: XLlTemac: Options: 0x%x\n", dev->name, Options);
+
+	/* Just use interrupt driven methods - no polled mode */
+	irqval = request_irq(dev->irq, &xenet_temac_interrupt, IRQF_DISABLED, dev->name, dev);
+	if (irqval) {
+		printk(KERN_ERR
+		       "%s: XLlTemac: could not allocate interrupt %d.\n",
+		       dev->name, dev->irq);
+		return irqval;
+	}
+	if (XLlTemac_IsDma(&lp->Emac)) {
+		printk(KERN_INFO
+		       "%s: XLlTemac: allocating interrupt %d for dma mode tx.\n",
+		       dev->name, lp->dma_irq_s);
+		irqval = request_irq(lp->dma_irq_s,
+			&xenet_dma_tx_interrupt, 0, "xilinx_dma_tx_int", dev);
+		if (irqval) {
+			printk(KERN_ERR
+			       "%s: XLlTemac: could not allocate interrupt %d.\n",
+			       dev->name, lp->dma_irq_s);
+			goto err_free_temac_irq;
+		}
+		printk(KERN_INFO
+		       "%s: XLlTemac: allocating interrupt %d for dma mode rx.\n",
+		       dev->name, lp->dma_irq_r);
+		irqval = request_irq(lp->dma_irq_r,
+			&xenet_dma_rx_interrupt, 0, "xilinx_dma_rx_int", dev);
+		if (irqval) {
+			printk(KERN_ERR
+			       "%s: XLlTemac: could not allocate interrupt %d.\n",
+			       dev->name, lp->dma_irq_r);
+			goto err_free_dma_tx_irq;
+		}
+	} else {
+		printk(KERN_INFO
+		       "%s: XLlTemac: allocating interrupt %d for fifo mode.\n",
+		       dev->name, lp->fifo_irq);
+		/* With the way interrupts are issued on the fifo core, this needs to be
+		 * fast interrupt handler.
+		 */
+		irqval = request_irq(lp->fifo_irq,
+			&xenet_fifo_interrupt, IRQF_DISABLED, "xilinx_fifo_int", dev);
+		if (irqval) {
+			printk(KERN_ERR
+			       "%s: XLlTemac: could not allocate interrupt %d.\n",
+			       dev->name, lp->fifo_irq);
+			goto err_free_temac_irq;
+		}
+	}
+
+	/* give the system enough time to establish a link */
+	mdelay(2000);
+
+	phy_setup(lp);
+	set_mac_speed(lp);
+
+	/* Enable interrupts - no polled mode */
+	if (XLlTemac_IsFifo(&lp->Emac)) {	/* fifo direct interrupt driver mode */
+		XLlFifo_IntEnable(&lp->Fifo, XLLF_INT_TC_MASK |
+				  XLLF_INT_RC_MASK | XLLF_INT_RXERROR_MASK |
+				  XLLF_INT_TXERROR_MASK);
+	} else {	/* SG DMA mode */
+		XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+		XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+	}
+	/*
+	 * Make sure all temac interrupts are disabled. These
+	 * interrupts are not data flow related.
+	 */
+	XLlTemac_IntDisable(&lp->Emac, XTE_INT_ALL_MASK);
+
+	/* Start TEMAC device */
+	_XLlTemac_Start(&lp->Emac);
+	if (XLlTemac_IsDma(&lp->Emac)) {
+		u32 threshold_s, timer_s, threshold_r, timer_r;
+
+		XLlDma_BdRingGetCoalesce(&lp->Dma.TxBdRing, &threshold_s, &timer_s);
+		XLlDma_BdRingGetCoalesce(&lp->Dma.RxBdRing, &threshold_r, &timer_r);
+		printk(KERN_INFO
+		       "%s: XLlTemac: Send Threshold = %d, Receive Threshold = %d\n",
+		       dev->name, threshold_s, threshold_r);
+		printk(KERN_INFO
+		       "%s: XLlTemac: Send Wait bound = %d, Receive Wait bound = %d\n",
+		       dev->name, timer_s, timer_r);
+		if (XLlDma_BdRingStart(&lp->Dma.TxBdRing) == XST_FAILURE) {
+			printk(KERN_ERR "%s: XLlTemac: could not start dma tx channel\n", dev->name);
+			goto err_stop;
+		}
+		if (XLlDma_BdRingStart(&lp->Dma.RxBdRing) == XST_FAILURE) {
+			printk(KERN_ERR "%s: XLlTemac: could not start dma rx channel\n", dev->name);
+			goto err_stop;
+		}
+	}
+
+	/* We're ready to go. */
+	netif_start_queue(dev);
+
+	/* Set up the PHY monitoring timer.  init_timer() first: it does not
+	 * initialize the expires/data/function fields assigned below. */
+	init_timer(&lp->phy_timer);
+	lp->phy_timer.expires = jiffies + 2 * HZ;
+	lp->phy_timer.data = (unsigned long) dev;
+	lp->phy_timer.function = &poll_gmii;
+	add_timer(&lp->phy_timer);
+	return 0;
+
+err_stop:
+	/* Reached only from the DMA branch: undo the TEMAC start and fall
+	 * through to release every IRQ acquired above. */
+	_XLlTemac_Stop(&lp->Emac);
+	free_irq(lp->dma_irq_r, dev);
+err_free_dma_tx_irq:
+	free_irq(lp->dma_irq_s, dev);
+err_free_temac_irq:
+	free_irq(dev->irq, dev);
+	return irqval ? irqval : -EIO;
+}
+
+/*
+ * net_device stop hook.  Teardown is order-sensitive: kill the PHY
+ * timer, stop the TX queue, stop the hardware, release the interrupt
+ * lines, and finally unlink this device from the tasklet work queues.
+ */
+static int xenet_close(struct net_device *dev)
+{
+	struct net_local *lp;
+	unsigned long flags;
+
+	lp = (struct net_local *) dev->priv;
+
+	/* Shut down the PHY monitoring timer. */
+	del_timer_sync(&lp->phy_timer);
+
+	/* Stop Send queue */
+	netif_stop_queue(dev);
+
+	/* Now we could stop the device */
+	_XLlTemac_Stop(&lp->Emac);
+
+	/*
+	 * Free the interrupt - not polled mode.
+	 */
+	free_irq(dev->irq, dev);
+	if (XLlTemac_IsDma(&lp->Emac)) {
+		free_irq(lp->dma_irq_s, dev);
+		free_irq(lp->dma_irq_r, dev);
+	} else {
+		free_irq(lp->fifo_irq, dev);
+	}
+
+	/* Remove this device from the bottom-half work queues so a pending
+	 * tasklet cannot touch it after close. */
+	spin_lock_irqsave(&receivedQueueSpin, flags);
+	list_del(&(lp->rcv));
+	spin_unlock_irqrestore(&receivedQueueSpin, flags);
+
+	spin_lock_irqsave(&sentQueueSpin, flags);
+	list_del(&(lp->xmit));
+	spin_unlock_irqrestore(&sentQueueSpin, flags);
+
+	return 0;
+}
+
+/* net_device get_stats hook: statistics are accumulated in the private
+ * data by the send/receive paths; simply hand back that structure. */
+static struct net_device_stats *xenet_get_stats(struct net_device *dev)
+{
+	return &((struct net_local *) dev->priv)->stats;
+}
+
+/*
+ * net_device change_mtu hook.  Accepts the new MTU only if the resulting
+ * frame (header + payload + trailer) holds at least one payload byte and
+ * does not exceed the hardware's configured maximum frame size.
+ */
+static int xenet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+#ifdef CONFIG_XILINX_GIGE_VLAN
+	int header_len = XTE_HDR_VLAN_SIZE;	/* header includes VLAN tag */
+#else
+	int header_len = XTE_HDR_SIZE;
+#endif
+	int frame_len = new_mtu + header_len + XTE_TRL_SIZE;
+
+	if (frame_len < 1 + header_len + XTE_TRL_SIZE ||
+	    frame_len > lp->max_frame_size)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;	/* change mtu in net_device structure */
+	return 0;
+}
+
+/*
+ * hard_start_xmit for FIFO mode.  Computes the frame's total length
+ * (linear head plus all page fragments), and either writes the whole
+ * frame into the TX FIFO and kicks the transmit, or — when the FIFO
+ * lacks room — stops the queue and defers the skb for FifoSendHandler
+ * to send from interrupt context.  Always returns 0.
+ */
+static int xenet_FifoSend(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_local *lp;
+	unsigned long flags, fifo_free_bytes;
+	int total_frags = skb_shinfo(skb)->nr_frags + 1;	/* frags + linear head */
+	unsigned int total_len;
+	skb_frag_t *frag;
+	int i;
+	void *virt_addr;
+
+	total_len = skb_headlen(skb);
+
+	/* i starts at 1 because the head was counted above; frag walks
+	 * frags[0..nr_frags-1]. */
+	frag = &skb_shinfo(skb)->frags[0];
+	for (i = 1; i < total_frags; i++, frag++) {
+		total_len += frag->size;
+	}
+
+	/* The following lock is used to protect TxVacancy, Write
+	 * and TxSetLen sequence which could happen from FifoSendHandler
+	 * or other processor in SMP case.
+	 */
+	spin_lock_irqsave(&XTE_tx_spinlock, flags);
+	lp = (struct net_local *) dev->priv;
+
+	/* TxVacancy is in 32-bit words, hence the * 4 */
+	fifo_free_bytes = XLlFifo_TxVacancy(&lp->Fifo) * 4;
+	if (fifo_free_bytes < total_len) {
+		netif_stop_queue(dev);	/* stop send queue */
+		lp->deferred_skb = skb;	/* buffer the sk_buffer and will send
+					   it in interrupt context */
+		spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+		return 0;
+	}
+
+	/* Write frame data to FIFO */
+	XLlFifo_Write(&lp->Fifo, (void *) skb->data, skb_headlen(skb));
+
+	frag = &skb_shinfo(skb)->frags[0];
+	for (i = 1; i < total_frags; i++, frag++) {
+		virt_addr =
+			(void *) page_address(frag->page) + frag->page_offset;
+		XLlFifo_Write(&lp->Fifo, virt_addr, frag->size);
+	}
+
+	/* Initiate transmit */
+	XLlFifo_TxSetLen(&lp->Fifo, total_len);
+	lp->stats.tx_bytes += total_len;
+	spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+
+	dev_kfree_skb(skb);	/* free skb */
+	dev->trans_start = jiffies;
+	return 0;
+}
+
+/* Callback function for completed frames sent in FIFO interrupt driven mode.
+ * Called from the FIFO ISR on transmit-complete: accounts the completed
+ * packet and, if a transmit was deferred for lack of FIFO space, retries
+ * it now and wakes the queue on success. */
+static void FifoSendHandler(struct net_device *dev)
+{
+	struct net_local *lp;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_tx_spinlock, flags);
+	lp = (struct net_local *) dev->priv;
+	/* count the transmit whose completion triggered this call */
+	lp->stats.tx_packets++;
+
+	/*Send out the deferred skb and wake up send queue if a deferred skb exists */
+	if (lp->deferred_skb) {
+		int total_frags;
+		unsigned int total_len;
+		unsigned long fifo_free_bytes;
+		skb_frag_t *frag;
+		int i;
+		void *virt_addr;
+
+		skb = lp->deferred_skb;
+		/* same length computation as xenet_FifoSend: head + frags */
+		total_frags = skb_shinfo(skb)->nr_frags + 1;
+		total_len = skb_headlen(skb);
+
+		frag = &skb_shinfo(skb)->frags[0];
+		for (i = 1; i < total_frags; i++, frag++) {
+			total_len += frag->size;
+		}
+
+		fifo_free_bytes = XLlFifo_TxVacancy(&lp->Fifo) * 4;
+		if (fifo_free_bytes < total_len) {
+			/* If still no room for the deferred packet, return */
+			spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+			return;
+		}
+
+		/* Write frame data to FIFO */
+		XLlFifo_Write(&lp->Fifo, (void *) skb->data, skb_headlen(skb));
+
+		frag = &skb_shinfo(skb)->frags[0];
+		for (i = 1; i < total_frags; i++, frag++) {
+			virt_addr =
+				(void *) page_address(frag->page) + frag->page_offset;
+			XLlFifo_Write(&lp->Fifo, virt_addr, frag->size);
+		}
+
+		/* Initiate transmit */
+		XLlFifo_TxSetLen(&lp->Fifo, total_len);
+
+		dev_kfree_skb(skb);	/* free skb */
+		lp->deferred_skb = NULL;
+		lp->stats.tx_packets++;
+		lp->stats.tx_bytes += total_len;
+		dev->trans_start = jiffies;
+		netif_wake_queue(dev);	/* wake up send queue */
+	}
+	spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+}
+
+#if 0
+/*
+ * These are used for debugging purposes, left here in case they are useful
+ * for further debugging.  Referenced only by the (also disabled) software
+ * checksum path in xenet_DmaSend_internal.
+ */
+/* Software TX checksum over the transport payload (from the transport
+ * header to the end of the packet); 0 if there is no payload to sum. */
+static unsigned int _xenet_tx_csum(struct sk_buff *skb)
+{
+	unsigned int csum = 0;
+	long csstart = skb_transport_header(skb) - skb->data;
+
+	if (csstart != skb->len) {
+		csum = skb_checksum(skb, csstart, skb->len - csstart, 0);
+	}
+
+	return csum;
+}
+
+/* Software RX checksum over the entire skb. */
+static inline unsigned int _xenet_rx_csum(struct sk_buff *skb)
+{
+	return skb_checksum(skb, 0, skb->len, 0);
+}
+#endif
+
+/*
+ * xenet_DmaSend_internal is an internal use, send routine.
+ * Any locks that need to be acquired, should be acquired
+ * prior to calling this routine.
+ *
+ * Builds one buffer descriptor per skb fragment (head + page frags),
+ * maps each for DMA, configures hardware checksum offload on the first
+ * BD when requested, marks SOP/EOP, and commits the chain to the DMA
+ * engine.  On BD exhaustion the skb is deferred and the queue stopped;
+ * on commit failure the device is reset.  Returns an XST_* status.
+ */
+static int xenet_DmaSend_internal(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_local *lp;
+	XLlDma_Bd *bd_ptr;
+	int result;
+	int total_frags;
+	int i;
+	void *virt_addr;
+	size_t len;
+	dma_addr_t phy_addr;
+	XLlDma_Bd *first_bd_ptr;
+	XLlDma_Bd *last_bd_ptr;
+	skb_frag_t *frag;
+
+	lp = (struct net_local *) dev->priv;
+
+	/* get skb_shinfo(skb)->nr_frags + 1 buffer descriptors */
+	total_frags = skb_shinfo(skb)->nr_frags + 1;
+
+	/* stats */
+	if (lp->max_frags_in_a_packet < total_frags) {
+		lp->max_frags_in_a_packet = total_frags;
+	}
+
+	/* NOTE(review): strict '<' — a frame with exactly XTE_SEND_BD_CNT
+	 * fragments is dropped even though that many BDs exist; confirm
+	 * whether this guard is intentional. */
+	if (total_frags < XTE_SEND_BD_CNT) {
+		result = XLlDma_BdRingAlloc(&lp->Dma.TxBdRing, total_frags,
+					    &bd_ptr);
+
+		if (result != XST_SUCCESS) {
+			netif_stop_queue(dev);	/* stop send queue */
+			lp->deferred_skb = skb;	/* buffer the sk_buffer and will send
+						   it in interrupt context */
+			return result;
+		}
+	} else {
+		dev_kfree_skb(skb);
+		lp->stats.tx_dropped++;
+		printk(KERN_ERR
+		       "%s: XLlTemac: could not send TX socket buffers (too many fragments).\n",
+		       dev->name);
+		return XST_FAILURE;
+	}
+
+	len = skb_headlen(skb);
+
+	/* get the physical address of the header
+	 * (NULL device: legacy pre-2.6.18 dma_map_single usage) */
+	phy_addr = (u32) dma_map_single(NULL, skb->data, len, DMA_TO_DEVICE);
+
+	/* get the header fragment, it's in the skb differently */
+	XLlDma_mBdSetBufAddr(bd_ptr, phy_addr);
+	XLlDma_mBdSetLength(bd_ptr, len);
+	XLlDma_mBdSetId(bd_ptr, skb);	/* only the first BD carries the skb */
+
+	/*
+	 * if tx checksum offloading is enabled, when the ethernet stack
+	 * wants us to perform the checksum in hardware,
+	 * skb->ip_summed is CHECKSUM_PARTIAL. Otherwise skb->ip_summed is
+	 * CHECKSUM_NONE, meaning the checksum is already done, or
+	 * CHECKSUM_UNNECESSARY, meaning checksumming is turned off (e.g.
+	 * loopback interface)
+	 *
+	 * skb->csum is an overloaded value. On send, skb->csum is the offset
+	 * into the buffer (skb_transport_header(skb)) to place the csum value.
+	 * On receive this feild gets set to the actual csum value, before it's
+	 * passed up the stack.
+	 *
+	 * When we get here, the ethernet stack above will have already
+	 * computed the pseudoheader csum value and have placed it in the
+	 * TCP/UDP header.
+	 *
+	 * The IP header csum has also already been computed and inserted.
+	 *
+	 * Since the IP header with it's own csum should compute to a null
+	 * csum, it should be ok to include it in the hw csum. If it is decided
+	 * to change this scheme, skb should be examined before dma_map_single()
+	 * is called, which flushes the page from the cpu's cache.
+	 *
+	 * skb->data points to the beginning of the whole packet
+	 * skb_transport_header(skb) points to the beginning of the ip header
+	 *
+	 */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+
+		unsigned int csum_start_off = skb_transport_offset(skb);
+		unsigned int csum_index_off = csum_start_off + skb->csum_offset;
+
+#if 0
+		{
+			unsigned int csum = _xenet_tx_csum(skb);
+
+			*((unsigned short *) (raw + skb->csum)) =
+				csum_fold(csum);
+			BdCsumDisable(bd_ptr);
+		}
+#else
+		BdCsumEnable(bd_ptr);
+		BdCsumSetup(bd_ptr, csum_start_off, csum_index_off);
+
+#endif
+		lp->tx_hw_csums++;
+	}
+	else {
+		/*
+		 * This routine will do no harm even if hardware checksum capability is
+		 * off.
+		 */
+		BdCsumDisable(bd_ptr);
+	}
+
+	first_bd_ptr = bd_ptr;
+	last_bd_ptr = bd_ptr;
+
+	frag = &skb_shinfo(skb)->frags[0];
+
+	/* one BD per page fragment; the head BD was filled above */
+	for (i = 1; i < total_frags; i++, frag++) {
+		bd_ptr = XLlDma_mBdRingNext(&lp->Dma.TxBdRing, bd_ptr);
+		last_bd_ptr = bd_ptr;
+
+		virt_addr =
+			(void *) page_address(frag->page) + frag->page_offset;
+		phy_addr =
+			(u32) dma_map_single(NULL, virt_addr, frag->size,
+					     DMA_TO_DEVICE);
+
+		XLlDma_mBdSetBufAddr(bd_ptr, phy_addr);
+		XLlDma_mBdSetLength(bd_ptr, frag->size);
+		XLlDma_mBdSetId(bd_ptr, NULL);
+		BdCsumDisable(bd_ptr);
+		XLlDma_mBdSetStsCtrl(bd_ptr, 0);
+	}
+
+	/* Mark start/end of packet on the first/last descriptor. */
+	if (first_bd_ptr == last_bd_ptr) {
+		XLlDma_mBdSetStsCtrl(last_bd_ptr,
+				     XLLDMA_BD_STSCTRL_SOP_MASK |
+				     XLLDMA_BD_STSCTRL_EOP_MASK);
+	} else {
+		XLlDma_mBdSetStsCtrl(first_bd_ptr, XLLDMA_BD_STSCTRL_SOP_MASK);
+		XLlDma_mBdSetStsCtrl(last_bd_ptr, XLLDMA_BD_STSCTRL_EOP_MASK);
+	}
+
+
+	/* Enqueue to HW */
+	result = XLlDma_BdRingToHw(&lp->Dma.TxBdRing, total_frags,
+				   first_bd_ptr);
+	if (result != XST_SUCCESS) {
+		netif_stop_queue(dev);	/* stop send queue */
+		dev_kfree_skb(skb);
+		XLlDma_mBdSetId(first_bd_ptr, NULL);
+		lp->stats.tx_dropped++;
+		printk(KERN_ERR
+		       "%s: XLlTemac: could not send commit TX buffer descriptor (%d).\n",
+		       dev->name, result);
+		reset(dev, __LINE__);
+
+		return XST_FAILURE;
+	}
+
+	dev->trans_start = jiffies;
+
+	return XST_SUCCESS;
+}
+
+/* The send function for frames sent in DMA mode */
+static int xenet_DmaSend(struct sk_buff *skb, struct net_device *dev)
+{
+	/*
+	 * Serialize the BD alloc/commit sequence against the DmaSendHandlerBH
+	 * bottom half (and against other processors in the SMP case).
+	 */
+	spin_lock_bh(&XTE_tx_spinlock);
+	xenet_DmaSend_internal(skb, dev);
+	spin_unlock_bh(&XTE_tx_spinlock);
+
+	/* Failures are resolved internally (defer, drop, or reset),
+	 * so the stack is always told the transmit was accepted. */
+	return 0;
+}
+
+
+static void DmaSendHandlerBH(unsigned long p)
+{
+ struct net_device *dev;
+ struct net_local *lp;
+ XLlDma_Bd *BdPtr, *BdCurPtr;
+ unsigned long len;
+ unsigned long flags;
+ struct sk_buff *skb;
+ dma_addr_t skb_dma_addr;
+ int result = XST_SUCCESS;
+ unsigned int bd_processed, bd_processed_save;
+
+ while (1) {
+ spin_lock_irqsave(&sentQueueSpin, flags);
+ if (list_empty(&sentQueue)) {
+ spin_unlock_irqrestore(&sentQueueSpin, flags);
+ break;
+ }
+
+ lp = list_entry(sentQueue.next, struct net_local, xmit);
+
+ list_del_init(&(lp->xmit));
+ spin_unlock_irqrestore(&sentQueueSpin, flags);
+
+ spin_lock_irqsave(&XTE_tx_spinlock, flags);
+ dev = lp->ndev;
+ bd_processed_save = 0;
+ while ((bd_processed =
+ XLlDma_BdRingFromHw(&lp->Dma.TxBdRing, XTE_SEND_BD_CNT,
+ &BdPtr)) > 0) {
+
+ bd_processed_save = bd_processed;
+ BdCurPtr = BdPtr;
+ do {
+ len = XLlDma_mBdGetLength(BdCurPtr);
+ skb_dma_addr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdCurPtr);
+ dma_unmap_single(NULL, skb_dma_addr, len,
+ DMA_TO_DEVICE);
+
+ /* get ptr to skb */
+ skb = (struct sk_buff *)
+ XLlDma_mBdGetId(BdCurPtr);
+ if (skb)
+ dev_kfree_skb(skb);
+
+ /* reset BD id */
+ XLlDma_mBdSetId(BdCurPtr, NULL);
+
+ lp->stats.tx_bytes += len;
+ if (XLlDma_mBdGetStsCtrl(BdCurPtr) & XLLDMA_BD_STSCTRL_EOP_MASK) {
+ lp->stats.tx_packets++;
+ }
+
+ BdCurPtr = XLlDma_mBdRingNext(&lp->Dma.TxBdRing, BdCurPtr);
+ bd_processed--;
+ } while (bd_processed > 0);
+
+ result = XLlDma_BdRingFree(&lp->Dma.TxBdRing,
+ bd_processed_save, BdPtr);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XLlDma: BdRingFree() error %d.\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+ return;
+ }
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+
+ /* Send out the deferred skb if it exists */
+ if ((lp->deferred_skb) && bd_processed_save) {
+ skb = lp->deferred_skb;
+ lp->deferred_skb = NULL;
+
+ result = xenet_DmaSend_internal(skb, dev);
+ }
+
+ if (result == XST_SUCCESS) {
+ netif_wake_queue(dev); /* wake up send queue */
+ }
+ spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+ }
+}
+
+/* ndo timeout hook: the transmitter stalled for TX_TIMEOUT jiffies;
+ * log it, count the error and reset the MAC. */
+static void xenet_tx_timeout(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	unsigned long flags;
+
+	/*
+	 * Take the TX lock with interrupts disabled so no interrupt can
+	 * re-enter reset() while it runs.
+	 */
+	spin_lock_irqsave(&XTE_tx_spinlock, flags);
+
+	printk(KERN_ERR
+	       "%s: XLlTemac: exceeded transmit timeout of %lu ms. Resetting emac.\n",
+	       dev->name, TX_TIMEOUT * 1000UL / HZ);
+	lp->stats.tx_errors++;
+
+	reset(dev, __LINE__);
+
+	spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+}
+
+/* The callback function for frames received when in FIFO mode.
+ *
+ * Bottom half: pops one device off receivedQueue and drains every frame
+ * currently in its RX FIFO into freshly allocated skbs, then re-enables
+ * the FIFO interrupts.
+ *
+ * @p: unused tasklet parameter.
+ */
+static void FifoRecvHandler(unsigned long p)
+{
+	struct net_local *lp;
+	struct sk_buff *skb;
+	u32 len;
+
+	struct net_device *dev;
+	unsigned long flags;
+	/* Nothing queued means nothing to do. */
+	spin_lock_irqsave(&receivedQueueSpin, flags);
+	if (list_empty(&receivedQueue)) {
+		spin_unlock_irqrestore(&receivedQueueSpin, flags);
+		return;
+	}
+	lp = list_entry(receivedQueue.next, struct net_local, rcv);
+
+	list_del_init(&(lp->rcv));
+	spin_unlock_irqrestore(&receivedQueueSpin, flags);
+	dev = lp->ndev;
+
+	while (XLlFifo_RxOccupancy(&lp->Fifo) != 0) {
+
+		len = XLlFifo_RxGetLen(&lp->Fifo);
+
+		/*
+		 * TODO: Hm this is odd, if we can't allocate the skb, we throw away the next packet. Why?
+		 */
+		/* NOTE(review): ALIGNMENT_RECV slack is allocated but the
+		 * data pointer is never aligned via skb_reserve() here --
+		 * confirm this is intentional for FIFO mode. */
+		if (!(skb = /*dev_ */ alloc_skb(len + ALIGNMENT_RECV, GFP_ATOMIC))) {
+#define XTE_RX_SINK_BUFFER_SIZE 1024
+			static u32 rx_buffer_sink[XTE_RX_SINK_BUFFER_SIZE / sizeof(u32)];
+
+			/* Couldn't get memory. */
+			lp->stats.rx_dropped++;
+			printk(KERN_ERR
+			       "%s: XLlTemac: could not allocate receive buffer.\n",
+			       dev->name);
+
+			/* consume data in Xilinx TEMAC RX data fifo so it is sync with RX length fifo */
+			for (; len > XTE_RX_SINK_BUFFER_SIZE;
+			     len -= XTE_RX_SINK_BUFFER_SIZE) {
+				XLlFifo_Read(&lp->Fifo, rx_buffer_sink,
+					     XTE_RX_SINK_BUFFER_SIZE);
+			}
+			XLlFifo_Read(&lp->Fifo, rx_buffer_sink, len);
+			break;
+		}
+
+		/* Read the packet data */
+		XLlFifo_Read(&lp->Fifo, skb->data, len);
+		lp->stats.rx_packets++;
+		lp->stats.rx_bytes += len;
+
+		skb_put(skb, len);	/* Tell the skb how much data we got. */
+		skb->dev = dev;		/* Fill out required meta-data. */
+		skb->protocol = eth_type_trans(skb, dev);
+		skb->ip_summed = CHECKSUM_NONE;	/* no HW csum in FIFO mode */
+		netif_rx(skb);	/* Send the packet upstream. */
+	}
+	/* RX drained; let the FIFO interrupts fire again. */
+	XLlFifo_IntEnable(&lp->Fifo, XLLF_INT_TC_MASK | XLLF_INT_RC_MASK |
+			  XLLF_INT_RXERROR_MASK | XLLF_INT_TXERROR_MASK);
+
+}
+
+
+/*
+ * _xenet_DmaSetupRecvBuffers allocates as many socket buffers (sk_buff's) as it
+ * can up to the number of free RX buffer descriptors. Then it sets up the RX
+ * buffer descriptors to DMA into the socket_buffers.
+ *
+ * The net_device, dev, indicates on which device to operate for buffer
+ * descriptor allocation.
+ */
+static void _xenet_DmaSetupRecvBuffers(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	int free_bd_count = XLlDma_mBdRingGetFreeCnt(&lp->Dma.RxBdRing);
+	int num_sk_buffs;
+	struct sk_buff_head sk_buff_list;
+	struct sk_buff *new_skb;
+	u32 new_skb_baddr;
+	XLlDma_Bd *BdPtr, *BdCurPtr;
+	u32 align;
+	int result;
+
+#if 0
+	int align_max = ALIGNMENT_RECV;
+#else
+	int align_max = 0;
+#endif
+
+
+	/* Allocate one skb per free RX descriptor; stop early on failure
+	 * and continue with however many we got. */
+	skb_queue_head_init(&sk_buff_list);
+	for (num_sk_buffs = 0; num_sk_buffs < free_bd_count; num_sk_buffs++) {
+		new_skb = alloc_skb(lp->max_frame_size + align_max, GFP_ATOMIC);
+		if (new_skb == NULL) {
+			break;
+		}
+		/*
+		 * I think the XTE_spinlock, and Recv DMA int disabled will protect this
+		 * list as well, so we can use the __ version just fine
+		 */
+		__skb_queue_tail(&sk_buff_list, new_skb);
+	}
+	if (!num_sk_buffs) {
+		printk(KERN_ERR "%s: XLlTemac: alloc_skb unsuccessful\n",
+		       dev->name);
+		return;
+	}
+
+	/* now we got a bunch o' sk_buffs */
+	result = XLlDma_BdRingAlloc(&lp->Dma.RxBdRing, num_sk_buffs, &BdPtr);
+	if (result != XST_SUCCESS) {
+		/* we really shouldn't get this */
+		skb_queue_purge(&sk_buff_list);
+		printk(KERN_ERR "%s: XLlDma: BdRingAlloc unsuccessful (%d)\n",
+		       dev->name, result);
+		reset(dev, __LINE__);
+		return;
+	}
+
+	BdCurPtr = BdPtr;
+
+	/* Attach each skb to a descriptor: map its data area for DMA and
+	 * mark the BD as a complete (SOP+EOP) receive buffer. */
+	new_skb = skb_dequeue(&sk_buff_list);
+	while (new_skb) {
+		/* make sure we're long-word aligned */
+		align = BUFFER_ALIGNRECV(new_skb->data);
+		if (align) {
+			skb_reserve(new_skb, align);
+		}
+
+		/* Get dma handle of skb->data */
+		new_skb_baddr = (u32) dma_map_single(NULL, new_skb->data,
+						     lp->max_frame_size,
+						     DMA_FROM_DEVICE);
+
+		XLlDma_mBdSetBufAddr(BdCurPtr, new_skb_baddr);
+		XLlDma_mBdSetLength(BdCurPtr, lp->max_frame_size);
+		XLlDma_mBdSetId(BdCurPtr, new_skb);
+		XLlDma_mBdSetStsCtrl(BdCurPtr,
+				     XLLDMA_BD_STSCTRL_SOP_MASK |
+				     XLLDMA_BD_STSCTRL_EOP_MASK);
+
+		BdCurPtr = XLlDma_mBdRingNext(&lp->Dma.RxBdRing, BdCurPtr);
+
+		new_skb = skb_dequeue(&sk_buff_list);
+	}
+
+	/* enqueue RxBD with the attached skb buffers such that it is
+	 * ready for frame reception */
+	result = XLlDma_BdRingToHw(&lp->Dma.RxBdRing, num_sk_buffs, BdPtr);
+	if (result != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: XLlDma: (DmaSetupRecvBuffers) BdRingToHw unsuccessful (%d)\n",
+		       dev->name, result);
+		/* NOTE(review): sk_buff_list is already empty at this point
+		 * (every skb was dequeued and attached to a BD above), so
+		 * this purge is a no-op; the attached skbs and their DMA
+		 * mappings appear to leak until reset() -- confirm reset()
+		 * reclaims them. */
+		skb_queue_purge(&sk_buff_list);
+		BdCurPtr = BdPtr;
+		while (num_sk_buffs > 0) {
+			XLlDma_mBdSetId(BdCurPtr, NULL);
+			BdCurPtr = XLlDma_mBdRingNext(&lp->Dma.RxBdRing,
+						      BdCurPtr);
+			num_sk_buffs--;
+		}
+		reset(dev, __LINE__);
+		return;
+	}
+}
+
+/*
+ * DmaRecvHandlerBH - RX-completion bottom half.
+ *
+ * For every device queued on receivedQueue: harvest completed RX buffer
+ * descriptors, unmap each skb and pass it up the stack (applying the RX
+ * checksum-offload result when enabled), recycle the descriptors,
+ * replenish the RX ring and re-enable the RX BD interrupt.
+ *
+ * @p: unused tasklet parameter.
+ */
+static void DmaRecvHandlerBH(unsigned long p)
+{
+	struct net_device *dev;
+	struct net_local *lp;
+	struct sk_buff *skb;
+	u32 len, skb_baddr;
+	int result;
+	unsigned long flags;
+	XLlDma_Bd *BdPtr, *BdCurPtr;
+	unsigned int bd_processed, bd_processed_saved;
+
+	while (1) {
+		/* Pop the next device with RX work; stop when queue empty. */
+		spin_lock_irqsave(&receivedQueueSpin, flags);
+		if (list_empty(&receivedQueue)) {
+			spin_unlock_irqrestore(&receivedQueueSpin, flags);
+			break;
+		}
+		lp = list_entry(receivedQueue.next, struct net_local, rcv);
+
+		list_del_init(&(lp->rcv));
+		spin_unlock_irqrestore(&receivedQueueSpin, flags);
+		dev = lp->ndev;
+
+		spin_lock_irqsave(&XTE_rx_spinlock, flags);
+		if ((bd_processed =
+		     XLlDma_BdRingFromHw(&lp->Dma.RxBdRing, XTE_RECV_BD_CNT, &BdPtr)) > 0) {
+
+			bd_processed_saved = bd_processed;
+			BdCurPtr = BdPtr;
+			do {
+				/*
+				 * Regular length field not updated on rx,
+				 * USR4 updated instead.
+				 */
+				len = BdGetRxLen(BdCurPtr);
+
+				/* get ptr to skb */
+				skb = (struct sk_buff *)
+					XLlDma_mBdGetId(BdCurPtr);
+
+				/* get and free up dma handle used by skb->data */
+				skb_baddr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdCurPtr);
+				dma_unmap_single(NULL, skb_baddr,
+						 lp->max_frame_size,
+						 DMA_FROM_DEVICE);
+
+				/* reset ID */
+				XLlDma_mBdSetId(BdCurPtr, NULL);
+
+				/* setup received skb and send it upstream */
+				skb_put(skb, len);	/* Tell the skb how much data we got. */
+				skb->dev = dev;
+
+				/* this routine adjusts skb->data to skip the header */
+				skb->protocol = eth_type_trans(skb, dev);
+
+				/* default the ip_summed value */
+				skb->ip_summed = CHECKSUM_NONE;
+
+				/* if we're doing rx csum offload, set it up */
+				if (((lp->local_features & LOCAL_FEATURE_RX_CSUM) != 0) &&
+				    (skb->protocol == __constant_htons(ETH_P_IP)) &&
+				    (skb->len > 64)) {
+					unsigned int csum;
+
+					/*
+					 * This hardware only supports proper checksum calculations
+					 * on TCP/UDP packets.
+					 *
+					 * skb->csum is an overloaded value. On send, skb->csum is
+					 * the offset into the buffer (skb_transport_header(skb))
+					 * to place the csum value. On receive this field gets set
+					 * to the actual csum value, before it's passed up the stack.
+					 *
+					 * If we set skb->ip_summed to CHECKSUM_COMPLETE, the ethernet
+					 * stack above will compute the pseudoheader csum value and
+					 * add it to the partial checksum already computed (to be
+					 * placed in skb->csum) and verify it.
+					 *
+					 * Setting skb->ip_summed to CHECKSUM_NONE means that the
+					 * checksum didn't verify and the stack will (re)check it.
+					 *
+					 * Setting skb->ip_summed to CHECKSUM_UNNECESSARY means
+					 * that the checksum was verified/assumed to be good and the
+					 * stack does not need to (re)check it.
+					 *
+					 * The ethernet stack above will (re)compute the checksum
+					 * under the following conditions:
+					 * 1) skb->ip_summed was set to CHECKSUM_NONE
+					 * 2) skb->len does not match the length of the ethernet
+					 *    packet determined by parsing the packet. In this case
+					 *    the ethernet stack will assume any prior checksum
+					 *    value was miscomputed and throw it away.
+					 * 3) skb->ip_summed was set to CHECKSUM_COMPLETE, skb->csum was
+					 *    set, but the result does not check out ok by the
+					 *    ethernet stack.
+					 *
+					 * If the TEMAC hardware stripping feature is off, each
+					 * packet will contain an FCS field which will have been
+					 * computed by the hardware checksum operation. This 4 byte
+					 * FCS value needs to be subtracted back out of the checksum
+					 * value computed by hardware as it's not included in a
+					 * normal ethernet packet checksum.
+					 *
+					 * The minimum transfer packet size over the wire is 64
+					 * bytes. If the packet is sent as exactly 64 bytes, then
+					 * it probably contains some random padding bytes. It's
+					 * somewhat difficult to determine the actual length of the
+					 * real packet data, so we just let the stack recheck the
+					 * checksum for us.
+					 *
+					 * After the call to eth_type_trans(), the following holds
+					 * true:
+					 *    skb->data points to the beginning of the ip header
+					 */
+					csum = BdCsumGet(BdCurPtr);
+
+#if ! XTE_AUTOSTRIPPING
+					if (!lp->stripping) {
+						/* take off the FCS */
+						u16 *data;
+
+						/* FCS is 4 bytes */
+						skb_put(skb, -4);
+
+						data = (u16 *) (&skb->
+								data[skb->len]);
+
+						/* subtract out the FCS from the csum value */
+						csum = csum_sub(csum, *data /* & 0xffff */);
+						data++;
+						csum = csum_sub(csum, *data /* & 0xffff */);
+					}
+#endif
+					skb->csum = csum;
+					skb->ip_summed = CHECKSUM_COMPLETE;
+
+					lp->rx_hw_csums++;
+				}
+
+				lp->stats.rx_packets++;
+				lp->stats.rx_bytes += len;
+				netif_rx(skb);	/* Send the packet upstream. */
+
+				BdCurPtr =
+					XLlDma_mBdRingNext(&lp->Dma.RxBdRing,
+							   BdCurPtr);
+				bd_processed--;
+			} while (bd_processed > 0);
+
+			/* give the descriptor back to the driver */
+			result = XLlDma_BdRingFree(&lp->Dma.RxBdRing,
+						   bd_processed_saved, BdPtr);
+			if (result != XST_SUCCESS) {
+				printk(KERN_ERR
+				       "%s: XLlDma: BdRingFree unsuccessful (%d)\n",
+				       dev->name, result);
+				reset(dev, __LINE__);
+				spin_unlock_irqrestore(&XTE_rx_spinlock, flags);
+				return;
+			}
+
+			/* Re-arm the ring with fresh receive buffers. */
+			_xenet_DmaSetupRecvBuffers(dev);
+		}
+		XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+		spin_unlock_irqrestore(&XTE_rx_spinlock, flags);
+	}
+}
+
+/*
+ * descriptor_init - allocate and set up the RX and TX DMA BD rings.
+ *
+ * Carves one memory pool (kmalloc'd, or mapped from block RAM when
+ * BD_IN_BRAM is set) into an RX ring followed by a TX ring, creates both
+ * rings and primes the RX ring with receive buffers.
+ *
+ * Returns 0 on success, -ENOMEM if the descriptor pool could not be
+ * obtained, -EIO if a ring could not be created.
+ */
+static int descriptor_init(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int recvsize, sendsize;
+	int dftsize;
+	u32 *recvpoolptr, *sendpoolptr;
+	void *recvpoolphy, *sendpoolphy;
+	int result;
+
+/*
+ * Buffer Descriptor
+ * word	byte	description
+ * 0	0h	next ptr
+ * 1	4h	buffer addr
+ * 2	8h	buffer len
+ * 3	ch	sts/ctrl | app data (0) [tx csum enable (bit 31 LSB)]
+ * 4	10h	app data (1) [tx csum begin (bits 0-15 MSB) | csum insert (bits 16-31 LSB)]
+ * 5	14h	app data (2) [tx csum seed (bits 16-31 LSB)]
+ * 6	18h	app data (3) [rx raw csum (bits 16-31 LSB)]
+ * 7	1ch	app data (4) [rx recv length (bits 18-31 LSB)]
+ */
+#if 0
+	int XferType = XDMAV3_DMACR_TYPE_BFBURST_MASK;
+	int XferWidth = XDMAV3_DMACR_DSIZE_64_MASK;
+#endif
+
+	/* calc size of descriptor space pool; alloc from non-cached memory */
+	dftsize = XLlDma_mBdRingMemCalc(ALIGNMENT_BD,
+					XTE_RECV_BD_CNT + XTE_SEND_BD_CNT);
+	printk(KERN_INFO "XLlTemac: buffer descriptor size: %d (0x%0x)\n",
+	       dftsize, dftsize);
+
+#if BD_IN_BRAM == 0
+	/*
+	 * Allow buffer descriptors to be cached.
+	 * Old method w/cache on buffer descriptors disabled:
+	 * lp->desc_space = dma_alloc_coherent(NULL, dftsize,
+	 *	&lp->desc_space_handle, GFP_KERNEL);
+	 * (note if going back to dma_alloc_coherent() the CACHE macros in
+	 * xenv_linux.h need to be disabled.)
+	 */
+
+	printk(KERN_INFO "XLlTemac: Allocating DMA descriptors with kmalloc\n");
+	lp->desc_space = kmalloc(dftsize, GFP_KERNEL);
+	/* Check before deriving the bus handle from the pointer. */
+	if (lp->desc_space == NULL)
+		return -ENOMEM;
+	lp->desc_space_handle = (dma_addr_t) page_to_phys(virt_to_page(lp->desc_space));
+#else
+	printk(KERN_INFO "XLlTemac: Allocating DMA descriptors in Block Ram\n");
+	lp->desc_space_handle = BRAM_BASEADDR;
+	lp->desc_space = ioremap(lp->desc_space_handle, dftsize);
+	if (lp->desc_space == NULL)
+		return -ENOMEM;
+#endif
+
+	lp->desc_space_size = dftsize;
+
+	printk(KERN_INFO
+	       "XLlTemac: (buffer_descriptor_init) phy: 0x%x, virt: 0x%x, size: 0x%x\n",
+	       lp->desc_space_handle, (unsigned int) lp->desc_space,
+	       lp->desc_space_size);
+
+	/* calc size of send and recv descriptor space */
+	recvsize = XLlDma_mBdRingMemCalc(ALIGNMENT_BD, XTE_RECV_BD_CNT);
+	sendsize = XLlDma_mBdRingMemCalc(ALIGNMENT_BD, XTE_SEND_BD_CNT);
+
+	/* RX ring sits at the start of the pool, TX ring right after it. */
+	recvpoolptr = lp->desc_space;
+	sendpoolptr = (void *) ((u32) lp->desc_space + recvsize);
+
+	recvpoolphy = (void *) lp->desc_space_handle;
+	sendpoolphy = (void *) ((u32) lp->desc_space_handle + recvsize);
+
+	result = XLlDma_BdRingCreate(&lp->Dma.RxBdRing, (u32) recvpoolphy,
+				     (u32) recvpoolptr, ALIGNMENT_BD,
+				     XTE_RECV_BD_CNT);
+	if (result != XST_SUCCESS) {
+		printk(KERN_ERR "XLlTemac: DMA Ring Create (RECV). Error: %d\n", result);
+		return -EIO;
+	}
+
+	result = XLlDma_BdRingCreate(&lp->Dma.TxBdRing, (u32) sendpoolphy,
+				     (u32) sendpoolptr, ALIGNMENT_BD,
+				     XTE_SEND_BD_CNT);
+	if (result != XST_SUCCESS) {
+		printk(KERN_ERR "XLlTemac: DMA Ring Create (SEND). Error: %d\n", result);
+		return -EIO;
+	}
+
+	/* Attach receive skbs to the RX descriptors and hand them to HW. */
+	_xenet_DmaSetupRecvBuffers(dev);
+	return 0;
+}
+
+/*
+ * free_descriptor_skb - release all skbs still attached to the BD rings,
+ * then free the descriptor pool itself.
+ *
+ * Counterpart of descriptor_init(); called on shutdown/teardown.
+ */
+static void free_descriptor_skb(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	XLlDma_Bd *BdPtr;
+	struct sk_buff *skb;
+	dma_addr_t skb_dma_addr;
+	u32 len, i;
+
+	/* Unmap and free skb's allocated and mapped in descriptor_init() */
+
+	/* Get the virtual address of the 1st BD in the DMA RX BD ring */
+	BdPtr = (XLlDma_Bd *) lp->Dma.RxBdRing.FirstBdAddr;
+
+	for (i = 0; i < XTE_RECV_BD_CNT; i++) {
+		skb = (struct sk_buff *) XLlDma_mBdGetId(BdPtr);
+		if (skb) {
+			skb_dma_addr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdPtr);
+			dma_unmap_single(NULL, skb_dma_addr, lp->max_frame_size,
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
+		}
+		/* find the next BD in the DMA RX BD ring */
+		BdPtr = XLlDma_mBdRingNext(&lp->Dma.RxBdRing, BdPtr);
+	}
+
+	/* Unmap and free TX skb's that have not had a chance to be freed
+	 * in DmaSendHandlerBH(). This could happen when TX Threshold is larger
+	 * than 1 and TX waitbound is 0
+	 */
+
+	/* Get the virtual address of the 1st BD in the DMA TX BD ring */
+	BdPtr = (XLlDma_Bd *) lp->Dma.TxBdRing.FirstBdAddr;
+
+	for (i = 0; i < XTE_SEND_BD_CNT; i++) {
+		skb = (struct sk_buff *) XLlDma_mBdGetId(BdPtr);
+		if (skb) {
+			skb_dma_addr = (dma_addr_t) XLlDma_mBdGetBufAddr(BdPtr);
+			len = XLlDma_mBdGetLength(BdPtr);
+			dma_unmap_single(NULL, skb_dma_addr, len,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb(skb);
+		}
+		/* find the next BD in the DMA TX BD ring */
+		BdPtr = XLlDma_mBdRingNext(&lp->Dma.TxBdRing, BdPtr);
+	}
+
+#if BD_IN_BRAM == 0
+	/*
+	 * descriptor_init() obtains this pool with kmalloc(), so it must be
+	 * returned with kfree().  The previous dma_free_coherent() call was
+	 * left over from the old dma_alloc_coherent() scheme and handed the
+	 * coherent allocator memory that never came from it.
+	 */
+	kfree(lp->desc_space);
+#else
+	iounmap(lp->desc_space);
+#endif
+}
+
+/* ETHTOOL_GSET backend: report speed/duplex/autoneg and coalescing
+ * thresholds.  Always succeeds (returns 0). */
+static int
+xenet_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u32 opts;
+	u32 thr, delay;
+	u16 phy_bmcr, phy_bmsr, phy_adv;
+
+	memset(ecmd, 0, sizeof(struct ethtool_cmd));
+
+	/* Read MAC options and the PHY control/status/advertise registers. */
+	opts = XLlTemac_GetOptions(&(lp->Emac));
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &phy_bmcr);
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_bmsr);
+	_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_ADVERTISE, &phy_adv);
+
+	/* This driver always runs full duplex over an MII-style port. */
+	ecmd->duplex = DUPLEX_FULL;
+	ecmd->supported |= SUPPORTED_MII;
+	ecmd->port = PORT_MII;
+	ecmd->speed = lp->cur_speed;
+
+	if (phy_bmsr & BMSR_ANEGCAPABLE)
+		ecmd->supported |= SUPPORTED_Autoneg;
+
+	if (phy_bmsr & BMSR_ANEGCOMPLETE) {
+		ecmd->autoneg = AUTONEG_ENABLE;
+		ecmd->advertising |= ADVERTISED_Autoneg;
+	} else {
+		ecmd->autoneg = AUTONEG_DISABLE;
+	}
+
+	/* The device base address doubles as the "PHY address" cookie. */
+	ecmd->phy_address = lp->Emac.Config.BaseAddress;
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (XLlTemac_IsDma(&lp->Emac)) {
+		/* Report the TX, then RX, interrupt-coalescing thresholds. */
+		XLlDma_BdRingGetCoalesce(&lp->Dma.TxBdRing, &thr, &delay);
+		ecmd->maxtxpkt = thr;
+
+		XLlDma_BdRingGetCoalesce(&lp->Dma.RxBdRing, &thr, &delay);
+		ecmd->maxrxpkt = thr;
+	}
+
+	ecmd->supported |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
+
+	return 0;
+}
+
+/* ETHTOOL_SSET backend: only full duplex at 10/100/1000 on the internal
+ * transceiver is accepted; anything else is -EOPNOTSUPP. */
+static int
+xenet_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+
+	if ((ecmd->duplex != DUPLEX_FULL) ||
+	    (ecmd->transceiver != XCVR_INTERNAL) ||
+	    (ecmd->phy_address &&
+	     (ecmd->phy_address != lp->Emac.Config.BaseAddress))) {
+		return -EOPNOTSUPP;
+	}
+
+	switch (ecmd->speed) {
+	case 10:
+	case 100:
+	case 1000:
+		break;
+	default:
+		printk(KERN_ERR
+		       "%s: XLlTemac: xenet_ethtool_set_settings speed not supported: %d\n",
+		       dev->name, ecmd->speed);
+		return -EOPNOTSUPP;
+	}
+
+	/* Renegotiate and reprogram the MAC only on an actual change. */
+	if (ecmd->speed != lp->cur_speed) {
+		renegotiate_speed(dev, ecmd->speed, FULL_DUPLEX);
+		_XLlTemac_SetOperatingSpeed(&lp->Emac, ecmd->speed);
+		lp->cur_speed = ecmd->speed;
+	}
+	return 0;
+}
+
+/* ETHTOOL_GCOALESCE backend: report the BD-ring interrupt coalescing
+ * settings (packet threshold and waitbound timer), RX then TX. */
+static int
+xenet_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u32 frames, usecs;
+
+	memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+	XLlDma_BdRingGetCoalesce(&lp->Dma.RxBdRing, &frames, &usecs);
+	ec->rx_max_coalesced_frames = frames;
+	ec->rx_coalesce_usecs = usecs;
+
+	XLlDma_BdRingGetCoalesce(&lp->Dma.TxBdRing, &frames, &usecs);
+	ec->tx_max_coalesced_frames = frames;
+	ec->tx_coalesce_usecs = usecs;
+
+	return 0;
+}
+
+/*
+ * disp_bd_ring - dump a DMA BD ring's bookkeeping pointers and the raw
+ * words of every descriptor to the kernel log.  Debug aid only.
+ *
+ * NOTE(review): these printk() calls carry no KERN_ log level and so log
+ * at the default level -- confirm whether KERN_DEBUG is intended.
+ */
+void disp_bd_ring(XLlDma_BdRing *bd_ring)
+{
+	int num_bds = bd_ring->AllCnt;
+	u32 *cur_bd_ptr = (u32 *) bd_ring->FirstBdAddr;
+	int idx;
+
+	printk("ChanBase: %p\n", (void *) bd_ring->ChanBase);
+	printk("FirstBdPhysAddr: %p\n", (void *) bd_ring->FirstBdPhysAddr);
+	printk("FirstBdAddr: %p\n", (void *) bd_ring->FirstBdAddr);
+	printk("LastBdAddr: %p\n", (void *) bd_ring->LastBdAddr);
+	printk("Length: %d (0x%0x)\n", bd_ring->Length, bd_ring->Length);
+	printk("RunState: %d (0x%0x)\n", bd_ring->RunState, bd_ring->RunState);
+	printk("Separation: %d (0x%0x)\n", bd_ring->Separation,
+	       bd_ring->Separation);
+	printk("BD Count: %d\n", bd_ring->AllCnt);
+
+	printk("\n");
+
+	printk("FreeHead: %p\n", (void *) bd_ring->FreeHead);
+	printk("PreHead: %p\n", (void *) bd_ring->PreHead);
+	printk("HwHead: %p\n", (void *) bd_ring->HwHead);
+	printk("HwTail: %p\n", (void *) bd_ring->HwTail);
+	printk("PostHead: %p\n", (void *) bd_ring->PostHead);
+	printk("BdaRestart: %p\n", (void *) bd_ring->BdaRestart);
+
+	printk("Ring Contents:\n");
+/*
+ * Buffer Descriptor
+ * word	byte	description
+ * 0	0h	next ptr
+ * 1	4h	buffer addr
+ * 2	8h	buffer len
+ * 3	ch	sts/ctrl | app data (0) [tx csum enable (bit 31 LSB)]
+ * 4	10h	app data (1) [tx csum begin (bits 0-15 MSB) | csum insert (bits 16-31 LSB)]
+ * 5	14h	app data (2) [tx csum seed (bits 16-31 LSB)]
+ * 6	18h	app data (3) [rx raw csum (bits 16-31 LSB)]
+ * 7	1ch	app data (4) [rx recv length (bits 18-31 LSB)]
+ * 8	20h	sw app data (0) [id]
+ */
+	printk("Idx   NextBD  BuffAddr   Length  CTL/CSE CSUM B/I CSUMSeed Raw CSUM  RecvLen       ID\n");
+	printk("--- -------- -------- -------- -------- -------- -------- -------- -------- --------\n");
+
+	/* Walk the ring; Separation is the byte stride between BDs. */
+	for (idx = 0; idx < num_bds; idx++) {
+		printk("%3d %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		       idx,
+		       cur_bd_ptr[XLLDMA_BD_NDESC_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_BUFA_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_BUFL_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_STSCTRL_USR0_OFFSET /
+				  sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_USR1_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_USR2_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_USR3_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_USR4_OFFSET / sizeof(*cur_bd_ptr)],
+		       cur_bd_ptr[XLLDMA_BD_ID_OFFSET / sizeof(*cur_bd_ptr)]);
+
+		cur_bd_ptr += bd_ring->Separation / sizeof(int);
+	}
+	printk("--------------------------------------- Done ---------------------------------------\n");
+}
+
+/* ETHTOOL_SCOALESCE backend: program the BD-ring coalescing settings.
+ * Returns 0 on success, -EIO if the DMA layer rejects the values. */
+static int
+xenet_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int status;
+
+	/* A zero waitbound is not programmable: clamp it to one and mask
+	 * off the delay interrupt instead (RX side). */
+	if (ec->rx_coalesce_usecs == 0) {
+		ec->rx_coalesce_usecs = 1;
+		dma_rx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+	}
+	status = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing,
+					  (u16) (ec->rx_max_coalesced_frames),
+					  (u16) (ec->rx_coalesce_usecs));
+	if (status != XST_SUCCESS) {
+		printk(KERN_ERR "%s: XLlDma: BdRingSetCoalesce error %d\n",
+		       dev->name, status);
+		return -EIO;
+	}
+	XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+
+	/* Same clamping rule for the TX side. */
+	if (ec->tx_coalesce_usecs == 0) {
+		ec->tx_coalesce_usecs = 1;
+		dma_tx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+	}
+	status = XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing,
+					  (u16) (ec->tx_max_coalesced_frames),
+					  (u16) (ec->tx_coalesce_usecs));
+	if (status != XST_SUCCESS) {
+		printk(KERN_ERR "%s: XLlDma: BdRingSetCoalesce error %d\n",
+		       dev->name, status);
+		return -EIO;
+	}
+	XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+
+	return 0;
+}
+
+/* ETHTOOL_GRINGPARAM backend: the BD rings are statically sized, so
+ * current and maximum counts are identical and not tunable. */
+static int
+xenet_ethtool_get_ringparam(struct net_device *dev,
+			    struct ethtool_ringparam *erp)
+{
+	memset(erp, 0, sizeof(struct ethtool_ringparam));
+
+	erp->rx_max_pending = XTE_RECV_BD_CNT;
+	erp->rx_pending = XTE_RECV_BD_CNT;
+	erp->tx_max_pending = XTE_SEND_BD_CNT;
+	erp->tx_pending = XTE_SEND_BD_CNT;
+	return 0;
+}
+
+/* Number of PHY registers captured by the ETHTOOL_GREGS dump. */
+#define EMAC_REGS_N 32
+/* Register dump handed back to user space: the standard ethtool header
+ * followed by one u16 per PHY register. */
+struct mac_regsDump {
+	struct ethtool_regs hd;
+	u16 data[EMAC_REGS_N];
+};
+
+/* ETHTOOL_GREGS backend: snapshot all PHY registers over MDIO into the
+ * caller's mac_regsDump and report success through *ret. */
+static void
+xenet_ethtool_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		       void *ret)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct mac_regsDump *dump = (struct mac_regsDump *) regs;
+	int reg;
+
+	/* Fixed format: version 0, one u16 slot per PHY register. */
+	dump->hd.version = 0;
+	dump->hd.len = sizeof(dump->data);
+	memset(dump->data, 0, sizeof(dump->data));
+
+	for (reg = 0; reg < EMAC_REGS_N; reg++)
+		_XLlTemac_PhyRead(&lp->Emac, lp->gmii_addr, reg,
+				  &(dump->data[reg]));
+
+	*(int *) ret = 0;	/* success via the out-parameter */
+}
+
+/* ETHTOOL_GDRVINFO backend: fill in driver name/version and the size of
+ * a register dump.  Always succeeds. */
+static int
+xenet_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *ed)
+{
+	/* Zero first so the strncpy'd strings stay NUL-terminated. */
+	memset(ed, 0, sizeof(*ed));
+	strncpy(ed->version, DRIVER_VERSION, sizeof(ed->version) - 1);
+	strncpy(ed->driver, DRIVER_NAME, sizeof(ed->driver) - 1);
+	/* Also tell how much memory is needed for dumping register values */
+	ed->regdump_len = sizeof(u16) * EMAC_REGS_N;
+	return 0;
+}
+
+/*
+ * xenet_do_ethtool_ioctl - dispatch SIOCETHTOOL sub-commands.
+ *
+ * Copies the ethtool request from user space and services the supported
+ * get/set operations.  Returns 0 on success, -EOPNOTSUPP for commands
+ * not implemented, -EFAULT on user-copy failure, -EIO when a helper
+ * fails.
+ *
+ * Fixes vs. the previous version: the reply 'cmd' field for GDRVINFO,
+ * GCOALESCE, GREGS and GRINGPARAM is now taken from ecmd.cmd (the value
+ * read from user space) and, where the helper memset()s the reply
+ * struct, is filled in *after* the helper call.  The old code used the
+ * uninitialized edrv.cmd (or a self-assignment) and/or had the cookie
+ * wiped by the helper's memset().
+ */
+static int xenet_do_ethtool_ioctl(struct net_device *dev, struct ifreq *rq)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	struct ethtool_cmd ecmd;
+	struct ethtool_coalesce eco;
+	struct ethtool_drvinfo edrv;
+	struct ethtool_ringparam erp;
+	struct ethtool_pauseparam epp;
+	struct mac_regsDump regs;
+	int ret = -EOPNOTSUPP;
+	u32 Options;
+
+	if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
+		return -EFAULT;
+	switch (ecmd.cmd) {
+	case ETHTOOL_GSET:	/* Get setting. No command option needed w/ ethtool */
+		ret = xenet_ethtool_get_settings(dev, &ecmd);
+		if (ret < 0)
+			return -EIO;
+		if (copy_to_user(rq->ifr_data, &ecmd, sizeof(ecmd)))
+			return -EFAULT;
+		ret = 0;
+		break;
+	case ETHTOOL_SSET:	/* Change setting. Use "-s" command option w/ ethtool */
+		ret = xenet_ethtool_set_settings(dev, &ecmd);
+		break;
+	case ETHTOOL_GPAUSEPARAM:	/* Get pause parameter information. Use "-a" w/ ethtool */
+		ret = xenet_ethtool_get_settings(dev, &ecmd);
+		if (ret < 0)
+			return ret;
+		epp.cmd = ecmd.cmd;
+		epp.autoneg = ecmd.autoneg;
+		Options = XLlTemac_GetOptions(&lp->Emac);
+		/* NOTE(review): pause reporting is keyed off the FCS-insert
+		 * option bit -- confirm this is the intended flag. */
+		if (Options & XTE_FCS_INSERT_OPTION) {
+			epp.rx_pause = 1;
+			epp.tx_pause = 1;
+		}
+		else {
+			epp.rx_pause = 0;
+			epp.tx_pause = 0;
+		}
+		if (copy_to_user
+		    (rq->ifr_data, &epp, sizeof(struct ethtool_pauseparam)))
+			return -EFAULT;
+		ret = 0;
+		break;
+	case ETHTOOL_SPAUSEPARAM:	/* Set pause parameter. Use "-A" w/ ethtool */
+		return -EOPNOTSUPP;	/* TODO: To support in next version */
+	case ETHTOOL_GRXCSUM:{	/* Get rx csum offload info. Use "-k" w/ ethtool */
+			struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+			edata.data =
+				(lp->local_features & LOCAL_FEATURE_RX_CSUM) !=
+				0;
+			if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+				return -EFAULT;
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_SRXCSUM:{	/* Set rx csum offload info. Use "-K" w/ ethtool */
+			struct ethtool_value edata;
+
+			if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+				return -EFAULT;
+
+			/* Enable only when the hardware supports RX csum. */
+			if (edata.data) {
+				if (XLlTemac_IsRxCsum(&lp->Emac) == TRUE) {
+					lp->local_features |=
+						LOCAL_FEATURE_RX_CSUM;
+				}
+			}
+			else {
+				lp->local_features &= ~LOCAL_FEATURE_RX_CSUM;
+			}
+
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_GTXCSUM:{	/* Get tx csum offload info. Use "-k" w/ ethtool */
+			struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+			edata.data = (dev->features & NETIF_F_IP_CSUM) != 0;
+			if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+				return -EFAULT;
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_STXCSUM:{	/* Set tx csum offload info. Use "-K" w/ ethtool */
+			struct ethtool_value edata;
+
+			if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+				return -EFAULT;
+
+			/* Enable only when the hardware supports TX csum. */
+			if (edata.data) {
+				if (XLlTemac_IsTxCsum(&lp->Emac) == TRUE) {
+					dev->features |= NETIF_F_IP_CSUM;
+				}
+			}
+			else {
+				dev->features &= ~NETIF_F_IP_CSUM;
+			}
+
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_GSG:{	/* Get ScatterGather info. Use "-k" w/ ethtool */
+			struct ethtool_value edata = { ETHTOOL_GSG };
+
+			edata.data = (dev->features & NETIF_F_SG) != 0;
+			if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+				return -EFAULT;
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_SSG:{	/* Set ScatterGather info. Use "-K" w/ ethtool */
+			struct ethtool_value edata;
+
+			if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+				return -EFAULT;
+
+			/* Scatter-gather only makes sense in DMA mode. */
+			if (edata.data) {
+				if (XLlTemac_IsDma(&lp->Emac)) {
+					dev->features |=
+						NETIF_F_SG | NETIF_F_FRAGLIST;
+				}
+			}
+			else {
+				dev->features &=
+					~(NETIF_F_SG | NETIF_F_FRAGLIST);
+			}
+
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_GCOALESCE:	/* Get coalescing info. Use "-c" w/ ethtool */
+		if (!(XLlTemac_IsDma(&lp->Emac)))
+			break;
+		ret = xenet_ethtool_get_coalesce(dev, &eco);
+		if (ret < 0) {
+			return -EIO;
+		}
+		/* Fill the command cookie after the helper, which memset()s
+		 * the struct and would otherwise wipe it. */
+		eco.cmd = ecmd.cmd;
+		if (copy_to_user
+		    (rq->ifr_data, &eco, sizeof(struct ethtool_coalesce))) {
+			return -EFAULT;
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_SCOALESCE:	/* Set coalescing info. Use "-C" w/ ethtool */
+		if (!(XLlTemac_IsDma(&lp->Emac)))
+			break;
+		if (copy_from_user
+		    (&eco, rq->ifr_data, sizeof(struct ethtool_coalesce)))
+			return -EFAULT;
+		ret = xenet_ethtool_set_coalesce(dev, &eco);
+		break;
+	case ETHTOOL_GDRVINFO:	/* Get driver information. Use "-i" w/ ethtool */
+		ret = xenet_ethtool_get_drvinfo(dev, &edrv);
+		if (ret < 0) {
+			return -EIO;
+		}
+		/* Set after the helper's memset(); the old code's
+		 * "edrv.cmd = edrv.cmd" self-assignment left it zeroed. */
+		edrv.cmd = ecmd.cmd;
+		edrv.n_stats = XENET_STATS_LEN;
+		if (copy_to_user
+		    (rq->ifr_data, &edrv, sizeof(struct ethtool_drvinfo))) {
+			return -EFAULT;
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_GREGS:	/* Get register values. Use "-d" with ethtool */
+		/* Use the command word read from user space; edrv is not
+		 * initialized on this path. */
+		regs.hd.cmd = ecmd.cmd;
+		xenet_ethtool_get_regs(dev, &(regs.hd), &ret);
+		if (ret < 0) {
+			return ret;
+		}
+		if (copy_to_user
+		    (rq->ifr_data, &regs, sizeof(struct mac_regsDump))) {
+			return -EFAULT;
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_GRINGPARAM:	/* Get RX/TX ring parameters. Use "-g" w/ ethtool */
+		ret = xenet_ethtool_get_ringparam(dev, &(erp));
+		if (ret < 0) {
+			return ret;
+		}
+		/* Set after the helper's memset(); ecmd.cmd, not the
+		 * uninitialized edrv.cmd, is the correct source. */
+		erp.cmd = ecmd.cmd;
+		if (copy_to_user
+		    (rq->ifr_data, &erp, sizeof(struct ethtool_ringparam))) {
+			return -EFAULT;
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_NWAY_RST:	/* Restart auto negotiation if enabled. Use "-r" w/ ethtool */
+		return -EOPNOTSUPP;	/* TODO: To support in next version */
+	case ETHTOOL_GSTRINGS:{
+			struct ethtool_gstrings gstrings = { ETHTOOL_GSTRINGS };
+			void *addr = rq->ifr_data;
+			char *strings = NULL;
+
+			if (copy_from_user(&gstrings, addr, sizeof(gstrings))) {
+				return -EFAULT;
+			}
+			switch (gstrings.string_set) {
+			case ETH_SS_STATS:
+				gstrings.len = XENET_STATS_LEN;
+				strings = *xenet_ethtool_gstrings_stats;
+				break;
+			default:
+				return -EOPNOTSUPP;
+			}
+			if (copy_to_user(addr, &gstrings, sizeof(gstrings))) {
+				return -EFAULT;
+			}
+			/* The string table follows the fixed header. */
+			addr += offsetof(struct ethtool_gstrings, data);
+			if (copy_to_user
+			    (addr, strings, gstrings.len * ETH_GSTRING_LEN)) {
+				return -EFAULT;
+			}
+			ret = 0;
+			break;
+		}
+	case ETHTOOL_GSTATS:{
+			struct {
+				struct ethtool_stats cmd;
+				uint64_t data[XENET_STATS_LEN];
+			} stats = { {
+				ETHTOOL_GSTATS, XENET_STATS_LEN}};
+
+			stats.data[0] = lp->stats.tx_packets;
+			stats.data[1] = lp->stats.tx_dropped;
+			stats.data[2] = lp->stats.tx_errors;
+			stats.data[3] = lp->stats.tx_fifo_errors;
+			stats.data[4] = lp->stats.rx_packets;
+			stats.data[5] = lp->stats.rx_dropped;
+			stats.data[6] = lp->stats.rx_errors;
+			stats.data[7] = lp->stats.rx_fifo_errors;
+			stats.data[8] = lp->stats.rx_crc_errors;
+			stats.data[9] = lp->max_frags_in_a_packet;
+			stats.data[10] = lp->tx_hw_csums;
+			stats.data[11] = lp->rx_hw_csums;
+
+			if (copy_to_user(rq->ifr_data, &stats, sizeof(stats))) {
+				return -EFAULT;
+			}
+			ret = 0;
+			break;
+		}
+	default:
+		return -EOPNOTSUPP;	/* All other operations not supported */
+	}
+	return ret;
+}
+
+/*
+ * xenet_ioctl - driver ioctl entry point.
+ *
+ * Handles ethtool requests, MII PHY register access (with the PHY
+ * polling timer stopped around each access to prevent reentrancy),
+ * and private ioctls that tune the DMA interrupt coalescing (packet
+ * threshold and waitbound timer) per direction.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int xenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+
+ /* gmii_ioctl_data has 4 u16 fields: phy_id, reg_num, val_in & val_out */
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
+ struct {
+ __u16 threshold;
+ __u32 direction;
+ } thr_arg;
+ struct {
+ __u16 waitbound;
+ __u32 direction;
+ } wbnd_arg;
+
+ int ret;
+ u32 threshold, timer;
+ XLlDma_BdRing *RingPtr;
+ u32 *dma_int_mask_ptr;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ return xenet_do_ethtool_ioctl(dev, rq);
+ case SIOCGMIIPHY: /* Get address of GMII PHY in use. */
+ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
+ data->phy_id = lp->gmii_addr;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read GMII PHY register. */
+ case SIOCDEVPRIVATE + 1: /* for binary compat, remove in 2.5 */
+ if (data->phy_id > 31 || data->reg_num > 31)
+ return -ENXIO;
+
+ /* Stop the PHY timer to prevent reentrancy. */
+ del_timer_sync(&lp->phy_timer);
+
+ _XLlTemac_PhyRead(&lp->Emac, data->phy_id, data->reg_num,
+ &data->val_out);
+
+ /* Start the PHY timer up again. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+ return 0;
+
+ case SIOCSMIIREG: /* Write GMII PHY register. */
+ case SIOCDEVPRIVATE + 2: /* for binary compat, remove in 2.5 */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (data->phy_id > 31 || data->reg_num > 31)
+ return -ENXIO;
+
+ /* Stop the PHY timer to prevent reentrancy. */
+ del_timer_sync(&lp->phy_timer);
+
+ _XLlTemac_PhyWrite(&lp->Emac, data->phy_id, data->reg_num,
+ data->val_in);
+
+ /* Start the PHY timer up again. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+ return 0;
+
+ case SIOCDEVPRIVATE + 3: /* set THRESHOLD */
+ if (XLlTemac_IsFifo(&lp->Emac))
+ return -EFAULT;
+
+ if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg)))
+ return -EFAULT;
+
+ if (thr_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ /* Preserve the current waitbound timer; change only the
+ * packet-count threshold. (The old code re-selected the
+ * same ring a second time here; that was redundant.) */
+ XLlDma_BdRingGetCoalesce(RingPtr, &threshold, &timer);
+ if ((ret = XLlDma_BdRingSetCoalesce(RingPtr, thr_arg.threshold,
+ timer)) != XST_SUCCESS) {
+ return -EIO;
+ }
+ return 0;
+
+ case SIOCDEVPRIVATE + 4: /* set WAITBOUND */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg)))
+ return -EFAULT;
+
+ if (wbnd_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ /* Preserve the current threshold; change only the timer. */
+ XLlDma_BdRingGetCoalesce(RingPtr, &threshold, &timer);
+ if (wbnd_arg.direction == XTE_SEND) {
+ dma_int_mask_ptr = &dma_tx_int_mask;
+ } else {
+ dma_int_mask_ptr = &dma_rx_int_mask;
+ }
+ if (wbnd_arg.waitbound == 0) {
+ /* Waitbound 0 means "no delay interrupt": program the
+ * minimum legal value and mask the delay IRQ instead. */
+ wbnd_arg.waitbound = 1;
+ *dma_int_mask_ptr = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ }
+ if ((ret = XLlDma_BdRingSetCoalesce(RingPtr, threshold,
+ wbnd_arg.waitbound)) != XST_SUCCESS) {
+ return -EIO;
+ }
+ XLlDma_mBdRingIntEnable(RingPtr, *dma_int_mask_ptr);
+
+ return 0;
+
+ case SIOCDEVPRIVATE + 5: /* get THRESHOLD */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg)))
+ return -EFAULT;
+
+ if (thr_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ XLlDma_BdRingGetCoalesce(RingPtr,
+ (u32 *) &(thr_arg.threshold), &timer);
+ if (copy_to_user(rq->ifr_data, &thr_arg, sizeof(thr_arg))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ case SIOCDEVPRIVATE + 6: /* get WAITBOUND */
+ if (!(XLlTemac_IsDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg))) {
+ return -EFAULT;
+ }
+ /* Bug fix: select the ring from wbnd_arg (this request's
+ * argument); the old code read thr_arg.direction, which is
+ * uninitialized in this case. */
+ if (wbnd_arg.direction == XTE_SEND) {
+ RingPtr = &lp->Dma.TxBdRing;
+ } else {
+ RingPtr = &lp->Dma.RxBdRing;
+ }
+ XLlDma_BdRingGetCoalesce(RingPtr, &threshold,
+ (u32 *) &(wbnd_arg.waitbound));
+ if (copy_to_user(rq->ifr_data, &wbnd_arg, sizeof(wbnd_arg))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/******************************************************************************
+ *
+ * NEW FUNCTIONS FROM LINUX 2.6
+ *
+ ******************************************************************************/
+
+/*
+ * xtenet_remove_ndev - release all resources held by a net device.
+ *
+ * Frees the DMA descriptor space (when DMA mode is configured and the
+ * descriptor area was allocated), unmaps the TEMAC register window and
+ * frees the net_device itself. Safe to call with ndev == NULL.
+ */
+static void xtenet_remove_ndev(struct net_device *ndev)
+{
+ if (ndev) {
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (XLlTemac_IsDma(&lp->Emac) && (lp->desc_space))
+ free_descriptor_skb(ndev);
+
+ /* BaseAddress holds the ioremap()ed virtual address. */
+ iounmap((void *) (lp->Emac.Config.BaseAddress));
+ free_netdev(ndev);
+ }
+}
+
+/*
+ * xtenet_remove - device-model remove hook.
+ *
+ * Unregisters the network interface and releases everything that
+ * xtenet_setup() allocated. Always reports success.
+ */
+static int xtenet_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ unregister_netdev(ndev);
+ xtenet_remove_ndev(ndev);
+
+ return 0; /* success */
+}
+
+/* Detect the PHY address by scanning addresses 31 down to 1 and
+ * looking at the MII status register (register 1), assuming the PHY
+ * supports 10Mbps full/half duplex. Address 0 is not probed (it is a
+ * broadcast address on some PHYs); if no PHY responds, address 0 is
+ * assumed. Feel free to change this code to match your PHY, or
+ * hardcode the address if needed.
+ */
+/* Use MII register 1 (MII status register) to detect PHY */
+#define PHY_DETECT_REG 1
+
+/* Mask used to verify certain PHY features (or register contents)
+ * in the register above:
+ * 0x1000: 10Mbps full duplex support
+ * 0x0800: 10Mbps half duplex support
+ * 0x0008: Auto-negotiation support
+ */
+#define PHY_DETECT_MASK 0x1808
+
+/*
+ * detect_phy - scan the MII bus for a responding PHY.
+ *
+ * Returns the first address (scanning 31 down to 1) whose status
+ * register reads back sane capability bits, or 0 if none responds.
+ */
+static int detect_phy(struct net_local *lp, char *dev_name)
+{
+ u16 phy_reg;
+ u32 phy_addr;
+
+ /* NOTE(review): address 0 is never probed by this loop even though
+ * the fallback below assumes a PHY at 0 -- confirm this is
+ * intentional (0 is a broadcast address on some PHYs). */
+ for (phy_addr = 31; phy_addr > 0; phy_addr--) {
+ _XLlTemac_PhyRead(&lp->Emac, phy_addr, PHY_DETECT_REG, &phy_reg);
+
+ /* 0xFFFF means nothing responded at this address. */
+ if ((phy_reg != 0xFFFF) &&
+ ((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
+ /* Found a valid PHY address */
+ printk(KERN_INFO "XTemac: PHY detected at address %d.\n", phy_addr);
+ return phy_addr;
+ }
+ }
+
+ printk(KERN_WARNING "XTemac: No PHY detected. Assuming a PHY at address 0\n");
+ return 0; /* default to zero */
+}
+
+/**
+ * xtenet_setup - shared device initialization code.
+ *
+ * Common probe path for both the legacy platform-bus binding and the
+ * device-tree (OF) binding: allocates the net_device, maps and
+ * initializes the TEMAC core, configures either the connected
+ * LocalLink DMA engine or the LocalLink FIFO, detects the PHY, and
+ * registers the network interface.
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated so far is released via xtenet_remove_ndev().
+ */
+static int xtenet_setup(
+ struct device *dev,
+ struct resource *r_mem,
+ struct resource *r_irq,
+ struct xlltemac_platform_data *pdata) {
+ int xs;
+ u32 virt_baddr; /* virtual base address of TEMAC */
+
+ XLlTemac_Config Temac_Config;
+
+ struct net_device *ndev = NULL;
+ struct net_local *lp = NULL;
+
+ int rc = 0;
+
+ /* Create an ethernet device instance */
+ ndev = alloc_etherdev(sizeof(struct net_local));
+ if (!ndev) {
+ dev_err(dev, "Could not allocate net device.\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ dev_set_drvdata(dev, ndev);
+
+ ndev->irq = r_irq->start;
+
+ /* Initialize the private data used by XEmac_LookupConfig().
+ * The private data are zeroed out by alloc_etherdev() already.
+ */
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dma_irq_r = pdata->ll_dev_dma_rx_irq;
+ lp->dma_irq_s = pdata->ll_dev_dma_tx_irq;
+ lp->fifo_irq = pdata->ll_dev_fifo_irq;
+
+ /* Setup the Config structure for the XLlTemac_CfgInitialize() call. */
+ Temac_Config.BaseAddress = r_mem->start;
+#if 0
+ Config.RxPktFifoDepth = pdata->rx_pkt_fifo_depth;
+ Config.TxPktFifoDepth = pdata->tx_pkt_fifo_depth;
+ Config.MacFifoDepth = pdata->mac_fifo_depth;
+ Config.IpIfDmaConfig = pdata->dma_mode;
+#endif
+ Temac_Config.TxCsum = pdata->tx_csum;
+ Temac_Config.RxCsum = pdata->rx_csum;
+ Temac_Config.LLDevType = pdata->ll_dev_type;
+ Temac_Config.LLDevBaseAddress = pdata->ll_dev_baseaddress;
+ Temac_Config.PhyType = pdata->phy_type;
+
+ /* Get the virtual base address for the device */
+ virt_baddr = (u32) ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+ if (0 == virt_baddr) {
+ dev_err(dev, "XLlTemac: Could not allocate iomem.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ if (XLlTemac_CfgInitialize(&lp->Emac, &Temac_Config, virt_baddr) !=
+ XST_SUCCESS) {
+ dev_err(dev, "XLlTemac: Could not initialize device.\n");
+
+ rc = -ENODEV;
+ goto error;
+ }
+
+ /* Set the MAC address from platform data */
+ memcpy(ndev->dev_addr, pdata->mac_addr, 6);
+
+ if (_XLlTemac_SetMacAddress(&lp->Emac, ndev->dev_addr) != XST_SUCCESS) {
+ /* should not fail right after an initialize */
+ dev_err(dev, "XLlTemac: could not set MAC address.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+ /* Bug fix: %02x (zero padded), not %2x (space padded). */
+ dev_info(dev,
+ "MAC address is now %02x:%02x:%02x:%02x:%02x:%02x\n",
+ pdata->mac_addr[0], pdata->mac_addr[1],
+ pdata->mac_addr[2], pdata->mac_addr[3],
+ pdata->mac_addr[4], pdata->mac_addr[5]);
+
+ lp->max_frame_size = XTE_MAX_JUMBO_FRAME_SIZE;
+ if (ndev->mtu > XTE_JUMBO_MTU)
+ ndev->mtu = XTE_JUMBO_MTU;
+
+
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ int result;
+
+ /* Bug fix: informational message, was logged at error level. */
+ dev_info(dev, "XLlTemac: using DMA mode.\n");
+
+ if (pdata->dcr_host) {
+ printk(KERN_INFO "XLlTemac: DCR address: 0x%0x\n", pdata->ll_dev_baseaddress);
+ XLlDma_Initialize(&lp->Dma, pdata->ll_dev_baseaddress);
+ } else {
+ virt_baddr = (u32) ioremap(pdata->ll_dev_baseaddress, 4096);
+ if (0 == virt_baddr) {
+ dev_err(dev,
+ "XLlTemac: Could not allocate iomem for local link connected device.\n");
+ rc = -EIO;
+ goto error;
+ }
+ printk(KERN_INFO "XLlTemac: Dma base address: phy: 0x%x, virt: 0x%x\n", pdata->ll_dev_baseaddress, virt_baddr);
+ XLlDma_Initialize(&lp->Dma, virt_baddr);
+ }
+
+
+ ndev->hard_start_xmit = xenet_DmaSend;
+
+ result = descriptor_init(ndev);
+ if (result) {
+ rc = -EIO;
+ goto error;
+ }
+
+ /* set the packet threshold and wait bound for both TX/RX directions */
+ if (DFT_TX_WAITBOUND == 0) {
+ /* Waitbound 0 disables the delay interrupt; the HW still
+ * needs a nonzero timer value, so program 1 and mask the
+ * delay IRQ instead. */
+ dma_tx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing, DFT_TX_THRESHOLD, 1);
+ } else {
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.TxBdRing, DFT_TX_THRESHOLD, DFT_TX_WAITBOUND);
+ }
+ if (xs != XST_SUCCESS) {
+ dev_err(dev,
+ "XLlTemac: could not set SEND pkt threshold/waitbound, ERROR %d",
+ xs);
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.TxBdRing, dma_tx_int_mask);
+
+ if (DFT_RX_WAITBOUND == 0) {
+ dma_rx_int_mask = XLLDMA_CR_IRQ_ALL_EN_MASK & ~XLLDMA_CR_IRQ_DELAY_EN_MASK;
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing, DFT_RX_THRESHOLD, 1);
+ } else {
+ xs = XLlDma_BdRingSetCoalesce(&lp->Dma.RxBdRing, DFT_RX_THRESHOLD, DFT_RX_WAITBOUND);
+ }
+ if (xs != XST_SUCCESS) {
+ dev_err(dev,
+ "XLlTemac: Could not set RECV pkt threshold/waitbound ERROR %d",
+ xs);
+ }
+ XLlDma_mBdRingIntEnable(&lp->Dma.RxBdRing, dma_rx_int_mask);
+ }
+ else {
+ /* Bug fix: informational message, was logged at error level. */
+ dev_info(dev,
+ "XLlTemac: using FIFO direct interrupt driven mode.\n");
+
+ virt_baddr = (u32) ioremap(pdata->ll_dev_baseaddress, 4096);
+ if (0 == virt_baddr) {
+ dev_err(dev,
+ "XLlTemac: Could not allocate iomem for local link connected device.\n");
+ rc = -EIO;
+ goto error;
+ }
+ printk(KERN_INFO "XLlTemac: Fifo base address: 0x%0x\n", virt_baddr);
+ XLlFifo_Initialize(&lp->Fifo, virt_baddr);
+
+ ndev->hard_start_xmit = xenet_FifoSend;
+ }
+
+ /** Scan to find the PHY */
+ lp->gmii_addr = detect_phy(lp, ndev->name);
+
+
+ /* initialize the netdev structure */
+ ndev->open = xenet_open;
+ ndev->stop = xenet_close;
+ ndev->change_mtu = xenet_change_mtu;
+ ndev->get_stats = xenet_get_stats;
+ ndev->flags &= ~IFF_MULTICAST;
+
+ if (XLlTemac_IsDma(&lp->Emac)) {
+ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+
+ if (XLlTemac_IsTxCsum(&lp->Emac) == TRUE) {
+ /*
+ * This hardware only supports proper checksum calculations
+ * on TCP/UDP packets.
+ */
+ ndev->features |= NETIF_F_IP_CSUM;
+ }
+ if (XLlTemac_IsRxCsum(&lp->Emac) == TRUE) {
+ lp->local_features |= LOCAL_FEATURE_RX_CSUM;
+ }
+ }
+
+ ndev->do_ioctl = xenet_ioctl;
+ ndev->tx_timeout = xenet_tx_timeout;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ /* init the stats */
+ lp->max_frags_in_a_packet = 0;
+ lp->tx_hw_csums = 0;
+ lp->rx_hw_csums = 0;
+
+#if ! XTE_AUTOSTRIPPING
+ lp->stripping =
+ (XLlTemac_GetOptions(&(lp->Emac)) & XTE_FCS_STRIP_OPTION) != 0;
+#endif
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(dev,
+ "%s: Cannot register net device, aborting.\n",
+ ndev->name);
+ goto error; /* rc is already set here... */
+ }
+
+ dev_info(dev,
+ "%s: Xilinx TEMAC at 0x%08X mapped to 0x%08X, irq=%d\n",
+ ndev->name,
+ (unsigned int)r_mem->start,
+ lp->Emac.Config.BaseAddress,
+ ndev->irq);
+
+ return 0;
+
+error:
+ if (ndev) {
+ xtenet_remove_ndev(ndev);
+ }
+ return rc;
+}
+
+/*
+ * xtenet_probe - legacy platform-bus probe hook.
+ *
+ * Validates the platform data and I/O resources supplied by the
+ * board code, then defers to the shared xtenet_setup() path.
+ */
+static int xtenet_probe(struct device *dev)
+{
+ struct resource *r_irq = NULL; /* Interrupt resources */
+ struct resource *r_mem = NULL; /* IO mem resources */
+ struct xlltemac_platform_data *pdata;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /* param check */
+ if (!pdev) {
+ dev_err(dev, "Probe called with NULL param.\n");
+ return -ENODEV;
+ }
+
+ pdata = (struct xlltemac_platform_data *) pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "Couldn't find platform data.\n");
+
+ return -ENODEV;
+ }
+
+ /* Get iospace and an irq for the device */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_irq || !r_mem) {
+ dev_err(dev, "IO resource(s) not found.\n");
+ return -ENODEV;
+ }
+
+ return xtenet_setup(dev, r_mem, r_irq, pdata);
+}
+
+/* Legacy platform-bus binding; device-tree kernels use the OF
+ * binding registered further below instead. */
+static struct device_driver xtenet_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+
+ .probe = xtenet_probe,
+ .remove = xtenet_remove
+};
+
+#ifdef CONFIG_OF
+/*
+ * get_u32 - read a u32 property from the device node.
+ *
+ * Returns the property value, or FALSE (0) with a warning when the
+ * property is absent.
+ */
+static u32 get_u32(struct of_device *ofdev, const char *s) {
+ u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+ if(p) {
+ return *p;
+ } else {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false.\n", s);
+ return FALSE;
+ }
+}
+
+/* LocalLink peers the TEMAC may be wired to: a streaming FIFO... */
+static struct of_device_id xtenet_fifo_of_match[] = {
+ { .compatible = "xlnx,xps-ll-fifo-1.00.a", },
+ { .compatible = "xlnx,xps-ll-fifo-1.00.b", },
+ { /* end of list */ },
+};
+
+/* ...or a LocalLink DMA engine. */
+static struct of_device_id xtenet_sdma_of_match[] = {
+ { .compatible = "xlnx,ll-dma-1.00.a", },
+ { /* end of list */ },
+};
+
+/*
+ * xtenet_of_probe - probe a TEMAC described in the device tree.
+ *
+ * Builds a xlltemac_platform_data structure from device-tree
+ * properties (checksum offload, PHY type, and the llink-connected
+ * FIFO or DMA node) and hands it to the common xtenet_setup() path.
+ *
+ * Returns 0 on success or a negative errno. The old code returned
+ * NO_IRQ or stale non-negative values from several failure paths and
+ * leaked the llink-connected node reference on errors; both are
+ * fixed here.
+ */
+static int __devinit xtenet_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+ struct resource r_irq_struct;
+ struct resource r_mem_struct;
+ struct resource r_connected_mem_struct;
+ struct resource r_connected_irq_struct;
+ struct xlltemac_platform_data pdata_struct;
+
+ struct resource *r_irq = &r_irq_struct; /* Interrupt resources */
+ struct resource *r_mem = &r_mem_struct; /* IO mem resources */
+ struct xlltemac_platform_data *pdata = &pdata_struct;
+ const void *mac_address;
+ int rc = 0;
+ const phandle *llink_connected_handle;
+ struct device_node *llink_connected_node;
+ u32 *dcrreg_property;
+
+ printk(KERN_INFO "Device Tree Probing \'%s\'\n",
+ ofdev->node->name);
+
+ /* Get iospace for the device */
+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
+ if(rc) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ return rc;
+ }
+
+ /* Get IRQ for the device */
+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+ if(rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "no IRQ found.\n");
+ /* Bug fix: NO_IRQ is not a negative errno. */
+ return -ENODEV;
+ }
+
+ pdata_struct.tx_csum = get_u32(ofdev, "xlnx,txcsum");
+ pdata_struct.rx_csum = get_u32(ofdev, "xlnx,rxcsum");
+ pdata_struct.phy_type = get_u32(ofdev, "xlnx,phy-type");
+ llink_connected_handle =
+ of_get_property(ofdev->node, "llink-connected", NULL);
+ if(!llink_connected_handle) {
+ dev_warn(&ofdev->dev, "no Locallink connection found.\n");
+ /* Bug fix: old code returned a stale non-error rc here. */
+ return -ENODEV;
+ }
+
+ llink_connected_node =
+ of_find_node_by_phandle(*llink_connected_handle);
+ if(!llink_connected_node) {
+ dev_warn(&ofdev->dev, "Locallink connection not found.\n");
+ return -ENODEV;
+ }
+ rc = of_address_to_resource(
+ llink_connected_node,
+ 0,
+ &r_connected_mem_struct);
+
+ /** Get the right information from whatever the locallink is
+ connected to. */
+ if(of_match_node(xtenet_fifo_of_match, llink_connected_node)) {
+ /** Connected to a fifo. */
+
+ if(rc) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ goto error;
+ }
+
+ pdata_struct.ll_dev_baseaddress = r_connected_mem_struct.start;
+ pdata_struct.ll_dev_type = XPAR_LL_FIFO;
+ pdata_struct.ll_dev_dma_rx_irq = NO_IRQ;
+ pdata_struct.ll_dev_dma_tx_irq = NO_IRQ;
+
+ rc = of_irq_to_resource(
+ llink_connected_node,
+ 0,
+ &r_connected_irq_struct);
+ if(rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "no IRQ found.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+ pdata_struct.ll_dev_fifo_irq = r_connected_irq_struct.start;
+ pdata_struct.dcr_host = 0x0;
+ } else if(of_match_node(xtenet_sdma_of_match, llink_connected_node)) {
+ /** Connected to a dma port, default to 405 type dma */
+
+ pdata->dcr_host = 0;
+ if(rc) {
+ /* no address was found, might be 440, check for dcr reg */
+
+ dcrreg_property = (u32 *)of_get_property(llink_connected_node, "dcr-reg", NULL);
+ if(dcrreg_property) {
+ r_connected_mem_struct.start = *dcrreg_property;
+ pdata->dcr_host = 0xFF;
+ } else {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ goto error;
+ }
+ }
+
+ pdata_struct.ll_dev_baseaddress = r_connected_mem_struct.start;
+ pdata_struct.ll_dev_type = XPAR_LL_DMA;
+
+ rc = of_irq_to_resource(
+ llink_connected_node,
+ 0,
+ &r_connected_irq_struct);
+ if(rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "First IRQ not found.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+ pdata_struct.ll_dev_dma_rx_irq = r_connected_irq_struct.start;
+
+ rc = of_irq_to_resource(
+ llink_connected_node,
+ 1,
+ &r_connected_irq_struct);
+ if(rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "Second IRQ not found.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+ pdata_struct.ll_dev_dma_tx_irq = r_connected_irq_struct.start;
+
+ pdata_struct.ll_dev_fifo_irq = NO_IRQ;
+ } else {
+ dev_warn(&ofdev->dev, "Locallink connection not matched.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ of_node_put(llink_connected_node);
+ mac_address = of_get_mac_address(ofdev->node);
+ if(mac_address) {
+ memcpy(pdata_struct.mac_addr, mac_address, 6);
+ } else {
+ dev_warn(&ofdev->dev, "No MAC address found.\n");
+ }
+
+ return xtenet_setup(&ofdev->dev, r_mem, r_irq, pdata);
+
+error:
+ /* Drop the reference taken by of_find_node_by_phandle(). */
+ of_node_put(llink_connected_node);
+ return rc;
+}
+
+/* OF remove hook: delegate to the common device-model remove path. */
+static int __devexit xtenet_of_remove(struct of_device *dev)
+{
+ return xtenet_remove(&dev->dev);
+}
+
+/* TEMAC core versions handled by this driver. */
+static struct of_device_id xtenet_of_match[] = {
+ { .compatible = "xlnx,xps-ll-temac-1.00.a", },
+ { .compatible = "xlnx,xps-ll-temac-1.00.b", },
+ { .compatible = "xlnx,xps-ll-temac-1.01.a", },
+ { .compatible = "xlnx,xps-ll-temac-1.01.b", },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(of, xtenet_of_match);
+
+/* Device-tree (OF platform bus) binding. */
+static struct of_platform_driver xtenet_of_driver = {
+ .name = DRIVER_NAME,
+ .match_table = xtenet_of_match,
+ .probe = xtenet_of_probe,
+ .remove = __devexit_p(xtenet_of_remove),
+};
+#endif
+
+/*
+ * xtenet_init - module init.
+ *
+ * Initializes the shared locks and deferred-work queues, then
+ * registers the platform-bus and (when CONFIG_OF) device-tree
+ * bindings. The old code OR-ed the two registration results
+ * together, which mangles errno values; register sequentially and
+ * unwind the first registration if the second fails.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int __init xtenet_init(void)
+{
+ int status;
+
+ /*
+ * Make sure the locks are initialized
+ */
+ spin_lock_init(&XTE_spinlock);
+ spin_lock_init(&XTE_tx_spinlock);
+ spin_lock_init(&XTE_rx_spinlock);
+
+ INIT_LIST_HEAD(&sentQueue);
+ INIT_LIST_HEAD(&receivedQueue);
+
+ spin_lock_init(&sentQueueSpin);
+ spin_lock_init(&receivedQueueSpin);
+
+ /*
+ * No kernel boot options used,
+ * so we just need to register the driver
+ */
+ status = driver_register(&xtenet_driver);
+ if (status)
+ return status;
+#ifdef CONFIG_OF
+ status = of_register_platform_driver(&xtenet_of_driver);
+ if (status)
+ driver_unregister(&xtenet_driver);
+#endif
+ return status;
+}
+
+/* Module exit: unregister both driver bindings. */
+static void __exit xtenet_cleanup(void)
+{
+ driver_unregister(&xtenet_driver);
+#ifdef CONFIG_OF
+ of_unregister_platform_driver(&xtenet_of_driver);
+#endif
+}
+
+module_init(xtenet_init);
+module_exit(xtenet_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
--- /dev/null
+#
+# Makefile for the Xilinx Tri-mode ethernet driver
+#
+# NOTE(review): the include path below hard-codes the PowerPC 4xx
+# xparameters location; confirm it is still valid when this driver
+# is built for other architectures (e.g. MicroBlaze).
+
+EXTRA_CFLAGS += -Idrivers/xilinx_common -Iarch/ppc/platforms/4xx/xparameters
+#ifeq ($(CONFIG_PPC32),y)
+#EXTRA_CFLAGS += -I$(TOPDIR)/arch/ppc/platforms/xilinx_ocp
+#endif
+
+# The Linux version for the Xilinx driver code.
+xilinx_temac-objs := xtemac_linux.o
+
+# The Xilinx OS independent code.
+xilinx_temac-objs += xtemac.o xtemac_fifo.o xtemac_intr.o \
+ xtemac_intr_sgdma.o xtemac_sgdma.o \
+ xtemac_control.o xtemac_intr_fifo.o \
+ xtemac_l.o xtemac_selftest.o xtemac_stats.o
+
+# Composite module object built from the pieces above.
+obj-$(CONFIG_XILINX_TEMAC) := xilinx_temac.o
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac.c
+*
+* The XTemac driver. Functions in this file are the minimum required functions
+* for this driver. See xtemac.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Switched to local link DMA driver, removed simple
+* DMA code, relocated XTemac_Initialize(),
+* XTemac_VmInitialize(), and XTemac_LookupConfig() to
+* xtemac_init.c, added XTemac_CfgInitialize().
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+void XTemac_StubHandler(void); /* Default handler routine */
+static void InitHw(XTemac *InstancePtr); /* HW reset */
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Initialize a specific XTemac instance/driver. The initialization entails:
+* - Initialize fields of the XTemac instance structure
+* - Reset HW and apply default options
+* - Configure the packet FIFOs if present
+* - Configure the DMA channels if present
+*
+* The PHY is setup independently from the TEMAC. Use the MII or whatever other
+* interface may be present for setup.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param CfgPtr is the device configuration structure containing required HW
+* build data.
+* @param VirtualAddress is the base address of the device. If address
+* translation is not utilized, this parameter can be passed in using
+* CfgPtr->BaseAddress to specify the physical base address.
+*
+* @return
+* - XST_SUCCESS if initialization was successful
+* - XST_FAILURE if initialization of packet FIFOs or DMA channels failed, or
+* device operating mode cannot be determined
+*
+******************************************************************************/
+int XTemac_CfgInitialize(XTemac *InstancePtr, XTemac_Config *CfgPtr,
+ u32 VirtualAddress)
+{
+ int Result;
+
+ /* Verify arguments */
+ /* NOTE(review): CfgPtr is dereferenced below but never asserted
+ * non-NULL -- confirm all callers pass a valid config pointer. */
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /* Clear instance memory and make copy of configuration */
+ memset(InstancePtr, 0, sizeof(XTemac));
+ memcpy(&InstancePtr->Config, CfgPtr, sizeof(XTemac_Config));
+
+ /* Set device base address */
+ InstancePtr->BaseAddress = VirtualAddress;
+
+ /* Set callbacks to an initial stub routine so uninstalled handlers
+ * fail loudly rather than jumping through a NULL pointer. */
+ InstancePtr->FifoRecvHandler =
+ (XTemac_FifoRecvHandler) XTemac_StubHandler;
+ InstancePtr->FifoSendHandler =
+ (XTemac_FifoSendHandler) XTemac_StubHandler;
+ InstancePtr->ErrorHandler = (XTemac_ErrorHandler) XTemac_StubHandler;
+ InstancePtr->AnegHandler = (XTemac_AnegHandler) XTemac_StubHandler;
+ InstancePtr->SgRecvHandler = (XTemac_SgHandler) XTemac_StubHandler;
+ InstancePtr->SgSendHandler = (XTemac_SgHandler) XTemac_StubHandler;
+
+ /* FIFO mode */
+ if (XTemac_mIsFifo(InstancePtr)) {
+ /* Select best processor based transfer method to/from FIFOs */
+ Result = XTemac_ConfigureFifoAccess(InstancePtr);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+ }
+
+ /* SGDMA mode */
+ else if (XTemac_mIsSgDma(InstancePtr)) {
+ /* Receive and send channels live at fixed offsets from the
+ * device base address. */
+ Result = XDmaV3_Initialize(&InstancePtr->RecvDma,
+ InstancePtr->BaseAddress +
+ XTE_DMA_RECV_OFFSET);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+
+ Result = XDmaV3_Initialize(&InstancePtr->SendDma,
+ InstancePtr->BaseAddress +
+ XTE_DMA_SEND_OFFSET);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+ }
+
+ /* Unknown mode */
+ else {
+ return (XST_FAILURE);
+ }
+
+ /* Reset the hardware and set default options */
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+ XTemac_Reset(InstancePtr, XTE_NORESET_HARD);
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+* Start the Ethernet controller as follows:
+* - Enable transmitter if XTE_TRANSMIT_ENABLE_OPTION is set
+* - Enable receiver if XTE_RECEIVER_ENABLE_OPTION is set
+* - If not polled mode, then start the SG DMA send and receive channels (if
+* configured) and enable the global device interrupt
+*
+* If starting for the first time after calling XTemac_Initialize() or
+* XTemac_Reset(), send and receive interrupts will not be generated until
+* XTemac_IntrFifoEnable() or XTemac_IntrSgEnable() are called. Otherwise,
+* interrupt settings made by these functions will be restored.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @return
+* - XST_SUCCESS if the device was started successfully
+* - XST_DMA_SG_NO_LIST if configured for scatter-gather DMA and a descriptor
+* list has not yet been created for the send or receive channel
+*
+* @note
+* The driver tries to match the hardware configuration. So if the hardware
+* is configured with scatter-gather DMA, the driver expects to start the
+* scatter-gather channels and expects that the user has previously set up
+* the buffer descriptor lists.
+*
+* This function makes use of internal resources that are shared between the
+* Start, Stop, and Set/ClearOptions functions. So if one task might be setting
+* device options while another is trying to start the device, the user is
+* required to provide protection of this shared data (typically using a
+* semaphore).
+*
+* This function must not be preempted by an interrupt that may service the
+* device.
+*
+******************************************************************************/
+int XTemac_Start(XTemac *InstancePtr)
+{
+ u32 Reg;
+ int Result;
+
+ /* Assert bad arguments and conditions */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* If already started, then there is nothing to do */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_SUCCESS);
+ }
+
+ /* Start SG DMA */
+ if (XTemac_mIsSgDma(InstancePtr)) {
+ /* When starting the DMA channels, both transmit and receive sides
+ * need an initialized BD list.
+ */
+ Result = XDmaV3_SgStart(&InstancePtr->RecvDma);
+ if (Result == XST_DMA_SG_NO_LIST) {
+ return (Result);
+ }
+
+ Result = XDmaV3_SgStart(&InstancePtr->SendDma);
+ if (Result == XST_DMA_SG_NO_LIST) {
+ return (Result);
+ }
+ }
+
+ /* Enable transmitter if not already enabled */
+ if (InstancePtr->Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+ /* Read-modify-write: only set the TX enable bit. */
+ Reg = XTemac_mGetHostReg(XTE_TXC_OFFSET);
+ if (!(Reg & XTE_TXC_TXEN_MASK)) {
+ XTemac_mSetHostReg(XTE_TXC_OFFSET,
+ Reg | XTE_TXC_TXEN_MASK);
+ }
+ }
+
+ /* Enable receiver? */
+ if (InstancePtr->Options & XTE_RECEIVER_ENABLE_OPTION) {
+ Reg = XTemac_mGetHostReg(XTE_RXC1_OFFSET) | XTE_RXC1_RXEN_MASK;
+ XTemac_mSetHostReg(XTE_RXC1_OFFSET, Reg);
+ }
+
+ /* Mark as started */
+ InstancePtr->IsStarted = XCOMPONENT_IS_STARTED;
+
+ /* Allow interrupts (if not in polled mode) and exit */
+ if ((InstancePtr->Options & XTE_POLLED_OPTION) == 0) {
+ /* Global gate for all device interrupt sources. */
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, XTE_DGIE_ENABLE_MASK);
+ }
+
+ return (XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+* Gracefully stop the Ethernet MAC as follows:
+* - Disable all interrupts from this device
+* - Stop DMA channels (if configured)
+* - Disable the receiver
+*
+* Device options currently in effect are not changed.
+*
+* This function will disable all interrupts by clearing the global interrupt
+* enable. Any interrupts settings that had been enabled through
+* XTemac_IntrFifoEnable(), XTemac_IntrFifoDmaEnable(), or
+* XTemac_IntrSgEnable() will be restored when XTemac_Start() is called.
+*
+* Since the transmitter is not disabled, frames currently in the packet FIFO
+* or in process by the SGDMA engine are allowed to be transmitted. XTemac API
+* functions that place new data in the packet FIFOs will not be allowed to do
+* so until XTemac_Start() is called.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @note
+* This function makes use of internal resources that are shared between the
+* Start, Stop, SetOptions, and ClearOptions functions. So if one task might be
+* setting device options while another is trying to start the device, the user
+* is required to provide protection of this shared data (typically using a
+* semaphore).
+*
+* Stopping the DMA channels may cause this function to block until the DMA
+* operation is complete. This function will not block waiting for frame data to
+* to exit the packet FIFO to the transmitter.
+*
+******************************************************************************/
+void XTemac_Stop(XTemac *InstancePtr)
+{
+ /* volatile: register readback must not be optimized away. */
+ volatile u32 Reg;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* If already stopped, then there is nothing to do */
+ if (InstancePtr->IsStarted == 0) {
+ return;
+ }
+
+ /* Disable interrupts */
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, 0);
+
+ /* For SGDMA, use the DMA driver function to stop the channels */
+ if (XTemac_mIsSgDma(InstancePtr)) {
+ (void) XDmaV3_SgStop(&InstancePtr->SendDma);
+ (void) XDmaV3_SgStop(&InstancePtr->RecvDma);
+ }
+
+ /* Disable the receiver (transmitter is left running so queued
+ * frames can drain -- see the function header comment). */
+ Reg = XTemac_mGetHostReg(XTE_RXC1_OFFSET);
+ Reg &= ~XTE_RXC1_RXEN_MASK;
+ XTemac_mSetHostReg(XTE_RXC1_OFFSET, Reg);
+
+ /* Stopping the receiver in mid-packet causes a dropped packet indication
+ * from HW. Clear it.
+ */
+ Reg = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+ if (Reg & XTE_IPXR_RECV_REJECT_MASK) {
+ /* Write-one-to-clear the reject status bit. */
+ XTemac_mSetIpifReg(XTE_IPISR_OFFSET, XTE_IPXR_RECV_REJECT_MASK);
+ }
+
+ /* Mark as stopped */
+ InstancePtr->IsStarted = 0;
+}
+
+
+/*****************************************************************************/
+/**
+* Perform a graceful reset of the Ethernet MAC. Resets the DMA channels, the
+* FIFOs, the transmitter, and the receiver.
+*
+* All options are placed in their default state. Any frames in the scatter-
+* gather descriptor lists will remain in the lists. The side effect of doing
+* this is that after a reset and following a restart of the device, frames that
+* were in the list before the reset may be transmitted or received.
+*
+* The upper layer software is responsible for re-configuring (if necessary)
+* and restarting the MAC after the reset. Note also that driver statistics
+* are not cleared on reset. It is up to the upper layer software to clear the
+* statistics if needed.
+*
+* When a reset is required due to an internal error, the driver notifies the
+* upper layer software of this need through the ErrorHandler callback and
+* specific status codes. The upper layer software is responsible for calling
+* this Reset function and then re-configuring the device.
+*
+* Resetting the IPIF should suffice in most circumstances. As a last resort
+* however, the hard TEMAC core can be reset as well using the HardCoreAction
+* parameter. In systems with two TEMACs, the reset signal is shared between
+* both devices resulting in BOTH being reset. This requires the user to save
+* the state of both TEMAC's prior to resetting the hard core on either device
+* instance.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param HardCoreAction describes how the hard core part of the TEMAC should
+*        be managed. If XTE_RESET_HARD is passed in, then the reset signal is
+*        asserted to the hard core block. This will reset both hard cores.
+*        If any other value is passed in, then only the IPIF of the given
+*        instance is reset.
+*
+******************************************************************************/
+void XTemac_Reset(XTemac *InstancePtr, int HardCoreAction)
+{
+	u32 Data;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Stop the device and reset HW. Driver options revert to the defaults;
+	 * the upper layer must re-apply any custom options after this call.
+	 */
+	XTemac_Stop(InstancePtr);
+	InstancePtr->Options = XTE_DEFAULT_OPTIONS;
+
+	/* Reset IPIF, then wait for the reset to propagate through HW */
+	XTemac_mSetIpifReg(XTE_DSR_OFFSET, XTE_DSR_RESET_MASK);
+	udelay(XTE_RESET_IPIF_DELAY_US);
+
+	/* Reset hard core if required (note: resets BOTH TEMACs, see above) */
+	if (HardCoreAction == XTE_RESET_HARD) {
+		Data = XTemac_mGetIpifReg(XTE_CR_OFFSET);
+		XTemac_mSetIpifReg(XTE_CR_OFFSET, Data | XTE_CR_HRST_MASK);
+		udelay(XTE_RESET_HARD_DELAY_US);
+	}
+
+	/* Setup HW (one-time post-reset initialization) */
+	InitHw(InstancePtr);
+}
+
+
+/******************************************************************************
+ * Perform one-time setup of HW. The setups performed here only need to occur
+ * once after any reset.
+ *
+ * Leaves the receiver and transmitter disabled; they are (re)enabled by
+ * XTemac_Start() according to the enable options currently in effect.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ ******************************************************************************/
+static void InitHw(XTemac *InstancePtr)
+{
+	u32 Reg;
+
+	/* Disable the receiver */
+	Reg = XTemac_mGetHostReg(XTE_RXC1_OFFSET);
+	Reg &= ~XTE_RXC1_RXEN_MASK;
+	XTemac_mSetHostReg(XTE_RXC1_OFFSET, Reg);
+
+	/* Stopping the receiver in mid-packet causes a dropped packet indication
+	 * from HW. Clear it.
+	 */
+	Reg = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+	if (Reg & XTE_IPXR_RECV_REJECT_MASK) {
+		XTemac_mSetIpifReg(XTE_IPISR_OFFSET, XTE_IPXR_RECV_REJECT_MASK);
+	}
+
+	/* Default IPIF interrupt block enable mask */
+	Reg = (XTE_DXR_CORE_MASK | XTE_DXR_DPTO_MASK | XTE_DXR_TERR_MASK);
+
+	/* FIFO direct devices additionally get the packet FIFO interrupts */
+	if (XTemac_mIsFifo(InstancePtr)) {
+		Reg |= (XTE_DXR_RECV_FIFO_MASK | XTE_DXR_SEND_FIFO_MASK);
+	}
+
+	XTemac_mSetIpifReg(XTE_DIER_OFFSET, Reg);
+
+	if (XTemac_mIsSgDma(InstancePtr)) {
+		/* Setup SGDMA interrupt coalescing defaults */
+		(void) XTemac_IntrSgCoalSet(InstancePtr, XTE_SEND,
+					    XTE_SGDMA_DFT_THRESHOLD,
+					    XTE_SGDMA_DFT_WAITBOUND);
+		(void) XTemac_IntrSgCoalSet(InstancePtr, XTE_RECV,
+					    XTE_SGDMA_DFT_THRESHOLD,
+					    XTE_SGDMA_DFT_WAITBOUND);
+
+		/* Setup interrupt enable data for each channel */
+		Reg = (XDMAV3_IPXR_PCTR_MASK |
+		       XDMAV3_IPXR_PWBR_MASK | XDMAV3_IPXR_DE_MASK);
+
+		XDmaV3_SetInterruptEnable(&InstancePtr->SendDma, Reg);
+		XDmaV3_SetInterruptEnable(&InstancePtr->RecvDma, Reg);
+	}
+
+	/* Sync default options with HW but leave receiver and transmitter
+	 * disabled. They get enabled with XTemac_Start() if XTE_TRANSMITTER_ENABLE-
+	 * _OPTION and XTE_RECEIVER_ENABLE_OPTION are set
+	 */
+	XTemac_SetOptions(InstancePtr, InstancePtr->Options &
+			  ~(XTE_TRANSMITTER_ENABLE_OPTION |
+			    XTE_RECEIVER_ENABLE_OPTION));
+
+	/* Clear in HW any option not currently selected in the instance */
+	XTemac_ClearOptions(InstancePtr, ~InstancePtr->Options);
+
+	/* Set default MDIO divisor (safe value for PHY management access) */
+	XTemac_PhySetMdioDivisor(InstancePtr, XTE_MDIO_DIV_DFT);
+}
+
+/******************************************************************************/
+/**
+ * This is a stub for the asynchronous callbacks. The stub is here in case the
+ * upper layer forgot to set the handler(s). On initialization, all handlers are
+ * set to this callback. It is considered an error for this handler to be
+ * invoked.
+ *
+ * @note Reaching this function means an interrupt event occurred for which no
+ *       real handler was registered with XTemac_SetHandler(); it always
+ *       asserts.
+ *
+ ******************************************************************************/
+void XTemac_StubHandler(void)
+{
+	XASSERT_VOID_ALWAYS();
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xtemac.h
+ *
+ * The Xilinx Tri-Mode Ethernet driver component. This driver supports the
+ * Virtex-4(TM) 10/100/1000 MAC (TEMAC).
+ *
+ * For a full description of TEMAC features, please see the HW spec. This driver
+ * supports the following features:
+ * - Memory mapped access to host interface registers
+ * - API for polled frame transfers (FIFO direct HW configuration only)
+ * - API for interrupt driven frame transfers for HW configured with FIFO
+ * direct, or Scatter Gather DMA
+ * - Virtual memory support
+ * - Unicast, broadcast, and multicast receive address filtering
+ * - Full duplex operation (half duplex not supported)
+ * - Automatic source address insertion or overwrite (programmable)
+ * - Automatic PAD & FCS insertion and stripping (programmable)
+ * - Flow control
+ * - VLAN frame support
+ * - Pause frame support
+ * - Jumbo frame support
+ * - Data Realignment Engine (DRE)
+ * - Checksum offload
+ *
+ * <b>Driver Description</b>
+ *
+ * The device driver enables higher layer software (e.g., an application) to
+ * communicate to the TEMAC. The driver handles transmission and reception of
+ * Ethernet frames, as well as configuration and control. No pre or post
+ * processing of frame data is performed. The driver does not validate the
+ * contents of an incoming frame in addition to what has already occurred in HW.
+ * A single device driver can support multiple devices even when those devices
+ * have significantly different configurations.
+ *
+ * <b>Initialization & Configuration</b>
+ *
+ * The XTemac_Config structure is used by the driver to configure itself. This
+ * configuration structure is typically created by the tool-chain based on HW
+ * build properties.
+ *
+ * To support multiple runtime loading and initialization strategies employed
+ * by various operating systems, the driver instance can be initialized in one
+ * of the following ways:
+ *
+ * - XTemac_Initialize(InstancePtr, DeviceId): The driver looks up its own
+ * configuration structure created by the tool-chain based on an ID provided
+ * by the tool-chain.
+ *
+ * - XTemac_VmInitialize(InstancePtr, DeviceId, VirtualAddress): Operates
+ * like XTemac_Initialize() except the physical base address found in the
+ * configuration structure is replaced with the provided virtual address
+ *
+ * - XTemac_CfgInitialize(InstancePtr, CfgPtr, VirtualAddress): Uses a
+ * configuration structure provided by the caller. If running in a system
+ * with address translation, the provided virtual memory base address
+ * replaces the physical address present in the configuration structure.
+ *
+ * The device can be configured for 2 major modes of operation: FIFO direct,
+ * or scatter gather DMA (SGDMA). Each of these modes are independent of one
+ * another and have their own frame transfer API. This driver can manage an
+ * arbitrary number of devices each with its own operating mode and supporting
+ * features and options.
+ *
+ * The driver tries to use the features built into the device as described
+ * by the configuration structure. So if the hardware is configured with
+ * SGDMA, the driver expects to start the SGDMA channels and expects that
+ * the user has set up the buffer descriptor lists.
+ *
+ * <b>Interrupts and Asynchronous Callbacks</b>
+ *
+ * The driver has no dependencies on the interrupt controller. It provides
+ * one interrupt handler per mode of operation (FIFO, SGDMA) that can be
+ * connected to the system interrupt controller by BSP/OS specific means.
+ *
+ * When an interrupt occurs, the handler will perform a small amount of
+ * housekeeping work, determine the source of the interrupt, and call the
+ * appropriate callback function. All callbacks are registered by the user
+ * level application.
+ *
+ * SGDMA implements interrupt coalescing features that reduce the frequency
+ * of interrupts. A more complete discussion of this feature occurs in the API
+ * section below.
+ *
+ * <b>Device Reset</b>
+ *
+ * Some errors that can occur require a device reset. These errors are listed
+ * in the XTemac_ErrorHandler() function typedef header. The user's error
+ * callback handler is responsible for resetting and re-configuring the device.
+ * When a device reset is required, XTemac_Reset() should be utilized.
+ *
+ * <b>Virtual Memory</b>
+ *
+ * This driver may be used in systems with virtual memory support by using one
+ * of the initialization functions that supply the virtual memory address of
+ * the device.
+ *
+ * All virtual to physical memory mappings must occur prior to accessing the
+ * driver API. The driver does not support multiple virtual memory translations
+ * that map to the same physical address.
+ *
+ * For DMA transactions, user buffers supplied to the driver must be in terms
+ * of their physical address.
+ *
+ * <b>Transfer Mode APIs</b>
+ *
+ * Using the proper API depends on how the HW has been configured. There are
+ * two interrupt driven modes (FIFO Direct, and SGDMA). FIFO Direct also
+ * supports a polled mode of operation.
+ *
+ * It is the user's responsibility to use the API that matches the device
+ * configuration. Most API functions do not perform runtime checks to verify
+ * proper configuration. If an API function is called in error on a device
+ * instance, then that function may attempt to access registers that are not
+ * present resulting in bus errors and/or corrupted data. Macros are defined
+ * that help the user determine which API can be used.
+ *
+ * All API functions are prototyped in xtemac.h and are implemented in various
+ * xtemac_*.c files by feature.
+ *
+ * The following sections discuss in more detail each of the available APIs.
+ *
+ * <b>FIFO Direct API</b>
+ *
+ * This device mode utilizes the processor to transfer data between user buffers
+ * and the packet FIFOs. HW configured in this way uses the least amount of FPGA
+ * resources but provides the lowest data throughput.
+ *
+ * This API allows user independent access to the data packet, packet length,
+ * and event FIFOs. While more sophisticated device modes keep these FIFOs
+ * in sync automatically, the user has the primary responsibility in FIFO
+ * direct mode.
+ *
+ * The packet FIFOs contain the frame data while the length/status FIFOs contain
+ * receive lengths, transmit lengths, and transmit statuses. When these FIFOs
+ * go out of sync, then packet data will become corrupted.
+ *
+ * On the transmit side, the transmit packet FIFO may contain more than one
+ * Ethernet packet placed there by XTemac_FifoWrite(). The number of packets it
+ * may contain depends on its depth which is controlled at HW build time. For
+ * each packet in the FIFO, the user must initiate a transmit by writing into
+ * the transmit length FIFO (see XTemac_FifoSend()). The number of bytes
+ * specified to transmit must match exactly the lengths of packets in the
+ * packet FIFO. For example, if a 76 byte packet was written followed by a
+ * 124 byte packet, then the transmit length FIFO must be written with 76
+ * followed by 124. At the completion of the transmission, the transmit status
+ * FIFO must be read to obtain the outcome of the operation. The first status
+ * will be for the 76 byte packet followed by the 124 byte packet.
+ *
+ * If there is not enough data in the packet FIFO to complete a transmit
+ * operation, an underrun condition will be reported. The frame that gets
+ * transmitted in this case is forced to a corrupted state so that it
+ * will be flagged as invalid by other receivers.
+ *
+ * On the receive side, it is a little easier to keep things in sync because
+ * the HW writes to the receive packet FIFO. Just like the transmit packet FIFO,
+ * the receive packet FIFO can contain more than one received Ethernet frame.
+ * Each time a length is extracted from the receive length FIFO (see
+ * XTemac_FifoRecv()), then that many bytes must be read from the receive
+ * packet FIFO by XTemac_FifoRead().
+ *
+ * The easiest way to keep these FIFOs in sync is to process a single frame at
+ * a time. But when performance is an issue, it may be desirable to process
+ * multiple or even partial frames from non-contiguous memory regions. The
+ * examples that accompany this driver illustrate how these advanced frame
+ * processing methods can be implemented.
+ *
+ * In interrupt driven mode, user callbacks are invoked by the interrupt handler
+ * to signal that frames have arrived, frames have been transmitted, or an
+ * error has occurred. When the XTE_POLLED_OPTION is set, the user must use
+ * send and receive query status functions to determine when these events
+ * occur.
+ *
+ * <b>SGDMA API</b>
+ *
+ * This API utilizes scatter-gather DMA (SGDMA) channels to transfer frame data
+ * between user buffers and the packet FIFOs.
+ *
+ * The SGDMA engine uses buffer descriptors (BDs) to describe Ethernet frames.
+ * These BDs are typically chained together into a list the HW follows when
+ * transferring data in and out of the packet FIFOs. Each BD describes a memory
+ * region containing either a full or partial Ethernet packet.
+ *
+ * The frequency of interrupts can be controlled with the interrupt coalescing
+ * features of the SG DMA engine. These features can be used to optimize
+ * interrupt latency and throughput for the user's network traffic conditions.
+ * The packet threshold count will delay processor interrupts until a
+ * programmable number of packets have arrived or have been transmitted. The
+ * packet wait bound timer can be used to cause a processor interrupt even though
+ * the packet threshold has not been reached. The timer begins counting after the
+ * last packet is processed. If no other packet is processed as the timer
+ * expires, then an interrupt will be generated.
+ *
+ * Another form of interrupt control is provided with the XTE_SGEND_INT_OPTION
+ * option. When enabled, an interrupt will occur when SGDMA engine completes the
+ * last BD to be processed and transitions to an idle state. This feature may be
+ * useful when a set of BDs have been queued up and the user only wants to be
+ * notified when they have all been processed by the HW. To use this feature
+ * effectively, interrupt coalescing should be disabled (packet threshold = 0,
+ * wait bound timer = 0), or the packet threshold should be set to a number
+ * larger than the number of packets queued up.
+ *
+ * By default, the driver will set the packet threshold = 1, wait bound timer =
+ * 0, and disable the XTE_SGEND_INT_OPTION. These settings will cause one
+ * interrupt per packet.
+ *
+ * This API requires the user to understand how the SGDMA driver operates.
+ * The following paragraphs provide some explanation, but the user is encouraged
+ * to read documentation in xdmav3.h and xdmabdv3.h as well as study example code
+ * that accompanies this driver.
+ *
+ * The API is designed to get BDs to and from the SGDMA engine in the most
+ * efficient means possible. The first step is to establish a memory region to
+ * contain all BDs for a specific channel. This is done with XTemac_SgSetSpace()
+ * and assumes the memory region is non-cached. This function sets up a BD ring
+ * that HW will follow as BDs are processed. The ring will consist of a user
+ * defined number of BDs which will all be partially initialized. For example on
+ * the transmit channel, the driver will initialize all BDs' so that they are
+ * configured for transmit. The more fields that can be permanently setup at
+ * initialization, then the fewer accesses will be needed to each BD while the
+ * SGDMA engine is in operation resulting in better throughput and CPU
+ * utilization. The best case initialization would require the user to set only
+ * a frame buffer address and length prior to submitting the BD to the engine.
+ *
+ * BDs move through the engine with the help of functions XTemac_SgAlloc(),
+ * XTemac_SgCommit(), XTemac_SgGetProcessed(), and XTemac_SgFree(). All these
+ * functions handle BDs that are in place. That is, there are no copies of BDs
+ * kept anywhere and any BD the user interacts with is an actual BD from the
+ * same ring HW accesses. Changing fields within BDs is done through an API
+ * defined in xdmabdv3.h as well as checksum offloading macros defined in
+ * xtemac.h.
+ *
+ * BDs in the ring go through a series of states as follows:
+ * 1. Idle. The driver controls BDs in this state.
+ * 2. The user has data to transfer. XTemac_SgAlloc() is called to reserve
+ * BD(s). Once allocated, the user may setup the BD(s) with frame buffer
+ * address, length, and other attributes. The user controls BDs in this
+ * state.
+ * 3. The user submits BDs to the SGDMA engine with XTemac_SgCommit. BDs in
+ * this state are either waiting to be processed by HW, are in process, or
+ * have been processed. The SGDMA engine controls BDs in this state.
+ * 4. Processed BDs are retrieved with XTemac_SgGetProcessed() by the
+ * user. Once retrieved, the user can examine each BD for the outcome of
+ * the DMA transfer. The user controls BDs in this state. After examining
+ * the BDs the user calls XTemac_SgFree() which places the BDs back into
+ * state 1.
+ *
+ * Each of the four BD accessor functions operate on a set of BDs. A set is
+ * defined as a segment of the BD ring consisting of one or more BDs. The user
+ * views the set as a pointer to the first BD along with the number of BDs for
+ * that set. The set can be navigated by using macros XTemac_mSgRecvBdNext() or
+ * XTemac_mSgSendBdNext(). The user must exercise extreme caution when changing
+ * BDs in a set as there is nothing to prevent doing a mSgRecvBdNext past the
+ * end of the set and modifying a BD out of bounds.
+ *
+ * XTemac_SgAlloc() + XTemac_SgCommit(), as well as XTemac_SgGetProcessed() +
+ * XTemac_SgFree() are designed to be used in tandem. The same BD set retrieved
+ * with SgAlloc should be the same one provided to HW with SgCommit. Same goes
+ * with SgGetProcessed and SgFree.
+ *
+ * <b>SG DMA Troubleshooting</b>
+ *
+ * To verify internal structures of BDs and the BD ring, the function
+ * XTemac_SgCheck() is provided. This function should be used as a debugging
+ * or diagnostic tool. If it returns a failure, the user must perform more
+ * in depth debugging to find the root cause.
+ *
+ * To avoid problems, do not use the following BD macros for transmit channel
+ * BDs (XTE_SEND):
+ *
+ * - XDmaBdV3_mClear()
+ * - XDmaBdV3_mSetRxDir()
+ *
+ * and for receive channel BDs (XTE_RECV):
+ *
+ * - XDmaBdV3_mClear()
+ * - XDmaBdV3_mSetTxDir()
+ *
+ * <b>Alignment & Data Cache Restrictions</b>
+ *
+ * FIFO Direct:
+ *
+ * - No frame buffer alignment restrictions for Tx or Rx
+ * - Buffers not aligned on a 4-byte boundary will take longer to process
+ * as the driver uses a small transfer buffer to realign them prior to
+ * packet FIFO access
+ * - Frame buffers may be in cached memory
+ *
+ * SGDMA Tx with DRE:
+ *
+ * - No frame buffer alignment restrictions
+ * - If frame buffers exist in cached memory, then they must be flushed prior
+ * to committing them to HW
+ * - Descriptors must be 4-byte aligned
+ * - Descriptors must be in non-cached memory
+ *
+ * SGDMA Tx without DRE:
+ *
+ * - Frame buffers must be 8-byte aligned
+ * - If frame buffers exist in cached memory, then they must be flushed prior
+ * to committing them to HW
+ * - Descriptors must be 4-byte aligned
+ * - Descriptors must be in non-cached memory
+ *
+ * SGDMA Rx with DRE:
+ *
+ * - No frame buffer alignment restrictions
+ * - If frame buffers exist in cached memory, then the cache must be
+ * invalidated for the memory region containing the frame prior to data
+ * access
+ * - Descriptors must be 4-byte aligned
+ * - Descriptors must be in non-cached memory
+ *
+ * SGDMA Rx without DRE:
+ *
+ * - Frame buffers must be 8-byte aligned
+ * - If frame buffers exist in cached memory, then the cache must be
+ * invalidated for the memory region containing the frame prior to data
+ * access
+ * - Descriptors must be 4-byte aligned
+ * - Descriptors must be in non-cached memory
+ *
+ * <b>Buffer Copying</b>
+ *
+ * The driver is designed for a zero-copy buffer scheme. That is, the driver will
+ * not copy buffers. This avoids potential throughput bottlenecks within the
+ * driver.
+ *
+ * The only exception to this is when buffers are passed to XTemac_FifoRead() and
+ * XTemac_FifoWrite() on 1, 2, or 3 byte alignments. These buffers will be byte
+ * copied into a small holding area on their way to or from the packet FIFOs.
+ * For PLB TEMAC this holding area is 8 bytes each way. If byte copying is
+ * required, then the transfer will take longer to complete.
+ *
+ * <b>Checksum Offloading</b>
+ *
+ * If configured, the device can compute a 16-bit checksum from frame data. In
+ * most circumstances this can lead to a substantial gain in throughput.
+ *
+ * For Tx, the SW can specify where in the frame the checksum calculation is
+ * to start, where it should be inserted, and a seed value. The checksum is
+ * calculated from the start point through the end of frame. For Rx, the 15th
+ * byte to end of frame is checksummed. This is the entire Ethernet payload
+ * for non-VLAN frames.
+ *
+ * Setting up and accessing checksum data is done with XTemac API macro calls
+ * on buffer descriptors on a per-frame basis.
+ *
+ * Since this HW implementation is general purpose in nature system SW must
+ * perform pre and post frame processing to obtain the desired results for the
+ * types of packets being transferred. Most of the time this will be TCP/IP
+ * traffic.
+ *
+ * TCP/IP and UDP/IP frames contain separate checksums for the IP header and
+ * UDP/TCP header+data. With this HW implementation, the IP header checksum
+ * cannot be offloaded. Many stacks that support offloading will compute the IP
+ * header if required and use HW to compute the UDP/TCP header+data checksum.
+ * There are other complications concerning the IP pseudo header that must be
+ * taken into consideration. Readers should consult a TCP/IP design reference
+ * for more details.
+ *
+ * There are certain device options that will affect the checksum calculation
+ * performed by HW for Tx:
+ *
+ * - FCS insertion disabled (XTE_FCS_INSERT_OPTION): SW is required to
+ * calculate and insert the FCS value at the end of the frame, but the
+ * checksum must be known ahead of time prior to calculating the FCS.
+ * Therefore checksum offloading cannot be used in this situation.
+ *
+ * And for Rx:
+ *
+ * - FCS/PAD stripping disabled (XTE_FCS_STRIP_OPTION): The 4 byte FCS at the
+ * end of frame will be included in the HW calculated checksum. SW must
+ * subtract out this data.
+ *
+ * - FCS/PAD stripping disabled (XTE_FCS_STRIP_OPTION): For frames smaller
+ * than 64 bytes, padding will be included in the HW calculated checksum.
+ * SW must subtract out this data. It may be better to allow the TCP/IP
+ * stack verify checksums for this type of packet.
+ *
+ * - VLAN enabled (XTE_VLAN_OPTION): The 4 extra bytes in the Ethernet header
+ * affect the HW calculated checksum. SW must subtract out the 1st two
+ * 16-bit words starting at the 15th byte.
+ *
+ * <b>PHY Communication</b>
+ *
+ * Prior to PHY access, the MDIO clock must be setup. This driver will set a
+ * safe default that should work with PLB bus speeds of up to 150 MHz and keep
+ * the MDIO clock below 2.5 MHz. If the user wishes faster access to the PHY
+ * then the clock divisor can be set to a different value (see
+ * XTemac_PhySetMdioDivisor()).
+ *
+ * MII register access is performed through the functions XTemac_PhyRead() and
+ * XTemac_PhyWrite().
+ *
+ * <b>Link Sync</b>
+ *
+ * When the device is used in a multispeed environment, the link speed must be
+ * explicitly set using XTemac_SetOperatingSpeed() and must match the speed the
+ * PHY has negotiated. If the speeds are mismatched, then the MAC will not pass
+ * traffic.
+ *
+ * Using the XTE_ANEG_OPTION and the provided callback handler, SW can be
+ * notified when the PHY has completed auto-negotiation.
+ *
+ * <b>Asserts</b>
+ *
+ * Asserts are used within all Xilinx drivers to enforce constraints on argument
+ * values. Asserts can be turned off on a system-wide basis by defining, at
+ * compile time, the NDEBUG identifier. By default, asserts are turned on and it
+ * is recommended that users leave asserts on during development. For deployment
+ * use -DNDEBUG compiler switch to remove assert code.
+ *
+ * <b>Driver Errata</b>
+ *
+ * - A dropped receive frame indication may be reported by the driver after
+ * calling XTemac_Stop() followed by XTemac_Start(). This can occur if a
+ * frame is arriving when stop is called.
+ * - On Rx with checksum offloading enabled and FCS/PAD stripping disabled,
+ * FCS and PAD data will be included in the checksum result.
+ * - On Tx with checksum offloading enabled and auto FCS insertion disabled,
+ * the user calculated FCS will be included in the checksum result.
+ *
+ * @note
+ *
+ * Xilinx drivers are typically composed of two components, one is the driver
+ * and the other is the adapter. The driver is independent of OS and processor
+ * and is intended to be highly portable. The adapter is OS-specific and
+ * facilitates communication between the driver and an OS.
+ * <br><br>
+ * This driver is intended to be RTOS and processor independent. Any needs for
+ * dynamic memory management, threads or thread mutual exclusion, or cache
+ * control must be satisfied by the layer above this driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a rmm 06/01/05 First release
+ * 1.00b rmm 09/23/05 Replaced XTemac_GetPhysicalInterface() with macro
+ * XTemac_mGetPhysicalInterface(). Implemented
+ * XTemac_PhyRead/Write() functions. Redesigned MII/RGMII/
+ * SGMII status functions. Renamed most of the host
+ * registers to reflect latest changes in HW spec, added
+ * XST_FIFO_ERROR return code to polled FIFO query
+ * functions.
+ * 2.00a rmm 11/21/05 Switched to local link DMA driver, removed simple-DMA
+ * mode, added auto-negotiation callback, added checksum
+ * offload access macros, removed XST_SEND_ERROR error
+ * class completely since TSR bits went away, removed
+ * XST_FAILURE return code for XTemac_FifoQuerySendStatus(),
+ * added static init feature, changed XTE_FCS_STRIP_OPTION
+ * to default to set.
+ * </pre>
+ *
+ *****************************************************************************/
+
+#ifndef XTEMAC_H /* prevent circular inclusions */
+#define XTEMAC_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include <asm/delay.h>
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xparameters.h"
+#include "xipif_v1_23_b.h"
+#include "xpacket_fifo_v2_00_a.h"
+#include "xdmav3.h"
+#include "xtemac_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/*
+ * Device information
+ */
+#define XTE_DEVICE_NAME "xtemac"
+#define XTE_DEVICE_DESC "Xilinx Tri-speed 10/100/1000 MAC"
+
+
+/** @name Configuration options
+ *
+ * Device configuration options. See the XTemac_SetOptions(),
+ * XTemac_ClearOptions() and XTemac_GetOptions() for information on how to use
+ * options.
+ *
+ * The default state of the options are noted and are what the device and driver
+ * will be set to after calling XTemac_Reset() or XTemac_Initialize().
+ *
+ * @{
+ */
+
+#define XTE_PROMISC_OPTION 0x00000001
+/**< Accept all incoming packets.
+ * This option defaults to disabled (cleared) */
+
+#define XTE_JUMBO_OPTION 0x00000002
+/**< Jumbo frame support for Tx & Rx.
+ * This option defaults to disabled (cleared) */
+
+#define XTE_VLAN_OPTION 0x00000004
+/**< VLAN Rx & Tx frame support.
+ * This option defaults to disabled (cleared) */
+
+#define XTE_FLOW_CONTROL_OPTION 0x00000010
+/**< Enable recognition of flow control frames on Rx
+ * This option defaults to enabled (set) */
+
+#define XTE_FCS_STRIP_OPTION               0x00000020
+/**< Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
+ *   stripped.
+ *   This option defaults to enabled (set) */
+
+#define XTE_FCS_INSERT_OPTION 0x00000040
+/**< Generate FCS field and add PAD automatically for outgoing frames.
+ * This option defaults to enabled (set) */
+
+#define XTE_LENTYPE_ERR_OPTION 0x00000080
+/**< Enable Length/Type error checking for incoming frames. When this option is
+ * set, the MAC will filter frames that have a mismatched type/length field
+ * and if XTE_REPORT_RXERR_OPTION is set, the user is notified when these
+ * types of frames are encountered. When this option is cleared, the MAC will
+ * allow these types of frames to be received.
+ *
+ * This option defaults to enabled (set) */
+
+#define XTE_SGEND_INT_OPTION 0x00000100
+/**< Enable the SGEND interrupt with SG DMA. When enabled, an interrupt will
+ * be triggered when the end of the buffer descriptor list is reached. The
+ * interrupt will occur despite interrupt coalescing settings.
+ * This option defaults to disabled (cleared) */
+
+#define XTE_POLLED_OPTION 0x00000200
+/**< Polled mode communications. Enables use of XTemac_FifoQuerySendStatus()
+ * and XTemac_FifoQueryRecvStatus(). Users may enter/exit polled mode
+ * from any interrupt driven mode.
+ * This option defaults to disabled (cleared) */
+
+#define XTE_REPORT_RXERR_OPTION 0x00000400
+/**< Enable reporting of dropped receive packets due to errors
+ * This option defaults to enabled (set) */
+
+#define XTE_TRANSMITTER_ENABLE_OPTION 0x00000800
+/**< Enable the transmitter.
+ * This option defaults to enabled (set) */
+
+#define XTE_RECEIVER_ENABLE_OPTION 0x00001000
+/**< Enable the receiver
+ * This option defaults to enabled (set) */
+
+#define XTE_BROADCAST_OPTION 0x00002000
+/**< Allow reception of the broadcast address
+ * This option defaults to enabled (set) */
+
+#define XTE_MULTICAST_CAM_OPTION 0x00004000
+/**< Allows reception of multicast addresses programmed into CAM
+ * This option defaults to disabled (clear) */
+
+#define XTE_REPORT_TXSTATUS_OVERRUN_OPTION 0x00008000
+/**< Enable reporting the overrun of the Transmit status FIFO. This type of
+ * error is latched by HW and can be cleared only by a reset. SGDMA systems,
+ * this option should be enabled since the DMA engine is responsible for
+ * keeping this from occurring. For FIFO direct systems, this error may be
+ * a nuisance because a SW system may be able to transmit frames faster
+ * than the interrupt handler can handle retrieving statuses.
+ * This option defaults to enabled (set) */
+
+#define XTE_ANEG_OPTION                    0x00010000
+/**< Enable autonegotiation interrupt
+ *   This option defaults to disabled (clear) */
+
+#define XTE_DEFAULT_OPTIONS \
+ (XTE_FLOW_CONTROL_OPTION | \
+ XTE_BROADCAST_OPTION | \
+ XTE_FCS_INSERT_OPTION | \
+ XTE_FCS_STRIP_OPTION | \
+ XTE_LENTYPE_ERR_OPTION | \
+ XTE_TRANSMITTER_ENABLE_OPTION | \
+ XTE_REPORT_RXERR_OPTION | \
+ XTE_REPORT_TXSTATUS_OVERRUN_OPTION | \
+ XTE_RECEIVER_ENABLE_OPTION)
+/**< Default options set when device is initialized or reset */
+
+/*@}*/
+
+/** @name Direction identifiers
+ *
+ * These are used by several functions and callbacks that need
+ * to specify whether an operation specifies a send or receive channel.
+ * @{
+ */
+#define XTE_SEND 1
+#define XTE_RECV 2
+/*@}*/
+
+/** @name Reset parameters
+ *
+ * These are used by function XTemac_Reset().
+ * @{
+ */
+#define XTE_RESET_HARD 1
+#define XTE_NORESET_HARD 0
+/*@}*/
+
+/** @name XTemac_FifoWrite/Read() function arguments
+ *
+ * These are used by XTemac_FifoWrite/Read() End Of Packet (Eop)
+ * parameter.
+ * @{
+ */
+#define XTE_END_OF_PACKET 1 /**< The data written is the last for the
+ * current packet */
+#define XTE_PARTIAL_PACKET 0 /**< There is more data to come for the
+ * current packet */
+/*@}*/
+
+/** @name Callback identifiers
+ *
+ * These constants are used as parameters to XTemac_SetHandler()
+ * @{
+ */
+#define XTE_HANDLER_FIFOSEND 1
+#define XTE_HANDLER_FIFORECV 2
+#define XTE_HANDLER_SGSEND 5
+#define XTE_HANDLER_SGRECV 6
+#define XTE_HANDLER_ERROR 7
+#define XTE_HANDLER_ANEG 8
+/*@}*/
+
+
+/* Constants to determine the configuration of the hardware device. They are
+ * used to allow the driver to verify it can operate with the hardware.
+ */
+#define XTE_CFG_NO_DMA 1 /* No DMA */
+#define XTE_CFG_SIMPLE_DMA 2 /* Simple DMA (not supported) */
+#define XTE_CFG_DMA_SG 3 /* DMA scatter gather */
+
+#define XTE_MULTI_CAM_ENTRIES 4 /* Number of storable addresses in
+ the CAM */
+
+#define XTE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
+
+/* Some default values for interrupt coalescing within the scatter-gather
+ * DMA engine.
+ */
+#define XTE_SGDMA_DFT_THRESHOLD 1 /* Default pkt threshold */
+#define XTE_SGDMA_MAX_THRESHOLD 1023 /* Maximum pkt threshold */
+#define XTE_SGDMA_DFT_WAITBOUND 0 /* Default pkt wait bound (msec) */
+#define XTE_SGDMA_MAX_WAITBOUND 1023 /* Maximum pkt wait bound (msec) */
+
+/* The next few constants help upper layers determine the size of memory
+ * pools used for Ethernet buffers and descriptor lists.
+ */
+#define XTE_MAC_ADDR_SIZE 6 /* six-byte MAC address */
+#define XTE_MTU 1500 /* max MTU size of Ethernet frame */
+#define XTE_JUMBO_MTU 8982 /* max MTU size of jumbo Ethernet frame */
+#define XTE_HDR_SIZE 14 /* size of Ethernet header */
+#define XTE_HDR_VLAN_SIZE 18 /* size of Ethernet header with VLAN */
+#define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
+#define XTE_MAX_FRAME_SIZE (XTE_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+#define XTE_MAX_VLAN_FRAME_SIZE (XTE_MTU + XTE_HDR_VLAN_SIZE + XTE_TRL_SIZE)
+#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+
+/* Constant values returned by XTemac_mGetPhysicalInterface(). Note that these
+ * values match design parameters from the PLB_TEMAC spec
+ */
+#define XTE_PHY_TYPE_MII 0
+#define XTE_PHY_TYPE_GMII 1
+#define XTE_PHY_TYPE_RGMII_1_3 2
+#define XTE_PHY_TYPE_RGMII_2_0 3
+#define XTE_PHY_TYPE_SGMII 4
+#define XTE_PHY_TYPE_1000BASE_X 5
+
+/**************************** Type Definitions *******************************/
+
+
+/** @name Typedefs for callback functions
+ *
+ * These callbacks are invoked in interrupt context.
+ * @{
+ */
+
+/**
+ * Callback invoked when frame(s) have been sent in interrupt driven FIFO
+ * direct mode. To set this callback, invoke XTemac_SetHandler() with
+ * XTE_HANDLER_FIFOSEND in the HandlerType parameter.
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ * @param StatusCnt is the number of statuses read from the device indicating
+ * a successful frame transmit.
+ *
+ */
+typedef void (*XTemac_FifoSendHandler) (void *CallBackRef, unsigned StatusCnt);
+
+/**
+ * Callback invoked when frame(s) have been received in interrupt driven FIFO
+ * direct mode. To set this callback, invoke XTemac_SetHandler() with
+ * XTE_HANDLER_FIFORECV in the HandlerType parameter.
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ *
+ */
+typedef void (*XTemac_FifoRecvHandler) (void *CallBackRef);
+
+/**
+ * Callback invoked when frame(s) have been sent or received in interrupt
+ * driven SGDMA mode. To set the send callback, invoke XTemac_SetHandler()
+ * with XTE_HANDLER_SGSEND in the HandlerType parameter. For the receive
+ * callback use XTE_HANDLER_SGRECV.
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ */
+typedef void (*XTemac_SgHandler) (void *CallBackRef);
+
+/**
+ * Callback invoked when auto-negotiation interrupt is asserted
+ * To set this callback, invoke XTemac_SetHandler() with XTE_HANDLER_ANEG in
+ * the HandlerType parameter.
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ */
+typedef void (*XTemac_AnegHandler) (void *CallBackRef);
+
+/**
+ * Callback when an asynchronous error occurs. To set this callback, invoke
+ * XTemac_SetHandler() with XTE_HANDLER_ERROR in the HandlerType parameter.
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ * @param ErrorClass defines what class of error is being reported
+ * @param ErrorWord1 definition varies with ErrorClass
+ * @param ErrorWord2 definition varies with ErrorClass
+ *
+ * The following information lists what each ErrorClass is, the source of the
+ * ErrorWords, what they mean, and if the device should be reset should it be
+ * reported
+ *
+ * <b>ErrorClass == XST_FIFO_ERROR</b>
+ *
+ * This error class means there was a fatal error with one of the device FIFOs.
+ * This type of error cannot be cleared. The user should initiate a device reset.
+ *
+ * ErrorWord1 is defined as a bit mask from XTE_IPXR_FIFO_FATAL_ERROR_MASK
+ * that originates from the device's IPISR register.
+ *
+ * ErrorWord2 is reserved.
+ *
+ *
+ * <b>ErrorClass == XST_PFIFO_DEADLOCK</b>
+ *
+ * This error class indicates that one of the packet FIFOs is reporting a
+ * deadlock condition. This means the FIFO is reporting that it is empty and
+ * full at the same time. This condition will occur when data being written
+ * exceeds the capacity of the packet FIFO. The device should be reset if this
+ * error is reported.
+ *
+ * Note that this error is reported only if the device is configured for FIFO
+ * direct mode. For SGDMA, this error is reported in ErrorClass XST_FIFO_ERROR.
+ *
+ * If ErrorWord1 = XTE_RECV, then the deadlock occurred in the receive channel.
+ * If ErrorWord1 = XTE_SEND, then the deadlock occurred in the send channel.
+ *
+ * ErrorWord2 is reserved.
+ *
+ *
+ * <b>ErrorClass == XST_IPIF_ERROR</b>
+ *
+ * This error means that a register read or write caused a bus error within the
+ * TEMAC's IPIF. This condition is fatal. The user should initiate a device
+ * reset.
+ *
+ * ErrorWord1 is defined as the contents XTE_DISR_OFFSET register where these
+ * errors are reported. Bits XTE_DXR_DPTO_MASK and XTE_DXR_TERR_MASK are
+ * relevant in this context.
+ *
+ * ErrorWord2 is reserved.
+ *
+ *
+ * <b>ErrorClass == XST_DMA_ERROR</b>
+ *
+ * This error class means there was a problem during a DMA transfer.
+ *
+ * ErrorWord1 defines which channel caused the error XTE_RECV or XTE_SEND.
+ *
+ * ErrorWord2 is set to the DMA status register XDMAV3_DMASR_OFFSET.
+ * The relevant bits to test are XDMAV3_DMASR_DBE_MASK and XDMAV3_DMASR_DBT_MASK.
+ * If either of these bits are set, a reset is recommended.
+ *
+ *
+ * <b>ErrorClass == XST_RECV_ERROR</b>
+ *
+ * This error class means a packet was dropped.
+ *
+ * ErrorWord1 is defined as the contents of the device's XTE_IPISR_OFFSET
+ * relating to receive errors. If any bit is set in the
+ * XTE_IPXR_RECV_DROPPED_MASK then a packet was rejected. Refer to xtemac_l.h
+ * for more information on what each bit in this mask means.
+ *
+ * ErrorWord2 is reserved.
+ *
+ * No action is typically required when this error occurs.
+ *
+ * Reporting of this error class can be disabled by clearing the
+ * XTE_REPORT_RXERR_OPTION.
+ *
+ * @note
+ * See xtemac_l.h for bitmasks definitions and the device hardware spec for
+ * further information on their meaning.
+ *
+ */
+typedef void (*XTemac_ErrorHandler) (void *CallBackRef, int ErrorClass,
+ u32 ErrorWord1, u32 ErrorWord2);
+/*@}*/
+
+
+/**
+ * Statistics maintained by the driver
+ */
+typedef struct {
+ u32 TxDmaErrors; /**< Number of Tx DMA errors detected */
+ u32 TxPktFifoErrors;
+ /**< Number of Tx packet FIFO errors detected */
+ u32 TxStatusErrors;
+ /**< Number of Tx errors derived from XTE_TSR_OFFSET
+ register */
+ u32 RxRejectErrors;
+ /**< Number of frames discarded due to errors */
+ u32 RxDmaErrors; /**< Number of Rx DMA errors detected */
+ u32 RxPktFifoErrors;
+ /**< Number of Rx packet FIFO errors detected */
+
+ u32 FifoErrors; /**< Number of length/status FIFO errors detected */
+ u32 IpifErrors; /**< Number of IPIF transaction and data phase errors
+ detected */
+ u32 Interrupts; /**< Number of interrupts serviced */
+} XTemac_SoftStats;
+
+
+/**
+ * This typedef contains configuration information for a device.
+ */
+typedef struct {
+	u16 DeviceId;	/**< Unique ID of device */
+	u32 BaseAddress;/**< Physical base address of IPIF registers */
+	u32 RxPktFifoDepth;
+			/**< Depth of receive packet FIFO in bits */
+	u32 TxPktFifoDepth;
+			/**< Depth of transmit packet FIFO in bits */
+	u16 MacFifoDepth;
+			/**< Depth of the status/length FIFOs in entries */
+	u8 IpIfDmaConfig;
+			/**< IPIF/DMA hardware configuration. One of
+			     XTE_CFG_NO_DMA, XTE_CFG_SIMPLE_DMA or
+			     XTE_CFG_DMA_SG */
+	u8 TxDre;	/**< Has data realignment engine on Tx channel */
+	u8 RxDre;	/**< Has data realignment engine on Rx channel */
+	u8 TxCsum;	/**< Has checksum offload on Tx channel */
+	u8 RxCsum;	/**< Has checksum offload on Rx channel */
+	u8 PhyType;	/**< Which type of PHY interface is used (MII,
+			     GMII, RGMII, etc.) See XTE_PHY_TYPE_<x> */
+} XTemac_Config;
+
+
+/* This type encapsulates a packet FIFO channel and support attributes to
+ * allow unaligned data transfers.
+ */
+typedef struct XTemac_PacketFifo {
+ u32 Hold[2]; /* Holding register */
+ unsigned ByteIndex; /* Holding register index */
+ unsigned Width; /* Width of packet FIFO's keyhole data port in
+ bytes */
+ XPacketFifoV200a Fifo; /* Packet FIFO channel */
+ /* Function used to transfer data between
+ FIFO and a buffer */
+ int (*XferFn) (struct XTemac_PacketFifo *Fptr, void *BufPtr,
+ u32 ByteCount, int Eop);
+} XTemac_PacketFifo;
+
+
+/**
+ * The XTemac driver instance data. The user is required to allocate a
+ * structure of this type for every TEMAC device in the system. A pointer
+ * to a structure of this type is then passed to the driver API functions.
+ */
+typedef struct XTemac {
+ u32 BaseAddress; /* Base address of IPIF register set */
+ u32 IsStarted; /* Device is currently started */
+ u32 IsReady; /* Device is initialized and ready */
+ u32 Options; /* Current options word */
+ u32 Flags; /* Internal driver flags */
+ XTemac_Config Config; /* HW configuration */
+
+ /* Packet FIFO channels */
+ XTemac_PacketFifo RecvFifo; /* Receive channel */
+ XTemac_PacketFifo SendFifo; /* Transmit channel */
+
+ /* DMA channels */
+ XDmaV3 RecvDma; /* Receive channel */
+ XDmaV3 SendDma; /* Transmit channel */
+
+ /* Callbacks for FIFO direct modes */
+ XTemac_FifoRecvHandler FifoRecvHandler;
+ XTemac_FifoSendHandler FifoSendHandler;
+ void *FifoRecvRef;
+ void *FifoSendRef;
+
+ /* Callbacks for SG DMA mode */
+ XTemac_SgHandler SgRecvHandler;
+ XTemac_SgHandler SgSendHandler;
+ void *SgRecvRef;
+ void *SgSendRef;
+
+ /* Auto negotiation callback */
+ XTemac_AnegHandler AnegHandler;
+ void *AnegRef;
+
+ /* Error callback */
+ XTemac_ErrorHandler ErrorHandler;
+ void *ErrorRef;
+
+ /* Driver maintained statistics */
+ XTemac_SoftStats Stats;
+
+} XTemac;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+*
+* This macro can be used to determine if the device is in the started or
+* stopped state. To be in the started state, the user must have made a
+* successful call to XTemac_Start(). To be in the stopped state, XTemac_Stop()
+* or one of the XTemac initialize functions must have been called.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device has been started, FALSE otherwise
+*
+* @note
+*
+* Signature: u32 XTemac_mIsStarted(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsStarted(InstancePtr) \
+ (((InstancePtr)->IsStarted == XCOMPONENT_IS_STARTED) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device thinks it has received a frame. This
+* function is useful if the device is operating in FIFO direct interrupt driven
+* mode. For polled mode, use XTemac_FifoQueryRecvStatus().
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device interrupt status register reports that a frame
+* status and length is available. FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsRecvFrame(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsRecvFrame(InstancePtr) \
+ ((XTemac_mReadReg((InstancePtr)->BaseAddress, XTE_IPISR_OFFSET) \
+ & XTE_IPXR_RECV_DONE_MASK) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device thinks it has dropped a receive frame.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device interrupt status register reports that a frame
+* has been dropped. FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsRecvFrameDropped(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsRecvFrameDropped(InstancePtr) \
+ ((XTemac_mReadReg((InstancePtr)->BaseAddress, XTE_IPISR_OFFSET) \
+ & XTE_IPXR_RECV_REJECT_MASK) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is currently configured for
+* FIFO direct mode
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured for FIFO direct, or FALSE
+* if it is not.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsFifo(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsFifo(InstancePtr) \
+ (((InstancePtr)->Config.IpIfDmaConfig == XTE_CFG_NO_DMA) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is currently configured for
+* scatter-gather DMA.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured for scatter-gather DMA, or FALSE
+* if it is not.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsSgDma(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsSgDma(InstancePtr) \
+ (((InstancePtr)->Config.IpIfDmaConfig == XTE_CFG_DMA_SG) ? TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with the Data Realignment
+* Engine (DRE) on the receive channel
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with DRE, or FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsRxDre(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsRxDre(InstancePtr) (((InstancePtr)->Config.RxDre) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with the Data Realignment
+* Engine (DRE) on the transmit channel
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with DRE, or FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsTxDre(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsTxDre(InstancePtr) (((InstancePtr)->Config.TxDre) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with checksum offloading
+* on the receive channel
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with checksum offloading, or
+* FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsRxCsum(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsRxCsum(InstancePtr) (((InstancePtr)->Config.RxCsum) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with checksum offloading
+* on the transmit channel
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with checksum offloading, or
+* FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XTemac_mIsTxCsum(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mIsTxCsum(InstancePtr) (((InstancePtr)->Config.TxCsum) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+*
+* This macro returns the type of PHY interface being used by the given
+* instance.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+*
+* @return
+*
+* One of XTE_PHY_TYPE_<x> where <x> is MII, GMII, RGMII_1_3, RGMII_2_0,
+* SGMII, or 1000BASE_X.
+*
+* @note
+*
+* Signature: int XTemac_mGetPhysicalInterface(XTemac *InstancePtr)
+*
+******************************************************************************/
+#define XTemac_mGetPhysicalInterface(InstancePtr) \
+ ((InstancePtr)->Config.PhyType)
+
+/*****************************************************************************/
+/**
+*
+* Return the next buffer descriptor in the list on the send channel.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param BdPtr is the source descriptor
+*
+* @return Next descriptor in the SGDMA transmit ring (i.e. BdPtr->Next)
+*
+* @note
+*
+* Signature: XDmaBdV3 XTemac_mSgSendBdNext(XTemac *InstancePtr,
+* XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgSendBdNext(InstancePtr, BdPtr) \
+ XDmaV3_mSgBdNext(&(InstancePtr)->SendDma, (BdPtr))
+
+/*****************************************************************************/
+/**
+*
+* Return the previous buffer descriptor in the list on the send channel.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param BdPtr is the source descriptor
+*
+* @return Previous descriptor in the SGDMA transmit ring (i.e. BdPtr->Prev)
+*
+* @note
+*
+* Signature: XDmaBdV3 XTemac_mSgSendBdPrev(XTemac *InstancePtr,
+* XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgSendBdPrev(InstancePtr, BdPtr) \
+ XDmaV3_mSgBdPrev(&(InstancePtr)->SendDma, (BdPtr))
+
+/*****************************************************************************/
+/**
+*
+* Return the next buffer descriptor in the list on the receive channel.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param BdPtr is the source descriptor
+*
+* @return Next descriptor in the SGDMA receive ring (i.e. BdPtr->Next)
+*
+* @note
+*
+* Signature: XDmaBdV3 XTemac_mSgRecvBdNext(XTemac *InstancePtr,
+* XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgRecvBdNext(InstancePtr, BdPtr) \
+ XDmaV3_mSgBdNext(&(InstancePtr)->RecvDma, (BdPtr))
+
+/*****************************************************************************/
+/**
+*
+* Return the previous buffer descriptor in the list on the receive channel.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param BdPtr is the source descriptor
+*
+* @return Previous descriptor in the SGDMA receive ring (i.e. BdPtr->Prev)
+*
+* @note
+*
+* Signature: XDmaBdV3 XTemac_mSgRecvBdPrev(XTemac *InstancePtr,
+*                                          XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgRecvBdPrev(InstancePtr, BdPtr) \
+	XDmaV3_mSgBdPrev(&(InstancePtr)->RecvDma, (BdPtr))
+
+/*****************************************************************************/
+/**
+*
+* Retrieve the received frame checksum as calculated by HW
+*
+* @param BdPtr is the source descriptor
+*
+* @return 16-bit checksum value
+
+* @note
+*
+* Signature: u16 XTemac_mSgRecvBdCsumGet(XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgRecvBdCsumGet(BdPtr) \
+ (*(u16*)((u32)(BdPtr) + XTE_BD_RX_CSRAW_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* Enable transmit side checksum calculation for the given descriptor.
+*
+* @param BdPtr is the source descriptor
+*
+* @note
+*
+* Signature: void XTemac_mSgSendBdCsumEnable(XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgSendBdCsumEnable(BdPtr) \
+ *(u16*)((u32)(BdPtr) + XTE_BD_TX_CSCNTRL_OFFSET) = \
+ XTE_BD_TX_CSCNTRL_CALC_MASK
+
+/*****************************************************************************/
+/**
+*
+* Disable transmit side checksum calculation for the given descriptor.
+*
+* @param BdPtr is the source descriptor
+*
+* @note
+*
+* Signature: void XTemac_mSgSendBdCsumDisable(XDmaBdV3 *BdPtr)
+*
+******************************************************************************/
+#define XTemac_mSgSendBdCsumDisable(BdPtr) \
+ *(u16*)((u32)(BdPtr) + XTE_BD_TX_CSCNTRL_OFFSET) = 0
+
+/*****************************************************************************/
+/**
+*
+* Setup checksum attributes for a transmit frame. If a seed value is required
+* XTemac_mSgSendBdCsumSeed() can be used
+*
+* @param BdPtr is the source descriptor
+* @param StartOffset is the byte offset where HW will begin checksumming data
+* @param InsertOffset is the byte offset where HW will insert the calculated
+* checksum value
+*
+* @note
+*
+* Signature: void XTemac_mSgSendBdCsumSetup(XDmaBdV3 *BdPtr,
+* u16 StartOffset,
+* u16 InsertOffset)
+*
+******************************************************************************/
+#define XTemac_mSgSendBdCsumSetup(BdPtr, StartOffset, InsertOffset) \
+ *(u32*)((u32)(BdPtr) + XTE_BD_TX_CSBEGIN_OFFSET) = \
+ ((StartOffset) << 16) | (InsertOffset)
+
+/*****************************************************************************/
+/**
+*
+* Set the initial checksum seed for a transmit frame. HW will add this value
+* to the calculated frame checksum. If not required then the seed should be
+* set to 0.
+*
+* @param BdPtr is the source descriptor
+* @param Seed is added to the calculated checksum
+*
+* @note
+*
+* Signature: void XTemac_mSgSendBdCsumSeed(XDmaBdV3 *BdPtr, u16 Seed)
+*
+******************************************************************************/
+#define XTemac_mSgSendBdCsumSeed(BdPtr, Seed) \
+ *(u16*)((u32)(BdPtr) + XTE_BD_TX_CSINIT_OFFSET) = (Seed)
+
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization functions in xtemac.c
+ */
+int XTemac_CfgInitialize(XTemac *InstancePtr, XTemac_Config *CfgPtr,
+ u32 VirtualAddress);
+int XTemac_Start(XTemac *InstancePtr);
+void XTemac_Stop(XTemac *InstancePtr);
+void XTemac_Reset(XTemac *InstancePtr, int HardCoreAction);
+
+/*
+ * Initialization functions in xtemac_sinit.c
+ */
+int XTemac_Initialize(XTemac *InstancePtr, u16 DeviceId);
+int XTemac_VmInitialize(XTemac *InstancePtr, u16 DeviceId, u32 VirtualAddress);
+XTemac_Config *XTemac_LookupConfig(u16 DeviceId);
+
+/*
+ * General interrupt-related functions in xtemac_intr.c
+ */
+int XTemac_SetHandler(XTemac *InstancePtr, u32 HandlerType,
+ void *CallbackFunc, void *CallbackRef);
+
+/*
+ * Fifo direct mode functions implemented in xtemac_fifo.c
+ */
+int XTemac_FifoWrite(XTemac *InstancePtr, void *BufPtr, u32 ByteCount, int Eop);
+int XTemac_FifoSend(XTemac *InstancePtr, u32 TxByteCount);
+
+int XTemac_FifoRecv(XTemac *InstancePtr, u32 *ByteCountPtr);
+int XTemac_FifoRead(XTemac *InstancePtr, void *BufPtr, u32 ByteCount, int Eop);
+u32 XTemac_FifoGetFreeBytes(XTemac *InstancePtr, u32 Direction);
+
+int XTemac_FifoQuerySendStatus(XTemac *InstancePtr, u32 *SendStatusPtr);
+int XTemac_FifoQueryRecvStatus(XTemac *InstancePtr);
+
+/*
+ * Interrupt management functions for FIFO direct mode implemented in
+ * xtemac_intr_fifo.c.
+ */
+void XTemac_IntrFifoEnable(XTemac *InstancePtr, u32 Direction);
+void XTemac_IntrFifoDisable(XTemac *InstancePtr, u32 Direction);
+extern void XTemac_IntrFifoHandler(void *InstancePtr);
+
+/*
+ * SG DMA mode functions implemented in xtemac_sgdma.c
+ */
+int XTemac_SgAlloc(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 ** BdPtr);
+int XTemac_SgUnAlloc(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 * BdPtr);
+int XTemac_SgCommit(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 * BdPtr);
+unsigned XTemac_SgGetProcessed(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 ** BdPtr);
+int XTemac_SgFree(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 * BdPtr);
+
+int XTemac_SgCheck(XTemac *InstancePtr, u32 Direction);
+
+int XTemac_SgSetSpace(XTemac *InstancePtr, u32 Direction,
+ u32 PhysicalAddr, u32 VirtualAddr,
+ u32 Alignment, unsigned BdCount, XDmaBdV3 * BdTemplate);
+
+/*
+ * Interrupt management functions for SG DMA mode implemented in
+ * xtemac_intr_sgdma.c
+ */
+void XTemac_IntrSgEnable(XTemac *InstancePtr, u32 Direction);
+void XTemac_IntrSgDisable(XTemac *InstancePtr, u32 Direction);
+int XTemac_IntrSgCoalSet(XTemac *InstancePtr, u32 Direction,
+ u16 Threshold, u16 Timer);
+int XTemac_IntrSgCoalGet(XTemac *InstancePtr, u32 Direction,
+ u16 *ThresholdPtr, u16 *TimerPtr);
+
+extern void XTemac_IntrSgHandler(void *InstancePtr);
+
+/*
+ * MAC configuration/control functions in xtemac_control.c
+ */
+int XTemac_SetOptions(XTemac *InstancePtr, u32 Options);
+int XTemac_ClearOptions(XTemac *InstancePtr, u32 Options);
+u32 XTemac_GetOptions(XTemac *InstancePtr);
+
+int XTemac_SetMacAddress(XTemac *InstancePtr, void *AddressPtr);
+void XTemac_GetMacAddress(XTemac *InstancePtr, void *AddressPtr);
+
+int XTemac_SetMacPauseAddress(XTemac *InstancePtr, void *AddressPtr);
+void XTemac_GetMacPauseAddress(XTemac *InstancePtr, void *AddressPtr);
+int XTemac_SendPausePacket(XTemac *InstancePtr, u16 PauseValue);
+
+int XTemac_GetSgmiiStatus(XTemac *InstancePtr, u16 *SpeedPtr);
+int XTemac_GetRgmiiStatus(XTemac *InstancePtr, u16 *SpeedPtr,
+ u32 *IsFullDuplexPtr, u32 *IsLinkUpPtr);
+u16 XTemac_GetOperatingSpeed(XTemac *InstancePtr);
+void XTemac_SetOperatingSpeed(XTemac *InstancePtr, u16 Speed);
+
+void XTemac_PhySetMdioDivisor(XTemac *InstancePtr, u8 Divisor);
+int XTemac_PhyRead(XTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 *PhyDataPtr);
+int XTemac_PhyWrite(XTemac *InstancePtr, u32 PhyAddress,
+ u32 RegisterNum, u16 PhyData);
+int XTemac_MulticastAdd(XTemac *InstancePtr, void *AddressPtr, int Entry);
+void XTemac_MulticastGet(XTemac *InstancePtr, void *AddressPtr, int Entry);
+int XTemac_MulticastClear(XTemac *InstancePtr, int Entry);
+
+/*
+ * Statistics in xtemac_stats.c
+ */
+void XTemac_GetSoftStats(XTemac *InstancePtr, XTemac_SoftStats *StatsPtr);
+void XTemac_ClearSoftStats(XTemac *InstancePtr);
+
+/*
+ * Diagnostic functions in xtemac_selftest.c
+ */
+int XTemac_SelfTest(XTemac *InstancePtr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xtemac_control.c
+ *
+ * Functions in this file implement general purpose command and control related
+ * functionality. See xtemac.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a rmm 06/01/05 First release
+ * 1.00b rmm 09/23/05 Implemented PhyRead/Write and multicast functions,
+ * removed Set/Get IFG functions. Redesigned MII/RGMII/
+ * SGMII status functions.
+ * 2.00a rmm 11/21/05 Added auto negotiate to options processing funcs,
+ * fixed XTE_MGTDR_OFFSET and XTE_MGTCR_OFFSET to be
+ * accessed with IPIF instead of host macros, removed
+ * half duplex option processing
+ * rmm 06/22/06 Fixed c++ compiler warnings and errors
+ * </pre>
+ *****************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * Set the MAC address for this driver/device. The address is a 48-bit value.
+ * The device must be stopped before calling this function.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is a pointer to a 6-byte MAC address (byte 0 maps to address bits [7:0]).
+ *
+ * @return
+ * - XST_SUCCESS if the MAC address was set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ ******************************************************************************/
+int XTemac_SetMacAddress(XTemac *InstancePtr, void *AddressPtr)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Set the MAC bits [31:0] in EUAW0 (address bytes 0..3, LSB first) */
+	MacAddr = Aptr[0] & 0x000000FF;
+	MacAddr |= Aptr[1] << 8;
+	MacAddr |= Aptr[2] << 16;
+	MacAddr |= Aptr[3] << 24;
+	XTemac_mSetHostReg(XTE_UAW0_OFFSET, MacAddr);
+
+	/* There are reserved bits in EUAW1 so don't affect them */
+	MacAddr = XTemac_mGetHostReg(XTE_UAW1_OFFSET);
+	MacAddr &= ~XTE_UAW1_MASK;
+
+	/* Set MAC bits [47:32] in EUAW1 (address bytes 4..5) */
+	MacAddr |= Aptr[4] & 0x000000FF;
+	MacAddr |= Aptr[5] << 8;
+	XTemac_mSetHostReg(XTE_UAW1_OFFSET, MacAddr);
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Get the MAC address for this driver/device (read back from EUAW0/EUAW1).
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is an output parameter, and is a pointer to a buffer into
+ *        which the current MAC address will be copied. The buffer must be at
+ *        least 6 bytes in length.
+ *
+ ******************************************************************************/
+void XTemac_GetMacAddress(XTemac *InstancePtr, void *AddressPtr)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Read MAC bits [31:0] in EUAW0 (address bytes 0..3, LSB first) */
+	MacAddr = XTemac_mGetHostReg(XTE_UAW0_OFFSET);
+	Aptr[0] = (u8) MacAddr;
+	Aptr[1] = (u8) (MacAddr >> 8);
+	Aptr[2] = (u8) (MacAddr >> 16);
+	Aptr[3] = (u8) (MacAddr >> 24);
+
+	/* Read MAC bits [47:32] in EUAW1 (address bytes 4..5) */
+	MacAddr = XTemac_mGetHostReg(XTE_UAW1_OFFSET);
+	Aptr[4] = (u8) MacAddr;
+	Aptr[5] = (u8) (MacAddr >> 8);
+}
+
+
+/*****************************************************************************/
+/**
+ * Add an Ethernet address to the list that will be accepted by the receiver.
+ * The address may be any unicast, multicast, or the broadcast address form.
+ * Up to XTE_MULTI_CAM_ENTRIES addresses may be filtered in this way. The
+ * device must be stopped to use this function.
+ *
+ * Once an address is programmed, it will be received by the device. There is
+ * no control bit to disable multicast filtering. The only way to prevent a
+ * CAM address from being received is to clear it with XTemac_MulticastClear().
+ *
+ * @param InstancePtr is a pointer to the XTemac instance to be worked on.
+ * @param AddressPtr is a pointer to a 6-byte Ethernet address. The previous
+ *        address at this entry location (if any) is overwritten with the new
+ *        one.
+ * @param Entry is the storage location the HW uses to program this address.
+ *        It must be between 0..XTE_MULTI_CAM_ENTRIES-1.
+ *
+ * @return
+ *
+ * - XST_SUCCESS if the address was added successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ ******************************************************************************/
+int XTemac_MulticastAdd(XTemac *InstancePtr, void *AddressPtr, int Entry)
+{
+	u32 Emaw0Reg;
+	u32 Emaw1Reg;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(Entry < XTE_MULTI_CAM_ENTRIES);	/* NOTE(review): lower bound (Entry >= 0) is not asserted */
+
+	/* The device must be stopped before programming a CAM address entry */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Set MAC bits [31:0] (address bytes 0..3, LSB first) */
+	Emaw0Reg = Aptr[0] & 0x000000FF;
+	Emaw0Reg |= Aptr[1] << 8;
+	Emaw0Reg |= Aptr[2] << 16;
+	Emaw0Reg |= Aptr[3] << 24;
+
+	/* Set MAC bits [47:32] (address bytes 4..5) */
+	Emaw1Reg = Aptr[4] & 0x000000FF;
+	Emaw1Reg |= Aptr[5] << 8;
+
+	/* Add in CAM address */
+	Emaw1Reg |= (Entry << XTE_MAW1_CAMMADDR_SHIFT_MASK);
+
+	/* Program HW */
+	XTemac_mSetHostReg(XTE_MAW0_OFFSET, Emaw0Reg);
+	XTemac_mSetHostReg(XTE_MAW1_OFFSET, Emaw1Reg);
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Retrieve an Ethernet address set by XTemac_MulticastAdd().
+ *
+ * @param InstancePtr is a pointer to the XTemac instance to be worked on.
+ * @param AddressPtr is an output parameter, and is a pointer to a buffer into
+ *        which the current MAC address will be copied. The buffer must be at
+ *        least 6 bytes in length.
+ * @param Entry is the storage location in the HW. It must be between
+ *        0..XTE_MULTI_CAM_ENTRIES-1.
+ *
+ * @note
+ *
+ * No status is returned; the contents of the selected CAM entry are
+ * copied into the buffer at AddressPtr unconditionally.
+ ******************************************************************************/
+void XTemac_MulticastGet(XTemac *InstancePtr, void *AddressPtr, int Entry)
+{
+	u32 Emaw0Reg;
+	u32 Emaw1Reg;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_VOID(Entry < XTE_MULTI_CAM_ENTRIES);	/* NOTE(review): lower bound (Entry >= 0) is not asserted */
+
+	/* Tell HW to provide address stored in given entry */
+	XTemac_mSetHostReg(XTE_MAW1_OFFSET, XTE_MAW1_CAMRNW_MASK |
+			   (Entry << XTE_MAW1_CAMMADDR_SHIFT_MASK));
+
+	/* The HW should now have provided the CAM entry */
+	Emaw0Reg = XTemac_mGetHostReg(XTE_MAW0_OFFSET);
+	Emaw1Reg = XTemac_mGetHostReg(XTE_MAW1_OFFSET);
+
+	/* Copy the address to the user buffer */
+	Aptr[0] = (u8) Emaw0Reg;
+	Aptr[1] = (u8) (Emaw0Reg >> 8);
+	Aptr[2] = (u8) (Emaw0Reg >> 16);
+	Aptr[3] = (u8) (Emaw0Reg >> 24);
+	Aptr[4] = (u8) Emaw1Reg;
+	Aptr[5] = (u8) (Emaw1Reg >> 8);
+}
+
+/*****************************************************************************/
+/**
+* Clear an address set by XTemac_MulticastAdd(). The device must be stopped
+* before calling this function.
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param Entry is the HW storage location used when this address was added.
+*        It must be between 0..XTE_MULTI_CAM_ENTRIES-1.
+*
+* @return
+*
+* - XST_SUCCESS if the address was cleared
+* - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+*
+******************************************************************************/
+int XTemac_MulticastClear(XTemac *InstancePtr, int Entry)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(Entry < XTE_MULTI_CAM_ENTRIES);	/* NOTE(review): lower bound (Entry >= 0) is not asserted */
+
+	/* The device must be stopped before clearing a multicast CAM entry */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Clear the entry by writing 0:0:0:0:0:0 to it */
+	XTemac_mSetHostReg(XTE_MAW0_OFFSET, 0);
+	XTemac_mSetHostReg(XTE_MAW1_OFFSET,
+			   Entry << XTE_MAW1_CAMMADDR_SHIFT_MASK);
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Set the MAC address for pause frames. This is the address the device will
+ * recognize as pause frames. Pause frames transmitted with
+ * XTemac_SendPausePacket() will also use this address.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is a pointer to a 6-byte MAC address (byte 0 maps to address bits [7:0]).
+ *
+ * @return
+ * - XST_SUCCESS if the MAC address was set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ ******************************************************************************/
+int XTemac_SetMacPauseAddress(XTemac *InstancePtr, void *AddressPtr)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Set the MAC bits [31:0] in ERXC0 */
+	MacAddr = Aptr[0] & 0x000000FF;
+	MacAddr |= Aptr[1] << 8;
+	MacAddr |= Aptr[2] << 16;
+	MacAddr |= Aptr[3] << 24;
+	XTemac_mSetHostReg(XTE_RXC0_OFFSET, MacAddr);
+
+	/* ERXC1 contains other info that must be preserved */
+	MacAddr = XTemac_mGetHostReg(XTE_RXC1_OFFSET);
+	MacAddr &= ~XTE_RXC1_ERXC1_MASK;
+
+	/* Set MAC bits [47:32] */
+	MacAddr |= Aptr[4] & 0x000000FF;
+	MacAddr |= Aptr[5] << 8;
+	XTemac_mSetHostReg(XTE_RXC1_OFFSET, MacAddr);
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Get the MAC address for pause frames for this driver/device.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is an output parameter, and is a pointer to a buffer into
+ *        which the current MAC address will be copied. The buffer must be at
+ *        least 6 bytes in length.
+ *
+ ******************************************************************************/
+void XTemac_GetMacPauseAddress(XTemac *InstancePtr, void *AddressPtr)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *) AddressPtr;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Read MAC bits [31:0] in ERXC0 */
+	MacAddr = XTemac_mGetHostReg(XTE_RXC0_OFFSET);
+	Aptr[0] = (u8) MacAddr;
+	Aptr[1] = (u8) (MacAddr >> 8);
+	Aptr[2] = (u8) (MacAddr >> 16);
+	Aptr[3] = (u8) (MacAddr >> 24);
+
+	/* Read MAC bits [47:32] in ERXC1 */
+	MacAddr = XTemac_mGetHostReg(XTE_RXC1_OFFSET);
+	Aptr[4] = (u8) MacAddr;
+	Aptr[5] = (u8) (MacAddr >> 8);
+}
+
+
+/*****************************************************************************/
+/**
+ * Set options for the driver/device. The driver should be stopped with
+ * XTemac_Stop() before changing options.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Options are the options to set. Multiple options can be set by OR'ing
+ *        XTE_*_OPTIONS constants together. Options not specified are not
+ *        affected.
+ *
+ * @return
+ * - XST_SUCCESS if the options were set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ * - XST_NO_FEATURE if setting an option requires HW support not present
+ *
+ * @note
+ * See xtemac.h for a description of the available options.
+ *
+ ******************************************************************************/
+int XTemac_SetOptions(XTemac *InstancePtr, u32 Options)
+{
+	u32 Reg;	/* Generic register contents */
+	u32 RegErxc1;	/* Reflects original contents of ERXC1 */
+	u32 RegEtxc;	/* Reflects original contents of ETXC */
+	u32 RegNewErxc1;	/* Reflects new contents of ERXC1 */
+	u32 RegNewEtxc;	/* Reflects new contents of ETXC */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Polled mode requires FIFO direct */
+	if ((Options & XTE_POLLED_OPTION) && (!XTemac_mIsFifo(InstancePtr))) {
+		return (XST_NO_FEATURE);
+	}
+
+	/* Many of these options will change the ERXC1 or ETXC registers.
+	 * To reduce the amount of IO to the device, group these options here
+	 * and change them all at once.
+	 */
+
+	/* Grab current register contents */
+	RegErxc1 = XTemac_mGetHostReg(XTE_RXC1_OFFSET);
+	RegEtxc = XTemac_mGetHostReg(XTE_TXC_OFFSET);
+	RegNewErxc1 = RegErxc1;
+	RegNewEtxc = RegEtxc;
+
+	/* Turn on jumbo packet support for both Rx and Tx */
+	if (Options & XTE_JUMBO_OPTION) {
+		RegNewEtxc |= XTE_TXC_TXJMBO_MASK;
+		RegNewErxc1 |= XTE_RXC1_RXJMBO_MASK;
+	}
+
+	/* Turn on VLAN packet support for both Rx and Tx */
+	if (Options & XTE_VLAN_OPTION) {
+		RegNewEtxc |= XTE_TXC_TXVLAN_MASK;
+		RegNewErxc1 |= XTE_RXC1_RXVLAN_MASK;
+	}
+
+	/* Turn on FCS stripping on receive packets */
+	if (Options & XTE_FCS_STRIP_OPTION) {
+		RegNewErxc1 &= ~XTE_RXC1_RXFCS_MASK;
+	}
+
+	/* Turn on FCS insertion on transmit packets */
+	if (Options & XTE_FCS_INSERT_OPTION) {
+		RegNewEtxc &= ~XTE_TXC_TXFCS_MASK;
+	}
+
+	/* Turn on length/type field checking on receive packets */
+	if (Options & XTE_LENTYPE_ERR_OPTION) {
+		RegNewErxc1 &= ~XTE_RXC1_RXLT_MASK;
+	}
+
+	/* Officially change the ETXC or ERXC1 registers if they need to be
+	 * modified
+	 */
+	if (RegEtxc != RegNewEtxc) {
+		XTemac_mSetHostReg(XTE_TXC_OFFSET, RegNewEtxc);
+	}
+
+	if (RegErxc1 != RegNewErxc1) {
+		XTemac_mSetHostReg(XTE_RXC1_OFFSET, RegNewErxc1);
+	}
+
+	/* Rest of options twiddle bits of other registers. Handle them one at
+	 * a time
+	 */
+
+	/* Turn on flow control */
+	if (Options & XTE_FLOW_CONTROL_OPTION) {
+		Reg = XTemac_mGetHostReg(XTE_FCC_OFFSET);
+		Reg |= XTE_FCC_RXFLO_MASK;
+		XTemac_mSetHostReg(XTE_FCC_OFFSET, Reg);
+	}
+
+	/* Turn on promiscuous frame filtering (all frames are received) */
+	if (Options & XTE_PROMISC_OPTION) {
+		Reg = XTemac_mGetHostReg(XTE_AFM_OFFSET);
+		Reg |= XTE_AFM_EPPRM_MASK;
+		XTemac_mSetHostReg(XTE_AFM_OFFSET, Reg);
+	}
+
+	/* Allow broadcast address filtering */
+	if (Options & XTE_BROADCAST_OPTION) {
+		Reg = XTemac_mGetIpifReg(XTE_CR_OFFSET);
+		Reg &= ~XTE_CR_BCREJ_MASK;
+		XTemac_mSetIpifReg(XTE_CR_OFFSET, Reg);
+	}
+
+	/* Allow multicast address filtering */
+	if (Options & XTE_MULTICAST_CAM_OPTION) {
+		Reg = XTemac_mGetIpifReg(XTE_CR_OFFSET);
+		Reg &= ~XTE_CR_MCREJ_MASK;
+		XTemac_mSetIpifReg(XTE_CR_OFFSET, Reg);
+	}
+
+	/* Enable interrupts related to rejection of bad frames */
+	if (Options & XTE_REPORT_RXERR_OPTION) {
+		/* Clear out any previous error conditions that may have existed
+		 * prior to enabling the reporting of these types of errors
+		 */
+		Reg = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+		XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+				   Reg & XTE_IPXR_RECV_DROPPED_MASK);
+
+		/* Whether these are enabled here are based on the last call to
+		 * XTemac_IntrFifoEnable/Disable() and XTemac_IntrSgDmaEnable/Disable()
+		 * for the receive channel.
+		 *
+		 * If receive interrupts are enabled, then enable these interrupts. This
+		 * way, when XTemac_Start() is called, these interrupt enables take
+		 * effect right away.
+		 *
+		 * If receive interrupts are disabled, then don't do anything here. The
+		 * XTemac_IntrFifoEnable() and XTemac_IntrSgDmaEnable() functions when
+		 * called will check this option and enable these interrupts if needed.
+		 */
+		if (InstancePtr->Flags &
+		    (XTE_FLAGS_RECV_FIFO_INT_ENABLE |
+		     XTE_FLAGS_RECV_SGDMA_INT_ENABLE)) {
+			Reg = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+			Reg |= XTE_IPXR_RECV_DROPPED_MASK;
+			XTemac_mSetIpifReg(XTE_IPIER_OFFSET, Reg);
+		}
+	}
+
+	/* Enable interrupt related to assertion of auto-negotiate HW interrupt */
+	if (Options & XTE_ANEG_OPTION) {
+		/* Clear out any previous interrupt condition that may have existed
+		 * prior to enabling the reporting of auto negotiation
+		 */
+		Reg = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+		XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+				   Reg & XTE_IPXR_AUTO_NEG_MASK);
+
+		/* Make this interrupt source enabled when XTemac_Start() is called.
+		 * Note: OR the mask in; AND-ing (as before) would have wiped every
+		 * other enable bit in IPIER instead of enabling auto-negotiate. */
+		Reg = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+		XTemac_mSetIpifReg(XTE_IPIER_OFFSET,
+				   Reg | XTE_IPXR_AUTO_NEG_MASK);
+	}
+
+	/* Enable interrupts upon completing a SG list */
+	if ((Options & XTE_SGEND_INT_OPTION) && XTemac_mIsSgDma(InstancePtr)) {
+		Reg = XDmaV3_GetInterruptEnable(&InstancePtr->SendDma);
+		Reg |= XDMAV3_IPXR_SGEND_MASK;
+		XDmaV3_SetInterruptEnable(&InstancePtr->SendDma, Reg);
+
+		Reg = XDmaV3_GetInterruptEnable(&InstancePtr->RecvDma);
+		Reg |= XDMAV3_IPXR_SGEND_MASK;
+		XDmaV3_SetInterruptEnable(&InstancePtr->RecvDma, Reg);
+	}
+
+	/* The remaining options not handled here are managed elsewhere in the
+	 * driver. No register modifications are needed at this time. Reflecting the
+	 * option in InstancePtr->Options is good enough for now.
+	 */
+
+	/* Set options word to its new value */
+	InstancePtr->Options |= Options;
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Clear options for the driver/device
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Options are the options to clear. Multiple options can be cleared by
+ *        OR'ing XTE_*_OPTIONS constants together. Options not specified are not
+ *        affected.
+ *
+ * @return
+ * - XST_SUCCESS if the options were cleared successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ * @note
+ * See xtemac.h for a description of the available options.
+ *
+ ******************************************************************************/
+int XTemac_ClearOptions(XTemac *InstancePtr, u32 Options)
+{
+	volatile u32 Reg;	/* Generic */
+	u32 RegErxc1;	/* Reflects original contents of ERXC1 */
+	u32 RegEtxc;	/* Reflects original contents of ETXC */
+	u32 RegNewErxc1;	/* Reflects new contents of ERXC1 */
+	u32 RegNewEtxc;	/* Reflects new contents of ETXC */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STARTED);
+	}
+
+	/* Many of these options will change the ERXC1 or ETXC registers.
+	 * Group these options here and change them all at once. What we are
+	 * trying to accomplish is to reduce the amount of IO to the device
+	 */
+
+	/* Grab current register contents */
+	RegErxc1 = XTemac_mGetHostReg(XTE_RXC1_OFFSET);
+	RegEtxc = XTemac_mGetHostReg(XTE_TXC_OFFSET);
+	RegNewErxc1 = RegErxc1;
+	RegNewEtxc = RegEtxc;
+
+	/* Turn off jumbo packet support for both Rx and Tx */
+	if (Options & XTE_JUMBO_OPTION) {
+		RegNewEtxc &= ~XTE_TXC_TXJMBO_MASK;
+		RegNewErxc1 &= ~XTE_RXC1_RXJMBO_MASK;
+	}
+
+	/* Turn off VLAN packet support for both Rx and Tx */
+	if (Options & XTE_VLAN_OPTION) {
+		RegNewEtxc &= ~XTE_TXC_TXVLAN_MASK;
+		RegNewErxc1 &= ~XTE_RXC1_RXVLAN_MASK;
+	}
+
+	/* Turn off FCS stripping on receive packets */
+	if (Options & XTE_FCS_STRIP_OPTION) {
+		RegNewErxc1 |= XTE_RXC1_RXFCS_MASK;
+	}
+
+	/* Turn off FCS insertion on transmit packets */
+	if (Options & XTE_FCS_INSERT_OPTION) {
+		RegNewEtxc |= XTE_TXC_TXFCS_MASK;
+	}
+
+	/* Turn off length/type field checking on receive packets */
+	if (Options & XTE_LENTYPE_ERR_OPTION) {
+		RegNewErxc1 |= XTE_RXC1_RXLT_MASK;
+	}
+
+	/* Disable transmitter */
+	if (Options & XTE_TRANSMITTER_ENABLE_OPTION) {
+		RegNewEtxc &= ~XTE_TXC_TXEN_MASK;
+	}
+
+	/* Disable receiver */
+	if (Options & XTE_RECEIVER_ENABLE_OPTION) {
+		RegNewErxc1 &= ~XTE_RXC1_RXEN_MASK;
+	}
+
+	/* Officially change the ETXC or ERXC1 registers if they need to be
+	 * modified
+	 */
+	if (RegEtxc != RegNewEtxc) {
+		XTemac_mSetHostReg(XTE_TXC_OFFSET, RegNewEtxc);
+	}
+
+	if (RegErxc1 != RegNewErxc1) {
+		XTemac_mSetHostReg(XTE_RXC1_OFFSET, RegNewErxc1);
+	}
+
+	/* Rest of options twiddle bits of other registers. Handle them one at
+	 * a time
+	 */
+
+	/* Turn off flow control */
+	if (Options & XTE_FLOW_CONTROL_OPTION) {
+		Reg = XTemac_mGetHostReg(XTE_FCC_OFFSET);
+		Reg &= ~XTE_FCC_RXFLO_MASK;
+		XTemac_mSetHostReg(XTE_FCC_OFFSET, Reg);
+	}
+
+	/* Turn off promiscuous frame filtering */
+	if (Options & XTE_PROMISC_OPTION) {
+		Reg = XTemac_mGetHostReg(XTE_AFM_OFFSET);
+		Reg &= ~XTE_AFM_EPPRM_MASK;
+		XTemac_mSetHostReg(XTE_AFM_OFFSET, Reg);
+	}
+
+	/* Disable broadcast address filtering */
+	if (Options & XTE_BROADCAST_OPTION) {
+		Reg = XTemac_mGetIpifReg(XTE_CR_OFFSET);
+		Reg |= XTE_CR_BCREJ_MASK;
+		XTemac_mSetIpifReg(XTE_CR_OFFSET, Reg);
+	}
+
+	/* Disable multicast address filtering */
+	if (Options & XTE_MULTICAST_CAM_OPTION) {
+		Reg = XTemac_mGetIpifReg(XTE_CR_OFFSET);
+		Reg |= XTE_CR_MCREJ_MASK;
+		XTemac_mSetIpifReg(XTE_CR_OFFSET, Reg);
+	}
+
+	/* Disable interrupts related to rejection of bad frames */
+	if (Options & XTE_REPORT_RXERR_OPTION) {
+		Reg = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+		Reg &= ~XTE_IPXR_RECV_DROPPED_MASK;
+		XTemac_mSetIpifReg(XTE_IPIER_OFFSET, Reg);
+	}
+
+	/* Disable interrupts related to auto negotiate */
+	if (Options & XTE_ANEG_OPTION) {
+		Reg = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+		Reg &= ~XTE_IPXR_AUTO_NEG_MASK;
+		XTemac_mSetIpifReg(XTE_IPIER_OFFSET, Reg);
+	}
+
+	/* Disable interrupts upon completing a SG list */
+	if ((Options & XTE_SGEND_INT_OPTION) && XTemac_mIsSgDma(InstancePtr)) {
+		Reg = XDmaV3_GetInterruptEnable(&InstancePtr->SendDma);
+		Reg &= ~XDMAV3_IPXR_SGEND_MASK;
+		XDmaV3_SetInterruptEnable(&InstancePtr->SendDma, Reg);
+
+		Reg = XDmaV3_GetInterruptEnable(&InstancePtr->RecvDma);
+		Reg &= ~XDMAV3_IPXR_SGEND_MASK;
+		XDmaV3_SetInterruptEnable(&InstancePtr->RecvDma, Reg);
+	}
+
+	/* The remaining options not handled here are managed elsewhere in the
+	 * driver. No register modifications are needed at this time. Reflecting the
+	 * option in InstancePtr->Options is good enough for now.
+	 */
+
+	/* Clear the requested bits in the stored options word */
+	InstancePtr->Options &= ~Options;
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Get current option settings
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return
+ * A bitmask of XTE_*_OPTION constants. Any bit set to 1 is to be interpreted
+ * as a set option.
+ *
+ * @note
+ * See xtemac.h for a description of the available options.
+ *
+ ******************************************************************************/
+u32 XTemac_GetOptions(XTemac *InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	return (InstancePtr->Options);
+}
+
+
+/*****************************************************************************/
+/**
+ * Send a pause packet
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param PauseValue is the pause value in units of 512 bit times.
+ *
+ * @return
+ * - XST_SUCCESS if pause frame transmission was initiated
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ *
+ ******************************************************************************/
+int XTemac_SendPausePacket(XTemac *InstancePtr, u16 PauseValue)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* Initiate the pause frame by writing the pause value to TPPR */
+	XTemac_mSetIpifReg(XTE_TPPR_OFFSET, (u32) PauseValue);
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Get the current operating link speed. This may be the value set by
+ * XTemac_SetOperatingSpeed() or a HW default.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return Link speed in units of megabits per second (0 if the HW reports an unrecognized encoding)
+ *
+ ******************************************************************************/
+u16 XTemac_GetOperatingSpeed(XTemac *InstancePtr)
+{
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	switch (XTemac_mGetHostReg(XTE_EMCFG_OFFSET) & XTE_EMCFG_LINKSPD_MASK) {
+	case XTE_EMCFG_LINKSPD_1000:
+		return (1000);
+
+	case XTE_EMCFG_LINKSPD_100:
+		return (100);
+
+	case XTE_EMCFG_LINKSPD_10:
+		return (10);
+
+	default:
+		return (0);
+	}
+}
+
+
+/*****************************************************************************/
+/**
+ * Set the current operating link speed. For any traffic to be passed, this
+ * speed must match the current MII/GMII/SGMII/RGMII link speed.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Speed is the speed to set in units of Mbps. Valid values are 10, 100,
+ *        or 1000. Invalid values result in no change to the device.
+ *
+ ******************************************************************************/
+void XTemac_SetOperatingSpeed(XTemac *InstancePtr, u16 Speed)
+{
+	u32 EcfgReg;
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_VOID((Speed == 10) || (Speed == 100) || (Speed == 1000));
+
+	/* Get the current contents of the EMAC config register and zero out
+	 * speed bits
+	 */
+	EcfgReg =
+		XTemac_mGetHostReg(XTE_EMCFG_OFFSET) & ~XTE_EMCFG_LINKSPD_MASK;
+
+	switch (Speed) {
+	case 10:
+		break;	/* 10 Mbps is encoded with both speed bits clear */
+
+	case 100:
+		EcfgReg |= XTE_EMCFG_LINKSPD_100;
+		break;
+
+	case 1000:
+		EcfgReg |= XTE_EMCFG_LINKSPD_1000;
+		break;
+
+	default:
+		return;	/* invalid speed (asserts disabled): leave register untouched */
+	}
+
+	/* Set register and return */
+	XTemac_mSetHostReg(XTE_EMCFG_OFFSET, EcfgReg);
+}
+
+/*****************************************************************************/
+/**
+ * Get the current state of the link when media interface is of the SGMII type
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param SpeedPtr is a return value set to either 0, 10, 100, or 1000. Units
+ *        are in Mbits/sec.
+ *
+ * @return
+ * - XST_SUCCESS if the SGMII status was read and return values set.
+ * - XST_NO_FEATURE if the device is not using SGMII.
+ *
+ ******************************************************************************/
+int XTemac_GetSgmiiStatus(XTemac *InstancePtr, u16 *SpeedPtr)
+{
+	int PhyType;
+	u32 EgmicReg;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Make sure PHY is SGMII */
+	PhyType = XTemac_mGetPhysicalInterface(InstancePtr);
+	if (PhyType != XTE_PHY_TYPE_SGMII) {
+		return (XST_NO_FEATURE);
+	}
+
+	/* Get the current contents of RGMII/SGMII config register */
+	EgmicReg = XTemac_mGetHostReg(XTE_GMIC_OFFSET);
+
+	/* Extract speed */
+	switch (EgmicReg & XTE_GMIC_RGLINKSPD_MASK) {
+	case XTE_GMIC_RGLINKSPD_10:
+		*SpeedPtr = 10;
+		break;
+
+	case XTE_GMIC_RGLINKSPD_100:
+		*SpeedPtr = 100;
+		break;
+
+	case XTE_GMIC_RGLINKSPD_1000:
+		*SpeedPtr = 1000;
+		break;
+
+	default:
+		*SpeedPtr = 0;	/* unrecognized speed encoding */
+	}
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Get the current state of the link when media interface is of the RGMII type
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param SpeedPtr is a return value set to either 0, 10, 100, or 1000. Units
+ *        are in Mbits/sec.
+ * @param IsFullDuplexPtr is a return value set to TRUE if the RGMII link
+ *        is operating in full duplex, or FALSE if operating in half duplex
+ *        (derived from the XTE_GMIC_RGHALFDUPLEX_MASK bit).
+ * @param IsLinkUpPtr is a return value set to TRUE if the RGMII link is up,
+ *        or FALSE if the link is down.
+ *
+ * @return
+ * - XST_SUCCESS if the RGMII status was read and return values set.
+ * - XST_NO_FEATURE if the device is not using RGMII.
+ *
+ ******************************************************************************/
+int XTemac_GetRgmiiStatus(XTemac *InstancePtr, u16 *SpeedPtr,
+			  u32 *IsFullDuplexPtr, u32 *IsLinkUpPtr)
+{
+	int PhyType;
+	u32 EgmicReg;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Make sure PHY is RGMII */
+	PhyType = XTemac_mGetPhysicalInterface(InstancePtr);
+	if ((PhyType != XTE_PHY_TYPE_RGMII_1_3) &&
+	    (PhyType != XTE_PHY_TYPE_RGMII_2_0)) {
+		return (XST_NO_FEATURE);
+	}
+
+	/* Get the current contents of RGMII/SGMII config register */
+	EgmicReg = XTemac_mGetHostReg(XTE_GMIC_OFFSET);
+
+	/* Extract speed */
+	switch (EgmicReg & XTE_GMIC_RGLINKSPD_MASK) {
+	case XTE_GMIC_RGLINKSPD_10:
+		*SpeedPtr = 10;
+		break;
+
+	case XTE_GMIC_RGLINKSPD_100:
+		*SpeedPtr = 100;
+		break;
+
+	case XTE_GMIC_RGLINKSPD_1000:
+		*SpeedPtr = 1000;
+		break;
+
+	default:
+		*SpeedPtr = 0;	/* unrecognized speed encoding */
+	}
+
+	/* Extract duplex and link status */
+	if (EgmicReg & XTE_GMIC_RGHALFDUPLEX_MASK) {
+		*IsFullDuplexPtr = FALSE;
+	}
+	else {
+		*IsFullDuplexPtr = TRUE;
+	}
+
+	if (EgmicReg & XTE_GMIC_RGSTATUS_MASK) {
+		*IsLinkUpPtr = TRUE;
+	}
+	else {
+		*IsLinkUpPtr = FALSE;
+	}
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Set the MDIO clock divisor. This function must be called once after each
+ * reset prior to accessing MII PHY registers.
+ *
+ * Calculating the divisor:
+ *
+ * From the Virtex-4 Embedded Tri-Mode Ethernet MAC User's Guide, the
+ * following equation governs the MDIO clock to the PHY:
+ *
+ * <pre>
+ *              f[HOSTCLK]
+ *   f[MDC] = -----------------
+ *            (1 + Divisor) * 2
+ * </pre>
+ *
+ * where f[HOSTCLK] is the bus clock frequency in MHz, and f[MDC] is the
+ * MDIO clock frequency in MHz to the PHY. Typically, f[MDC] should not
+ * exceed 2.5 MHz. Some PHYs can tolerate faster speeds which means faster
+ * access.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Divisor is the divisor to set. Range is 0 to XTE_MC_CLK_DVD_MAX.
+ *
+ ******************************************************************************/
+void XTemac_PhySetMdioDivisor(XTemac *InstancePtr, u8 Divisor)
+{
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_VOID(Divisor <= XTE_MC_CLK_DVD_MAX);
+
+	XTemac_mSetHostReg(XTE_MC_OFFSET, (u32) Divisor | XTE_MC_MDIO_MASK);
+}
+
+
+/*****************************************************************************/
+/*
+*
+* Read the current value of the PHY register indicated by the PhyAddress and
+* the RegisterNum parameters. The MAC provides the driver with the ability to
+* talk to a PHY that adheres to the Media Independent Interface (MII) as
+* defined in the IEEE 802.3 standard.
+*
+* Prior to PHY access with this function, the user should have setup the MDIO
+* clock with XTemac_PhySetMdioDivisor().
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param PhyAddress is the address of the PHY to be read (supports multiple
+*        PHYs)
+* @param RegisterNum is the register number, 0-31, of the specific PHY register
+*        to read
+* @param PhyDataPtr is an output parameter, and points to a 16-bit buffer into
+*        which the current value of the register will be copied.
+*
+* @return
+*
+* - XST_SUCCESS if the PHY was read from successfully
+* - XST_NO_FEATURE if the device is not configured with MII support (NOTE(review): not currently returned by this implementation)
+* - XST_EMAC_MII_BUSY if there is another PHY operation in progress
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+* <br><br>
+* There is the possibility that this function will not return if the hardware
+* is broken (i.e., it never sets the status bit indicating that the read is
+* done). If this is of concern to the user, the user should provide a mechanism
+* suitable to their needs for recovery.
+* <br><br>
+* For the duration of this function, all host interface reads and writes are
+* blocked to the current Temac instance and also the 2nd instance if it exists
+* in the system. This is a HW limitation. See xtemac.h for a list of functions
+* that will be blocked until this operation completes.
+*
+******************************************************************************/
+int XTemac_PhyRead(XTemac *InstancePtr, u32 PhyAddress,
+		   u32 RegisterNum, u16 *PhyDataPtr)
+{
+	u32 Mgtcr;
+	volatile u32 Ipisr;
+
+	XASSERT_NONVOID(InstancePtr != NULL);	/* NOTE(review): IsReady is not asserted here, unlike other entry points */
+
+	/* Make sure no other PHY operation is currently in progress */
+	if (XTemac_mGetIpifReg(XTE_IPISR_OFFSET) & XTE_IPXR_MII_PEND_MASK) {
+		return (XST_EMAC_MII_BUSY);
+	}
+
+	/* Construct Mgtcr mask for the operation */
+	Mgtcr = RegisterNum & XTE_MGTCR_REGAD_MASK;
+	Mgtcr |= ((PhyAddress << XTE_MGTCR_PHYAD_SHIFT_MASK) &
+		  XTE_MGTCR_PHYAD_MASK);
+	Mgtcr |= XTE_MGTCR_RWN_MASK;
+
+	/* Write Mgtcr and wait for completion */
+	XTemac_mSetIpifReg(XTE_MGTCR_OFFSET, Mgtcr);
+
+	do {
+		Ipisr = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+	} while (!(Ipisr & XTE_IPXR_MII_DONE_MASK));
+
+	/* Read data; explicit cast documents the u32 -> u16 truncation */
+	*PhyDataPtr = (u16) XTemac_mGetIpifReg(XTE_MGTDR_OFFSET);
+
+	/* Clear MII status bits */
+	XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+			   Ipisr & (XTE_IPXR_MII_DONE_MASK |
+				    XTE_IPXR_MII_PEND_MASK));
+
+	return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/*
+* Write data to the specified PHY register. The Ethernet driver does not
+* require the device to be stopped before writing to the PHY. Although it is
+* probably a good idea to stop the device, it is the responsibility of the
+* application to deem this necessary. The MAC provides the driver with the
+* ability to talk to a PHY that adheres to the Media Independent Interface
+* (MII) as defined in the IEEE 802.3 standard.
+*
+* Prior to PHY access with this function, the user should have setup the MDIO
+* clock with XTemac_PhySetMdioDivisor().
+*
+* @param InstancePtr is a pointer to the XTemac instance to be worked on.
+* @param PhyAddress is the address of the PHY to be written (supports multiple
+* PHYs)
+* @param RegisterNum is the register number, 0-31, of the specific PHY register
+* to write
+* @param PhyData is the 16-bit value that will be written to the register
+*
+* @return
+*
+* - XST_SUCCESS if the PHY was written to successfully. Since there is no error
+* status from the MAC on a write, the user should read the PHY to verify the
+* write was successful.
+* - XST_NO_FEATURE if the device is not configured with MII support
+* - XST_EMAC_MII_BUSY if there is another PHY operation in progress
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+* <br><br>
+* There is the possibility that this function will not return if the hardware
+* is broken (i.e., it never sets the status bit indicating that the write is
+* done). If this is of concern to the user, the user should provide a mechanism
+* suitable to their needs for recovery.
+* <br><br>
+* For the duration of this function, all host interface reads and writes are
+* blocked to the current Temac instance and also the 2nd instance if it exists
+* in the system. This is a HW limitation. See xtemac.h for a list of functions
+* that will be blocked until this operation completes.
+*
+******************************************************************************/
+int XTemac_PhyWrite(XTemac *InstancePtr, u32 PhyAddress,
+		    u32 RegisterNum, u16 PhyData)
+{
+	u32 Mgtcr;
+	volatile u32 Ipisr;	/* volatile: IPISR is re-read in the poll loop below */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* Make sure no other PHY operation is currently in progress */
+	if (XTemac_mGetIpifReg(XTE_IPISR_OFFSET) & XTE_IPXR_MII_PEND_MASK) {
+		return (XST_EMAC_MII_BUSY);
+	}
+
+	/* Construct Mgtcr mask for the operation. Unlike XTemac_PhyRead(),
+	 * the RWN bit is left clear, which selects a PHY write cycle.
+	 */
+	Mgtcr = RegisterNum & XTE_MGTCR_REGAD_MASK;
+	Mgtcr |= ((PhyAddress << XTE_MGTCR_PHYAD_SHIFT_MASK) &
+		  XTE_MGTCR_PHYAD_MASK);
+
+	/* Write Mgtdr (the data) first, then Mgtcr (the command), and wait
+	 * for completion
+	 */
+	XTemac_mSetIpifReg(XTE_MGTDR_OFFSET, (u32) PhyData);
+	XTemac_mSetIpifReg(XTE_MGTCR_OFFSET, Mgtcr);
+
+	do {
+		Ipisr = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+	} while (!(Ipisr & XTE_IPXR_MII_DONE_MASK));
+
+	/* Clear MII status bits */
+	XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+			   Ipisr & (XTE_IPXR_MII_DONE_MASK |
+				    XTE_IPXR_MII_PEND_MASK));
+
+	/* NOTE(review): the header lists XST_NO_FEATURE as a possible return,
+	 * but this body never returns it -- confirm for MII-less configs.
+	 */
+	return (XST_SUCCESS);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_fifo.c
+*
+* Functions in this file implement FIFO direct and Simple DMA frame transfer
+* mode. See xtemac.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 1.00b rmm 09/23/05 Fixed void* arithmetic usage, added XST_FIFO_ERROR
+* return code to send/recv query functions.
+* 2.00a rmm 11/21/05 Removed XST_FAILURE return code for XTemac_FifoQuery-
+* SendStatus, removed simple dma code
+* rmm 06/22/06 Fixed C++ compiler warnings
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+#define PFIFO_64BIT_WIDTH_BYTES 8
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*******************************************************************************
+ * Primitives that modify the hold structure for XTemac_PacketFifo. All F
+ * parameters refer to a pointer to XTemac_PacketFifo.
+ *
+ * mHold_GetIndex(F) - Get the ByteIndex of Hold
+ * mHold_SetIndex(F,D) - Set the ByteIndex of Hold to D
+ * mHold_Advance(F,D) - Advance the ByteIndex of Hold by D bytes
+ * mHold_CopyIn(F,I,D) - Set Hold[I] to D
+ * mHold_CopyOut(F,I,D) - Set D to Hold[I]
+ * mHoldS_IsFull(F) - Is a write channel Hold full of data
+ * mHoldS_IsEmpty(F) - Is a write channel Hold empty
+ * mHoldS_SetEmpty(F) - Set a write channel Hold empty
+ * mHoldR_IsFull(F) - Is a read channel Hold full of data
+ * mHoldR_IsEmpty(F) - Is a read channel Hold empty
+ * mHoldR_SetEmpty(F) - Set a read channel Hold empty
+ *
+ * @param F - Address to a XTemac_PacketFifo structure
+ * @param SrcPtr - Source data address aligned on 4 byte boundary
+ *
+ ******************************************************************************/
+#define mHold_GetIndex(F) ((F)->ByteIndex)
+#define mHold_SetIndex(F, D) ((F)->ByteIndex = (D))
+#define mHold_Advance(F, D) ((F)->ByteIndex += (D))
+#define mHold_CopyIn(F, I, D) (*(u8*)(((u8*)(&(F)->Hold[0])) + (I)) = (D))
+#define mHold_CopyOut(F, I, D) ((D) = (*(u8*)(((u8*)(&(F)->Hold[0])) + (I))))
+
+/* Send (write channel) hold: ByteIndex counts bytes accumulated, so the
+ * hold is empty at index 0 and full once the index reaches the FIFO width.
+ */
+#define mHoldS_IsFull(F) ((F)->ByteIndex >= (F)->Width)
+#define mHoldS_IsEmpty(F) ((F)->ByteIndex == 0)
+#define mHoldS_SetEmpty(F) ((F)->ByteIndex = 0)
+
+/* Receive (read channel) hold: ByteIndex counts bytes consumed, so the
+ * meanings are reversed -- full at index 0, empty once index reaches Width.
+ */
+#define mHoldR_IsFull(F) ((F)->ByteIndex == 0)
+#define mHoldR_IsEmpty(F) ((F)->ByteIndex >= (F)->Width)
+#define mHoldR_SetEmpty(F) ((F)->ByteIndex = (F)->Width)
+
+/*******************************************************************************
+ * Primitive write to 64 bit FIFO. Use two 32-bit wide I/O accesses.
+ *
+ * @param F - Address to a XTemac_PacketFifo structure
+ * @param SrcPtr - Source data address aligned on 4 byte boundary
+ *
+ ******************************************************************************/
+#define mWriteFifo64(F, SrcPtr) \
+	{ \
+		register u32 Faddr = F->Fifo.DataBaseAddress; \
+		XIo_Out32(Faddr, (SrcPtr)[0]); \
+		XIo_Out32(Faddr + 4, (SrcPtr)[1]); \
+	}
+
+/*******************************************************************************
+ * Primitive read from 64 bit FIFO. Use two 32-bit wide I/O accesses.
+ *
+ * @param F - Address to a XTemac_PacketFifo structure
+ * @param DestPtr - Destination data address aligned on 4 byte boundary
+ *
+ ******************************************************************************/
+#define mReadFifo64(F, DestPtr) \
+	(DestPtr)[0] = XIo_In32(F->Fifo.DataBaseAddress); \
+	(DestPtr)[1] = XIo_In32(F->Fifo.DataBaseAddress + 4);
+
+/*******************************************************************************
+ * Primitive to transfer the holding data to the FIFO 64 bits at a time
+ *
+ * @param F - Address to a XTemac_PacketFifo structure
+ *
+ ******************************************************************************/
+#define mPush64(F) mWriteFifo64(F, &F->Hold[0])
+
+/*******************************************************************************
+ * Primitive to transfer FIFO contents into the holding data 64 bits at a time
+ *
+ * @param F - Address to a XTemac_PacketFifo structure
+ *
+ ******************************************************************************/
+#define mPop64(F) mReadFifo64(F, &F->Hold[0])
+
+
+/************************** Function Prototypes ******************************/
+
+/* The following functions will be attached to the FifoRead and FifoWrite
+ * attribute of an instance by XTemac_ConfigureFifoAccess
+ */
+static int Write_64(XTemac_PacketFifo *Fptr, void *BufPtr,
+ u32 ByteCount, int Eop);
+static int Read_64(XTemac_PacketFifo *Fptr, void *BufPtr,
+ u32 ByteCount, int Eop);
+
+/* 64 bit wide FIFO support functions */
+static void Write64_Unaligned(XTemac_PacketFifo *F, void *BufPtr,
+ u32 ByteCount);
+static void Write64_Aligned(XTemac_PacketFifo *F, u32 *BufPtr, u32 ByteCount);
+static void Read64_Unaligned(XTemac_PacketFifo *F, void *BufPtr, u32 ByteCount);
+static void Read64_Aligned(XTemac_PacketFifo *F, u32 *BufPtr, u32 ByteCount);
+
+
+/*******************************************************************************
+ * Select the best method for accessing the read and write FIFOs for FIFO direct
+ * frame transfer mode. On the write (transmit) side, the choices are DRE or via
+ * the holding structure. Both methods allow unaligned transfers. On the read
+ * (receive) side, the only choice is the holding structure.
+ *
+ * This function should be called only from XTemac_Initialize().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return XST_SUCCESS or XST_FAILURE if an error was detected
+ *
+ ******************************************************************************/
+int XTemac_ConfigureFifoAccess(XTemac *InstancePtr)
+{
+	int Result;
+
+	/* Initialize the receive packet FIFO; the register and data ports sit
+	 * at fixed offsets from the device base address
+	 */
+	Result = XPacketFifoV200a_Initialize(&InstancePtr->RecvFifo.Fifo,
+					     InstancePtr->BaseAddress +
+					     XTE_PFIFO_RXREG_OFFSET,
+					     InstancePtr->BaseAddress +
+					     XTE_PFIFO_RXDATA_OFFSET);
+	if (Result != XST_SUCCESS) {
+		return (XST_FAILURE);
+	}
+
+	/* Initialize the transmit packet FIFO likewise */
+	Result = XPacketFifoV200a_Initialize(&InstancePtr->SendFifo.Fifo,
+					     InstancePtr->BaseAddress +
+					     XTE_PFIFO_TXREG_OFFSET,
+					     InstancePtr->BaseAddress +
+					     XTE_PFIFO_TXDATA_OFFSET);
+
+	if (Result != XST_SUCCESS) {
+		return (XST_FAILURE);
+	}
+
+	/* Choose an access algorithm.
+	 * Note: 64-bit wide FIFO is the only width supported at this time
+	 */
+	InstancePtr->RecvFifo.Width = PFIFO_64BIT_WIDTH_BYTES;
+	InstancePtr->RecvFifo.XferFn = Read_64;
+	InstancePtr->SendFifo.Width = PFIFO_64BIT_WIDTH_BYTES;
+	InstancePtr->SendFifo.XferFn = Write_64;
+
+	/* Initialize the holds (send hold empties to index 0, receive hold
+	 * empties to index == Width; see the mHold* macros above)
+	 */
+	mHoldS_SetEmpty(&InstancePtr->SendFifo);
+	mHoldR_SetEmpty(&InstancePtr->RecvFifo);
+
+	return (XST_SUCCESS);
+}
+
+/******************************************************************************/
+/**
+ * Copy data from a user buffer to the transmit packet FIFO. The data copied
+ * may comprise of single, multiple, or partial packets. The data is not
+ * transmitted until XTemac_FifoSend() is called.
+ *
+ * If the user buffer contains multiple packets, then extra care must be taken.
+ * In this special situation, the end of one packet and the beginning of a new
+ * packet is specified within the user buffer. The beginning of each NEW packet
+ * must begin on a 4 byte alignment. The user is responsible for adding filler
+ * data between packets to achieve this alignment. The amount of filler data
+ * depends on what byte the end of the previous packet falls on. When calling
+ * XTemac_FifoSend() to transmit the packets, DO NOT specify the filler bytes
+ * in the TxByteCount parameter. For example, if a user buffer contains two
+ * complete packets of 15 bytes each with 1 byte of filler between them, then
+ * XTemac_FifoWrite() is called once to write all 31 bytes to the FIFO.
+ * XTemac_FifoSend() is called twice specifying 15 bytes each time to transmit
+ * the packets (the 1 byte of filler data is ignored by the TEMAC). Of course
+ * you could also just call XTemac_FifoWrite() once for each packet. This way,
+ * the driver will manage the filler data.
+ *
+ * If the user's buffer is not aligned on a 4 byte boundary, then the transfer
+ * may take longer to complete.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param BufPtr is the buffer containing user data that will be transferred
+ * into the transmit FIFO. The buffer may be on any alignment.
+ * @param ByteCount is the number of bytes to transfer from 1 to the number
+ * of bytes available in the FIFO at the time of invocation. See usage
+ * note for situations when a value of 0 is legal.
+ * @param Eop specifies whether the last byte of BufPtr marks the End Of Packet.
+ * If set to XTE_END_OF_PACKET, then any partial bytes being buffered by
+ * the driver are flushed into the packet FIFO. If set to
+ * XTE_PARTIAL_PACKET, then more packet data is expected to be written
+ * through more calls to this function. Failure to use XTE_END_OF_PACKET
+ * prior to calling XTemac_FifoSend() may cause a packet FIFO underrun.
+ *
+ * @return
+ * - XST_SUCCESS if the data was transferred to the FIFO.
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ * - XST_PFIFO_ERROR if there was a packet FIFO overflow during the transfer.
+ * This is a fatal condition. If this value is returned in polled mode, then
+ * the device must be reset. For interrupt driven modes, an interrupt will be
+ * asserted resulting in a call to the registered error handler which should
+ * handle reset of the device.
+ * - XST_IPIF_ERROR if a data or bus error occurred within the TEMAC's IPIF.
+ * Like the PFIFO error, this is a fatal condition and should be handled
+ * in the same manner.
+ *
+ * @note
+ * Calling this function with ByteCount = 0 will not result in the transfer of
+ * data from BufPtr to the FIFO. However, if at the same time Eop is set to
+ * XTE_END_OF_PACKET, then all data previously written with this function is
+ * guaranteed to be flushed into the packet FIFO and available for transmission
+ * with XTemac_FifoSend().
+ ******************************************************************************/
+int XTemac_FifoWrite(XTemac *InstancePtr, void *BufPtr, u32 ByteCount, int Eop)
+{
+	u32 RegDISR;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(!
+			((Eop != XTE_END_OF_PACKET) &&
+			 (Eop != XTE_PARTIAL_PACKET)));
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* Transfer the data using the best/fastest method. The return value
+	 * is deliberately ignored; errors are detected below via the DISR.
+	 */
+	InstancePtr->SendFifo.XferFn(&InstancePtr->SendFifo, BufPtr, ByteCount,
+				     Eop);
+
+	/* Make sure the packet FIFO didn't report an error */
+	RegDISR = XTemac_mGetIpifReg(XTE_DISR_OFFSET);
+	if (RegDISR & XTE_DXR_SEND_FIFO_MASK) {
+		/* Only bump stats in polled mode. For interrupt driven mode, this stat
+		 * is bumped in XTemac_IntrFifoHandler()
+		 */
+		if (InstancePtr->Options & XTE_POLLED_OPTION) {
+			XTemac_mBumpStats(TxPktFifoErrors, 1);
+		}
+		return (XST_PFIFO_ERROR);
+	}
+
+	/* Verify no IPIF errors (data phase timeout or transaction error) */
+	if (RegDISR & (XTE_DXR_DPTO_MASK | XTE_DXR_TERR_MASK)) {
+		/* Only bump stats in polled mode. For interrupt driven mode, this stat
+		 * is bumped in XTemac_IntrFifoHandler()
+		 */
+		if (InstancePtr->Options & XTE_POLLED_OPTION) {
+			XTemac_mBumpStats(IpifErrors, 1);
+		}
+		return (XST_IPIF_ERROR);
+	}
+
+	return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Initiate a transmit of one packet of data previously written with
+ * XTemac_FifoWrite(). The given length in bytes is written to the transmit
+ * length FIFO. There should be at least this many bytes in the packet FIFO
+ * ready for transmit.
+ *
+ * If FIFO interrupts are enabled (see XTemac_IntrFifoEnable()), then upon
+ * completion of the transmit, the registered XTemac_FifoSendHandler() is
+ * invoked.
+ *
+ * If more bytes that are in the packet FIFO are specified in the TxByteCount
+ * parameter, then a packet FIFO underrun error will result.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param TxByteCount is the number of bytes to transmit. Range is 1 to the
+ * total number of bytes available in the packet FIFO to be transmitted.
+ *
+ * @return
+ * - XST_SUCCESS if transmit was initiated.
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ * - XST_FIFO_NO_ROOM if the transmit was not initiated because the transmit
+ * length FIFO was full. This is not a fatal condition. The user may need to
+ * wait for other packets to transmit before this condition clears itself.
+ *
+ ******************************************************************************/
+int XTemac_FifoSend(XTemac *InstancePtr, u32 TxByteCount)
+{
+	u32 RegIPISR;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(TxByteCount != 0);
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* See if the transmit length FIFO is full. If it is, try to clear the
+	 * status. If the status remains set, then return an error
+	 */
+	RegIPISR = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+	if (RegIPISR & XTE_IPXR_XMIT_LFIFO_FULL_MASK) {
+		XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+				   XTE_IPXR_XMIT_LFIFO_FULL_MASK);
+
+		RegIPISR = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+		if (RegIPISR & XTE_IPXR_XMIT_LFIFO_FULL_MASK) {
+			XTemac_mBumpStats(FifoErrors, 1);
+			return (XST_FIFO_NO_ROOM);
+		}
+	}
+
+	/* Start transmit by writing the frame length to the TPLR */
+	XTemac_mSetIpifReg(XTE_TPLR_OFFSET, TxByteCount);
+
+	/* Return success */
+	return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Return the length of a received packet. If a packet is waiting in the
+ * receive packet FIFO, then it may be copied to a user buffer with
+ * XTemac_FifoRead().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param ByteCountPtr is the length of the next received packet if the return
+ * status is XST_SUCCESS.
+ *
+ * @return
+ * - XST_SUCCESS if a packet has been received and a value has been written to
+ * ByteCountPtr.
+ * - XST_DEVICE_IS_STOPPED if the device has been stopped.
+ * - XST_NO_DATA if no packet length is available. ByteCountPtr is not modified.
+ *
+ ******************************************************************************/
+int XTemac_FifoRecv(XTemac *InstancePtr, u32 *ByteCountPtr)
+{
+	u32 RegIPISR;
+	volatile u32 RegRSR;	/* volatile: the RSR read below is side-effect only */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(ByteCountPtr != NULL);
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* If the receive length FIFO is empty, then there's no packet waiting */
+	RegIPISR = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+	if (!(RegIPISR & XTE_IPXR_RECV_DONE_MASK)) {
+		return (XST_NO_DATA);
+	}
+
+	/* Get the length */
+	*ByteCountPtr = XTemac_mGetIpifReg(XTE_RPLR_OFFSET);
+
+	/* The IPXR_RECV_DONE_MASK status bit is tied to the RSR register. To clear
+	 * this condition, read from the RSR (which has no information) then write
+	 * to the IPISR register to ack the status.
+	 */
+	RegRSR = XTemac_mGetIpifReg(XTE_RSR_OFFSET);
+	XTemac_mSetIpifReg(XTE_IPISR_OFFSET, XTE_IPXR_RECV_DONE_MASK);
+
+	/* Return success */
+	return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Copy data from the receive packet FIFO into a user buffer. The number of
+ * bytes to copy is derived from XTemac_FifoRecv(). The packet data may be
+ * copied out of the FIFO all at once or with multiple calls to this function.
+ * The latter method supports systems that keep packet data in non-contiguous
+ * memory regions. For example:
+ * <pre>
+ * if (XTemac_FifoRecv(Tptr, &PacketLength) == XST_SUCCESS)
+ * {
+ * if (PacketLength > 14)
+ * {
+ * HeaderLength = 14;
+ * PayloadLength = PacketLength - HeaderLength;
+ *
+ * Status = XTemac_FifoRead(Tptr, UserHeaderBuf, HeaderLength,
+ * XTE_PARTIAL_PACKET);
+ * Status |= XTemac_FifoRead(Tptr, UserPayloadBuf, PayloadLength,
+ * XTE_END_OF_PACKET);
+ *
+ * if (Status != XST_SUCCESS)
+ * {
+ * // handle error
+ * }
+ * }
+ * }
+ * </pre>
+ *
+ * If the user's buffer is not aligned on a 4 byte boundary, then the transfer
+ * may take longer to complete.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param BufPtr is the user buffer that will receive packet data from the FIFO.
+ * The buffer may be on any alignment.
+ * @param ByteCount is the number of bytes to transfer
+ * @param Eop specifies whether the last byte read is the last byte of a packet.
+ * If set to XTE_END_OF_PACKET, then any partial bytes being buffered by
+ * the driver at the end of the transfer are discarded. These discarded
+ * bytes are filler provided by the hardware and have no meaning. If set
+ * to XTE_PARTIAL_PACKET, then more packet data is expected to be read
+ * through more calls to this function. Failure to use this parameter
+ * properly will result in undefined filler bytes being copied into
+ * BufPtr.
+ *
+ * @return
+ * - XST_SUCCESS if the data was transferred to the user buffer
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ * - XST_NO_DATA if there was not enough data in the packet FIFO to satisfy the
+ * request.
+ *
+ * @note
+ * Do not attempt to read more than one packets worth of data at a time with
+ * this function.
+ ******************************************************************************/
+int XTemac_FifoRead(XTemac *InstancePtr, void *BufPtr, u32 ByteCount, int Eop)
+{
+	int XferResult;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(!
+			((Eop != XTE_END_OF_PACKET) &&
+			 (Eop != XTE_PARTIAL_PACKET)));
+
+	/* Reads are illegal while the device is stopped */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* Dispatch to the access routine installed by
+	 * XTemac_ConfigureFifoAccess()
+	 */
+	XferResult = InstancePtr->RecvFifo.XferFn(&InstancePtr->RecvFifo,
+						  BufPtr, ByteCount, Eop);
+
+	/* Report "not enough data" as-is; collapse everything else to
+	 * success
+	 */
+	return ((XferResult == XST_NO_DATA) ? XST_NO_DATA : XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Retrieve the number of free bytes in the packet FIFOs.
+ *
+ * For the transmit packet FIFO, the number returned is the number of bytes
+ * that can be written by XTemac_FifoWrite(). If a non-zero number is returned,
+ * then at least 1 packet of that size can be transmitted.
+ *
+ * For the receive packet FIFO, the number returned is the number of bytes that
+ * can arrive from an external Ethernet device. This number does not reflect
+ * the state of the receive length FIFO. If this FIFO is full, then arriving
+ * packets will get dropped by the HW if there is no place to store the length.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction selects which packet FIFO to examine. If XTE_SEND, then
+ * the transmit packet FIFO is selected. If XTE_RECV, then the receive
+ * packet FIFO is selected.
+ *
+ * @return
+ * Number of bytes available in the selected packet FIFO.
+ *
+ ******************************************************************************/
+u32 XTemac_FifoGetFreeBytes(XTemac *InstancePtr, u32 Direction)
+{
+	u32 RegIPISR;
+	u32 Count;		/* free FIFO entries (in FIFO-width units) */
+	u32 Width;		/* byte width of the FIFO actually queried */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(!(Direction & ~(XTE_SEND | XTE_RECV)));
+
+	/* For the send direction, even though there may be room in the
+	 * packet FIFO, the length FIFO may be full. When this is the case,
+	 * another packet cannot be transmitted so return 0.
+	 */
+	if (Direction == XTE_SEND) {
+		/* Check length FIFO */
+		RegIPISR = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+		if (RegIPISR & XTE_IPXR_XMIT_LFIFO_FULL_MASK) {
+			return (0);
+		}
+
+		/* Get FIFO entries */
+		Count = XPF_V200A_GET_COUNT(&InstancePtr->SendFifo.Fifo);
+		Width = InstancePtr->SendFifo.Width;
+	}
+
+	/* Handle receive direction */
+	else {
+		Count = XPF_V200A_COUNT_MASK -
+			XPF_V200A_GET_COUNT(&InstancePtr->RecvFifo.Fifo);
+		Width = InstancePtr->RecvFifo.Width;
+	}
+
+	/* Multiply free entries by the width of the queried packet FIFO to
+	 * arrive at bytes. Fix: the receive width was previously used for
+	 * both directions, which was benign only because both FIFOs are
+	 * configured with the same width in XTemac_ConfigureFifoAccess().
+	 */
+	return (Count * Width);
+}
+
+
+/******************************************************************************/
+/**
+ * Query the device for the latest transmit status for FIFO direct frame
+ * transfer mode. This function should be used for polled mode operation only.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param SendStatusPtr is the contents of the XTE_TSR_OFFSET register when the
+ *        return code is XST_SUCCESS; it is not written for other return codes.
+ *
+ * @return
+ * - XST_NO_DATA if a transmit status is not currently available.
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ * - XST_NOT_POLLED if the device has not been set to polled mode.
+ * - XST_SUCCESS if a transmit status was found and indicates that there was
+ * no error.
+ * - XST_FIFO_ERROR if the transmit length or transmit status FIFOs error has
+ * been detected. If this error is returned, then the device must be reset
+ * before this function will return a valid transmit status indication.
+ * - XST_PFIFO_ERROR if the transmit packet FIFO is deadlocked. If this error
+ * is returned, then the device must be reset before this function will
+ * return a valid transmit status indication
+ * - XST_IPIF_ERROR if there has been a data phase timeout or transaction error
+ * in the IPIF. This is a fatal error.
+ *
+ * @note
+ * When XST_FAILURE is returned with the XTE_TSR_PFIFOU_MASK bit set in the
+ * SendStatusPtr parameter, then an attempt was made to transmit more data than
+ * was present in the packet FIFO. No reset is required in this situation.
+ *
+ ******************************************************************************/
+int XTemac_FifoQuerySendStatus(XTemac *InstancePtr, u32 *SendStatusPtr)
+{
+	u32 RegDISR;
+	u32 RegIPISR;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+	XASSERT_NONVOID(SendStatusPtr != NULL);
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* Have to be in polled mode to use this function */
+	if (!(InstancePtr->Options & XTE_POLLED_OPTION)) {
+		return (XST_NOT_POLLED);
+	}
+
+	/* Make sure send packet FIFO isn't deadlocked */
+	RegDISR = XTemac_mGetIpifReg(XTE_DISR_OFFSET);
+	if (RegDISR & XTE_DXR_SEND_FIFO_MASK) {
+		XTemac_mBumpStats(TxPktFifoErrors, 1);
+		return (XST_PFIFO_ERROR);
+	}
+
+	/* Make sure no IPIF errors are present */
+	if (RegDISR & (XTE_DXR_TERR_MASK | XTE_DXR_DPTO_MASK)) {
+		XTemac_mBumpStats(IpifErrors, 1);
+		return (XST_IPIF_ERROR);
+	}
+
+	/* Read the IPISR.
+	 * If any errors are detected, try to clear and return error
+	 */
+	RegIPISR = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+	if (RegIPISR & XTE_IPXR_XMIT_ERROR_MASK) {
+		XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+				   RegIPISR & XTE_IPXR_XMIT_ERROR_MASK);
+		XTemac_mBumpStats(FifoErrors, 1);
+		return (XST_FIFO_ERROR);
+	}
+
+	/* No FIFO errors, so see if a transmit has completed */
+	if (!(RegIPISR & XTE_IPXR_XMIT_DONE_MASK)) {
+		return (XST_NO_DATA);
+	}
+
+	/* Transmit has completed: get the status, ack the done condition */
+	*SendStatusPtr = XTemac_mGetIpifReg(XTE_TSR_OFFSET);
+	XTemac_mSetIpifReg(XTE_IPISR_OFFSET, XTE_IPXR_XMIT_DONE_MASK);
+
+	/* no errors to report */
+	return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Query the device for the latest receive status for FIFO direct frame
+ * transfer mode. This function should be used for polled mode operation only.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return
+ * - XST_SUCCESS if a frame has been received and no receive error was detected.
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ * - XST_NO_DATA if no frame has been received and no receive related error has
+ * been detected.
+ * - XST_NOT_POLLED if the device has not been set to polled mode.
+ * - XST_DATA_LOST if the device reports that it dropped a receive frame. This
+ * is not a serious problem but may indicate that frames are arriving faster
+ * than the system can process them.
+ * - XST_FIFO_ERROR if an error was detected with the receive length FIFO. If
+ * this error is returned, then the device must be reset before any new frame
+ * can be received.
+ * - XST_PFIFO_ERROR if the receive packet FIFO is deadlocked. If this error is
+ * returned, then the device must be reset before any new frame can be
+ * received.
+ * - XST_IPIF_ERROR if there has been a data phase timeout or transaction error
+ * in the IPIF. This is a fatal error.
+ *
+ * @note
+ * In situations where simultaneously a frame has been received for which an
+ * XST_SUCCESS can be returned and a dropped frame for which an XST_DATA_LOST
+ * can be returned, then this function will give priority to XST_SUCCESS so the
+ * user can receive the frame.
+ ******************************************************************************/
+int XTemac_FifoQueryRecvStatus(XTemac *InstancePtr)
+{
+	u32 RegDISR;
+	u32 RegIPISR;
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != XCOMPONENT_IS_STARTED) {
+		return (XST_DEVICE_IS_STOPPED);
+	}
+
+	/* Have to be in polled mode to use this function */
+	if (!(InstancePtr->Options & XTE_POLLED_OPTION)) {
+		return (XST_NOT_POLLED);
+	}
+
+	/* Read the DISR */
+	RegDISR = XTemac_mGetIpifReg(XTE_DISR_OFFSET);
+
+	/* Make sure recv packet FIFO isn't deadlocked */
+	if (RegDISR & XTE_DXR_RECV_FIFO_MASK) {
+		XTemac_mBumpStats(RxPktFifoErrors, 1);
+		return (XST_PFIFO_ERROR);
+	}
+
+	/* Make sure no IPIF errors are present */
+	if (RegDISR & (XTE_DXR_TERR_MASK | XTE_DXR_DPTO_MASK)) {
+		XTemac_mBumpStats(IpifErrors, 1);
+		return (XST_IPIF_ERROR);
+	}
+
+	/* Read the IPISR */
+	RegIPISR = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+
+	/* Check for recv related FIFO errors other than "dropped frame".
+	 * NOTE(review): the '-' here relies on XTE_IPXR_RECV_DROPPED_MASK
+	 * being a strict subset of XTE_IPXR_RECV_ERROR_MASK (in which case it
+	 * equals '& ~') -- confirm the mask definitions; '& ~' would be the
+	 * clearer spelling.
+	 */
+	if (RegIPISR & (XTE_IPXR_RECV_ERROR_MASK - XTE_IPXR_RECV_DROPPED_MASK)) {
+		XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+				   RegIPISR & XTE_IPXR_RECV_ERROR_MASK);
+		XTemac_mBumpStats(FifoErrors, 1);
+		return (XST_FIFO_ERROR);
+	}
+
+	/* See if a frame has been received (checked before the dropped-frame
+	 * case so XST_SUCCESS takes priority, per the function header)
+	 */
+	if (RegIPISR & XTE_IPXR_RECV_DONE_MASK) {
+		return (XST_SUCCESS);
+	}
+
+	/* If option to detect recv reject errors is set, check for rejected
+	 * receive frames. If one is detected, clear it and return error.
+	 */
+	if (InstancePtr->Options & XTE_REPORT_RXERR_OPTION) {
+		if (RegIPISR & XTE_IPXR_RECV_DROPPED_MASK) {
+			XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+					   RegIPISR &
+					   XTE_IPXR_RECV_DROPPED_MASK);
+			return (XST_DATA_LOST);
+		}
+	}
+
+	/* No frame has been received and no errors detected */
+	return (XST_NO_DATA);
+}
+
+
+/*******************************************************************************
+* Algorithm to write to a 64 bit wide transmit packet FIFO through the holding
+* buffer.
+*
+* @param Fptr is a pointer to a Temac FIFO instance to be worked on.
+* @param BufPtr is the source buffer address on any alignment
+* @param ByteCount is the number of bytes to transfer
+* @param Eop specifies whether the last byte written is the last byte of the
+* packet (XTE_END_OF_PACKET).
+*
+* @return XST_SUCCESS
+*******************************************************************************/
+static int Write_64(XTemac_PacketFifo *Fptr, void *BufPtr,
+ u32 ByteCount, int Eop)
+{
+ unsigned BufAlignment = (unsigned) BufPtr & 3;
+ unsigned PartialBytes;
+ unsigned HoldAlignment = mHold_GetIndex(Fptr);
+
+ /* Case 1: Buffer aligned on 4-byte boundary and Hold is empty
+ *
+ * 1. Write all bytes using the fastest transfer method
+ */
+ if ((BufAlignment == 0) && (mHoldS_IsEmpty(Fptr))) {
+ Write64_Aligned(Fptr, (u32 *) BufPtr, ByteCount);
+ }
+
+ /* Case 2: Buffer and Hold are byte aligned with each other
+ *
+ * 1. Transfer enough bytes from the buffer to the Hold to trigger a flush
+ * to the FIFO.
+ *
+ * 2. The state of the buffer and Hold are as described by Case 1 so
+ * write remaining bytes using the fastest transfer method
+ */
+ else if (BufAlignment == (HoldAlignment % PFIFO_64BIT_WIDTH_BYTES)) {
+ PartialBytes = PFIFO_64BIT_WIDTH_BYTES - HoldAlignment;
+
+ if (ByteCount < PartialBytes) {
+ PartialBytes = ByteCount;
+ }
+
+ Write64_Unaligned(Fptr, BufPtr, PartialBytes);
+ Write64_Aligned(Fptr, (u32 *) ((u32) BufPtr + PartialBytes),
+ ByteCount - PartialBytes);
+ }
+
+ /* Case 3: No alignment to take advantage of
+ *
+ * 1. Read FIFOs using the slower method.
+ */
+ else {
+ Write64_Unaligned(Fptr, BufPtr, ByteCount);
+ }
+
+ /* If this write completes the packet, flush any bytes still sitting
+ * in the hold out to the FIFO so the full frame is available to HW.
+ */
+ if (Eop == XTE_END_OF_PACKET) {
+ /* Push the hold to the FIFO if data is present */
+ if (!mHoldS_IsEmpty(Fptr)) {
+ mPush64(Fptr);
+ mHoldS_SetEmpty(Fptr);
+ }
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/*******************************************************************************
+* Algorithm to read from a 64 bit wide receive packet FIFO through the
+* holding buffer.
+*
+* @param Fptr is a pointer to a Temac FIFO instance to be worked on.
+* @param BufPtr is the destination address on any alignment
+* @param ByteCount is the number of bytes to transfer
+* @param Eop specifies whether this read consumes the last bytes of the
+* packet (XTE_END_OF_PACKET); if so, any residue left in the hold
+* is discarded.
+*
+* @return XST_SUCCESS if transfer completed or XST_NO_DATA if the amount of
+* data being buffered by the driver plus the amount of data in the
+* packet FIFO is not enough to satisfy the number of bytes requested
+* by the ByteCount parameter.
+*******************************************************************************/
+static int Read_64(XTemac_PacketFifo *Fptr, void *BufPtr,
+ u32 ByteCount, int Eop)
+{
+ unsigned BufAlignment = (unsigned) BufPtr & 3;
+ unsigned PartialBytes;
+ unsigned MaxBytes;
+ unsigned HoldAlignment = mHold_GetIndex(Fptr);
+
+ /* Determine how many bytes can be read from the packet FIFO */
+ MaxBytes = XPF_V200A_COUNT_MASK & XPF_V200A_GET_COUNT(&Fptr->Fifo);
+ MaxBytes *= PFIFO_64BIT_WIDTH_BYTES;
+
+ /* Case 1: Buffer aligned on 4-byte boundary and Hold is empty
+ *
+ * 1. Read all bytes using the fastest transfer method
+ */
+ if ((BufAlignment == 0) && (mHoldR_IsEmpty(Fptr))) {
+ /* Enough data in fifo? */
+ if (ByteCount > MaxBytes) {
+ return (XST_NO_DATA);
+ }
+
+ Read64_Aligned(Fptr, (u32 *) BufPtr, ByteCount);
+ }
+
+ /* Case 2: Buffer and Hold are byte aligned with each other
+ *
+ * 1. Transfer enough bytes from the Hold to the buffer to trigger a
+ * read from the FIFO.
+ *
+ * 2. The state of the buffer and Hold are now as described by Case 1 so
+ * read remaining bytes using the fastest transfer method
+ */
+ else if (BufAlignment == (HoldAlignment % PFIFO_64BIT_WIDTH_BYTES)) {
+ PartialBytes = PFIFO_64BIT_WIDTH_BYTES - HoldAlignment;
+
+ if (ByteCount < PartialBytes) {
+ PartialBytes = ByteCount;
+ }
+
+ /* Enough data in fifo? Must account for the number of bytes the driver
+ * is currently buffering
+ */
+ if (ByteCount > (MaxBytes + PartialBytes)) {
+ return (XST_NO_DATA);
+ }
+
+ Read64_Unaligned(Fptr, BufPtr, PartialBytes);
+ Read64_Aligned(Fptr, (u32 *) ((u32) BufPtr + PartialBytes),
+ ByteCount - PartialBytes);
+ }
+
+ /* Case 3: No alignment to take advantage of
+ *
+ * 1. Read FIFOs using the slower method.
+ */
+ else {
+ /* Enough data in fifo? Must account for the number of bytes the driver
+ * is currently buffering
+ */
+ PartialBytes = PFIFO_64BIT_WIDTH_BYTES - HoldAlignment;
+ if (ByteCount > (MaxBytes + PartialBytes)) {
+ return (XST_NO_DATA);
+ }
+
+ Read64_Unaligned(Fptr, BufPtr, ByteCount);
+ }
+
+ /* If this marks the end of packet, then dump any remaining data in the
+ * hold. The dumped data in this context is meaningless.
+ */
+ if (Eop == XTE_END_OF_PACKET) {
+ mHoldR_SetEmpty(Fptr);
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/*******************************************************************************
+* Write to the 64 bit holding buffer. Each time it becomes full, then it is
+* pushed to the transmit FIFO.
+*
+* @param F is a pointer to the packet FIFO instance to be worked on.
+* @param BufPtr is the source buffer address on any alignment
+* @param ByteCount is the number of bytes to transfer
+*
+*******************************************************************************/
+static void Write64_Unaligned(XTemac_PacketFifo *F, void *BufPtr, u32 ByteCount)
+{
+ u8 *SrcPtr = (u8 *) BufPtr;
+ unsigned FifoTransfersLeft;
+ unsigned PartialBytes;
+ unsigned BytesLeft;
+ unsigned i;
+
+ /* Stage 1: The hold may be partially full. Write enough bytes to it to
+ * cause a push to the FIFO
+ */
+
+ /* Calculate the number of bytes needed to trigger a push, if not enough
+ * bytes have been specified to cause a push, then adjust accordingly
+ */
+ i = mHold_GetIndex(F);
+ PartialBytes = PFIFO_64BIT_WIDTH_BYTES - i;
+ if (PartialBytes > ByteCount) {
+ PartialBytes = ByteCount;
+ }
+
+ /* Calculate the number of bytes remaining after the first push */
+ BytesLeft = ByteCount - PartialBytes;
+
+ /* Advance the hold's index first; the copy loop below consumes
+ * PartialBytes, so 'i' retains the pre-advance position for indexing
+ */
+ mHold_Advance(F, PartialBytes);
+
+ while (PartialBytes--) {
+ mHold_CopyIn(F, i, *SrcPtr);
+ SrcPtr++;
+ i++;
+ }
+
+ /* Push to fifo if needed */
+ if (mHoldS_IsFull(F)) {
+ mPush64(F);
+ mHoldS_SetEmpty(F);
+ }
+
+ /* No more data to process */
+ if (!BytesLeft) {
+ return;
+ }
+
+ /* Stage 2: The hold is empty now, if any more bytes are left to process, then
+ * it will begin with nothing in the hold. Use the hold as a temporary storage
+ * area to contain the data.
+ *
+ * The hold is filled then pushed out to the FIFOs a number of times based on
+ * how many bytes are left to process.
+ */
+
+ /* Calculate the number of times a push will need to occur */
+ FifoTransfersLeft = BytesLeft / PFIFO_64BIT_WIDTH_BYTES;
+
+ /* Calculate the number of partial bytes left after this stage */
+ PartialBytes =
+ BytesLeft - (FifoTransfersLeft * PFIFO_64BIT_WIDTH_BYTES);
+
+ /* Write to the hold and push data to the FIFO */
+ while (FifoTransfersLeft--) {
+ for (i = 0; i < PFIFO_64BIT_WIDTH_BYTES; i++) {
+ mHold_CopyIn(F, i, *SrcPtr);
+ SrcPtr++;
+ }
+ mPush64(F);
+ }
+
+ /* No more data to process
+ * HoldIndex was left at 0 by stage 1, at this point, that is
+ * still the correct value.
+ */
+ if (!PartialBytes) {
+ return;
+ }
+
+ /* Stage 3: All that is left is to fill the hold with the remaining data
+ * to be processed. There will be no push to the FIFO because there is not
+ * enough data left to cause one.
+ */
+
+ /* Write the remaining bytes into the hold; no push occurs here */
+ for (i = 0; i < PartialBytes; i++) {
+ mHold_CopyIn(F, i, *SrcPtr);
+ SrcPtr++;
+ }
+
+ /* Set the hold's index to its final correct value */
+ mHold_SetIndex(F, PartialBytes);
+}
+
+
+/*******************************************************************************
+* Write directly to the 64 bit wide transmit FIFO from an aligned source
+* buffer. Leftover bytes are written to the holding buffer.
+*
+* @param F is a pointer to the packet FIFO instance to be worked on.
+* @param BufPtr is the source buffer address on 32-bit alignment
+* @param ByteCount is the number of bytes to transfer
+*
+*******************************************************************************/
+static void Write64_Aligned(XTemac_PacketFifo *F, u32 *BufPtr, u32 ByteCount)
+{
+ unsigned FifoTransfersLeft = ByteCount / PFIFO_64BIT_WIDTH_BYTES;
+ unsigned PartialBytes = ByteCount & (PFIFO_64BIT_WIDTH_BYTES - 1);
+
+ /* Direct transfer */
+ while (FifoTransfersLeft--) {
+ mWriteFifo64(F, BufPtr);
+ BufPtr += 2; /* advance two 32-bit words = one 64-bit FIFO width */
+ }
+
+ /* Leftover bytes are left in the holding area */
+ if (PartialBytes) {
+ Write64_Unaligned(F, BufPtr, PartialBytes);
+ }
+}
+
+
+/*******************************************************************************
+* Read into the 64 bit holding buffer from the receive packet FIFO.
+* Each time the holding buffer becomes full, then it is flushed to the
+* provided buffer.
+*
+* @param F is a pointer to the packet FIFO instance to be worked on.
+* @param BufPtr is the destination buffer address on any alignment
+* @param ByteCount is the number of bytes to transfer
+*
+*******************************************************************************/
+static void Read64_Unaligned(XTemac_PacketFifo *F, void *BufPtr, u32 ByteCount)
+{
+ u8 *DestPtr = (u8 *) BufPtr;
+ unsigned FifoTransfersLeft;
+ unsigned PartialBytes;
+ unsigned BytesLeft;
+ unsigned i;
+
+ /* Stage 1: The hold may have some residual bytes that must be flushed
+ * to the buffer before anything is read from the FIFO
+ */
+
+ /* Calculate the number of bytes to flush to the buffer from the hold.
+ * If the number of bytes to flush is greater than the "Bytes" requested,
+ * then adjust accordingly.
+ */
+ i = mHold_GetIndex(F);
+ PartialBytes = PFIFO_64BIT_WIDTH_BYTES - i;
+
+ if (PartialBytes > ByteCount) {
+ PartialBytes = ByteCount;
+ }
+
+ /* Calculate the number of bytes remaining after flushing to the buffer */
+ BytesLeft = ByteCount - PartialBytes;
+
+ /* Move the hold's index forward */
+ mHold_Advance(F, PartialBytes);
+
+ /* Copy bytes */
+ while (PartialBytes--) {
+ mHold_CopyOut(F, i, *DestPtr);
+ i++;
+ DestPtr++;
+ }
+
+ /* No more data to process */
+ if (!BytesLeft) {
+ return;
+ }
+
+ /* Stage 2: The hold is empty now, if any more bytes are left to process, then
+ * it will begin with nothing in the hold. Use the hold as a temporary storage
+ * area to contain the data.
+ *
+ * The hold is filled with FIFO data, then that data is written to the buffer.
+ * Do this FifoTransfersLeft times
+ */
+
+ /* Calculate the number of times a push will need to occur */
+ FifoTransfersLeft = BytesLeft / PFIFO_64BIT_WIDTH_BYTES;
+
+ /* Calculate the number of partial bytes left after this stage */
+ PartialBytes =
+ BytesLeft - (FifoTransfersLeft * PFIFO_64BIT_WIDTH_BYTES);
+
+ /* Pop FIFO data into the hold, then copy the hold to the buffer */
+ while (FifoTransfersLeft--) {
+ /* Load the hold with the next data set from the FIFO */
+ mPop64(F);
+
+ /* Write hold to buffer */
+ for (i = 0; i < PFIFO_64BIT_WIDTH_BYTES; i++) {
+ mHold_CopyOut(F, i, *DestPtr);
+ DestPtr++;
+ }
+ }
+
+ /* No more data to process
+ * After processing full FIFO chunks of data, the hold is empty at this
+ * point
+ */
+ if (!PartialBytes) {
+ return;
+ }
+
+ /* Stage 3: All that is left is to fill the hold one more time with FIFO
+ * data, then write the remaining requested bytes to the buffer
+ */
+
+ /* Get FIFO data */
+ mPop64(F);
+
+ /* Copy bytes from the hold to the buffer */
+ for (i = 0; i < PartialBytes; i++) {
+ mHold_CopyOut(F, i, *DestPtr);
+ DestPtr++;
+ }
+
+ /* Set the hold's index to its final correct value */
+ mHold_SetIndex(F, PartialBytes);
+}
+
+
+/*******************************************************************************
+* Read directly from the 64 bit wide receive FIFO into an aligned destination
+* buffer. Leftover bytes are written to the holding buffer.
+*
+* @param F is a pointer to the packet FIFO instance to be worked on.
+* @param BufPtr is the destination buffer address on 32-bit alignment
+* @param ByteCount is the number of bytes to transfer
+*
+*******************************************************************************/
+static void Read64_Aligned(XTemac_PacketFifo *F, u32 *BufPtr, u32 ByteCount)
+{
+ unsigned FifoTransfersLeft = ByteCount / PFIFO_64BIT_WIDTH_BYTES;
+ unsigned PartialBytes = ByteCount & (PFIFO_64BIT_WIDTH_BYTES - 1);
+
+ /* Direct transfer */
+ while (FifoTransfersLeft--) {
+ mReadFifo64(F, BufPtr);
+ BufPtr += 2; /* advance two 32-bit words = one 64-bit FIFO width */
+ }
+
+ /* Leftover bytes are left in the holding area */
+ if (PartialBytes) {
+ Read64_Unaligned(F, BufPtr, PartialBytes);
+ }
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_i.h
+*
+* This header file contains internal identifiers, which are those shared
+* between XTemac components. The identifiers in this file are not intended for
+* use external to the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Removed simple dma
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XTEMAC_I_H /* prevent circular inclusions */
+#define XTEMAC_I_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/* Internal flags kept in Instance's Flags attribute. The FIFO flags are
+ * set/cleared by XTemac_IntrFifoEnable/XTemac_IntrFifoDisable; the SGDMA
+ * flags are presumably managed by the SG DMA interrupt routines - confirm.
+ */
+#define XTE_FLAGS_RECV_SGDMA_INT_ENABLE 0x0020
+#define XTE_FLAGS_SEND_SGDMA_INT_ENABLE 0x0010
+#define XTE_FLAGS_RECV_FIFO_INT_ENABLE 0x0002
+#define XTE_FLAGS_SEND_FIFO_INT_ENABLE 0x0001
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************
+* Statistics increment macros
+* The referenced InstancePtr is an implicitly assumed parameter.
+*
+* Defined as a parenthesized expression with no trailing semicolon so the
+* ';' at each call site forms exactly one statement. The previous form
+* embedded a ';' in the macro body, expanding every use into a stray empty
+* statement (";;") and breaking use under if/else without braces.
+******************************************************************************/
+#define XTemac_mBumpStats(Counter, Value) \
+ (InstancePtr->Stats.Counter += (Value))
+
+
+/*****************************************************************************
+* Register accessors.
+*
+* The goal of these four macros is to make the code look cleaner. They
+* simply wrap to the level 0 macros defined in xtemac_l.h.
+*
+* The referenced InstancePtr is an implicitly assumed parameter.
+*
+******************************************************************************/
+#define XTemac_mGetHostReg(RegOffset) \
+ XTemac_mReadHostReg(InstancePtr->BaseAddress, RegOffset)
+
+#define XTemac_mSetHostReg(RegOffset, Data) \
+ XTemac_mWriteHostReg(InstancePtr->BaseAddress, RegOffset, Data)
+
+#define XTemac_mGetIpifReg(RegOffset) \
+ XTemac_mReadReg(InstancePtr->BaseAddress, RegOffset)
+
+#define XTemac_mSetIpifReg(RegOffset, Data) \
+ XTemac_mWriteReg(InstancePtr->BaseAddress, RegOffset, Data)
+
+
+
+/************************** Function Prototypes ******************************/
+
+int XTemac_ConfigureFifoAccess(XTemac *InstancePtr);
+
+/************************** Variable Definitions *****************************/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_intr.c
+*
+* Functions in this file implement general purpose interrupt processing related
+* functionality. See xtemac.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Added auto-negotiation callback, removed simple DMA
+* callback
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * Install an asynchronous handler function for the given HandlerType:
+ *
+ * <pre>
+ * HandlerType Function Type
+ * ------------------------ ---------------------------
+ * XTE_HANDLER_FIFOSEND XTemac_FifoSendHandler
+ * XTE_HANDLER_FIFORECV XTemac_FifoRecvHandler
+ * XTE_HANDLER_ANEG XTemac_AnegHandler
+ * XTE_HANDLER_SGSEND XTemac_SgHandler
+ * XTE_HANDLER_SGRECV XTemac_SgHandler
+ * XTE_HANDLER_ERROR XTemac_ErrorHandler
+ *
+ * HandlerType Invoked by this driver when:
+ * ------------------------ --------------------------------------------------
+ * XTE_HANDLER_FIFOSEND A packet transmitted by a call to
+ * XTemac_FifoSend() has been sent successfully.
+ * XTE_HANDLER_FIFORECV When a packet has been received and is sitting in
+ * the packet FIFO.
+ * XTE_HANDLER_ANEG Auto negotiation interrupt is asserted by HW and
+ * XTE_ANEG_OPTION is set.
+ * XTE_HANDLER_SGSEND SG DMA has completed an operation on the transmit
+ * side. Transmitted buffer descriptors require post
+ * processing.
+ * XTE_HANDLER_SGRECV SG DMA has completed an operation on the receive
+ * side. Buffer descriptors contain received packets.
+ * XTE_HANDLER_ERROR Any type of error has been detected.
+ * </pre>
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param HandlerType specifies which handler is to be attached.
+ * @param CallbackFunc is the address of the callback function
+ * @param CallbackRef is a user data item that will be passed to the callback
+ * when it is invoked.
+ *
+ * @return
+ * - XST_SUCCESS when handler is installed.
+ * - XST_INVALID_PARAM when HandlerType is invalid
+ *
+ * @note
+ * Invoking this function for a handler that already has been installed replaces
+ * it with the new handler.
+ *
+ ******************************************************************************/
+int XTemac_SetHandler(XTemac *InstancePtr, u32 HandlerType,
+ void *CallbackFunc, void *CallbackRef)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_NONVOID(CallbackFunc != NULL);
+
+ /* Each case records the callback (cast to its specific handler type)
+ * and the caller-supplied reference that will be passed back to it.
+ * Installing over an existing handler simply replaces it.
+ */
+ switch (HandlerType) {
+ case XTE_HANDLER_FIFOSEND:
+ InstancePtr->FifoSendHandler =
+ (XTemac_FifoSendHandler) CallbackFunc;
+ InstancePtr->FifoSendRef = CallbackRef;
+ break;
+
+ case XTE_HANDLER_FIFORECV:
+ InstancePtr->FifoRecvHandler =
+ (XTemac_FifoRecvHandler) CallbackFunc;
+ InstancePtr->FifoRecvRef = CallbackRef;
+ break;
+
+ case XTE_HANDLER_ANEG:
+ InstancePtr->AnegHandler = (XTemac_AnegHandler) CallbackFunc;
+ InstancePtr->AnegRef = CallbackRef;
+ break;
+
+ case XTE_HANDLER_SGSEND:
+ InstancePtr->SgSendHandler = (XTemac_SgHandler) CallbackFunc;
+ InstancePtr->SgSendRef = CallbackRef;
+ break;
+
+ case XTE_HANDLER_SGRECV:
+ InstancePtr->SgRecvHandler = (XTemac_SgHandler) CallbackFunc;
+ InstancePtr->SgRecvRef = CallbackRef;
+ break;
+
+ case XTE_HANDLER_ERROR:
+ InstancePtr->ErrorHandler = (XTemac_ErrorHandler) CallbackFunc;
+ InstancePtr->ErrorRef = CallbackRef;
+ break;
+
+ default:
+ return (XST_INVALID_PARAM);
+
+ }
+ return (XST_SUCCESS);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_intr_fifo.c
+*
+* Functions in this file implement interrupt related operations for
+* FIFO direct frame transfer mode. See xtemac.h for a detailed description of
+* the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Removed simple DMA, added auto-negotiate handling,
+* removed XST_SEND_ERROR reporting
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/* Shortcut macros for invoking the installed user callbacks. Each expands
+ * against a local InstancePtr variable that must be in scope at the point
+ * of use.
+ */
+#define ERR_HANDLER(Class, Word1, Word2) \
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef, Class, Word1, Word2)
+
+#define FIFOSEND_HANDLER(Cnt) \
+ InstancePtr->FifoSendHandler(InstancePtr->FifoSendRef, Cnt)
+
+#define FIFORECV_HANDLER() \
+ InstancePtr->FifoRecvHandler(InstancePtr->FifoRecvRef)
+
+#define ANEG_HANDLER() \
+ InstancePtr->AnegHandler(InstancePtr->AnegRef)
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ *
+ * Enable FIFO related interrupts for FIFO direct frame transfer mode. Dma
+ * interrupts are not affected.
+ *
+ * Do not use this function when using SG DMA frame transfer mode.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction specifies whether the transmit related (XTE_SEND) or
+ * receive related (XTE_RECV) interrupts should be affected, or
+ * both (XTE_SEND | XTE_RECV).
+ *
+ * @return None
+ *
+ * @note The state of the transmitter and receiver are not modified by this
+ * function.
+ *
+ * @note If the device is configured for SGDMA, then this function has no
+ * effect. Use XTemac_IntrSgDmaEnable() instead.
+ *
+ ******************************************************************************/
+void XTemac_IntrFifoEnable(XTemac *InstancePtr, u32 Direction)
+{
+ u32 RegIPIER;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(!(Direction & ~(XTE_SEND | XTE_RECV)));
+
+ /* Don't allow if device is configured for SGDMA */
+ if (XTemac_mIsSgDma(InstancePtr)) {
+ return;
+ }
+
+ /* Get contents of IPIER register */
+ RegIPIER = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+
+ /* Handle send direction */
+ if (Direction & XTE_SEND) {
+ RegIPIER |=
+ (XTE_IPXR_XMIT_ERROR_MASK | XTE_IPXR_XMIT_DONE_MASK);
+ InstancePtr->Flags |= XTE_FLAGS_SEND_FIFO_INT_ENABLE;
+
+ /* Don't allow Tx status overrun interrupt if option is cleared */
+ if (!(InstancePtr->Options & XTE_REPORT_TXSTATUS_OVERRUN_OPTION)) {
+ RegIPIER &= ~XTE_IPXR_XMIT_SFIFO_OVER_MASK;
+ }
+ }
+
+ /* Handle receive direction */
+ if (Direction & XTE_RECV) {
+ RegIPIER |=
+ (XTE_IPXR_RECV_ERROR_MASK | XTE_IPXR_RECV_DONE_MASK);
+ InstancePtr->Flags |= XTE_FLAGS_RECV_FIFO_INT_ENABLE;
+
+ /* Don't enable recv reject errors if option is cleared */
+ if (!(InstancePtr->Options & XTE_REPORT_RXERR_OPTION)) {
+ RegIPIER &= ~XTE_IPXR_RECV_DROPPED_MASK;
+ }
+ }
+
+ /* Update IPIER with new setting */
+ XTemac_mSetIpifReg(XTE_IPIER_OFFSET, RegIPIER);
+}
+
+
+/*****************************************************************************/
+/**
+ *
+ * Disable FIFO related interrupts for FIFO direct frame transfer mode. Dma
+ * interrupts are not affected.
+ *
+ * Do not use this function when using SG DMA frame transfer mode.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction specifies whether the transmit related (XTE_SEND) or
+ * receive related (XTE_RECV) interrupts should be affected, or
+ * both (XTE_SEND | XTE_RECV).
+ *
+ * @return None
+ *
+ * @note The state of the transmitter and receiver are not modified by this
+ * function.
+ *
+ * @note Only interrupt enables (IPIER) are cleared; pending interrupt
+ * status bits are not acknowledged here.
+ *
+ * @note If the device is configured for SGDMA, then this function has no
+ * effect. Use XTemac_IntrSgDmaDisable() instead.
+ *
+ ******************************************************************************/
+void XTemac_IntrFifoDisable(XTemac *InstancePtr, u32 Direction)
+{
+ u32 RegIPIER;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(!(Direction & ~(XTE_SEND | XTE_RECV)));
+
+ /* Don't allow if device is configured for SGDMA */
+ if (XTemac_mIsSgDma(InstancePtr)) {
+ return;
+ }
+
+ /* Get contents of IPIER register */
+ RegIPIER = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+
+ /* Handle send direction */
+ if (Direction & XTE_SEND) {
+ RegIPIER &=
+ ~(XTE_IPXR_XMIT_ERROR_MASK | XTE_IPXR_XMIT_DONE_MASK);
+ InstancePtr->Flags &= ~XTE_FLAGS_SEND_FIFO_INT_ENABLE;
+ }
+
+ /* Handle receive direction */
+ if (Direction & XTE_RECV) {
+ RegIPIER &=
+ ~(XTE_IPXR_RECV_ERROR_MASK | XTE_IPXR_RECV_DONE_MASK);
+ InstancePtr->Flags &= ~XTE_FLAGS_RECV_FIFO_INT_ENABLE;
+ }
+
+ /* Update IPIER with new setting */
+ XTemac_mSetIpifReg(XTE_IPIER_OFFSET, RegIPIER);
+}
+
+
+
+/*****************************************************************************/
+/**
+ *
+ * Master interrupt handler for FIFO direct frame transfer mode. This routine
+ * will query the status of the device, bump statistics, and invoke user
+ * callbacks in the following priority:
+ *
+ * This routine must be connected to an interrupt controller using OS/BSP
+ * specific methods.
+ *
+ * @param InstancePtr is a pointer to the TEMAC instance that has caused the
+ * interrupt.
+ *
+ * @return None
+ *
+ ******************************************************************************/
+void XTemac_IntrFifoHandler(void *TemacPtr)
+{
+ u32 RegDISR;
+ u32 CorePending;
+ u32 RegMisc;
+ unsigned Cnt;
+ XTemac *InstancePtr = (XTemac *) TemacPtr;
+
+ XASSERT_VOID(InstancePtr != NULL);
+
+ /* This ISR will try to handle as many interrupts as it can in a single
+ * call. However, in most of the places where the user's error handler is
+ * called, this ISR exits because it is expected that the user will reset
+ * the device most of the time.
+ */
+
+ /* Log interrupt */
+ XTemac_mBumpStats(Interrupts, 1);
+
+ /* Get top level interrupt status. The status is self clearing when the
+ * interrupt source is cleared
+ */
+ RegDISR = XTemac_mGetIpifReg(XTE_DISR_OFFSET);
+
+ /* Handle IPIF and packet FIFO errors. Each of these is treated as
+ * fatal: the error callback is invoked and the ISR returns immediately.
+ */
+ if (RegDISR & (XTE_DXR_DPTO_MASK | XTE_DXR_TERR_MASK |
+ XTE_DXR_RECV_FIFO_MASK | XTE_DXR_SEND_FIFO_MASK)) {
+ /* IPIF transaction or data phase error */
+ if (RegDISR & (XTE_DXR_DPTO_MASK | XTE_DXR_TERR_MASK)) {
+ XTemac_mBumpStats(IpifErrors, 1);
+ ERR_HANDLER(XST_IPIF_ERROR, RegDISR, 0);
+ return;
+ }
+
+ /* Receive packet FIFO is deadlocked */
+ if (RegDISR & XTE_DXR_RECV_FIFO_MASK) {
+ XTemac_mBumpStats(RxPktFifoErrors, 1);
+ ERR_HANDLER(XST_PFIFO_DEADLOCK, XTE_RECV, 0);
+ return;
+ }
+
+ /* Transmit packet FIFO is deadlocked */
+ if (RegDISR & XTE_DXR_SEND_FIFO_MASK) {
+ XTemac_mBumpStats(TxPktFifoErrors, 1);
+ ERR_HANDLER(XST_PFIFO_DEADLOCK, XTE_SEND, 0);
+ return;
+ }
+ }
+
+ /* Handle core interrupts */
+ if (RegDISR & XTE_DXR_CORE_MASK) {
+ /* Calculate which enabled interrupts have been asserted */
+ CorePending = XTemac_mGetIpifReg(XTE_IPIER_OFFSET) &
+ XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+
+ /* Check for fatal status/length FIFO errors. These errors can't be
+ * cleared
+ */
+ if (CorePending & XTE_IPXR_FIFO_FATAL_ERROR_MASK) {
+ XTemac_mBumpStats(FifoErrors, 1);
+ ERR_HANDLER(XST_FIFO_ERROR, CorePending &
+ XTE_IPXR_FIFO_FATAL_ERROR_MASK, 0);
+ return;
+ }
+
+ /* A receive packet has arrived. Call the receive handler.
+ *
+ * Acking this interrupt is not done here. The handler has a choice:
+ * 1) Call XTemac_FifoRecv() which will ack this interrupt source, or
+ * 2) Call XTemac_IntrFifoDisable() and defer XTEmac_FifoRecv() to a
+ * later time. Failure to do one of these actions will leave this
+ * interrupt still pending resulting in an exception loop.
+ */
+ if (CorePending & XTE_IPXR_RECV_DONE_MASK) {
+ FIFORECV_HANDLER();
+ }
+
+ /* A transmit has completed. Pull off all statuses that are available.
+ * For each status that contains a non-fatal error, the error handler
+ * is invoked. For fatal errors, the error handler is invoked once and
+ * assumes the callback will reset the device.
+ *
+ * Unless there was a fatal error, then call the send handler since
+ * resources in the packet FIFO, transmit length FIFO, and transmit
+ * status FIFO have been freed up. This gives the handler a chance
+ * to enqueue new frame(s).
+ */
+ if (CorePending & XTE_IPXR_XMIT_DONE_MASK) {
+ Cnt = 0;
+
+ /* While XMIT_DONE persists */
+ do {
+ /* Get TSR, try to clear XMIT_DONE */
+ RegMisc = XTemac_mGetIpifReg(XTE_TSR_OFFSET);
+ /* NOTE(review): the TSR value read above is
+ * discarded; the read itself presumably pops the
+ * Tx status FIFO so XMIT_DONE can clear -- confirm
+ * against the HW spec.
+ */
+ XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+ XTE_IPXR_XMIT_DONE_MASK);
+
+ Cnt++;
+
+ /* Read IPISR and test XMIT_DONE again */
+ RegMisc = XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+ } while (RegMisc & XTE_IPXR_XMIT_DONE_MASK);
+
+ /* Cnt = number of completed transmit statuses drained */
+ FIFOSEND_HANDLER(Cnt);
+ }
+
+ /* Auto negotiation interrupt */
+ if (CorePending & XTE_IPXR_AUTO_NEG_MASK) {
+ ANEG_HANDLER();
+ }
+
+ /* Check for dropped receive frame. Ack the interrupt then call the
+ * error handler
+ */
+ if (CorePending & XTE_IPXR_RECV_DROPPED_MASK) {
+ XTemac_mSetIpifReg(XTE_IPISR_OFFSET,
+ CorePending &
+ XTE_IPXR_RECV_DROPPED_MASK);
+
+ XTemac_mBumpStats(RxRejectErrors, 1);
+ ERR_HANDLER(XST_RECV_ERROR,
+ CorePending & XTE_IPXR_RECV_DROPPED_MASK,
+ 0);
+
+ /* no return here, nonfatal error */
+ }
+ }
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_intr_sgdma.c
+*
+* Functions in this file implement interrupt related operations for
+* scatter gather DMA packet transfer mode. See xtemac.h for a detailed
+* description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Switched to local link DMA driver
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/* shortcut macros
+ *
+ * Each macro expands to a call through the corresponding user callback
+ * stored in the XTemac instance. They rely on a local variable named
+ * InstancePtr being in scope at the expansion site (see the ISR below).
+ */
+#define ERR_HANDLER(Class, Word1, Word2) \
+ InstancePtr->ErrorHandler(InstancePtr->ErrorRef, Class, Word1, Word2)
+
+#define SGSEND_HANDLER() \
+ InstancePtr->SgSendHandler(InstancePtr->SgSendRef)
+
+#define SGRECV_HANDLER() \
+ InstancePtr->SgRecvHandler(InstancePtr->SgRecvRef)
+
+#define ANEG_HANDLER() \
+ InstancePtr->AnegHandler(InstancePtr->AnegRef)
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+*
+* Enable DMA related interrupts for SG DMA frame transfer mode.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Direction specifies whether the transmit related (XTE_SEND) or
+* receive related (XTE_RECV) interrupts should be affected, or
+* both (XTE_SEND | XTE_RECV).
+*
+* @note
+* The state of the transmitter and receiver are not modified by this function.
+*
+******************************************************************************/
+void XTemac_IntrSgEnable(XTemac *InstancePtr, u32 Direction)
+{
+ u32 RegIPIER;
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(!(Direction & ~(XTE_SEND | XTE_RECV)));
+
+ /* Get current contents of core's IER. Depending on direction(s)
+ * specified, status/length FIFO error interrupt enables will be enabled
+ */
+ RegIPIER = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+
+ /* Set interrupts for transmit DMA channel */
+ if (Direction & XTE_SEND) {
+ /* DMA interrupt enable */
+ InstancePtr->Flags |= XTE_FLAGS_SEND_SGDMA_INT_ENABLE;
+
+ /* Mask in core's transmit interrupt enables */
+ RegIPIER |= (XTE_IPXR_XMIT_DMA_MASK | XTE_IPXR_XMIT_ERROR_MASK);
+ }
+
+ /* Set interrupts for receive DMA channel */
+ if (Direction & XTE_RECV) {
+ /* DMA interrupt enable */
+ InstancePtr->Flags |= XTE_FLAGS_RECV_SGDMA_INT_ENABLE;
+
+ /* Mask in core's receive interrupt enables */
+ RegIPIER |= (XTE_IPXR_RECV_DMA_MASK | XTE_IPXR_RECV_ERROR_MASK);
+
+ /* Don't enable recv reject errors if option is cleared.
+ * XTE_IPXR_RECV_ERROR_MASK above may include the dropped-frame
+ * bit, so it is explicitly masked back out here.
+ */
+ if (!(InstancePtr->Options & XTE_REPORT_RXERR_OPTION)) {
+ RegIPIER &= ~XTE_IPXR_RECV_DROPPED_MASK;
+ }
+ }
+
+ /* Update core interrupt enables */
+ XTemac_mSetIpifReg(XTE_IPIER_OFFSET, RegIPIER);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Disable DMA related interrupts for SG DMA frame transfer mode.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Direction specifies whether the transmit related (XTE_SEND) or
+* receive related (XTE_RECV) interrupts should be affected, or
+* both (XTE_SEND | XTE_RECV).
+*
+* @note
+* The state of the transmitter and receiver are not modified by this function.
+*
+******************************************************************************/
+void XTemac_IntrSgDisable(XTemac *InstancePtr, u32 Direction)
+{
+ u32 RegIPIER;
+
+ /* Mirror image of XTemac_IntrSgEnable(): clear the per-direction
+ * driver flags and mask the matching DMA/error enables out of IPIER.
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+ XASSERT_VOID(!(Direction & ~(XTE_SEND | XTE_RECV)));
+
+ /* Get contents of IPIER register */
+ RegIPIER = XTemac_mGetIpifReg(XTE_IPIER_OFFSET);
+
+ if (Direction & XTE_SEND) {
+ /* Disable DMA channel interrupt */
+ InstancePtr->Flags &= ~XTE_FLAGS_SEND_SGDMA_INT_ENABLE;
+
+ /* Mask out core's transmit interrupt enables */
+ RegIPIER &=
+ ~(XTE_IPXR_XMIT_DMA_MASK | XTE_IPXR_XMIT_ERROR_MASK);
+ }
+
+ if (Direction & XTE_RECV) {
+ /* Disable DMA channel interrupt */
+ InstancePtr->Flags &= ~XTE_FLAGS_RECV_SGDMA_INT_ENABLE;
+
+ /* Mask out core's receive interrupt enables */
+ RegIPIER &=
+ ~(XTE_IPXR_RECV_DMA_MASK | XTE_IPXR_RECV_ERROR_MASK);
+ }
+
+ /* Update IPIER with new setting */
+ XTemac_mSetIpifReg(XTE_IPIER_OFFSET, RegIPIER);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Set the SGDMA interrupt coalescing parameters. The device must be stopped
+* before setting these parameters. See xtemac.h for a complete discussion of
+* the interrupt coalescing features of this device.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Direction indicates the channel, XTE_SEND or XTE_RECV, to set.
+* @param Threshold is the value of the packet threshold count used during
+* interrupt coalescing. Valid range is 0 - 1023. A value of 0 disables
+* the use of packet threshold by the hardware.
+* @param Timer is the waitbound timer value in units of approximately
+* milliseconds. Valid range is 0 - 1023. A value of 0 disables the use
+* of the waitbound timer by the hardware.
+*
+* @return
+* - XST_SUCCESS if the threshold was successfully set
+* - XST_NO_FEATURE if the MAC is not configured for scatter-gather DMA
+* - XST_DEVICE_IS_STARTED if the device has not been stopped
+* - XST_INVALID_PARAM if Direction does not indicate a valid channel
+*
+******************************************************************************/
+int XTemac_IntrSgCoalSet(XTemac *InstancePtr, u32 Direction,
+ u16 Threshold, u16 Timer)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Must be SGDMA */
+ if (!XTemac_mIsSgDma(InstancePtr)) {
+ return (XST_NO_FEATURE);
+ }
+
+ /* Device must be stopped before changing these settings */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Set HW.
+ * NOTE(review): the return status of XDmaV3_SgSetPktThreshold() is
+ * deliberately discarded (cast to void) in both directions -- an
+ * out-of-range Threshold would not be reported to the caller here.
+ */
+ if (Direction == XTE_SEND) {
+ (void) XDmaV3_SgSetPktThreshold(&InstancePtr->SendDma,
+ Threshold);
+ XDmaV3_SgSetPktWaitbound(&InstancePtr->SendDma, Timer);
+ }
+ else if (Direction == XTE_RECV) {
+ (void) XDmaV3_SgSetPktThreshold(&InstancePtr->RecvDma,
+ Threshold);
+ XDmaV3_SgSetPktWaitbound(&InstancePtr->RecvDma, Timer);
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Get the current interrupt coalescing settings. See xtemac.h for more
+* discussion of interrupt coalescing features.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Direction indicates the channel, XTE_SEND or XTE_RECV, to get.
+* @param ThresholdPtr is a pointer to the word into which the current value of
+* the packet threshold will be copied.
+* @param TimerPtr is a pointer to the word into which the current value of the
+* waitbound timer will be copied.
+*
+* @return
+* - XST_SUCCESS if the packet threshold was retrieved successfully
+* - XST_NO_FEATURE if the MAC is not configured for scatter-gather DMA
+* - XST_INVALID_PARAM if Direction does not indicate a valid channel
+*
+******************************************************************************/
+int XTemac_IntrSgCoalGet(XTemac *InstancePtr, u32 Direction,
+ u16 *ThresholdPtr, u16 *TimerPtr)
+{
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(ThresholdPtr != NULL);
+ /* NOTE(review): TimerPtr is dereferenced below but, unlike
+ * ThresholdPtr, is not asserted non-NULL -- flag for a follow-up fix.
+ */
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Must be SGDMA */
+ if (!XTemac_mIsSgDma(InstancePtr)) {
+ return (XST_NO_FEATURE);
+ }
+
+ /* Get data from HW */
+ if (Direction == XTE_SEND) {
+ *ThresholdPtr = XDmaV3_SgGetPktThreshold(&InstancePtr->SendDma);
+ *TimerPtr = XDmaV3_SgGetPktWaitbound(&InstancePtr->SendDma);
+ }
+ else if (Direction == XTE_RECV) {
+ *ThresholdPtr = XDmaV3_SgGetPktThreshold(&InstancePtr->RecvDma);
+ *TimerPtr = XDmaV3_SgGetPktWaitbound(&InstancePtr->RecvDma);
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+* Master interrupt handler for SGDMA frame transfer mode. This routine will
+* query the status of the device, bump statistics, and invoke user callbacks
+* in priority order: fatal errors first, then receive DMA, transmit DMA,
+* auto-negotiation, and dropped-frame events.
+*
+* This routine must be connected to an interrupt controller using OS/BSP
+* specific methods.
+*
+* @param InstancePtr is a pointer to the TEMAC instance that has caused the
+* interrupt.
+*
+******************************************************************************/
+void XTemac_IntrSgHandler(void *TemacPtr)
+{
+ u32 RegDISR;
+ u32 CorePending = 0;
+ u32 RegDmaPending;
+ XTemac *InstancePtr = (XTemac *) TemacPtr;
+
+ XASSERT_VOID(InstancePtr != NULL);
+
+ /* This ISR will try to handle as many interrupts as it can in a single
+ * call. However, in most of the places where the user's error handler is
+ * called, this ISR exits because it is expected that the user will reset
+ * the device in nearly all instances.
+ */
+
+ /* Log interrupt */
+ XTemac_mBumpStats(Interrupts, 1);
+
+ /* Get top level interrupt status */
+ RegDISR = XTemac_mGetIpifReg(XTE_DISR_OFFSET);
+
+ /* IPIF transaction or data phase error */
+ if (RegDISR & (XTE_DXR_DPTO_MASK | XTE_DXR_TERR_MASK)) {
+ XTemac_mBumpStats(IpifErrors, 1);
+ ERR_HANDLER(XST_IPIF_ERROR, RegDISR, 0);
+ return;
+ }
+
+ /* Handle core interrupts */
+ if (RegDISR & XTE_DXR_CORE_MASK) {
+ /* Get currently pending core interrupts */
+ CorePending = XTemac_mGetIpifReg(XTE_IPIER_OFFSET) &
+ XTemac_mGetIpifReg(XTE_IPISR_OFFSET);
+
+ /* Check for fatal status/length FIFO errors. These errors can't be
+ * cleared
+ */
+ if (CorePending & XTE_IPXR_FIFO_FATAL_ERROR_MASK) {
+ XTemac_mBumpStats(FifoErrors, 1);
+ ERR_HANDLER(XST_FIFO_ERROR,
+ CorePending &
+ XTE_IPXR_FIFO_FATAL_ERROR_MASK, 0);
+ return;
+ }
+
+ /* Check for SGDMA receive interrupts */
+ if (CorePending & XTE_IPXR_RECV_DMA_MASK) {
+ /* Pending = enabled & asserted DMA channel interrupts */
+ RegDmaPending =
+ XDmaV3_GetInterruptStatus(&InstancePtr->
+ RecvDma) &
+ XDmaV3_GetInterruptEnable(&InstancePtr->
+ RecvDma);
+
+ /* Ack everything sampled above before dispatching */
+ XDmaV3_SetInterruptStatus(&InstancePtr->RecvDma,
+ RegDmaPending);
+
+ /* Check for errors */
+ if (RegDmaPending & XDMAV3_IPXR_DE_MASK) {
+ XDmaV3_SetInterruptStatus(&InstancePtr->RecvDma,
+ XDMAV3_IPXR_DE_MASK);
+
+ XTemac_mBumpStats(RxDmaErrors, 1);
+ ERR_HANDLER(XST_DMA_ERROR, XTE_RECV,
+ XDmaV3_mGetStatus(&InstancePtr->
+ RecvDma));
+ return;
+ }
+
+ /* Check for packets processed */
+ if (RegDmaPending & (XDMAV3_IPXR_PCTR_MASK |
+ XDMAV3_IPXR_PWBR_MASK |
+ XDMAV3_IPXR_SGEND_MASK)) {
+ /* Invoke the user's receive handler. The handler may remove the
+ * ready BDs from the list right away or defer until later
+ */
+ SGRECV_HANDLER();
+ }
+ }
+
+ /* Check for SGDMA transmit interrupts */
+ if (CorePending & XTE_IPXR_XMIT_DMA_MASK) {
+ /* Pending = enabled & asserted DMA channel interrupts */
+ RegDmaPending =
+ XDmaV3_GetInterruptStatus(&InstancePtr->
+ SendDma) &
+ XDmaV3_GetInterruptEnable(&InstancePtr->
+ SendDma);
+
+ /* Ack everything sampled above before dispatching */
+ XDmaV3_SetInterruptStatus(&InstancePtr->SendDma,
+ RegDmaPending);
+
+ /* Check for errors */
+ if (RegDmaPending & XDMAV3_IPXR_DE_MASK) {
+ XDmaV3_SetInterruptStatus(&InstancePtr->SendDma,
+ XDMAV3_IPXR_DE_MASK);
+
+ XTemac_mBumpStats(TxDmaErrors, 1);
+ ERR_HANDLER(XST_DMA_ERROR, XTE_SEND,
+ XDmaV3_mGetStatus(&InstancePtr->
+ SendDma));
+ return;
+ }
+
+ /* Check for packets processed */
+ if (RegDmaPending & (XDMAV3_IPXR_PCTR_MASK |
+ XDMAV3_IPXR_PWBR_MASK |
+ XDMAV3_IPXR_SGEND_MASK)) {
+ /* Invoke the user's send handler. The handler may remove the
+ * ready BDs from the list right away or defer until later
+ */
+ SGSEND_HANDLER();
+ }
+ }
+
+ /* Auto negotiation interrupt */
+ if (CorePending & XTE_IPXR_AUTO_NEG_MASK) {
+ ANEG_HANDLER();
+ }
+
+ /* Check for dropped receive frame. Ack the interrupt then call the
+ * error handler
+ */
+ if (CorePending & XTE_IPXR_RECV_DROPPED_MASK) {
+ XTemac_mBumpStats(RxRejectErrors, 1);
+ ERR_HANDLER(XST_RECV_ERROR,
+ CorePending & XTE_IPXR_RECV_DROPPED_MASK,
+ 0);
+
+ /* no return here, nonfatal error */
+ }
+ }
+
+ /* Ack core top level interrupt status.
+ * NOTE(review): this acks every bit sampled into CorePending at the
+ * top of the core branch (CorePending stays 0 if that branch was not
+ * taken) -- confirm no interrupt raised between the IPISR read and
+ * this write can be lost.
+ */
+ XTemac_mSetIpifReg(XTE_IPISR_OFFSET, CorePending);
+ XTemac_mSetIpifReg(XTE_DISR_OFFSET, RegDISR);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_l.c
+*
+* This file contains low-level functions to send and receive Ethernet frames.
+*
+* @note
+*
+* This API cannot be used when device is configured in SGDMA mode.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Modified to match HW 3.00a
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac_l.h"
+#include "xpacket_fifo_l_v2_00_a.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Reset and enable the transmitter and receiver. The contents of the Rx and Tx
+* control registers are preserved.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* If hardware is not behaving properly, then this function may never return.
+*
+******************************************************************************/
+void XTemac_Enable(u32 BaseAddress)
+{
+ u32 CR_save0;
+ u32 CR_save1;
+ volatile u32 CR; /* volatile: force a fresh read on each poll iteration */
+
+ /* Save contents of the Rx control registers, then reset the receiver */
+ CR_save0 = XTemac_mReadHostReg(BaseAddress, XTE_RXC0_OFFSET);
+ CR_save1 = XTemac_mReadHostReg(BaseAddress, XTE_RXC1_OFFSET);
+ XTemac_mWriteHostReg(BaseAddress, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
+
+ /* Wait for the receiver to finish reset (RXRST self-clears).
+ * No timeout here -- see the function header note about HW that
+ * misbehaves.
+ */
+ do {
+ CR = XTemac_mReadHostReg(BaseAddress, XTE_RXC1_OFFSET);
+ } while (CR & XTE_RXC1_RXRST_MASK);
+
+ /* Restore contents of Rx control registers, enable receiver */
+ XTemac_mWriteHostReg(BaseAddress, XTE_RXC0_OFFSET, CR_save0);
+ XTemac_mWriteHostReg(BaseAddress, XTE_RXC1_OFFSET,
+ CR_save1 | XTE_RXC1_RXEN_MASK);
+
+ /* Save contents of the Tx control register, then reset the transmitter */
+ CR_save0 = XTemac_mReadHostReg(BaseAddress, XTE_TXC_OFFSET);
+ XTemac_mWriteHostReg(BaseAddress, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
+
+ /* Wait for the transmitter to finish reset (TXRST self-clears) */
+ do {
+ CR = XTemac_mReadHostReg(BaseAddress, XTE_TXC_OFFSET);
+ } while (CR & XTE_TXC_TXRST_MASK);
+
+ /* Restore contents of Tx control register, enable transmitter */
+ XTemac_mWriteHostReg(BaseAddress, XTE_TXC_OFFSET,
+ CR_save0 | XTE_TXC_TXEN_MASK);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Disable the transmitter and receiver.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return
+*
+* None.
+*
+* @note
+*
+******************************************************************************/
+void XTemac_Disable(u32 BaseAddress)
+{
+ u32 CR;
+
+ /* Read-modify-write: clear only the enable bits, preserving the rest
+ * of each control register.
+ */
+
+ /* Disable the receiver */
+ CR = XTemac_mReadHostReg(BaseAddress, XTE_RXC1_OFFSET);
+ XTemac_mWriteHostReg(BaseAddress, XTE_RXC1_OFFSET,
+ CR & ~XTE_RXC1_RXEN_MASK);
+
+ /* Disable the transmitter */
+ CR = XTemac_mReadHostReg(BaseAddress, XTE_TXC_OFFSET);
+ XTemac_mWriteHostReg(BaseAddress, XTE_TXC_OFFSET,
+ CR & ~XTE_TXC_TXEN_MASK);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Send an Ethernet frame. This size is the total frame size, including header.
+* This function will return immediately upon dispatching of the frame to the
+* transmit FIFO. Upon return, the provided frame buffer can be reused. To
+* monitor the transmit status, use XTemac_mIsTxDone(). If desired, the
+* transmit status register (XTE_TSR_OFFSET) can be read to obtain the outcome
+* of the transaction. This function can be used only when the device is
+* configured for FIFO direct mode.
+*
+* @param BaseAddress is the base address of the device
+* @param FramePtr is a pointer to a 32-bit aligned frame
+* @param Size is the size, in bytes, of the frame
+*
+* @return
+*
+* - Size of the frame sent (Size parameter)
+* - 0 if the frame will not fit in the data FIFO.
+*
+* @note
+*
+* A transmit length FIFO overrun (XTE_IPXR_XMIT_LFIFO_OVER_MASK) condition may
+* occur if too many frames are pending transmit. This situation can happen when
+* many small frames are being sent. To prevent this condition, pause sending
+* when transmit length FIFO full (XTE_IPXR_XMIT_LFIFO_FULL_MASK) is indicated in
+* the XTE_IPISR_OFFSET register.
+*
+******************************************************************************/
+int XTemac_SendFrame(u32 BaseAddress, void *FramePtr, int Size)
+{
+ int Result;
+
+ /* Clear the status so it can be checked by the caller
+ * Must handle toggle-on-write for status bits...unfortunately:
+ * writing the bit only clears it when it is currently set, hence
+ * the read-test before the write.
+ */
+ if (XTemac_mReadReg(BaseAddress, XTE_IPISR_OFFSET) &
+ XTE_IPXR_XMIT_DONE_MASK) {
+ XTemac_mWriteReg(BaseAddress, XTE_IPISR_OFFSET,
+ XTE_IPXR_XMIT_DONE_MASK);
+ }
+
+ /* Use the packet fifo driver write the FIFO */
+ Result = XPacketFifoV200a_L0Write(BaseAddress + XTE_PFIFO_TXREG_OFFSET,
+ BaseAddress + XTE_PFIFO_TXDATA_OFFSET,
+ (u8 *) FramePtr, Size);
+
+ /* No room in the FIFO */
+ if (Result != XST_SUCCESS) {
+ return (0);
+ }
+
+ /* The frame is in the Fifo, now send it: writing the length to the
+ * TPLR register triggers transmission.
+ */
+ XIo_Out32(BaseAddress + XTE_TPLR_OFFSET, Size);
+
+ return (Size);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Receive a frame. This function can be used only when the device is
+* configured for FIFO direct mode.
+*
+* @param BaseAddress is the base address of the device
+* @param FramePtr is a pointer to a 32-bit aligned buffer where the frame will
+* be stored
+*
+* @return
+*
+* The size, in bytes, of the frame received.
+* 0 if no frame has been received.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XTemac_RecvFrame(u32 BaseAddress, void *FramePtr)
+{
+ int Length;
+
+ /* Is there a received frame present */
+ if (XTemac_mIsRxEmpty(BaseAddress)) {
+ return (0);
+ }
+
+ /* Get the length of the frame that arrived. The caller's buffer must
+ * be large enough for a maximum-size frame; no bound is checked here.
+ */
+ Length = XTemac_mReadReg(BaseAddress, XTE_RPLR_OFFSET);
+
+ /* Clear the status now that the length is read so we're ready again
+ * next time
+ */
+ XTemac_mWriteReg(BaseAddress, XTE_IPISR_OFFSET,
+ XTE_IPXR_RECV_DONE_MASK);
+
+ /* Use the packet fifo driver to read the FIFO. We assume the Length is
+ * valid and there is enough data in the FIFO - so we ignore the return
+ * code.
+ */
+ (void) XPacketFifoV200a_L0Read(BaseAddress + XTE_PFIFO_RXREG_OFFSET,
+ BaseAddress + XTE_PFIFO_RXDATA_OFFSET,
+ (u8 *) FramePtr, Length);
+ return (Length);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xtemac_l.h
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the Tri-Mode MAC Ethernet (TEMAC) device.
+* High-level driver functions are defined in xtemac.h.
+*
+* @note
+*
+* Some registers are not accessible when a HW instance is configured for SGDMA.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 05/24/04 First release for early access.
+* 1.00a rmm 06/01/05 General release
+* 1.00b rmm 09/23/05 Added MII interrupt bit definitions, removed IFGP register
+* and associated bit masks, added MGT register and
+* associated bit masks, removed DIID register, renamed host
+* register names to match those in the latest HW spec.
+* 2.00a rmm 11/21/05 Modified to match HW 3.00a
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XTEMAC_L_H /* prevent circular inclusions */
+#define XTEMAC_L_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xdmav3_l.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************** Constant Definitions *****************************/
+
+#define XTE_PLB_BD_ALIGNMENT 4 /**< Minimum buffer descriptor alignment
+ on the PLB bus */
+#define XTE_PLB_BUF_ALIGNMENT 8 /**< Minimum buffer alignment when using
+ HW options that impose alignment
+ restrictions on the buffer data on
+ the PLB bus */
+
+#define XTE_RESET_IPIF_DELAY_US 1 /**< Number of Us to delay after IPIF
+ reset */
+#define XTE_RESET_HARD_DELAY_US 4 /**< Number of Us to delay after hard core
+ reset */
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit.
+ */
+
+/** @name IPIF interrupt and reset registers
+ * @{
+ */
+#define XTE_DISR_OFFSET 0x00000000 /**< Device interrupt status */
+#define XTE_DIPR_OFFSET 0x00000004 /**< Device interrupt pending */
+#define XTE_DIER_OFFSET 0x00000008 /**< Device interrupt enable */
+#define XTE_DGIE_OFFSET 0x0000001C /**< Device global interrupt enable */
+#define XTE_IPISR_OFFSET 0x00000020 /**< IP interrupt status */
+#define XTE_IPIER_OFFSET 0x00000028 /**< IP interrupt enable */
+#define XTE_DSR_OFFSET 0x00000040 /**< Device software reset (write) */
+#define XTE_MIR_OFFSET 0x00000040 /**< Device software reset (read) */
+/*@}*/
+
+
+/** @name IPIF transmit and receive packet fifo base offsets
+ * Individual registers and bit definitions are defined in
+ * xpacket_fifo_l_v2_00_a.h. This register group is not accessible if
+ * the device instance is configured for SGDMA.
+ * @{
+ */
+#define XTE_PFIFO_TXREG_OFFSET 0x00002000 /**< Packet FIFO Tx channel */
+#define XTE_PFIFO_RXREG_OFFSET 0x00002010 /**< Packet FIFO Rx channel */
+/*@}*/
+
+
+/** @name IPIF transmit and receive packet fifo data offsets. This register
+ * group is not accessible if the device instance is configured for
+ * SGDMA.
+ * @{
+ */
+#define XTE_PFIFO_TXDATA_OFFSET 0x00002100 /**< IPIF Tx packet fifo port */
+#define XTE_PFIFO_RXDATA_OFFSET 0x00002200 /**< IPIF Rx packet fifo port */
+/*@}*/
+
+
+/** @name IPIF transmit and receive DMA offsets
+ * Individual registers and bit definitions are defined in xdmav3.h.
+ * This register group is not accessible if the device instance is
+ * configured for FIFO direct.
+ * @{
+ */
+#define XTE_DMA_SEND_OFFSET 0x00002300 /**< DMA Tx channel */
+#define XTE_DMA_RECV_OFFSET 0x00002340 /**< DMA Rx channel */
+/*@}*/
+
+
+/** @name PLB_TEMAC registers. The TPLR, TSR, RPLR, and RSR are not accessible
+ * when a device instance is configured for SGDMA. LLPS is not accessible
+ * when a device instance is configured for FIFO direct.
+ * @{
+ */
+#define XTE_CR_OFFSET 0x00001000 /**< Control */
+#define XTE_TPLR_OFFSET 0x00001004 /**< Tx packet length (FIFO) */
+#define XTE_TSR_OFFSET 0x00001008 /**< Tx status (FIFO) */
+#define XTE_RPLR_OFFSET 0x0000100C /**< Rx packet length (FIFO) */
+#define XTE_RSR_OFFSET 0x00001010 /**< Receive status */
+#define XTE_TPPR_OFFSET 0x00001014 /**< Tx pause packet */
+#define XTE_LLPS_OFFSET 0x00001018 /**< LLINK PFIFO status */
+#define XTE_MGTDR_OFFSET 0x000033B0 /**< MII data */
+#define XTE_MGTCR_OFFSET 0x000033B4 /**< MII control */
+/*@}*/
+
+
+/** @name HARD_TEMAC Core Registers
+ * These are registers defined within the device's hard core located in the
+ * processor block. They are accessed with the host interface. These registers
+ * are addressed offset by XTE_HOST_IPIF_OFFSET or by the DCR base address
+ * if so configured.
+ *
+ * Access to these registers should go through macros XTemac_mReadHostReg()
+ * and XTemac_mWriteHostReg() to guarantee proper access.
+ * @{
+ */
+#define XTE_HOST_IPIF_OFFSET 0x00003000 /**< Offset of host registers when
+ memory mapped into IPIF */
+#define XTE_RXC0_OFFSET 0x00000200 /**< Rx configuration word 0 */
+#define XTE_RXC1_OFFSET 0x00000240 /**< Rx configuration word 1 */
+#define XTE_TXC_OFFSET 0x00000280 /**< Tx configuration */
+#define XTE_FCC_OFFSET 0x000002C0 /**< Flow control configuration */
+#define XTE_EMCFG_OFFSET 0x00000300 /**< EMAC configuration */
+#define XTE_GMIC_OFFSET 0x00000320 /**< RGMII/SGMII configuration */
+#define XTE_MC_OFFSET 0x00000340 /**< Management configuration */
+#define XTE_UAW0_OFFSET 0x00000380 /**< Unicast address word 0 */
+#define XTE_UAW1_OFFSET 0x00000384 /**< Unicast address word 1 */
+#define XTE_MAW0_OFFSET 0x00000388 /**< Multicast address word 0 */
+#define XTE_MAW1_OFFSET 0x0000038C /**< Multicast address word 1 */
+#define XTE_AFM_OFFSET 0x00000390 /**< Promiscuous mode */
+/*@}*/
+
+
+/* Register masks. The following constants define bit locations of various
+ * control bits in the registers. Constants are not defined for those registers
+ * that have a single bit field representing all 32 bits. For further
+ * information on the meaning of the various bit masks, refer to the HW spec.
+ */
+
+/** @name Interrupt status bits for top level interrupts
+ * These bits are associated with the XTE_DISR_OFFSET, XTE_DIPR_OFFSET,
+ * and XTE_DIER_OFFSET registers.
+ * @{
+ */
+#define XTE_DXR_SEND_FIFO_MASK 0x00000040 /**< Send FIFO channel */
+#define XTE_DXR_RECV_FIFO_MASK 0x00000020 /**< Receive FIFO channel */
+#define XTE_DXR_CORE_MASK 0x00000004 /**< Core */
+#define XTE_DXR_DPTO_MASK 0x00000002 /**< Data phase timeout */
+#define XTE_DXR_TERR_MASK 0x00000001 /**< Transaction error */
+/*@}*/
+
+/** @name Interrupt status bits for MAC interrupts
+ * These bits are associated with XTE_IPISR_OFFSET and XTE_IPIER_OFFSET
+ * registers.
+ *
+ * @{
+ */
+#define XTE_IPXR_XMIT_DONE_MASK 0x00000001 /**< Tx complete */
+#define XTE_IPXR_RECV_DONE_MASK 0x00000002 /**< Rx complete */
+#define XTE_IPXR_AUTO_NEG_MASK 0x00000004 /**< Auto negotiation complete */
+#define XTE_IPXR_RECV_REJECT_MASK 0x00000008 /**< Rx packet rejected */
+#define XTE_IPXR_XMIT_SFIFO_EMPTY_MASK 0x00000010 /**< Tx status fifo empty */
+#define XTE_IPXR_RECV_LFIFO_EMPTY_MASK 0x00000020 /**< Rx length fifo empty */
+#define XTE_IPXR_XMIT_LFIFO_FULL_MASK 0x00000040 /**< Tx length fifo full */
+#define XTE_IPXR_RECV_LFIFO_OVER_MASK 0x00000080 /**< Rx length fifo overrun
+ Note that this signal is
+ no longer asserted by HW
+ */
+#define XTE_IPXR_RECV_LFIFO_UNDER_MASK 0x00000100 /**< Rx length fifo underrun */
+#define XTE_IPXR_XMIT_SFIFO_OVER_MASK 0x00000200 /**< Tx status fifo overrun */
+#define XTE_IPXR_XMIT_SFIFO_UNDER_MASK 0x00000400 /**< Tx status fifo underrun */
+#define XTE_IPXR_XMIT_LFIFO_OVER_MASK 0x00000800 /**< Tx length fifo overrun */
+#define XTE_IPXR_XMIT_LFIFO_UNDER_MASK 0x00001000 /**< Tx length fifo underrun */
+#define XTE_IPXR_RECV_PFIFO_ABORT_MASK 0x00002000 /**< Rx packet rejected due to
+ full packet FIFO */
+#define XTE_IPXR_RECV_LFIFO_ABORT_MASK 0x00004000 /**< Rx packet rejected due to
+ full length FIFO */
+#define XTE_IPXR_MII_PEND_MASK 0x00008000 /**< Mii operation now
+ pending */
+#define XTE_IPXR_MII_DONE_MASK 0x00010000 /**< Mii operation has
+ completed */
+#define XTE_IPXR_XMIT_PFIFO_UNDER_MASK 0x00020000 /**< Tx packet FIFO
+ underrun */
+#define XTE_IPXR_XMIT_DMA_MASK 0x00080000 /**< Tx dma channel */
+#define XTE_IPXR_RECV_DMA_MASK 0x00100000 /**< Rx dma channel */
+#define XTE_IPXR_RECV_FIFO_LOCK_MASK 0x00200000 /**< Rx FIFO deadlock */
+#define XTE_IPXR_XMIT_FIFO_LOCK_MASK 0x00400000 /**< Tx FIFO deadlock */
+
+
+#define XTE_IPXR_RECV_DROPPED_MASK \
+ (XTE_IPXR_RECV_REJECT_MASK | \
+ XTE_IPXR_RECV_PFIFO_ABORT_MASK | \
+ XTE_IPXR_RECV_LFIFO_ABORT_MASK) /**< IPXR bits that indicate a dropped
+ receive frame */
+
+#define XTE_IPXR_XMIT_ERROR_MASK \
+ (XTE_IPXR_XMIT_SFIFO_OVER_MASK | \
+ XTE_IPXR_XMIT_SFIFO_UNDER_MASK | \
+ XTE_IPXR_XMIT_LFIFO_OVER_MASK | \
+ XTE_IPXR_XMIT_LFIFO_UNDER_MASK | \
+ XTE_IPXR_XMIT_PFIFO_UNDER_MASK) /**< IPXR bits that indicate transmit
+ errors */
+
+#define XTE_IPXR_RECV_ERROR_MASK \
+ (XTE_IPXR_RECV_DROPPED_MASK | \
+ XTE_IPXR_RECV_LFIFO_UNDER_MASK) /**< IPXR bits that indicate receive
+ errors */
+
+#define XTE_IPXR_FIFO_FATAL_ERROR_MASK \
+ (XTE_IPXR_RECV_FIFO_LOCK_MASK | \
+ XTE_IPXR_XMIT_FIFO_LOCK_MASK | \
+ XTE_IPXR_XMIT_SFIFO_OVER_MASK | \
+ XTE_IPXR_XMIT_SFIFO_UNDER_MASK | \
+ XTE_IPXR_XMIT_LFIFO_OVER_MASK | \
+ XTE_IPXR_XMIT_LFIFO_UNDER_MASK | \
+ XTE_IPXR_XMIT_PFIFO_UNDER_MASK | \
+ XTE_IPXR_RECV_LFIFO_UNDER_MASK) /**< IPXR bits that indicate fatal FIFO
+ errors. These bits can only be
+ cleared by a device reset */
+/*@}*/
+
+
+/** @name Software reset register (DSR)
+ * @{
+ */
+#define XTE_DSR_RESET_MASK 0x0000000A /**< Write this value to DSR to
+ reset entire core */
+/*@}*/
+
+
+/** @name Global interrupt enable register (DGIE)
+ * @{
+ */
+#define XTE_DGIE_ENABLE_MASK 0x80000000 /**< Write this value to DGIE to
+ enable interrupts from this
+ device */
+/*@}*/
+
+/** @name Control Register (CR)
+ * @{
+ */
+#define XTE_CR_BCREJ_MASK 0x00000004 /**< Disable broadcast address
+ filtering */
+#define XTE_CR_MCREJ_MASK 0x00000002 /**< Disable multicast address
+ filtering */
+#define XTE_CR_HRST_MASK 0x00000001 /**< Reset the hard TEMAC core */
+/*@}*/
+
+
+/** @name Transmit Packet Length Register (TPLR)
+ * @{
+ */
+#define XTE_TPLR_TXPL_MASK 0x00003FFF /**< Tx packet length in bytes */
+/*@}*/
+
+
+/** @name Transmit Status Register (TSR)
+ * @{
+ */
+#define XTE_TSR_TPCF_MASK 0x00000001 /**< Transmit packet complete
+ flag */
+
+/*@}*/
+
+
+/** @name Receive Packet Length Register (RPLR)
+ * @{
+ */
+#define XTE_RPLR_RXPL_MASK 0x00003FFF /**< Rx packet length in bytes */
+/*@}*/
+
+
+/** @name Receive Status Register (RSR)
+ * @{
+ */
+#define XTE_RSR_RPCF_MASK 0x00000001 /**< Receive packet complete
+ flag */
+/*@}*/
+
+
+/** @name MII Management Data register (MGTDR)
+ * @{
+ */
+#define XTE_MGTDR_MIID_MASK 0x0000FFFF /**< MII data */
+/*@}*/
+
+
+/** @name MII Management Control register (MGTCR)
+ * @{
+ */
+#define XTE_MGTCR_RWN_MASK 0x00000400 /**< Read-not-write,0=read
+ 1=write */
+#define XTE_MGTCR_PHYAD_MASK 0x000003E0 /**< PHY address */
+#define XTE_MGTCR_REGAD_MASK 0x0000001F /**< PHY register address */
+
+#define XTE_MGTCR_PHYAD_SHIFT_MASK 5 /**< Shift bits for PHYAD */
+/*@}*/
+
+
+/** @name Transmit Pause Packet Register (TPPR)
+ * @{
+ */
+#define XTE_TPPR_TPPD_MASK 0x0000FFFF /**< Tx pause packet data */
+/*@}*/
+
+
+/** @name Receiver Configuration Word 1 (RXC1)
+ * @{
+ */
+#define XTE_RXC1_RXRST_MASK 0x80000000 /**< Receiver reset */
+#define XTE_RXC1_RXJMBO_MASK 0x40000000 /**< Jumbo frame enable */
+#define XTE_RXC1_RXFCS_MASK 0x20000000 /**< FCS not stripped */
+#define XTE_RXC1_RXEN_MASK 0x10000000 /**< Receiver enable */
+#define XTE_RXC1_RXVLAN_MASK 0x08000000 /**< VLAN enable */
+#define XTE_RXC1_RXHD_MASK 0x04000000 /**< Half duplex */
+#define XTE_RXC1_RXLT_MASK 0x02000000 /**< Length/type check disable */
+#define XTE_RXC1_ERXC1_MASK 0x0000FFFF /**< Pause frame source address
+ bits [47:32]. Bits [31:0]
+ are stored in register
+ ERXC0 */
+/*@}*/
+
+
+/** @name Transmitter Configuration (TXC)
+ * @{
+ */
+#define XTE_TXC_TXRST_MASK 0x80000000 /**< Transmitter reset */
+#define XTE_TXC_TXJMBO_MASK 0x40000000 /**< Jumbo frame enable */
+#define XTE_TXC_TXFCS_MASK 0x20000000 /**< Generate FCS */
+#define XTE_TXC_TXEN_MASK 0x10000000 /**< Transmitter enable */
+#define XTE_TXC_TXVLAN_MASK 0x08000000 /**< VLAN enable */
+#define XTE_TXC_TXHD_MASK 0x04000000 /**< Half duplex */
+#define XTE_TXC_TXIFG_MASK 0x02000000 /**< IFG adjust enable */
+/*@}*/
+
+
+/** @name Flow Control Configuration (FCC)
+ * @{
+ */
+#define XTE_FCC_RXFLO_MASK 0x20000000 /**< Rx flow control enable */
+#define XTE_FCC_TXFLO_MASK 0x40000000 /**< Tx flow control enable */
+/*@}*/
+
+
+/** @name EMAC Configuration (EMCFG)
+ * @{
+ */
+#define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /**< Link speed */
+#define XTE_EMCFG_RGMII_MASK 0x20000000 /**< RGMII mode enable */
+#define XTE_EMCFG_SGMII_MASK 0x10000000 /**< SGMII mode enable */
+#define XTE_EMCFG_1000BASEX_MASK 0x08000000 /**< 1000BaseX mode enable */
+#define XTE_EMCFG_HOSTEN_MASK 0x04000000 /**< Host interface enable */
+#define XTE_EMCFG_TX16BIT 0x02000000 /**< 16 bit Tx client enable */
+#define XTE_EMCFG_RX16BIT 0x01000000 /**< 16 bit Rx client enable */
+
+#define XTE_EMCFG_LINKSPD_10 0x00000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 10 Mbit */
+#define XTE_EMCFG_LINKSPD_100 0x40000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 100 Mbit */
+#define XTE_EMCFG_LINKSPD_1000 0x80000000 /**< XTE_EMCFG_LINKSPD_MASK for
+ 1000 Mbit */
+/*@}*/
+
+
+/** @name EMAC RGMII/SGMII Configuration (GMIC)
+ * @{
+ */
+#define XTE_GMIC_RGLINKSPD_MASK 0xC0000000 /**< RGMII link speed */
+#define XTE_GMIC_SGLINKSPD_MASK 0x0000000C /**< SGMII link speed */
+#define XTE_GMIC_RGSTATUS_MASK 0x00000002 /**< RGMII link status */
+#define XTE_GMIC_RGHALFDUPLEX_MASK 0x00000001 /**< RGMII half duplex */
+
+#define XTE_GMIC_RGLINKSPD_10 0x00000000 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 10 Mbit */
+#define XTE_GMIC_RGLINKSPD_100 0x40000000 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 100 Mbit */
+#define XTE_GMIC_RGLINKSPD_1000 0x80000000 /**< XTE_GMIC_RGLINKSPD_MASK
+ for 1000 Mbit */
+#define XTE_GMIC_SGLINKSPD_10 0x00000000 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 10 Mbit */
+#define XTE_GMIC_SGLINKSPD_100 0x00000004 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 100 Mbit */
+#define XTE_GMIC_SGLINKSPD_1000 0x00000008 /**< XTE_SGMIC_RGLINKSPD_MASK
+ for 1000 Mbit */
+/*@}*/
+
+
+/** @name EMAC Management Configuration (MC)
+ * @{
+ */
+#define XTE_MC_MDIO_MASK 0x00000040 /**< MII management enable */
+#define XTE_MC_CLK_DVD_MAX 0x3F /**< Maximum MDIO divisor */
+/*@}*/
+
+
+/** @name EMAC Unicast Address Register Word 1 (UAW1)
+ * @{
+ */
+#define XTE_UAW1_MASK 0x0000FFFF /**< Station address bits [47:32]
+ Station address bits [31:0]
+ are stored in register
+ UAW0 */
+/*@}*/
+
+
+/** @name EMAC Multicast Address Register Word 1 (MAW1)
+ * @{
+ */
+#define XTE_MAW1_CAMRNW_MASK 0x00800000 /**< CAM read/write control */
+#define XTE_MAW1_CAMADDR_MASK 0x00030000 /**< CAM address mask */
+#define XTE_MAW1_MASK 0x0000FFFF /**< Multicast address bits [47:32]
+ Multicast address bits [31:0]
+ are stored in register
+ MAW0 */
+#define XTE_MAW1_CAMMADDR_SHIFT_MASK 16 /**< Number of bits to shift right
+ to align with
+ XTE_MAW1_CAMADDR_MASK */
+/*@}*/
+
+
+/** @name EMAC Address Filter Mode (AFM)
+ * @{
+ */
+#define XTE_AFM_EPPRM_MASK 0x80000000 /**< Promiscuous mode enable */
+/*@}*/
+
+
+/** @name Checksum offload buffer descriptor extensions
+ * @{
+ */
+/** Byte offset where checksum should begin (16 bit word) */
+#define XTE_BD_TX_CSBEGIN_OFFSET XDMAV3_BD_USR0_OFFSET
+
+/** Offset where checksum should be inserted (16 bit word) */
+#define XTE_BD_TX_CSINSERT_OFFSET (XDMAV3_BD_USR0_OFFSET + 2)
+
+/** Checksum offload control for transmit (16 bit word) */
+#define XTE_BD_TX_CSCNTRL_OFFSET XDMAV3_BD_USR1_OFFSET
+
+/** Seed value for checksum calculation (16 bit word) */
+#define XTE_BD_TX_CSINIT_OFFSET (XDMAV3_BD_USR1_OFFSET + 2)
+
+/** Receive frame checksum calculation (16 bit word) */
+#define XTE_BD_RX_CSRAW_OFFSET (XDMAV3_BD_USR5_OFFSET + 2)
+
+/*@}*/
+
+/** @name TX_CSCNTRL bit mask
+ * @{
+ */
+#define XTE_BD_TX_CSCNTRL_CALC_MASK 0x0001 /**< Enable/disable Tx
+ checksum */
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/****************************************************************************/
+/**
+*
+* Read the given IPIF register.
+*
+* @param BaseAddress is the IPIF base address of the device
+* @param RegOffset is the register offset to be read
+*
+* @return The 32-bit value of the register
+*
+* @note
+* C-style signature:
+* u32 XTemac_mReadReg(u32 BaseAddress, u32 RegOffset)
+*
+*****************************************************************************/
+#define XTemac_mReadReg(BaseAddress, RegOffset) \
+ XIo_In32((BaseAddress) + (RegOffset))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given IPIF register.
+*
+* @param BaseAddress is the IPIF base address of the device
+* @param RegOffset is the register offset to be written
+* @param Data is the 32-bit value to write to the register
+*
+* @return None.
+*
+* @note
+* C-style signature:
+* void XTemac_mWriteReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XTemac_mWriteReg(BaseAddress, RegOffset, Data) \
+ XIo_Out32((BaseAddress) + (RegOffset), (Data))
+
+/****************************************************************************/
+/**
+*
+* Convert host register offset to a proper DCR or memory mapped offset (DCR
+* not currently supported).
+*
+* @param HostRegOffset is the relative regster offset to be converted
+*
+* @return The correct offset of the register
+*
+* @note
+* C-style signature:
+* u32 XTemac_mHostOffset(u32 RegOffset)
+*
+*****************************************************************************/
+#define XTemac_mHostOffset(HostRegOffset) \
+ ((u32)(HostRegOffset) + XTE_HOST_IPIF_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* Read the given host register.
+*
+* @param BaseAddress is the base address of the device
+* @param HostRegOffset is the register offset to be read
+*
+* @return The 32-bit value of the register
+*
+* @note
+* C-style signature:
+* u32 XTemac_mReadHostReg(u32 BaseAddress, u32 HostRegOffset)
+*
+*****************************************************************************/
+#define XTemac_mReadHostReg(BaseAddress, HostRegOffset) \
+ XIo_In32((BaseAddress) + XTemac_mHostOffset(HostRegOffset))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given host register.
+*
+* @param BaseAddress is the base address of the device
+* @param HostRegOffset is the register offset to be written
+* @param Data is the 32-bit value to write to the register
+*
+* @return None.
+*
+* C-style signature:
+* void XTemac_mWriteHostReg(u32 BaseAddress, u32 RegOffset,
+* u32 Data)
+*
+*****************************************************************************/
+#define XTemac_mWriteHostReg(BaseAddress, HostRegOffset, Data) \
+ XIo_Out32((BaseAddress) + XTemac_mHostOffset(HostRegOffset), (Data))
+
+
+/****************************************************************************/
+/**
+*
+* Set the station address.
+*
+* @param BaseAddress is the base address of the device
+* @param AddressPtr is a pointer to a 6-byte MAC address
+*
+* @return None.
+*
+* @note
+* C-style signature:
+* u32 XTemac_mSetMacAddress(u32 BaseAddress, u8 *AddressPtr)
+*
+*****************************************************************************/
+/* NOTE(review): multi-statement macro wrapped in bare braces rather than
+ * do { ... } while (0); a call site of the form
+ * `if (cond) XTemac_mSetMacAddress(base, addr); else ...` will not compile
+ * cleanly. Left byte-for-byte as provided by Xilinx -- confirm before
+ * changing the vendor file. */
+#define XTemac_mSetMacAddress(BaseAddress, AddressPtr) \
+{ \
+ u32 Reg; \
+ u8* Aptr = (u8*)(AddressPtr); \
+ \
+ Reg = Aptr[0] & 0x000000FF; \
+ Reg |= Aptr[1] << 8; \
+ Reg |= Aptr[2] << 16; \
+ Reg |= Aptr[3] << 24; \
+ XTemac_mWriteHostReg((BaseAddress), XTE_UAW0_OFFSET, Reg); \
+ \
+ Reg = XTemac_mReadHostReg((BaseAddress), XTE_UAW1_OFFSET); \
+ Reg &= ~XTE_UAW1_MASK; \
+ Reg |= Aptr[4] & 0x000000FF; \
+ Reg |= Aptr[5] << 8; \
+ XTemac_mWriteHostReg((BaseAddress), XTE_UAW1_OFFSET, Reg); \
+}
+
+
+/****************************************************************************/
+/**
+*
+* Check to see if the transmission is complete.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return TRUE if it is done, or FALSE if it is not.
+*
+* @note
+* C-style signature:
+* XBoolean XTemac_mIsTxDone(u32 BaseAddress)
+*
+*****************************************************************************/
+#define XTemac_mIsTxDone(BaseAddress) \
+ (((XIo_In32((BaseAddress) + XTE_IPISR_OFFSET) & XTE_IPXR_XMIT_DONE_MASK) == \
+ XTE_IPXR_XMIT_DONE_MASK) ? TRUE : FALSE)
+
+
+/****************************************************************************/
+/**
+*
+* Check to see if the receive FIFO is empty.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return TRUE if it is empty, or FALSE if it is not.
+*
+* @note
+* C-style signature:
+* XBoolean XTemac_mIsRxEmpty(u32 BaseAddress)
+*
+*****************************************************************************/
+#define XTemac_mIsRxEmpty(BaseAddress) \
+ ((XIo_In32((BaseAddress) + XTE_IPISR_OFFSET) & XTE_IPXR_RECV_DONE_MASK) \
+ ? FALSE : TRUE)
+
+
+/****************************************************************************/
+/**
+*
+* Reset the entire core including any attached PHY. Note that there may be a
+* settling time required after initiating a reset. See the core spec and the
+* PHY datasheet.
+*
+* @param BaseAddress is the base address of the device
+*
+* @return Nothing
+*
+* @note
+* C-style signature:
+* void XTemac_mReset(u32 BaseAddress)
+*
+*****************************************************************************/
+#define XTemac_mReset(BaseAddress) \
+ XIo_Out32((BaseAddress) + XTE_DSR_OFFSET, XTE_DSR_RESET_MASK)
+
+
+/************************** Function Prototypes ******************************/
+
+void XTemac_Enable(u32 BaseAddress);
+void XTemac_Disable(u32 BaseAddress);
+int XTemac_SendFrame(u32 BaseAddress, void *FramePtr, int Size);
+int XTemac_RecvFrame(u32 BaseAddress, void *FramePtr);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/*
+ * Xilinx Ethernet Linux component to interface XTemac component to Linux
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002-2004 (c) MontaVista, Software, Inc. This file is licensed under the terms
+ * of the GNU General Public License version 2.1. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a xd 12/12/05 First release
+ * 2.00a jvb 12/21/05 Added support for checksum offload, and receive side DRE
+ * 2.00b wgr 08/17/06 Port to kernel 2.6.10_mvl401.
+ * 2.00c rpm 12/12/06 Updated PHY address detection code, as well as PHY
+ * autonegotiation support (still not great - but better). Changed
+ * XILINX_PLB_TEMAC_3_00A_ML403_PHY_SUPPORT to MARVELL_88E1111.
+ * </pre>
+ *
+ */
+
+/*
+ * This driver is a bit unusual in that it is composed of two logical
+ * parts where one part is the OS independent code and the other part is
+ * the OS dependent code. Xilinx provides their drivers split in this
+ * fashion. This file represents the Linux OS dependent part known as
+ * the Linux adapter. The other files in this directory are the OS
+ * independent files as provided by Xilinx with no changes made to them.
+ * The names exported by those files begin with XTemac_. All functions
+ * in this file that are called by Linux have names that begin with
+ * xenet_. The functions in this file that have Handler in their name
+ * are registered as callbacks with the underlying Xilinx OS independent
+ * layer. Any other functions are static helper functions.
+ */
+
+/*
+ * With the way the hardened PLB Temac works, the driver needs to communicate
+ * with the PHY controller. Since each board will have a different
+ * type of PHY, the code that communicates with the MII type controller
+ * is inside #ifdef MARVELL_88E1111_PHY conditional
+ * compilation. For your specific board, you will want to replace this code with
+ * code of your own for your specific board.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/xilinx_devices.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "xbasic_types.h"
+#include "xtemac.h"
+#include "xipif_v1_23_b.h"
+#include "xpacket_fifo_v2_00_a.h"
+#include "xdmav3.h"
+#include "xdmabdv3.h"
+
+
+#define LOCAL_FEATURE_RX_CSUM 0x01
+#define LOCAL_FEATURE_RX_DRE 0x02
+
+/*
+ * Default SEND and RECV buffer descriptors (BD) numbers.
+ * BD Space needed is (XTE_SEND_BD_CNT+XTE_RECV_BD_CNT)*Sizeof(XDmaBdV3).
+ * Each XDmaBdV3 instance currently takes 40 bytes.
+ */
+#define XTE_SEND_BD_CNT 256
+#define XTE_RECV_BD_CNT 256
+
+/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
+#define DRIVER_NAME "xilinx_temac"
+#define DRIVER_DESCRIPTION "Xilinx Tri-Mode Ethernet MAC driver"
+#define DRIVER_VERSION "2.00b"
+
+#define TX_TIMEOUT (3*HZ) /* Transmission timeout is 3 seconds. */
+
+/*
+ * When Xilinx TEMAC is configured to use the TX Data Realignment Engine (DRE),
+ * alignment restrictions are as follows:
+ * - SGDMA transmit buffers can be aligned on any boundary, but receive buffers
+ * must be aligned on a 8-byte boundary.
+ *
+ * Without TX DRE, buffer alignment restrictions are as follows:
+ * - SGDMA transmit and receive buffers must be aligned on a 8-byte boundary
+ *
+ * There are no alignment restrictions when using XTemac_FifoRead() and
+ * XTemac_FifoWrite().
+ *
+ */
+/*
+ * ALIGNMENT_RECV = the alignment required to receive (8 required by plb bus w/no DRE)
+ * ALIGNMENT_SEND = the alignment required to send (8 required by plb bus w/no DRE)
+ * ALIGNMENT_SEND_PERF = tx alignment for better performance
+ *
+ * ALIGNMENT_SEND is used to see if we *need* to copy the data to re-align.
+ * ALIGNMENT_SEND_PERF is used if we've decided we need to copy anyway, we just
+ * copy to this alignment for better performance.
+ */
+
+#define ALIGNMENT_RECV 32
+#define ALIGNMENT_SEND 8
+#define ALIGNMENT_SEND_PERF 32
+
+
+/* SGDMA buffer descriptors must be aligned on a 8-byte boundary. */
+#define ALIGNMENT_BD 4
+
+/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
+#define BUFFER_ALIGNSEND(adr) ((ALIGNMENT_SEND - ((u32) adr)) % ALIGNMENT_SEND)
+#define BUFFER_ALIGNSEND_PERF(adr) ((ALIGNMENT_SEND_PERF - ((u32) adr)) % ALIGNMENT_SEND_PERF)
+#define BUFFER_ALIGNRECV(adr) ((ALIGNMENT_RECV - ((u32) adr)) % ALIGNMENT_RECV)
+
+/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+#define DFT_TX_THRESHOLD 16
+#define DFT_TX_WAITBOUND 1
+#define DFT_RX_THRESHOLD 2
+#define DFT_RX_WAITBOUND 1
+
+#define XTE_AUTOSTRIPPING 1
+
+/* Put Buffer Descriptors in BRAM?
+ * NOTE:
+ * Putting BDs in BRAM only works if there is only ONE instance of the TEMAC
+ * in hardware. The code does not handle multiple instances, e.g. it does
+ * not manage the memory in BRAM.
+ */
+#define BD_IN_BRAM 0
+#define BRAM_BASEADDR 0xffff8000
+
+/*
+ * Our private per device data. When a net_device is allocated we will
+ * ask for enough extra space for this.
+ */
+struct net_local {
+ struct list_head rcv; /* linkage on the receive BH queue (presumably receivedQueue) -- confirm */
+ struct list_head xmit; /* linkage on the send BH queue (presumably sentQueue) -- confirm */
+
+ struct net_device *ndev; /* this device */
+ struct net_device *next_dev; /* The next device in dev_list */
+ struct net_device_stats stats; /* Statistics for this device */
+ struct timer_list phy_timer; /* PHY monitoring timer */
+
+ u32 index; /* Which interface is this */
+ XInterruptHandler Isr; /* Pointer to the XTemac ISR routine */
+ u8 gmii_addr; /* The GMII address of the PHY */
+
+ /* The underlying OS independent code needs space as well. A
+ * pointer to the following XTemac structure will be passed to
+ * any XTemac_ function that requires it. However, we treat the
+ * data as an opaque object in this file (meaning that we never
+ * reference any of the fields inside of the structure). */
+ XTemac Emac;
+
+ unsigned int max_frame_size;
+
+ int cur_speed; /* current link speed in Mb/s (set by set_mac_speed; 0 = unknown) */
+
+ /* Buffer Descriptor space for both TX and RX BD ring */
+ void *desc_space; /* virtual address of BD space */
+ dma_addr_t desc_space_handle; /* physical address of BD space */
+ int desc_space_size; /* size of BD space */
+
+ /* buffer for one skb in case no room is available for transmission */
+ struct sk_buff *deferred_skb;
+
+ /* send buffers for non tx-dre hw */
+ void **tx_orig_buffers; /* Buffer addresses as returned by
+ dma_alloc_coherent() */
+ void **tx_buffers; /* Buffers addresses aligned for DMA */
+ dma_addr_t *tx_phys_buffers; /* Buffer addresses in physical memory */
+ size_t tx_buffers_cur; /* Index of current buffer used */
+
+ /* stats */
+ int max_frags_in_a_packet;
+ unsigned long realignments;
+ unsigned long tx_hw_csums;
+ unsigned long rx_hw_csums;
+ unsigned long local_features;
+#if ! XTE_AUTOSTRIPPING
+ unsigned long stripping;
+#endif
+};
+
+/* for exclusion of all program flows (processes, ISRs and BHs) */
+spinlock_t XTE_spinlock;
+spinlock_t XTE_tx_spinlock;
+spinlock_t XTE_rx_spinlock;
+
+/*
+ * ethtool has a status reporting feature where we can report any sort of
+ * status information we'd like. This is the list of strings used for that
+ * status reporting. ETH_GSTRING_LEN is defined in ethtool.h
+ */
+static char xenet_ethtool_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "txdmaerr", "txpfifoerr", "txstatuserr", "rxrejerr", "rxdmaerr",
+ "rxpfifoerror", "fifoerr", "ipiferr", "intr",
+ "max_frags", "tx_hw_csums", "rx_hw_csums",
+};
+
+/* Number of entries in xenet_ethtool_gstrings_stats. Parenthesized so the
+ * macro expands safely inside larger expressions (e.g. arithmetic or
+ * comparison operands). */
+#define XENET_STATS_LEN (sizeof(xenet_ethtool_gstrings_stats) / ETH_GSTRING_LEN)
+
+/* Helper function to determine if a given XTemac error warrants a reset.
+ * Returns nonzero for the status codes after which the callers reset the
+ * device (FIFO error, packet-FIFO deadlock, DMA error, IPIF error). */
+extern inline int status_requires_reset(int s)
+{
+ return (s == XST_FIFO_ERROR ||
+ s == XST_PFIFO_DEADLOCK ||
+ s == XST_DMA_ERROR || s == XST_IPIF_ERROR);
+}
+
+/* BH statics */
+static LIST_HEAD(receivedQueue);
+static spinlock_t receivedQueueSpin = SPIN_LOCK_UNLOCKED;
+static LIST_HEAD(sentQueue);
+static spinlock_t sentQueueSpin = SPIN_LOCK_UNLOCKED;
+
+/* from mii.h
+ *
+ * Items in mii.h but not in gmii.h
+ */
+#define ADVERTISE_100FULL 0x0100
+#define ADVERTISE_100HALF 0x0080
+#define ADVERTISE_10FULL 0x0040
+#define ADVERTISE_10HALF 0x0020
+#define ADVERTISE_CSMA 0x0001
+
+#define EX_ADVERTISE_1000FULL 0x0200
+#define EX_ADVERTISE_1000HALF 0x0100
+
+/*
+ * items not in mii.h nor gmii.h but should be
+ */
+#define MII_EXADVERTISE 0x09
+
+typedef enum DUPLEX { UNKNOWN_DUPLEX, HALF_DUPLEX, FULL_DUPLEX } DUPLEX;
+
+/*
+ * Program the PHY advertisement registers for the requested speed/duplex
+ * and restart autonegotiation, polling MII_BMSR until the link comes up.
+ * Returns 0 on success, -1 on any PHY access error or failed negotiation.
+ */
+int renegotiate_speed(struct net_device *dev, int speed, DUPLEX duplex)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+ int status;
+ int retries = 2;
+ int wait_count;
+ u16 phy_reg0 = BMCR_ANENABLE | BMCR_ANRESTART;
+ u16 phy_reg1;
+ u16 phy_reg4;
+ u16 phy_reg9 = 0;
+
+
+ /*
+ * It appears that the 10baset full and half duplex settings
+ * are overloaded for gigabit ethernet
+ */
+ if ((duplex == FULL_DUPLEX) && (speed == 10)) {
+ phy_reg4 = ADVERTISE_10FULL | ADVERTISE_CSMA;
+ }
+ else if ((duplex == FULL_DUPLEX) && (speed == 100)) {
+ phy_reg4 = ADVERTISE_100FULL | ADVERTISE_CSMA;
+ }
+ else if ((duplex == FULL_DUPLEX) && (speed == 1000)) {
+ phy_reg4 = ADVERTISE_CSMA;
+ phy_reg9 = EX_ADVERTISE_1000FULL;
+ }
+ else if (speed == 10) {
+ phy_reg4 = ADVERTISE_10HALF | ADVERTISE_CSMA;
+ }
+ else if (speed == 100) {
+ phy_reg4 = ADVERTISE_100HALF | ADVERTISE_CSMA;
+ }
+ else if (speed == 1000) {
+ phy_reg4 = ADVERTISE_CSMA;
+ phy_reg9 = EX_ADVERTISE_1000HALF;
+ }
+ else {
+ printk(KERN_ERR
+ "%s: XTemac: unsupported speed requested: %d\n",
+ dev->name, speed);
+ return -1;
+ }
+
+ /*
+ * link status in register 1:
+ * first read / second read:
+ * 0 0 link is down
+ * 0 1 link is up (but it was down earlier)
+ * 1 0 link is down (but it was just up)
+ * 1 1 link is up
+ *
+ */
+ status = XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_reg1);
+ status |= XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &phy_reg1);
+ status |=
+ XTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_ADVERTISE,
+ phy_reg4);
+ status |=
+ XTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_EXADVERTISE,
+ phy_reg9);
+ if (status != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: error accessing PHY: %d\n", dev->name,
+ status);
+ return -1;
+ }
+
+ /*
+ * NOTE(review): both branches of the if/else at the bottom of this
+ * loop return, so the loop body runs at most once and `retries` is
+ * effectively unused -- confirm whether a real retry was intended.
+ */
+ while (retries--) {
+ /* initiate an autonegotiation of the speed */
+ status = XTemac_PhyWrite(&lp->Emac, lp->gmii_addr, MII_BMCR,
+ phy_reg0);
+ if (status != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: error starting autonegotiation: %d\n",
+ dev->name, status);
+ return -1;
+ }
+
+ wait_count = 20; /* so we don't loop forever */
+ while (wait_count--) {
+ /* wait a bit for the negotiation to complete */
+ mdelay(500);
+ status = XTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ MII_BMSR, &phy_reg1);
+ status |=
+ XTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ MII_BMSR, &phy_reg1);
+ if (status != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: error reading MII status %d\n",
+ dev->name, status);
+ return -1;
+ }
+ if ((phy_reg1 & BMSR_LSTATUS) &&
+ (phy_reg1 & BMSR_ANEGCAPABLE))
+ break;
+
+ }
+
+ if (phy_reg1 & BMSR_LSTATUS) {
+ printk(KERN_INFO
+ "%s: XTemac: We renegotiated the speed to: %d\n",
+ dev->name, speed);
+ return 0;
+ }
+ else {
+ printk(KERN_ERR
+ "%s: XTemac: Not able to set the speed to %d (status: 0x%0x)\n",
+ dev->name, speed, phy_reg1);
+ return -1;
+ }
+ }
+
+ printk(KERN_ERR
+ "%s: XTemac: Not able to set the speed to %d\n", dev->name,
+ speed);
+ return -1;
+}
+
+/* The following code tries to detect the MAC speed so that the silicon-
+ * based TEMAC speed can be set to match. There is some PHY-specific code
+ * that works with Marvel PHY (Xilinx ML4xx boards), or some more general
+ * code that tries to start autonegotiation and detect the result. If you
+ * don't like this or it doesn't work for you, change it or hardcode the speed.
+ *
+ * Note also a silicon issue with Xilinx V4FX with regards to MDIO access:
+ * pre-CES4 chips (ML403, pre-production ML405/ML410)
+ * use hard_temac_v3_00_a
+ * CES4 or later chips (production ML405, ML410 boards)
+ * use hard_temac_v3_00_b
+ */
+#define MARVELL_88E1111_PHY
+
+/*
+ * This function sets up MAC's speed according to link speed of PHY
+ * This function is specific to MARVELL 88E1111 PHY chip and assumes GMII
+ * interface is being used by the TEMAC
+ */
+void set_mac_speed(struct net_local *lp)
+{
+ u16 phylinkspeed;
+ struct net_device *dev = lp->ndev;
+ int ret;
+
+#ifndef MARVELL_88E1111_PHY
+ int retry_count = 1;
+#endif
+
+ /* See comments at top for an explanation of MARVELL_88E1111_PHY */
+#ifdef MARVELL_88E1111_PHY
+#define MARVELL_88E1111_PHY_SPECIFIC_STATUS_REG_OFFSET 17
+#define MARVELL_88E1111_LINKSPEED_MARK 0xC000
+#define MARVELL_88E1111_LINKSPEED_SHIFT 14
+#define MARVELL_88E1111_LINKSPEED_1000M 0x0002
+#define MARVELL_88E1111_LINKSPEED_100M 0x0001
+#define MARVELL_88E1111_LINKSPEED_10M 0x0000
+ u16 RegValue;
+
+ /* Loop until read of PHY specific status register is successful. */
+ /* NOTE(review): this poll has no upper bound; it will spin forever
+ * if the PHY never answers -- confirm that is acceptable here. */
+ do {
+ ret = XTemac_PhyRead(&lp->Emac, lp->gmii_addr,
+ MARVELL_88E1111_PHY_SPECIFIC_STATUS_REG_OFFSET,
+ &RegValue);
+ } while (ret != XST_SUCCESS);
+
+
+ /* Get current link speed */
+ phylinkspeed = (RegValue & MARVELL_88E1111_LINKSPEED_MARK)
+ >> MARVELL_88E1111_LINKSPEED_SHIFT;
+
+ /* Update TEMAC speed accordingly */
+ switch (phylinkspeed) {
+ case (MARVELL_88E1111_LINKSPEED_1000M):
+ XTemac_SetOperatingSpeed(&lp->Emac, 1000);
+ printk(KERN_INFO "%s: XTemac: speed set to 1000Mb/s\n",
+ dev->name);
+ lp->cur_speed = 1000;
+ break;
+ case (MARVELL_88E1111_LINKSPEED_100M):
+ XTemac_SetOperatingSpeed(&lp->Emac, 100);
+ printk(KERN_INFO "%s: XTemac: speed set to 100Mb/s\n",
+ dev->name);
+ lp->cur_speed = 100;
+ break;
+ case (MARVELL_88E1111_LINKSPEED_10M):
+ XTemac_SetOperatingSpeed(&lp->Emac, 10);
+ printk(KERN_INFO "%s: XTemac: speed set to 10Mb/s\n",
+ dev->name);
+ lp->cur_speed = 10;
+ break;
+ default:
+ /* Unknown PHY report: fall back to 1000 Mb/s */
+ XTemac_SetOperatingSpeed(&lp->Emac, 1000);
+ printk(KERN_INFO "%s: XTemac: speed set to 1000Mb/s\n",
+ dev->name);
+ lp->cur_speed = 1000;
+ break;
+ }
+
+#else /* generic PHY */
+ if (XTemac_mGetPhysicalInterface(&lp->Emac) == XTE_PHY_TYPE_MII) {
+ phylinkspeed = 100;
+ }
+ else {
+ phylinkspeed = 1000;
+ }
+
+ /*
+ * Try to renegotiate the speed until something sticks
+ */
+ while (phylinkspeed > 1) {
+ ret = renegotiate_speed(dev, phylinkspeed, FULL_DUPLEX);
+ /*
+ * ret == 1 - try it again
+ * ret == 0 - it worked
+ * ret < 0 - there was some failure negotiating the speed
+ */
+ if (ret == 0) {
+ /* it worked, get out of the loop */
+ break;
+ }
+
+ /* it didn't work this time, but it may work if we try again */
+ if ((ret == 1) && (retry_count)) {
+ retry_count--;
+ printk("trying again...\n");
+ continue;
+ }
+ /* reset the retry_count, because we're about to try a lower speed */
+ retry_count = 1;
+ phylinkspeed /= 10;
+ }
+ if (phylinkspeed == 1) {
+ printk(KERN_INFO "%s: XTemac: could not negotiate speed\n",
+ dev->name);
+ lp->cur_speed = 0;
+ return;
+ }
+
+ XTemac_SetOperatingSpeed(&lp->Emac, phylinkspeed);
+ printk(KERN_INFO "%s: XTemac: speed set to %dMb/s\n", dev->name,
+ phylinkspeed);
+ lp->cur_speed = phylinkspeed;
+#endif
+}
+
+/*
+ * Helper function to reset the underlying hardware. This is called
+ * when we get into such deep trouble that we don't know how to handle
+ * otherwise.
+ */
+
+/*
+ * This reset function should handle five different reset request types
+ * from other functions. The reset request types include
+ * 1. FIFO error: FifoWrite()/FifoSend()/FifoRecv()/FifoRead() fails
+ * 2. DMA error: SgAlloc()/SgCommit()/SgFree() fails
+ * 3. DUPLEX error: MAC DUPLEX is not full duplex or does not match
+ * PHY setting
+ * 4. TX Timeout: Timeout occurs for a TX frame given to this adapter
+ * 5. Error Status: Temac Error interrupt occurs and asks for a reset
+ *
+ * NOTE(review): callers appear to invoke this with the relevant TX/RX
+ * spinlock already held (see the FIFO/SG handlers) -- confirm before
+ * adding any locking here.
+ */
+
+static void reset(struct net_device *dev, u32 line_num)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u16 TxThreshold, TxWaitBound, RxThreshold, RxWaitBound;
+	u32 Options;
+	static u32 reset_cnt = 0;	/* total resets since load, for diagnostics */
+
+	printk(KERN_INFO "%s: XTemac: resets (#%u) from code line %d\n",
+	       dev->name, ++reset_cnt, line_num);
+
+	/* Shouldn't really be necessary, but shouldn't hurt. */
+	netif_stop_queue(dev);
+
+	/* Stop device */
+	XTemac_Stop(&lp->Emac);
+
+	/*
+	 * XTemac_Reset puts the device back to the default state. We need
+	 * to save all the settings we don't already know, reset, restore
+	 * the settings, and then restart the temac.
+	 */
+	Options = XTemac_GetOptions(&lp->Emac);
+	if (XTemac_mIsSgDma(&lp->Emac)) {
+		/*
+		 * The following two functions will return an error if we are
+		 * not doing scatter-gather DMA. We just checked that so we
+		 * can safely ignore the return values.
+		 */
+		(int) XTemac_IntrSgCoalGet(&lp->Emac, XTE_RECV, &RxThreshold,
+					   &RxWaitBound);
+		(int) XTemac_IntrSgCoalGet(&lp->Emac, XTE_SEND, &TxThreshold,
+					   &TxWaitBound);
+
+	}
+
+	/* now we can reset the device */
+	XTemac_Reset(&lp->Emac, 0);
+
+	/* Reset on TEMAC also resets PHY. Give it some time to finish negotiation
+	 * before we move on */
+	mdelay(2000);
+
+	/*
+	 * The following four functions will return an error if the
+	 * EMAC is already started. We just stopped it by calling
+	 * XTemac_Reset() so we can safely ignore the return values.
+	 */
+	(int) XTemac_SetMacAddress(&lp->Emac, dev->dev_addr);
+	(int) XTemac_SetOptions(&lp->Emac, Options);
+	(int) XTemac_ClearOptions(&lp->Emac, ~Options);
+	Options = XTemac_GetOptions(&lp->Emac);
+	printk(KERN_INFO "%s: XTemac: Options: 0x%x\n", dev->name, Options);
+
+	/* The reset also reset the PHY; re-sync the MAC speed with it */
+	set_mac_speed(lp);
+
+	if (XTemac_mIsSgDma(&lp->Emac)) {	/* SG DMA mode */
+		/*
+		 * The following 2 functions will return an error if
+		 * we are not doing scatter-gather DMA or if the EMAC is
+		 * already started. We just checked that we are indeed
+		 * doing scatter-gather and we just stopped the EMAC so
+		 * we can safely ignore the return values.
+		 */
+		(int) XTemac_IntrSgCoalSet(&lp->Emac, XTE_RECV, RxThreshold,
+					   RxWaitBound);
+		(int) XTemac_IntrSgCoalSet(&lp->Emac, XTE_SEND, TxThreshold,
+					   TxWaitBound);
+
+		/* Enable both SEND and RECV interrupts */
+		XTemac_IntrSgEnable(&lp->Emac, XTE_SEND | XTE_RECV);
+	}
+	else {	/* FIFO interrupt mode */
+		XTemac_IntrFifoEnable(&lp->Emac, XTE_RECV | XTE_SEND);
+	}
+
+	/* A deferred skb from before the reset cannot be sent anymore; drop it */
+	if (lp->deferred_skb) {
+		dev_kfree_skb_any(lp->deferred_skb);
+		lp->deferred_skb = NULL;
+		lp->stats.tx_errors++;
+	}
+
+	/*
+	 * XTemac_Start returns an error when: if configured for
+	 * scatter-gather DMA and a descriptor list has not yet been created
+	 * for the send or receive channel, or if no receive buffer descriptors
+	 * have been initialized. Those are not happening. so ignore the returned
+	 * result checking.
+	 */
+	(int) XTemac_Start(&lp->Emac);
+
+	/* We're all ready to go. Start the queue in case it was stopped. */
+	netif_wake_queue(dev);
+}
+
+/*
+ * Read the link state from the PHY over MDIO.
+ *
+ * The PHY registers read here (BMCR, BMSR) should be standard registers
+ * in all PHY chips.
+ *
+ * Outputs: *duplex  - always FULL_DUPLEX (the BMCR duplex bit is not
+ *                     examined by this driver)
+ *          *linkup  - non-zero when BMSR reports link up
+ * Returns 0 on success, -1 if either MDIO read fails.
+ */
+static int get_phy_status(struct net_device *dev, DUPLEX * duplex, int *linkup)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	u16 reg;
+	int xs;
+
+	/* Read BMCR; the value itself is unused, the read doubles as a
+	 * check that MDIO access to the PHY works at all. */
+	xs = XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &reg);
+	if (xs != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: XTemac: could not read PHY control register; error %d\n",
+		       dev->name, xs);
+		return -1;
+	}
+	*duplex = FULL_DUPLEX;
+
+	xs = XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &reg);
+	if (xs != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: XTemac: could not read PHY status register; error %d\n",
+		       dev->name, xs);
+		return -1;
+	}
+	*linkup = (reg & BMSR_LSTATUS) != 0;
+
+	return 0;
+}
+
+/*
+ * Periodic link monitor (timer callback).
+ *
+ * Serves two purposes: keeping the EMAC's duplex setting in sync with
+ * the PHY's, and keeping the system apprised of the link state. Note
+ * that this driver does not configure the PHY: either the PHY should be
+ * configured for auto-negotiation or it should be handled by something
+ * like mii-tool. Re-arms itself every two seconds until a PHY read fails.
+ */
+static void poll_gmii(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct net_local *lp;
+	DUPLEX duplex;
+	int link_up;
+	int had_carrier;
+	unsigned long flags;
+
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	lp = (struct net_local *) dev->priv;
+
+	/* Ask the PHY what is going on; on MDIO failure stop monitoring
+	 * for good (the timer is not re-armed). */
+	if (get_phy_status(dev, &duplex, &link_up) != 0) {
+		printk(KERN_ERR "%s: XTemac: terminating link monitoring.\n",
+		       dev->name);
+		spin_unlock_irqrestore(&XTE_spinlock, flags);
+		return;
+	}
+
+	had_carrier = netif_carrier_ok(dev) != 0;
+
+	/* Tell the stack about carrier transitions only. */
+	if (link_up != had_carrier) {
+		if (link_up) {
+			printk(KERN_INFO
+			       "%s: XTemac: PHY Link carrier restored.\n",
+			       dev->name);
+			netif_carrier_on(dev);
+		} else {
+			printk(KERN_INFO "%s: XTemac: PHY Link carrier lost.\n",
+			       dev->name);
+			netif_carrier_off(dev);
+		}
+	}
+
+	/* Re-arm ourselves to run again in two seconds. */
+	lp->phy_timer.expires = jiffies + 2 * HZ;
+	add_timer(&lp->phy_timer);
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+}
+
+/*
+ * Top-level TEMAC interrupt handler registered with the OS.
+ *
+ * It simply forwards to the Xilinx OS-independent service routine.
+ * FIFO and scatter-gather modes use different service routines, so the
+ * right one was stored in lp->Isr at setup time; that routine invokes
+ * whatever callbacks we registered for the various conditions.
+ */
+static irqreturn_t xenet_interrupt(int irq, void *dev_id)
+{
+	struct net_local *lp =
+	    (struct net_local *) ((struct net_device *) dev_id)->priv;
+
+	/* Dispatch to the mode-specific service routine. */
+	lp->Isr(&lp->Emac);
+
+	/* The service routines do not report a status, so unconditionally
+	 * claim the interrupt. */
+	return IRQ_HANDLED;
+}
+
+/*
+ * Open the network device: program the MAC address, select options,
+ * hook up the interrupt, sync the MAC speed with the PHY, start the
+ * TEMAC, and kick off the PHY monitoring timer.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int xenet_open(struct net_device *dev)
+{
+	struct net_local *lp;
+	u32 Options;
+	unsigned long flags;
+	int irq_requested = 0;	/* did we request_irq() below? */
+
+	/*
+	 * Just to be safe, stop TX queue and the device first. If the device is
+	 * already stopped, an error will be returned. In this case, we don't
+	 * really care.
+	 */
+	netif_stop_queue(dev);
+	spin_lock_irqsave(&XTE_spinlock, flags);
+	lp = (struct net_local *) dev->priv;
+	XTemac_Stop(&lp->Emac);
+
+	/* Set the MAC address each time opened. */
+	if (XTemac_SetMacAddress(&lp->Emac, dev->dev_addr) != XST_SUCCESS) {
+		printk(KERN_ERR "%s: XTemac: could not set MAC address.\n",
+		       dev->name);
+		spin_unlock_irqrestore(&XTE_spinlock, flags);
+		return -EIO;
+	}
+
+	/*
+	 * If the device is not configured for polled mode, connect to the
+	 * interrupt controller and enable interrupts. Currently, there
+	 * isn't any code to set polled mode, so this check is probably
+	 * superfluous.
+	 */
+	Options = XTemac_GetOptions(&lp->Emac);
+	Options &= ~XTE_SGEND_INT_OPTION;
+	Options &= ~XTE_REPORT_RXERR_OPTION;
+	Options |= XTE_FLOW_CONTROL_OPTION;
+	Options |= XTE_JUMBO_OPTION;
+#if XTE_AUTOSTRIPPING
+	Options |= XTE_FCS_STRIP_OPTION;
+#endif
+
+	/* Set/ClearOptions only fail on a started device; we just stopped
+	 * it, so the return values can be safely ignored. */
+	(int) XTemac_SetOptions(&lp->Emac, Options);
+	(int) XTemac_ClearOptions(&lp->Emac, ~Options);
+	Options = XTemac_GetOptions(&lp->Emac);
+	printk(KERN_INFO "%s: XTemac: Options: 0x%x\n", dev->name, Options);
+
+	/* Register interrupt handler */
+	if ((Options & XTE_POLLED_OPTION) == 0) {
+		int retval;
+
+		/* Grab the IRQ */
+		retval = request_irq(dev->irq, &xenet_interrupt, 0, dev->name,
+				     dev);
+		if (retval) {
+			printk(KERN_ERR
+			       "%s: XTemac: could not allocate interrupt %d.\n",
+			       dev->name, dev->irq);
+			spin_unlock_irqrestore(&XTE_spinlock, flags);
+			return retval;
+		}
+		irq_requested = 1;
+	}
+
+	/* give the system enough time to establish a link */
+	mdelay(2000);
+
+	set_mac_speed(lp);
+
+	INIT_LIST_HEAD(&(lp->rcv));
+	INIT_LIST_HEAD(&(lp->xmit));
+
+	/* Enable interrupts if not in polled mode */
+	if ((Options & XTE_POLLED_OPTION) == 0) {
+		if (!XTemac_mIsSgDma(&lp->Emac)) {	/*fifo direct interrupt driver mode */
+			XTemac_IntrFifoEnable(&lp->Emac, XTE_RECV | XTE_SEND);
+		}
+		else {	/* SG DMA mode */
+			XTemac_IntrSgEnable(&lp->Emac, XTE_SEND | XTE_RECV);
+		}
+	}
+
+	/* Start TEMAC device */
+	if (XTemac_Start(&lp->Emac) != XST_SUCCESS) {
+		printk(KERN_ERR "%s: XTemac: could not start device.\n",
+		       dev->name);
+		/* Only release the IRQ if it was actually requested above;
+		 * in polled mode there is nothing to free. */
+		if (irq_requested)
+			free_irq(dev->irq, dev);
+		spin_unlock_irqrestore(&XTE_spinlock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+	/* Report the interrupt coalescing settings in SG DMA mode. */
+	if (XTemac_mIsSgDma(&lp->Emac)) {
+		u16 threshold_s, timer_s, threshold_r, timer_r;
+
+		(int) XTemac_IntrSgCoalGet(&lp->Emac, XTE_SEND, &threshold_s,
+					   &timer_s);
+		(int) XTemac_IntrSgCoalGet(&lp->Emac, XTE_RECV, &threshold_r,
+					   &timer_r);
+		printk(KERN_INFO
+		       "%s: XTemac: Send Threshold = %d, Receive Threshold = %d\n",
+		       dev->name, threshold_s, threshold_r);
+		printk(KERN_INFO
+		       "%s: XTemac: Send Wait bound = %d, Receive Wait bound = %d\n",
+		       dev->name, timer_s, timer_r);
+	}
+
+	/* We're ready to go. */
+	netif_start_queue(dev);
+
+	/* Set up the PHY monitoring timer. Initialize the timer before
+	 * filling in expires/data/function, per the timer API convention. */
+	init_timer(&lp->phy_timer);
+	lp->phy_timer.expires = jiffies + 2 * HZ;
+	lp->phy_timer.data = (unsigned long) dev;
+	lp->phy_timer.function = &poll_gmii;
+	add_timer(&lp->phy_timer);
+	return 0;
+}
+
+/*
+ * Close the network device: stop the PHY timer, stop the hardware,
+ * release the IRQ and take this adapter off the deferred work queues.
+ */
+static int xenet_close(struct net_device *dev)
+{
+	struct net_local *lp;
+	unsigned long flags, flags_reset;
+
+	lp = (struct net_local *) dev->priv;
+
+	/* Shut down the PHY monitoring timer. This must happen BEFORE
+	 * taking XTE_spinlock: poll_gmii() acquires that same lock, so
+	 * waiting for a running timer handler while holding the lock with
+	 * interrupts disabled could deadlock on SMP. */
+	del_timer_sync(&lp->phy_timer);
+
+	spin_lock_irqsave(&XTE_spinlock, flags_reset);
+
+	/* Stop Send queue */
+	netif_stop_queue(dev);
+
+	/* Now we could stop the device */
+	XTemac_Stop(&lp->Emac);
+
+	/*
+	 * If not in polled mode, free the interrupt. Currently, there
+	 * isn't any code to set polled mode, so this check is probably
+	 * superfluous.
+	 */
+	if ((XTemac_GetOptions(&lp->Emac) & XTE_POLLED_OPTION) == 0)
+		free_irq(dev->irq, dev);
+
+	spin_unlock_irqrestore(&XTE_spinlock, flags_reset);
+
+	/* Remove this adapter from the bottom-half work queues. */
+	spin_lock_irqsave(&receivedQueueSpin, flags);
+	list_del(&(lp->rcv));
+	spin_unlock_irqrestore(&receivedQueueSpin, flags);
+
+	spin_lock_irqsave(&sentQueueSpin, flags);
+	list_del(&(lp->xmit));
+	spin_unlock_irqrestore(&sentQueueSpin, flags);
+
+	return 0;
+}
+
+/* Return the interface statistics kept in the driver-private area. */
+static struct net_device_stats *xenet_get_stats(struct net_device *dev)
+{
+	return &((struct net_local *) dev->priv)->stats;
+}
+
+/*
+ * Validate and apply a new MTU. The resulting frame (MTU + header +
+ * trailer) must hold at least one payload byte and must not exceed the
+ * maximum frame size the hardware was configured with.
+ */
+static int xenet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+#ifdef CONFIG_XILINX_GIGE_VLAN
+	int head_size = XTE_HDR_VLAN_SIZE;	/* header incl. VLAN tag */
+#else
+	int head_size = XTE_HDR_SIZE;
+#endif
+	int max_frame = new_mtu + head_size + XTE_TRL_SIZE;
+	int min_frame = 1 + head_size + XTE_TRL_SIZE;
+
+	if (max_frame < min_frame || max_frame > lp->max_frame_size)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;	/* change mtu in net_device structure */
+	return 0;
+}
+
+/*
+ * hard_start_xmit routine for FIFO (non-DMA) mode.
+ *
+ * If the TX FIFO has no room for the frame, the skb is parked in
+ * lp->deferred_skb, the queue is stopped, and FifoSendHandler() will
+ * retry from interrupt context. Returns 0 on success/defer, -EIO on a
+ * FIFO write/send failure (after resetting the hardware).
+ */
+static int xenet_FifoSend(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_local *lp;
+	unsigned int len;
+	int result;
+	unsigned long flags, fifo_free_bytes;
+
+	/* The following lock is used to protect GetFreeBytes, FifoWrite
+	 * and FifoSend sequence which could happen from FifoSendHandler
+	 * or other processor in SMP case.
+	 */
+	spin_lock_irqsave(&XTE_tx_spinlock, flags);
+	lp = (struct net_local *) dev->priv;
+	len = skb->len;
+
+	fifo_free_bytes = XTemac_FifoGetFreeBytes(&lp->Emac, XTE_SEND);
+	if (fifo_free_bytes < len) {
+		netif_stop_queue(dev);	/* stop send queue */
+		lp->deferred_skb = skb;	/* buffer the sk_buffer and will send
+					   it in interrupt context */
+		spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+		return 0;
+	}
+
+	/* Write frame data to FIFO */
+	result = XTemac_FifoWrite(&lp->Emac, (void *) skb->data, len,
+				  XTE_END_OF_PACKET);
+	if (result != XST_SUCCESS) {
+		reset(dev, __LINE__);
+		lp->stats.tx_errors++;
+		spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+		return -EIO;
+	}
+
+	/* Initiate transmit */
+	if ((result = XTemac_FifoSend(&lp->Emac, len)) != XST_SUCCESS) {
+		reset(dev, __LINE__);
+		lp->stats.tx_errors++;
+		spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+		return -EIO;
+	}
+	lp->stats.tx_bytes += len;
+	spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+
+	/* The data already went into the FIFO; the skb can be freed here,
+	 * outside the lock. */
+	dev_kfree_skb(skb);	/* free skb */
+	dev->trans_start = jiffies;
+	return 0;
+}
+
+/* Callback function for completed frames sent in FIFO interrupt driven mode.
+ *
+ * Counts the completed frame and, if a frame was deferred for lack of
+ * FIFO space, retries it now that room may have opened up.
+ */
+static void FifoSendHandler(void *CallbackRef)
+{
+	struct net_device *dev;
+	struct net_local *lp;
+	int result;
+	struct sk_buff *skb;
+
+	spin_lock(&XTE_tx_spinlock);
+	dev = (struct net_device *) CallbackRef;
+	lp = (struct net_local *) dev->priv;
+	lp->stats.tx_packets++;
+
+	/*Send out the deferred skb and wake up send queue if a deferred skb exists */
+	if (lp->deferred_skb) {
+
+		skb = lp->deferred_skb;
+		/* If no room for the deferred packet, return */
+		if (XTemac_FifoGetFreeBytes(&lp->Emac, XTE_SEND) < skb->len) {
+			spin_unlock(&XTE_tx_spinlock);
+			return;
+		}
+
+		/* Write frame data to FIFO */
+		result = XTemac_FifoWrite(&lp->Emac, (void *) skb->data,
+					  skb->len, XTE_END_OF_PACKET);
+		if (result != XST_SUCCESS) {
+			reset(dev, __LINE__);
+			lp->stats.tx_errors++;
+			spin_unlock(&XTE_tx_spinlock);
+			return;
+		}
+
+		/* Initiate transmit */
+		if ((result =
+		     XTemac_FifoSend(&lp->Emac, skb->len)) != XST_SUCCESS) {
+			reset(dev, __LINE__);
+			lp->stats.tx_errors++;
+			spin_unlock(&XTE_tx_spinlock);
+			return;
+		}
+
+		/* Update statistics BEFORE freeing the skb: reading
+		 * skb->len after dev_kfree_skb_irq() is a use-after-free. */
+		lp->stats.tx_packets++;
+		lp->stats.tx_bytes += skb->len;
+		dev_kfree_skb_irq(skb);
+		lp->deferred_skb = NULL;
+		dev->trans_start = jiffies;
+		netif_wake_queue(dev);	/* wake up send queue */
+	}
+	spin_unlock(&XTE_tx_spinlock);
+}
+
+#if 0
+/*
+ * These are used for debugging purposes, left here in case they are useful
+ * for further debugging (compiled out; not part of the driver).
+ */
+/* Software checksum of the TX payload starting at the transport header. */
+static unsigned int _xenet_tx_csum(struct sk_buff *skb)
+{
+	unsigned int csum = 0;
+	long csstart = skb->h.raw - skb->data;
+
+	if (csstart != skb->len) {
+		csum = skb_checksum(skb, csstart, skb->len - csstart, 0);
+	}
+
+	return csum;
+}
+
+/* Software checksum over the entire received buffer. */
+static inline unsigned int _xenet_rx_csum(struct sk_buff *skb)
+{
+	return skb_checksum(skb, 0, skb->len, 0);
+}
+#endif
+
+/*
+ * xenet_SgSend_internal is an internal use, send routine.
+ * Any locks that need to be acquired, should be acquired
+ * prior to calling this routine.
+ *
+ * Maps the skb head and each page fragment for DMA, fills one buffer
+ * descriptor per piece, sets up hardware checksum offload when
+ * requested, and commits the descriptors to the SG engine. Returns
+ * XST_SUCCESS, XST_FAILURE (frame dropped), or the SgAlloc error code
+ * (frame deferred to lp->deferred_skb).
+ */
+static int xenet_SgSend_internal(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_local *lp;
+	XDmaBdV3 *bd_ptr;
+	int result;
+	int total_frags;
+	int i;
+	void *virt_addr;
+	size_t len;
+	dma_addr_t phy_addr;
+	XDmaBdV3 *first_bd_ptr;
+	skb_frag_t *frag;
+
+	lp = (struct net_local *) dev->priv;
+
+	/* get skb_shinfo(skb)->nr_frags + 1 buffer descriptors */
+	total_frags = skb_shinfo(skb)->nr_frags + 1;
+
+	/* stats */
+	if (lp->max_frags_in_a_packet < total_frags) {
+		lp->max_frags_in_a_packet = total_frags;
+	}
+
+	if (total_frags < XTE_SEND_BD_CNT) {
+		result = XTemac_SgAlloc(&lp->Emac, XTE_SEND, total_frags,
+					&bd_ptr);
+
+		if (result != XST_SUCCESS) {
+			netif_stop_queue(dev);	/* stop send queue */
+			lp->deferred_skb = skb;	/* buffer the sk_buffer and will send
+						   it in interrupt context */
+			return result;
+		}
+	}
+	else {
+		/* More fragments than the whole BD ring could hold; drop. */
+		dev_kfree_skb(skb);
+		lp->stats.tx_dropped++;
+		printk(KERN_ERR
+		       "%s: XTemac: could not send TX socket buffers (too many fragments).\n",
+		       dev->name);
+		return XST_FAILURE;
+	}
+
+	len = skb_headlen(skb);
+
+	/* get the physical address of the header */
+	phy_addr = (u32) dma_map_single(NULL, skb->data, len, DMA_TO_DEVICE);
+
+	/* get the header fragment, it's in the skb differently */
+	XDmaBdV3_mSetBufAddrLow(bd_ptr, phy_addr);
+	XDmaBdV3_mSetLength(bd_ptr, len);
+	XDmaBdV3_mSetId(bd_ptr, skb);
+	XDmaBdV3_mClearLast(bd_ptr);
+
+	/*
+	 * if tx checksum offloading is enabled, when the ethernet stack
+	 * wants us to perform the checksum in hardware,
+	 * skb->ip_summed is CHECKSUM_PARTIAL. Otherwise skb->ip_summed is
+	 * CHECKSUM_NONE, meaning the checksum is already done, or
+	 * CHECKSUM_UNNECESSARY, meaning checksumming is turned off (e.g.
+	 * loopback interface)
+	 *
+	 * skb->csum is an overloaded value. On send, skb->csum is the offset
+	 * into the buffer (skb->h.raw) to place the csum value. On receive
+	 * this field gets set to the actual csum value, before it's passed up
+	 * the stack.
+	 *
+	 * When we get here, the ethernet stack above will have already
+	 * computed the pseudoheader csum value and have placed it in the
+	 * TCP/UDP header.
+	 *
+	 * The IP header csum has also already been computed and inserted.
+	 *
+	 * Since the IP header with it's own csum should compute to a null
+	 * csum, it should be ok to include it in the hw csum. If it is decided
+	 * to change this scheme, skb should be examined before dma_map_single()
+	 * is called, which flushes the page from the cpu's cache.
+	 *
+	 * skb->data points to the beginning of the whole packet
+	 * skb->h.raw points to the beginning of the ip header
+	 *
+	 */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#if 0
+		{
+			unsigned int csum = _xenet_tx_csum(skb);
+
+			*((unsigned short *) (skb->h.raw + skb->csum)) =
+			    csum_fold(csum);
+			XTemac_mSgSendBdCsumDisable(bd_ptr);
+		}
+#else
+		/* Offload: checksum starts at the transport header and the
+		 * result is stored skb->csum bytes into it. */
+		XTemac_mSgSendBdCsumEnable(bd_ptr);
+		XTemac_mSgSendBdCsumSetup(bd_ptr,
+					  skb->transport_header - skb->data,
+					  (skb->transport_header - skb->data) +
+					  skb->csum);
+#endif
+		lp->tx_hw_csums++;
+	}
+	else {
+		/*
+		 * This routine will do no harm even if hardware checksum capability is
+		 * off.
+		 */
+		XTemac_mSgSendBdCsumDisable(bd_ptr);
+	}
+
+	first_bd_ptr = bd_ptr;
+
+	frag = &skb_shinfo(skb)->frags[0];
+
+	/* One BD per page fragment; only the final BD gets the "last" flag. */
+	for (i = 1; i < total_frags; i++, frag++) {
+		bd_ptr = XTemac_mSgSendBdNext(&lp->Emac, bd_ptr);
+
+		virt_addr =
+		    (void *) page_address(frag->page) + frag->page_offset;
+		phy_addr =
+		    (u32) dma_map_single(NULL, virt_addr, frag->size,
+					 DMA_TO_DEVICE);
+
+		XDmaBdV3_mSetBufAddrLow(bd_ptr, phy_addr);
+		XDmaBdV3_mSetLength(bd_ptr, frag->size);
+		XDmaBdV3_mSetId(bd_ptr, NULL);
+
+		if (i < (total_frags - 1)) {
+			XDmaBdV3_mClearLast(bd_ptr);
+		}
+	}
+
+	XDmaBdV3_mSetLast(bd_ptr);
+
+	/* Enqueue to HW */
+	result = XTemac_SgCommit(&lp->Emac, XTE_SEND, total_frags,
+				 first_bd_ptr);
+	if (result != XST_SUCCESS) {
+		netif_stop_queue(dev);	/* stop send queue */
+		dev_kfree_skb(skb);
+		XDmaBdV3_mSetId(first_bd_ptr, NULL);
+		lp->stats.tx_dropped++;
+		printk(KERN_ERR
+		       "%s: XTemac: could not send commit TX buffer descriptor (%d).\n",
+		       dev->name, result);
+		reset(dev, __LINE__);
+
+		return XST_FAILURE;
+	}
+
+	dev->trans_start = jiffies;
+
+	return XST_SUCCESS;
+}
+
+/* The send function for frames sent in SGDMA mode and TEMAC has TX DRE. */
+static int xenet_SgSend(struct sk_buff *skb, struct net_device *dev)
+{
+	/*
+	 * Serialize the SgAlloc/SgCommit sequence against the
+	 * SgSendHandlerBH bottom half and against other CPUs in the SMP
+	 * case.
+	 */
+	spin_lock_bh(&XTE_tx_spinlock);
+	xenet_SgSend_internal(skb, dev);
+	spin_unlock_bh(&XTE_tx_spinlock);
+
+	return 0;
+}
+
+
+/* The send function for frames sent in SGDMA mode (and no TX DRE is in TEMAC). */
+static int xenet_SgSend_NoDRE(struct sk_buff *skb, struct net_device *dev)
+{
+ int result;
+
+ void *tx_addr;
+ void *cur_addr;
+ dma_addr_t phy_addr;
+ size_t len;
+
+ XDmaBdV3 *bd_ptr;
+ skb_frag_t *frag;
+ int nr_frags;
+ int total_frags;
+ int i;
+
+ struct net_local *lp = (struct net_local *) dev->priv;
+
+ /* Without the DRE hardware engine, DMA transfers must be double word
+ * aligned (8 bytes), front and back. If there are no fragments, and the
+ * main chunk is aligned at the front, let the regular, SgSend handle it.
+ * Otherwise, just go ahead and copy the whole darn thing to the tx ring
+ * buffer before sending it out.
+ *
+ * For better performance the tx rign buffer alignment set in
+ * ALIGNMENT_SEND can be set to 32 which is cache line aligned, on the
+ * PPC405 and PPC440.
+ */
+ if (!skb_is_nonlinear(skb) && (0 == BUFFER_ALIGNSEND(skb->data))) {
+ /* buffer is linear and already aligned nicely. We can send it using
+ * xenet_SgSend(). Done.
+ */
+ return xenet_SgSend(skb, dev);
+ }
+
+ /* The buffer is either nonlinear or not aligned. We have to copy it.
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ total_frags = nr_frags + 1;
+
+ /* stats */
+ lp->realignments++;
+ if (lp->max_frags_in_a_packet < total_frags) {
+ lp->max_frags_in_a_packet = total_frags;
+ }
+
+ /* Copy the skb. Get the address of the next buffer in the ring. Also,
+ * remember the physical address of that buffer for the DMA setup.
+ */
+ cur_addr = lp->tx_buffers[lp->tx_buffers_cur];
+ phy_addr = lp->tx_phys_buffers[lp->tx_buffers_cur];
+
+ /* set up tx_buffers_cur for the next use */
+ lp->tx_buffers_cur++;
+ if (lp->tx_buffers_cur >= XTE_SEND_BD_CNT) {
+ lp->tx_buffers_cur = 0;
+ }
+
+ tx_addr = cur_addr;
+
+ len = skb_headlen(skb);
+
+ cacheable_memcpy(cur_addr, skb->data, len);
+ cur_addr += len;
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 1; i < nr_frags; i++, frag++) {
+ void *p = (void *) page_address(frag->page) + frag->page_offset;
+
+ len = frag->size;
+ cacheable_memcpy(cur_addr, p, len);
+ cur_addr += len;
+ }
+
+ /*
+ * set up the transfer
+ */
+ result = XTemac_SgAlloc(&lp->Emac, XTE_SEND, 1, &bd_ptr);
+
+ if (result != XST_SUCCESS) {
+ netif_stop_queue(dev); /* stop send queue */
+ lp->deferred_skb = skb; /* buffer the sk_buffer and will send
+ it in interrupt context */
+ return result;
+ }
+
+ /* get the header fragment, it's in the skb differently */
+ XDmaBdV3_mSetBufAddrLow(bd_ptr, phy_addr);
+ XDmaBdV3_mSetLength(bd_ptr, len);
+ XDmaBdV3_mSetId(bd_ptr, skb);
+ XDmaBdV3_mClearLast(bd_ptr);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /*
+ * skb->data points to the beginning of the whole packet
+ * skb->h.raw points to the beginning of the ip header
+ * skb->csum, on send, is the offset into the buffer (skb->h.raw)
+ * to place the csum value.
+ * tx_addr is the address where the data is really copied (for
+ * alignment)
+ */
+ XTemac_mSgSendBdCsumEnable(bd_ptr);
+
+ XTemac_mSgSendBdCsumSetup(bd_ptr,
+ (u32) (tx_addr +
+ (skb->transport_header -
+ skb->data)),
+ (u32) (tx_addr +
+ (skb->transport_header -
+ skb->data) + skb->csum));
+ lp->tx_hw_csums++;
+ }
+ else {
+ /*
+ * This routine will do no harm even if hardware checksum capability is
+ * off.
+ */
+ XTemac_mSgSendBdCsumDisable(bd_ptr);
+ }
+ XDmaBdV3_mSetLast(bd_ptr);
+
+ /* Enqueue to HW */
+ result = XTemac_SgCommit(&lp->Emac, XTE_SEND, total_frags, bd_ptr);
+ if (result != XST_SUCCESS) {
+ netif_stop_queue(dev); /* stop send queue */
+ dev_kfree_skb(skb);
+ XDmaBdV3_mSetId(bd_ptr, NULL);
+ lp->stats.tx_dropped++;
+ printk(KERN_ERR
+ "%s: XTemac: could not send commit TX buffer descriptor (%d).\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+
+ return XST_FAILURE;
+ }
+
+ dev->trans_start = jiffies;
+
+ return XST_SUCCESS;
+}
+
+/* The callback function for completed frames sent in SGDMA mode. */
+static void SgSendHandlerBH(unsigned long p);
+static void SgRecvHandlerBH(unsigned long p);
+
+static DECLARE_TASKLET(SgSendBH, SgSendHandlerBH, 0);
+static DECLARE_TASKLET(SgRecvBH, SgRecvHandlerBH, 0);
+
+/*
+ * Bottom half for SG transmit completion.
+ *
+ * Drains every adapter that SgSendHandler() queued on sentQueue: for
+ * each, reclaims processed BDs (unmapping DMA and freeing skbs), frees
+ * the BDs back to the ring, re-enables the SEND interrupt, retries any
+ * deferred frame and wakes the transmit queue.
+ */
+static void SgSendHandlerBH(unsigned long p)
+{
+	struct net_device *dev;
+	struct net_local *lp;
+	XDmaBdV3 *BdPtr, *BdCurPtr;
+	unsigned long len;
+	unsigned long flags;
+	struct sk_buff *skb;
+	dma_addr_t skb_dma_addr;
+	int result = XST_SUCCESS;
+	unsigned int bd_processed, bd_processed_save;
+
+	while (1) {
+		spin_lock_irqsave(&sentQueueSpin, flags);
+		if (list_empty(&sentQueue)) {
+			spin_unlock_irqrestore(&sentQueueSpin, flags);
+			break;
+		}
+
+		lp = list_entry(sentQueue.next, struct net_local, xmit);
+
+		list_del_init(&(lp->xmit));
+		spin_unlock_irqrestore(&sentQueueSpin, flags);
+
+		spin_lock(&XTE_tx_spinlock);
+		dev = lp->ndev;
+		bd_processed_save = 0;
+		while ((bd_processed =
+			XTemac_SgGetProcessed(&lp->Emac, XTE_SEND,
+					      XTE_SEND_BD_CNT, &BdPtr)) > 0) {
+
+			bd_processed_save = bd_processed;
+			BdCurPtr = BdPtr;
+			do {
+				len = XDmaBdV3_mGetLength(BdCurPtr);
+				skb_dma_addr =
+				    (dma_addr_t)
+				    XDmaBdV3_mGetBufAddrLow(BdCurPtr);
+				dma_unmap_single(NULL, skb_dma_addr, len,
+						 DMA_TO_DEVICE);
+
+				/* get ptr to skb */
+				skb = (struct sk_buff *)
+				    XDmaBdV3_mGetId(BdCurPtr);
+				if (skb)
+					dev_kfree_skb(skb);
+
+				/* reset BD id */
+				XDmaBdV3_mSetId(BdCurPtr, NULL);
+
+				lp->stats.tx_bytes += len;
+				/* A frame is complete when this BD carries
+				 * the "last" flag: TEST it with mGetLast --
+				 * the old code called the mSetLast setter
+				 * (on &BdCurPtr!) here by mistake. */
+				if (XDmaBdV3_mGetLast(BdCurPtr)) {
+					lp->stats.tx_packets++;
+				}
+
+				BdCurPtr =
+				    XTemac_mSgSendBdNext(&lp->Emac,
+							 BdCurPtr);
+				bd_processed--;
+			} while (bd_processed > 0);
+
+			result = XTemac_SgFree(&lp->Emac, XTE_SEND,
+					       bd_processed_save, BdPtr);
+			if (result != XST_SUCCESS) {
+				printk(KERN_ERR
+				       "%s: XTemac: SgFree() error %d.\n",
+				       dev->name, result);
+				reset(dev, __LINE__);
+				spin_unlock(&XTE_tx_spinlock);
+				return;
+			}
+		}
+		XTemac_IntrSgEnable(&lp->Emac, XTE_SEND);
+
+		/* Send out the deferred skb if it exists */
+		if ((lp->deferred_skb) && bd_processed_save) {
+			skb = lp->deferred_skb;
+			lp->deferred_skb = NULL;
+
+			result = xenet_SgSend_internal(skb, dev);
+		}
+
+		if (result == XST_SUCCESS) {
+			netif_wake_queue(dev);	/* wake up send queue */
+		}
+		spin_unlock(&XTE_tx_spinlock);
+	}
+}
+
+/*
+ * SG transmit-complete interrupt callback. Queues this adapter for the
+ * SgSendBH tasklet (unless it is already queued) and masks further SEND
+ * interrupts until the bottom half has drained the ring.
+ */
+static void SgSendHandler(void *CallBackRef)
+{
+	struct net_local *lp = (struct net_local *) CallBackRef;
+	struct list_head *entry;
+	int already_queued = 0;
+
+	spin_lock(&sentQueueSpin);
+
+	/* Is this adapter already on the sent queue? */
+	list_for_each(entry, &sentQueue) {
+		if (entry == &lp->xmit) {
+			already_queued = 1;
+			break;
+		}
+	}
+
+	if (!already_queued) {
+		list_add_tail(&lp->xmit, &sentQueue);
+		XTemac_IntrSgDisable(&lp->Emac, XTE_SEND);
+		tasklet_schedule(&SgSendBH);
+	}
+
+	spin_unlock(&sentQueueSpin);
+}
+
+/*
+ * Transmit watchdog: the stack calls this when a TX frame has been
+ * outstanding longer than the timeout. Log it, count it, and reset the
+ * hardware.
+ */
+static void xenet_tx_timeout(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	unsigned long flags;
+
+	/*
+	 * Hold the TX lock with interrupts off so nothing can re-enter
+	 * reset() underneath us.
+	 */
+	spin_lock_irqsave(&XTE_tx_spinlock, flags);
+
+	printk(KERN_ERR
+	       "%s: XTemac: exceeded transmit timeout of %lu ms. Resetting emac.\n",
+	       dev->name, TX_TIMEOUT * 1000UL / HZ);
+	lp->stats.tx_errors++;
+
+	reset(dev, __LINE__);
+
+	spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
+}
+
+/* The callback function for frames received when in FIFO mode.
+ *
+ * Reads one frame length from the hardware, allocates an skb for it,
+ * copies the frame out of the RX FIFO and hands it to the network
+ * stack. If no skb can be allocated the frame data is still drained
+ * into a throwaway buffer so the data FIFO stays in sync with the
+ * length FIFO.
+ */
+static void FifoRecvHandler(void *CallbackRef)
+{
+	struct net_device *dev;
+	struct net_local *lp;
+	struct sk_buff *skb;
+	u32 len;
+	int Result;
+
+#define XTE_RX_SINK_BUFFER_SIZE 1024
+	static u32 rx_buffer_sink[XTE_RX_SINK_BUFFER_SIZE / sizeof(u32)];
+
+	spin_lock(&XTE_rx_spinlock);
+	dev = (struct net_device *) CallbackRef;
+	lp = (struct net_local *) dev->priv;
+
+	Result = XTemac_FifoRecv(&lp->Emac, &len);
+	if (Result != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: XTemac: could not read received packet length, error=%d.\n",
+		       dev->name, Result);
+		lp->stats.rx_errors++;
+		reset(dev, __LINE__);
+		spin_unlock(&XTE_rx_spinlock);
+		return;
+	}
+
+	/* NOTE(review): ALIGNMENT_RECV extra bytes are allocated but no
+	 * skb_reserve() is done here to actually align skb->data -- confirm
+	 * whether alignment matters for FifoRead. */
+	if (!(skb = /*dev_ */ alloc_skb(len + ALIGNMENT_RECV, GFP_ATOMIC))) {
+		/* Couldn't get memory. */
+		lp->stats.rx_dropped++;
+		printk(KERN_ERR
+		       "%s: XTemac: could not allocate receive buffer.\n",
+		       dev->name);
+
+		/* consume data in Xilinx TEMAC RX data fifo so it is sync with RX length fifo */
+		for (; len > XTE_RX_SINK_BUFFER_SIZE;
+		     len -= XTE_RX_SINK_BUFFER_SIZE) {
+			XTemac_FifoRead(&lp->Emac, rx_buffer_sink,
+					XTE_RX_SINK_BUFFER_SIZE,
+					XTE_PARTIAL_PACKET);
+		}
+		XTemac_FifoRead(&lp->Emac, rx_buffer_sink, len,
+				XTE_END_OF_PACKET);
+
+		spin_unlock(&XTE_rx_spinlock);
+		return;
+	}
+
+	/* Read the packet data */
+	Result = XTemac_FifoRead(&lp->Emac, skb->data, len, XTE_END_OF_PACKET);
+	if (Result != XST_SUCCESS) {
+		lp->stats.rx_errors++;
+		dev_kfree_skb_irq(skb);
+		printk(KERN_ERR
+		       "%s: XTemac: could not receive buffer, error=%d.\n",
+		       dev->name, Result);
+		reset(dev, __LINE__);
+		spin_unlock(&XTE_rx_spinlock);
+		return;
+	}
+	lp->stats.rx_packets++;
+	lp->stats.rx_bytes += len;
+	spin_unlock(&XTE_rx_spinlock);
+
+	skb_put(skb, len);	/* Tell the skb how much data we got. */
+	skb->dev = dev;		/* Fill out required meta-data. */
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->ip_summed = CHECKSUM_NONE;	/* no RX csum offload in FIFO mode */
+	netif_rx(skb);		/* Send the packet upstream. */
+}
+
+
+/*
+ * _xenet_SgSetupRecvBuffers allocates as many socket buffers (sk_buff's) as it
+ * can up to the number of free RX buffer descriptors. Then it sets up the RX
+ * buffer descriptors to DMA into the socket_buffers.
+ *
+ * The net_device, dev, indicates on which device to operate for buffer
+ * descriptor allocation.
+ */
+static void _xenet_SgSetupRecvBuffers(struct net_device *dev)
+{
+	struct net_local *lp = (struct net_local *) dev->priv;
+	int free_bd_count = XDmaV3_mSgGetFreeCnt(&(lp->Emac.RecvDma));
+	int num_sk_buffs;
+	struct sk_buff_head sk_buff_list;
+	struct sk_buff *new_skb;
+	u32 new_skb_baddr;
+	XDmaBdV3 *BdPtr, *BdCurPtr;
+	u32 align;
+	int result;
+	int align_max = ALIGNMENT_RECV;
+
+	/* With RX DRE the hardware handles unaligned buffers; no extra
+	 * allocation slack is needed. */
+	if (lp->local_features & LOCAL_FEATURE_RX_DRE) {
+		align_max = 0;
+	}
+
+	/* First gather as many skbs as there are free BDs (stop early on
+	 * allocation failure; we will fill only what we got). */
+	skb_queue_head_init(&sk_buff_list);
+	for (num_sk_buffs = 0; num_sk_buffs < free_bd_count; num_sk_buffs++) {
+		new_skb = alloc_skb(lp->max_frame_size + align_max, GFP_ATOMIC);
+		if (new_skb == NULL) {
+			break;
+		}
+		/*
+		 * I think the XTE_spinlock, and Recv DMA int disabled will protect this
+		 * list as well, so we can use the __ version just fine
+		 */
+		__skb_queue_tail(&sk_buff_list, new_skb);
+	}
+	if (!num_sk_buffs) {
+		printk(KERN_ERR "%s: XTemac: alloc_skb unsuccessful\n",
+		       dev->name);
+		return;
+	}
+
+	/* now we got a bunch o' sk_buffs */
+	result = XTemac_SgAlloc(&lp->Emac, XTE_RECV, num_sk_buffs, &BdPtr);
+	if (result != XST_SUCCESS) {
+		/* we really shouldn't get this */
+		skb_queue_purge(&sk_buff_list);
+		printk(KERN_ERR "%s: XTemac: SgAlloc unsuccessful (%d)\n",
+		       dev->name, result);
+		reset(dev, __LINE__);
+		return;
+	}
+
+	BdCurPtr = BdPtr;
+
+	/* Attach one skb to each allocated BD. */
+	new_skb = skb_dequeue(&sk_buff_list);
+	while (new_skb) {
+		/* make sure we're long-word aligned */
+		if (lp->local_features & LOCAL_FEATURE_RX_DRE) {
+			align = BUFFER_ALIGNRECV(new_skb->data);
+			if (align) {
+				skb_reserve(new_skb, align);
+			}
+		}
+
+		/* Get dma handle of skb->data */
+		new_skb_baddr = (u32) dma_map_single(NULL, new_skb->data,
+						     lp->max_frame_size,
+						     DMA_FROM_DEVICE);
+
+		XDmaBdV3_mSetBufAddrLow(BdCurPtr, new_skb_baddr);
+		XDmaBdV3_mSetLength(BdCurPtr, lp->max_frame_size);
+		XDmaBdV3_mSetId(BdCurPtr, new_skb);
+
+		BdCurPtr = XTemac_mSgRecvBdNext(&lp->Emac, BdCurPtr);
+
+		new_skb = skb_dequeue(&sk_buff_list);
+	}
+
+	/* enqueue RxBD with the attached skb buffers such that it is
+	 * ready for frame reception */
+	result = XTemac_SgCommit(&lp->Emac, XTE_RECV, num_sk_buffs, BdPtr);
+	if (result != XST_SUCCESS) {
+		printk(KERN_ERR
+		       "%s: XTemac: (SgSetupRecvBuffers) XTemac_SgCommit unsuccessful (%d)\n",
+		       dev->name, result);
+		/* NOTE(review): the skbs were already dequeued into the BDs
+		 * above, so this purge runs on an empty list and their DMA
+		 * mappings are not undone -- confirm intended cleanup. */
+		skb_queue_purge(&sk_buff_list);
+		BdCurPtr = BdPtr;
+		while (num_sk_buffs > 0) {
+			XDmaBdV3_mSetId(BdCurPtr, NULL);
+			BdCurPtr = XTemac_mSgRecvBdNext(&lp->Emac, BdCurPtr);
+			num_sk_buffs--;
+		}
+		reset(dev, __LINE__);
+		return;
+	}
+}
+
+/*
+ * Receive bottom half (tasklet).  Drains receivedQueue (populated by
+ * SgRecvHandler); for each queued adapter it harvests the completed RX
+ * buffer descriptors, unmaps each buffer, optionally applies the RX
+ * checksum-offload result, pushes the skb upstream with netif_rx(),
+ * returns the BDs to the driver, replenishes the RX ring, and finally
+ * re-enables the RX scatter-gather interrupt.
+ */
+static void SgRecvHandlerBH(unsigned long p)
+{
+ struct net_device *dev;
+ struct net_local *lp;
+ struct sk_buff *skb;
+ u32 len, skb_baddr;
+ int result;
+ unsigned long flags;
+ XDmaBdV3 *BdPtr, *BdCurPtr;
+ unsigned int bd_processed, bd_processed_saved;
+
+ while (1) {
+ /* Pop one adapter off the work queue; exit when empty. */
+ spin_lock_irqsave(&receivedQueueSpin, flags);
+ if (list_empty(&receivedQueue)) {
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+ break;
+ }
+ lp = list_entry(receivedQueue.next, struct net_local, rcv);
+
+ list_del_init(&(lp->rcv));
+ dev = lp->ndev;
+ spin_unlock_irqrestore(&receivedQueueSpin, flags);
+
+ spin_lock(&XTE_rx_spinlock);
+ if ((bd_processed =
+ XTemac_SgGetProcessed(&lp->Emac, XTE_RECV, XTE_RECV_BD_CNT,
+ &BdPtr)) > 0) {
+
+ bd_processed_saved = bd_processed;
+ BdCurPtr = BdPtr;
+ do {
+ len = XDmaBdV3_mGetLength(BdCurPtr);
+
+ /* get ptr to skb */
+ skb = (struct sk_buff *)
+ XDmaBdV3_mGetId(BdCurPtr);
+
+ /* get and free up dma handle used by skb->data */
+ skb_baddr =
+ (dma_addr_t)
+ XDmaBdV3_mGetBufAddrLow(BdCurPtr);
+ dma_unmap_single(NULL, skb_baddr,
+ lp->max_frame_size,
+ DMA_FROM_DEVICE);
+
+ /* reset ID */
+ XDmaBdV3_mSetId(BdCurPtr, NULL);
+
+ /* setup received skb and send it upstream */
+ skb_put(skb, len); /* Tell the skb how much data we got. */
+ skb->dev = dev;
+
+ /* this routine adjusts skb->data to skip the header */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* default the ip_summed value */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing rx csum offload, set it up */
+ if (((lp->
+ local_features & LOCAL_FEATURE_RX_CSUM) !=
+ 0) &&
+ (skb->protocol ==
+ __constant_htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
+ unsigned int csum;
+
+ /*
+ * This hardware only supports proper checksum calculations
+ * on TCP/UDP packets.
+ *
+ * skb->csum is an overloaded value. On send, skb->csum is
+ * the offset into the buffer (skb->h.raw) to place the
+ * csum value. On receive this field gets set to the actual
+ * csum value, before it's passed up the stack.
+ *
+ * If we set skb->ip_summed to CHECKSUM_PARTIAL, the ethernet
+ * stack above will compute the pseudoheader csum value and
+ * add it to the partial checksum already computed (to be
+ * placed in skb->csum) and verify it.
+ *
+ * Setting skb->ip_summed to CHECKSUM_NONE means that the
+ * checksum didn't verify and the stack will (re)check it.
+ *
+ * Setting skb->ip_summed to CHECKSUM_UNNECESSARY means
+ * that the checksum was verified/assumed to be good and the
+ * stack does not need to (re)check it.
+ *
+ * The ethernet stack above will (re)compute the checksum
+ * under the following conditions:
+ * 1) skb->ip_summed was set to CHECKSUM_NONE
+ * 2) skb->len does not match the length of the ethernet
+ * packet determined by parsing the packet. In this case
+ * the ethernet stack will assume any prior checksum
+ * value was miscomputed and throw it away.
+ * 3) skb->ip_summed was set to CHECKSUM_PARTIAL, skb->csum was
+ * set, but the result does not check out ok by the
+ * ethernet stack.
+ *
+ * If the TEMAC hardware stripping feature is off, each
+ * packet will contain an FCS field which will have been
+ * computed by the hardware checksum operation. This 4 byte
+ * FCS value needs to be subtracted back out of the checksum
+ * value computed by hardware as it's not included in a
+ * normal ethernet packet checksum.
+ *
+ * The minimum transfer packet size over the wire is 64
+ * bytes. If the packet is sent as exactly 64 bytes, then
+ * it probably contains some random padding bytes. It's
+ * somewhat difficult to determine the actual length of the
+ * real packet data, so we just let the stack recheck the
+ * checksum for us.
+ *
+ * After the call to eth_type_trans(), the following holds
+ * true:
+ * skb->data points to the beginning of the ip header
+ */
+ csum = XTemac_mSgRecvBdCsumGet
+ (BdCurPtr);
+
+#if ! XTE_AUTOSTRIPPING
+ if (!lp->stripping) {
+ /* take off the FCS */
+ u16 *data;
+
+ /* FCS is 4 bytes */
+ skb_put(skb, -4);
+
+ data = (u16 *) (&skb->
+ data[skb->len]);
+
+ /* subtract out the FCS from the csum value */
+ csum = csum_sub(csum,
+ *data
+ /* & 0xffff */
+ );
+ data++;
+ csum = csum_sub(csum,
+ *data
+ /* & 0xffff */
+ );
+ }
+#endif
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ lp->rx_hw_csums++;
+ }
+
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+ netif_rx(skb); /* Send the packet upstream. */
+
+ BdCurPtr =
+ XTemac_mSgRecvBdNext(&lp->Emac,
+ BdCurPtr);
+ bd_processed--;
+ } while (bd_processed > 0);
+
+
+ /* give the descriptor back to the driver */
+ result = XTemac_SgFree(&lp->Emac, XTE_RECV,
+ bd_processed_saved, BdPtr);
+ if (result != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: SgFree unsuccessful (%d)\n",
+ dev->name, result);
+ reset(dev, __LINE__);
+ spin_unlock(&XTE_rx_spinlock);
+ return;
+ }
+
+ /* Re-arm the RX ring with fresh buffers. */
+ _xenet_SgSetupRecvBuffers(dev);
+ }
+ XTemac_IntrSgEnable(&lp->Emac, XTE_RECV);
+ spin_unlock(&XTE_rx_spinlock);
+ }
+}
+
+/*
+ * RX scatter-gather interrupt callback.  If this adapter is not already on
+ * receivedQueue, queue it, mask further RX SG interrupts, and schedule the
+ * SgRecvBH tasklet which does the real receive processing.
+ */
+static void SgRecvHandler(void *CallBackRef)
+{
+ struct net_local *lp;
+ struct list_head *cur_lp;
+
+ spin_lock(&receivedQueueSpin);
+ lp = (struct net_local *) CallBackRef;
+ /* Scan for lp: the loop ends with cur_lp == &lp->rcv iff it is
+ * already queued; otherwise cur_lp is back at the list head. */
+ list_for_each(cur_lp, &receivedQueue) {
+ if (cur_lp == &(lp->rcv)) {
+ break;
+ }
+ }
+ if (cur_lp != &(lp->rcv)) {
+ list_add_tail(&lp->rcv, &receivedQueue);
+ XTemac_IntrSgDisable(&lp->Emac, XTE_RECV);
+ tasklet_schedule(&SgRecvBH);
+ }
+ spin_unlock(&receivedQueueSpin);
+}
+
+/* The callback function for errors. */
+/*
+ * Logs the reported error class and the two status words, and resets the
+ * device when status_requires_reset() deems the error fatal.
+ * NOTE(review): Word1/Word2 are u32 but printed with %d, and the trailing
+ * %s argument already starts with ", " while the format has a space before
+ * it -- cosmetic format-string issues, confirm before changing.
+ */
+static void ErrorHandler(void *CallbackRef, int ErrClass, u32 Word1, u32 Word2)
+{
+ struct net_device *dev;
+ struct net_local *lp;
+ int need_reset;
+
+ spin_lock(&XTE_spinlock);
+ dev = (struct net_device *) CallbackRef;
+ lp = (struct net_local *) dev->priv;
+
+ need_reset = status_requires_reset(ErrClass);
+ printk(KERN_ERR "%s: XTemac device error %d (%d, %d) %s\n",
+ dev->name, ErrClass, Word1, Word2,
+ need_reset ? ", resetting device." : "");
+
+ if (need_reset)
+ reset(dev, __LINE__);
+
+ spin_unlock(&XTE_spinlock);
+}
+
+/*
+ * Allocate and initialize the RX/TX buffer descriptor rings.  BD space is
+ * taken from coherent DMA memory, or mapped from BRAM when BD_IN_BRAM is
+ * set.  Template BDs establish the per-ring attributes, the rings are
+ * handed to the DMA driver with XTemac_SgSetSpace(), and the RX ring is
+ * primed with receive buffers.
+ * Returns 0 on success, -1 if BD space cannot be obtained, or -EIO if a
+ * ring cannot be registered.
+ * NOTE(review): error conventions are mixed (-1 vs -EIO) and the BD space
+ * allocated here is not released on the -EIO paths -- confirm the callers
+ * clean up via free_descriptor_skb().
+ */
+static int descriptor_init(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+ int recvsize, sendsize;
+ int dftsize;
+ u32 *recvpoolptr, *sendpoolptr;
+ void *recvpoolphy, *sendpoolphy;
+ int result;
+ XDmaBdV3 bd_tx_template;
+ XDmaBdV3 bd_rx_template;
+ int XferType = XDMAV3_DMACR_TYPE_BFBURST_MASK;
+ int XferWidth = XDMAV3_DMACR_DSIZE_64_MASK;
+
+ /* calc size of descriptor space pool; alloc from non-cached memory */
+ dftsize =
+ XDmaV3_mSgListMemCalc(ALIGNMENT_BD,
+ XTE_RECV_BD_CNT + XTE_SEND_BD_CNT);
+ printk(KERN_INFO "XTemac: buffer descriptor size: %d (0x%0x)\n",
+ dftsize, dftsize);
+
+#if BD_IN_BRAM == 0
+ lp->desc_space = dma_alloc_coherent(NULL, dftsize,
+ &lp->desc_space_handle, GFP_KERNEL);
+#else
+ lp->desc_space_handle = BRAM_BASEADDR;
+ lp->desc_space = ioremap(lp->desc_space_handle, dftsize);
+#endif
+ if (lp->desc_space == 0) {
+ return -1;
+ }
+
+ lp->desc_space_size = dftsize;
+
+ printk(KERN_INFO
+ "XTemac: (buffer_descriptor_init) phy: 0x%x, virt: 0x%x, size: 0x%x\n",
+ lp->desc_space_handle, (unsigned int) lp->desc_space,
+ lp->desc_space_size);
+
+ /* calc size of send and recv descriptor space */
+ recvsize = XDmaV3_mSgListMemCalc(ALIGNMENT_BD, XTE_RECV_BD_CNT);
+ sendsize = XDmaV3_mSgListMemCalc(ALIGNMENT_BD, XTE_SEND_BD_CNT);
+
+ /* RX BDs occupy the front of the pool, TX BDs follow. */
+ recvpoolptr = lp->desc_space;
+ sendpoolptr = (void *) ((u32) lp->desc_space + recvsize);
+
+ recvpoolphy = (void *) lp->desc_space_handle;
+ sendpoolphy = (void *) ((u32) lp->desc_space_handle + recvsize);
+
+ /* set up descriptor spaces using a template */
+
+ /* rx template */
+ /*
+ * Create the ring for Rx descriptors.
+ * The following attributes will be in effect for all RxBDs
+ */
+ XDmaBdV3_mClear(&bd_rx_template);
+ XDmaBdV3_mSetLast(&bd_rx_template); /* 1:1 mapping of BDs to buffers */
+ XDmaBdV3_mSetBufIncrement(&bd_rx_template); /* Buffers exist along incrementing
+ addresses */
+ XDmaBdV3_mSetBdPage(&bd_rx_template, 0); /* Default to 32 bit addressing */
+ XDmaBdV3_mSetBufAddrHigh(&bd_rx_template, 0); /* Default to 32 bit addressing */
+ XDmaBdV3_mSetDevSel(&bd_rx_template, 0); /* Always 0 */
+ XDmaBdV3_mSetTransferType(&bd_rx_template, XferType, XferWidth); /* Data bus
+ attributes */
+
+
+ /* tx template */
+ /*
+ * Create the ring for Tx descriptors. If no Tx DRE then buffers must occupy
+ * a single descriptor, so set the "last" field for all descriptors.
+ */
+ XDmaBdV3_mClear(&bd_tx_template);
+ XDmaBdV3_mUseDre(&bd_tx_template); /* Always use DRE if available */
+ XDmaBdV3_mSetBufIncrement(&bd_tx_template); /* Buffers exist along incrementing
+ addresses */
+ XDmaBdV3_mSetBdPage(&bd_tx_template, 0); /* Default to 32 bit addressing */
+ XDmaBdV3_mSetBufAddrHigh(&bd_tx_template, 0); /* Default to 32 bit addressing */
+ XDmaBdV3_mSetDevSel(&bd_tx_template, 0); /* Always 0 */
+ XDmaBdV3_mSetTransferType(&bd_tx_template, XferType, XferWidth); /* Data bus
+ attributes */
+ XTemac_mSgSendBdCsumDisable(&bd_tx_template); /* Disable csum offload by default */
+ XTemac_mSgSendBdCsumSeed(&bd_tx_template, 0); /* Don't need csum seed feature */
+
+ if (XTemac_mIsTxDre(&lp->Emac) == FALSE) {
+ XDmaBdV3_mSetLast(&bd_tx_template);
+ }
+
+ if ((result = XTemac_SgSetSpace(&lp->Emac, XTE_RECV, (u32) recvpoolphy,
+ (u32) recvpoolptr, ALIGNMENT_BD,
+ XTE_RECV_BD_CNT,
+ &bd_rx_template)) != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XTemac: SgSetSpace RECV ERROR %d\n",
+ dev->name, result);
+ return -EIO;
+ }
+
+ if ((result = XTemac_SgSetSpace(&lp->Emac, XTE_SEND, (u32) sendpoolphy,
+ (u32) sendpoolptr, ALIGNMENT_BD,
+ XTE_SEND_BD_CNT,
+ &bd_tx_template)) != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XTemac: SgSetSpace SEND ERROR %d\n",
+ dev->name, result);
+ return -EIO;
+ }
+
+ /* Prime the RX ring with receive buffers. */
+ _xenet_SgSetupRecvBuffers(dev);
+ return 0;
+}
+
+/*
+ * If DRE is not enabled, allocate a ring buffer to use to aid in transferring
+ * aligned packets for DMA.
+ */
+/*
+ * Allocate the TX bounce buffers used when the hardware lacks TX DRE: one
+ * coherent buffer of max_frame_size + ALIGNMENT_SEND_PERF per send BD, plus
+ * pointer arrays tracking the original, aligned, and physical addresses.
+ * Returns 0 on success, 1 on any allocation failure; all partial
+ * allocations are unwound before returning the error.
+ */
+static int tx_ring_buffer_init(struct net_device *dev, unsigned max_frame_size)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+ int idx;
+
+ lp->tx_buffers_cur = -1;
+
+ /* pre-initialize values. The error handling code relies on those. */
+ lp->tx_buffers = NULL;
+ lp->tx_orig_buffers = NULL;
+ lp->tx_phys_buffers = NULL;
+ idx = -1;
+
+ if (XTemac_mIsTxDre(&lp->Emac) == FALSE) {
+ /* Allocate the space for the buffer pointer array.
+ */
+ lp->tx_orig_buffers = vmalloc(sizeof(void *) * XTE_SEND_BD_CNT);
+ lp->tx_phys_buffers =
+ vmalloc(sizeof(dma_addr_t) * XTE_SEND_BD_CNT);
+ lp->tx_buffers = vmalloc(sizeof(void *) * XTE_SEND_BD_CNT);
+
+ /* Handle allocation error
+ */
+ if ((!lp->tx_orig_buffers) || (!lp->tx_buffers) ||
+ (!lp->tx_phys_buffers)) {
+ printk(KERN_ERR
+ "XTemac: Could not vmalloc descriptor pointer arrays.\n");
+ goto error;
+ }
+
+ /* Now, allocate the actual buffers.
+ */
+ for (idx = 0; idx < XTE_SEND_BD_CNT; idx++) {
+ lp->tx_orig_buffers[idx] = dma_alloc_coherent(NULL,
+ max_frame_size
+ +
+ ALIGNMENT_SEND_PERF,
+ &lp->
+ tx_phys_buffers
+ [idx],
+ GFP_KERNEL);
+ /* Handle allocation error.
+ */
+ if (!lp->tx_orig_buffers[idx]) {
+ printk(KERN_ERR
+ "XTemac: Could not alloc TX buffer %d (%d bytes). "
+ "Cleaning up.\n", idx,
+ max_frame_size + ALIGNMENT_SEND_PERF);
+ goto error;
+ }
+
+ /* tx_buffers holds the aligned address within the
+ * over-allocated original buffer. */
+ lp->tx_buffers[idx] = lp->tx_orig_buffers[idx] +
+ BUFFER_ALIGNSEND_PERF(lp->tx_orig_buffers[idx]);
+ }
+ lp->tx_buffers_cur = 0;
+ }
+ return 0;
+
+ error:
+ /* Check, if buffers have already been allocated.
+ */
+ if (-1 != idx) {
+ /* Yes, free them... Note, idx points to the failed allocation.
+ * Therefore the pre-decrement.
+ */
+ while (--idx >= 0) {
+ dma_free_coherent(NULL,
+ max_frame_size + ALIGNMENT_SEND_PERF,
+ lp->tx_orig_buffers[idx],
+ lp->tx_phys_buffers[idx]);
+ }
+ }
+
+ /* Free allocated buffer pointer arrays if allocated.
+ */
+ if (lp->tx_orig_buffers) {
+ vfree(lp->tx_orig_buffers);
+ }
+ if (lp->tx_phys_buffers) {
+ vfree(lp->tx_phys_buffers);
+ }
+ if (lp->tx_buffers) {
+ vfree(lp->tx_buffers);
+ }
+
+ lp->tx_orig_buffers = NULL;
+ lp->tx_phys_buffers = NULL;
+ lp->tx_buffers = NULL;
+
+ return 1; /* 1 == general error */
+}
+
+/*
+ * Teardown helper: unmap and free every skb still attached to an RX or TX
+ * buffer descriptor, then release the BD space itself (coherent DMA memory,
+ * or the BRAM mapping when BD_IN_BRAM is set).
+ */
+static void free_descriptor_skb(struct net_device *dev)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+ XDmaBdV3 *BdPtr;
+ struct sk_buff *skb;
+ dma_addr_t skb_dma_addr;
+ u32 len, i;
+
+ /* Unmap and free skb's allocated and mapped in descriptor_init() */
+
+ /* Get the virtual address of the 1st BD in the DMA RX BD ring */
+ BdPtr = (XDmaBdV3 *) lp->Emac.RecvDma.BdRing.BaseAddr;
+
+ for (i = 0; i < XTE_RECV_BD_CNT; i++) {
+ skb = (struct sk_buff *) XDmaBdV3_mGetId(BdPtr);
+ if (skb) {
+ skb_dma_addr =
+ (dma_addr_t) XDmaBdV3_mGetBufAddrLow(BdPtr);
+ dma_unmap_single(NULL, skb_dma_addr, lp->max_frame_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ /* find the next BD in the DMA RX BD ring */
+ BdPtr = XTemac_mSgRecvBdNext(&lp->Emac, BdPtr);
+ }
+
+ /* Unmap and free TX skb's that have not had a chance to be freed
+ * in SgSendHandlerBH(). This could happen when TX Threshold is larger
+ * than 1 and TX waitbound is 0
+ */
+
+ /* Get the virtual address of the 1st BD in the DMA TX BD ring */
+ BdPtr = (XDmaBdV3 *) lp->Emac.SendDma.BdRing.BaseAddr;
+
+ for (i = 0; i < XTE_SEND_BD_CNT; i++) {
+ skb = (struct sk_buff *) XDmaBdV3_mGetId(BdPtr);
+ if (skb) {
+ skb_dma_addr =
+ (dma_addr_t) XDmaBdV3_mGetBufAddrLow(BdPtr);
+ len = XDmaBdV3_mGetLength(BdPtr);
+ dma_unmap_single(NULL, skb_dma_addr, len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ /* find the next BD in the DMA TX BD ring */
+ BdPtr = XTemac_mSgSendBdNext(&lp->Emac, BdPtr);
+ }
+
+#if BD_IN_BRAM == 0
+ dma_free_coherent(NULL,
+ lp->desc_space_size,
+ lp->desc_space, lp->desc_space_handle);
+#else
+ iounmap(lp->desc_space);
+#endif
+}
+
+/*
+ * ethtool GSET: report link settings.  Reads BMCR/BMSR/ADVERTISE from the
+ * PHY, reports full duplex over an MII port at the current speed, and (for
+ * SG DMA devices) surfaces the TX/RX coalescing thresholds via
+ * maxtxpkt/maxrxpkt.
+ * NOTE(review): returns -1 rather than a -errno value on PHY read failure,
+ * and gmii_cmd/gmii_advControl are read but never used afterwards.
+ */
+static int
+xenet_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ int ret;
+ struct net_local *lp = (struct net_local *) dev->priv;
+ u32 mac_options;
+ u16 threshold, timer;
+ u16 gmii_cmd, gmii_status, gmii_advControl;
+ int xs;
+
+ memset(ecmd, 0, sizeof(struct ethtool_cmd));
+
+ mac_options = XTemac_GetOptions(&(lp->Emac));
+ xs = XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMCR, &gmii_cmd);
+ if (xs != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: could not read gmii command register; error %d\n",
+ dev->name, xs);
+ return -1;
+ }
+ xs = XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_BMSR, &gmii_status);
+ if (xs != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: could not read gmii status register; error %d\n",
+ dev->name, xs);
+ return -1;
+ }
+
+ xs = XTemac_PhyRead(&lp->Emac, lp->gmii_addr, MII_ADVERTISE,
+ &gmii_advControl);
+ if (xs != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: could not read gmii advertisement control register; error %d\n",
+ dev->name, xs);
+ return -1;
+ }
+
+ ecmd->duplex = DUPLEX_FULL;
+
+ ecmd->supported |= SUPPORTED_MII;
+
+ ecmd->port = PORT_MII;
+
+ ecmd->speed = lp->cur_speed;
+
+ /* Autoneg state is derived from the PHY status register. */
+ if (gmii_status & BMSR_ANEGCAPABLE) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ }
+ if (gmii_status & BMSR_ANEGCOMPLETE) {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ }
+ else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+ ecmd->phy_address = lp->Emac.BaseAddress;
+ ecmd->transceiver = XCVR_INTERNAL;
+ if (XTemac_mIsSgDma(&lp->Emac)) {
+ /* get TX threshold */
+ if ((ret =
+ XTemac_IntrSgCoalGet(&lp->Emac, XTE_SEND, &threshold,
+ &timer))
+ == XST_SUCCESS) {
+ ecmd->maxtxpkt = threshold;
+ }
+ else {
+ return -EIO;
+ }
+
+ /* get RX threshold */
+ if ((ret =
+ XTemac_IntrSgCoalGet(&lp->Emac, XTE_RECV, &threshold,
+ &timer))
+ == XST_SUCCESS) {
+ ecmd->maxrxpkt = threshold;
+ }
+ else {
+ return -EIO;
+ }
+ }
+
+ ecmd->supported |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
+
+ return 0;
+}
+
+/*
+ * ethtool SSET: change link speed.  Only full duplex via the internal
+ * transceiver is supported, and speed must be 10, 100 or 1000.  The PHY is
+ * renegotiated and the MAC reprogrammed only if the speed actually changes.
+ * Returns 0 on success or -EOPNOTSUPP for unsupported combinations.
+ */
+static int
+xenet_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+
+ if ((ecmd->duplex != DUPLEX_FULL) ||
+ (ecmd->transceiver != XCVR_INTERNAL) ||
+ (ecmd->phy_address &&
+ (ecmd->phy_address != lp->Emac.BaseAddress))) {
+ return -EOPNOTSUPP;
+ }
+
+ if ((ecmd->speed != 1000) && (ecmd->speed != 100) &&
+ (ecmd->speed != 10)) {
+ printk(KERN_ERR
+ "%s: XTemac: xenet_ethtool_set_settings speed not supported: %d\n",
+ dev->name, ecmd->speed);
+ return -EOPNOTSUPP;
+ }
+
+ if (ecmd->speed != lp->cur_speed) {
+ renegotiate_speed(dev, ecmd->speed, FULL_DUPLEX);
+ XTemac_SetOperatingSpeed(&lp->Emac, ecmd->speed);
+ lp->cur_speed = ecmd->speed;
+ }
+ return 0;
+}
+
+/*
+ * ethtool -c: report interrupt coalescing settings.  Maps the driver's
+ * threshold/waitbound pairs onto max_coalesced_frames/coalesce_usecs for
+ * both RX and TX.  Returns 0 on success or -EIO on driver error.
+ */
+static int
+xenet_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ int ret;
+ struct net_local *lp = (struct net_local *) dev->priv;
+ u16 threshold, waitbound;
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ if ((ret =
+ XTemac_IntrSgCoalGet(&lp->Emac, XTE_RECV, &threshold, &waitbound))
+ != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XTemac: IntrSgCoalGet error %d\n",
+ dev->name, ret);
+ return -EIO;
+ }
+ ec->rx_max_coalesced_frames = threshold;
+ ec->rx_coalesce_usecs = waitbound;
+
+ if ((ret =
+ XTemac_IntrSgCoalGet(&lp->Emac, XTE_SEND, &threshold, &waitbound))
+ != XST_SUCCESS) {
+ printk(KERN_ERR "%s: XTemac: IntrSgCoalGet error %d\n",
+ dev->name, ret);
+ return -EIO;
+ }
+ ec->tx_max_coalesced_frames = threshold;
+ ec->tx_coalesce_usecs = waitbound;
+
+ return 0;
+}
+
+#if 0
+/*
+ * Debug-only helper (compiled out): dump a BD ring's bookkeeping fields and
+ * the raw contents of every descriptor in the ring.
+ * NOTE(review): cur_bd_ptr (a pointer) is passed to a %08x conversion; only
+ * valid where pointers are 32-bit.
+ */
+void disp_bd_ring(XDmaV3_BdRing bd_ring)
+{
+ int num_bds = bd_ring.AllCnt;
+ u32 *cur_bd_ptr = bd_ring.BaseAddr;
+ int idx;
+
+ printk("PhysBaseAddr: %p\n", (void *) bd_ring.PhysBaseAddr);
+ printk("BaseAddr: %p\n", (void *) bd_ring.BaseAddr);
+ printk("HighAddr: %p\n", (void *) bd_ring.HighAddr);
+ printk("Length: %d (0x%0x)\n", bd_ring.Length, bd_ring.Length);
+ printk("RunState: %d (0x%0x)\n", bd_ring.RunState, bd_ring.RunState);
+ printk("Separation: %d (0x%0x)\n", bd_ring.Separation,
+ bd_ring.Separation);
+ printk("BD Count: %d\n", bd_ring.AllCnt);
+
+ printk("\n");
+
+ printk("FreeHead: %p\n", (void *) bd_ring.FreeHead);
+ printk("PreHead: %p\n", (void *) bd_ring.PreHead);
+ printk("HwHead: %p\n", (void *) bd_ring.HwHead);
+ printk("HwTail: %p\n", (void *) bd_ring.HwTail);
+ printk("PostHead: %p\n", (void *) bd_ring.PostHead);
+ printk("BdaRestart: %p\n", (void *) bd_ring.BdaRestart);
+
+ printk("Ring Contents:\n");
+ printk("Idx Addr DMASR LSBA BDA Length USR0 USR1 USR5 ID\n");
+ printk("--- -------- -------- -------- -------- -------- -------- -------- -------- --------\n");
+
+ for (idx = 0; idx < num_bds; idx++) {
+ printk("%3d %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ idx, cur_bd_ptr,
+ cur_bd_ptr[XDMAV3_BD_DMASR_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_LSBA_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_BDA_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_LENGTH_OFFSET /
+ sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_USR0_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_USR1_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_USR5_OFFSET / sizeof(*cur_bd_ptr)],
+ cur_bd_ptr[XDMAV3_BD_ID_OFFSET / sizeof(*cur_bd_ptr)]);
+
+ cur_bd_ptr += bd_ring.Separation / sizeof(int);
+ }
+ printk("--------------------------------------- Done ---------------------------------------\n");
+}
+#endif
+
+/*
+ * ethtool -C: program interrupt coalescing for both directions.  The MAC is
+ * stopped (if currently started) around the update and restarted after.
+ * The whole operation runs under XTE_spinlock with interrupts disabled.
+ * Returns 0 on success or -EIO on any driver failure.
+ */
+static int
+xenet_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ int ret;
+ struct net_local *lp;
+ unsigned long flags;
+ int dev_started;
+
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ lp = (struct net_local *) dev->priv;
+
+ if ((dev_started = XTemac_mIsStarted(&lp->Emac)) == TRUE)
+ XTemac_Stop(&lp->Emac);
+
+ if ((ret = XTemac_IntrSgCoalSet(&lp->Emac, XTE_RECV,
+ (u16) (ec->rx_max_coalesced_frames),
+ (u16) (ec->rx_coalesce_usecs))) !=
+ XST_SUCCESS) {
+ printk(KERN_ERR "%s: XTemac: IntrSgCoalSet error %d\n",
+ dev->name, ret);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+
+ if ((ret = XTemac_IntrSgCoalSet(&lp->Emac, XTE_SEND,
+ (u16) (ec->tx_max_coalesced_frames),
+ (u16) (ec->tx_coalesce_usecs))) !=
+ XST_SUCCESS) {
+ printk(KERN_ERR "%s: XTemac: IntrSgCoalSet error %d\n",
+ dev->name, ret);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+
+ /* Restart the MAC only if it was running when we entered. */
+ if (dev_started == TRUE) {
+ if ((ret = XTemac_Start(&lp->Emac)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ }
+
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return 0;
+}
+
+/*
+ * ethtool -g: report RX/TX ring sizes.  The BD rings are fixed at build
+ * time, so max and current pending counts are the same compile-time
+ * constants.  Always returns 0.
+ */
+static int
+xenet_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *erp)
+{
+ memset(erp, 0, sizeof(struct ethtool_ringparam));
+
+ erp->rx_max_pending = XTE_RECV_BD_CNT;
+ erp->tx_max_pending = XTE_SEND_BD_CNT;
+ erp->rx_pending = XTE_RECV_BD_CNT;
+ erp->tx_pending = XTE_SEND_BD_CNT;
+ return 0;
+}
+
+#define EMAC_REGS_N 32
+/* Buffer for an ethtool register dump: the ethtool_regs header followed by
+ * EMAC_REGS_N 16-bit PHY register values. */
+struct mac_regsDump {
+ struct ethtool_regs hd;
+ u16 data[EMAC_REGS_N];
+};
+
+/*
+ * ethtool -d: read EMAC_REGS_N PHY registers into the caller-provided dump
+ * buffer.  Since the get_regs hook returns void, the result code (0 or
+ * -EIO) is passed back through the *ret out-parameter.
+ */
+static void
+xenet_ethtool_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ret)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+ struct mac_regsDump *dump = (struct mac_regsDump *) regs;
+ int i;
+ int r;
+
+ dump->hd.version = 0;
+ dump->hd.len = sizeof(dump->data);
+ memset(dump->data, 0, sizeof(dump->data));
+
+ for (i = 0; i < EMAC_REGS_N; i++) {
+ if ((r =
+ XTemac_PhyRead(&(lp->Emac), lp->gmii_addr, i,
+ &(dump->data[i])))
+ != XST_SUCCESS) {
+ printk(KERN_INFO "%s: XTemac: PhyRead ERROR %d\n",
+ dev->name, r);
+ *(int *) ret = -EIO;
+ return;
+ }
+ }
+
+ *(int *) ret = 0;
+}
+
+/*
+ * ethtool -i: fill in the driver name/version and the size of a register
+ * dump (so user space can size the ETHTOOL_GREGS buffer).  Always returns 0.
+ */
+static int
+xenet_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *ed)
+{
+ memset(ed, 0, sizeof(struct ethtool_drvinfo));
+ strncpy(ed->driver, DRIVER_NAME, sizeof(ed->driver) - 1);
+ strncpy(ed->version, DRIVER_VERSION, sizeof(ed->version) - 1);
+ /* Also tell how much memory is needed for dumping register values */
+ ed->regdump_len = sizeof(u16) * EMAC_REGS_N;
+ return 0;
+}
+
+/*
+ * Handle SIOCETHTOOL requests.  Copies the ethtool command header from user
+ * space and dispatches on ecmd.cmd.  Returns 0 on success, -EOPNOTSUPP for
+ * unimplemented commands, -EFAULT on copy failures, -EIO on device errors.
+ *
+ * Fixes relative to the original: ETHTOOL_GDRVINFO contained the
+ * self-assignment "edrv.cmd = edrv.cmd" (edrv is uninitialized stack), and
+ * ETHTOOL_GREGS / ETHTOOL_GRINGPARAM copied the command id from the
+ * uninitialized edrv as well; all three now take it from ecmd, which was
+ * filled by copy_from_user() above.
+ */
+static int xenet_do_ethtool_ioctl(struct net_device *dev, struct ifreq *rq)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+ struct ethtool_cmd ecmd;
+ struct ethtool_coalesce eco;
+ struct ethtool_drvinfo edrv;
+ struct ethtool_ringparam erp;
+ struct ethtool_pauseparam epp;
+ struct mac_regsDump regs;
+ unsigned long flags;
+ int ret = -EOPNOTSUPP;
+ u32 Options;
+ XTemac_SoftStats stat;
+
+ if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
+ return -EFAULT;
+ switch (ecmd.cmd) {
+ case ETHTOOL_GSET: /* Get setting. No command option needed w/ ethtool */
+ ret = xenet_ethtool_get_settings(dev, &ecmd);
+ if (ret < 0)
+ return -EIO;
+ if (copy_to_user(rq->ifr_data, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ case ETHTOOL_SSET: /* Change setting. Use "-s" command option w/ ethtool */
+ ret = xenet_ethtool_set_settings(dev, &ecmd);
+ break;
+ case ETHTOOL_GPAUSEPARAM: /* Get pause parameter information. Use "-a" w/ ethtool */
+ ret = xenet_ethtool_get_settings(dev, &ecmd);
+ if (ret < 0)
+ return ret;
+ epp.cmd = ecmd.cmd;
+ epp.autoneg = ecmd.autoneg;
+ /* NOTE(review): pause state is derived from the FCS insert
+ * option; this looks like it should be a flow-control option
+ * flag -- confirm against the XTE_*_OPTION definitions. */
+ Options = XTemac_GetOptions(&lp->Emac);
+ if (Options & XTE_FCS_INSERT_OPTION) {
+ epp.rx_pause = 1;
+ epp.tx_pause = 1;
+ }
+ else {
+ epp.rx_pause = 0;
+ epp.tx_pause = 0;
+ }
+ if (copy_to_user
+ (rq->ifr_data, &epp, sizeof(struct ethtool_pauseparam)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ case ETHTOOL_SPAUSEPARAM: /* Set pause parameter. Use "-A" w/ ethtool */
+ return -EOPNOTSUPP; /* TODO: To support in next version */
+ case ETHTOOL_GRXCSUM:{ /* Get rx csum offload info. Use "-k" w/ ethtool */
+ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+ edata.data =
+ (lp->local_features & LOCAL_FEATURE_RX_CSUM) !=
+ 0;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_SRXCSUM:{ /* Set rx csum offload info. Use "-K" w/ ethtool */
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+ return -EFAULT;
+
+ /* Only enable RX csum offload when the hardware has it. */
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ if (edata.data) {
+ if (XTemac_mIsRxCsum(&lp->Emac) == TRUE) {
+ lp->local_features |=
+ LOCAL_FEATURE_RX_CSUM;
+ }
+ }
+ else {
+ lp->local_features &= ~LOCAL_FEATURE_RX_CSUM;
+ }
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GTXCSUM:{ /* Get tx csum offload info. Use "-k" w/ ethtool */
+ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+ edata.data = (dev->features & NETIF_F_IP_CSUM) != 0;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_STXCSUM:{ /* Set tx csum offload info. Use "-K" w/ ethtool */
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+ return -EFAULT;
+
+ /* Only enable TX csum offload when the hardware has it. */
+ if (edata.data) {
+ if (XTemac_mIsTxCsum(&lp->Emac) == TRUE) {
+ dev->features |= NETIF_F_IP_CSUM;
+ }
+ }
+ else {
+ dev->features &= ~NETIF_F_IP_CSUM;
+ }
+
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GSG:{ /* Get ScatterGather info. Use "-k" w/ ethtool */
+ struct ethtool_value edata = { ETHTOOL_GSG };
+
+ edata.data = (dev->features & NETIF_F_SG) != 0;
+ if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
+ return -EFAULT;
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_SSG:{ /* Set ScatterGather info. Use "-K" w/ ethtool */
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, rq->ifr_data, sizeof(edata)))
+ return -EFAULT;
+
+ /* SG requires both TX DRE and SG DMA in hardware. */
+ if (edata.data) {
+ if ((XTemac_mIsTxDre(&lp->Emac) == TRUE) &&
+ (XTemac_mIsSgDma(&lp->Emac) == TRUE)) {
+ dev->features |=
+ NETIF_F_SG | NETIF_F_FRAGLIST;
+ }
+ }
+ else {
+ dev->features &=
+ ~(NETIF_F_SG | NETIF_F_FRAGLIST);
+ }
+
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GCOALESCE: /* Get coalescing info. Use "-c" w/ ethtool */
+ if (!(XTemac_mIsSgDma(&lp->Emac)))
+ break;
+ eco.cmd = ecmd.cmd;
+ ret = xenet_ethtool_get_coalesce(dev, &eco);
+ if (ret < 0) {
+ return -EIO;
+ }
+ if (copy_to_user
+ (rq->ifr_data, &eco, sizeof(struct ethtool_coalesce))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_SCOALESCE: /* Set coalescing info. Use "-C" w/ ethtool */
+ if (!(XTemac_mIsSgDma(&lp->Emac)))
+ break;
+ if (copy_from_user
+ (&eco, rq->ifr_data, sizeof(struct ethtool_coalesce)))
+ return -EFAULT;
+ ret = xenet_ethtool_set_coalesce(dev, &eco);
+ break;
+ case ETHTOOL_GDRVINFO: /* Get driver information. Use "-i" w/ ethtool */
+ edrv.cmd = ecmd.cmd; /* was "edrv.cmd = edrv.cmd" (self-assign) */
+ ret = xenet_ethtool_get_drvinfo(dev, &edrv);
+ if (ret < 0) {
+ return -EIO;
+ }
+ edrv.n_stats = XENET_STATS_LEN;
+ if (copy_to_user
+ (rq->ifr_data, &edrv, sizeof(struct ethtool_drvinfo))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_GREGS: /* Get register values. Use "-d" with ethtool */
+ regs.hd.cmd = ecmd.cmd; /* was edrv.cmd (uninitialized here) */
+ xenet_ethtool_get_regs(dev, &(regs.hd), &ret);
+ if (ret < 0) {
+ return ret;
+ }
+ if (copy_to_user
+ (rq->ifr_data, &regs, sizeof(struct mac_regsDump))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_GRINGPARAM: /* Get RX/TX ring parameters. Use "-g" w/ ethtool */
+ erp.cmd = ecmd.cmd; /* was edrv.cmd (uninitialized here) */
+ ret = xenet_ethtool_get_ringparam(dev, &(erp));
+ if (ret < 0) {
+ return ret;
+ }
+ if (copy_to_user
+ (rq->ifr_data, &erp, sizeof(struct ethtool_ringparam))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ case ETHTOOL_NWAY_RST: /* Restart auto negotiation if enabled. Use "-r" w/ ethtool */
+ return -EOPNOTSUPP; /* TODO: To support in next version */
+ case ETHTOOL_GSTRINGS:{
+ struct ethtool_gstrings gstrings = { ETHTOOL_GSTRINGS };
+ void *addr = rq->ifr_data;
+ char *strings = NULL;
+
+ if (copy_from_user(&gstrings, addr, sizeof(gstrings))) {
+ return -EFAULT;
+ }
+ switch (gstrings.string_set) {
+ case ETH_SS_STATS:
+ gstrings.len = XENET_STATS_LEN;
+ strings = *xenet_ethtool_gstrings_stats;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (copy_to_user(addr, &gstrings, sizeof(gstrings))) {
+ return -EFAULT;
+ }
+ /* The string table follows the gstrings header in user space. */
+ addr += offsetof(struct ethtool_gstrings, data);
+ if (copy_to_user
+ (addr, strings, gstrings.len * ETH_GSTRING_LEN)) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ }
+ case ETHTOOL_GSTATS:{
+ struct {
+ struct ethtool_stats cmd;
+ uint64_t data[XENET_STATS_LEN];
+ } stats = { {
+ ETHTOOL_GSTATS, XENET_STATS_LEN}};
+
+ XTemac_GetSoftStats(&lp->Emac, &stat);
+ stats.data[0] = stat.TxDmaErrors;
+ stats.data[1] = stat.TxPktFifoErrors;
+ stats.data[2] = stat.TxStatusErrors;
+ stats.data[3] = stat.RxRejectErrors;
+ stats.data[4] = stat.RxDmaErrors;
+ stats.data[5] = stat.RxPktFifoErrors;
+ stats.data[6] = stat.FifoErrors;
+ stats.data[7] = stat.IpifErrors;
+ stats.data[8] = stat.Interrupts;
+ stats.data[9] = lp->max_frags_in_a_packet;
+ stats.data[10] = lp->tx_hw_csums;
+ stats.data[11] = lp->rx_hw_csums;
+
+ if (copy_to_user(rq->ifr_data, &stats, sizeof(stats))) {
+ return -EFAULT;
+ }
+ ret = 0;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP; /* All other operations not supported */
+ }
+ return ret;
+}
+
+/*
+ * xenet_ioctl - ioctl entry point for the TEMAC network device.
+ *
+ * Dispatches on cmd:
+ *  - SIOCETHTOOL: delegated to xenet_do_ethtool_ioctl().
+ *  - SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG (and their legacy
+ *    SIOCDEVPRIVATE aliases): MII PHY register access via
+ *    XTemac_PhyRead()/XTemac_PhyWrite().
+ *  - SIOCDEVPRIVATE+3..+6: get/set the SGDMA interrupt coalescing
+ *    packet threshold and waitbound timer for one direction
+ *    (only valid when the device is configured for SGDMA).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int xenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = (struct net_local *) dev->priv;
+
+ /* gmii_ioctl_data has 4 u16 fields: phy_id, reg_num, val_in & val_out */
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
+ /* user-space argument layouts for the private coalescing ioctls */
+ struct {
+ __u16 threshold;
+ __u32 direction;
+ } thr_arg;
+ struct {
+ __u16 waitbound;
+ __u32 direction;
+ } wbnd_arg;
+
+ int ret;
+ unsigned long flags;
+ u16 threshold, timer;
+ int dev_started;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ return xenet_do_ethtool_ioctl(dev, rq);
+ case SIOCGMIIPHY: /* Get address of GMII PHY in use. */
+ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
+ data->phy_id = lp->gmii_addr;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read GMII PHY register. */
+ case SIOCDEVPRIVATE + 1: /* for binary compat, remove in 2.5 */
+ if (data->phy_id > 31 || data->reg_num > 31)
+ return -ENXIO;
+
+ /* NOTE(review): del_timer_sync() may spin waiting for a
+ * running timer handler and is called here while holding
+ * XTE_spinlock with interrupts disabled - confirm this
+ * cannot deadlock on SMP. */
+ /* Stop the PHY timer to prevent reentrancy. */
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ del_timer_sync(&lp->phy_timer);
+
+ ret = XTemac_PhyRead(&lp->Emac, data->phy_id,
+ data->reg_num, &data->val_out);
+
+ /* Start the PHY timer up again. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ if (ret != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: could not read from PHY, error=%d.\n",
+ dev->name, ret);
+ return -EBUSY;
+ }
+ return 0;
+
+ case SIOCSMIIREG: /* Write GMII PHY register. */
+ case SIOCDEVPRIVATE + 2: /* for binary compat, remove in 2.5 */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (data->phy_id > 31 || data->reg_num > 31)
+ return -ENXIO;
+
+ /* NOTE(review): same del_timer_sync()-under-spinlock
+ * concern as the read path above. */
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ /* Stop the PHY timer to prevent reentrancy. */
+ del_timer_sync(&lp->phy_timer);
+
+ ret = XTemac_PhyWrite(&lp->Emac, data->phy_id,
+ data->reg_num, data->val_in);
+
+ /* Start the PHY timer up again. */
+ lp->phy_timer.expires = jiffies + 2 * HZ;
+ add_timer(&lp->phy_timer);
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+
+ if (ret != XST_SUCCESS) {
+ printk(KERN_ERR
+ "%s: XTemac: could not write to PHY, error=%d.\n",
+ dev->name, ret);
+ return -EBUSY;
+ }
+ return 0;
+
+ case SIOCDEVPRIVATE + 3: /* set THRESHOLD */
+ /* NOTE(review): -EFAULT for "not SGDMA" is unusual;
+ * -EOPNOTSUPP would be more conventional - verify callers. */
+ if (!(XTemac_mIsSgDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg)))
+ return -EFAULT;
+
+ /* The device must be stopped while coalescing settings are
+ * changed; it is restarted afterwards if it was running. */
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ if ((dev_started = XTemac_mIsStarted(&lp->Emac)) == TRUE)
+ XTemac_Stop(&lp->Emac);
+
+ /* Read the current settings to preserve the waitbound timer,
+ * then rewrite with the new threshold. */
+ if ((ret = XTemac_IntrSgCoalGet(&lp->Emac, thr_arg.direction,
+ &threshold,
+ &timer)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ if ((ret = XTemac_IntrSgCoalSet(&lp->Emac, thr_arg.direction,
+ thr_arg.threshold,
+ timer)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ if (dev_started == TRUE) {
+ if ((ret = XTemac_Start(&lp->Emac)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ }
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return 0;
+
+ case SIOCDEVPRIVATE + 4: /* set WAITBOUND */
+ if (!(XTemac_mIsSgDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg)))
+ return -EFAULT;
+
+ /* Same stop/modify/restart sequence as set THRESHOLD, but
+ * the existing threshold is preserved and the waitbound
+ * timer is replaced. */
+ spin_lock_irqsave(&XTE_spinlock, flags);
+ if ((dev_started = XTemac_mIsStarted(&lp->Emac)) == TRUE)
+ XTemac_Stop(&lp->Emac);
+
+ if ((ret = XTemac_IntrSgCoalGet(&lp->Emac, wbnd_arg.direction,
+ &threshold,
+ &timer)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ if ((ret =
+ XTemac_IntrSgCoalSet(&lp->Emac, wbnd_arg.direction,
+ threshold,
+ wbnd_arg.waitbound)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ if (dev_started == TRUE) {
+ if ((ret = XTemac_Start(&lp->Emac)) != XST_SUCCESS) {
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return -EIO;
+ }
+ }
+ spin_unlock_irqrestore(&XTE_spinlock, flags);
+ return 0;
+
+ case SIOCDEVPRIVATE + 5: /* get THRESHOLD */
+ if (!(XTemac_mIsSgDma(&lp->Emac)))
+ return -EFAULT;
+
+ /* copy_from_user() supplies the direction; the threshold
+ * field is filled in and copied back to user space. */
+ if (copy_from_user(&thr_arg, rq->ifr_data, sizeof(thr_arg)))
+ return -EFAULT;
+
+ if ((ret = XTemac_IntrSgCoalGet(&lp->Emac, thr_arg.direction,
+ (u16 *) &(thr_arg.threshold),
+ &timer)) != XST_SUCCESS) {
+ return -EIO;
+ }
+ if (copy_to_user(rq->ifr_data, &thr_arg, sizeof(thr_arg))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ case SIOCDEVPRIVATE + 6: /* get WAITBOUND */
+ if (!(XTemac_mIsSgDma(&lp->Emac)))
+ return -EFAULT;
+
+ if (copy_from_user(&wbnd_arg, rq->ifr_data, sizeof(wbnd_arg))) {
+ return -EFAULT;
+ }
+ if ((ret = XTemac_IntrSgCoalGet(&lp->Emac, wbnd_arg.direction,
+ &threshold,
+ (u16 *) &(wbnd_arg.
+ waitbound))) !=
+ XST_SUCCESS) {
+ return -EIO;
+ }
+ if (copy_to_user(rq->ifr_data, &wbnd_arg, sizeof(wbnd_arg))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/******************************************************************************
+ *
+ * NEW FUNCTIONS FROM LINUX 2.6
+ *
+ ******************************************************************************/
+
+/*
+ * Release all resources held by a TEMAC net_device: the SGDMA
+ * descriptor space (if SGDMA is configured and descriptor space was
+ * allocated), the ioremap()ed register window, and the net_device
+ * itself. Safe to call with ndev == NULL.
+ */
+static void xtenet_remove_ndev(struct net_device *ndev)
+{
+ if (ndev) {
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (XTemac_mIsSgDma(&lp->Emac) && (lp->desc_space))
+ free_descriptor_skb(ndev);
+
+ iounmap((void *) (lp->Emac.BaseAddress));
+ free_netdev(ndev);
+ }
+}
+
+/*
+ * Device-model remove hook: unregister the network interface, then
+ * free all of its resources via xtenet_remove_ndev().
+ */
+static int xtenet_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ unregister_netdev(ndev);
+ xtenet_remove_ndev(ndev);
+
+ return 0; /* success */
+}
+
+/* Detect the PHY address by scanning addresses 0 to 31 and
+ * looking at the MII status register (register 1) and assuming
+ * the PHY supports 10Mbps full/half duplex. Feel free to change
+ * this code to match your PHY, or hardcode the address if needed.
+ */
+/* Use MII register 1 (MII status register) to detect PHY */
+#define PHY_DETECT_REG 1
+
+/* Mask used to verify certain PHY features (or register contents)
+ * in the register above:
+ * 0x1000: 10Mbps full duplex support
+ * 0x0800: 10Mbps half duplex support
+ * 0x0008: Auto-negotiation support
+ * All three bits must read back as set for a PHY to be accepted
+ * by detect_phy() below.
+ */
+#define PHY_DETECT_MASK 0x1808
+
+/*
+ * Scan MII addresses 0..31 and return the first address whose
+ * PHY_DETECT_REG (MII status register) reads back successfully, is not
+ * 0xFFFF (floating bus), and has every PHY_DETECT_MASK bit set. On
+ * success all 32 registers of the detected PHY are dumped to the log.
+ * If no PHY is found, a warning is logged and address 0 is returned
+ * as a fallback.
+ */
+static int detect_phy(struct net_local *lp, char *dev_name)
+{
+ int status;
+ u16 phy_reg;
+ u32 phy_addr;
+ int i;
+
+ for (phy_addr = 0; phy_addr <= 31; phy_addr++) {
+ status = XTemac_PhyRead(&lp->Emac, phy_addr, PHY_DETECT_REG,
+ &phy_reg);
+
+ if ((status == XST_SUCCESS) && (phy_reg != 0xFFFF) &&
+ ((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
+ /* Found a valid PHY address */
+ printk(KERN_INFO
+ "%s: XTemac: PHY detected at address %d.\n",
+ dev_name, phy_addr);
+
+ /* Dump all 32 PHY registers, 8 per line, for
+ * diagnostic purposes. */
+ for (i = 0; i < 32; i++) {
+ if ((i % 8) == 0) {
+ if (i != 0)
+ printk("\n");
+ printk(KERN_INFO "%.2x: ", i);
+ }
+ /* Read errors are ignored here; phy_reg then
+ * keeps its previous value in the dump. */
+ XTemac_PhyRead(&lp->Emac, phy_addr, i,
+ &phy_reg);
+ printk(" %.4x", phy_reg);
+ }
+ printk("\n");
+ return phy_addr;
+ }
+ }
+
+ printk(KERN_WARNING
+ "%s: XTemac: No PHY detected. Assuming a PHY at address 0\n",
+ dev_name);
+ return 0; /* default to zero */
+}
+
+/*
+ * Device-model probe hook for the Xilinx TEMAC platform device.
+ *
+ * Allocates the net_device, fetches platform data and the IRQ/memory
+ * resources, maps the registers, initializes the XTemac driver
+ * instance, selects FIFO-direct vs. scatter-gather DMA operation
+ * (installing the matching transmit routine and interrupt handlers),
+ * detects the PHY, fills in the net_device callbacks/features and
+ * finally registers the interface.
+ *
+ * Returns 0 on success or a negative errno; on failure, partially
+ * acquired resources are released via xtenet_remove_ndev().
+ */
+static int xtenet_probe(struct device *dev)
+{
+ int xs;
+ u32 hwid;
+ u32 virt_baddr; /* virtual base address of temac */
+
+ XTemac_Config Config;
+
+ struct resource *r_irq = NULL; /* Interrupt resources */
+ struct resource *r_mem = NULL; /* IO mem resources */
+
+ struct xtemac_platform_data *pdata;
+
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = NULL;
+ struct net_local *lp = NULL;
+
+ int rc = 0;
+
+
+ /* param check */
+ if (!pdev) {
+ printk(KERN_ERR
+ "XTemac: Internal error. Probe called with NULL param.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ pdata = (struct xtemac_platform_data *) pdev->dev.platform_data;
+ if (!pdata) {
+ printk(KERN_ERR "xtemac %d: Couldn't find platform data.\n",
+ pdev->id);
+
+ rc = -ENODEV;
+ goto error;
+ }
+
+ /* Create an ethernet device instance */
+ ndev = alloc_etherdev(sizeof(struct net_local));
+ if (!ndev) {
+ printk(KERN_ERR "xtemac %d: Could not allocate net device.\n",
+ pdev->id);
+ rc = -ENOMEM;
+ goto error;
+ }
+ dev_set_drvdata(dev, ndev);
+
+ /* Get iospace and an irq for the device */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_irq || !r_mem) {
+ printk(KERN_ERR "xtemac %d: IO resource(s) not found.\n",
+ pdev->id);
+ rc = -ENODEV;
+ goto error;
+ }
+ ndev->irq = r_irq->start;
+
+
+ /* Initialize the private data used by XEmac_LookupConfig().
+ * The private data are zeroed out by alloc_etherdev() already.
+ */
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->index = pdev->id;
+
+ /* Setup the Config structure for the XTemac_CfgInitialize() call. */
+ Config.DeviceId = pdev->id;
+ Config.BaseAddress = r_mem->start;
+ Config.RxPktFifoDepth = pdata->rx_pkt_fifo_depth;
+ Config.TxPktFifoDepth = pdata->tx_pkt_fifo_depth;
+ Config.MacFifoDepth = pdata->mac_fifo_depth;
+ Config.IpIfDmaConfig = pdata->dma_mode;
+#ifdef XPAR_TEMAC_0_INCLUDE_RX_CSUM
+ Config.TxDre = pdata->tx_dre;
+ Config.RxDre = pdata->rx_dre;
+ Config.TxCsum = pdata->tx_csum;
+ Config.RxCsum = pdata->rx_csum;
+ Config.PhyType = pdata->phy_type;
+#endif
+// Config.DcrHost = pdata->dcr_host;
+// Config.Dre = pdata->dre;
+
+ /* Get the virtual base address for the device */
+ virt_baddr = (u32) ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+ if (0 == virt_baddr) {
+ printk(KERN_ERR "XTemac: Could not allocate iomem.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+
+ if (XTemac_CfgInitialize(&lp->Emac, &Config, virt_baddr) != XST_SUCCESS) {
+ printk(KERN_ERR "XTemac: Could not initialize device.\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ /* Set the MAC address */
+ /* wgr TODO: Get the MAC address right! */
+ /* NOTE(review): MAC is hard-coded to 01:02:03:04:05:06; byte 0 has
+ * the multicast bit set, which is not a valid unicast station
+ * address - must be replaced with a real per-board address. */
+ ndev->dev_addr[0] = 0x01;
+ ndev->dev_addr[1] = 0x02;
+ ndev->dev_addr[2] = 0x03;
+ ndev->dev_addr[3] = 0x04;
+ ndev->dev_addr[4] = 0x05;
+ ndev->dev_addr[5] = 0x06;
+// -wgr- memcpy(ndev->dev_addr, ((bd_t *) &__res)->bi_enetaddr, 6);
+ if (XTemac_SetMacAddress(&lp->Emac, ndev->dev_addr) != XST_SUCCESS) {
+ /* should not fail right after an initialize */
+ printk(KERN_ERR "XTemac: could not set MAC address.\n");
+ rc = -EIO;
+ goto error;
+ }
+
+
+ /* Allow jumbo frames up to the hardware maximum; clamp the MTU. */
+ lp->max_frame_size = XTE_MAX_JUMBO_FRAME_SIZE;
+ if (ndev->mtu > XTE_JUMBO_MTU)
+ ndev->mtu = XTE_JUMBO_MTU;
+
+
+ if (XTemac_mIsSgDma(&lp->Emac)) {
+ int result;
+
+ /* NOTE(review): this informational message is logged at
+ * KERN_ERR unlike its siblings below - verify intent. */
+ printk(KERN_ERR "XTemac: using sgDMA mode.\n");
+ XTemac_SetHandler(&lp->Emac, XTE_HANDLER_SGSEND, SgSendHandler,
+ lp);
+ XTemac_SetHandler(&lp->Emac, XTE_HANDLER_SGRECV, SgRecvHandler,
+ lp);
+ lp->Isr = XTemac_IntrSgHandler;
+
+ /* Pick the transmit routine that matches the TX DRE
+ * (data realignment engine) capability. */
+ if (XTemac_mIsTxDre(&lp->Emac) == TRUE) {
+ printk(KERN_INFO "XTemac: using TxDRE mode\n");
+ ndev->hard_start_xmit = xenet_SgSend;
+ }
+ else {
+ printk(KERN_INFO "XTemac: not using TxDRE mode\n");
+ ndev->hard_start_xmit = xenet_SgSend_NoDRE;
+ }
+ if (XTemac_mIsRxDre(&lp->Emac) == TRUE) {
+ printk(KERN_INFO "XTemac: using RxDRE mode\n");
+ lp->local_features |= LOCAL_FEATURE_RX_DRE;
+ }
+ else {
+ printk(KERN_INFO "XTemac: not using RxDRE mode\n");
+ lp->local_features &= ~LOCAL_FEATURE_RX_DRE;
+ }
+
+ result = descriptor_init(ndev);
+ if (result) {
+ rc = -EIO;
+ goto error;
+ }
+
+ /* Without TX DRE, a bounce-buffer ring is needed for
+ * unaligned transmit data. */
+ if (XTemac_mIsTxDre(&lp->Emac) == FALSE) {
+ result = tx_ring_buffer_init(ndev, lp->max_frame_size);
+ if (result) {
+ printk(KERN_ERR
+ "XTemac: Could not allocate TX buffers.\n");
+ rc = -EIO;
+ goto error;
+ }
+ }
+
+ /* set the packet threshold and wait bound for both TX/RX directions */
+ /* Failures here are logged but not fatal to the probe. */
+ if ((xs =
+ XTemac_IntrSgCoalSet(&lp->Emac, XTE_SEND, DFT_TX_THRESHOLD,
+ DFT_TX_WAITBOUND)) != XST_SUCCESS) {
+ printk(KERN_ERR
+ "XTemac: could not set SEND pkt threshold/waitbound, ERROR %d",
+ xs);
+ }
+ if ((xs =
+ XTemac_IntrSgCoalSet(&lp->Emac, XTE_RECV, DFT_RX_THRESHOLD,
+ DFT_RX_WAITBOUND)) != XST_SUCCESS) {
+ printk(KERN_ERR
+ "XTemac: Could not set RECV pkt threshold/waitbound ERROR %d",
+ xs);
+ }
+ }
+ else {
+ printk(KERN_INFO
+ "XTemac: using FIFO direct interrupt driven mode.\n");
+ XTemac_SetHandler(&lp->Emac, XTE_HANDLER_FIFORECV,
+ FifoRecvHandler, ndev);
+ XTemac_SetHandler(&lp->Emac, XTE_HANDLER_FIFOSEND,
+ FifoSendHandler, ndev);
+ ndev->hard_start_xmit = xenet_FifoSend;
+ lp->Isr = XTemac_IntrFifoHandler;
+ }
+ XTemac_SetHandler(&lp->Emac, XTE_HANDLER_ERROR, ErrorHandler, ndev);
+
+ /* Scan to find the PHY */
+ lp->gmii_addr = detect_phy(lp, ndev->name);
+
+
+ /* initialize the netdev structure */
+ ndev->open = xenet_open;
+ ndev->stop = xenet_close;
+ ndev->change_mtu = xenet_change_mtu;
+ ndev->get_stats = xenet_get_stats;
+ ndev->flags &= ~IFF_MULTICAST;
+
+ /* TX DRE and SGDMA need to go together for this to work right */
+ if ((XTemac_mIsTxDre(&lp->Emac) == TRUE) &&
+ (XTemac_mIsSgDma(&lp->Emac) == TRUE)) {
+ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ }
+
+ if (XTemac_mIsTxCsum(&lp->Emac) == TRUE) {
+ /*
+ * This hardware only supports proper checksum calculations
+ * on TCP/UDP packets.
+ */
+ ndev->features |= NETIF_F_IP_CSUM;
+ }
+ if (XTemac_mIsRxCsum(&lp->Emac) == TRUE) {
+ lp->local_features |= LOCAL_FEATURE_RX_CSUM;
+ }
+
+ ndev->do_ioctl = xenet_ioctl;
+ ndev->tx_timeout = xenet_tx_timeout;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ /* init the stats */
+ lp->max_frags_in_a_packet = 0;
+ lp->tx_hw_csums = 0;
+ lp->rx_hw_csums = 0;
+
+#if ! XTE_AUTOSTRIPPING
+ lp->stripping =
+ (XTemac_GetOptions(&(lp->Emac)) & XTE_FCS_STRIP_OPTION) != 0;
+#endif
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: Cannot register net device, aborting.\n",
+ ndev->name);
+ goto error; /* rc is already set here... */
+ }
+
+ printk(KERN_INFO
+ "%s: Xilinx TEMAC #%d at 0x%08X mapped to 0x%08X, irq=%d\n",
+ ndev->name, lp->Emac.Config.DeviceId,
+ lp->Emac.Config.BaseAddress, lp->Emac.BaseAddress, ndev->irq);
+
+ /* print h/w id */
+ hwid = XIo_In32((lp->Emac).BaseAddress + XIIF_V123B_RESETR_OFFSET);
+
+ printk(KERN_INFO
+ "%s: XTemac id %d.%d%c, block id %d, type %d\n",
+ ndev->name, (hwid >> 28) & 0xf, (hwid >> 21) & 0x7f,
+ ((hwid >> 16) & 0x1f) + 'a', (hwid >> 16) & 0xff,
+ (hwid >> 0) & 0xff);
+
+ return 0;
+
+ error:
+ if (ndev) {
+ /* NOTE(review): xtenet_remove_ndev() unconditionally calls
+ * iounmap() on lp->Emac.BaseAddress; if we reach here before
+ * ioremap()/CfgInitialize() succeeded, that field may be
+ * uninitialized (zeroed by alloc_etherdev) - verify. */
+ xtenet_remove_ndev(ndev);
+ }
+ return rc;
+}
+
+
+
+/* Platform-bus driver glue: binds devices named DRIVER_NAME to the
+ * probe/remove callbacks defined above. */
+static struct device_driver xtenet_driver = {
+ .name = DRIVER_NAME,
+ .bus = &platform_bus_type,
+
+ .probe = xtenet_probe,
+ .remove = xtenet_remove
+};
+
+/* Module entry point: register the platform driver with the kernel. */
+static int __init xtenet_init(void)
+{
+ /*
+ * No kernel boot options used,
+ * so we just need to register the driver
+ */
+ return driver_register(&xtenet_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit xtenet_cleanup(void)
+{
+ driver_unregister(&xtenet_driver);
+}
+
+module_init(xtenet_init);
+module_exit(xtenet_cleanup);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_selftest.c
+*
+* Self-test and diagnostic functions of the XTemac driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Switched to local link DMA driver
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+
+/************************** Constant Definitions *****************************/
+#define XTE_IPIF_IP_INTR_COUNT 13
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Performs a self-test on the Ethernet device. The test includes:
+* - Run self-test on DMA channel, FIFO, and IPIF components
+*
+* This self-test is destructive. On successful completion, the device is reset
+* and returned to its default configuration. The caller is responsible for
+* re-configuring the device after the self-test is run, and starting it when
+* ready to send and receive frames.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @return
+*
+* - XST_SUCCESS Self-test was successful
+* - XST_FAILURE Self-test failed
+*
+* @note
+* There is the possibility that this function will not return if the hardware is
+* broken (i.e., it never sets the status bit indicating that transmission is
+* done). If this is of concern to the user, the user should provide protection
+* from this problem - perhaps by using a different timer thread to monitor the
+* self-test thread.
+*
+******************************************************************************/
+int XTemac_SelfTest(XTemac *InstancePtr)
+{
+ int Result;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Each sub-test below short-circuits: the first failing component
+ * aborts the self-test with XST_FAILURE. */
+ /* Run self-test on the DMA (if configured) */
+ if (XTemac_mIsSgDma(InstancePtr)) {
+ Result = XDmaV3_SelfTest(&InstancePtr->RecvDma);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+
+ Result = XDmaV3_SelfTest(&InstancePtr->SendDma);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+ }
+
+ /* Run self-test on packet fifos */
+ if (XTemac_mIsFifo(InstancePtr)) {
+ Result = XPacketFifoV200a_SelfTest(&InstancePtr->RecvFifo.Fifo,
+ XPF_V200A_READ_FIFO_TYPE);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+
+ Result = XPacketFifoV200a_SelfTest(&InstancePtr->SendFifo.Fifo,
+ XPF_V200A_WRITE_FIFO_TYPE);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+ }
+
+ /* Run the IPIF self-test (always present, regardless of DMA/FIFO
+ * configuration). */
+ Result = XIpIfV123b_SelfTest(InstancePtr->BaseAddress,
+ XTE_IPIF_IP_INTR_COUNT);
+ if (Result != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+
+ /* Reset the Ethernet MAC to leave it in a known good state */
+ XTemac_Reset(InstancePtr, XTE_NORESET_HARD);
+
+ return (XST_SUCCESS);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xtemac_sgdma.c
+ *
+ * Functions in this file implement scatter-gather DMA frame transfer mode.
+ * See xtemac.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a rmm 06/01/05 First release
+ * 2.00a rmm 11/21/05 Switched to local link DMA driver
+ * rmm 06/22/06 Fixed C++ compiler warnings
+ *
+ * </pre>
+ *
+ ******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xtemac.h"
+#include "xtemac_i.h"
+#include "xdmav3_l.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/*****************************************************************************/
+/**
+ * Allocate a set of BDs from the given SGDMA channel. It is expected the user
+ * will attach buffers and set other DMA transaction parameters to the returned
+ * BDs in preparation to calling XTemac_SgCommit(). The set of BDs returned is
+ * a list starting with the BdPtr and extending for NumBd BDs. The list can be
+ * navigated with macros XTemac_mSgRecvBdNext() for the XTE_RECV channel, and
+ * XTemac_mSgSendBdNext() for the XTE_SEND channel.
+ *
+ * The BDs returned by this function are a segment of the BD ring maintained
+ * by the SGDMA driver. Do not modify BDs past the end of the returned list.
+ * Doing so will cause data corruption and may lead to system instability.
+ *
+ * This function and XTemac_SgCommit() must be called in the correct order. See
+ * xtemac.h for more information on the SGDMA use model.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to address (XTE_SEND or XTE_RECV).
+ * @param NumBd is the number of BDs to allocate.
+ * @param BdPtr is an output parameter, it points to the first BD in the
+ * returned list.
+ *
+ * @return
+ * - XST_SUCCESS if the requested number of BDs was returned.
+ * - XST_INVALID_PARAM if Direction did not specify a valid channel.
+ * - XST_FAILURE if there were not enough free BDs to satisfy the request.
+ *
+ * @note
+ * This function is not thread-safe. The user must provide mutually exclusive
+ * access to this function if there are to be multiple threads that can call it.
+ *
+ ******************************************************************************/
+int XTemac_SgAlloc(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 ** BdPtr)
+{
+ int Status;
+ XDmaV3 *DmaPtr;
+ u32 DgieReg;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BdPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Which channel to address */
+ if (Direction == XTE_RECV) {
+ DmaPtr = &InstancePtr->RecvDma;
+ }
+ else if (Direction == XTE_SEND) {
+ DmaPtr = &InstancePtr->SendDma;
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* XDmaV3_SgBdAlloc() will return either XST_SUCCESS or XST_FAILURE
+ * This is a critical section, prevent interrupts from the device while
+ * the BD ring is being modified.
+ */
+ /* Save the device global interrupt enable (DGIE) state, disable it
+ * around the ring operation, then restore it. Note this masks only
+ * this device's interrupts; it does not exclude other CPUs/threads,
+ * consistent with the documented non-thread-safety above. */
+ DgieReg = XTemac_mGetIpifReg(XTE_DGIE_OFFSET);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, 0);
+ Status = XDmaV3_SgBdAlloc(DmaPtr, NumBd, BdPtr);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, DgieReg);
+ return (Status);
+}
+
+/*****************************************************************************/
+/**
+ * Fully or partially undo a XTemac_SgAlloc() operation. Use this function to
+ * free BDs prior to being given to HW with XTemac_SgCommit().
+ *
+ * An UnAlloc operation may be required if for some reason there is an error
+ * (OS out of resources for example) prior to committing them. The last BD
+ * in the list provided by XTemac_SgAlloc() must be the last BD in the list
+ * provided to XTemac_SgUnAlloc().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to address (XTE_SEND or XTE_RECV).
+ * @param NumBd is the number of BDs to un-allocate.
+ * @param BdPtr is the first BD of the set to un-allocate, from the list
+ *        previously returned by XTemac_SgAlloc().
+ *
+ * @return
+ * - XST_SUCCESS if the requested number of BDs was un-allocated.
+ * - XST_INVALID_PARAM if Direction did not specify a valid channel.
+ * - XST_FAILURE if the BDs could not be un-allocated.
+ *
+ * @note
+ * This function is not thread-safe. The user must provide mutually exclusive
+ * access to this function if there are to be multiple threads that can call it.
+ *
+ ******************************************************************************/
+int XTemac_SgUnAlloc(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 * BdPtr)
+{
+ int Status;
+ XDmaV3 *DmaPtr;
+ u32 DgieReg;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BdPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Which channel to address */
+ if (Direction == XTE_RECV) {
+ DmaPtr = &InstancePtr->RecvDma;
+ }
+ else if (Direction == XTE_SEND) {
+ DmaPtr = &InstancePtr->SendDma;
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* XDmaV3_SgBdUnAlloc() will return either XST_SUCCESS or XST_FAILURE
+ * This is a critical section, prevent interrupts from the device while
+ * the BD ring is being modified (DGIE saved/cleared/restored).
+ */
+ DgieReg = XTemac_mGetIpifReg(XTE_DGIE_OFFSET);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, 0);
+ Status = XDmaV3_SgBdUnAlloc(DmaPtr, NumBd, BdPtr);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, DgieReg);
+ return (Status);
+}
+
+/*****************************************************************************/
+/**
+ * Commit a set of BDs to the SGDMA engine that had been allocated by
+ * XTemac_SgAlloc() and prepared by the user to describe SGDMA transaction(s).
+ *
+ * This function and XTemac_SgAlloc() must be called in the correct order. See
+ * xtemac.h for more information on the SGDMA use model.
+ *
+ * Upon return, the committed BDs go under hardware control. Do not modify BDs
+ * after they have been committed. Doing so may cause data corruption and system
+ * instability.
+ *
+ * This function may be called if the TEMAC device is started or stopped. If
+ * started (see XTemac_Start()), then the BDs may be processed by HW at any
+ * time.
+ *
+ * This function is non-blocking. Notification of error or successful
+ * transmission/reception is done asynchronously through callback functions.
+ *
+ * For transmit (XTE_SEND):
+ *
+ * It is assumed that the upper layer software supplies a correctly formatted
+ * Ethernet frame, including the destination and source addresses, the
+ * type/length field, and the data field.
+ *
+ * For receive (XTE_RECV):
+ *
+ * It is assumed that BDs have an appropriately sized frame buffer attached
+ * that corresponds to the network MTU.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to address (XTE_SEND or XTE_RECV).
+ * @param NumBd is the number of BDs to commit. This is typically the same
+ * value used when the BDs were allocated with XTemac_SgAlloc().
+ * @param BdPtr is the first BD in the set to commit and is typically the
+ * same value returned by XTemac_SgAlloc().
+ *
+ * @return
+ * - XST_SUCCESS if the requested number of BDs was committed to hardware.
+ * - XST_INVALID_PARAM if Direction did not specify a valid channel.
+ * - XST_FAILURE if the last BD in the set does not have its "last" bit
+ * set (see XDmaBdV3_mSetLast()).
+ * - XST_DMA_SG_LIST_ERROR if BdPtr parameter does not reflect the correct
+ * insertion point within the internally maintained BD ring. This error occurs
+ * when this function and XTemac_SgAlloc() are called out of order.
+ *
+ * @note
+ * This function is not thread-safe. The user must provide mutually exclusive
+ * access to this function if there are to be multiple threads that can call it.
+ *
+ ******************************************************************************/
+int XTemac_SgCommit(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 * BdPtr)
+{
+ int Status;
+ XDmaV3 *DmaPtr;
+ u32 DgieReg;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Which channel to address */
+ if (Direction == XTE_RECV) {
+ DmaPtr = &InstancePtr->RecvDma;
+ }
+ else if (Direction == XTE_SEND) {
+ DmaPtr = &InstancePtr->SendDma;
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* XDmaV3_SgBdToHw() will return either XST_SUCCESS, XST_FAILURE, or
+ * XST_DMA_SG_LIST_ERROR
+ *
+ * This is a critical section, prevent interrupts from the device while
+ * the BD ring is being modified (DGIE saved/cleared/restored).
+ */
+ DgieReg = XTemac_mGetIpifReg(XTE_DGIE_OFFSET);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, 0);
+ Status = XDmaV3_SgBdToHw(DmaPtr, NumBd, BdPtr);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, DgieReg);
+ return (Status);
+}
+
+
+/*****************************************************************************/
+/**
+ * Retrieve BDs that have been processed by the SGDMA channel. This function is
+ * called typically after the XTE_HANDLER_SGRECV handler has been invoked for
+ * the receive channel or XTE_HANDLER_SGSEND for the transmit channel.
+ *
+ * The set of BDs returned is a list starting with the BdPtr and extending
+ * for 1 or more BDs (the exact number is the return value of this function).
+ * The list can be navigated with macros XTemac_mSgRecvBdNext() for the
+ * XTE_RECV channel, and XTemac_mSgSendBdNext() for the XTE_SEND channel.
+ * Treat the returned BDs as read-only.
+ *
+ * This function and XTemac_SgFree() must be called in the correct order. See
+ * xtemac.h for more information on the SGDMA use model.
+ *
+ * The last BD in the returned list is guaranteed to have the "Last" bit set
+ * (i.e. XDmaBdV3_IsLast evaluates to true).
+ *
+ * The returned BDs can be examined for the outcome of the SGDMA transaction.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to address (XTE_SEND or XTE_RECV).
+ * @param BdPtr is an output parameter that points to the 1st BD in the returned
+ * list. If no BDs were ready, then this parameter is unchanged.
+ * @param NumBd is an upper limit to the number of BDs to retrieve.
+ *
+ * @return
+ * Number of BDs that are ready for post processing. If the direction parameter
+ * is invalid, then 0 is returned.
+ *
+ * @note
+ * This function is not thread-safe. The user must provide mutually exclusive
+ * access to this function if there are to be multiple threads that can call it.
+ *
+ ******************************************************************************/
+unsigned XTemac_SgGetProcessed(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 ** BdPtr)
+{
+ u32 DgieReg;
+ XDmaV3 *DmaPtr;
+ unsigned Rc;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BdPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Which channel to address */
+ if (Direction == XTE_RECV) {
+ DmaPtr = &InstancePtr->RecvDma;
+ }
+ else if (Direction == XTE_SEND) {
+ DmaPtr = &InstancePtr->SendDma;
+ }
+ else {
+ /* Invalid direction is reported as "0 BDs ready", per the
+ * function header, rather than as an error code. */
+ return (0);
+ }
+
+ /* This is a critical section. Prevent interrupts from the device while
+ * the BD ring is being modified.
+ */
+ DgieReg = XTemac_mGetIpifReg(XTE_DGIE_OFFSET);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, 0);
+
+ /* Extract ready BDs */
+ Rc = XDmaV3_SgBdFromHw(DmaPtr, NumBd, BdPtr);
+
+ /* End critical section */
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, DgieReg);
+
+ /* Rc = number of BDs retrieved (may be 0, at most NumBd) */
+ return (Rc);
+}
+
+
+/*****************************************************************************/
+/**
+ * Free a set of BDs that had been retrieved by XTemac_SgGetProcessed(). If BDs
+ * are not freed, then eventually the channel will run out of BDs to
+ * XTemac_SgAlloc().
+ *
+ * This function and XTemac_SgGetProcessed() must be called in the correct
+ * order. See xtemac.h for more information on the SGDMA use model.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to address (XTE_SEND or XTE_RECV).
+ * @param BdPtr is the first BD in the set to free. This is typically the same
+ * value returned by XTemac_SgGetProcessed().
+ * @param NumBd is the number of BDs to free. This is typically the same value
+ * returned by XTemac_SgGetProcessed().
+ *
+ * @return
+ * - XST_SUCCESS if the requested number of BDs was returned.
+ * - XST_INVALID_PARAM if Direction did not specify a valid channel.
+ * - XST_DMA_SG_LIST_ERROR if BdPtr parameter does not reflect the correct
+ * insertion point within the internally maintained BD ring. This error occurs
+ * when this function and XTemac_SgGetProcessed() are called out of order.
+ *
+ * @note
+ * This function is not thread-safe. The user must provide mutually exclusive
+ * access to this function if there are to be multiple threads that can call it.
+ *
+ ******************************************************************************/
+int XTemac_SgFree(XTemac *InstancePtr, u32 Direction,
+ unsigned NumBd, XDmaBdV3 * BdPtr)
+{
+ u32 DgieReg; /* saved device global interrupt enable state */
+ XDmaV3 *DmaPtr; /* DMA channel selected by Direction */
+ int Status;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Which channel to address */
+ if (Direction == XTE_RECV) {
+ DmaPtr = &InstancePtr->RecvDma;
+ }
+ else if (Direction == XTE_SEND) {
+ DmaPtr = &InstancePtr->SendDma;
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* This is a critical section. Prevent interrupts from the device while
+ * the BD ring is being modified.
+ */
+ DgieReg = XTemac_mGetIpifReg(XTE_DGIE_OFFSET);
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, 0); /* mask device interrupts */
+ Status = XDmaV3_SgBdFree(DmaPtr, NumBd, BdPtr); /* return BDs to the ring */
+ XTemac_mSetIpifReg(XTE_DGIE_OFFSET, DgieReg); /* restore interrupt enable state */
+
+ return (Status);
+}
+
+
+/*****************************************************************************/
+/**
+ * Give the driver memory space to be used for the scatter-gather DMA
+ * descriptor list. This function should only be called once for each channel
+ * during initialization. If a list had already been created, then it is
+ * destroyed and replaced with a new one.
+ *
+ * To increase performance, a BdTemplate parameter is provided to allow the
+ * user to permanently set BD fields in all BDs for this SGDMA channel. For
+ * example, if every BD describes a buffer that will contain a full packet (as
+ * it typically does with receive channels), then XDmaBdV3_mSetLast(BdTemplate)
+ * can be performed prior to calling this function and when it returns every BD
+ * will have the "last" bit set in it's DMACR word. The user will never have to
+ * explicitly set the "last" bit again.
+ *
+ * The following operations can be replicated for the BdTemplate:
+ * - XDmaBdV3_mSetId()
+ * - XDmaBdV3_mSetLast()
+ * - XDmaBdV3_mClearLast()
+ * - XDmaBdV3_mSetBufIncrement()
+ * - XDmaBdV3_mSetBufNoIncrement()
+ * - XDmaBdV3_mSetDevSel()
+ * - XDmaBdV3_mSetBdPage()
+ * - XDmaBdV3_mSetTransferType()
+ * - XDmaBdV3_mSetBufAddrHigh()
+ * - XDmaBdV3_mSetBufAddrLow()
+ * - XTemac_mSgSendBdCsumEnable() -- transmit channel only
+ * - XTemac_mSgSendBdCsumDisable() -- transmit channel only
+ * - XTemac_mSgSendBdCsumSetup() -- transmit channel only
+ * - XTemac_mSgSendBdCsumSeed() -- transmit channel only
+ *
+ * The base address of the memory space must be aligned according to buffer
+ * descriptor requirements (see xtemac.h).
+ *
+ * The size of the memory space is assumed to be big enough to contain BdCount
+ * buffers at the given alignment. If the region is too small, then adjacent
+ * data may be overwritten causing system instability. There are tools in the
+ * DMA driver that help calculate the sizing requirments. See macros
+ * XDmaV3_mSgListCntCalc() and XDmaV3_mSgListMemCalc().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to address.
+ * @param PhysicalAddr is the physical base address of user memory region.
+ * @param VirtualAddr is the virtual base address of the user memory region. If
+ * address translation is not being utilized, then VirtAddr should be
+ * equivalent to PhysAddr.
+ * @param Alignment governs the byte alignment of individual BDs. This function
+ * will enforce a minimum alignment of 8 bytes with no maximum as long as
+ * it is specified as a power of 2.
+ * @param BdCount is the number of BDs to allocate in the memory region. It is
+ * assumed the region is large enough to contain all the BDs.
+ * @param BdTemplate is copied to each BD after the list is created. If the user
+ * does not have a need to replicate any BD fields then this parameter
+ * should be zeroed (XDmaBdV3_mClear()). This parameter will be modified
+ * by this function.
+ *
+ * @return
+ * - XST_SUCCESS if the space was initialized successfully
+ * - XST_DEVICE_IS_STARTED if the device has not been stopped.
+ * - XST_NOT_SGDMA if the MAC is not configured for scatter-gather DMA per
+ * the configuration information contained in XTemac_Config.
+ * - XST_INVALID_PARAM if: 1) Direction is not either XTE_SEND or XTE_RECV;
+ * 2) PhysicalAddr and/or VirtualAddr are not aligned to the given
+ * alignment parameter; 3) Alignment parameter does not meet minimum
+ * requirements of this device; 4) BdCount is 0.
+ * - XST_DMA_SG_LIST_ERROR if the memory segment containing the list spans
+ * over address 0x00000000 in virtual address space.
+ * - XST_NO_FEATURE if the DMA sub-driver discovers that the HW is not SGDMA
+ * capable.
+ * - XST_FAILURE for other failures that shouldn't occur. If this is returned,
+ * then the driver is experiencing a problem that should be reported to
+ * Xilinx.
+ *
+ * @note
+ * If the device is configured for scatter-gather DMA, this function must be
+ * called AFTER the XTemac_Initialize() function because the DMA channel
+ * components must be initialized before the memory space is set.
+ *
+ ******************************************************************************/
+int XTemac_SgSetSpace(XTemac *InstancePtr, u32 Direction,
+ u32 PhysicalAddr, u32 VirtualAddr,
+ u32 Alignment, unsigned BdCount, XDmaBdV3 * BdTemplate)
+{
+ int Status;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BdTemplate != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* Make sure device is ready for this operation */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Must have sgdma */
+ if (!XTemac_mIsSgDma(InstancePtr)) {
+ return (XST_NOT_SGDMA);
+ }
+
+ /* Check alignment */
+ if (Alignment < XTE_PLB_BD_ALIGNMENT) {
+ return (XST_INVALID_PARAM); /* below the device's minimum BD alignment */
+ }
+
+ if (Direction == XTE_SEND) {
+ /* Create the list. This function will return one of XST_SUCCESS,
+ * XST_INVALID_PARAM (for alignment violations), or
+ * XST_DMA_SG_LIST_ERROR (if memory segment spans address 0)
+ */
+ Status = XDmaV3_SgListCreate(&InstancePtr->SendDma,
+ PhysicalAddr, VirtualAddr,
+ Alignment, BdCount);
+ if (Status != XST_SUCCESS) {
+ return (Status);
+ }
+
+ /* Clone the template BD. This should always work. If it does not
+ * then something is seriously wrong
+ */
+ Status = XDmaV3_SgListClone(&InstancePtr->SendDma, BdTemplate);
+ if (Status != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+ else {
+ return (XST_SUCCESS);
+ }
+ }
+ else if (Direction == XTE_RECV) {
+ /* Create the list. This function will return one of XST_SUCCESS,
+ * XST_INVALID_PARAM (for alignment violations), or
+ * XST_DMA_SG_LIST_ERROR (if memory segment spans address 0)
+ */
+ Status = XDmaV3_SgListCreate(&InstancePtr->RecvDma,
+ PhysicalAddr, VirtualAddr,
+ Alignment, BdCount);
+ if (Status != XST_SUCCESS) {
+ return (Status);
+ }
+
+ /* Clone the template BD */
+ Status = XDmaV3_SgListClone(&InstancePtr->RecvDma, BdTemplate);
+ if (Status != XST_SUCCESS) {
+ return (XST_FAILURE);
+ }
+ else {
+ return (XST_SUCCESS);
+ }
+ }
+
+ /* Direction is incorrect */
+ return (XST_INVALID_PARAM);
+}
+
+
+/*****************************************************************************/
+/**
+ * Verify the consistency of the SGDMA BD ring. While the check occurs, the
+ * device is stopped. If any problems are found the device is left stopped.
+ *
+ * Use this function to troubleshoot SGDMA problems.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Direction is the channel to check (XTE_SEND or XTE_RECV)
+ *
+ * @return
+ * - XST_SUCCESS if no problems are found.
+ * - XST_INVALID_PARAM if Direction is not XTE_SEND or XTE_RECV.
+ * - XST_DMA_SG_NO_LIST if the SG list has not yet been setup.
+ * - XST_DMA_BD_ERROR if a BD has been corrupted.
+ * - XST_DMA_SG_LIST_ERROR if the internal data structures of the BD ring are
+ * inconsistent.
+ *
+ * @note
+ * This function is not thread-safe. The user must provide mutually exclusive
+ * access to this function if there are to be multiple threads that can call it.
+ *
+ ******************************************************************************/
+int XTemac_SgCheck(XTemac *InstancePtr, u32 Direction)
+{
+ XDmaV3 *DmaPtr;
+ XDmaBdV3 *BdPtr;
+ unsigned i;
+ int Restart = 0; /* set when we stop a running device below */
+ int Rc;
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /* Select channel to check */
+ if (Direction == XTE_SEND) {
+ DmaPtr = &InstancePtr->SendDma;
+ }
+ else if (Direction == XTE_RECV) {
+ DmaPtr = &InstancePtr->RecvDma;
+ }
+ else {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Stop the device if it is running */
+ if (InstancePtr->IsStarted == XCOMPONENT_IS_STARTED) { /* was XST_DEVICE_IS_STARTED (a status code), which never matched */
+ XTemac_Stop(InstancePtr);
+ Restart = 1;
+ }
+
+ /* Perform check of ring structure using DMA driver routine */
+ Rc = XDmaV3_SgCheck(DmaPtr);
+
+ /* Check BDs for consistency as used by TEMAC */
+ if (Rc == XST_SUCCESS) {
+ /* Verify DMACR is setup for Tx direction */
+ if (Direction == XTE_SEND) {
+ BdPtr = (XDmaBdV3 *) DmaPtr->BdRing.BaseAddr;
+ for (i = 0; i < DmaPtr->BdRing.AllCnt; i++) { /* FIXME: BdPtr is never advanced; only the first BD is checked */
+ if (XDmaV3_mReadBd(BdPtr, XDMAV3_DMACR_OFFSET) &
+ XDMAV3_DMACR_DIR_RX_MASK) {
+ return (XST_DMA_BD_ERROR); /* device intentionally left stopped on error */
+ }
+ }
+ }
+ else { /* XTE_RECV */
+
+ BdPtr = (XDmaBdV3 *) DmaPtr->BdRing.BaseAddr;
+ for (i = 0; i < DmaPtr->BdRing.AllCnt; i++) { /* FIXME: BdPtr is never advanced; only the first BD is checked */
+ if (!
+ (XDmaV3_mReadBd(BdPtr, XDMAV3_DMACR_OFFSET)
+ & XDMAV3_DMACR_DIR_RX_MASK)) {
+ return (XST_DMA_BD_ERROR); /* device intentionally left stopped on error */
+ }
+ }
+ }
+ }
+
+ /* Restart the device if it was stopped by this function */
+ if ((Rc == XST_SUCCESS) && Restart) {
+ XTemac_Start(InstancePtr);
+ }
+
+ return (Rc);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xtemac_stats.c
+*
+* Functions in this file implement statistics related functionality.
+* See xtemac.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 06/01/05 First release
+* 2.00a rmm 11/21/05 Changed copyright
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+
+#include "xtemac.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Get a current copy of the software maintained statistics. See
+* XTemac_SoftStats structure for information on what counters are maintained.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param StatsPtr is an output parameter, and is a pointer to a stats buffer
+* into which the current statistics will be copied.
+*
+******************************************************************************/
+void XTemac_GetSoftStats(XTemac *InstancePtr, XTemac_SoftStats *StatsPtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(StatsPtr != NULL);
+
+ memcpy(StatsPtr, &InstancePtr->Stats, sizeof(XTemac_SoftStats)); /* snapshot current counters into the caller's buffer */
+}
+
+
+/*****************************************************************************/
+/**
+* Zero out the software maintained statistics counters.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+******************************************************************************/
+void XTemac_ClearSoftStats(XTemac *InstancePtr)
+{
+ XASSERT_VOID(InstancePtr != NULL);
+
+ memset(&InstancePtr->Stats, 0, sizeof(XTemac_SoftStats)); /* reset every counter to zero */
+}
config OF_DEVICE
def_bool y
- depends on OF && (SPARC || PPC_OF)
+ depends on OF && (SPARC || PPC_OF || MICROBLAZE)
config OF_GPIO
def_bool y
config SERIAL_UARTLITE
tristate "Xilinx uartlite serial port support"
- depends on PPC32
+ depends on XILINX_DRIVERS
select SERIAL_CORE
help
Say Y here if you want to use the Xilinx uartlite serial controller.
* kind, whether express or implied.
*/
+#undef DEBUG
+
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/console.h>
.verify_port = ulite_verify_port
};
+/**
+ * ulite_get_port - find (or lazily initialize) the uart_port for a port id
+ */
+static struct uart_port * ulite_get_port(int id)
+{
+ struct uart_port *port;
+
+ /* if id = -1; then scan for a free id and use that */
+ if (id < 0) {
+ for (id = 0; id < ULITE_NR_UARTS; id++)
+ if (ulite_ports[id].mapbase == 0)
+ break;
+ }
+
+ if ((id < 0) || (id >= ULITE_NR_UARTS)) {
+ printk(KERN_WARNING "uartlite: invalid id: %i\n", id);
+ return NULL;
+ }
+
+ /* The ID is valid, so get the address of the uart_port structure */
+ port = &ulite_ports[id];
+
+ /* Is the structure already initialized? (non-zero mapbase marks it in use) */
+ if (port->mapbase)
+ return port;
+
+ /* At this point, we've got an empty uart_port struct, initialize it */
+ spin_lock_init(&port->lock);
+ port->membase = NULL; /* registers are mapped later, at probe/console setup */
+ port->fifosize = 16;
+ port->regshift = 2;
+ port->iotype = UPIO_MEM;
+ port->iobase = 1; /* mark port in use */
+ port->ops = &ulite_ops;
+ port->irq = NO_IRQ;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->dev = NULL;
+ port->type = PORT_UNKNOWN;
+ port->line = id;
+
+ return port;
+}
+
/* ---------------------------------------------------------------------
* Console driver operations
*/
spin_unlock_irqrestore(&port->lock, flags);
}
+#if defined(CONFIG_OF)
+static inline u32 __init ulite_console_of_find_device(int id)
+{
+ struct device_node *np;
+ struct resource res;
+ const unsigned int *of_id; /* value of the node's "port-number" property */
+ int rc;
+ const struct of_device_id *matches = ulite_of_match;
+
+ while (matches->compatible[0]) {
+ for_each_compatible_node(np, NULL, matches->compatible) {
+ if (!of_match_node(matches, np))
+ continue;
+
+ of_id = of_get_property(np, "port-number", NULL);
+ if ((!of_id) || (*of_id != id))
+ continue; /* not the requested console port */
+
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc)
+ continue;
+
+ of_node_put(np);
+ return res.start+3; /* uartlite registers start at reg-offset 3 */
+ }
+ matches++;
+ }
+
+ return 0; /* no device-tree node matched this id */
+}
+#else /* CONFIG_OF */
+static inline u32 __init ulite_console_of_find_device(int id) { return 0; }
+#endif /* CONFIG_OF */
+
static int __init ulite_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int bits = 8;
int parity = 'n';
int flow = 'n';
+ u32 base;
- if (co->index < 0 || co->index >= ULITE_NR_UARTS)
- return -EINVAL;
+ /* Find a matching uart port in the device tree */
+ base = ulite_console_of_find_device(co->index);
- port = &ulite_ports[co->index];
-
- /* Has the device been initialized yet? */
- if (!port->mapbase) {
- pr_debug("console on ttyUL%i not present\n", co->index);
+ /* Get the port structure */
+ port = ulite_get_port(co->index);
+ if (!port)
return -ENODEV;
+
+ /* was it initialized for this device? */
+ if (base) {
+ if ((port->mapbase) && (port->mapbase != base)) {
+ pr_debug(KERN_DEBUG "ulite: addr mismatch; %x != %x\n",
+ port->mapbase, base);
+ return -ENODEV; /* port used by another device; bail */
+ }
+ port->mapbase = base;
}
- /* not initialized yet? */
+ if (!port->mapbase)
+ return -ENODEV;
+
+ /* registers mapped yet? */
if (!port->membase) {
- if (ulite_request_port(port))
+ port->membase = ioremap(port->mapbase, ULITE_REGION);
+ if (!port->membase)
return -ENODEV;
}
struct uart_port *port;
int rc;
- /* if id = -1; then scan for a free id and use that */
- if (id < 0) {
- for (id = 0; id < ULITE_NR_UARTS; id++)
- if (ulite_ports[id].mapbase == 0)
- break;
- }
- if (id < 0 || id >= ULITE_NR_UARTS) {
- dev_err(dev, "%s%i too large\n", ULITE_NAME, id);
- return -EINVAL;
+ port = ulite_get_port(id);
+ if (!port) {
+ dev_err(dev, "Cannot get uart_port structure\n");
+ return -ENODEV;
}
- if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) {
- dev_err(dev, "cannot assign to %s%i; it is already in use\n",
- ULITE_NAME, id);
- return -EBUSY;
+ /* was it initialized for this device? */
+ if ((port->mapbase) && (port->mapbase != base)) {
+ pr_debug(KERN_DEBUG "ulite: addr mismatch; %x != %x\n",
+ port->mapbase, base);
+ return -ENODEV;
}
- port = &ulite_ports[id];
-
- spin_lock_init(&port->lock);
- port->fifosize = 16;
- port->regshift = 2;
- port->iotype = UPIO_MEM;
- port->iobase = 1; /* mark port in use */
port->mapbase = base;
- port->membase = NULL;
- port->ops = &ulite_ops;
port->irq = irq;
- port->flags = UPF_BOOT_AUTOCONF;
port->dev = dev;
- port->type = PORT_UNKNOWN;
- port->line = id;
-
dev_set_drvdata(dev, port);
/* Register the port */
config FB_XILINX
tristate "Xilinx frame buffer support"
- depends on FB && XILINX_VIRTEX
+ depends on FB && XILINX_DRIVERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
If unsure, say N.
+config XILINX_CONSOLE
+ depends on SP3E && HAVE_XILINX_OPB_COLOR_VIDEO_CTRL
+ tristate "XILINX text console"
+ help
+ Say Y here if you want to enable the XILINX text console.
+
config SGI_NEWPORT_CONSOLE
tristate "SGI Newport Console support"
depends on SGI_IP22
obj-$(CONFIG_PROM_CONSOLE) += promcon.o promcon_tbl.o
obj-$(CONFIG_STI_CONSOLE) += sticon.o sticore.o font.o
obj-$(CONFIG_VGA_CONSOLE) += vgacon.o
+obj-$(CONFIG_XILINX_CONSOLE) += xilcon.o
obj-$(CONFIG_MDA_CONSOLE) += mdacon.o
obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon.o bitblit.o font.o softcursor.o
ifeq ($(CONFIG_FB_TILEBLITTING),y)
--- /dev/null
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/kd.h>
+#include <linux/slab.h>
+#include <linux/vt_kern.h>
+#include <linux/vt_buffer.h>
+#include <linux/selection.h>
+#include <linux/spinlock.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/xparameters.h>
+
+/*
+Character color mapping used by this driver
+-------------------------------------------
+'a' red
+'b' green
+'c' yellow
+'d' blue
+'e' magenta
+'f' cyan
+'g' white
+'h' black
+*/
+
+#define XIo_Out32(OutputPtr, Value) \
+ (*(volatile unsigned int *)((OutputPtr)) = (Value))
+
+
+#define SCR_BUF_BASEADDR (XPAR_OPB_COLOR_VIDEO_CTRL_0_BASEADDR)
+#define SCR_CTRL_REG_BASEADDR (XPAR_OPB_COLOR_VIDEO_CTRL_0_BASEADDR + 0xA000)
+#define SCR_CHAR_MAP_BASEADDR (XPAR_OPB_COLOR_VIDEO_CTRL_0_BASEADDR + 0xC000)
+#define xy2scroffset(x, y) (((y * SCR_X) + x) << 2)
+#define out32 XIo_Out32
+#define pack_scr_char(c, clr) ((((unsigned int)clr) << 8) | (c & 0xff))
+#define XIL_ADDR(x,y) ((void *)(SCR_BUF_BASEADDR + xy2scroffset(x,y)))
+
+static unsigned int null_char[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static unsigned int solid_square_char[8] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+static unsigned int horiz_line[8] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static unsigned int horiz_barred_line[8] = { 0xff, 0xff, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00 };
+static unsigned int vert_line[8] = { 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18 };
+
+static void xilscr_write_char (int x, int y, char c, char color);
+static void xilscr_redefine_char (unsigned char c, unsigned int *defptr);
+
+
+
+/* console information */
+
+static int xil_first_vc = 1;
+static int xil_last_vc = 16;
+
+static struct vc_data *xil_display_fg = NULL;
+
+module_param(xil_first_vc, int, 0);
+module_param(xil_last_vc, int, 0);
+
+/* XILINX register values */
+
+#define CLR_R 1
+#define CLR_G 2
+#define CLR_B 4
+
+#define SCR_X_PIXELS 800
+#define SCR_Y_PIXELS 600
+#define SCR_X 100
+#define SCR_Y 75
+
+#define BLANK_CHAR 0
+#define HORIZ_LINE_CHAR 128
+#define VERT_LINE_CHAR 129
+#define HORIZ_BARRED_LINE_CHAR 130
+#define SOLID_SQUARE_CHAR 131
+
+
+
+#ifndef MODULE
+
+static int __init xilcon_setup(char *str) /* parse boot option "xilcon=first_vc,last_vc" */
+{
+ int ints[3]; /* ints[0] holds the count of parsed values */
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+
+ if (ints[0] < 2)
+ return 0; /* need both first and last; keep defaults */
+
+ if (ints[1] < 1 || ints[1] > MAX_NR_CONSOLES ||
+ ints[2] < 1 || ints[2] > MAX_NR_CONSOLES)
+ return 0; /* out of range; keep defaults */
+
+ xil_first_vc = ints[1];
+ xil_last_vc = ints[2];
+ return 1;
+}
+
+__setup("xilcon=", xilcon_setup);
+
+#endif
+
+
+static void xilscr_redefine_char (unsigned char c, unsigned int *defptr)
+{
+ unsigned int *charp = (unsigned int*)(SCR_CHAR_MAP_BASEADDR + (unsigned int)(((unsigned int)c)<<5)); /* each glyph occupies 8 words: c * 32 bytes */
+
+ *charp++ = *defptr++; /* copy the 8 bitmap rows of the glyph */
+ *charp++ = *defptr++;
+ *charp++ = *defptr++;
+ *charp++ = *defptr++;
+ *charp++ = *defptr++;
+ *charp++ = *defptr++;
+ *charp++ = *defptr++;
+ *charp++ = *defptr++;
+}
+
+
+
+static const char __init *xilcon_startup(void)
+{
+ char video_mode;
+ video_mode = 0;
+ // Enable the character mode in the control register of the videocontroller
+ out32 (SCR_CTRL_REG_BASEADDR, (video_mode<<8) | 0x02);
+ xilscr_redefine_char (BLANK_CHAR, null_char); /* install the driver's private glyphs */
+ xilscr_redefine_char (HORIZ_LINE_CHAR, horiz_line);
+ xilscr_redefine_char (VERT_LINE_CHAR, vert_line);
+ xilscr_redefine_char (HORIZ_BARRED_LINE_CHAR, horiz_barred_line);
+ xilscr_redefine_char (SOLID_SQUARE_CHAR, solid_square_char);
+
+ return "XILINX_OPB_CHAR"; /* display name reported to the VT layer */
+}
+
+static void xilcon_init(struct vc_data *c, int init)
+{
+ c->vc_can_do_color = 1;
+ c->vc_complement_mask = 0x0800; /* reverse video */
+ c->vc_display_fg = &xil_display_fg;
+
+ if (init) {
+ c->vc_cols = SCR_X; /* was hard-coded 100 */
+ c->vc_rows = SCR_Y; /* was hard-coded 75 */
+ } else
+ vc_resize(c, SCR_X, SCR_Y); /* was hard-coded 100, 75 */
+
+ /* make the first XIL console visible */
+
+ if (xil_display_fg == NULL)
+ xil_display_fg = c;
+}
+
+static void xilcon_deinit(struct vc_data *c)
+{
+ if (xil_display_fg == c)
+ xil_display_fg = NULL; /* forget the foreground console if it is going away */
+}
+
+
+static u8 xilcon_build_attr(struct vc_data *c, u8 color, u8 intensity,
+ u8 blink, u8 underline, u8 reverse)
+{
+
+ return color; /* only the color is encoded; intensity/blink/underline/reverse are ignored */
+}
+
+
+static void xilscr_write_char (int x, int y, char c, char color)
+{
+ XIo_Out32((SCR_BUF_BASEADDR + xy2scroffset (x,y)), pack_scr_char (c, color)); /* one 32-bit cell per character position */
+}
+
+
+static void xilcon_putc(struct vc_data *c, int ch, int y, int x)
+{
+ xilscr_write_char (x, y, (ch)&0xFF, (ch>>8)&0xFF); /* low byte = glyph, high byte = attribute/color */
+}
+
+static void xilcon_putcs(struct vc_data *c, const unsigned short *s,
+ int count, int y, int x)
+{
+ for (; count > 0; count--) {
+ xilcon_putc(c, *(s++), y, x++); /* write left to right along row y */
+ }
+}
+
+
+static void xilcon_clear(struct vc_data *c, int y, int x,
+ int height, int width)
+{
+ unsigned int *scr_buf_start = (unsigned int*)(SCR_BUF_BASEADDR); /* first screen cell */
+ unsigned int *scr_buf_end = (unsigned int*)(SCR_BUF_BASEADDR + xy2scroffset (0, SCR_Y)); /* one past the last cell; was (SCR_X, SCR_Y), which overruns the buffer */
+ unsigned int *bufp; /* FIXME: x/y/height/width are ignored; the whole screen is cleared */
+
+ bufp = scr_buf_start;
+ while (bufp < scr_buf_end)
+ *bufp++ = 0x0;
+}
+
+
+static int xilcon_switch(struct vc_data *c)
+{
+ return 1; /* redrawing needed: ask the VT layer to repaint on every switch */
+}
+
+static int xilcon_set_palette(struct vc_data *c, unsigned char *table)
+{
+ return -EINVAL; /* palette changes are not supported */
+}
+
+static int xilcon_blank(struct vc_data *c, int blank, int mode_switch)
+{
+ if(blank)
+ out32 (SCR_CTRL_REG_BASEADDR, 0x0); /* disable the video controller */
+ else
+ out32 (SCR_CTRL_REG_BASEADDR, 0x2); /* re-enable character mode (see xilcon_startup) */
+
+ return 0;
+}
+
+static int xilcon_scrolldelta(struct vc_data *c, int lines)
+{
+ return 0; /* scrollback is not supported */
+}
+
+static void xilcon_cursor(struct vc_data *c, int mode)
+{
+ unsigned short car1; /* character cell currently under the cursor */
+
+ car1 = c->vc_screenbuf[c->vc_x + c->vc_y * c->vc_cols];
+ switch (mode) {
+ case CM_ERASE:
+ xilcon_putc(c, car1, c->vc_y, c->vc_x); /* restore the underlying character */
+ break;
+ case CM_MOVE:
+ case CM_DRAW:
+ switch (c->vc_cursor_type & 0x0f) {
+ case CUR_UNDERLINE:
+ case CUR_LOWER_THIRD:
+ case CUR_LOWER_HALF:
+ case CUR_TWO_THIRDS:
+ case CUR_BLOCK:
+ xilcon_putc(c, (7<<8) | 131, c->vc_y, c->vc_x); /* draw SOLID_SQUARE_CHAR (131) in white (CLR_R|CLR_G|CLR_B = 7) */
+ break;
+ }
+ break;
+ }
+
+}
+
+static int xilcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+{
+ if (!lines)
+ return 0;
+
+ if (lines > c->vc_rows)
+ lines = c->vc_rows;
+
+ switch (dir) {
+
+ case SM_UP:
+ scr_memmovew((void *)(SCR_BUF_BASEADDR + xy2scroffset(0,t)),
+ (void *)(SCR_BUF_BASEADDR + xy2scroffset(0,t + lines)),
+ (b-t-lines)*75*8); /* NOTE(review): byte counts look suspect for 100x75 32-bit cells -- verify */
+ scr_memsetw((void *)(SCR_BUF_BASEADDR + xy2scroffset(0,b-lines)), (('h'<<8) | 0x00),
+ lines*75*8); /* was ||, which filled with the constant 1 instead of a blank black cell */
+ break;
+ case SM_DOWN:
+ scr_memmovew((void *)(SCR_BUF_BASEADDR + xy2scroffset(0,t + lines)),
+ (void *)(SCR_BUF_BASEADDR + xy2scroffset(0,t)),
+ (b-t-lines)*75*8);
+ scr_memsetw((void *)(SCR_BUF_BASEADDR + xy2scroffset(0,t)), (('h'<<8) | 0x00), lines*75*8); /* was || -- same logical-OR bug */
+ break;
+ }
+
+ return 0;
+}
+
+static void xilcon_bmove(struct vc_data *c, int sy, int sx,
+ int dy, int dx, int height, int width)
+{
+ u16 *src, *dest; /* NOTE(review): screen cells are 32-bit, but the u16 pointer arithmetic and width*2 sizes below assume 16-bit cells -- verify */
+
+ if (width <= 0 || height <= 0)
+ return;
+
+ if (sx==0 && dx==0 && width==100) { /* whole rows: one contiguous move */
+ scr_memmovew(XIL_ADDR(0,dy), XIL_ADDR(0,sy), height*width*2);
+
+ } else if (dy < sy || (dy == sy && dx < sx)) { /* destination precedes source: copy top-down */
+ src = XIL_ADDR(sx, sy);
+ dest = XIL_ADDR(dx, dy);
+
+ for (; height > 0; height--) {
+ scr_memmovew(dest, src, width*2);
+ src += 100; /* advance one row (100 columns) */
+ dest += 100;
+ }
+ } else { /* overlapping move downward: copy bottom-up */
+ src = XIL_ADDR(sx, sy+height-1);
+ dest = XIL_ADDR(dx, dy+height-1);
+
+ for (; height > 0; height--) {
+ scr_memmovew(dest, src, width*2);
+ src -= 100;
+ dest -= 100;
+ }
+ }
+
+ return;
+}
+
+/*
+ * The console `switch' structure for the XILINX based console;
+ * registered with the VT layer by xilinx_console_init()
+ */
+
+const struct consw xil_con = {
+ .owner = THIS_MODULE,
+ .con_startup = xilcon_startup,
+ .con_init = xilcon_init,
+ .con_deinit = xilcon_deinit,
+ .con_clear = xilcon_clear,
+ .con_putc = xilcon_putc,
+ .con_putcs = xilcon_putcs,
+ .con_switch = xilcon_switch,
+ .con_blank = xilcon_blank,
+ .con_set_palette = xilcon_set_palette,
+ .con_scrolldelta = xilcon_scrolldelta,
+ .con_build_attr = xilcon_build_attr,
+ .con_cursor = xilcon_cursor,
+ .con_scroll = xilcon_scroll,
+ .con_bmove = xilcon_bmove, /* remaining consw hooks are left unset */
+};
+
+
+int __init xilinx_console_init(void)
+{
+ if (xil_first_vc > xil_last_vc) /* inverted range from the "xilcon=" option */
+ return -EINVAL; /* was 1; initcalls report failure as negative errno */
+ return take_over_console(&xil_con, xil_first_vc-1, xil_last_vc-1, 1);
+}
+
+static void __exit xilinx_console_exit(void)
+{
+ give_up_console(&xil_con); /* unregister this console driver from the VT layer */
+}
+
+module_init(xilinx_console_init);
+module_exit(xilinx_console_exit);
+
+MODULE_LICENSE("GPL");
+
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
#if defined(CONFIG_OF)
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
#include <asm/io.h>
#include <linux/xilinxfb.h>
+#include <asm/dcr.h>
#define DRIVER_NAME "xilinxfb"
#define DRIVER_DESCRIPTION "Xilinx TFT LCD frame buffer driver"
struct fb_info info; /* FB driver info record */
- u32 regs_phys; /* phys. address of the control registers */
- u32 __iomem *regs; /* virt. address of the control registers */
+ dcr_host_t dcr_host;
+ unsigned int dcr_start;
+ unsigned int dcr_len;
void *fb_virt; /* virt. address of the frame buffer */
dma_addr_t fb_phys; /* phys. address of the frame buffer */
* when it's needed.
*/
#define xilinx_fb_out_be32(driverdata, offset, val) \
- out_be32(driverdata->regs + offset, val)
+ dcr_write(driverdata->dcr_host, offset, val)
static int
xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
* Bus independent setup/teardown
*/
-static int xilinxfb_assign(struct device *dev, unsigned long physaddr,
+static int xilinxfb_assign(struct device *dev, dcr_host_t dcr_host,
+ unsigned int dcr_start, unsigned int dcr_len,
struct xilinxfb_platform_data *pdata)
{
struct xilinxfb_drvdata *drvdata;
}
dev_set_drvdata(dev, drvdata);
- /* Map the control registers in */
- if (!request_mem_region(physaddr, 8, DRIVER_NAME)) {
- dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
- physaddr);
- rc = -ENODEV;
- goto err_region;
- }
- drvdata->regs_phys = physaddr;
- drvdata->regs = ioremap(physaddr, 8);
- if (!drvdata->regs) {
- dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
- physaddr);
- rc = -ENODEV;
- goto err_map;
- }
+ drvdata->dcr_start = dcr_start;
+ drvdata->dcr_len = dcr_len;
+ drvdata->dcr_host = dcr_host;
/* Allocate the framebuffer memory */
if (pdata->fb_phys) {
if (!drvdata->fb_virt) {
dev_err(dev, "Could not allocate frame buffer memory\n");
rc = -ENOMEM;
- goto err_fbmem;
+ goto err_region;
}
/* Clear (turn to black) the framebuffer */
}
/* Put a banner in the log (for DEBUG) */
- dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr, drvdata->regs);
dev_dbg(dev, "fb: phys=%p, virt=%p, size=%x\n",
(void*)drvdata->fb_phys, drvdata->fb_virt, fbsize);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
-err_fbmem:
- iounmap(drvdata->regs);
-
-err_map:
- release_mem_region(physaddr, 8);
-
err_region:
kfree(drvdata);
dev_set_drvdata(dev, NULL);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
- iounmap(drvdata->regs);
- release_mem_region(drvdata->regs_phys, 8);
+ dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
kfree(drvdata);
dev_set_drvdata(dev, NULL);
return 0;
}
-/* ---------------------------------------------------------------------
- * Platform bus binding
- */
-
-static int
-xilinxfb_platform_probe(struct platform_device *pdev)
-{
- struct xilinxfb_platform_data *pdata;
- struct resource *res;
-
- /* Find the registers address */
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "Couldn't get registers resource\n");
- return -ENODEV;
- }
-
- /* If a pdata structure is provided, then extract the parameters */
- pdata = &xilinx_fb_default_pdata;
- if (pdev->dev.platform_data) {
- pdata = pdev->dev.platform_data;
- if (!pdata->xres)
- pdata->xres = xilinx_fb_default_pdata.xres;
- if (!pdata->yres)
- pdata->yres = xilinx_fb_default_pdata.yres;
- if (!pdata->xvirt)
- pdata->xvirt = xilinx_fb_default_pdata.xvirt;
- if (!pdata->yvirt)
- pdata->yvirt = xilinx_fb_default_pdata.yvirt;
- }
-
- return xilinxfb_assign(&pdev->dev, res->start, pdata);
-}
-
-static int
-xilinxfb_platform_remove(struct platform_device *pdev)
-{
- return xilinxfb_release(&pdev->dev);
-}
-
-
-static struct platform_driver xilinxfb_platform_driver = {
- .probe = xilinxfb_platform_probe,
- .remove = xilinxfb_platform_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
-};
-
/* ---------------------------------------------------------------------
* OF bus binding
*/
static int __devinit
xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
{
- struct resource res;
const u32 *prop;
struct xilinxfb_platform_data pdata;
int size, rc;
+ int start, len;
+ dcr_host_t dcr_host;
/* Copy with the default pdata (not a ptr reference!) */
pdata = xilinx_fb_default_pdata;
dev_dbg(&op->dev, "xilinxfb_of_probe(%p, %p)\n", op, match);
- rc = of_address_to_resource(op->node, 0, &res);
- if (rc) {
+ start = dcr_resource_start(op->node, 0);
+ len = dcr_resource_len(op->node, 0);
+ dcr_host = dcr_map(op->node, start, len);
+ if (!DCR_MAP_OK(dcr_host)) {
dev_err(&op->dev, "invalid address\n");
- return rc;
+ return -ENODEV;
}
prop = of_get_property(op->node, "phys-size", &size);
if (of_find_property(op->node, "rotate-display", NULL))
pdata.rotate_screen = 1;
- return xilinxfb_assign(&op->dev, res.start, &pdata);
+ return xilinxfb_assign(&op->dev, dcr_host, start, len, &pdata);
}
static int __devexit xilinxfb_of_remove(struct of_device *op)
/* Match table for of_platform binding */
static struct of_device_id xilinxfb_of_match[] __devinitdata = {
{ .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
+ { .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", },
{},
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
static int __init
xilinxfb_init(void)
{
- int rc;
- rc = xilinxfb_of_register();
- if (rc)
- return rc;
-
- rc = platform_driver_register(&xilinxfb_platform_driver);
- if (rc)
- xilinxfb_of_unregister();
-
- return rc;
+ return xilinxfb_of_register();
}
static void __exit
xilinxfb_cleanup(void)
{
- platform_driver_unregister(&xilinxfb_platform_driver);
xilinxfb_of_unregister();
}
--- /dev/null
+# Hidden (non-prompted) option: build the Xilinx EDK common support code
+# on Virtex platforms.
+config XILINX_EDK
+ bool
+ depends on XILINX_VIRTEX
+ default y
+
+# Hidden option: build DCR-based register I/O (xio_dcr.o) for the
+# LocalLink DMA core; defaults to on for Virtex-5 FXT designs.
+config XILINX_LLDMA_USE_DCR
+ bool
+ depends on NEED_XILINX_LLDMA
+ default XILINX_VIRTEX_5_FXT
--- /dev/null
+# The Xilinx OS common code
+
+# Core EDK library objects: basic types/asserts, version helpers,
+# packet FIFOs, DMA channel code and register I/O.
+obj-$(CONFIG_XILINX_EDK) += xbasic_types.o \
+ xversion.o xpacket_fifo_v2_00_a.o xpacket_fifo_l_v2_00_a.o \
+ xdma_channel.o xdma_channel_sg.o xio.o
+
+# DMA v3 core support
+obj-$(CONFIG_NEED_XILINX_DMAV3) += \
+ xdmav3.o xdmav3_intr.o xdmav3_sg.o \
+ xdmav3_selftest.o xdmav3_simple.o
+
+# LocalLink DMA, FIFO and streamer support
+obj-$(CONFIG_NEED_XILINX_LLDMA) += \
+ xlldma_bdring.o xlldma.o \
+ xllfifo.o xstreamer.o
+
+# DCR-based register access for LLDMA (gated by XILINX_LLDMA_USE_DCR)
+obj-$(CONFIG_XILINX_LLDMA_USE_DCR) += \
+ xio_dcr.o
+
+# IPIF support -- presumably the shared IP interface registers; TODO confirm
+obj-$(CONFIG_NEED_XILINX_IPIF) += \
+ xipif_v1_23_b.o
--- /dev/null
+/* $Id $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xbasic_types.c
+*
+* This file contains basic functions for Xilinx software IP.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rpm 11/07/03 Added XNullHandler function as a stub interrupt handler
+* 1.00a xd 11/03/04 Improved support for doxygen.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Variable Definitions *****************************/
+
+/**
+ * This variable allows testing to be done easier with asserts. An assert
+ * sets this variable such that a driver can evaluate this variable
+ * to determine if an assert occurred.
+ */
+unsigned int XAssertStatus;
+
+/**
+ * This variable allows the assert functionality to be changed for testing
+ * such that it does not wait infinitely. Use the debugger to disable the
+ * waiting during testing of asserts.
+ */
+u32 XWaitInAssert = TRUE;
+
+/* The callback function to be invoked when an assert is taken */
+static XAssertCallback XAssertCallbackRoutine = (XAssertCallback) NULL;
+
+/************************** Function Prototypes ******************************/
+
+/*****************************************************************************/
+/**
+*
+* Implements assert. Currently, it calls a user-defined callback function
+* if one has been set. Then, it potentially enters an infinite loop depending
+* on the value of the XWaitInAssert variable.
+*
+* @param File is the name of the filename of the source
+* @param Line is the linenumber within File
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void XAssert(char *File, int Line)
+{
+ /* if a callback has been registered via XAssertSetCallback, invoke it */
+ if (XAssertCallbackRoutine != NULL) {
+ (*XAssertCallbackRoutine) (File, Line);
+ }
+
+ /* if specified, wait indefinitely such that the assert will show up
+ * in testing; clear XWaitInAssert from a debugger to resume execution
+ */
+ while (XWaitInAssert) {
+ }
+}
+
+/*****************************************************************************/
+/**
+*
+* Sets up a callback function to be invoked when an assert occurs. If there
+* was already a callback installed, then it is replaced.
+*
+* @param Routine is the callback to be invoked when an assert is taken
+*
+* @return None.
+*
+* @note This function has no effect if NDEBUG is set
+*
+******************************************************************************/
+void XAssertSetCallback(XAssertCallback Routine)
+{
+ XAssertCallbackRoutine = Routine;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Null handler function. This follows the XInterruptHandler signature for
+* interrupt handlers. It can be used to assign a null handler (a stub) to an
+* interrupt controller vector table.
+*
+* @param NullParameter is an arbitrary void pointer and not used.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void XNullHandler(void *NullParameter)
+{
+}
--- /dev/null
+/* $Id: xbasic_types.h,v 1.1 2006/12/13 14:21:22 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xbasic_types.h
+*
+* This file contains basic types for Xilinx software IP. These types do not
+* follow the standard naming convention with respect to using the component
+* name in front of each name because they are considered to be primitives.
+*
+* @note
+*
+* This file contains items which are architecture dependent.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 12/14/01 First release
+* rmm 05/09/03 Added "xassert always" macros to rid ourselves of diab
+* compiler warnings
+* 1.00a rpm 11/07/03 Added XNullHandler function as a stub interrupt handler
+* 1.00a rpm 07/21/04 Added XExceptionHandler typedef for processor exceptions
+* 1.00a xd 11/03/04 Improved support for doxygen.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XBASIC_TYPES_H /* prevent circular inclusions */
+#define XBASIC_TYPES_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+#include <linux/types.h>
+
+/************************** Constant Definitions *****************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE !(TRUE)
+#endif
+
+#define XCOMPONENT_IS_READY 0x11111111 /**< component has been initialized */
+#define XCOMPONENT_IS_STARTED 0x22222222 /**< component has been started */
+
+/* the following constants and declarations are for unit test purposes and are
+ * designed to be used in test applications.
+ */
+#define XTEST_PASSED 0
+#define XTEST_FAILED 1
+
+#define XASSERT_NONE 0
+#define XASSERT_OCCURRED 1
+
+extern unsigned int XAssertStatus;
+extern void XAssert(char *, int);
+
+/**************************** Type Definitions *******************************/
+/**
+ * This data type defines an interrupt handler for a device.
+ * The argument points to the instance of the component
+ */
+typedef void (*XInterruptHandler) (void *InstancePtr);
+
+/**
+ * This data type defines an exception handler for a processor.
+ * The argument points to the instance of the component
+ */
+typedef void (*XExceptionHandler) (void *InstancePtr);
+
+/**
+ * This data type defines a callback to be invoked when an
+ * assert occurs. The callback is invoked only when asserts are enabled
+ */
+typedef void (*XAssertCallback) (char *FilenamePtr, int LineNumber);
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+#ifndef NDEBUG
+
+/*****************************************************************************/
+/**
+* This assert macro is to be used for functions that do not return anything
+* (void). This in conjunction with the XWaitInAssert boolean can be used to
+* accommodate tests so that asserts which fail allow execution to continue.
+*
+* @param expression is the expression to evaluate. If it evaluates to
+* false, the assert occurs.
+*
+* @return Returns void unless the XWaitInAssert variable is true, in which
+* case no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+#define XASSERT_VOID(expression) \
+{ \
+ if (expression) \
+ { \
+ XAssertStatus = XASSERT_NONE; \
+ } \
+ else \
+ { \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return; \
+ } \
+}
+
+/*****************************************************************************/
+/**
+* This assert macro is to be used for functions that do return a value. This in
+* conjunction with the XWaitInAssert boolean can be used to accommodate tests so
+* that asserts which fail allow execution to continue.
+*
+* @param expression is the expression to evaluate. If it evaluates to false,
+* the assert occurs.
+*
+* @return Returns 0 unless the XWaitInAssert variable is true, in which case
+* no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+#define XASSERT_NONVOID(expression) \
+{ \
+ if (expression) \
+ { \
+ XAssertStatus = XASSERT_NONE; \
+ } \
+ else \
+ { \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return 0; \
+ } \
+}
+
+/*****************************************************************************/
+/**
+* Always assert. This assert macro is to be used for functions that do not
+* return anything (void). Use for instances where an assert should always
+* occur.
+*
+* @return Returns void unless the XWaitInAssert variable is true, in which case
+* no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+#define XASSERT_VOID_ALWAYS() \
+{ \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return; \
+}
+
+/*****************************************************************************/
+/**
+* Always assert. This assert macro is to be used for functions that do return
+* a value. Use for instances where an assert should always occur.
+*
+* @return Returns 0 unless the XWaitInAssert variable is true, in which case
+* no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+#define XASSERT_NONVOID_ALWAYS() \
+{ \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return 0; \
+}
+
+
+#else
+
+#define XASSERT_VOID(expression)
+#define XASSERT_VOID_ALWAYS()
+#define XASSERT_NONVOID(expression)
+#define XASSERT_NONVOID_ALWAYS()
+#endif
+
+/************************** Function Prototypes ******************************/
+
+void XAssertSetCallback(XAssertCallback Routine);
+void XNullHandler(void *NullParameter);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xbuf_descriptor.h,v 1.1 2006/12/13 14:21:30 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2001-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xbuf_descriptor.h
+*
+* <b>Description</b>
+*
+* This file contains the interface for the XBufDescriptor component.
+* The XBufDescriptor component is a passive component that only maps over
+* a buffer descriptor data structure shared by the scatter gather DMA hardware
+* and software. The component's primary purpose is to provide encapsulation of
+* the buffer descriptor processing. See the source file xbuf_descriptor.c for
+* details.
+*
+* @note
+*
+* Most of the functions of this component are implemented as macros in order
+* to optimize the processing. The names are not all uppercase such that they
+* can be switched between macros and functions easily.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a xd 10/27/04 Doxygenated for inclusion in API documentation
+* 1.00b ecm 10/31/05 Updated for the check sum offload changes.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XBUF_DESCRIPTOR_H /* prevent circular inclusions */
+#define XBUF_DESCRIPTOR_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xdma_channel_i.h"
+
+/************************** Constant Definitions *****************************/
+
+/** @name Buffer Descriptor fields
+ *
+ * @{
+ */
+/** This constant allows access to fields of a buffer descriptor
+ * and is necessary at this level of visibility to allow macros to access
+ * and modify the fields of a buffer descriptor. It is not expected that the
+ * user of a buffer descriptor would need to use this constant.
+ */
+#define XBD_DEVICE_STATUS_OFFSET 0
+#define XBD_CONTROL_OFFSET 1
+#define XBD_SOURCE_OFFSET 2
+#define XBD_DESTINATION_OFFSET 3
+#define XBD_LENGTH_OFFSET 4
+#define XBD_STATUS_OFFSET 5
+#define XBD_NEXT_PTR_OFFSET 6
+#define XBD_ID_OFFSET 7
+#define XBD_FLAGS_OFFSET 8
+#define XBD_RQSTED_LENGTH_OFFSET 9
+#define XBD_SIZE_IN_WORDS 10
+/* @} */
+
+/**
+ * The following constants define the bits of the flags field of a buffer
+ * descriptor
+ */
+#define XBD_FLAGS_LOCKED_MASK 1UL
+
+/**************************** Type Definitions *******************************/
+
+typedef u32 XBufDescriptor[XBD_SIZE_IN_WORDS];
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/**
+ * each of the following macros are named the same as functions rather than all
+ * upper case in order to allow either the macros or the functions to be
+ * used, see the source file xbuf_descriptor.c for documentation
+ */
+
+
+/*****************************************************************************/
+/**
+*
+* This function initializes a buffer descriptor component by zeroing all of the
+* fields of the buffer descriptor. This function should be called prior to
+* using a buffer descriptor.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_Initialize(InstancePtr) \
+{ \
+ (*((u32 *)InstancePtr + XBD_CONTROL_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_SOURCE_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_LENGTH_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_STATUS_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_DEVICE_STATUS_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_NEXT_PTR_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_ID_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_FLAGS_OFFSET) = 0); \
+ (*((u32 *)InstancePtr + XBD_RQSTED_LENGTH_OFFSET) = 0); \
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the control field of a buffer descriptor component. The
+* DMA channel hardware transfers the control field from the buffer descriptor
+* into the DMA control register when a buffer descriptor is processed. It
+* controls the details of the DMA transfer.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The control field contents of the buffer descriptor. One or more of the
+* following values may be contained in the field. Each of the values are
+* unique bit masks.
+* <br><br>
+* - XDC_DMACR_SOURCE_INCR_MASK Increment the source address
+* <br><br>
+* - XDC_DMACR_DEST_INCR_MASK Increment the destination address
+* <br><br>
+* - XDC_DMACR_SOURCE_LOCAL_MASK Local source address
+* <br><br>
+* - XDC_DMACR_DEST_LOCAL_MASK Local destination address
+* <br><br>
+* - XDC_DMACR_SG_ENABLE_MASK Scatter gather enable
+* <br><br>
+* - XDC_DMACR_GEN_BD_INTR_MASK Individual buffer descriptor interrupt
+* <br><br>
+* - XDC_DMACR_LAST_BD_MASK Last buffer descriptor in a packet
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_GetControl(InstancePtr) \
+ (u32)(*((u32 *)InstancePtr + XBD_CONTROL_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the control field of a buffer descriptor component. The
+* DMA channel hardware transfers the control field from the buffer descriptor
+* into the DMA control register when a buffer descriptor is processed. It
+* controls the details of the DMA transfer.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Control contains the value to be written to the control field of the buffer
+* descriptor. One or more of the following values may be contained in the field.
+* Each of the values are unique bit masks such that they may be ORed together
+* to enable multiple bits or inverted and ANDed to disable multiple bits.
+* - XDC_DMACR_SOURCE_INCR_MASK Increment the source address
+* - XDC_DMACR_DEST_INCR_MASK Increment the destination address
+* - XDC_DMACR_SOURCE_LOCAL_MASK Local source address
+* - XDC_DMACR_DEST_LOCAL_MASK Local destination address
+* - XDC_DMACR_SG_ENABLE_MASK Scatter gather enable
+* - XDC_DMACR_GEN_BD_INTR_MASK Individual buffer descriptor interrupt
+* - XDC_DMACR_LAST_BD_MASK Last buffer descriptor in a packet
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_SetControl(InstancePtr, Control) \
+ (*((u32 *)InstancePtr + XBD_CONTROL_OFFSET) = (u32)Control)
+
+/*****************************************************************************/
+/**
+*
+* This function determines if this buffer descriptor is marked as being the
+* last in the control field. A packet may be broken up across multiple
+* buffer descriptors such that the last buffer descriptor is the end of the
+* packet. The DMA channel hardware copies the control field from the buffer
+* descriptor to the control register of the DMA channel when the buffer
+* descriptor is processed.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* TRUE if the buffer descriptor is marked as last in the control field,
+* otherwise, FALSE.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_IsLastControl(InstancePtr) \
+ (u32)((*((u32 *)InstancePtr + XBD_CONTROL_OFFSET) & \
+ XDC_CONTROL_LAST_BD_MASK) == XDC_CONTROL_LAST_BD_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function marks the buffer descriptor as being last in the control
+* field of the buffer descriptor. A packet may be broken up across multiple
+* buffer descriptors such that the last buffer descriptor is the end of the
+* packet. The DMA channel hardware copies the control field from the buffer
+* descriptor to the control register of the DMA channel when the buffer
+* descriptor is processed.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_SetLast(InstancePtr) \
+ (*((u32 *)InstancePtr + XBD_CONTROL_OFFSET) |= XDC_CONTROL_LAST_BD_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the source address field of the buffer descriptor.
+* The source address indicates the address of memory which is the
+* source of a DMA scatter gather operation. The DMA channel hardware
+* copies the source address from the buffer descriptor to the source
+* address register of the DMA channel when the buffer descriptor is processed.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The source address field of the buffer descriptor.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_GetSrcAddress(InstancePtr) \
+ ((u32 *)(*((u32 *)InstancePtr + XBD_SOURCE_OFFSET)))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the source address field of the buffer descriptor.
+* The source address indicates the address of memory which is the
+* source of a DMA scatter gather operation. The DMA channel hardware
+* copies the source address from the buffer descriptor to the source
+* address register of the DMA channel when the buffer descriptor is processed.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Source contains the source address field for the buffer descriptor.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_SetSrcAddress(InstancePtr, Source) \
+ (*((u32 *)InstancePtr + XBD_SOURCE_OFFSET) = (u32)Source)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the destination address field of the buffer descriptor.
+* The destination address indicates the address of memory which is the
+* destination of a DMA scatter gather operation. The DMA channel hardware
+* copies the destination address from the buffer descriptor to the destination
+* address register of the DMA channel when the buffer descriptor is processed.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The destination address field of the buffer descriptor.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_GetDestAddress(InstancePtr) \
+ ((u32 *)(*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET)))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the destination address field of the buffer descriptor.
+* The destination address indicates the address of memory which is the
+* destination of a DMA scatter gather operation. The DMA channel hardware
+* copies the destination address from the buffer descriptor to the destination
+* address register of the DMA channel when the buffer descriptor is processed.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Destination contains the destination address field for the buffer
+* descriptor.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_SetDestAddress(InstancePtr, Destination) \
+ (*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET) = (u32)Destination)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the length of the data transfer if the buffer descriptor
+* has been processed by the DMA channel hardware. If the buffer descriptor
+* has not been processed, the return value will be zero indicating that no data
+* has been transferred yet. This function uses both the length and requested
+* length fields of the buffer descriptor to determine the number of bytes
+* transferred by the DMA operation. The length field of the buffer descriptor
+* contains the number of bytes remaining from the requested length.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The number of bytes which have been transferred by a DMA operation on the
+* buffer descriptor.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_GetLength(InstancePtr) \
+ (u32)(*((u32 *)InstancePtr + XBD_RQSTED_LENGTH_OFFSET) - \
+ *((u32 *)InstancePtr + XBD_LENGTH_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the length and the requested length fields of the buffer
+* descriptor. The length field indicates the number of bytes to transfer for
+* the DMA operation and the requested length is written with the same value.
+* The requested length is not modified by the DMA hardware while the length
+* field is modified by the hardware to indicate the number of bytes remaining
+* in the transfer after the transfer is complete. The requested length allows
+* the software to calculate the actual number of bytes transferred for the DMA
+* operation.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Length contains the length to put in the length and requested length fields
+* of the buffer descriptor.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_SetLength(InstancePtr, Length) \
+{ \
+ (*((u32 *)InstancePtr + XBD_LENGTH_OFFSET) = (u32)(Length)); \
+ (*((u32 *)InstancePtr + XBD_RQSTED_LENGTH_OFFSET) = (u32)(Length));\
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the status field of a buffer descriptor component. The
+* status field is written to the buffer descriptor by the DMA channel hardware
+* after processing of a buffer descriptor is complete. The status field
+* indicates the status of the DMA operation.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The status field contents of the buffer descriptor. One or more of the
+* following values may be contained in the field. Each of the values are
+* unique bit masks.
+* <br><br>
+* - XDC_DMASR_BUSY_MASK The DMA channel is busy
+* <br><br>
+* - XDC_DMASR_BUS_ERROR_MASK A bus error occurred
+* <br><br>
+* - XDC_DMASR_BUS_TIMEOUT_MASK A bus timeout occurred
+* <br><br>
+* - XDC_DMASR_LAST_BD_MASK The last buffer descriptor of a packet
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_GetStatus(InstancePtr) \
+ (u32)(*((u32 *)InstancePtr + XBD_STATUS_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the status field of a buffer descriptor component. The
+* status field is written to the buffer descriptor by the DMA channel hardware
+* after processing of a buffer descriptor is complete. This function would
+* typically be used during debugging of buffer descriptor processing.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Status contains the status field for the buffer descriptor.
+* The status register contents of the DMA channel. One or more of the
+* following values may be contained in the register. Each of the values are
+* unique bit masks.
+* - XDC_DMASR_BUSY_MASK The DMA channel is busy
+* - XDC_DMASR_BUS_ERROR_MASK A bus error occurred
+* - XDC_DMASR_BUS_TIMEOUT_MASK A bus timeout occurred
+* - XDC_DMASR_LAST_BD_MASK The last buffer descriptor of a packet
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XBufDescriptor_SetStatus(InstancePtr, Status) \
+ (*((u32 *)InstancePtr + XBD_STATUS_OFFSET) = (u32)Status)
+
+/*****************************************************************************/
+/**
+*
+* This function determines if this buffer descriptor is marked as being the
+* last in the status field. A packet may be broken up across multiple
+* buffer descriptors such that the last buffer descriptor is the end of the
+* packet. The DMA channel hardware copies the status register contents to
+* the buffer descriptor of the DMA channel after processing of the buffer
+* descriptor is complete.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* TRUE if the buffer descriptor is marked as last in the status field,
+* otherwise, FALSE.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* TRUE when the hardware marked this descriptor as the last of a packet. */
+#define XBufDescriptor_IsLastStatus(InstancePtr) \
+ (u32)((*((u32 *)InstancePtr + XBD_STATUS_OFFSET) & \
+ XDC_STATUS_LAST_BD_MASK) == XDC_STATUS_LAST_BD_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the device status field of the buffer descriptor. The
+* device status is device specific such that the definition of the contents
+* of this field are not defined in this function. The device is defined as the
+* device which is using the DMA channel, such as an ethernet controller. The
+* DMA channel hardware copies the contents of the device status register into
+* the buffer descriptor when processing of the buffer descriptor is complete.
+* This value is typically used by the device driver for the device to determine
+* the status of the DMA operation with respect to the device.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The device status field of the buffer descriptor.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Read-only: device-specific status word copied in by the DMA hardware. */
+#define XBufDescriptor_GetDeviceStatus(InstancePtr) \
+ ((u32)(*((u32 *)InstancePtr + XBD_DEVICE_STATUS_OFFSET)))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the device status field of the buffer descriptor. The
+* device status is device specific such that the definition of the contents
+* of this field are not defined in this function. The device is defined as the
+* device which is using the DMA channel, such as an ethernet controller. This
+* function is typically only used for debugging/testing.
+*
+* The DMA channel hardware copies the contents of the device status register
+* into the buffer descriptor when processing of the buffer descriptor is
+* complete. This value is typically used by the device driver for the device
+* to determine the status of the DMA operation with respect to the device.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Status contains the device status field for the buffer descriptor.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Debug aid: replaces the device-status word of the descriptor while
+ * preserving the raw receive-checksum field (XDC_DMASR_RX_CS_RAW_MASK bits)
+ * already stored there.
+ */
+#define XBufDescriptor_SetDeviceStatus(InstancePtr, Status) \
+{ \
+ u32 Register; \
+ Register = (*((u32 *)InstancePtr + XBD_DEVICE_STATUS_OFFSET)); \
+ Register &= XDC_DMASR_RX_CS_RAW_MASK; \
+ (*((u32 *)InstancePtr + XBD_DEVICE_STATUS_OFFSET)) = \
+ Register | ((u32) (Status)); \
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the next pointer field of the buffer descriptor. This
+* field is used to link the buffer descriptors together such that multiple DMA
+* operations can be automated for scatter gather. It also allows a single
+* packet to be broken across multiple buffer descriptors. The DMA channel
+* hardware traverses the list of buffer descriptors using the next pointer
+* of each buffer descriptor.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The next pointer field of the buffer descriptor.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Returns the next descriptor in the scatter-gather chain (stored as u32). */
+#define XBufDescriptor_GetNextPtr(InstancePtr) \
+ (XBufDescriptor *)(*((u32 *)InstancePtr + XBD_NEXT_PTR_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the next pointer field of the buffer descriptor. This
+* field is used to link the buffer descriptors together such that many DMA
+* operations can be automated for scatter gather. It also allows a single
+* packet to be broken across multiple buffer descriptors. The DMA channel
+* hardware traverses the list of buffer descriptors using the next pointer
+* of each buffer descriptor.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* NextPtr contains the next pointer field for the buffer descriptor.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Links this descriptor to the next one in the scatter-gather chain. */
+#define XBufDescriptor_SetNextPtr(InstancePtr, NextPtr) \
+ (*((u32 *)InstancePtr + XBD_NEXT_PTR_OFFSET) = (u32)NextPtr)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the ID field of the buffer descriptor. The ID field is
+* provided to allow a device driver to correlate the buffer descriptor to other
+* data structures which may be operating system specific, such as a pointer to
+* a higher level memory block. The ID field is not used by the DMA channel
+* hardware and is application specific.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The ID field of the buffer descriptor.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Application-specific ID field; never touched by the DMA hardware. */
+#define XBufDescriptor_GetId(InstancePtr) \
+ (u32)(*((u32 *)InstancePtr + XBD_ID_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the ID field of the buffer descriptor. The ID field is
+* provided to allow a device driver to correlate the buffer descriptor to other
+* data structures which may be operating system specific, such as a pointer to
+* a higher level memory block. The ID field is not used by the DMA channel
+* hardware and is application specific.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Id contains the ID field for the buffer descriptor.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Stores an application-specific ID; never touched by the DMA hardware. */
+#define XBufDescriptor_SetId(InstancePtr, Id) \
+ (*((u32 *)InstancePtr + XBD_ID_OFFSET) = (u32)Id)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the flags field of the buffer descriptor. The flags
+* field is not used by the DMA channel hardware and is used for software
+* processing of buffer descriptors.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* The flags field of the buffer descriptor. The field may contain one or more
+* of the following values which are bit masks.
+* <br><br>
+* - XBD_FLAGS_LOCKED_MASK Indicates the buffer descriptor is locked
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Software-only flags word (XBD_FLAGS_* bits); hardware ignores it. */
+#define XBufDescriptor_GetFlags(InstancePtr) \
+ (u32)(*((u32 *)InstancePtr + XBD_FLAGS_OFFSET))
+
+/*****************************************************************************/
+/**
+*
+* This function sets the flags field of the buffer descriptor. The flags
+* field is not used by the DMA channel hardware and is used for software
+* processing of buffer descriptors.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @param
+*
+* Flags contains the flags field for the buffer descriptor. The field may
+* contain one or more of the following values which are bit masks.
+* - XBD_FLAGS_LOCKED_MASK Indicates the buffer descriptor is locked
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Replaces the whole software-only flags word (XBD_FLAGS_* bits). */
+#define XBufDescriptor_SetFlags(InstancePtr, Flags) \
+ (*((u32 *)InstancePtr + XBD_FLAGS_OFFSET) = (u32)Flags)
+
+/*****************************************************************************/
+/**
+*
+* This function locks the buffer descriptor. A lock is specific to the
+* scatter gather processing and prevents a buffer descriptor from being
+* overwritten in the scatter gather list. This field is not used by the DMA
+* channel hardware such that the hardware could still write to the buffer
+* descriptor. Locking a buffer descriptor is application specific and not
+* necessary to allow the DMA channel to use the buffer descriptor, but is
+* provided for flexibility in designing device drivers.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Sets the software lock bit; the DMA hardware does not honor it. */
+#define XBufDescriptor_Lock(InstancePtr) \
+ (*((u32 *)InstancePtr + XBD_FLAGS_OFFSET) |= XBD_FLAGS_LOCKED_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function unlocks the buffer descriptor. A lock is specific to the
+* scatter gather processing and prevents a buffer descriptor from being
+* overwritten in the scatter gather list. This field is not used by the DMA
+* channel hardware such that the hardware could still write to the buffer
+* descriptor. Locking a buffer descriptor is application specific and not
+* necessary to allow the DMA channel to use the buffer descriptor, but is
+* provided for flexibility in designing device drivers.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Clears the software lock bit; the DMA hardware does not honor it. */
+#define XBufDescriptor_Unlock(InstancePtr) \
+ (*((u32 *)InstancePtr + XBD_FLAGS_OFFSET) &= ~XBD_FLAGS_LOCKED_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function determines if the buffer descriptor is locked. The lock
+* is not used by the DMA channel hardware and is used for software processing
+* of buffer descriptors.
+*
+* @param
+*
+* InstancePtr points to the buffer descriptor to operate on.
+*
+* @return
+*
+* TRUE if the buffer descriptor is locked, otherwise FALSE.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* TRUE when the software lock bit is set in the flags word. */
+#define XBufDescriptor_IsLocked(InstancePtr) \
+ (u32) ((*((u32 *)InstancePtr + XBD_FLAGS_OFFSET) & \
+ XBD_FLAGS_LOCKED_MASK) == XBD_FLAGS_LOCKED_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the Initial value for the CS offload function.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The initial value that will be used for checksum offload operation as DMA
+* moves the data.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Read-only accessor: must use '&', not '&='. The compound assignment
+ * wrote the masked value back into the descriptor, destroying every other
+ * field of the control word on each "get".
+ */
+#define XBufDescriptor_GetCSInit(InstancePtr)\
+(*((u32 *)InstancePtr + XBD_CONTROL_OFFSET) & XDC_DMACR_TX_CS_INIT_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function Sets the Initial value for the CS offload function.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* None
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Writes the checksum-offload initial value into the control word,
+ * preserving all bits outside XDC_DMACR_TX_CS_INIT_MASK.
+ */
+#define XBufDescriptor_SetCSInit(InstancePtr, InitialValue) \
+{ \
+ u32 Register; \
+ Register = (*((u32 *)InstancePtr + XBD_CONTROL_OFFSET)); \
+ Register &= ~XDC_DMACR_TX_CS_INIT_MASK; \
+ (*((u32 *)InstancePtr + XBD_CONTROL_OFFSET)) = \
+ Register | ((u32) (InitialValue)); \
+}
+/*****************************************************************************/
+/**
+*
+* This function gets the byte position where the CS offload function
+* inserts the calculated checksum.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The insert byte location value that will be used to place the results of
+* the checksum offload.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Read-only accessor: must use '&', not '&='. The compound assignment
+ * wrote the masked value back into the descriptor, destroying every other
+ * field of the destination word on each "get".
+ */
+#define XBufDescriptor_GetCSInsertLoc(InstancePtr) \
+(*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET) & XDC_DAREG_CS_INSERT_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function sets the byte position where the CS offload function
+* inserts the calculated checksum.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* None
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Writes the checksum insert-location into the destination word,
+ * preserving all bits outside XDC_DAREG_CS_INSERT_MASK.
+ */
+#define XBufDescriptor_SetCSInsertLoc(InstancePtr, InsertLocation) \
+{ \
+ u32 Register; \
+ Register = (*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET)); \
+ Register &= ~XDC_DAREG_CS_INSERT_MASK; \
+ (*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET)) = \
+ Register | ((u32) (InsertLocation)); \
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the byte position where the CS offload function
+* begins the calculation of the checksum.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The insert byte location value that will be used to place the results of
+* the checksum offload.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Checksum-begin byte offset: upper 16 bits of the destination word. */
+#define XBufDescriptor_GetCSBegin(InstancePtr) \
+(u16)((*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET)) >> 16)
+/*****************************************************************************/
+/**
+*
+* This function sets the byte position where the CS offload function
+* begins the calculation of the checksum.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* None
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Writes the checksum-begin offset into the upper 16 bits of the
+ * destination word, preserving all bits outside XDC_DAREG_CS_BEGIN_MASK.
+ */
+#define XBufDescriptor_SetCSBegin(InstancePtr, BeginLocation) \
+{ \
+ u32 Register; \
+ Register = (*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET)); \
+ Register &= ~XDC_DAREG_CS_BEGIN_MASK; \
+ (*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET)) = \
+ Register | (((u32) (BeginLocation)) << 16); \
+}
+/*****************************************************************************/
+/**
+*
+* This function gets the resulting checksum from the rx channel.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The raw checksum calculation from the receive operation. It needs to
+* be adjusted to remove the header and packet FCS to be correct.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+/* Raw 16-bit receive checksum: upper half of the device-status word. */
+#define XBufDescriptor_GetCSRaw(InstancePtr) \
+(u16)((*((u32 *)InstancePtr + XBD_DEVICE_STATUS_OFFSET)) >> 16)
+
+/************************** Function Prototypes ******************************/
+
+/* The following prototypes are provided to allow each of the functions to
+ * be implemented as a function rather than a macro, and to provide the
+ * syntax to allow users to understand how to call the macros, they are
+ * commented out to prevent linker errors
+ *
+
+u32 XBufDescriptor_Initialize(XBufDescriptor* InstancePtr);
+
+u32 XBufDescriptor_GetControl(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetControl(XBufDescriptor* InstancePtr, u32 Control);
+
+u32 XBufDescriptor_IsLastControl(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetLast(XBufDescriptor* InstancePtr);
+
+u32 XBufDescriptor_GetLength(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetLength(XBufDescriptor* InstancePtr, u32 Length);
+
+u32 XBufDescriptor_GetStatus(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetStatus(XBufDescriptor* InstancePtr, u32 Status);
+u32 XBufDescriptor_IsLastStatus(XBufDescriptor* InstancePtr);
+
+u32 XBufDescriptor_GetDeviceStatus(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetDeviceStatus(XBufDescriptor* InstancePtr,
+ u32 Status);
+
+u32 XBufDescriptor_GetSrcAddress(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetSrcAddress(XBufDescriptor* InstancePtr,
+ u32 SourceAddress);
+
+u32 XBufDescriptor_GetDestAddress(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetDestAddress(XBufDescriptor* InstancePtr,
+ u32 DestinationAddress);
+
+XBufDescriptor* XBufDescriptor_GetNextPtr(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetNextPtr(XBufDescriptor* InstancePtr,
+ XBufDescriptor* NextPtr);
+
+u32 XBufDescriptor_GetId(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetId(XBufDescriptor* InstancePtr, u32 Id);
+
+u32 XBufDescriptor_GetFlags(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetFlags(XBufDescriptor* InstancePtr, u32 Flags);
+
+void XBufDescriptor_Lock(XBufDescriptor* InstancePtr);
+void XBufDescriptor_Unlock(XBufDescriptor* InstancePtr);
+u32 XBufDescriptor_IsLocked(XBufDescriptor* InstancePtr);
+
+u16 XBufDescriptor_GetCSInit(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetCSInit(XBufDescriptor* InstancePtr, u16 InitialValue);
+
+u16 XBufDescriptor_GetCSInsertLoc(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetCSInsertLoc(XBufDescriptor* InstancePtr, u16 InsertLocation);
+
+u16 XBufDescriptor_GetCSBegin(XBufDescriptor* InstancePtr);
+void XBufDescriptor_SetCSBegin(XBufDescriptor* InstancePtr, u16 BeginLocation);
+
+u16 XBufDescriptor_GetCSRaw(XBufDescriptor* InstancePtr);
+
+void XBufDescriptor_Copy(XBufDescriptor* InstancePtr,
+ XBufDescriptor* DestinationPtr);
+
+*/
+
+#endif /* end of protection macro */
--- /dev/null
+#ifndef XDEBUG
+#define XDEBUG
+
+/* Debug output is compiled out by default; remove this #undef (and build
+ * with DEBUG defined, NDEBUG undefined) to enable the xdbg_* macros.
+ */
+#undef DEBUG
+
+#if defined(DEBUG) && !defined(NDEBUG)
+
+#ifndef XDEBUG_WARNING
+#define XDEBUG_WARNING
+#warning DEBUG is enabled
+#endif
+
+int printf(const char *format, ...);
+
+#define XDBG_DEBUG_ERROR 0x00000001 /* error condition messages */
+#define XDBG_DEBUG_GENERAL 0x00000002 /* general debug messages */
+#define XDBG_DEBUG_ALL 0xFFFFFFFF /* all debugging data */
+
+#define XDBG_DEBUG_FIFO_REG 0x00000100 /* display register reads/writes */
+#define XDBG_DEBUG_FIFO_RX 0x00000101 /* receive debug messages */
+#define XDBG_DEBUG_FIFO_TX 0x00000102 /* transmit debug messages */
+#define XDBG_DEBUG_FIFO_ALL 0x0000010F /* all fifo debug messages */
+
+#define XDBG_DEBUG_TEMAC_REG 0x00000400 /* display register reads/writes */
+#define XDBG_DEBUG_TEMAC_RX 0x00000401 /* receive debug messages */
+#define XDBG_DEBUG_TEMAC_TX 0x00000402 /* transmit debug messages */
+#define XDBG_DEBUG_TEMAC_ALL 0x0000040F /* all temac debug messages */
+
+#define XDBG_DEBUG_TEMAC_ADPT_RX 0x00000800 /* receive debug messages */
+#define XDBG_DEBUG_TEMAC_ADPT_TX 0x00000801 /* transmit debug messages */
+#define XDBG_DEBUG_TEMAC_ADPT_IOCTL 0x00000802 /* ioctl debug messages */
+#define XDBG_DEBUG_TEMAC_ADPT_MISC 0x00000803 /* debug msg for other routines */
+#define XDBG_DEBUG_TEMAC_ADPT_ALL 0x0000080F /* all temac adapter debug messages */
+
+#define xdbg_current_types (XDBG_DEBUG_ERROR | XDBG_DEBUG_GENERAL | XDBG_DEBUG_FIFO_REG | XDBG_DEBUG_TEMAC_REG)
+
+#define xdbg_stmnt(x) x
+/* Must expand to a conditional *expression*: the original form
+ * '(if ((type) & ...) printf(...) : 0)' embedded an if statement in an
+ * expression and would not compile once DEBUG was enabled.
+ */
+#define xdbg_printf(type, ...) (((type) & xdbg_current_types) ? printf (__VA_ARGS__) : 0)
+
+#else
+#define xdbg_stmnt(x)
+#define xdbg_printf(...)
+#endif
+
+#endif /* XDEBUG */
--- /dev/null
+/* $Id: xdma_channel.c,v 1.1 2006/12/13 14:21:45 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2001-2004 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdma_channel.c
+*
+* <b>Description</b>
+*
+* This file contains the DMA channel component. This component supports
+* a distributed DMA design in which each device can have its own dedicated
+* DMA channel, as opposed to a centralized DMA design. This component
+* performs processing for DMA on all devices.
+*
+* See xdma_channel.h for more information about this component.
+*
+* @note
+*
+* None.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a xd 10/27/04 Doxygenated for inclusion in API documentation
+* 1.00b ecm 10/31/05 Updated for the check sum offload changes.
+* 1.00b xd 03/22/06 Fixed a multi-descriptor packet related bug that sgdma
+* engine is restarted in case no scatter gather disabled
+* bit is set yet
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xdma_channel.h"
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/*****************************************************************************/
+/**
+*
+* This function initializes a DMA channel. This function must be called
+* prior to using a DMA channel. Initialization of a channel includes setting
+* up the registers base address, and resetting the channel such that it's in a
+* known state. Interrupts for the channel are disabled when the channel is
+* reset.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @param
+*
+* BaseAddress contains the base address of the registers for the DMA channel.
+*
+* @return
+*
+* XST_SUCCESS indicating initialization was successful.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XDmaChannel_Initialize(XDmaChannel * InstancePtr, u32 BaseAddress)
+{
+	/* assert to verify input arguments, don't assert base address */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* setup the base address of the registers for the DMA channel such
+	 * that register accesses can be done
+	 */
+	InstancePtr->RegBaseAddress = BaseAddress;
+
+	/* initialize the scatter gather list such that it indicates it has not
+	 * been created yet and the DMA channel is ready to use (initialized)
+	 */
+	InstancePtr->GetPtr = NULL;
+	InstancePtr->PutPtr = NULL;
+	InstancePtr->CommitPtr = NULL;
+	InstancePtr->LastPtr = NULL;
+
+	InstancePtr->TotalDescriptorCount = 0;
+	InstancePtr->ActiveDescriptorCount = 0;
+
+	InstancePtr->ActivePacketCount = 0;
+	InstancePtr->Committed = FALSE;
+
+	/* must be set before XDmaChannel_Reset() below, which asserts IsReady */
+	InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+	/* initialize the version of the component
+	 */
+	XVersion_FromString(&InstancePtr->Version, "1.00a");
+
+	/* reset the DMA channel such that it's in a known state and ready
+	 * and indicate the initialization occurred with no errors, note that
+	 * the is ready variable must be set before this call or reset will assert
+	 */
+	XDmaChannel_Reset(InstancePtr);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function determines if a DMA channel component has been successfully
+* initialized such that it's ready to use.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* TRUE if the DMA channel component is ready, FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_IsReady(XDmaChannel * InstancePtr)
+{
+	/* assert to verify input arguments used by the base component */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+
+	/* ready flag is set only at the end of XDmaChannel_Initialize() */
+	return InstancePtr->IsReady == XCOMPONENT_IS_READY;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the software version for the specified DMA channel
+* component.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* A pointer to the software version of the specified DMA channel.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+XVersion *XDmaChannel_GetVersion(XDmaChannel * InstancePtr)
+{
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* return a pointer to the version of the DMA channel; the pointer
+	 * aliases instance storage ("1.00a", set by XDmaChannel_Initialize)
+	 */
+	return &InstancePtr->Version;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function performs a self test on the specified DMA channel. This self
+* test is destructive as the DMA channel is reset and a register default is
+* verified.
+*
+* @param
+*
+* InstancePtr is a pointer to the DMA channel to be operated on.
+*
+* @return
+*
+* XST_SUCCESS is returned if the self test is successful, or one of the
+* following errors.
+* <br><br>
+* - XST_DMA_RESET_REGISTER_ERROR Indicates the control register value
+* after a reset was not correct
+*
+* @note
+*
+* This test does not perform a DMA transfer to test the channel because the
+* DMA hardware will not currently allow a non-local memory transfer to non-local
+* memory (memory copy), but only allows a non-local memory to or from the device
+* memory (typically a FIFO).
+*
+******************************************************************************/
+
+#define XDC_CONTROL_REG_RESET_MASK 0x98000000UL /* control reg reset value */
+
+int XDmaChannel_SelfTest(XDmaChannel * InstancePtr)
+{
+	u32 ControlReg;
+
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* reset the DMA channel such that it's in a known state before the test
+	 * it resets to no interrupts enabled, the desired state for the test
+	 * NOTE: destructive - any in-progress DMA state is lost
+	 */
+	XDmaChannel_Reset(InstancePtr);
+
+	/* this should be the first test to help prevent a lock up with the polling
+	 * loop that occurs later in the test, check the reset value of the DMA
+	 * control register to make sure it's correct, return with an error if not
+	 */
+	ControlReg = XDmaChannel_GetControl(InstancePtr);
+	if (ControlReg != XDC_CONTROL_REG_RESET_MASK) {
+		return XST_DMA_RESET_REGISTER_ERROR;
+	}
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function resets the DMA channel. This is a destructive operation such
+* that it should not be done while a channel is being used. If the DMA channel
+* is transferring data into other blocks, such as a FIFO, it may be necessary
+* to reset other blocks. This function does not modify the contents of a
+* scatter gather list for a DMA channel such that the user is responsible for
+* getting buffer descriptors from the list if necessary.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XDmaChannel_Reset(XDmaChannel * InstancePtr)
+{
+	/* assert to verify input arguments */
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* reset the DMA channel such that it's in a known state, the reset
+	 * register is self clearing such that it only has to be set;
+	 * per Initialize(), interrupts for the channel are disabled by reset
+	 */
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_RST_REG_OFFSET,
+		  XDC_RESET_MASK);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the control register contents of the DMA channel.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The control register contents of the DMA channel. One or more of the
+* following values may be contained in the register. Each value is a
+* unique bit mask.
+* <br><br>
+* - XDC_DMACR_SOURCE_INCR_MASK Increment the source address
+* <br><br>
+* - XDC_DMACR_DEST_INCR_MASK Increment the destination address
+* <br><br>
+* - XDC_DMACR_SOURCE_LOCAL_MASK Local source address
+* <br><br>
+* - XDC_DMACR_DEST_LOCAL_MASK Local destination address
+* <br><br>
+* - XDC_DMACR_SG_ENABLE_MASK Scatter gather enable
+* <br><br>
+* - XDC_DMACR_GEN_BD_INTR_MASK Individual buffer descriptor interrupt
+* <br><br>
+* - XDC_DMACR_LAST_BD_MASK Last buffer descriptor in a packet
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_GetControl(XDmaChannel * InstancePtr)
+{
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* return the contents of the DMA control register (XDC_DMACR_* bits) */
+
+	return XIo_In32(InstancePtr->RegBaseAddress + XDC_DMAC_REG_OFFSET);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets the control register of the specified DMA channel.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @param
+*
+* Control contains the value to be written to the control register of the DMA
+* channel. One or more of the following values may be contained in the
+* register. Each of the values is a unique bit mask such that they may be
+* ORed together to enable multiple bits or inverted and ANDed to disable
+* multiple bits.
+* - XDC_DMACR_SOURCE_INCR_MASK Increment the source address
+* - XDC_DMACR_DEST_INCR_MASK Increment the destination address
+* - XDC_DMACR_SOURCE_LOCAL_MASK Local source address
+* - XDC_DMACR_DEST_LOCAL_MASK Local destination address
+* - XDC_DMACR_SG_ENABLE_MASK Scatter gather enable
+* - XDC_DMACR_GEN_BD_INTR_MASK Individual buffer descriptor interrupt
+* - XDC_DMACR_LAST_BD_MASK Last buffer descriptor in a packet
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XDmaChannel_SetControl(XDmaChannel * InstancePtr, u32 Control)
+{
+	u32 Register;
+
+	/* assert to verify input arguments except the control which can't be
+	 * asserted since all values are valid
+	 */
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/*
+	 * write Control into the DMA control register; of the bits already in
+	 * the register, only the checksum-offload initial-value field
+	 * (XDC_DMACR_TX_CS_INIT_MASK) is preserved and ORed with Control -
+	 * every other existing bit is replaced
+	 */
+
+	Register = XIo_In32(InstancePtr->RegBaseAddress + XDC_DMAC_REG_OFFSET);
+	Register &= XDC_DMACR_TX_CS_INIT_MASK;
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_DMAC_REG_OFFSET,
+		  Register | Control);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the status register contents of the DMA channel.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The status register contents of the DMA channel. One or more of the
+* following values may be contained in the register. Each of the values are
+* unique bit masks.
+* <br><br>
+* - XDC_DMASR_BUSY_MASK The DMA channel is busy
+* <br><br>
+* - XDC_DMASR_BUS_ERROR_MASK A bus error occurred
+* <br><br>
+* - XDC_DMASR_BUS_TIMEOUT_MASK A bus timeout occurred
+* <br><br>
+* - XDC_DMASR_LAST_BD_MASK The last buffer descriptor of a packet
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_GetStatus(XDmaChannel * InstancePtr)
+{
+	/* verify the instance pointer is valid and the channel is initialized */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* read and return the current DMA status register (DMASR) contents */
+
+	return XIo_In32(InstancePtr->RegBaseAddress + XDC_DMAS_REG_OFFSET);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets the interrupt status register of the specified DMA channel.
+* Setting any bit of the interrupt status register will clear the bit to
+* indicate the interrupt processing has been completed. The definitions of each
+* bit in the register match the definition of the bits in the interrupt enable
+* register.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @param
+*
+* Status contains the value to be written to the status register of the DMA
+* channel. One or more of the following values may be contained in the register.
+* Each of the values are unique bit masks such that they may be ORed together
+* to enable multiple bits or inverted and ANDed to disable multiple bits.
+* - XDC_IXR_DMA_DONE_MASK The dma operation is done
+* - XDC_IXR_DMA_ERROR_MASK The dma operation had an error
+* - XDC_IXR_PKT_DONE_MASK A packet is complete
+* - XDC_IXR_PKT_THRESHOLD_MASK The packet count threshold reached
+* - XDC_IXR_PKT_WAIT_BOUND_MASK The packet wait bound reached
+* - XDC_IXR_SG_DISABLE_ACK_MASK The scatter gather disable completed
+* - XDC_IXR_BD_MASK A buffer descriptor is done
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XDmaChannel_SetIntrStatus(XDmaChannel * InstancePtr, u32 Status)
+{
+	/* verify the instance; the Status argument cannot be asserted
+	 * since every bit pattern is a legal status value
+	 */
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* the register is write-one-to-clear: each bit set in Status clears
+	 * the corresponding active interrupt, acknowledging that it has
+	 * been processed
+	 */
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_IS_REG_OFFSET, Status);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the interrupt status register of the specified DMA channel.
+* The interrupt status register indicates which interrupts are active
+* for the DMA channel. If an interrupt is active, the status register must be
+* set (written) with the bit set for each interrupt which has been processed
+* in order to clear the interrupts. The definitions of each bit in the register
+* match the definition of the bits in the interrupt enable register.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The interrupt status register contents of the specified DMA channel.
+* One or more of the following values may be contained in the register.
+* Each of the values are unique bit masks.
+* <br><br>
+* - XDC_IXR_DMA_DONE_MASK The dma operation is done
+* <br><br>
+* - XDC_IXR_DMA_ERROR_MASK The dma operation had an error
+* <br><br>
+* - XDC_IXR_PKT_DONE_MASK A packet is complete
+* <br><br>
+* - XDC_IXR_PKT_THRESHOLD_MASK The packet count threshold reached
+* <br><br>
+* - XDC_IXR_PKT_WAIT_BOUND_MASK The packet wait bound reached
+* <br><br>
+* - XDC_IXR_SG_DISABLE_ACK_MASK The scatter gather disable completed
+* <br><br>
+* - XDC_IXR_SG_END_MASK Current descriptor was the end of the list
+* <br><br>
+* - XDC_IXR_BD_MASK A buffer descriptor is done
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_GetIntrStatus(XDmaChannel * InstancePtr)
+{
+	/* verify the instance pointer is valid and the channel is initialized */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* read and return the interrupt status register; bits remain set
+	 * until cleared by writing them via XDmaChannel_SetIntrStatus
+	 */
+	return XIo_In32(InstancePtr->RegBaseAddress + XDC_IS_REG_OFFSET);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets the interrupt enable register of the specified DMA
+* channel. The interrupt enable register contains bits which enable
+* individual interrupts for the DMA channel. The definitions of each bit
+* in the register match the definition of the bits in the interrupt status
+* register.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @param
+*
+* Enable contains the interrupt enable register contents to be written
+* in the DMA channel. One or more of the following values may be contained
+* in the register. Each of the values are unique bit masks such that they may be
+* ORed together to enable multiple bits or inverted and ANDed to disable
+* multiple bits.
+* - XDC_IXR_DMA_DONE_MASK The dma operation is done
+* - XDC_IXR_DMA_ERROR_MASK The dma operation had an error
+* - XDC_IXR_PKT_DONE_MASK A packet is complete
+* - XDC_IXR_PKT_THRESHOLD_MASK The packet count threshold reached
+* - XDC_IXR_PKT_WAIT_BOUND_MASK The packet wait bound reached
+* - XDC_IXR_SG_DISABLE_ACK_MASK The scatter gather disable completed
+* - XDC_IXR_SG_END_MASK Current descriptor was the end of the list
+* - XDC_IXR_BD_MASK A buffer descriptor is done
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XDmaChannel_SetIntrEnable(XDmaChannel * InstancePtr, u32 Enable)
+{
+	/* verify the instance; the Enable argument cannot be asserted
+	 * since every bit pattern is a legal enable value
+	 */
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* write the new interrupt enable mask; bits not set here are masked */
+
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_IE_REG_OFFSET, Enable);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function gets the interrupt enable of the DMA channel. The
+* interrupt enable contains flags which enable individual interrupts for the
+* DMA channel. The definitions of each bit in the register match the definition
+* of the bits in the interrupt status register.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @return
+*
+* The interrupt enable of the DMA channel. One or more of the following values
+* may be contained in the register. Each of the values are unique bit masks.
+* <br><br>
+* - XDC_IXR_DMA_DONE_MASK The dma operation is done
+* <br><br>
+* - XDC_IXR_DMA_ERROR_MASK The dma operation had an error
+* <br><br>
+* - XDC_IXR_PKT_DONE_MASK A packet is complete
+* <br><br>
+* - XDC_IXR_PKT_THRESHOLD_MASK The packet count threshold reached
+* <br><br>
+* - XDC_IXR_PKT_WAIT_BOUND_MASK The packet wait bound reached
+* <br><br>
+* - XDC_IXR_SG_DISABLE_ACK_MASK The scatter gather disable completed
+* <br><br>
+* - XDC_IXR_BD_MASK A buffer descriptor is done
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_GetIntrEnable(XDmaChannel * InstancePtr)
+{
+	/* verify the instance pointer is valid and the channel is initialized */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* read and return the interrupt enable register contents */
+
+	return XIo_In32(InstancePtr->RegBaseAddress + XDC_IE_REG_OFFSET);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function starts the DMA channel transferring data from a memory source
+* to a memory destination. This function only starts the operation and returns
+* before the operation may be complete. If the interrupt is enabled, an
+* interrupt will be generated when the operation is complete, otherwise it is
+* necessary to poll the channel status to determine when it's complete. It is
+* the responsibility of the caller to determine when the operation is complete
+* by handling the generated interrupt or polling the status. It is also the
+* responsibility of the caller to ensure that the DMA channel is not busy with
+* another transfer before calling this function.
+*
+* @param
+*
+* InstancePtr contains a pointer to the DMA channel to operate on.
+*
+* @param
+*
+* SourcePtr contains a pointer to the source memory where the data is to
+* be transferred from and must be 32 bit aligned.
+*
+* @param
+*
+* DestinationPtr contains a pointer to the destination memory where the data
+* is to be transferred and must be 32 bit aligned.
+*
+* @param
+*
+* ByteCount contains the number of bytes to transfer during the DMA operation.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* The DMA hw will not currently allow a non-local memory transfer to non-local
+* memory (memory copy), but only allows a non-local memory to or from the device
+* memory (typically a FIFO).
+* <br><br>
+* It is the responsibility of the caller to ensure that the cache is
+* flushed and invalidated both before and after the DMA operation completes
+* if the memory pointed to is cached. The caller must also ensure that the
+* pointers contain a physical address rather than a virtual address
+* if address translation is being used.
+*
+******************************************************************************/
+void XDmaChannel_Transfer(XDmaChannel * InstancePtr,
+			  u32 *SourcePtr, u32 *DestinationPtr, u32 ByteCount)
+{
+	/* verify the instance and that both buffer pointers are non-null
+	 * and 32-bit word aligned, as required by the DMA hardware
+	 */
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(SourcePtr != NULL);
+	XASSERT_VOID(((u32) SourcePtr & 3) == 0);
+	XASSERT_VOID(DestinationPtr != NULL);
+	XASSERT_VOID(((u32) DestinationPtr & 3) == 0);
+	XASSERT_VOID(ByteCount != 0);
+	XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* program the source and destination address registers; these must
+	 * be written before the length register below
+	 */
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_SA_REG_OFFSET,
+		  (u32) SourcePtr);
+
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_DA_REG_OFFSET,
+		  (u32) DestinationPtr);
+
+	/* writing the byte count to the length register starts the DMA
+	 * transfer from the source buffer to the destination buffer
+	 */
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_LEN_REG_OFFSET, ByteCount);
+}
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+* FILENAME:
+*
+* xdma_channel.h
+*
+* DESCRIPTION:
+*
+* This file contains the DMA channel component implementation. This component
+* supports a distributed DMA design in which each device can have its own
+* dedicated DMA channel, as opposed to a centralized DMA design.
+* A device which uses DMA typically contains two DMA channels, one for
+* sending data and the other for receiving data.
+*
+* This component is designed to be used as a basic building block for
+* designing a device driver. It provides registers accesses such that all
+* DMA processing can be maintained easier, but the device driver designer
+* must still understand all the details of the DMA channel.
+*
+* The DMA channel allows a CPU to minimize the CPU interaction required to move
+* data between a memory and a device. The CPU requests the DMA channel to
+* perform a DMA operation and typically continues performing other processing
+* until the DMA operation completes. DMA could be considered a primitive form
+* of multiprocessing such that caching and address translation can be an issue.
+*
+* Scatter Gather Operations
+*
+* The DMA channel may support scatter gather operations. A scatter gather
+* operation automates the DMA channel such that multiple buffers can be
+* sent or received with minimal software interaction with the hardware. Buffer
+* descriptors, contained in the XBufDescriptor component, are used by the
+* scatter gather operations of the DMA channel to describe the buffers to be
+* processed.
+*
+* Scatter Gather List Operations
+*
+* A scatter gather list may be supported by each DMA channel. The scatter
+* gather list allows buffer descriptors to be put into the list by a device
+* driver which requires scatter gather. The hardware processes the buffer
+* descriptors which are contained in the list and modifies the buffer
+* descriptors to reflect the status of the DMA operations. The device driver
+* is notified by interrupt that specific DMA events occur including scatter
+* gather events. The device driver removes the completed buffer descriptors
+* from the scatter gather list to evaluate the status of each DMA operation.
+*
+* The scatter gather list is created and buffer descriptors are inserted into
+* the list. Buffer descriptors are never removed from the list after its
+* creation such that a put operation copies from a temporary buffer descriptor
+* to a buffer descriptor in the list. Get operations don't copy from the list
+* to a temporary, but return a pointer to the buffer descriptor in the list.
+* A buffer descriptor in the list may be locked to prevent it from being
+* overwritten by a put operation. This allows the device driver to get a
+* descriptor from a scatter gather list and prevent it from being overwritten
+* until the buffer associated with the buffer descriptor has been processed.
+*
+* Typical Scatter Gather Processing
+*
+* The following steps illustrate the typical processing to use the
+* scatter gather features of a DMA channel.
+*
+* 1. Create a scatter gather list for the DMA channel which puts empty buffer
+* descriptors into the list.
+* 2. Create buffer descriptors which describe the buffers to be filled with
+* receive data or the buffers which contain data to be sent.
+* 3. Put buffer descriptors into the DMA channel scatter list such that scatter
+* gather operations are requested.
+* 4. Commit the buffer descriptors in the list such that they are ready to be
+* used by the DMA channel hardware.
+* 5. Start the scatter gather operations of the DMA channel.
+* 6. Process any interrupts which occur as a result of the scatter gather
+* operations or poll the DMA channel to determine the status.
+*
+* Interrupts
+*
+* Each DMA channel has the ability to generate an interrupt. This component
+* does not perform processing for the interrupt as this processing is typically
+* tightly coupled with the device which is using the DMA channel. It is the
+* responsibility of the caller of DMA functions to manage the interrupt
+* including connecting to the interrupt and enabling/disabling the interrupt.
+*
+* Critical Sections
+*
+* It is the responsibility of the device driver designer to use critical
+* sections as necessary when calling functions of the DMA channel. This
+* component does not use critical sections and it does access registers using
+* read-modify-write operations. Calls to DMA functions from a main thread
+* and from an interrupt context could produce unpredictable behavior such that
+* the caller must provide the appropriate critical sections.
+*
+* Address Translation
+*
+* All addresses of data structures which are passed to DMA functions must
+* be physical (real) addresses as opposed to logical (virtual) addresses.
+*
+* Caching
+*
+* The memory which is passed to the function which creates the scatter gather
+* list must not be cached such that buffer descriptors are non-cached. This
+* is necessary because the buffer descriptors are kept in a ring buffer and
+* not directly accessible to the caller of DMA functions.
+*
+* The caller of DMA functions is responsible for ensuring that any data
+* buffers which are passed to the DMA channel are cache-line aligned if
+* necessary.
+*
+* The caller of DMA functions is responsible for ensuring that any data
+* buffers which are passed to the DMA channel have been flushed from the cache.
+*
+* The caller of DMA functions is responsible for ensuring that the cache is
+* invalidated prior to using any data buffers which are the result of a DMA
+* operation.
+*
+* Memory Alignment
+*
+* The addresses of data buffers which are passed to DMA functions must be
+* 32 bit word aligned since the DMA hardware performs 32 bit word transfers.
+*
+* Mutual Exclusion
+*
+* The functions of the DMA channel are not thread safe such that the caller
+* of all DMA functions is responsible for ensuring mutual exclusion for a
+* DMA channel. Mutual exclusion across multiple DMA channels is not
+* necessary.
+*
+* NOTES:
+*
+* Many of the provided functions which are register accessors don't provide
+* a lot of error detection. The caller is expected to understand the impact
+* of a function call based upon the current state of the DMA channel. This
+* is done to minimize the overhead in this component.
+*
+******************************************************************************/
+
+#ifndef XDMA_CHANNEL_H /* prevent circular inclusions */
+#define XDMA_CHANNEL_H /* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xversion.h"
+#include "xbuf_descriptor.h"
+#include "xdma_channel_i.h" /* constants shared with buffer descriptor */
+
+/************************** Constant Definitions *****************************/
+
+/** @name DMA control register bit fields
+ *
+ * the following constants provide access to the bit fields of the DMA control
+ * register (DMACR)
+ * @{
+ */
+#define XDC_DMACR_SOURCE_INCR_MASK 0x80000000UL /* increment source address */
+#define XDC_DMACR_DEST_INCR_MASK 0x40000000UL /* increment dest address */
+#define XDC_DMACR_SOURCE_LOCAL_MASK 0x20000000UL /* local source address */
+#define XDC_DMACR_DEST_LOCAL_MASK 0x10000000UL /* local dest address */
+#define XDC_DMACR_SG_DISABLE_MASK 0x08000000UL /* scatter gather disable */
+#define XDC_DMACR_GEN_BD_INTR_MASK 0x04000000UL /* descriptor interrupt */
+#define XDC_DMACR_LAST_BD_MASK XDC_CONTROL_LAST_BD_MASK /* last buffer */
+							/* descriptor */
+#define XDC_DMACR_DRE_MODE_MASK 0x01000000UL /* DRE/normal mode */
+
+#define XDC_DMACR_TX_CS_INIT_MASK 0x0000FFFFUL /* Initial value for TX
+						  CS offload */
+#define XDC_DMACR_CS_OFFLOAD_MASK 0x00800000UL /* Enable CS offload */
+/* @} */
+
+/** @name DMA status register bit fields
+ *
+ * the following constants provide access to the bit fields of the DMA status
+ * register (DMASR)
+ * @{
+ */
+#define XDC_DMASR_BUSY_MASK 0x80000000UL /* channel is busy */
+#define XDC_DMASR_BUS_ERROR_MASK 0x40000000UL /* bus error occurred */
+#define XDC_DMASR_BUS_TIMEOUT_MASK 0x20000000UL /* bus timeout occurred */
+#define XDC_DMASR_LAST_BD_MASK XDC_STATUS_LAST_BD_MASK /* last buffer */
+						       /* descriptor */
+#define XDC_DMASR_SG_BUSY_MASK 0x08000000UL /* scatter gather is busy */
+/* @} */
+
+/** @name DMA destination address register bit fields when checksum offload is
+ * used
+ *
+ * the following constants provide access to the bit fields of the
+ * Destination Address Register (DAREG)
+ * @{
+ */
+#define XDC_DAREG_CS_BEGIN_MASK 0xFFFF0000UL /* byte position to begin
+						checksum calculation */
+#define XDC_DAREG_CS_INSERT_MASK 0x0000FFFFUL /* byte position to place
+						 calculated checksum */
+/* @} */
+
+/** @name Interrupt status/enable register bit fields
+ *
+ * the following constants provide access to the bit fields of the interrupt
+ * status register (ISR) and the interrupt enable register (IER), bit masks
+ * match for both registers such that they are named IXR
+ * @{
+ */
+#define XDC_IXR_DMA_DONE_MASK 0x1UL /* dma operation done */
+#define XDC_IXR_DMA_ERROR_MASK 0x2UL /* dma operation error */
+#define XDC_IXR_PKT_DONE_MASK 0x4UL /* packet done */
+#define XDC_IXR_PKT_THRESHOLD_MASK 0x8UL /* packet count threshold */
+#define XDC_IXR_PKT_WAIT_BOUND_MASK 0x10UL /* packet wait bound reached */
+#define XDC_IXR_SG_DISABLE_ACK_MASK 0x20UL /* scatter gather disable
+					      acknowledge occurred */
+#define XDC_IXR_SG_END_MASK 0x40UL /* last buffer descriptor
+				      disabled scatter gather */
+#define XDC_IXR_BD_MASK 0x80UL /* buffer descriptor done */
+/* @} */
+/**************************** Type Definitions *******************************/
+
+/*
+ * the following structure contains data which is on a per instance basis
+ * for the XDmaChannel component
+ */
+typedef struct XDmaChannelTag {
+	XVersion Version;	/* version of the driver */
+	u32 RegBaseAddress;	/* base address of the channel's registers */
+	u32 IsReady;		/* set to XCOMPONENT_IS_READY when initialized */
+
+	XBufDescriptor *PutPtr;	/* next list position for a put operation */
+	XBufDescriptor *GetPtr;	/* next list position for a get operation */
+	XBufDescriptor *CommitPtr;	/* next list position to be committed */
+	XBufDescriptor *LastPtr;	/* last descriptor put into the list */
+	void *VirtPtr;		/* virtual base of memory */
+	void *PhyPtr;		/* physical base of memory */
+	u32 TotalDescriptorCount;	/* total # of descriptors in the list */
+	u32 ActiveDescriptorCount;	/* # of descriptors pointing to buffers
+					 * in the buffer descriptor list */
+	u32 ActivePacketCount;	/* # of packets put into the list whose
+				   transmission confirmation has not yet
+				   been received by the driver */
+	u32 Committed;		/* whether CommitPuts has been called */
+} XDmaChannel;
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+int XDmaChannel_Initialize(XDmaChannel * InstancePtr, u32 BaseAddress);
+u32 XDmaChannel_IsReady(XDmaChannel * InstancePtr);
+XVersion *XDmaChannel_GetVersion(XDmaChannel * InstancePtr);
+int XDmaChannel_SelfTest(XDmaChannel * InstancePtr);
+void XDmaChannel_Reset(XDmaChannel * InstancePtr);
+
+/* Control functions */
+
+u32 XDmaChannel_GetControl(XDmaChannel * InstancePtr);
+void XDmaChannel_SetControl(XDmaChannel * InstancePtr, u32 Control);
+
+/* Status functions */
+
+u32 XDmaChannel_GetStatus(XDmaChannel * InstancePtr);
+void XDmaChannel_SetIntrStatus(XDmaChannel * InstancePtr, u32 Status);
+u32 XDmaChannel_GetIntrStatus(XDmaChannel * InstancePtr);
+void XDmaChannel_SetIntrEnable(XDmaChannel * InstancePtr, u32 Enable);
+u32 XDmaChannel_GetIntrEnable(XDmaChannel * InstancePtr);
+
+/* DMA without scatter gather functions */
+
+void XDmaChannel_Transfer(XDmaChannel * InstancePtr,
+ u32 *SourcePtr, u32 *DestinationPtr, u32 ByteCount);
+
+/* Scatter gather functions */
+
+int XDmaChannel_SgStart(XDmaChannel * InstancePtr);
+int XDmaChannel_SgStop(XDmaChannel * InstancePtr,
+ XBufDescriptor ** BufDescriptorPtr);
+int XDmaChannel_CreateSgList(XDmaChannel * InstancePtr,
+ u32 *MemoryPtr, u32 ByteCount, void *PhyPtr);
+u32 XDmaChannel_IsSgListEmpty(XDmaChannel * InstancePtr);
+
+int XDmaChannel_PutDescriptor(XDmaChannel * InstancePtr,
+ XBufDescriptor * BufDescriptorPtr);
+int XDmaChannel_CommitPuts(XDmaChannel * InstancePtr);
+int XDmaChannel_GetDescriptor(XDmaChannel * InstancePtr,
+ XBufDescriptor ** BufDescriptorPtr);
+
+/* Packet functions for interrupt coalescing */
+
+u32 XDmaChannel_GetPktCount(XDmaChannel * InstancePtr);
+void XDmaChannel_DecrementPktCount(XDmaChannel * InstancePtr);
+int XDmaChannel_SetPktThreshold(XDmaChannel * InstancePtr, u8 Threshold);
+u8 XDmaChannel_GetPktThreshold(XDmaChannel * InstancePtr);
+void XDmaChannel_SetPktWaitBound(XDmaChannel * InstancePtr, u32 WaitBound);
+u32 XDmaChannel_GetPktWaitBound(XDmaChannel * InstancePtr);
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xdma_channel_i.h,v 1.1 2006/12/13 14:22:04 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2001-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdma_channel_i.h
+*
+* <b>Description</b>
+*
+* This file contains internal shared data for the DMA channel
+* component. It is also shared with the buffer descriptor component which is
+* very tightly coupled with the DMA channel component.
+*
+* @note
+*
+* The last buffer descriptor constants must be located here to prevent a
+* circular dependency between the DMA channel component and the buffer
+* descriptor component.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a xd 10/27/04 Doxygenated for inclusion in API documentation
+* 1.00b ecm 10/31/05 Updated for the check sum offload changes.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XDMA_CHANNEL_I_H /* prevent circular inclusions */
+#define XDMA_CHANNEL_I_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xversion.h"
+
+/************************** Constant Definitions *****************************/
+
+#define XDC_DMA_CHANNEL_V1_00_B "1.00b"
+
+/** @name DMA control register bit fields
+ *
+ * the following constant provides access to the bit fields of the DMA control
+ * register (DMACR) which must be shared between the DMA channel component
+ * and the buffer descriptor component
+ * @{
+ */
+#define XDC_CONTROL_LAST_BD_MASK 0x02000000UL /**< last buffer descriptor */
+/* @} */
+
+/** @name DMA status register bit fields
+ *
+ * the following constant provides access to the bit fields of the DMA status
+ * register (DMASR) which must be shared between the DMA channel component
+ * and the buffer descriptor component
+ * @{
+ */
+#define XDC_STATUS_LAST_BD_MASK 0x10000000UL /**< last buffer descriptor */
+
+#define XDC_DMASR_RX_CS_RAW_MASK 0xFFFF0000UL /**< RAW CS value for RX data */
+/* @} */
+
+/** @name DMA Channel register offsets
+ *
+ * the following constants provide access to each of the registers of a DMA
+ * channel
+ * @{
+ */
+#define XDC_RST_REG_OFFSET 0 /**< reset register */
+#define XDC_MI_REG_OFFSET 0 /**< module information register */
+#define XDC_DMAC_REG_OFFSET 4 /**< DMA control register */
+#define XDC_SA_REG_OFFSET 8 /**< source address register */
+#define XDC_DA_REG_OFFSET 12 /**< destination address register */
+#define XDC_LEN_REG_OFFSET 16 /**< length register */
+#define XDC_DMAS_REG_OFFSET 20 /**< DMA status register */
+#define XDC_BDA_REG_OFFSET 24 /**< buffer descriptor address register */
+#define XDC_SWCR_REG_OFFSET 28 /**< software control register */
+#define XDC_UPC_REG_OFFSET 32 /**< unserviced packet count register */
+#define XDC_PCT_REG_OFFSET 36 /**< packet count threshold register */
+#define XDC_PWB_REG_OFFSET 40 /**< packet wait bound register */
+#define XDC_IS_REG_OFFSET 44 /**< interrupt status register */
+#define XDC_IE_REG_OFFSET 48 /**< interrupt enable register */
+/* @} */
+
+/**
+ * the following constant is written to the reset register to reset the
+ * DMA channel
+ */
+#define XDC_RESET_MASK 0x0000000AUL
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/******************************************************************************
+*
+* Author: Xilinx, Inc.
+*
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A
+* COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS
+* ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD,
+* XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE
+* FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING
+* ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION.
+* XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO
+* THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY
+* WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM
+* CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND
+* FITNESS FOR A PARTICULAR PURPOSE.
+*
+*
+* Xilinx hardware products are not intended for use in life support
+* appliances, devices, or systems. Use in such applications is
+* expressly prohibited.
+*
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+*
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+* FILENAME:
+*
+* xdma_channel_sg.c
+*
+* DESCRIPTION:
+*
+* This file contains the implementation of the XDmaChannel component which is
+* related to scatter gather operations.
+*
+* Scatter Gather Operations
+*
+* The DMA channel may support scatter gather operations. A scatter gather
+* operation automates the DMA channel such that multiple buffers can be
+* sent or received with minimal software interaction with the hardware. Buffer
+* descriptors, contained in the XBufDescriptor component, are used by the
+* scatter gather operations of the DMA channel to describe the buffers to be
+* processed.
+*
+* Scatter Gather List Operations
+*
+* A scatter gather list may be supported by each DMA channel. The scatter
+* gather list allows buffer descriptors to be put into the list by a device
+* driver which requires scatter gather. The hardware processes the buffer
+* descriptors which are contained in the list and modifies the buffer
+* descriptors to reflect the status of the DMA operations. The device driver
+* is notified by interrupt that specific DMA events occur including scatter
+* gather events. The device driver removes the completed buffer descriptors
+* from the scatter gather list to evaluate the status of each DMA operation.
+*
+* The scatter gather list is created and buffer descriptors are inserted into
+* the list. Buffer descriptors are never removed from the list after its
+* creation such that a put operation copies from a temporary buffer descriptor
+* to a buffer descriptor in the list. Get operations don't copy from the list
+* to a temporary, but return a pointer to the buffer descriptor in the list.
+* A buffer descriptor in the list may be locked to prevent it from being
+* overwritten by a put operation. This allows the device driver to get a
+* descriptor from a scatter gather list and prevent it from being overwritten
+* until the buffer associated with the buffer descriptor has been processed.
+*
+* The get and put functions only operate on the list and are asynchronous from
+* the hardware which may be using the list of descriptors. This is important
+* because there are no checks in the get and put functions to ensure that the
+* hardware has processed the descriptors. This must be handled by the driver
+* using the DMA scatter gather channel through the use of the other functions.
+* When a scatter gather operation is started, the start function does ensure
+* that the descriptor to start has not already been processed by the hardware
+* and is not the first of a series of descriptors that have not been committed
+* yet.
+*
+* Descriptors are put into the list but not marked as ready to use by the
+* hardware until a commit operation is done. This allows multiple descriptors
+* which may contain a single packet of information for a protocol to be
+* guaranteed not to cause any underflow conditions during transmission. The
+* hardware design only allows descriptors to cause it to stop after a descriptor
+* has been processed rather than before it is processed. A series of
+* descriptors are put into the list followed by a commit operation, or each
+* descriptor may be committed. A commit operation is performed by changing a
+* single descriptor, the first of the series of puts, to indicate that the
+* hardware may now use all descriptors after it. The last descriptor in the
+* list is always set to cause the hardware to stop after it is processed.
+*
+* Typical Scatter Gather Processing
+*
+* The following steps illustrate the typical processing to use the
+* scatter gather features of a DMA channel.
+*
+* 1. Create a scatter gather list for the DMA channel which puts empty buffer
+* descriptors into the list.
+* 2. Create buffer descriptors which describe the buffers to be filled with
+* receive data or the buffers which contain data to be sent.
+* 3. Put buffer descriptors into the DMA channel scatter list such that scatter
+* gather operations are requested.
+* 4. Commit the buffer descriptors in the list such that they are ready to be
+* used by the DMA channel hardware.
+* 5. Start the scatter gather operations of the DMA channel.
+* 6. Process any interrupts which occur as a result of the scatter gather
+* operations or poll the DMA channel to determine the status. This may
+* be accomplished by getting the packet count for the channel and then
+* getting the appropriate number of descriptors from the list for that
+* number of packets.
+*
+* Minimizing Interrupts
+*
+* The Scatter Gather operating mode is designed to reduce the amount of CPU
+* throughput necessary to manage the hardware for devices. A key to the CPU
+* throughput is the number and rate of interrupts that the CPU must service.
+* Devices with higher data rates can cause larger numbers of interrupts and
+* higher frequency interrupts. Ideally the number of interrupts can be reduced
+* by only generating an interrupt when a specific amount of data has been
+* received from the interface. This design suffers from a lack of interrupts
+* when the amount of data received is less than the specified amount of data
+* to generate an interrupt. In order to help minimize the number of interrupts
+* which the CPU must service, an algorithm referred to as "interrupt coalescing"
+* is utilized.
+*
+* Interrupt Coalescing
+*
+* The principle of interrupt coalescing is to wait before generating an
+* interrupt until a certain number of packets have been received or sent. An
+* interrupt is also generated if a smaller number of packets have been received
+* followed by a certain period of time with no packet reception. This is a
+* trade-off of latency for bandwidth and is accomplished using several
+* mechanisms of the hardware including a counter for packets received or
+* transmitted and a packet timer. These two hardware mechanisms work in
+* combination to allow a reduction in the number of interrupts processed by the
+* CPU for packet reception.
+*
+* Unserviced Packet Count
+*
+* The purpose of the packet counter is to count the number of packets received
+* or transmitted and provide an interrupt when a specific number of packets
+* have been processed by the hardware. An interrupt is generated whenever the
+* counter is greater than or equal to the Packet Count Threshold. This counter
+* contains an accurate count of the number of packets that the hardware has
+* processed, either received or transmitted, and the software has not serviced.
+*
+* The packet counter allows the number of interrupts to be reduced by waiting
+* to generate an interrupt until enough packets are received. For packet
+* reception, packet counts of less than the number to generate an interrupt
+* would not be serviced without the addition of a packet timer. This counter is
+* continuously updated by the hardware, not latched to the value at the time
+* the interrupt occurred.
+*
+* The packet counter can be used within the interrupt service routine for the
+* device to reduce the number of interrupts. The interrupt service routine
+* loops while performing processing for each packet which has been received or
+* transmitted and decrements the counter by a specified value. At the same time,
+* the hardware is possibly continuing to receive or transmit more packets such
+* that the software may choose, based upon the value in the packet counter, to
+* remain in the interrupt service routine rather than exiting and immediately
+* returning. This feature should be used with caution as reducing the number of
+* interrupts is beneficial, but unbounded interrupt processing is not desirable.
+*
+* Since the hardware may be incrementing the packet counter simultaneously
+* with the software decrementing the counter, there is a need for atomic
+* operations. The hardware ensures that the operation is atomic such that
+* simultaneous accesses are properly handled.
+*
+* Packet Wait Bound
+*
+* The purpose of the packet wait bound is to augment the unserviced packet
+* count. Whenever there is no pending interrupt for the channel and the
+* unserviced packet count is non-zero, a timer starts counting timeout at the
+* value contained in the packet wait bound register. If the timeout is
+* reached, an interrupt is generated such that the software may service the
+* data which was buffered.
+*
+* NOTES:
+*
+* Special Test Conditions:
+*
+* The scatter gather list processing must be thoroughly tested if changes are
+* made. Testing should include putting and committing single descriptors and
+* putting multiple descriptors followed by a single commit. There are some
+* conditions in the code which handle the exception conditions.
+*
+* The Put Pointer points to the next location in the descriptor list to copy
+* in a new descriptor. The Get Pointer points to the next location in the
+* list to get a descriptor from. The Get Pointer only allows software to
+* traverse the list after the hardware has finished processing some
+* number of descriptors. The Commit Pointer points to the descriptor in the
+* list which is to be committed. It is also used to determine that no
+* descriptor is waiting to be committed (NULL). The Last Pointer points to
+* the last descriptor that was put into the list. It typically points
+* to the previous descriptor to the one pointed to by the Put Pointer.
+* Comparisons are done between these pointers to determine when the following
+* special conditions exist.
+
+* Single Put And Commit
+*
+* The buffer descriptor is ready to be used by the hardware so it is important
+* for the descriptor to not appear to be waiting to be committed. The commit
+* pointer is reset when a commit is done indicating there are no descriptors
+* waiting to be committed. In all cases but this one, the descriptor is
+* changed to cause the hardware to go to the next descriptor after processing
+* this one. But in this case, this is the last descriptor in the list such
+* that it must not be changed.
+*
+* 3 Or More Puts And Commit
+*
+* A series of 3 or more puts followed by a single commit is different in that
+* only the 1st descriptor put into the list is changed when the commit is done.
+* This requires each put starting on the 3rd to change the previous descriptor
+* so that it allows the hardware to continue to the next descriptor in the list.
+*
+* The 1st Put Following A Commit
+*
+* The commit caused the commit pointer to be NULL indicating that there are no
+* descriptors waiting to be committed. It is necessary for the next put to set
+* the commit pointer so that a commit must follow the put for the hardware to
+* use the descriptor.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ------------------------------------------------------
+* 1.00a rpm 02/03/03 Removed the XST_DMA_SG_COUNT_EXCEEDED return code
+* from SetPktThreshold.
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xdma_channel.h"
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xbuf_descriptor.h"
+#include "xstatus.h"
+
+/* simple virt<-->phy pointer conversions for a single dma channel.
+ * NOTE: both macros implicitly use a local variable named InstancePtr that
+ * must be in scope at the point of expansion; the VirtPtr/PhyPtr base
+ * addresses it supplies are saved by XDmaChannel_CreateSgList. A NULL
+ * input pointer maps to NULL (0) rather than being offset.
+ */
+#define P_TO_V(p) \
+	((p) ? \
+	 (InstancePtr->VirtPtr + ((u32)(p) - (u32)InstancePtr->PhyPtr)) : \
+	 0)
+
+#define V_TO_P(v) \
+	((v) ? \
+	 (InstancePtr->PhyPtr + ((u32)(v) - (u32)InstancePtr->VirtPtr)) : \
+	 0)
+
+/************************** Constant Definitions *****************************/
+
+#define XDC_SWCR_SG_ENABLE_MASK 0x80000000UL /* s/w control register: scatter gather enable bit */
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/* the following macro copies selected fields of a buffer descriptor to another
+ * buffer descriptor, this was provided by the buffer descriptor component but
+ * was moved here since it is only used internally to this component and since
+ * it does not copy all fields
+ *
+ * NOTE: the first argument is the SOURCE descriptor (its name, InstancePtr,
+ * is historical) and the second is the destination. The XBD_*_OFFSET values
+ * are u32-word offsets, hence the (u32 *) pointer arithmetic. The
+ * next-descriptor pointer field is deliberately not among the copied fields,
+ * so the ring linkage of the destination descriptor is preserved.
+ */
+#define CopyBufferDescriptor(InstancePtr, DestinationPtr) \
+{ \
+	*((u32 *)DestinationPtr + XBD_CONTROL_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_CONTROL_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_SOURCE_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_SOURCE_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_DESTINATION_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_DESTINATION_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_LENGTH_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_LENGTH_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_STATUS_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_STATUS_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_DEVICE_STATUS_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_DEVICE_STATUS_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_ID_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_ID_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_FLAGS_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_FLAGS_OFFSET); \
+	*((u32 *)DestinationPtr + XBD_RQSTED_LENGTH_OFFSET) = \
+		*((u32 *)InstancePtr + XBD_RQSTED_LENGTH_OFFSET); \
+}
+
+/************************** Variable Definitions *****************************/
+
+/************************** Function Prototypes ******************************/
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_SgStart
+*
+* DESCRIPTION:
+*
+* This function starts a scatter gather operation for a scatter gather
+* DMA channel. The first buffer descriptor in the buffer descriptor list
+* will be started with the scatter gather operation. A scatter gather list
+* should have previously been created for the DMA channel and buffer
+* descriptors put into the scatter gather list such that there are scatter
+* operations ready to be performed.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* A status containing XST_SUCCESS if scatter gather was started successfully
+* for the DMA channel.
+*
+* A value of XST_DMA_SG_NO_LIST indicates the scatter gather list has not
+* been created.
+*
+* A value of XST_DMA_SG_LIST_EMPTY indicates scatter gather was not started
+* because the scatter gather list of the DMA channel does not contain any
+* buffer descriptors that are ready to be processed by the hardware.
+*
+* A value of XST_DMA_SG_IS_STARTED indicates scatter gather was not started
+* because the scatter gather was not stopped, but was already started.
+*
+* A value of XST_DMA_SG_BD_NOT_COMMITTED indicates the buffer descriptor of
+* scatter gather list which was to be started is not committed to the list.
+* This status is more likely if this function is being called from an ISR
+* and non-ISR processing is putting descriptors into the list.
+*
+* A value of XST_DMA_SG_NO_DATA indicates that the buffer descriptor of the
+* scatter gather list which was to be started had already been used by the
+* hardware for a DMA transfer that has been completed.
+*
+* NOTES:
+*
+* It is the responsibility of the caller to get all the buffer descriptors
+* after performing a stop operation and before performing a start operation.
+* If buffer descriptors are not retrieved between stop and start operations,
+* buffer descriptors may be processed by the hardware more than once.
+*
+******************************************************************************/
+int XDmaChannel_SgStart(XDmaChannel * InstancePtr)
+{
+	u32 Register;		/* scratch for register read-modify-write */
+	XBufDescriptor *LastDescriptorPtr;	/* last descriptor h/w finished */
+
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* if a scatter gather list has not been created yet, return a status */
+
+	if (InstancePtr->TotalDescriptorCount == 0) {
+		return XST_DMA_SG_NO_LIST;
+	}
+
+	/* if the scatter gather list exists but is empty then return a status */
+
+	if (XDmaChannel_IsSgListEmpty(InstancePtr)) {
+		return XST_DMA_SG_LIST_EMPTY;
+	}
+
+	/* if scatter gather is busy for the DMA channel, return a status because
+	 * restarting it could lose data
+	 */
+
+	Register = XIo_In32(InstancePtr->RegBaseAddress + XDC_DMAS_REG_OFFSET);
+	if (Register & XDC_DMASR_SG_BUSY_MASK) {
+		return XST_DMA_SG_IS_STARTED;
+	}
+
+	/* get the address of the last buffer descriptor which the DMA hardware
+	 * finished processing; the BDA register holds a physical address which
+	 * is converted back to a virtual pointer (NULL if the register reads 0)
+	 */
+	LastDescriptorPtr = (XBufDescriptor *)
+	    P_TO_V(XIo_In32
+		   (InstancePtr->RegBaseAddress + XDC_BDA_REG_OFFSET));
+
+	/* setup the first buffer descriptor that will be sent when the scatter
+	 * gather channel is enabled, this is only necessary one time since
+	 * the BDA register of the channel maintains the last buffer descriptor
+	 * processed; a NULL BDA indicates the channel has not processed any
+	 * descriptor yet
+	 */
+	if (LastDescriptorPtr == NULL) {
+		XIo_Out32(InstancePtr->RegBaseAddress + XDC_BDA_REG_OFFSET,
+			  (u32) V_TO_P(InstancePtr->GetPtr));
+	}
+	else {
+		XBufDescriptor *NextDescriptorPtr;
+
+		/* get the next descriptor to be started, if the status indicates it
+		 * hasn't already been used by the h/w, then it's OK to start it,
+		 * s/w sets the status of each descriptor to busy and then h/w clears
+		 * the busy when it is complete
+		 */
+		NextDescriptorPtr =
+		    P_TO_V(XBufDescriptor_GetNextPtr(LastDescriptorPtr));
+
+		if ((XBufDescriptor_GetStatus(NextDescriptorPtr) &
+		     XDC_DMASR_BUSY_MASK) == 0) {
+			return XST_DMA_SG_NO_DATA;
+		}
+		/* don't start the DMA SG channel if the descriptor to be processed
+		 * by h/w is to be committed by the s/w, this function can be called
+		 * such that it interrupts a thread that was putting into the list
+		 */
+		if (NextDescriptorPtr == InstancePtr->CommitPtr) {
+			return XST_DMA_SG_BD_NOT_COMMITTED;
+		}
+	}
+
+	/* start the scatter gather operation by clearing the stop bit in the
+	 * control register and setting the enable bit in the s/w control register,
+	 * both of these are necessary to cause it to start, right now the order of
+	 * these statements is important, the software control register should be
+	 * set 1st. The other order can cause the CPU to have a loss of sync
+	 * because it cannot read/write the register while the DMA operation is
+	 * running.  DO NOT reorder the two register writes below.
+	 */
+
+	Register = XIo_In32(InstancePtr->RegBaseAddress + XDC_SWCR_REG_OFFSET);
+
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_SWCR_REG_OFFSET,
+		  Register | XDC_SWCR_SG_ENABLE_MASK);
+
+	Register = XIo_In32(InstancePtr->RegBaseAddress + XDC_DMAC_REG_OFFSET);
+
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_DMAC_REG_OFFSET,
+		  Register & ~XDC_DMACR_SG_DISABLE_MASK);
+
+	/* indicate the DMA channel scatter gather operation was started
+	 * successfully
+	 */
+	return XST_SUCCESS;
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_SgStop
+*
+* DESCRIPTION:
+*
+* This function stops a scatter gather operation for a scatter gather
+* DMA channel. This function starts the process of stopping a scatter
+* gather operation that is in progress and waits for the stop to be completed.
+* Since it waits for the operation to stopped before returning, this function
+* could take an amount of time relative to the size of the DMA scatter gather
+* operation which is in progress. The scatter gather list of the DMA channel
+* is not modified by this function such that starting the scatter gather
+* channel after stopping it will cause it to resume. This operation is
+* considered to be a graceful stop in that the scatter gather operation
+* completes the current buffer descriptor before stopping.
+*
+* If the interrupt is enabled, an interrupt will be generated when the
+* operation is stopped and the caller is responsible for handling the
+* interrupt.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* BufDescriptorPtr is also a return value which contains a pointer to the
+* buffer descriptor which the scatter gather operation completed when it
+* was stopped.
+*
+* RETURN VALUE:
+*
+* A status containing XST_SUCCESS if scatter gather was stopped successfully
+* for the DMA channel.
+*
+* A value of XST_DMA_SG_IS_STOPPED indicates scatter gather was not stopped
+* because the scatter gather is not started, but was already stopped.
+*
+* BufDescriptorPtr contains a pointer to the buffer descriptor which was
+* completed when the operation was stopped.
+*
+* NOTES:
+*
+* This function implements a loop which polls the hardware for an infinite
+* amount of time. If the hardware is not operating correctly, this function
+* may never return.
+*
+******************************************************************************/
+int
+XDmaChannel_SgStop(XDmaChannel * InstancePtr,
+		   XBufDescriptor ** BufDescriptorPtr)
+{
+	u32 Register;		/* scratch for register read-modify-write */
+
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(BufDescriptorPtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* get the contents of the software control register, if scatter gather is not
+	 * enabled (started), then return a status because the disable acknowledge
+	 * would not be generated
+	 */
+	Register = XIo_In32(InstancePtr->RegBaseAddress + XDC_SWCR_REG_OFFSET);
+
+	if ((Register & XDC_SWCR_SG_ENABLE_MASK) == 0) {
+		return XST_DMA_SG_IS_STOPPED;
+	}
+
+	/* disable scatter gather by writing to the software control register
+	 * without modifying any other bits of the register
+	 */
+	XIo_Out32(InstancePtr->RegBaseAddress + XDC_SWCR_REG_OFFSET,
+		  Register & ~XDC_SWCR_SG_ENABLE_MASK);
+
+	/* scatter gather does not disable immediately, but after the current
+	 * buffer descriptor is complete, so wait for the DMA channel to indicate
+	 * the disable is complete; NOTE: this poll is unbounded -- if the
+	 * hardware never clears the busy bit this loop never terminates (see
+	 * the NOTES in the function header)
+	 */
+	do {
+		Register =
+		    XIo_In32(InstancePtr->RegBaseAddress +
+			     XDC_DMAS_REG_OFFSET);
+	}
+	while (Register & XDC_DMASR_SG_BUSY_MASK);
+
+	/* set the specified buffer descriptor pointer to point to the buffer
+	 * descriptor that the scatter gather DMA channel was processing; the
+	 * BDA register holds a physical address, converted back to virtual
+	 */
+	*BufDescriptorPtr = (XBufDescriptor *)
+	    P_TO_V(XIo_In32
+		   (InstancePtr->RegBaseAddress + XDC_BDA_REG_OFFSET));
+
+	return XST_SUCCESS;
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_CreateSgList
+*
+* DESCRIPTION:
+*
+* This function creates a scatter gather list in the DMA channel. A scatter
+* gather list consists of a list of buffer descriptors that are available to
+* be used for scatter gather operations. Buffer descriptors are put into the
+* list to request a scatter gather operation to be performed.
+*
+* A number of buffer descriptors are created from the specified memory and put
+* into a buffer descriptor list as empty buffer descriptors. This function must
+* be called before non-empty buffer descriptors may be put into the DMA channel
+* to request scatter gather operations.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* MemoryPtr contains a pointer to the memory which is to be used for buffer
+* descriptors and must not be cached (virtual).
+*
+* ByteCount contains the number of bytes for the specified memory to be used
+* for buffer descriptors.
+*
+* PhyPtr contains a pointer to the physical memory use for buffer descriptors.
+*
+* RETURN VALUE:
+*
+* A status contains XST_SUCCESS if the scatter gather list was successfully
+* created.
+*
+* A value of XST_DMA_SG_LIST_EXISTS indicates that the scatter gather list
+* was not created because the list has already been created.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+int
+XDmaChannel_CreateSgList(XDmaChannel * InstancePtr,
+			 u32 *MemoryPtr, u32 ByteCount, void *PhyPtr)
+{
+	XBufDescriptor *BufferDescriptorPtr = (XBufDescriptor *) MemoryPtr;
+	XBufDescriptor *PreviousDescriptorPtr = NULL;	/* last descriptor created */
+	XBufDescriptor *StartOfListPtr = BufferDescriptorPtr;	/* ring head, for closure */
+	u32 UsedByteCount;	/* bytes of MemoryPtr consumed so far */
+
+	/* assert to verify valid input arguments, alignment for those
+	 * arguments that have alignment restrictions, and at least enough
+	 * memory for one buffer descriptor
+	 */
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(MemoryPtr != NULL);
+	XASSERT_NONVOID(((u32) MemoryPtr & 3) == 0);
+	XASSERT_NONVOID(ByteCount != 0);
+	XASSERT_NONVOID(ByteCount >= sizeof(XBufDescriptor));
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* if the scatter gather list has already been created, then return
+	 * with a status
+	 */
+	if (InstancePtr->TotalDescriptorCount != 0) {
+		return XST_DMA_SG_LIST_EXISTS;
+	}
+
+	/* save this up front so V_TO_P() works correctly (the conversion
+	 * macros read VirtPtr/PhyPtr from the instance)
+	 */
+	InstancePtr->VirtPtr = MemoryPtr;
+	InstancePtr->PhyPtr = PhyPtr;
+
+	/* loop thru the specified memory block and create as many buffer
+	 * descriptors as possible putting each into the list which is
+	 * implemented as a ring buffer, make sure not to use any memory which
+	 * is not large enough for a complete buffer descriptor (trailing
+	 * bytes smaller than one descriptor are left unused)
+	 */
+	UsedByteCount = 0;
+	while ((UsedByteCount + sizeof(XBufDescriptor)) <= ByteCount) {
+		/* setup a pointer to the next buffer descriptor in the memory and
+		 * update # of used bytes to know when all of memory is used
+		 */
+		BufferDescriptorPtr = (XBufDescriptor *) ((u32) MemoryPtr +
+							  UsedByteCount);
+
+		/* initialize the new buffer descriptor such that it doesn't contain
+		 * garbage which could be used by the DMA hardware
+		 */
+		XBufDescriptor_Initialize(BufferDescriptorPtr);
+
+		/* if this is not the first buffer descriptor to be created,
+		 * then link it to the last created buffer descriptor
+		 * (links are stored as physical addresses for the hardware)
+		 */
+		if (PreviousDescriptorPtr != NULL) {
+			XBufDescriptor_SetNextPtr(PreviousDescriptorPtr,
+						  V_TO_P(BufferDescriptorPtr));
+		}
+
+		/* always keep a pointer to the last created buffer descriptor such
+		 * that they can be linked together in the ring buffer
+		 */
+		PreviousDescriptorPtr = BufferDescriptorPtr;
+
+		/* keep a count of the number of descriptors in the list to allow
+		 * error processing to be performed
+		 */
+		InstancePtr->TotalDescriptorCount++;
+
+		UsedByteCount += sizeof(XBufDescriptor);
+	}
+
+	/* connect the last buffer descriptor created and inserted in the list
+	 * to the first such that a ring buffer is created
+	 */
+	XBufDescriptor_SetNextPtr(BufferDescriptorPtr, V_TO_P(StartOfListPtr));
+
+	/* initialize the ring buffer to indicate that there are no
+	 * buffer descriptors in the list which point to valid data buffers
+	 * (put/get/last all start at the same descriptor; nothing is
+	 * waiting to be committed)
+	 */
+	InstancePtr->PutPtr = BufferDescriptorPtr;
+	InstancePtr->GetPtr = BufferDescriptorPtr;
+	InstancePtr->CommitPtr = NULL;
+	InstancePtr->LastPtr = BufferDescriptorPtr;
+	InstancePtr->ActiveDescriptorCount = 0;
+	InstancePtr->ActivePacketCount = 0;
+	InstancePtr->Committed = FALSE;
+
+	/* indicate the scatter gather list was successfully created */
+
+	return XST_SUCCESS;
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_IsSgListEmpty
+*
+* DESCRIPTION:
+*
+* This function determines if the scatter gather list of a DMA channel is
+* empty with regard to buffer descriptors which are pointing to buffers to be
+* used for scatter gather operations.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* A value of TRUE if the scatter gather list is empty, otherwise a value of
+* FALSE.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_IsSgListEmpty(XDmaChannel * InstancePtr)
+{
+	/* assert to verify valid input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+	/* if the number of descriptors which are being used in the list is zero
+	 * then the list is empty; the TRUE/FALSE result is returned as a u32
+	 */
+	return (InstancePtr->ActiveDescriptorCount == 0);
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_PutDescriptor
+*
+* DESCRIPTION:
+*
+* This function puts a buffer descriptor into the DMA channel scatter
+* gather list. A DMA channel maintains a list of buffer descriptors which are
+* to be processed. This function puts the specified buffer descriptor
+* at the next location in the list. Note that since the list is already intact,
+* the information in the parameter is copied into the list (rather than modify
+* list pointers on the fly).
+*
+* After buffer descriptors are put into the list, they must also be committed
+* by calling another function. This allows multiple buffer descriptors which
+* span a single packet to be put into the list while preventing the hardware
+* from starting the first buffer descriptor of the packet.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* BufferDescriptorPtr is a pointer to the buffer descriptor to be put into
+* the next available location of the scatter gather list.
+*
+* RETURN VALUE:
+*
+* A status which indicates XST_SUCCESS if the buffer descriptor was
+* successfully put into the scatter gather list.
+*
+* A value of XST_DMA_SG_NO_LIST indicates the scatter gather list has not
+* been created.
+*
+* A value of XST_DMA_SG_LIST_FULL indicates the buffer descriptor was not
+* put into the list because the list was full.
+*
+* A value of XST_DMA_SG_BD_LOCKED indicates the buffer descriptor was not
+* put into the list because the buffer descriptor in the list which is to
+* be overwritten was locked. A locked buffer descriptor indicates the higher
+* layered software is still using the buffer descriptor.
+*
+* NOTES:
+*
+* It is necessary to create a scatter gather list for a DMA channel before
+* putting buffer descriptors into it.
+*
+******************************************************************************/
+int
+XDmaChannel_PutDescriptor(XDmaChannel * InstancePtr,
+ XBufDescriptor * BufferDescriptorPtr)
+{
+ u32 Control;
+
+ /* assert to verify valid input arguments and alignment for those
+ * arguments that have alignment restrictions
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufferDescriptorPtr != NULL);
+ XASSERT_NONVOID(((u32) BufferDescriptorPtr & 3) == 0);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* if a scatter gather list has not been created yet, return a status */
+
+ if (InstancePtr->TotalDescriptorCount == 0) {
+ return XST_DMA_SG_NO_LIST;
+ }
+
+ /* if the list is full because all descriptors are pointing to valid
+ * buffers, then indicate an error, this code assumes no list or an
+ * empty list is detected above
+ */
+ if (InstancePtr->ActiveDescriptorCount ==
+ InstancePtr->TotalDescriptorCount) {
+ return XST_DMA_SG_LIST_FULL;
+ }
+
+ /* if the buffer descriptor in the list which is to be overwritten is
+ * locked, then don't overwrite it and return a status
+ */
+ if (XBufDescriptor_IsLocked(InstancePtr->PutPtr)) {
+ return XST_DMA_SG_BD_LOCKED;
+ }
+
+ /* set the scatter gather stop bit in the control word of the descriptor
+ * to cause the h/w to stop after it processes this descriptor since it
+ * will be the last in the list
+ */
+ Control = XBufDescriptor_GetControl(BufferDescriptorPtr);
+ XBufDescriptor_SetControl(BufferDescriptorPtr,
+ Control | XDC_DMACR_SG_DISABLE_MASK);
+
+ /* set both statuses in the descriptor so we can tell if they are updated
+ * with the status of the transfer, the hardware should change the busy in
+ * the DMA status to be false when it completes
+ */
+ XBufDescriptor_SetStatus(BufferDescriptorPtr, XDC_DMASR_BUSY_MASK);
+ XBufDescriptor_SetDeviceStatus(BufferDescriptorPtr, 0);
+
+ /* copy the descriptor into the next position in the list so it's ready to
+ * be used by the h/w, this assumes the descriptor in the list prior to this
+ * one still has the stop bit in the control word set such that the h/w
+ * won't use this one yet
+ */
+ CopyBufferDescriptor(BufferDescriptorPtr, InstancePtr->PutPtr);
+
+ /* End of a packet is reached. Bump the packet counter */
+ if (XBufDescriptor_IsLastControl(InstancePtr->PutPtr)) {
+ InstancePtr->ActivePacketCount++;
+ }
+
+ /* only the last in the list and the one to be committed have scatter gather
+ * disabled in the control word, a commit requires only one descriptor
+ * to be changed, when # of descriptors to commit > 2 all others except the
+ * 1st and last have scatter gather enabled
+ */
+ if ((InstancePtr->CommitPtr != InstancePtr->LastPtr) &&
+ (InstancePtr->CommitPtr != NULL)) {
+ Control = XBufDescriptor_GetControl(InstancePtr->LastPtr);
+ XBufDescriptor_SetControl(InstancePtr->LastPtr,
+ Control & ~XDC_DMACR_SG_DISABLE_MASK);
+ }
+
+ /* update the list data based upon putting a descriptor into the list,
+ * these operations must be last
+ */
+ InstancePtr->ActiveDescriptorCount++;
+
+ /* only update the commit pointer if it is not already active, this allows
+ * it to be deactivated after every commit such that a single descriptor
+ * which is committed does not appear to be waiting to be committed
+ */
+ if (InstancePtr->CommitPtr == NULL) {
+ InstancePtr->CommitPtr = InstancePtr->LastPtr;
+ }
+
+ /* these updates MUST BE LAST after the commit pointer update in order for
+ * the commit pointer to track the correct descriptor to be committed,
+ * PutPtr advances to the next ring slot (physical next converted to
+ * virtual via P_TO_V)
+ */
+ InstancePtr->LastPtr = InstancePtr->PutPtr;
+ InstancePtr->PutPtr =
+ P_TO_V(XBufDescriptor_GetNextPtr(InstancePtr->PutPtr));
+
+ return XST_SUCCESS;
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_CommitPuts
+*
+* DESCRIPTION:
+*
+* This function commits the buffer descriptors which have been put into the
+* scatter list for the DMA channel since the last commit operation was
+* performed. This enables the calling functions to put several buffer
+* descriptors into the list (e.g.,a packet's worth) before allowing the scatter
+* gather operations to start. This prevents the DMA channel hardware from
+* starting to use the buffer descriptors in the list before they are ready
+* to be used (multiple buffer descriptors for a single packet).
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* A status indicating XST_SUCCESS if the buffer descriptors of the list were
+* successfully committed.
+*
+* A value of XST_DMA_SG_NOTHING_TO_COMMIT indicates that the buffer descriptors
+* were not committed because there was nothing to commit in the list. All the
+* buffer descriptors which are in the list are already committed.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+int XDmaChannel_CommitPuts(XDmaChannel * InstancePtr)
+{
+ /* assert to verify input arguments */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* if the buffer descriptor to be committed is already committed or
+ * the list is empty (none have been put in), then indicate an error
+ */
+ if ((InstancePtr->CommitPtr == NULL) ||
+ XDmaChannel_IsSgListEmpty(InstancePtr)) {
+ return XST_DMA_SG_NOTHING_TO_COMMIT;
+ }
+
+ /* last descriptor in the list must have scatter gather disabled so the end
+ * of the list is hit by h/w, if descriptor to commit is not last in list,
+ * commit descriptors by enabling scatter gather in the descriptor
+ * (clearing XDC_DMACR_SG_DISABLE_MASK links it into the running list)
+ */
+ if (InstancePtr->CommitPtr != InstancePtr->LastPtr) {
+ u32 Control;
+
+ Control = XBufDescriptor_GetControl(InstancePtr->CommitPtr);
+ XBufDescriptor_SetControl(InstancePtr->CommitPtr, Control &
+ ~XDC_DMACR_SG_DISABLE_MASK);
+ }
+
+ /* Buffer Descriptors are committed. DMA is ready to be enabled */
+ InstancePtr->Committed = TRUE;
+
+ /* Update the commit pointer to indicate that there is nothing to be
+ * committed, this state is used by start processing to know that the
+ * buffer descriptor to start is not waiting to be committed
+ */
+ InstancePtr->CommitPtr = NULL;
+
+ return XST_SUCCESS;
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_GetDescriptor
+*
+* DESCRIPTION:
+*
+* This function gets a buffer descriptor from the scatter gather list of the
+* DMA channel. The buffer descriptor is retrieved from the scatter gather list
+* and the scatter gather list is updated to not include the retrieved buffer
+* descriptor. This is typically done after a scatter gather operation
+* completes indicating that a data buffer has been successfully sent or data
+* has been received into the data buffer. The purpose of this function is to
+* allow the device using the scatter gather operation to get the results of the
+* operation.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* BufDescriptorPtr is a pointer to a pointer to the buffer descriptor which
+* was retrieved from the list. The buffer descriptor is not really removed
+* from the list, but it is changed to a state such that the hardware will not
+* use it again until it is put into the scatter gather list of the DMA channel.
+*
+* RETURN VALUE:
+*
+* A status indicating XST_SUCCESS if a buffer descriptor was retrieved from
+* the scatter gather list of the DMA channel.
+*
+* A value of XST_DMA_SG_NO_LIST indicates the scatter gather list has not
+* been created.
+*
+* A value of XST_DMA_SG_LIST_EMPTY indicates no buffer descriptor was
+* retrieved from the list because there are no buffer descriptors to be
+* processed in the list.
+*
+* BufDescriptorPtr is updated to point to the buffer descriptor which was
+* retrieved from the list if the status indicates success.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+int
+XDmaChannel_GetDescriptor(XDmaChannel * InstancePtr,
+ XBufDescriptor ** BufDescriptorPtr)
+{
+ u32 Control;
+
+ /* assert to verify input arguments */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufDescriptorPtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* if a scatter gather list has not been created yet, return a status */
+
+ if (InstancePtr->TotalDescriptorCount == 0) {
+ return XST_DMA_SG_NO_LIST;
+ }
+
+ /* if the buffer descriptor list is empty, then indicate an error */
+
+ if (XDmaChannel_IsSgListEmpty(InstancePtr)) {
+ return XST_DMA_SG_LIST_EMPTY;
+ }
+
+ /* retrieve the next buffer descriptor which is ready to be processed from
+ * the buffer descriptor list for the DMA channel, set the control word
+ * such that hardware will stop after the descriptor has been processed;
+ * the descriptor stays in the ring, it is only marked so h/w won't reuse
+ * it until it is put back via XDmaChannel_PutDescriptor
+ */
+ Control = XBufDescriptor_GetControl(InstancePtr->GetPtr);
+ XBufDescriptor_SetControl(InstancePtr->GetPtr,
+ Control | XDC_DMACR_SG_DISABLE_MASK);
+
+ /* set the input argument, which is also an output, to point to the
+ * buffer descriptor which is to be retrieved from the list
+ */
+ *BufDescriptorPtr = InstancePtr->GetPtr;
+
+ /* update the pointer of the DMA channel to reflect the buffer descriptor
+ * was retrieved from the list by setting it to the next buffer descriptor
+ * in the list and indicate one less descriptor in the list now
+ */
+ InstancePtr->GetPtr =
+ P_TO_V(XBufDescriptor_GetNextPtr(InstancePtr->GetPtr));
+ InstancePtr->ActiveDescriptorCount--;
+
+ return XST_SUCCESS;
+}
+
+/*********************** Interrupt Coalescing Functions **********************/
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_GetPktCount
+*
+* DESCRIPTION:
+*
+* This function returns the value of the unserviced packet count register of
+* the DMA channel. This count represents the number of packets that have been
+* sent or received by the hardware, but not processed by software.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* The unserviced packet counter register contents for the DMA channel.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_GetPktCount(XDmaChannel * InstancePtr)
+{
+ /* assert to verify input arguments */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* get the unserviced packet count from the register and return it;
+ * software acknowledges handled packets one at a time through
+ * XDmaChannel_DecrementPktCount
+ */
+ return XIo_In32(InstancePtr->RegBaseAddress + XDC_UPC_REG_OFFSET);
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_DecrementPktCount
+*
+* DESCRIPTION:
+*
+* This function decrements the value of the unserviced packet count register.
+* This informs the hardware that the software has processed a packet. The
+* unserviced packet count register may only be decremented by one in the
+* hardware.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* None.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+void XDmaChannel_DecrementPktCount(XDmaChannel * InstancePtr)
+{
+ u32 Register;
+
+ /* assert to verify input arguments */
+
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* if the unserviced packet count register can be decremented (rather
+ * than rolling over) decrement it by writing a 1 to the register,
+ * this is the only valid write to the register as it serves as an
+ * acknowledge that a packet was handled by the software
+ *
+ * NOTE(review): the read-then-write sequence is not atomic with respect
+ * to hardware incrementing the count; presumably harmless since h/w only
+ * increments -- TODO confirm against the DMA channel spec
+ */
+ Register = XIo_In32(InstancePtr->RegBaseAddress + XDC_UPC_REG_OFFSET);
+ if (Register > 0) {
+ XIo_Out32(InstancePtr->RegBaseAddress + XDC_UPC_REG_OFFSET,
+ 1UL);
+ }
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_SetPktThreshold
+*
+* DESCRIPTION:
+*
+* This function sets the value of the packet count threshold register of the
+* DMA channel. It reflects the number of packets that must be sent or
+* received before generating an interrupt. This value helps implement
+* a concept called "interrupt coalescing", which is used to reduce the number
+* of interrupts from devices with high data rates.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* Threshold is the value that is written to the threshold register of the
+* DMA channel.
+*
+* RETURN VALUE:
+*
+* A status containing XST_SUCCESS if the packet count threshold was
+* successfully set.
+*
+* NOTES:
+*
+* The packet threshold could be set to larger than the number of descriptors
+* allocated to the DMA channel. In this case, the wait bound will take over
+* and always indicate data arrival. There was a check in this function that
+* returned an error if the threshold was larger than the number of descriptors,
+* but that was removed because users would then have to set the threshold
+* only after they set descriptor space, which is an order dependency that
+* caused confusion.
+*
+******************************************************************************/
+int XDmaChannel_SetPktThreshold(XDmaChannel * InstancePtr, u8 Threshold)
+{
+ /* assert to verify input arguments, don't assert the threshold since
+ * its range is unknown
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* set the packet count threshold in the register such that an interrupt
+ * may be generated, if enabled, when the packet count threshold is
+ * reached or exceeded
+ */
+ XIo_Out32(InstancePtr->RegBaseAddress + XDC_PCT_REG_OFFSET,
+ (u32) Threshold);
+
+ /* indicate the packet count threshold was successfully set */
+
+ return XST_SUCCESS;
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_GetPktThreshold
+*
+* DESCRIPTION:
+*
+* This function gets the value of the packet count threshold register of the
+* DMA channel. This value reflects the number of packets that must be sent or
+* received before generating an interrupt. This value helps implement a concept
+* called "interrupt coalescing", which is used to reduce the number of
+* interrupts from devices with high data rates.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* The packet count threshold register contents for the DMA channel. Only the
+* low 8 bits of the register are significant, so the returned value is in
+* the range 0 - 255.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+u8 XDmaChannel_GetPktThreshold(XDmaChannel * InstancePtr)
+{
+ /* assert to verify input arguments */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* get the packet count threshold from the register and return it,
+ * since only 8 bits are used, cast it to return only those bits
+ * (the register is written as a u8 by XDmaChannel_SetPktThreshold) */
+
+ return (u8) XIo_In32(InstancePtr->RegBaseAddress + XDC_PCT_REG_OFFSET);
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_SetPktWaitBound
+*
+* DESCRIPTION:
+*
+* This function sets the value of the packet wait bound register of the
+* DMA channel. This value reflects the timer value used to trigger an
+* interrupt when not enough packets have been received to reach the packet
+* count threshold.
+*
+* The timer is in millisecond units with +/- 33% accuracy.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* WaitBound is the value, in milliseconds, to be stored in the wait bound
+* register of the DMA channel and is a value in the range 0 - 1023. A value
+* of 0 disables the packet wait bound timer.
+*
+* RETURN VALUE:
+*
+* None.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+void XDmaChannel_SetPktWaitBound(XDmaChannel * InstancePtr, u32 WaitBound)
+{
+ /* assert to verify input arguments; the documented range for WaitBound
+ * is 0 - 1023 milliseconds, 0 disables the timer
+ */
+ XASSERT_VOID(InstancePtr != NULL);
+ XASSERT_VOID(WaitBound < 1024);
+ XASSERT_VOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* set the packet wait bound in the register such that interrupt may be
+ * generated, if enabled, when packets have not been handled for a specific
+ * amount of time
+ */
+ XIo_Out32(InstancePtr->RegBaseAddress + XDC_PWB_REG_OFFSET, WaitBound);
+}
+
+/******************************************************************************
+*
+* FUNCTION:
+*
+* XDmaChannel_GetPktWaitBound
+*
+* DESCRIPTION:
+*
+* This function gets the value of the packet wait bound register of the
+* DMA channel. This value contains the timer value used to trigger an
+* interrupt when not enough packets have been received to reach the packet
+* count threshold.
+*
+* The timer is in millisecond units with +/- 33% accuracy.
+*
+* ARGUMENTS:
+*
+* InstancePtr contains a pointer to the DMA channel to operate on. The DMA
+* channel should be configured to use scatter gather in order for this function
+* to be called.
+*
+* RETURN VALUE:
+*
+* The packet wait bound register contents for the DMA channel.
+*
+* NOTES:
+*
+* None.
+*
+******************************************************************************/
+u32 XDmaChannel_GetPktWaitBound(XDmaChannel * InstancePtr)
+{
+ /* assert to verify input arguments */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* get the packet wait bound from the register and return it; the value
+ * is in milliseconds (see XDmaChannel_SetPktWaitBound) */
+
+ return XIo_In32(InstancePtr->RegBaseAddress + XDC_PWB_REG_OFFSET);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xdmabdv3.h
+ *
+ * This header provides operations to manage buffer descriptors in support
+ * of simple and scatter-gather DMA (see xdmav3.h).
+ *
+ * The API exported by this header defines abstracted macros that allow the
+ * user to read/write specific BD fields.
+ *
+ * <b>Buffer Descriptors</b>
+ *
+ * A buffer descriptor (BD) defines a DMA transaction (see "Transaction"
+ * section in xdmav3.h). The macros defined by this header file allow access
+ * to most fields within a BD to tailor a DMA transaction according to user
+ * and HW requirements. See the HW IP DMA spec for more information on BD
+ * fields and how they affect transfers.
+ *
+ * The XDmaBdV3 structure defines a BD. The organization of this structure is
+ * driven mainly by the hardware for use in scatter-gather DMA transfers.
+ *
+ * <b>Accessor Macros</b>
+ *
+ * Most of the BD attributes can be accessed through macro functions defined
+ * here in this API. Words such as XDMAV3_BD_USR0_OFFSET (see xdmav3_l.h)
+ * should be accessed using XDmaV3_mReadBd() and XDmaV3_mWriteBd() as defined in
+ * xdmav3_l.h. The USR words are implementation dependent. For example, they may
+ * implement checksum offloading fields for Ethernet devices. Accessor macros
+ * may be defined in the device specific API to get at this data.
+ *
+ * <b>Performance</b>
+ *
+ * BDs are typically in a non-cached memory space. Limiting I/O to BDs can
+ * improve overall performance of the DMA channel.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 3.00a rmm 03/11/06 First release
+ * rmm 06/22/06 Added extern "C"
+ * </pre>
+ *
+ * ***************************************************************************
+ */
+
+#ifndef XDMABD_H /* prevent circular inclusions */
+#define XDMABD_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include <asm/delay.h>
+#include "xdmav3_l.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * The XDmaBdV3 is the type for buffer descriptors (BDs). A BD is an array
+ * of XDMAV3_BD_NUM_WORDS 32-bit words; individual fields are accessed by
+ * word offset with XDmaV3_mReadBd()/XDmaV3_mWriteBd() (see xdmav3_l.h).
+ */
+typedef u32 XDmaBdV3[XDMAV3_BD_NUM_WORDS];
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+ * Zero out BD fields
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Nothing
+ *
+ * @note
+ * Clears the entire descriptor (sizeof(XDmaBdV3) bytes) with memset.
+ * C-style signature:
+ * void XDmaBdV3_mClear(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mClear(BdPtr) \
+ memset((BdPtr), 0, sizeof(XDmaBdV3))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD's Packet DMA transfer status word.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Word at offset XDMAV3_BD_DMASR_OFFSET
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetStatus(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetStatus(BdPtr) \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMASR_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD's Packet status word. This is the first word of local link
+ * footer information for receive channels.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Word at offset XDMAV3_BD_SR_OFFSET
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetPacketStatus(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetPacketStatus(BdPtr) \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_SR_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD length field.
+ *
+ * For Tx channels, the returned value is the same as that written with
+ * XDmaBdV3_mSetLength().
+ *
+ * For Rx channels, the returned value is the size of the received packet.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Bytes processed by HW or set by XDmaBdV3_mSetLength().
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetLength(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetLength(BdPtr) \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_LENGTH_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD length copy field. See XDmaBdV3_mSetLengthCopy() for
+ * more information.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Value as set by XDmaBdV3_mSetLengthCopy().
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetLengthCopy(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetLengthCopy(BdPtr) \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_LENCPY_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Test whether the given BD has been marked as the last BD of a packet.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return TRUE if BD represents the "Last" BD of a packet, FALSE otherwise
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mIsLast(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mIsLast(BdPtr) \
+ ((XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) & XDMAV3_DMACR_LAST_MASK) ? \
+ TRUE : FALSE)
+
+/*****************************************************************************/
+/**
+ * Set the ID field of the given BD. The ID is an arbitrary piece of data the
+ * user can associate with a specific BD.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param Id is a 32 bit quantity to set in the BD
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetId(XDmaBdV3* BdPtr, u32 Id)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetId(BdPtr, Id) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_ID_OFFSET, (u32)Id))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the ID field of the given BD previously set with XDmaBdV3_mSetId.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetId(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetId(BdPtr) (XDmaV3_mReadBd((BdPtr), XDMAV3_BD_ID_OFFSET))
+
+
+/*****************************************************************************/
+/**
+ * Causes the DMA engine to increment the buffer address during the DMA
+ * transfer for this BD. This is the desirable setting when the buffer data
+ * occupies a memory range.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * BdPtr is evaluated twice (read-modify-write); do not pass an expression
+ * with side effects. This applies to all the set/clear macros below.
+ * C-style signature:
+ * void XDmaBdV3_mSetBufIncrement(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetBufIncrement(BdPtr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) | XDMAV3_DMACR_AINC_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Cause the DMA engine to use the same memory buffer address during the DMA
+ * transfer for this BD. This is the desirable setting when the buffer data
+ * occupies a single address as may be the case if transferring to/from a FIFO.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetBufNoIncrement(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetBufNoIncrement(BdPtr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) & ~XDMAV3_DMACR_AINC_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Bypass data realignment engine (DRE) if DMA channel has DRE capability.
+ * Has no effect if channel does not have DRE.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mIgnoreDre(XDmaBdV3* BdPtr)
+ *
+ ******************************************************************************/
+#define XDmaBdV3_mIgnoreDre(BdPtr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) | XDMAV3_DMACR_BPDRE_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Use data realignment engine (DRE) if DMA channel has DRE capability.
+ * Has no effect if channel does not have DRE.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mUseDre(XDmaBdV3* BdPtr)
+ *
+ ******************************************************************************/
+#define XDmaBdV3_mUseDre(BdPtr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) & ~XDMAV3_DMACR_BPDRE_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Tell the SG DMA engine that the given BD marks the end of the current packet
+ * to be processed.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetLast(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetLast(BdPtr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) | XDMAV3_DMACR_LAST_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Tell the SG DMA engine that the current packet does not end with the given
+ * BD.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mClearLast(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mClearLast(BdPtr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) & ~XDMAV3_DMACR_LAST_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Set the Device Select field of the given BD.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param DevSel is the IP device select to use with LSB of 1. This value
+ * selects which IP block the transaction will address. Normally this
+ * is set to 0, but complex IP may require a specific DEVSEL.
+ *
+ * @note
+ * NOTE(review): the new DEVSEL bits are OR-ed into DMACR without first
+ * clearing the field, so a previously set nonzero DEVSEL is not erased --
+ * TODO confirm whether callers only use this on a cleared BD.
+ * C-style signature:
+ * void XDmaBdV3_mSetDevSel(XDmaBdV3* BdPtr, unsigned DevSel)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetDevSel(BdPtr, DevSel) \
+ { \
+ u32 Dmacr; \
+ Dmacr = XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET); \
+ Dmacr = Dmacr | (((DevSel) << XDMAV3_DMACR_DEVSEL_SHIFT) & \
+ XDMAV3_DMACR_DEVSEL_MASK); \
+ XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, Dmacr); \
+ }
+
+
+/*****************************************************************************/
+/**
+ * Set the Page field of the given BD. The Page must be in terms of a physical
+ * address. Use this macro if using 36 bit bus addressing.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param Page is the page to set. LSB=1
+ *
+ * @note
+ * NOTE(review): like XDmaBdV3_mSetDevSel, this ORs into the BDPAGE field
+ * without clearing previous bits -- TODO confirm intended.
+ * C-style signature:
+ * void XDmaBdV3_mSetBdPage(XDmaBdV3* BdPtr, unsigned Page)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetBdPage(BdPtr, Page) \
+ { \
+ u32 Dmacr; \
+ Dmacr = XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET); \
+ Dmacr = Dmacr | (((Page) << XDMAV3_DMACR_BDPAGE_SHIFT) & \
+ XDMAV3_DMACR_BDPAGE_MASK); \
+ XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, Dmacr); \
+ }
+
+
+/*****************************************************************************/
+/**
+ * Set transfer attributes for the given BD.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param Type defines whether the transfer occurs with single beat or burst
+ * transfers on the target bus. This parameter must be one of the
+ * XDMAV3_DMACR_TYPE_*_MASK constants defined in xdma_l.h.
+ * @param Width defines the width of the transfer as it occurs on the target
+ * bus. This parameter must be one of the XDMAV3_DMACR_DSIZE_*_MASK
+ * constants defined in xdma_l.h
+ *
+ * @note
+ * NOTE(review): Type/Width bits are OR-ed in without clearing the TYPE and
+ * DSIZE fields first; previously set attribute bits remain -- TODO confirm.
+ * C-style signature:
+ * void XDmaBdV3_mSetTransferType(XDmaBdV3* BdPtr, unsigned Type,
+ * unsigned Width)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetTransferType(BdPtr, Type, Width) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_DMACR_OFFSET, \
+ XDmaV3_mReadBd((BdPtr), XDMAV3_BD_DMACR_OFFSET) | \
+ ((Type) & XDMAV3_DMACR_TYPE_MASK) | ((Width) & XDMAV3_DMACR_DSIZE_MASK)))
+
+
+/*****************************************************************************/
+/**
+ * Set transfer length in bytes for the given BD. The length must be set each
+ * time a BD is submitted to HW.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param LenBytes is the number of bytes to transfer.
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetLength(XDmaBdV3* BdPtr, u32 LenBytes)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetLength(BdPtr, LenBytes) \
+ XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_LENGTH_OFFSET, (LenBytes)) /* HW overwrites this field on Rx completion; see XDmaBdV3_mSetLengthCopy() */
+
+
+/*****************************************************************************/
+/**
+ * Write the given length to the length copy offset of the BD. This function
+ * is useful only if an application needs to recover the number of bytes
+ * originally set by XDmaBdV3_mSetLength() for Rx channels.
+ *
+ * To effectively use this function, an application would call
+ * XDmaBdV3_mSetLength() to set the length on a Rx descriptor, followed by a
+ * call to this macro to set the same length. When HW has processed the Rx
+ * descriptor it will overwrite the BD length field with the actual length of
+ * the packet. When the application performs post processing of the Rx
+ * descriptor, it can call XDmaBdV3_mGetLengthCopy() to find out how many bytes
+ * were originally allocated to the descriptor.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param LenBytes is the number of bytes to transfer.
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetLengthCopy(XDmaBdV3* BdPtr, u32 LenBytes)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetLengthCopy(BdPtr, LenBytes) \
+ XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_LENCPY_OFFSET, (LenBytes)) /* shadow of the length field, recoverable via XDmaBdV3_mGetLengthCopy() */
+
+
+/*****************************************************************************/
+/**
+ * Set the high order address of the BD's buffer address. Use this macro when
+ * the address bus width is greater than 32 bits.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param HighAddr is the high order address bits to set, LSB = 2^32.
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetBufAddrHigh(XDmaBdV3* BdPtr, u32 HighAddr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetBufAddrHigh(BdPtr, HighAddr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_MSBA_OFFSET, (u32)(HighAddr))) /* address bits at and above 2^32 */
+
+
+/*****************************************************************************/
+/**
+ * Set the low order address (bits 0..31) of the BD's buffer address.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param LowAddr is the low order address bits to set, LSB = 1.
+ *
+ * @note
+ * C-style signature:
+ * void XDmaBdV3_mSetBufAddrLow(XDmaBdV3* BdPtr, u32 LowAddr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mSetBufAddrLow(BdPtr, LowAddr) \
+ (XDmaV3_mWriteBd((BdPtr), XDMAV3_BD_LSBA_OFFSET, (u32)(LowAddr))) /* address bits 0..31 */
+
+
+/*****************************************************************************/
+/**
+ * Get the high order address of the BD's buffer address. Use this macro when
+ * the address bus width is greater than 32 bits.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetBufAddrHigh(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetBufAddrHigh(BdPtr) \
+ (XDmaV3_mReadBd((BdPtr), XDMAV3_BD_MSBA_OFFSET)) /* counterpart of XDmaBdV3_mSetBufAddrHigh() */
+
+
+/*****************************************************************************/
+/**
+ * Get the low order address (bits 0..31) of the BD's buffer address.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * u32 XDmaBdV3_mGetBufAddrLow(XDmaBdV3* BdPtr)
+ *
+ *****************************************************************************/
+#define XDmaBdV3_mGetBufAddrLow(BdPtr) \
+ (XDmaV3_mReadBd((BdPtr), XDMAV3_BD_LSBA_OFFSET)) /* counterpart of XDmaBdV3_mSetBufAddrLow() */
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdmav3.c
+*
+* This file implements initialization and control related functions. For more
+* information on this driver, see xdmav3.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/05 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+#include <asm/delay.h>
+
+#include "xdmav3.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* This function initializes a DMA channel. This function must be called
+* prior to using a DMA channel. Initialization of a channel includes setting
+* up the register base address, setting up the instance data, and ensuring the
+* HW is in a quiescent state.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param BaseAddress is where the registers for this channel can be found.
+* If address translation is being used, then this parameter must
+* reflect the virtual base address.
+*
+* @return
+* - XST_SUCCESS if initialization was successful
+*
+******************************************************************************/
+int XDmaV3_Initialize(XDmaV3 * InstancePtr, u32 BaseAddress)
+{
+	u32 StatusReg;
+
+	/* Wipe the instance, then record the register base and mark the
+	 * channel ready; SGDMA bookkeeping starts in the stopped state.
+	 */
+	memset(InstancePtr, 0, sizeof(XDmaV3));
+	InstancePtr->RegBase = BaseAddress;
+	InstancePtr->IsReady = XCOMPONENT_IS_READY;
+	InstancePtr->BdRing.RunState = XST_DMA_SG_IS_STOPPED;
+
+	/* If the HW reports any SGDMA configuration, halt the engine so the
+	 * channel starts out quiescent.
+	 */
+	StatusReg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+	if (StatusReg & (XDMAV3_DMASR_DMACNFG_SGDMARX_MASK |
+			 XDMAV3_DMASR_DMACNFG_SGDMATX_MASK |
+			 XDMAV3_DMASR_DMACNFG_SSGDMA_MASK)) {
+		XDmaV3_SgStop(InstancePtr);
+	}
+
+	return XST_SUCCESS;
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdmav3.h
+*
+* The Xilinx Simple and Scatter Gather DMA driver. This component supports a
+* distributed DMA design in which each device can have its own dedicated DMA
+* channel, as opposed to a centralized DMA design. A device which uses DMA
+* typically contains two DMA channels, one for sending data and the other for
+* receiving data.
+*
+* This component is designed to be used as a basic building block for
+* designing a device driver. It provides registers accesses such that all
+* DMA processing can be maintained easier, but the device driver designer
+* must still understand all the details of the DMA channel.
+*
+* For a full description of DMA features, please see the HW spec. This driver
+* supports the following features:
+* - Simple DMA
+* - Scatter-Gather DMA (SGDMA)
+* - Interrupts
+* - Programmable interrupt coalescing for SGDMA
+* - 36 Bit bus addressing
+* - Programmable transaction types
+* - APIs to manage Buffer Descriptors (BD) movement to and from the SGDMA
+* engine
+* - Virtual memory support
+*
+* <b>Transactions</b>
+*
+* To describe a DMA transaction in its simplest form, you need a source address,
+* destination address, and the number of bytes to transfer. When using a DMA
+* receive channel, the source address is within some piece of IP HW and doesn't
+* require the user explicitly set it. Likewise with a transmit channel and the
+* destination address. So this leaves a user buffer address and the number
+* of bytes to transfer as the primary transaction attributes. There are more
+* obscure attributes such as:
+*
+* - Is the user buffer a fixed address FIFO or a range of memory
+* - The size of the data bus over which the transaction occurs.
+* - Does the transfer use single beat or bursting capabilities of the
+* bus over which the transaction occurs.
+* - If the transaction occurs on a bus wider than 32 bits, what are the
+* highest order address bits.
+* - If SGDMA, does this transaction represent the end of a packet.
+*
+* The object used to describe a transaction is referred to as a Buffer
+* Descriptor (BD). The format of a BD closely matches that of the DMA HW.
+* Many fields within the BD correspond directly with the same fields within the
+* HW registers. See xdmabdv3.h for a detailed description of and the API for
+* manipulation of these objects.
+*
+* <b>Simple DMA</b>
+*
+* Simple DMA is a single transaction type of operation. The user uses this
+* driver to setup a transaction, initiate the transaction, then either wait for
+* an interrupt or poll the HW for completion of the transaction. A new
+* transaction may not be initiated until the current one completes.
+*
+* <b>Scatter-Gather DMA</b>
+*
+* SGDMA is more sophisticated in that it allows the user to define a list of
+* transactions in memory which the HW will process without further user
+* intervention. During this time, the user is free to continue adding more work
+* to keep the HW busy.
+*
+* Notification of completed transactions can be done either by polling the HW,
+* or using interrupts that signal a transaction has completed or a series of
+* transactions have been processed.
+*
+* SGDMA processes in units of packets. A packet is defined as a series of
+* data bytes that represent a message. SGDMA allows a packet of data to be
+* broken up into one or more transactions. For example, take an Ethernet IP
+* packet which consists of a 14 byte header followed by a 1 or more byte
+* payload. With SGDMA, the user may point a BD to the header and another BD to
+* the payload, then transfer them as a single message. This strategy can make a
+* TCP/IP stack more efficient by allowing it to keep packet headers and data in
+* different memory regions instead of assembling packets into contiguous blocks
+* of memory.
+*
+* <b>Interrupt Coalescing</b>
+*
+* SGDMA provides control over the frequency of interrupts. On a high speed link
+* significant processor overhead may be used servicing interrupts. Interrupt
+* coalescing provides two mechanisms that help control interrupt frequency.
+*
+* The packet threshold will hold off interrupting the CPU until a programmable
+* number of packets have been processed by the engine. The packet waitbound
+* timer is used to interrupt the CPU if after a programmable amount of time
+* after processing the last packet, no new packets were processed.
+*
+* <b>Interrupts</b>
+*
+* This driver does not service interrupts. This is done typically within
+* a higher level driver that uses DMA. This driver does provide an API to
+* enable or disable specific interrupts.
+*
+* <b>SGDMA List Management</b>
+*
+* The HW expects BDs to be set up as a singly linked list. As BDs are completed,
+* the DMA engine will dereference BD.Next and load the next BD to process.
+* This driver uses a fixed buffer ring where all BDs are linked to the next
+* adjacent BD in memory. The last BD in the ring is linked to the first.
+*
+* Within the BD ring, the driver maintains four groups of BDs. Each group
+* consists of 0 or more adjacent BDs:
+*
+* - Free group: Those BDs that can be allocated by the user with
+* XDmaV3_SgBdAlloc(). These BDs are under driver control.
+*
+* - Pre-work group: Those BDs that have been allocated with
+* XDmaV3_SgBdAlloc(). These BDs are under user control. The user modifies
+* these BDs in preparation for future DMA transactions.
+*
+* - Work group: Those BDs that have been enqueued to HW with
+* XDmaV3_SgBdToHw(). These BDs are under HW control and may be in a
+* state of awaiting HW processing, in process, or processed by HW.
+*
+* - Post-work group: Those BDs that have been processed by HW and have been
+* extracted from the work group with XDmaV3_SgBdFromHw(). These BDs are under
+* user control. The user may access these BDs to determine the result
+* of DMA transactions. When the user is finished, XDmaV3_SgBdFree() should
+* be called to place them back into the Free group.
+*
+* It is considered an error for the user to change BDs while they are in the
+* Work group. Doing so can cause data corruption and lead to system instability.
+*
+* The API provides macros that allow BD list traversal. These macros should be
+* used with care as they do not understand where one group ends and another
+* begins.
+*
+* The driver does not cache or keep copies of any BD. When the user modifies
+* BDs returned by XDmaV3_SgBdAlloc() or XDmaV3_SgBdFromHw(), they are modifying
+* the same BD list that HW accesses.
+*
+* Certain pairs of list modification functions have usage restrictions. See
+* the function headers for XDmaV3_SgBdAlloc() and XDmaV3_SgBdFromHw() for
+* more information.
+*
+* <b>SGDMA List Creation</b>
+*
+* During initialization, the function XDmaV3_SgListCreate() is used to setup
+* a user supplied memory block to contain all BDs for the DMA channel. This
+* function takes as an argument the number of BDs to place in the list. To
+* arrive at this number, the user is given two methods of calculating it.
+*
+* The first method assumes the user has a block of memory and they just
+* want to fit as many BDs as possible into it. The user must calculate the
+* number of BDs that will fit with XDmaV3_mSgListCntCalc(), then supply that
+* number into the list creation function.
+*
+* The second method allows the user to just supply the number directly. The
+* driver assumes the memory block is large enough to contain them all. To
+* double-check, the user should invoke XDmaV3_mSgListMemCalc() to verify the
+* memory block is adequate.
+*
+* Once the list has been created, it can be used right away to perform DMA
+* transactions. However, there are optional steps that can be done to increase
+* throughput and decrease user code complexity by the use of XDmaV3_SgListClone().
+*
+* BDs have many user accessible attributes that affect how DMA transactions are
+* carried out. Many of these attributes (such as the bus width) will probably
+* be constant at run-time. The cloning function can be used to copy a template
+* BD to every BD in the list relieving the user of having to setup transactions
+* from scratch every time a BD is submitted to HW.
+*
+* Ideally, the only transaction parameters that need to be set at run-time
+* should be: buffer address, bytes to transfer, and whether the BD is the
+* "Last" BD of a packet.
+*
+* <b>Adding / Removing BDs from the SGDMA Engine</b>
+*
+* BDs may be enqueued (see XDmaV3_SgBdToHw()) to the engine any time after
+* the SGDMA list is created. If the channel is running (see XDmaV3_SgStart()),
+* then newly added BDs will be processed as soon as the engine reaches them.
+* If the channel is stopped (see XDmaV3_SgStop()), the newly added BDs will
+* be accepted but not processed by the engine until it is restarted.
+*
+* Processed BDs may be removed (see XDmaV3_SgBdFromHw()) at any time
+* after the SGDMA list is created provided the engine has processed any.
+*
+* <b>Address Translation</b>
+*
+* When the BD list is setup with XDmaV3_SgListCreate(), a physical and
+* virtual address is supplied for the segment of memory containing the
+* descriptors. The driver will handle any translations internally. Subsequent
+* access of descriptors by the user is done in terms of the virtual address.
+*
+* <b>Alignment</b>
+*
+* Except for 4 byte alignment of BDs there are no other alignment restrictions
+* imposed by this driver. Individual DMA channels may, based on their
+* capabilities or which bus they are a master of, have more stringent alignment
+* requirements. It is up to the user to match the requirements of the DMA
+* channel being used.
+*
+* Aside from the initial creation of BD list (see XDmaV3_SgListCreate()),
+* there are no other run-time checks for proper alignment. Misaligned user
+* buffers or BDs may result in corrupted data.
+*
+* <b>Cache Coherency</b>
+*
+* This driver expects all user buffers attached to BDs to be in cache coherent
+* memory. Buffers for transmit should be flushed from the cache before passing
+* the associated BD to this driver. Buffers for receive should be invalidated
+* before being accessed.
+*
+* If the user wishes that the BD space itself be in cached memory, then
+* modification of this driver is required. The driver helps the user in
+* this area by: 1) Allowing the user to specify what alignment BDs should
+* use (ie. aligned along cache lines); 2) Provide unimplemented invalidate/flush
+* macro placeholders in the driver source code where needed.
+*
+* <b>Reset After Stopping</b>
+*
+* This driver is designed to allow for stop-reset-start cycles of the DMA
+* HW while keeping the BD list intact. When restarted after a reset, this
+* driver will point the DMA engine to where it left off after stopping it.
+*
+* <b>Limitations</b>
+*
+* This driver requires exclusive use of the hardware DMACR.SGS bit. This
+* applies to the actual HW register and BDs submitted through this driver to
+* be processed. If a BD is encountered with this bit set, then it will be
+* cleared within the driver.
+*
+* This driver does not have any mechanism for mutual exclusion. It is up to the
+* user to provide this protection.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/06 First release
+* rmm 06/22/06 Added extern "C"
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XDMAV3_H /* prevent circular inclusions */
+#define XDMAV3_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xdmabdv3.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions *****************************/
+
+/* Minimum alignment */
+#define XDMABDV3_MINIMUM_ALIGNMENT 4
+
+
+/**************************** Type Definitions *******************************/
+
+/** Internal bookkeeping for the SGDMA BD ring; maintained by the driver and
+ *  not intended for direct use by applications. */
+typedef struct {
+	u32 PhysBaseAddr;
+	/**< Physical address of 1st BD in list */
+	u32 BaseAddr; /**< Virtual address of 1st BD in list */
+	u32 HighAddr; /**< Virtual address of last BD in the list */
+	u32 Length; /**< Total size of ring in bytes */
+	u32 RunState; /**< Flag to indicate SGDMA is started */
+	u32 Separation;/**< Number of bytes between the starting address
+				of adjacent BDs */
+	XDmaBdV3 *FreeHead;/**< First BD in the free group */
+	XDmaBdV3 *PreHead; /**< First BD in the pre-work group */
+	XDmaBdV3 *HwHead; /**< First BD in the work group */
+	XDmaBdV3 *HwTail; /**< Last BD in the work group */
+	XDmaBdV3 *PostHead;/**< First BD in the post-work group */
+	XDmaBdV3 *BdaRestart;
+	/**< BDA to load when channel is started */
+	unsigned HwCnt; /**< Number of BDs in work group */
+	unsigned PreCnt; /**< Number of BDs in pre-work group */
+	unsigned FreeCnt; /**< Number of allocatable BDs in the free group */
+	unsigned PostCnt; /**< Number of BDs in post-work group */
+	unsigned AllCnt; /**< Total Number of BDs for channel */
+} XDmaV3_BdRing;
+
+/**
+ * The XDmaV3 driver instance data. An instance must be allocated for each DMA
+ * channel in use. If address translation is enabled, then all addresses and
+ * pointers excluding PhysBase are expressed in terms of the virtual address.
+ */
+typedef struct XDmaV3 {
+	u32 RegBase; /**< Base address of channel registers (virtual when
+			  address translation is in use) */
+	u32 IsReady; /**< Flag to indicate device is ready to use */
+	XDmaV3_BdRing BdRing; /**< BD storage for SGDMA */
+} XDmaV3;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+* Use this macro at initialization time to determine how many BDs will fit
+* in a BD list within the given memory constraints.
+*
+* The results of this macro can be provided to XDmaV3_SgListCreate().
+*
+* @param Alignment specifies what byte alignment the BDs must fall on and
+* must be a power of 2 to get an accurate calculation (32, 64, 128,...)
+* @param Bytes is the number of bytes to be used to store BDs.
+*
+* @return Number of BDs that can fit in the given memory area
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mSgListCntCalc(u32 Alignment, u32 Bytes)
+*
+* The divisor is sizeof(XDmaBdV3) rounded up to the next Alignment boundary.
+*
+******************************************************************************/
+#define XDmaV3_mSgListCntCalc(Alignment, Bytes) \
+	(u32)((Bytes) / ((sizeof(XDmaBdV3) + ((Alignment)-1)) & ~((Alignment)-1)))
+
+/*****************************************************************************/
+/**
+* Use this macro at initialization time to determine how many bytes of memory
+* is required to contain a given number of BDs at a given alignment.
+*
+* @param Alignment specifies what byte alignment the BDs must fall on. This
+* parameter must be a power of 2 to get an accurate calculation (32, 64,
+* 128,...)
+* @param NumBd is the number of BDs to calculate memory size requirements for
+*
+* @return The number of bytes of memory required to create a BD list with the
+* given memory constraints.
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mSgListMemCalc(u32 Alignment, u32 NumBd)
+*
+* The expansion is fully parenthesized so it is safe inside larger
+* expressions (sizeof(XDmaBdV3) is rounded up to Alignment, then scaled).
+*
+******************************************************************************/
+#define XDmaV3_mSgListMemCalc(Alignment, NumBd) \
+	((u32)((sizeof(XDmaBdV3) + ((Alignment)-1)) & ~((Alignment)-1)) * (NumBd))
+
+
+/****************************************************************************/
+/**
+* Return the total number of BDs allocated by this channel with
+* XDmaV3_SgListCreate().
+*
+* @param InstancePtr is the DMA channel to operate on.
+*
+* @return The total number of BDs allocated for this channel.
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mSgGetCnt(XDmaV3* InstancePtr)
+*
+*****************************************************************************/
+#define XDmaV3_mSgGetCnt(InstancePtr) ((InstancePtr)->BdRing.AllCnt)
+
+
+/****************************************************************************/
+/**
+* Return the number of BDs allocatable with XDmaV3_SgBdAlloc() for pre-
+* processing.
+*
+* @param InstancePtr is the DMA channel to operate on.
+*
+* @return The number of BDs currently allocatable.
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mSgGetFreeCnt(XDmaV3* InstancePtr)
+*
+*****************************************************************************/
+#define XDmaV3_mSgGetFreeCnt(InstancePtr) ((InstancePtr)->BdRing.FreeCnt)
+
+
+/****************************************************************************/
+/**
+* Return the next BD in a list.
+*
+* @param InstancePtr is the DMA channel to operate on.
+* @param BdPtr is the BD to operate on.
+*
+* @return The next BD in the list relative to the BdPtr parameter.
+*
+* @note
+* C-style signature:
+* XDmaBdV3 *XDmaV3_mSgBdNext(XDmaV3* InstancePtr, XDmaBdV3 *BdPtr)
+*
+* Wraps around: the successor of the last BD in the ring is the first BD.
+*
+*****************************************************************************/
+#define XDmaV3_mSgBdNext(InstancePtr, BdPtr) \
+	(((u32)(BdPtr) >= (InstancePtr)->BdRing.HighAddr) ? \
+	 (XDmaBdV3*)(InstancePtr)->BdRing.BaseAddr : \
+	 (XDmaBdV3*)((u32)(BdPtr) + (InstancePtr)->BdRing.Separation))
+
+
+/****************************************************************************/
+/**
+* Return the previous BD in the list.
+*
+* @param InstancePtr is the DMA channel to operate on.
+* @param BdPtr is the BD to operate on
+*
+* @return The previous BD in the list relative to the BdPtr parameter.
+*
+* @note
+* C-style signature:
+* XDmaBdV3 *XDmaV3_mSgBdPrev(XDmaV3* InstancePtr, XDmaBdV3 *BdPtr)
+*
+* Wraps around: the predecessor of the first BD in the ring is the last BD.
+*
+*****************************************************************************/
+#define XDmaV3_mSgBdPrev(InstancePtr, BdPtr) \
+	(((u32)(BdPtr) <= (InstancePtr)->BdRing.BaseAddr) ? \
+	 (XDmaBdV3*)(InstancePtr)->BdRing.HighAddr : \
+	 (XDmaBdV3*)((u32)(BdPtr) - (InstancePtr)->BdRing.Separation))
+
+
+/****************************************************************************/
+/**
+* Retrieve the current contents of the DMASR register. This macro can be
+* used to poll the DMA HW for completion of a transaction.
+*
+* @param InstancePtr is the DMA channel to operate on.
+*
+* @return The current contents of the DMASR register.
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mGetStatus(XDmaV3* InstancePtr)
+*
+* NOTE(review): assumed to be a plain status read with no read-to-clear
+* side effects — confirm against the DMA HW spec.
+*
+*****************************************************************************/
+#define XDmaV3_mGetStatus(InstancePtr) \
+	XDmaV3_mReadReg((InstancePtr)->RegBase, XDMAV3_DMASR_OFFSET)
+
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization and control functions in xdmav3.c
+ */
+int XDmaV3_Initialize(XDmaV3 * InstancePtr, u32 BaseAddress);
+
+/*
+ * Interrupt related functions in xdmav3_intr.c
+ */
+void XDmaV3_SetInterruptStatus(XDmaV3 * InstancePtr, u32 Mask);
+u32 XDmaV3_GetInterruptStatus(XDmaV3 * InstancePtr);
+void XDmaV3_SetInterruptEnable(XDmaV3 * InstancePtr, u32 Mask);
+u32 XDmaV3_GetInterruptEnable(XDmaV3 * InstancePtr);
+
+/*
+ * Simple DMA related functions in xdmav3_simple.c
+ */
+int XDmaV3_SimpleTransfer(XDmaV3 * InstancePtr, XDmaBdV3 * Bdptr);
+
+/*
+ * Scatter gather DMA related functions in xdmav3_sg.c
+ */
+int XDmaV3_SgStart(XDmaV3 * InstancePtr);
+void XDmaV3_SgStop(XDmaV3 * InstancePtr);
+int XDmaV3_SgSetPktThreshold(XDmaV3 * InstancePtr, u16 Threshold);
+int XDmaV3_SgSetPktWaitbound(XDmaV3 * InstancePtr, u16 TimerVal);
+u16 XDmaV3_SgGetPktThreshold(XDmaV3 * InstancePtr);
+u16 XDmaV3_SgGetPktWaitbound(XDmaV3 * InstancePtr);
+
+int XDmaV3_SgListCreate(XDmaV3 * InstancePtr, u32 PhysAddr,
+ u32 VirtAddr, u32 Alignment, unsigned BdCount);
+int XDmaV3_SgListClone(XDmaV3 * InstancePtr, XDmaBdV3 * SrcBdPtr);
+int XDmaV3_SgCheck(XDmaV3 * InstancePtr);
+int XDmaV3_SgBdAlloc(XDmaV3 * InstancePtr, unsigned NumBd,
+ XDmaBdV3 ** BdSetPtr);
+int XDmaV3_SgBdUnAlloc(XDmaV3 * InstancePtr, unsigned NumBd,
+ XDmaBdV3 * BdSetPtr);
+int XDmaV3_SgBdToHw(XDmaV3 * InstancePtr, unsigned NumBd, XDmaBdV3 * BdSetPtr);
+int XDmaV3_SgBdFree(XDmaV3 * InstancePtr, unsigned NumBd, XDmaBdV3 * BdSetPtr);
+unsigned XDmaV3_SgBdFromHw(XDmaV3 * InstancePtr, unsigned BdLimit,
+ XDmaBdV3 ** BdSetPtr);
+
+/*
+ * Selftest functions in xdmav3_selftest.c
+ */
+int XDmaV3_SelfTest(XDmaV3 * InstancePtr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdmav3_intr.c
+*
+* This file implements interrupt control related functions. For more
+* information on this driver, see xdmav3.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/06 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xdmav3.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Set the interrupt status register for this channel. Use this function
+* to ack pending interrupts.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Mask is a logical OR of XDMAV3_IPXR_*_MASK constants found in
+* xdmav3_l.h.
+*
+******************************************************************************/
+void XDmaV3_SetInterruptStatus(XDmaV3 * InstancePtr, u32 Mask)
+{
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_ISR_OFFSET, Mask);
+}
+
+
+/*****************************************************************************/
+/**
+* Retrieve the interrupt status for this channel. OR the results of this
+* function with results from XDmaV3_GetInterruptEnable() to determine which
+* interrupts are currently pending to the processor.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @return Mask of interrupt bits made up of XDMAV3_IPXR_*_MASK constants found
+* in xdmav3_l.h.
+*
+******************************************************************************/
+u32 XDmaV3_GetInterruptStatus(XDmaV3 * InstancePtr)
+{
+ return (XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_ISR_OFFSET));
+}
+
+
+/*****************************************************************************/
+/**
+* Enable specific DMA interrupts.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Mask is a logical OR of XDMAV3_IPXR_*_MASK constants found in
+* xdmav3_l.h.
+*
+******************************************************************************/
+void XDmaV3_SetInterruptEnable(XDmaV3 * InstancePtr, u32 Mask)
+{
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_IER_OFFSET, Mask);
+}
+
+
+/*****************************************************************************/
+/**
+* Retrieve the interrupt enable register for this channel. Use this function to
+* determine which interrupts are currently enabled to the processor.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @return Mask of interrupt bits made up of XDMAV3_IPXR_*_MASK constants found in
+* xdmav3_l.h.
+*
+******************************************************************************/
+u32 XDmaV3_GetInterruptEnable(XDmaV3 * InstancePtr)
+{
+ return (XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_IER_OFFSET));
+}
--- /dev/null
+/* $Id: */
+
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xdmav3_l.h
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the Direct Memory Access and Scatter
+* Gather (SG DMA) device.
+*
+* For more information about the operation of this device, see the hardware
+* specification and documentation in the higher level driver xdma.h source
+* code file.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/06 First release
+* rmm 06/22/06 Added extern "C"
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XDMAV3_L_H /* prevent circular inclusions */
+#define XDMAV3_L_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit.
+ */
+
+/** @name DMA channel registers
+ * @{
+ */
+#define XDMAV3_DMASR_OFFSET 0x00000000 /**< DMA Status Register */
+#define XDMAV3_DMACR_OFFSET 0x00000004 /**< DMA Control Register */
+#define XDMAV3_MSBA_OFFSET 0x00000008 /**< Most Significant Bus Address */
+#define XDMAV3_LSBA_OFFSET 0x0000000C /**< Least Significant Bus Address */
+#define XDMAV3_BDA_OFFSET 0x00000010 /**< Buffer Descriptor Address */
+#define XDMAV3_LENGTH_OFFSET 0x00000014 /**< DMA Length */
+#define XDMAV3_ISR_OFFSET 0x00000018 /**< Interrupt Status Register */
+#define XDMAV3_IER_OFFSET 0x0000001C /**< Interrupt Enable Register */
+#define XDMAV3_SWCR_OFFSET 0x00000020 /**< Software Control Register */
+/*@}*/
+
+/** @name Buffer Descriptor register offsets
+ * @{
+ */
+#define XDMAV3_BD_DMASR_OFFSET 0x00 /**< Channel DMASR register contents */
+#define XDMAV3_BD_DMACR_OFFSET 0x04 /**< Channel DMACR register contents */
+#define XDMAV3_BD_MSBA_OFFSET 0x08 /**< Channel MSBA register contents */
+#define XDMAV3_BD_LSBA_OFFSET 0x0C /**< Channel LSBA register contents */
+#define XDMAV3_BD_BDA_OFFSET 0x10 /**< Next buffer descriptor pointer */
+#define XDMAV3_BD_LENGTH_OFFSET 0x14 /**< Channel LENGTH register contents */
+#define XDMAV3_BD_SR_OFFSET 0x18 /**< Packet Status */
+#define XDMAV3_BD_RSVD_OFFSET 0x1C /**< Reserved */
+#define XDMAV3_BD_USR0_OFFSET 0x20 /**< HW User defined */
+#define XDMAV3_BD_USR1_OFFSET 0x24 /**< HW User defined */
+#define XDMAV3_BD_USR2_OFFSET 0x28 /**< HW User defined */
+#define XDMAV3_BD_USR3_OFFSET 0x2C /**< HW User defined */
+#define XDMAV3_BD_USR4_OFFSET 0x30 /**< HW User defined */
+#define XDMAV3_BD_USR5_OFFSET 0x34 /**< HW User defined */
+#define XDMAV3_BD_LENCPY_OFFSET 0x38 /**< SW Driver usage */
+#define XDMAV3_BD_ID_OFFSET 0x3C /**< SW Driver usage */
+
+#define XDMAV3_BD_NUM_WORDS 16 /**< Number of 32-bit words that make
+ up a BD */
+/*@}*/
+
+/* Register masks. The following constants define bit locations of various
+ * control bits in the registers. Constants are not defined for those registers
+ * that have a single bit field representing all 32 bits. For further
+ * information on the meaning of the various bit masks, refer to the HW spec.
+ */
+
+
+/** @name DMA Status Register (DMASR) bitmasks
+ * @note These bitmasks are identical between XDMAV3_DMASR_OFFSET and
+ * XDMAV3_BD_DMASR_OFFSET
+ * @{
+ */
+#define XDMAV3_DMASR_DMABSY_MASK 0x80000000 /**< DMA busy */
+#define XDMAV3_DMASR_DBE_MASK 0x40000000 /**< Bus error */
+#define XDMAV3_DMASR_DBT_MASK 0x20000000 /**< Bus timeout */
+#define XDMAV3_DMASR_DMADONE_MASK 0x10000000 /**< DMA done */
+#define XDMAV3_DMASR_SGBSY_MASK 0x08000000 /**< SG channel busy */
+#define XDMAV3_DMASR_LAST_MASK 0x04000000 /**< Last BD of packet */
+#define XDMAV3_DMASR_SGDONE_MASK 0x01000000 /**< SGDMA done */
+#define XDMAV3_DMASR_DMACNFG_MASK 0x00300000 /**< DMA configuration */
+
+#define XDMAV3_DMASR_DMACNFG_SIMPLE_MASK 0x00000000 /**< Simple DMA config */
+#define XDMAV3_DMASR_DMACNFG_SSGDMA_MASK 0x00100000 /**< Simple SGDMA config */
+#define XDMAV3_DMASR_DMACNFG_SGDMATX_MASK 0x00200000 /**< SGDMA xmit config */
+#define XDMAV3_DMASR_DMACNFG_SGDMARX_MASK 0x00300000 /**< SGDMA recv config */
+#define XDMAV3_DMASR_DMACNFG_MASK 0x00300000 /**< Mask for all */
+
+/*@}*/
+
+/** @name DMA Control Register (DMACR) bitmasks
+ * @note These bitmasks are identical between XDMAV3_DMACR_OFFSET and
+ * XDMAV3_BD_DMACR_OFFSET
+ * @{
+ */
+#define XDMAV3_DMACR_AINC_MASK 0x80000000 /**< Address increment */
+#define XDMAV3_DMACR_BPDRE_MASK 0x20000000 /**< Bypass DRE */
+#define XDMAV3_DMACR_SGS_MASK 0x08000000 /**< Scatter gather stop */
+#define XDMAV3_DMACR_LAST_MASK 0x04000000 /**< Last BD of packet */
+#define XDMAV3_DMACR_DEVSEL_MASK 0x00FF0000 /**< Device select */
+#define XDMAV3_DMACR_BDPAGE_MASK 0x00000F00 /**< BD page address */
+#define XDMAV3_DMACR_TYPE_MASK 0x00000070 /**< DMA transfer type */
+#define XDMAV3_DMACR_DSIZE_MASK 0x00000007 /**< DMA transfer width */
+
+/* Sub-fields within XDMAV3_DMACR_DIR_MASK */
+#define XDMAV3_DMACR_DIR_RX_MASK 0x40000000 /**< Xfer in Rx direction */
+#define XDMAV3_DMACR_DIR_TX_MASK 0x00000000 /**< Xfer in Tx direction */
+
+/* Sub-fields within XDMAV3_DMACR_TYPE_MASK */
+#define XDMAV3_DMACR_TYPE_BFBURST_MASK 0x00000010 /**< Bounded fixed length
+ burst */
+#define XDMAV3_DMACR_TYPE_BIBURST_MASK 0x00000020 /**< Bounded indeterminate
+ burst */
+
+/* Sub-fields within XDMAV3_DMACR_DSIZE_MASK */
+#define XDMAV3_DMACR_DSIZE_8_MASK 0x00000000 /**< Xfer width = 8 bits */
+#define XDMAV3_DMACR_DSIZE_16_MASK 0x00000001 /**< Xfer width = 16 bits */
+#define XDMAV3_DMACR_DSIZE_32_MASK 0x00000002 /**< Xfer width = 32 bits */
+#define XDMAV3_DMACR_DSIZE_64_MASK 0x00000003 /**< Xfer width = 64 bits */
+#define XDMAV3_DMACR_DSIZE_128_MASK 0x00000004 /**< Xfer width = 128 bits */
+
+/* Left shift values for selected masks */
+#define XDMAV3_DMACR_DEVSEL_SHIFT 16
+#define XDMAV3_DMACR_BDPAGE_SHIFT 8
+/*@}*/
+
+/** @name Interrupt status bits for MAC interrupts
+ * These bits are associated with XDMAV3_ISR_OFFSET and
+ * XDMAV3_IER_OFFSET registers.
+ * @{
+ */
+#define XDMAV3_IPXR_DD_MASK 0x00000040 /**< DMA complete */
+#define XDMAV3_IPXR_DE_MASK 0x00000020 /**< DMA error */
+#define XDMAV3_IPXR_PD_MASK 0x00000010 /**< Pkt done */
+#define XDMAV3_IPXR_PCTR_MASK 0x00000008 /**< Pkt count threshold reached */
+#define XDMAV3_IPXR_PWBR_MASK 0x00000004 /**< Pkt waitbound reached */
+#define XDMAV3_IPXR_SGDA_MASK 0x00000002 /**< SG Disable ack */
+#define XDMAV3_IPXR_SGEND_MASK 0x00000001 /**< SG End */
+/*@}*/
+
+/** @name Software control register (SWCR) bitmasks
+ * @{
+ */
+#define XDMAV3_SWCR_SGE_MASK 0x80000000 /**< SG Enable */
+#define XDMAV3_SWCR_SGD_MASK 0x40000000 /**< SG Disable */
+#define XDMAV3_SWCR_DSGAR_MASK 0x20000000 /**< SG Disable auto-restart */
+#define XDMAV3_SWCR_PWB_MASK 0x00FFF000 /**< Pkt waitbound */
+#define XDMAV3_SWCR_PCT_MASK 0x00000FFF /**< Pkt threshold count */
+
+/* Left shift values for selected masks */
+#define XDMAV3_SWCR_PCT_SHIFT 0
+#define XDMAV3_SWCR_PWB_SHIFT 12
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/****************************************************************************/
+/**
+*
+* Read the given IPIF register.
+*
+* @param BaseAddress is the IPIF base address of the device
+* @param RegOffset is the register offset to be read
+*
+* @return The 32-bit value of the register
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mReadReg(u32 BaseAddress, u32 RegOffset)
+*
+*****************************************************************************/
+#define XDmaV3_mReadReg(BaseAddress, RegOffset) \
+ XIo_In32((u32)(BaseAddress) + (u32)(RegOffset))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given IPIF register.
+*
+* @param BaseAddress is the IPIF base address of the device
+* @param RegOffset is the register offset to be written
+* @param Data is the 32-bit value to write to the register
+*
+* @return None.
+*
+* @note
+* C-style signature:
+* void XDmaV3_mWriteReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XDmaV3_mWriteReg(BaseAddress, RegOffset, Data) \
+ XIo_Out32((u32)(BaseAddress) + (u32)(RegOffset), (u32)(Data))
+
+
+/****************************************************************************/
+/**
+*
+* Read the given Buffer Descriptor word.
+*
+* @param BaseAddress is the base address of the BD to read
+* @param Offset is the word offset to be read
+*
+* @return The 32-bit value of the field
+*
+* @note
+* C-style signature:
+* u32 XDmaV3_mReadBd(u32 BaseAddress, u32 Offset)
+*
+*****************************************************************************/
+#define XDmaV3_mReadBd(BaseAddress, Offset) \
+ (*(u32*)((u32)(BaseAddress) + (u32)(Offset)))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given Buffer Descriptor word.
+*
+* @param BaseAddress is the base address of the BD to write
+* @param Offset is the word offset to be written
+* @param Data is the 32-bit value to write to the field
+*
+* @return None.
+*
+* @note
+* C-style signature:
+* void XDmaV3_mWriteBd(u32 BaseAddress, u32 Offset, u32 Data)
+*
+*****************************************************************************/
+#define XDmaV3_mWriteBd(BaseAddress, Offset, Data) \
+ (*(u32*)((u32)(BaseAddress) + (u32)(Offset)) = (Data))
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdmav3_selftest.c
+*
+* This file implements DMA selftest related functions. For more
+* information on this driver, see xdmav3.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/06 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xdmav3.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Selftest is not implemented.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @return
+* - XST_SUCCESS if the self test passes
+*
+******************************************************************************/
+int XDmaV3_SelfTest(XDmaV3 * InstancePtr)
+{
+ return (XST_SUCCESS);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdmav3_sg.c
+*
+* This file implements Scatter-Gather DMA (SGDMA) related functions. For more
+* information on this driver, see xdmav3.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/06 First release
+* rmm 06/22/06 Fixed C++ compiler warnings
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+#include <asm/delay.h>
+
+#include "xdmav3.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/****************************************************************************
+ * These cache macros are used throughout this source code file to show
+ * users where cache operations should occur if BDs were to be placed in
+ * a cached memory region. Caching BD regions, however, is not common.
+ *
+ * The macros are implemented as NULL operations, but may be hooked into
+ * XENV macros in future revisions of this driver.
+ ****************************************************************************/
+#define XDMAV3_CACHE_FLUSH(BdPtr)
+#define XDMAV3_CACHE_INVALIDATE(BdPtr)
+
+/****************************************************************************
+ * Compute the virtual address of a descriptor from its physical address
+ *
+ * @param Ring is the ring BdPtr appears in
+ * @param BdPtr is the physical address of the BD
+ *
+ * @returns Virtual address of BdPtr
+ *
+ * @note Assume BdPtr is always a valid BD in the ring
+ ****************************************************************************/
+#define XDMAV3_PHYS_TO_VIRT(Ring, BdPtr) \
+ ((u32)BdPtr + (Ring->BaseAddr - Ring->PhysBaseAddr))
+
+/****************************************************************************
+ * Compute the physical address of a descriptor from its virtual address
+ *
+ * @param Ring is the ring BdPtr appears in
+ * @param BdPtr is the physical address of the BD
+ *
+ * @returns Physical address of BdPtr
+ *
+ * @note Assume BdPtr is always a valid BD in the ring
+ ****************************************************************************/
+#define XDMAV3_VIRT_TO_PHYS(Ring, BdPtr) \
+ ((u32)BdPtr - (Ring->BaseAddr - Ring->PhysBaseAddr))
+
+/****************************************************************************
+ * Clear or set the SGS bit of the DMACR register
+ ****************************************************************************/
+#define XDMAV3_HW_SGS_CLEAR \
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET, \
+ XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET) \
+ & ~XDMAV3_DMACR_SGS_MASK)
+
+#define XDMAV3_HW_SGS_SET \
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET, \
+ XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET) \
+ | XDMAV3_DMACR_SGS_MASK)
+
+/****************************************************************************
+ * Move the BdPtr argument ahead an arbitrary number of BDs wrapping around
+ * to the beginning of the ring if needed.
+ *
+ * We know if a wraparound should occur if the new BdPtr is greater than
+ * the high address in the ring OR if the new BdPtr crosses over the
+ * 0xFFFFFFFF to 0 boundary. The latter test is a valid one since we do not
+ * allow a BD space to span this boundary.
+ *
+ * @param Ring is the ring BdPtr appears in
+ * @param BdPtr on input is the starting BD position and on output is the
+ * final BD position
+ * @param NumBd is the number of BD spaces to increment
+ *
+ ****************************************************************************/
+#define XDMAV3_RING_SEEKAHEAD(Ring, BdPtr, NumBd) \
+ { \
+ u32 Addr = (u32)BdPtr; \
+ \
+ Addr += (Ring->Separation * NumBd); \
+ if ((Addr > Ring->HighAddr) || ((u32)BdPtr > Addr)) \
+ { \
+ Addr -= Ring->Length; \
+ } \
+ \
+ BdPtr = (XDmaBdV3*)Addr; \
+ }
+
+/****************************************************************************
+ * Move the BdPtr argument backwards an arbitrary number of BDs wrapping
+ * around to the end of the ring if needed.
+ *
+ * We know if a wraparound should occur if the new BdPtr is less than
+ * the base address in the ring OR if the new BdPtr crosses over the
+ * 0xFFFFFFFF to 0 boundary. The latter test is a valid one since we do not
+ * allow a BD space to span this boundary.
+ *
+ * @param Ring is the ring BdPtr appears in
+ * @param BdPtr on input is the starting BD position and on output is the
+ * final BD position
+ * @param NumBd is the number of BD spaces to increment
+ *
+ ****************************************************************************/
+#define XDMAV3_RING_SEEKBACK(Ring, BdPtr, NumBd) \
+ { \
+ u32 Addr = (u32)BdPtr; \
+ \
+ Addr -= (Ring->Separation * NumBd); \
+ if ((Addr < Ring->BaseAddr) || ((u32)BdPtr < Addr)) \
+ { \
+ Addr += Ring->Length; \
+ } \
+ \
+ BdPtr = (XDmaBdV3*)Addr; \
+ }
+
+
+/************************** Function Prototypes ******************************/
+
+static int IsSgDmaChannel(XDmaV3 * InstancePtr);
+
+
+/************************** Variable Definitions *****************************/
+
+/******************************************************************************/
+/**
+ * Start the SGDMA channel.
+ *
+ * @param InstancePtr is a pointer to the instance to be started.
+ *
+ * @return
+ * - XST_SUCCESS if channel was started.
+ * - XST_DMA_SG_NO_LIST if the channel has no initialized BD ring.
+ *
+ ******************************************************************************/
+int XDmaV3_SgStart(XDmaV3 * InstancePtr)
+{
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+ u32 Swcr;
+
+ /* BD list has yet to be created for this channel */
+ if (Ring->AllCnt == 0) {
+ return (XST_DMA_SG_NO_LIST);
+ }
+
+ /* Do nothing if already started */
+ if (Ring->RunState == XST_DMA_SG_IS_STARTED) {
+ return (XST_SUCCESS);
+ }
+
+ /* Note as started */
+ Ring->RunState = XST_DMA_SG_IS_STARTED;
+
+ /* Restore BDA */
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_BDA_OFFSET,
+ Ring->BdaRestart);
+
+	/* If there are unprocessed BDs then we want the channel to begin processing
+ * right away
+ */
+ if ((XDmaV3_mReadBd(XDMAV3_PHYS_TO_VIRT(Ring, Ring->BdaRestart),
+ XDMAV3_BD_DMASR_OFFSET) & XDMAV3_DMASR_DMADONE_MASK)
+ == 0) {
+ /* DMACR.SGS = 0 */
+ XDMAV3_HW_SGS_CLEAR;
+ }
+
+ /* To start, clear SWCR.DSGAR, and set SWCR.SGE */
+ Swcr = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+ Swcr &= ~XDMAV3_SWCR_DSGAR_MASK;
+ Swcr |= XDMAV3_SWCR_SGE_MASK;
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET, Swcr);
+
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Stop the SGDMA or Simple SGDMA channel gracefully. Any DMA operation
+ * currently in progress is allowed to finish.
+ *
+ * An interrupt may be generated as the DMA engine finishes the packet in
+ * process. To prevent this (if desired) then disable DMA interrupts prior to
+ * invoking this function.
+ *
+ * If after stopping the channel, new BDs are enqueued with XDmaV3_SgBdToHw(),
+ * then those BDs will not be processed until after XDmaV3_SgStart() is called.
+ *
+ * @param InstancePtr is a pointer to the instance to be stopped.
+ *
+ * @note This function will block until the HW indicates that DMA has stopped.
+ *
+ ******************************************************************************/
+void XDmaV3_SgStop(XDmaV3 * InstancePtr)
+{
+ volatile u32 Swcr;
+ u32 Dmasr;
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+ u32 Ier;
+
+ /* Save the contents of the interrupt enable register then disable
+ * interrupts. This register will be restored at the end of the function
+ */
+ Ier = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_IER_OFFSET);
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_IER_OFFSET, 0);
+
+ /* Stopping the HW is a three step process:
+ * 1. Set SWCR.SGD=1
+ * 2. Wait for SWCR.SGE=0
+ * 3. Set SWCR.DSGAR=0 and SWCR.SGE=1
+ *
+ * Once we've successfully gone through this process, the HW is fully
+ * stopped. To restart we must give the HW a new BDA.
+ */
+ Swcr = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+
+ /* If the channel is currently active, stop it by setting SWCR.SGD=1
+ * and waiting for SWCR.SGE to toggle to 0
+ */
+ if (Swcr & XDMAV3_SWCR_SGE_MASK) {
+ Swcr |= XDMAV3_SWCR_SGD_MASK;
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET,
+ Swcr);
+
+ while (Swcr & XDMAV3_SWCR_SGE_MASK) {
+ Swcr = XDmaV3_mReadReg(InstancePtr->RegBase,
+ XDMAV3_SWCR_OFFSET);
+ }
+ }
+
+ /* Note as stopped */
+ Ring->RunState = XST_DMA_SG_IS_STOPPED;
+
+ /* Save the BDA to restore when channel is restarted */
+ Ring->BdaRestart =
+ (XDmaBdV3 *) XDmaV3_mReadReg(InstancePtr->RegBase,
+ XDMAV3_BDA_OFFSET);
+
+ /* If this is a receive channel, then the BDA restore may require a more
+ * complex treatment. If the channel stopped without processing a packet,
+ * then DMASR.SGDONE will be clear. The BDA we've already read in this case
+ * is really BDA->BDA so we need to backup one BDA to get the correct
+ * restart point.
+ */
+ Dmasr = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if ((Dmasr & XDMAV3_DMASR_DMACNFG_MASK) ==
+ XDMAV3_DMASR_DMACNFG_SGDMARX_MASK) {
+ if (!(Dmasr & XDMAV3_DMASR_SGDONE_MASK)) {
+ Ring->BdaRestart =
+ (XDmaBdV3 *) XDMAV3_PHYS_TO_VIRT(Ring,
+ Ring->
+ BdaRestart);
+ Ring->BdaRestart =
+ XDmaV3_mSgBdPrev(InstancePtr, Ring->BdaRestart);
+ Ring->BdaRestart =
+ (XDmaBdV3 *) XDMAV3_VIRT_TO_PHYS(Ring,
+ Ring->
+ BdaRestart);
+ }
+ }
+
+ Swcr |= XDMAV3_SWCR_DSGAR_MASK;
+ Swcr &= ~XDMAV3_SWCR_SGD_MASK;
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET, Swcr);
+
+ /* Restore interrupt enables. If an interrupt occurs due to this function
+ * stopping the channel then it will happen right here
+ */
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_IER_OFFSET, Ier);
+}
+
+
+/******************************************************************************/
+/**
+ * Set the packet threshold for this SGDMA channel. This has the effect of
+ * delaying processor interrupts until the given number of packets (not BDs)
+ * have been processed.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Threshold is the packet threshold to set. If 0 is specified, then
+ * this feature is disabled. Maximum threshold is 2^12 - 1.
+ *
+ * @return
+ * - XST_SUCCESS if threshold set properly.
+ * - XST_NO_FEATURE if this function was called on a DMA channel that does not
+ * have interrupt coalescing capabilities.
+ *
+ * @note This function should not be preempted by another XDmaV3 function.
+ *
+ ******************************************************************************/
+int XDmaV3_SgSetPktThreshold(XDmaV3 * InstancePtr, u16 Threshold)
+{
+ u32 Reg;
+
+ /* Is this a SGDMA channel */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if (!IsSgDmaChannel(InstancePtr)) {
+ return (XST_NO_FEATURE);
+ }
+
+ /* Replace the pkt threshold field in the SWCR */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+ Reg &= ~XDMAV3_SWCR_PCT_MASK;
+ Reg |= ((Threshold << XDMAV3_SWCR_PCT_SHIFT) & XDMAV3_SWCR_PCT_MASK);
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET, Reg);
+
+ /* Finished */
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Set the packet waitbound timer for this SGDMA channel. See xdmav3.h for more
+ * information on interrupt coalescing and the effects of the waitbound timer.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param TimerVal is the waitbound period to set. If 0 is specified, then
+ * this feature is disabled. Maximum waitbound is 2^12 - 1. LSB is
+ * 1 millisecond (approx).
+ *
+ * @return
+ * - XST_SUCCESS if waitbound set properly.
+ * - XST_NO_FEATURE if this function was called on a DMA channel that does not
+ * have interrupt coalescing capabilities.
+ *
+ * @note This function should not be preempted by another XDmaV3 function.
+ *
+ ******************************************************************************/
+int XDmaV3_SgSetPktWaitbound(XDmaV3 * InstancePtr, u16 TimerVal)
+{
+ u32 Reg;
+
+ /* Is this a SGDMA channel */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if (!IsSgDmaChannel(InstancePtr)) {
+ return (XST_NO_FEATURE);
+ }
+
+ /* Replace the waitbound field in the SWCR */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+ Reg &= ~XDMAV3_SWCR_PWB_MASK;
+ Reg |= ((TimerVal << XDMAV3_SWCR_PWB_SHIFT) & XDMAV3_SWCR_PWB_MASK);
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET, Reg);
+
+ /* Finished */
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Get the packet threshold for this channel that was set with
+ * XDmaV3_SgSetPktThreshold().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return Current packet threshold as reported by HW. If the channel does not
+ * include interrupt coalescing, then the return value will always be 0.
+ ******************************************************************************/
+u16 XDmaV3_SgGetPktThreshold(XDmaV3 * InstancePtr)
+{
+ u32 Reg;
+
+ /* Is this a SGDMA channel */
+ /* NOTE(review): this DMASR read is unused -- IsSgDmaChannel() performs
+ * its own read of the register. Kept as-is to leave the patch verbatim.
+ */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if (!IsSgDmaChannel(InstancePtr)) {
+ return (0);
+ }
+
+ /* Get the threshold: extract the PCT field from the SWCR */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+ Reg &= XDMAV3_SWCR_PCT_MASK;
+ Reg >>= XDMAV3_SWCR_PCT_SHIFT;
+ return ((u16) Reg);
+}
+
+
+/******************************************************************************/
+/**
+ * Get the waitbound timer for this channel that was set with
+ * XDmaV3_SgSetPktWaitbound().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return Current waitbound timer as reported by HW. If the channel does not
+ * include interrupt coalescing, then the return value will always be 0.
+ ******************************************************************************/
+u16 XDmaV3_SgGetPktWaitbound(XDmaV3 * InstancePtr)
+{
+ u32 Reg;
+
+ /* Is this a SGDMA channel */
+ /* NOTE(review): this DMASR read is unused -- IsSgDmaChannel() performs
+ * its own read of the register. Kept as-is to leave the patch verbatim.
+ */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if (!IsSgDmaChannel(InstancePtr)) {
+ return (0);
+ }
+
+ /* Get the waitbound timer: extract the PWB field from the SWCR */
+ Reg = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+ Reg &= XDMAV3_SWCR_PWB_MASK;
+ Reg >>= XDMAV3_SWCR_PWB_SHIFT;
+ return ((u16) Reg);
+}
+
+
+/******************************************************************************/
+/**
+ * Using a memory segment allocated by the caller, create and setup the BD list
+ * for the given SGDMA channel.
+ *
+ * @param InstancePtr is the instance to be worked on.
+ * @param PhysAddr is the physical base address of user memory region.
+ * @param VirtAddr is the virtual base address of the user memory region. If
+ * address translation is not being utilized, then VirtAddr should be
+ * equivalent to PhysAddr.
+ * @param Alignment governs the byte alignment of individual BDs. This function
+ * will enforce a minimum alignment of 4 bytes with no maximum as long as
+ * it is specified as a power of 2.
+ * @param BdCount is the number of BDs to setup in the user memory region. It is
+ * assumed the region is large enough to contain the BDs. Refer to the
+ * "SGDMA List Creation" section in xdmav3.h for more information on
+ * list creation.
+ *
+ * @return
+ *
+ * - XST_SUCCESS if initialization was successful
+ * - XST_NO_FEATURE if the provided instance is a non SGDMA type of DMA
+ * channel.
+ * - XST_INVALID_PARAM under any of the following conditions: 1) PhysAddr and/or
+ * VirtAddr are not aligned to the given Alignment parameter; 2) Alignment
+ * parameter does not meet minimum requirements or is not a power of 2 value;
+ * 3) BdCount is 0.
+ * - XST_DMA_SG_LIST_ERROR if the memory segment containing the list spans
+ * over address 0x00000000 in virtual address space.
+ *
+ * @note
+ *
+ * Some DMA HW requires 8 or more byte alignments of BDs. Make sure the correct
+ * value is passed into the Alignment parameter to meet individual DMA HW
+ * requirements.
+ *
+ ******************************************************************************/
+int XDmaV3_SgListCreate(XDmaV3 * InstancePtr, u32 PhysAddr, u32 VirtAddr,
+ u32 Alignment, unsigned BdCount)
+{
+ unsigned i;
+ u32 BdV;
+ u32 BdP;
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+
+ /* In case there is a failure prior to creating list, make sure the following
+ * attributes are 0 to prevent calls to other SG functions from doing anything
+ */
+ Ring->AllCnt = 0;
+ Ring->FreeCnt = 0;
+ Ring->HwCnt = 0;
+ Ring->PreCnt = 0;
+ Ring->PostCnt = 0;
+
+ /* Is this a SGDMA channel */
+ if (!IsSgDmaChannel(InstancePtr)) {
+ return (XST_NO_FEATURE);
+ }
+
+ /* Make sure Alignment parameter meets minimum requirements */
+ if (Alignment < XDMABDV3_MINIMUM_ALIGNMENT) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Make sure Alignment is a power of 2 */
+ if ((Alignment - 1) & Alignment) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Make sure PhysAddr and VirtAddr are on same Alignment */
+ if ((PhysAddr % Alignment) || (VirtAddr % Alignment)) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Is BdCount reasonable? */
+ if (BdCount == 0) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Parameters are sane. Stop the HW just to be safe */
+ XDmaV3_SgStop(InstancePtr);
+
+ /* Figure out how many bytes will be between the start of adjacent BDs.
+ * Separation = sizeof(BD) rounded up to the next Alignment boundary.
+ */
+ Ring->Separation =
+ (sizeof(XDmaBdV3) + (Alignment - 1)) & ~(Alignment - 1);
+
+ /* Must make sure the ring doesn't span address 0x00000000. If it does,
+ * then the next/prev BD traversal macros will fail.
+ */
+ if (VirtAddr > (VirtAddr + (Ring->Separation * BdCount) - 1)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Initial ring setup:
+ * - Clear the entire space
+ * - Setup each BD's BDA field with the physical address of the next BD
+ * - Set each BD's DMASR.DMADONE bit
+ */
+ memset((void *) VirtAddr, 0, (Ring->Separation * BdCount));
+
+ BdV = VirtAddr;
+ BdP = PhysAddr + Ring->Separation;
+ for (i = 1; i < BdCount; i++) {
+ XDmaV3_mWriteBd(BdV, XDMAV3_BD_BDA_OFFSET, BdP);
+ XDmaV3_mWriteBd(BdV, XDMAV3_BD_DMASR_OFFSET,
+ XDMAV3_DMASR_DMADONE_MASK);
+ XDMAV3_CACHE_FLUSH(BdV);
+ BdV += Ring->Separation;
+ BdP += Ring->Separation;
+ }
+
+ /* At the end of the ring, link the last BD back to the top */
+ /* NOTE(review): unlike the BDs written in the loop above, the final BD
+ * never gets DMASR.DMADONE set (memset left it 0) and is not
+ * cache-flushed here, which appears inconsistent with the "Set each
+ * BD's DMASR.DMADONE bit" comment above -- confirm intent.
+ */
+ XDmaV3_mWriteBd(BdV, XDMAV3_BD_BDA_OFFSET, PhysAddr);
+
+ /* Setup and initialize pointers and counters */
+ InstancePtr->BdRing.RunState = XST_DMA_SG_IS_STOPPED;
+ Ring->BaseAddr = VirtAddr;
+ Ring->PhysBaseAddr = PhysAddr;
+ Ring->HighAddr = BdV;
+ Ring->Length = Ring->HighAddr - Ring->BaseAddr + Ring->Separation;
+ Ring->AllCnt = BdCount;
+ Ring->FreeCnt = BdCount;
+ Ring->FreeHead = (XDmaBdV3 *) VirtAddr;
+ Ring->PreHead = (XDmaBdV3 *) VirtAddr;
+ Ring->HwHead = (XDmaBdV3 *) VirtAddr;
+ Ring->HwTail = (XDmaBdV3 *) VirtAddr;
+ Ring->PostHead = (XDmaBdV3 *) VirtAddr;
+ /* NOTE(review): BdaRestart holds a PHYSICAL address despite the
+ * virtual-pointer type -- presumably fed back to the HW BDA register on
+ * restart; confirm before dereferencing it as a pointer.
+ */
+ Ring->BdaRestart = (XDmaBdV3 *) PhysAddr;
+
+ /* Make sure the DMACR.SGS is 1 so that no DMA operations proceed until
+ * the start function is called.
+ */
+ XDMAV3_HW_SGS_SET;
+
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Clone the given BD into every BD in the list. Except for XDMAV3_BD_BDA_OFFSET,
+ * every field of the source BD is replicated in every BD of the list.
+ *
+ * This function can be called only when all BDs are in the free group such as
+ * they are immediately after initialization with XDmaV3_SgListCreate(). This
+ * prevents modification of BDs while they are in use by HW or the user.
+ *
+ * @param InstancePtr is the instance to be worked on.
+ * @param SrcBdPtr is the source BD template to be cloned into the list. This BD
+ * will be modified.
+ *
+ * @return
+ * - XST_SUCCESS if the list was modified.
+ * - XST_DMA_SG_NO_LIST if a list has not been created.
+ * - XST_DMA_SG_LIST_ERROR if some of the BDs in this channel are under HW
+ * or user control.
+ * - XST_DEVICE_IS_STARTED if the DMA channel has not been stopped.
+ *
+ ******************************************************************************/
+int XDmaV3_SgListClone(XDmaV3 * InstancePtr, XDmaBdV3 * SrcBdPtr)
+{
+ unsigned i;
+ u32 CurBd;
+ u32 Save;
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+
+ /* Can't do this function if there isn't a ring */
+ if (Ring->AllCnt == 0) {
+ return (XST_DMA_SG_NO_LIST);
+ }
+
+ /* Can't do this function with the channel running */
+ if (Ring->RunState == XST_DMA_SG_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Can't do this function with some of the BDs in use */
+ if (Ring->FreeCnt != Ring->AllCnt) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Modify the template by setting DMASR.DMADONE -- the same initial
+ * state XDmaV3_SgListCreate() gives each BD
+ */
+ Save = XDmaV3_mReadBd(SrcBdPtr, XDMAV3_BD_DMASR_OFFSET);
+ Save |= XDMAV3_DMASR_DMADONE_MASK;
+ XDmaV3_mWriteBd(SrcBdPtr, XDMAV3_BD_DMASR_OFFSET, Save);
+
+ /* Starting from the top of the ring, save BD.Next, overwrite the entire BD
+ * with the template, then restore BD.Next
+ */
+ for (i = 0, CurBd = Ring->BaseAddr;
+ i < Ring->AllCnt; i++, CurBd += Ring->Separation) {
+ Save = XDmaV3_mReadBd(CurBd, XDMAV3_BD_BDA_OFFSET);
+ memcpy((void *) CurBd, SrcBdPtr, sizeof(XDmaBdV3));
+ XDmaV3_mWriteBd(CurBd, XDMAV3_BD_BDA_OFFSET, Save);
+ XDMAV3_CACHE_FLUSH(CurBd);
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Reserve locations in the BD list. The set of returned BDs may be modified in
+ * preparation for future DMA transaction(s). Once the BDs are ready to be
+ * submitted to HW, the user must call XDmaV3_SgBdToHw() in the same order which
+ * they were allocated here. Example:
+ *
+ * <pre>
+ * NumBd = 2;
+ * Status = XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd, &MyBdSet);
+ *
+ * if (Status != XST_SUCCESS)
+ * {
+ * // Not enough BDs available for the request
+ * }
+ *
+ * CurBd = MyBdSet;
+ * for (i=0; i<NumBd; i++)
+ * {
+ * // Prepare CurBd.....
+ *
+ * // Onto next BD
+ * CurBd = XDmaV3_mSgBdNext(MyDmaInstPtr, CurBd);
+ * }
+ *
+ * // Give list to HW
+ * Status = XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd, MyBdSet);
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be allocated and given to HW in the correct sequence:
+ * <pre>
+ * // Legal
+ * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd1, &MySet1);
+ * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd1, MySet1);
+ *
+ * // Legal
+ * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd1, &MySet1);
+ * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd2, &MySet2);
+ * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd1, MySet1);
+ * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd2, MySet2);
+ *
+ * // Not legal
+ * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd1, &MySet1);
+ * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd2, &MySet2);
+ * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd2, MySet2);
+ * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd1, MySet1);
+ * </pre>
+ *
+ * Use the API defined in xdmabdv3.h to modify individual BDs. Traversal of the
+ * BD set can be done using XDmaV3_mSgBdNext() and XDmaV3_mSgBdPrev().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs to allocate
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ * for modification.
+ *
+ * @return
+ * - XST_SUCCESS if the requested number of BDs was returned in the BdSetPtr
+ * parameter.
+ * - XST_FAILURE if there were not enough free BDs to satisfy the request.
+ *
+ * @note This function should not be preempted by another XDmaV3 function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ * @note Do not modify more BDs than the number requested with the NumBd
+ * parameter. Doing so will lead to data corruption and system
+ * instability.
+ *
+ ******************************************************************************/
+int XDmaV3_SgBdAlloc(XDmaV3 * InstancePtr, unsigned NumBd, XDmaBdV3 ** BdSetPtr)
+{
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+
+ /* Enough free BDs available for the request? */
+ if (Ring->FreeCnt < NumBd) {
+ return (XST_FAILURE);
+ }
+
+ /* Set the return argument and move FreeHead forward */
+ *BdSetPtr = Ring->FreeHead;
+ XDMAV3_RING_SEEKAHEAD(Ring, Ring->FreeHead, NumBd);
+ /* Accounting: the BDs migrate from the Free group to the
+ * pre-processing (Pre) group
+ */
+ Ring->FreeCnt -= NumBd;
+ Ring->PreCnt += NumBd;
+ return (XST_SUCCESS);
+}
+
+/******************************************************************************/
+/**
+ * Fully or partially undo an XDmaV3_SgBdAlloc() operation. Use this function
+ * if all the BDs allocated by XDmaV3_SgBdAlloc() could not be transferred to
+ * HW with XDmaV3_SgBdToHw().
+ *
+ * This function helps out in situations when an unrelated error occurs after
+ * BDs have been allocated but before they have been given to HW. An example of
+ * this type of error would be an OS running out of resources.
+ *
+ * This function is not the same as XDmaV3_SgBdFree(). The Free function returns
+ * BDs to the free list after they have been processed by HW, while UnAlloc
+ * returns them before being processed by HW.
+ *
+ * There are two scenarios where this function can be used. Full UnAlloc or
+ * Partial UnAlloc. A Full UnAlloc means all the BDs Alloc'd will be returned:
+ *
+ * <pre>
+ * Status = XDmaV3_SgBdAlloc(Inst, 10, &BdPtr);
+ * .
+ * .
+ * if (Error)
+ * {
+ * Status = XDmaV3_SgBdUnAlloc(Inst, 10, &BdPtr);
+ * }
+ * </pre>
+ *
+ * A partial UnAlloc means some of the BDs Alloc'd will be returned:
+ *
+ * <pre>
+ * Status = XDmaV3_SgBdAlloc(Inst, 10, &BdPtr);
+ * BdsLeft = 10;
+ * CurBdPtr = BdPtr;
+ *
+ * while (BdsLeft)
+ * {
+ * if (Error)
+ * {
+ * Status = XDmaV3_SgBdUnAlloc(Inst, BdsLeft, CurBdPtr);
+ * }
+ *
+ * CurBdPtr = XDmaV3_mSgBdNext(Inst, CurBdPtr);
+ * BdsLeft--;
+ * }
+ * </pre>
+ *
+ * A partial UnAlloc must include the last BD in the list that was Alloc'd.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs to unallocate
+ * @param BdSetPtr points to the first BD of the set being returned to the
+ * free group.
+ *
+ * @return
+ * - XST_SUCCESS if the BDs were unallocated.
+ * - XST_FAILURE if NumBd parameter was greater that the number of BDs in the
+ * preprocessing state.
+ *
+ * @note This function should not be preempted by another XDmaV3 function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ ******************************************************************************/
+int XDmaV3_SgBdUnAlloc(XDmaV3 * InstancePtr, unsigned NumBd,
+ XDmaBdV3 * BdSetPtr)
+{
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+
+ /* Enough BDs in the preprocessing state for the request? */
+ if (Ring->PreCnt < NumBd) {
+ return (XST_FAILURE);
+ }
+
+ /* Move FreeHead backward to reclaim the BDs into the free group */
+ XDMAV3_RING_SEEKBACK(Ring, Ring->FreeHead, NumBd);
+ Ring->FreeCnt += NumBd;
+ Ring->PreCnt -= NumBd;
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Enqueue a set of BDs to HW that were previously allocated by
+ * XDmaV3_SgBdAlloc(). Once this function returns, the argument BD set goes
+ * under HW control. Any changes made to these BDs after this point will corrupt
+ * the BD list leading to data corruption and system instability.
+ *
+ * The set will be rejected if the last BD of the set does not mark the end of
+ * a packet (see XDmaBdV3_mSetLast()).
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs in the set.
+ * @param BdSetPtr is the first BD of the set to commit to HW.
+ *
+ * @return
+ * - XST_SUCCESS if the set of BDs was accepted and enqueued to HW.
+ * - XST_FAILURE if the set of BDs was rejected because the last BD of the set
+ * did not have its "last" bit set.
+ * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
+ * XDmaV3_SgBdAlloc().
+ *
+ * @note This function should not be preempted by another XDmaV3 function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ ******************************************************************************/
+int XDmaV3_SgBdToHw(XDmaV3 * InstancePtr, unsigned NumBd, XDmaBdV3 * BdSetPtr)
+{
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+ XDmaBdV3 *LastBdPtr;
+ unsigned i;
+ u32 Dmacr;
+ u32 Swcr;
+
+ /* Make sure we are in sync with XDmaV3_SgBdAlloc() */
+ if ((Ring->PreCnt < NumBd) || (Ring->PreHead != BdSetPtr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* For all BDs in this set (except the last one)
+ * - Clear DMASR except for DMASR.DMABSY
+ * - Clear DMACR.SGS
+ *
+ * For the last BD in this set
+ * - Clear DMASR except for DMASR.DMABSY
+ * - Set DMACR.SGS (marks the end of the new active list)
+ */
+ LastBdPtr = BdSetPtr;
+ for (i = 1; i < NumBd; i++) {
+ XDmaV3_mWriteBd(LastBdPtr, XDMAV3_BD_DMASR_OFFSET,
+ XDMAV3_DMASR_DMABSY_MASK);
+
+ Dmacr = XDmaV3_mReadBd(LastBdPtr, XDMAV3_BD_DMACR_OFFSET);
+ XDmaV3_mWriteBd(LastBdPtr, XDMAV3_BD_DMACR_OFFSET, /* DMACR.SGS = 0 */
+ Dmacr & ~XDMAV3_DMACR_SGS_MASK);
+ XDMAV3_CACHE_FLUSH(LastBdPtr);
+
+ LastBdPtr = XDmaV3_mSgBdNext(InstancePtr, LastBdPtr);
+ }
+
+ /* Last BD */
+ XDmaV3_mWriteBd(LastBdPtr, XDMAV3_BD_DMASR_OFFSET,
+ XDMAV3_DMASR_DMABSY_MASK);
+
+ Dmacr = XDmaV3_mReadBd(LastBdPtr, XDMAV3_BD_DMACR_OFFSET);
+ XDmaV3_mWriteBd(LastBdPtr, XDMAV3_BD_DMACR_OFFSET, /* DMACR.SGS = 1 */
+ Dmacr | XDMAV3_DMACR_SGS_MASK);
+ XDMAV3_CACHE_FLUSH(LastBdPtr);
+
+ /* The last BD should have DMACR.LAST set */
+ /* NOTE(review): on this failure path the set's DMASR/DMACR fields have
+ * already been rewritten above, while ring pointers/counters are only
+ * advanced on the success path below -- confirm the intended recovery
+ * is for the caller to re-prepare and resubmit the same set.
+ */
+ if (!(Dmacr & XDMAV3_DMACR_LAST_MASK)) {
+ return (XST_FAILURE);
+ }
+
+ /* This set has completed pre-processing, adjust ring pointers & counters */
+ XDMAV3_RING_SEEKAHEAD(Ring, Ring->PreHead, NumBd);
+ Ring->PreCnt -= NumBd;
+
+ /* If it is running, tell the DMA engine to pause */
+ Swcr = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET);
+ if (Ring->RunState == XST_DMA_SG_IS_STARTED) {
+ Swcr |= XDMAV3_SWCR_SGD_MASK;
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET,
+ Swcr);
+ }
+
+ /* Transfer control of the BDs to the DMA engine. There are two cases to
+ * consider:
+ *
+ * 1) No currently active list.
+ * In this case, just resume the engine.
+ *
+ * 2) Active list.
+ * In this case, the last BD in the current list should have DMACR.SGS
+ * cleared so the engine will never stop there. The new stopping
+ * point is at the end of the extended list. Once the SGS bits are
+ * changed, resume the engine.
+ */
+ if (Ring->HwCnt != 0) {
+ /* Handle case 2 */
+ Dmacr = XDmaV3_mReadBd(Ring->HwTail, XDMAV3_BD_DMACR_OFFSET);
+ Dmacr &= ~XDMAV3_DMACR_SGS_MASK;
+ XDmaV3_mWriteBd(Ring->HwTail, XDMAV3_BD_DMACR_OFFSET, Dmacr);
+ XDMAV3_CACHE_FLUSH(Ring->HwTail);
+ }
+
+ /* Adjust Hw pointers and counters. XDMAV3_RING_SEEKAHEAD could be used to
+ * advance HwTail, but it will always evaluate to LastBdPtr
+ */
+ Ring->HwTail = LastBdPtr;
+ Ring->HwCnt += NumBd;
+
+ /* If it was enabled, tell the engine to resume */
+ if (Ring->RunState == XST_DMA_SG_IS_STARTED) {
+ Swcr &= ~XDMAV3_SWCR_SGD_MASK;
+ Swcr |= XDMAV3_SWCR_SGE_MASK;
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_SWCR_OFFSET,
+ Swcr);
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Returns a set of BD(s) that have been processed by HW. The returned BDs may
+ * be examined to determine the outcome of the DMA transaction(s). Once the BDs
+ * have been examined, the user must call XDmaV3_SgBdFree() in the same order
+ * which they were retrieved here. Example:
+ *
+ * <pre>
+ * MaxBd = 0xFFFFFFFF; // Ensure we get all that are ready
+ *
+ * NumBd = XDmaV3_SgBdFromHw(MyDmaInstPtr, MaxBd, &MyBdSet);
+ *
+ * if (NumBd == 0)
+ * {
+ * // HW has nothing ready for us yet
+ * }
+ *
+ * CurBd = MyBdSet;
+ * for (i=0; i<NumBd; i++)
+ * {
+ * // Examine CurBd for post processing.....
+ *
+ * // Onto next BD
+ * CurBd = XDmaV3_mSgBdNext(MyDmaInstPtr, CurBd);
+ * }
+ *
+ * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd, MyBdSet); // Return the list
+ *
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be retrieved from HW and freed in the correct sequence:
+ * <pre>
+ * // Legal
+ * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd1, &MySet1);
+ * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd1, MySet1);
+ *
+ * // Legal
+ * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd1, &MySet1);
+ * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd2, &MySet2);
+ * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd1, MySet1);
+ * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd2, MySet2);
+ *
+ * // Not legal
+ * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd1, &MySet1);
+ * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd2, &MySet2);
+ * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd2, MySet2);
+ * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd1, MySet1);
+ * </pre>
+ *
+ * If HW has only partially completed a packet spanning multiple BDs, then none
+ * of the BDs for that packet will be included in the results.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param BdLimit is the maximum number of BDs to return in the set.
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ * for examination.
+ *
+ * @return
+ * The number of BDs processed by HW. A value of 0 indicates that no data
+ * is available. No more than BdLimit BDs will be returned.
+ *
+ * @note Treat BDs returned by this function as read-only.
+ *
+ * @note This function should not be preempted by another XDmaV3 function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ ******************************************************************************/
+unsigned XDmaV3_SgBdFromHw(XDmaV3 * InstancePtr, unsigned BdLimit,
+ XDmaBdV3 ** BdSetPtr)
+{
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+ XDmaBdV3 *CurBd;
+ unsigned BdCount;
+ unsigned BdPartialCount;
+ u32 Dmasr;
+
+ CurBd = Ring->HwHead;
+ BdCount = 0;
+ BdPartialCount = 0;
+
+ /* If no BDs in work group, then there's nothing to search */
+ if (Ring->HwCnt == 0) {
+ *BdSetPtr = NULL;
+ return (0);
+ }
+
+ /* Starting at HwHead, keep moving forward in the list until:
+ * - A BD is encountered with its DMASR.DMABSY bit set which means HW has
+ * not completed processing of that BD.
+ * - Ring->HwTail is reached
+ * - The number of requested BDs has been processed
+ */
+ while (BdCount < BdLimit) {
+ /* Read the status */
+ XDMAV3_CACHE_INVALIDATE(CurBd);
+ Dmasr = XDmaV3_mReadBd(CurBd, XDMAV3_BD_DMASR_OFFSET);
+
+ /* If the HW still hasn't processed this BD then we are done */
+ if (Dmasr & XDMAV3_DMASR_DMABSY_MASK) {
+ break;
+ }
+
+ BdCount++;
+
+ /* HW has processed this BD so check the "last" bit. If it is clear,
+ * then there are more BDs for the current packet. Keep a count of
+ * these partial packet BDs.
+ */
+ if (Dmasr & XDMAV3_DMASR_LAST_MASK) {
+ BdPartialCount = 0;
+ }
+ else {
+ BdPartialCount++;
+ }
+
+ /* Reached the end of the work group */
+ if (CurBd == Ring->HwTail) {
+ break;
+ }
+
+ /* Move on to next BD in work group */
+ CurBd = XDmaV3_mSgBdNext(InstancePtr, CurBd);
+ }
+
+ /* Subtract off any partial packet BDs found */
+ BdCount -= BdPartialCount;
+
+ /* If BdCount is non-zero then BDs were found to return. Set return
+ * parameters, update pointers and counters, return success
+ */
+ if (BdCount) {
+ *BdSetPtr = Ring->HwHead;
+ Ring->HwCnt -= BdCount;
+ Ring->PostCnt += BdCount;
+ XDMAV3_RING_SEEKAHEAD(Ring, Ring->HwHead, BdCount);
+ return (BdCount);
+ }
+ else {
+ *BdSetPtr = NULL;
+ return (0);
+ }
+}
+
+
+/******************************************************************************/
+/**
+ * Frees a set of BDs that had been previously retrieved with XDmaV3_SgBdFromHw().
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs to free.
+ * @param BdSetPtr is the head of a list of BDs returned by XDmaV3_SgBdFromHw().
+ *
+ * @return
+ * - XST_SUCCESS if the set of BDs was freed.
+ * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
+ * XDmaV3_SgBdFromHw().
+ *
+ * @note This function should not be preempted by another XDmaV3 function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ ******************************************************************************/
+int XDmaV3_SgBdFree(XDmaV3 * InstancePtr, unsigned NumBd, XDmaBdV3 * BdSetPtr)
+{
+ XDmaV3_BdRing *Ring = &InstancePtr->BdRing;
+
+ /* Make sure we are in sync with XDmaV3_SgBdFromHw() */
+ if ((Ring->PostCnt < NumBd) || (Ring->PostHead != BdSetPtr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Update pointers and counters: the BDs migrate from the
+ * post-processing (Post) group back to the Free group
+ */
+ Ring->FreeCnt += NumBd;
+ Ring->PostCnt -= NumBd;
+ XDMAV3_RING_SEEKAHEAD(Ring, Ring->PostHead, NumBd);
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************/
+/**
+ * Check the internal data structures of the BD ring for the provided channel.
+ * The following checks are made:
+ *
+ * - Is the BD ring linked correctly in physical address space.
+ * - Do the internal pointers point to BDs in the ring.
+ * - Do the internal counters add up.
+ *
+ * The channel should be stopped prior to calling this function.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return
+ * - XST_SUCCESS if no problems were found with the ring.
+ * - XST_DMA_SG_NO_LIST if the list has not been created.
+ * - XST_IS_STARTED if the channel is not stopped.
+ * - XST_DMA_SG_LIST_ERROR if a problem is found with the internal data
+ * structures. If this value is returned, the channel should be reset to
+ * avoid data corruption or system instability.
+ *
+ * @note This function should not be preempted by another XDmaV3 function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ ******************************************************************************/
+int XDmaV3_SgCheck(XDmaV3 * InstancePtr)
+{
+ XDmaV3_BdRing *RingPtr = &InstancePtr->BdRing;
+ u32 AddrV, AddrP;
+ unsigned i;
+
+ /* Is the list created */
+ if (RingPtr->AllCnt == 0) {
+ return (XST_DMA_SG_NO_LIST);
+ }
+
+ /* Can't check if channel is running */
+ if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
+ return (XST_IS_STARTED);
+ }
+
+ /* RunState doesn't make sense */
+ else if (RingPtr->RunState != XST_DMA_SG_IS_STOPPED) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Verify internal pointers point to correct memory space */
+ AddrV = (u32) RingPtr->FreeHead;
+ if ((AddrV < RingPtr->BaseAddr) || (AddrV > RingPtr->HighAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->PreHead;
+ if ((AddrV < RingPtr->BaseAddr) || (AddrV > RingPtr->HighAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->HwHead;
+ if ((AddrV < RingPtr->BaseAddr) || (AddrV > RingPtr->HighAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->HwTail;
+ if ((AddrV < RingPtr->BaseAddr) || (AddrV > RingPtr->HighAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->PostHead;
+ if ((AddrV < RingPtr->BaseAddr) || (AddrV > RingPtr->HighAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Verify internal counters add up */
+ if ((RingPtr->HwCnt + RingPtr->PreCnt + RingPtr->FreeCnt +
+ RingPtr->PostCnt) != RingPtr->AllCnt) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Verify BDs are linked correctly */
+ AddrV = RingPtr->BaseAddr;
+ AddrP = RingPtr->PhysBaseAddr + RingPtr->Separation;
+ for (i = 1; i < RingPtr->AllCnt; i++) {
+ /* Check BDA for this BD. It should point to next physical addr */
+ if (XDmaV3_mReadBd(AddrV, XDMAV3_BD_BDA_OFFSET) != AddrP) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Move on to next BD */
+ AddrV += RingPtr->Separation;
+ AddrP += RingPtr->Separation;
+ }
+
+ /* Last BD should point back to the beginning of ring */
+ if (XDmaV3_mReadBd(AddrV, XDMAV3_BD_BDA_OFFSET) !=
+ RingPtr->PhysBaseAddr) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* No problems found */
+ return (XST_SUCCESS);
+}
+
+
+/******************************************************************************
+ * Verify given channel is of the SGDMA variety.
+ *
+ * Decides by testing the channel's DMASR for any of the DMACNFG
+ * SGDMA RX / SGDMA TX / simple SGDMA configuration bits.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return
+ * - 1 if channel is of type SGDMA
+ * - 0 if channel is not of type SGDMA
+ ******************************************************************************/
+static int IsSgDmaChannel(XDmaV3 * InstancePtr)
+{
+ u32 Dmasr;
+
+ Dmasr = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if (Dmasr & (XDMAV3_DMASR_DMACNFG_SGDMARX_MASK |
+ XDMAV3_DMASR_DMACNFG_SGDMATX_MASK |
+ XDMAV3_DMASR_DMACNFG_SSGDMA_MASK)) {
+ return (1);
+ }
+ else {
+ return (0);
+ }
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2006 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xdmav3_simple.c
+*
+* This file implements Simple DMA related functions. For more
+* information on this driver, see xdmav3.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 3.00a rmm 03/11/06 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xdmav3.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Initiate a simple DMA transfer. The BD argument sets the parameters of the
+* transfer. Since the BD is also used for SG DMA transfers, some fields of the
+* BD will be ignored. The following BD macros will have no effect on the
+* transfer:
+*
+* - XDmaBdV3_mSetLast()
+* - XDmaBdV3_mClearLast()
+* - XDmaBdV3_mSetBdPage()
+*
+* To determine when the transfer has completed, the user can poll the device
+* with XDmaV3_mGetStatus() and test the XDMAV3_DMASR_DMABSY_MASK bit, or wait for
+* an interrupt. When the DMA operation has completed, the outcome of the
+* transfer can be retrieved by calling XDmaV3_mGetStatus() and testing for DMA
+* bus errors bits.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param BdPtr sets the parameters of the transfer.
+*
+* @return
+* - XST_SUCCESS if the transfer was initiated
+* - XST_DEVICE_BUSY if a transfer is already in progress
+*
+******************************************************************************/
+int XDmaV3_SimpleTransfer(XDmaV3 * InstancePtr, XDmaBdV3 * BdPtr)
+{
+ u32 Dmasr;
+
+ /* Is the channel busy */
+ Dmasr = XDmaV3_mReadReg(InstancePtr->RegBase, XDMAV3_DMASR_OFFSET);
+ if (Dmasr & (XDMAV3_DMASR_DMABSY_MASK | XDMAV3_DMASR_SGBSY_MASK)) {
+ return (XST_DEVICE_BUSY);
+ }
+
+ /* Copy BdPtr fields into the appropriate HW registers */
+
+ /* DMACR: SGS bit is set always. This is done in case the transfer
+ * occurs on a SGDMA channel and will prevent the HW from fetching the
+ * next BD.
+ */
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET,
+ XDmaV3_mReadBd(BdPtr, XDMAV3_BD_DMACR_OFFSET)
+ | XDMAV3_DMACR_SGS_MASK);
+
+ /* MSBA */
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_MSBA_OFFSET,
+ XDmaV3_mReadBd(BdPtr, XDMAV3_BD_MSBA_OFFSET));
+
+ /* LSBA */
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_LSBA_OFFSET,
+ XDmaV3_mReadBd(BdPtr, XDMAV3_BD_LSBA_OFFSET));
+
+ /* LENGTH: Writing this register starts HW */
+ XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_LENGTH_OFFSET,
+ XDmaV3_mReadBd(BdPtr, XDMAV3_BD_LENGTH_OFFSET));
+
+ return (XST_SUCCESS);
+}
--- /dev/null
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xenv_linux.h
+*
+* Defines common services specified by xenv.h.
+*
+* @note
+* This file is not intended to be included directly by driver code.
+* Instead, the generic xenv.h file is intended to be included by driver
+* code.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a wgr 02/28/07 Added cache handling macros.
+* 1.00a wgr 02/27/07 Simplified code. Deprecated old-style macro names.
+* 1.00a xd 11/03/04 Improved support for doxygen.
+* 1.00a ch 10/24/02 First release
+* 1.10a wgr 03/22/07 Converted to new coding style.
+* </pre>
+*
+*
+******************************************************************************/
+
+#ifndef XENV_LINUX_H
+#define XENV_LINUX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+
+
+/******************************************************************************
+ *
+ * MEMCPY / MEMSET related macros.
+ *
+ * Those macros are defined to catch legacy code in Xilinx drivers. The
+ * XENV_MEM_COPY and XENV_MEM_FILL macros were used in early Xilinx driver
+ * code. They are being replaced by memcpy() and memset() function calls. These
+ * macros are defined to catch any remaining occurrences of those macros.
+ *
+ ******************************************************************************/
+
+/*****************************************************************************/
+/**
+ *
+ * Copies a non-overlapping block of memory.
+ *
+ * @param DestPtr
+ * Destination address to copy data to.
+ *
+ * @param SrcPtr
+ * Source address to copy data from.
+ *
+ * @param Bytes
+ * Number of bytes to copy.
+ *
+ * @return None.
+ *
+ *****************************************************************************/
+
+#define XENV_MEM_COPY(DestPtr, SrcPtr, Bytes) \
+ memcpy(DestPtr, SrcPtr, Bytes)
+/* do_not_use_XENV_MEM_COPY_use_memcpy_instead */
+
+
+/*****************************************************************************/
+/**
+ *
+ * Fills an area of memory with constant data.
+ *
+ * @param DestPtr
+ * Destination address to copy data to.
+ *
+ * @param Data
+ * Value to set.
+ *
+ * @param Bytes
+ * Number of bytes to copy.
+ *
+ * @return None.
+ *
+ *****************************************************************************/
+
+#define XENV_MEM_FILL(DestPtr, Data, Bytes) \
+ memset(DestPtr, Data, Bytes)
+/* do_not_use_XENV_MEM_FILL_use_memset_instead */
+
+
+/******************************************************************************
+ *
+ * TIME related macros
+ *
+ ******************************************************************************/
+/**
+ * A structure that contains a time stamp used by other time stamp macros
+ * defined below. This structure is processor dependent.
+ */
+typedef int XENV_TIME_STAMP;
+
+/*****************************************************************************/
+/**
+ *
+ * Time is derived from the 64 bit PPC timebase register
+ *
+ * @param StampPtr is the storage for the retrieved time stamp.
+ *
+ * @return None.
+ *
+ * @note
+ *
+ * Signature: void XENV_TIME_STAMP_GET(XTIME_STAMP *StampPtr)
+ * <br><br>
+ * This macro must be implemented by the user.
+ *
+ *****************************************************************************/
+#define XENV_TIME_STAMP_GET(StampPtr)
+
+/*****************************************************************************/
+/**
+ *
+ * This macro is not yet implemented and always returns 0.
+ *
+ * @param Stamp1Ptr is the first sampled time stamp.
+ * @param Stamp2Ptr is the second sampled time stamp.
+ *
+ * @return 0
+ *
+ * @note
+ *
+ * This macro must be implemented by the user.
+ *
+ *****************************************************************************/
+#define XENV_TIME_STAMP_DELTA_US(Stamp1Ptr, Stamp2Ptr) (0)
+
+/*****************************************************************************/
+/**
+ *
+ * This macro is not yet implemented and always returns 0.
+ *
+ * @param Stamp1Ptr is the first sampled time stamp.
+ * @param Stamp2Ptr is the second sampled time stamp.
+ *
+ * @return 0
+ *
+ * @note
+ *
+ * This macro must be implemented by the user
+ *
+ *****************************************************************************/
+#define XENV_TIME_STAMP_DELTA_MS(Stamp1Ptr, Stamp2Ptr) (0)
+
+/*****************************************************************************/
+/**
+ *
+ * Delay the specified number of microseconds.
+ *
+ * @param delay
+ * Number of microseconds to delay.
+ *
+ * @return None.
+ *
+ * @note XENV_USLEEP is deprecated. Use udelay() instead.
+ *
+ *****************************************************************************/
+
+#define XENV_USLEEP(delay) udelay(delay)
+/* do_not_use_XENV_USLEEP_use_udelay_instead */
+
+
+/******************************************************************************
+ *
+ * CACHE handling macros / mappings
+ *
+ * The implementation of the cache handling functions can be found in
+ * arch/microblaze.
+ *
+ * These #defines are simple mappings to the Linux API.
+ *
+ * The underlying Linux implementation will take care of taking the right
+ * actions depending on the configuration of the MicroBlaze processor in the
+ * system.
+ *
+ ******************************************************************************/
+
+/* Whole-cache enable/disable, mapped to the Linux MicroBlaze helpers. */
+#define XCACHE_ENABLE_DCACHE() __enable_dcache()
+#define XCACHE_DISABLE_DCACHE() __disable_dcache()
+#define XCACHE_ENABLE_ICACHE() __enable_icache()
+#define XCACHE_DISABLE_ICACHE() __disable_icache()
+
+/* Range operations take (start address, length in bytes) and are converted
+ * to the (start, end) form the underlying Linux API expects. */
+#define XCACHE_INVALIDATE_DCACHE_RANGE(Addr, Len) invalidate_dcache_range((u32)(Addr), (u32)((Addr)+(Len)))
+#define XCACHE_FLUSH_DCACHE_RANGE(Addr, Len) flush_dcache_range((u32)(Addr), (u32)((Addr)+(Len)))
+
+/* Icache-range invalidation is unsupported: the macro deliberately expands
+ * to a bare string literal rather than code, so any use stands out. */
+#define XCACHE_INVALIDATE_ICACHE_RANGE(Addr, Len) "XCACHE_INVALIDATE_ICACHE_RANGE unsupported"
+#define XCACHE_FLUSH_ICACHE_RANGE(Addr, Len) flush_icache_range(Addr, Len)
+
+/* Convenience wrappers that act on both caches at once. */
+#define XCACHE_ENABLE_CACHE() \
+	{ XCACHE_ENABLE_DCACHE(); XCACHE_ENABLE_ICACHE(); }
+
+#define XCACHE_DISABLE_CACHE() \
+	{ XCACHE_DISABLE_DCACHE(); XCACHE_DISABLE_ICACHE(); }
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+
--- /dev/null
+/* $Id: xio.c,v 1.5 2007/07/24 22:01:35 xduan Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xio.c
+*
+* Contains I/O functions for memory-mapped or non-memory-mapped I/O
+* architectures. These functions encapsulate PowerPC architecture-specific
+* I/O requirements.
+*
+* @note
+*
+* This file contains architecture-dependent code.
+*
+* The order of the SYNCHRONIZE_IO and the read or write operation is
+* important. For the Read operation, all I/O needs to complete prior
+* to the desired read to insure valid data from the address. The PPC
+* is a weakly ordered I/O model and reads can and will occur prior
+* to writes and the SYNCHRONIZE_IO ensures that any writes occur prior
+* to the read. For the Write operation the SYNCHRONIZE_IO occurs
+* after the desired write to ensure that the address is updated with
+* the new value prior to any subsequent read.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- --------------------------------------------------------
+* 1.00a ecm 10/18/05 initial release
+* needs to be updated to replace eieio with mbar when
+* compilers support this mnemonic.
+*
+* 1.00a ecm 01/24/07 update for new coding standard.
+* 1.10a xd 07/24/07 Corrected the format in asm functions in __DCC__ mode.
+* </pre>
+******************************************************************************/
+
+
+/***************************** Include Files *********************************/
+#include "xio.h"
+#include "xbasic_types.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+/*****************************************************************************/
+/**
+*
+* Performs a 16-bit endian conversion.
+*
+* @param Source contains the value to be converted.
+* @param DestPtr contains a pointer to the location to put the
+* converted value.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void OutSwap16(u16 Source, u16 *DestPtr)
+{
+	/* Swap the two bytes of Source and store the result at DestPtr. */
+	u16 HiToLo = (u16) (Source >> 8);	/* old high byte -> low  */
+	u16 LoToHi = (u16) (Source << 8);	/* old low byte  -> high */
+
+	*DestPtr = (u16) (HiToLo | LoToHi);
+}
+
+u16 InSwap16(u16 *DestPtr)
+{
+	/* Read the 16-bit value at DestPtr and return it byte-swapped. */
+	u16 Value = *DestPtr;
+
+	return (u16) ((Value >> 8) | (Value << 8));
+}
+/*****************************************************************************/
+/**
+*
+* Performs a 32-bit endian conversion.
+*
+* @param Source contains the value to be converted.
+* @param DestPtr contains a pointer to the location to put the
+* converted value.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void OutSwap32(u32 Source, u32 *DestPtr)
+{
+	/* Reverse the byte order of Source (full 32-bit endian conversion:
+	 * bytes 3-2-1-0 become 0-1-2-3) and store the result at DestPtr.
+	 */
+	*DestPtr = ((Source & 0x000000FF) << 24) |
+		   ((Source & 0x0000FF00) << 8) |
+		   ((Source & 0x00FF0000) >> 8) |
+		   ((Source & 0xFF000000) >> 24);
+}
+
+u32 InSwap32(u32 *DestPtr)
+{
+	/* Read the 32-bit value at DestPtr and return it with its byte
+	 * order fully reversed (bytes 3-2-1-0 become 0-1-2-3).
+	 */
+	u32 Value = *DestPtr;
+
+	return ((Value & 0x000000FF) << 24) |
+	       ((Value & 0x0000FF00) << 8) |
+	       ((Value & 0x00FF0000) >> 8) |
+	       ((Value & 0xFF000000) >> 24);
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an input operation for an 8-bit memory location by reading from the
+* specified address and returning the value read from that address.
+*
+* @param InAddress contains the address to perform the input operation at.
+*
+* @return
+*
+* The value read from the specified input address.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+ u8 XIo_In8(XIo_Address InAddress)
+{
+	/* read the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+
+#if defined CONFIG_PPC
+
+	u8 IoContents;
+	/* eieio orders all earlier I/O before the byte load (PPC is a
+	 * weakly ordered I/O model) */
+	__asm__ volatile ("eieio; lbz %0,0(%1)":"=r" (IoContents):"b"
+			  (InAddress));
+	return IoContents;
+
+#else
+
+	/* non-PPC: plain load; SYNCHRONIZE_IO expands to nothing here
+	 * (see xio.h) */
+	SYNCHRONIZE_IO;
+	return *(u8 *) InAddress;
+
+#endif
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an input operation for a 16-bit memory location by reading from the
+* specified address and returning the value read from that address.
+*
+* @param InAddress contains the address to perform the input operation at.
+*
+* @return
+*
+* The value read from the specified input address.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u16 XIo_In16(XIo_Address InAddress)
+{
+	/* read the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+
+#if defined CONFIG_PPC
+
+	u16 IoContents;
+	/* eieio orders all earlier I/O before the halfword load */
+	__asm__ volatile ("eieio; lhz %0,0(%1)":"=r" (IoContents):"b"
+			  (InAddress));
+	return IoContents;
+
+#else
+
+	/* non-PPC: plain load; SYNCHRONIZE_IO expands to nothing here */
+	SYNCHRONIZE_IO;
+	return *(u16 *) InAddress;
+
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an input operation for a 32-bit memory location by reading from the
+* specified address and returning the value read from that address.
+*
+* @param InAddress contains the address to perform the input operation at.
+*
+* @return
+*
+* The value read from the specified input address.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XIo_In32(XIo_Address InAddress)
+{
+	/* read the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+
+#ifdef CONFIG_PPC
+
+	u32 IoContents;
+	/* eieio orders all earlier I/O before the word load */
+	__asm__ volatile ("eieio; lwz %0,0(%1)":"=r" (IoContents):"b"
+			  (InAddress));
+	return IoContents;
+
+#else
+
+	/* non-PPC: plain load; SYNCHRONIZE_IO expands to nothing here */
+	SYNCHRONIZE_IO;
+	return *(u32 *) InAddress;
+
+#endif
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an input operation for a 16-bit memory location by reading from the
+* specified address and returning the byte-swapped value read from that
+* address.
+*
+* @param InAddress contains the address to perform the input operation at.
+*
+* @return
+*
+* The byte-swapped value read from the specified input address.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u16 XIo_InSwap16(XIo_Address InAddress)
+{
+	/* read the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+#ifdef CONFIG_PPC
+	u16 IoContents;
+
+	/* lhbrx performs the byte swap as part of the load */
+	__asm__ volatile ("eieio; lhbrx %0,0,%1":"=r" (IoContents):"b"
+			  (InAddress));
+	return IoContents;
+#else
+	/* InSwap16() takes a u16 pointer; XIo_Address is an integer, so
+	 * cast explicitly instead of relying on an implicit int-to-pointer
+	 * conversion (a constraint violation in ISO C). */
+	return InSwap16((u16 *) InAddress);
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an input operation for a 32-bit memory location by reading from the
+* specified address and returning the byte-swapped value read from that
+* address.
+*
+* @param InAddress contains the address to perform the input operation at.
+*
+* @return
+*
+* The byte-swapped value read from the specified input address.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XIo_InSwap32(XIo_Address InAddress)
+{
+	/* read the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+#ifdef CONFIG_PPC
+	u32 IoContents;
+
+	/* lwbrx performs the byte swap as part of the load */
+	__asm__ volatile ("eieio; lwbrx %0,0,%1":"=r" (IoContents):"b"
+			  (InAddress));
+	return IoContents;
+#else
+	/* InSwap32() takes a u32 pointer; XIo_Address is an integer, so
+	 * cast explicitly instead of relying on an implicit int-to-pointer
+	 * conversion (a constraint violation in ISO C). */
+	return InSwap32((u32 *) InAddress);
+#endif
+
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Performs an output operation for an 8-bit memory location by writing the
+* specified value to the specified address.
+*
+* @param OutAddress contains the address to perform the output operation at.
+* @param Value contains the value to be output at the specified address.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XIo_Out8(XIo_Address OutAddress, u8 Value)
+{
+	/* write the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+
+#ifdef CONFIG_PPC
+
+	/* eieio after the store ensures the write reaches the device before
+	 * any subsequent access */
+	__asm__ volatile ("stb %0,0(%1); eieio"::"r" (Value), "b"(OutAddress));
+
+#else
+
+	/* non-PPC: volatile store; SYNCHRONIZE_IO expands to nothing here */
+	*(volatile u8 *) OutAddress = Value;
+	SYNCHRONIZE_IO;
+
+#endif
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an output operation for a 16-bit memory location by writing the
+* specified value to the specified address.
+*
+* @param OutAddress contains the address to perform the output operation at.
+* @param Value contains the value to be output at the specified address.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XIo_Out16(XIo_Address OutAddress, u16 Value)
+{
+	/* write the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+
+#ifdef CONFIG_PPC
+
+	/* eieio after the store ensures the write reaches the device before
+	 * any subsequent access */
+	__asm__ volatile ("sth %0,0(%1); eieio"::"r" (Value), "b"(OutAddress));
+
+#else
+
+	/* non-PPC: volatile store; SYNCHRONIZE_IO expands to nothing here */
+	*(volatile u16 *) OutAddress = Value;
+	SYNCHRONIZE_IO;
+
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an output operation for a 32-bit memory location by writing the
+* specified value to the specified address.
+*
+* @param OutAddress contains the address to perform the output operation at.
+* @param Value contains the value to be output at the specified address.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XIo_Out32(XIo_Address OutAddress, u32 Value)
+{
+	/* write the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+
+#ifdef CONFIG_PPC
+
+	/* eieio after the store ensures the write reaches the device before
+	 * any subsequent access */
+	__asm__ volatile ("stw %0,0(%1); eieio"::"r" (Value), "b"(OutAddress));
+
+#else
+
+	/* non-PPC: volatile store; SYNCHRONIZE_IO expands to nothing here */
+	*(volatile u32 *) OutAddress = Value;
+	SYNCHRONIZE_IO;
+
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an output operation for a 16-bit memory location by writing the
+* specified value to the specified address. The value is byte-swapped
+* before being written.
+*
+* @param OutAddress contains the address to perform the output operation at.
+* @param Value contains the value to be output at the specified address.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XIo_OutSwap16(XIo_Address OutAddress, u16 Value)
+{
+	/* write the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+#ifdef CONFIG_PPC
+	/* sthbrx performs the byte swap as part of the store */
+	__asm__ volatile ("sthbrx %0,0,%1; eieio"::"r" (Value),
+			  "b"(OutAddress));
+#else
+	/* OutSwap16(Source, DestPtr) takes the value first and the
+	 * destination pointer second. The previous call had the arguments
+	 * reversed, which stored a byte-swapped (truncated) address through
+	 * whatever address Value happened to name. */
+	OutSwap16(Value, (u16 *) OutAddress);
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* Performs an output operation for a 32-bit memory location by writing the
+* specified value to the specified address. The value is byte-swapped
+* before being written.
+*
+* @param OutAddress contains the address to perform the output operation at.
+* @param Value contains the value to be output at the specified address.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XIo_OutSwap32(XIo_Address OutAddress, u32 Value)
+{
+	/* write the contents of the I/O location and then synchronize the I/O
+	 * such that the I/O operation completes before proceeding on
+	 */
+#ifdef CONFIG_PPC
+	/* stwbrx performs the byte swap as part of the store */
+	__asm__ volatile ("stwbrx %0,0,%1; eieio"::"r" (Value),
+			  "b"(OutAddress));
+#else
+	/* OutSwap32(Source, DestPtr) takes the value first and the
+	 * destination pointer second. The previous call had the arguments
+	 * reversed, which stored a byte-swapped address through whatever
+	 * address Value happened to name. */
+	OutSwap32(Value, (u32 *) OutAddress);
+#endif
+}
--- /dev/null
+/* $Id: xio.h,v 1.4 2007/07/24 22:01:35 xduan Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xio.h
+*
+* This file contains the interface for the XIo component, which encapsulates
+* the Input/Output functions for the PowerPC architecture.
+* This header file needs to be updated to replace eieio with mbar when
+* compilers support the mbar mnemonic.
+*
+* @note
+*
+* This file contains architecture-dependent items (memory mapped or non memory
+* mapped I/O).
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- --------------------------------------------------------
+* 1.00a ecm 10/18/05 initial release
+* needs to be updated to replace eieio with mbar when
+* compilers support this mnemonic.
+*
+* 1.00a ecm 01/24/07 update for new coding standard.
+* 1.10a xd 07/24/07 Corrected the format in asm functions in __DCC__ mode.
+* </pre>
+******************************************************************************/
+
+#ifndef XIO_H /* prevent circular inclusions */
+#define XIO_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * Typedef for an I/O address. Typically correlates to the width of the
+ * address bus.
+ */
+typedef u32 XIo_Address;
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/* The following macro is specific to the GNU compiler and PowerPC family. It
+ * performs an EIEIO instruction such that I/O operations are synced correctly.
+ * This macro is not necessarily portable across compilers since it uses
+ * inline assembly.
+ */
+#ifdef CONFIG_PPC
+# define SYNCHRONIZE_IO __asm__ volatile ("eieio") /* should be 'mbar' ultimately */
+#else
+# define SYNCHRONIZE_IO
+#endif
+
+/* The following macros allow the software to be transportable across
+ * processors which use big or little endian memory models.
+ *
+ * Defined first are processor-specific endian conversion macros specific to
+ * the GNU compiler and the PowerPC family, as well as a no-op endian conversion
+ * macro. These macros are not to be used directly by software. Instead, the
+ * XIo_To/FromLittleEndianXX and XIo_To/FromBigEndianXX macros below are to be
+ * used to allow the endian conversion to only be performed when necessary
+ */
+
+#define XIo_EndianNoop(Source, DestPtr) (*DestPtr = Source)
+
+#ifdef CONFIG_PPC
+
+#define XIo_EndianSwap16(Source, DestPtr) __asm__ __volatile__(\
+ "sthbrx %0,0,%1\n"\
+ : : "r" (Source), "r" (DestPtr)\
+ )
+
+#define XIo_EndianSwap32(Source, DestPtr) __asm__ __volatile__(\
+ "stwbrx %0,0,%1\n"\
+ : : "r" (Source), "r" (DestPtr)\
+ )
+#else
+
+/* Portable (non-PPC) 16-bit swap: stores the byte-swapped value of Source
+ * through DestPtr. Written as a block so the arguments are captured once
+ * in local temporaries. */
+#define XIo_EndianSwap16(Source, DestPtr) \
+{\
+	u16 src = (Source); \
+	u16 *destptr = (DestPtr); \
+	*destptr = src >> 8; \
+	*destptr |= (src << 8); \
+}
+
+/* Portable (non-PPC) 32-bit swap: stores the fully byte-reversed value of
+ * Source through DestPtr, one byte lane at a time. */
+#define XIo_EndianSwap32(Source, DestPtr) \
+{\
+	u32 src = (Source); \
+	u32 *destptr = (DestPtr); \
+	*destptr = src >> 24; \
+	*destptr |= ((src >> 8) & 0x0000FF00); \
+	*destptr |= ((src << 8) & 0x00FF0000); \
+	*destptr |= ((src << 24) & 0xFF000000); \
+}
+
+#endif
+
+// #ifdef XLITTLE_ENDIAN
+// /* little-endian processor */
+
+// #define XIo_ToLittleEndian16 XIo_EndianNoop
+// #define XIo_ToLittleEndian32 XIo_EndianNoop
+// #define XIo_FromLittleEndian16 XIo_EndianNoop
+// #define XIo_FromLittleEndian32 XIo_EndianNoop
+
+// #define XIo_ToBigEndian16(Source, DestPtr) XIo_EndianSwap16(Source, DestPtr)
+// #define XIo_ToBigEndian32(Source, DestPtr) XIo_EndianSwap32(Source, DestPtr)
+// #define XIo_FromBigEndian16 XIo_ToBigEndian16
+// #define XIo_FromBigEndian32 XIo_ToBigEndian32
+
+// #else
+/* big-endian processor */ // ppc or microblaze
+
+#define XIo_ToLittleEndian16(Source, DestPtr) XIo_EndianSwap16(Source, DestPtr)
+#define XIo_ToLittleEndian32(Source, DestPtr) XIo_EndianSwap32(Source, DestPtr)
+#define XIo_FromLittleEndian16 XIo_ToLittleEndian16
+#define XIo_FromLittleEndian32 XIo_ToLittleEndian32
+
+#define XIo_ToBigEndian16 XIo_EndianNoop
+#define XIo_ToBigEndian32 XIo_EndianNoop
+#define XIo_FromBigEndian16 XIo_EndianNoop
+#define XIo_FromBigEndian32 XIo_EndianNoop
+
+// #endif
+
+
+/************************** Function Prototypes ******************************/
+
+/* The following macros allow optimized I/O operations for memory mapped I/O
+ * Note that the SYNCHRONIZE_IO may be moved by the compiler during
+ * optimization.
+ */
+
+u8 XIo_In8(XIo_Address InAddress);
+u16 XIo_In16(XIo_Address InAddress);
+u32 XIo_In32(XIo_Address InAddress);
+
+void XIo_Out8(XIo_Address OutAddress, u8 Value);
+void XIo_Out16(XIo_Address OutAddress, u16 Value);
+void XIo_Out32(XIo_Address OutAddress, u32 Value);
+
+
+/*
+#define XIo_In8(InputPtr) (*(volatile u8 *)(InputPtr)); SYNCHRONIZE_IO;
+#define XIo_In16(InputPtr) (*(volatile u16 *)(InputPtr)); SYNCHRONIZE_IO;
+#define XIo_In32(InputPtr) (*(volatile u32 *)(InputPtr)); SYNCHRONIZE_IO;
+
+#define XIo_Out8(OutputPtr, Value) \
+ { (*(volatile u8 *)(OutputPtr) = Value); SYNCHRONIZE_IO; }
+#define XIo_Out16(OutputPtr, Value) \
+ { (*(volatile u16 *)(OutputPtr) = Value); SYNCHRONIZE_IO; }
+#define XIo_Out32(OutputPtr, Value) \
+ { (*(volatile u32 *)(OutputPtr) = Value); SYNCHRONIZE_IO; }
+ */
+
+/* The following functions handle IO addresses where data must be swapped
+ * They cannot be implemented as macros
+ */
+u16 XIo_InSwap16(XIo_Address InAddress);
+u32 XIo_InSwap32(XIo_Address InAddress);
+void XIo_OutSwap16(XIo_Address OutAddress, u16 Value);
+void XIo_OutSwap32(XIo_Address OutAddress, u32 Value);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xio_dcr.c,v 1.9 2007/01/24 17:00:16 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xio_dcr.c
+*
+* The implementation of the XDcrIo interface. See xio_dcr.h for more
+* information about the component.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ecm 11/09/06 Modified from the PPC405 version to use the indirect
+* addressing that is available in the PPC440 block in V5.
+* Removed the jump table structure in xio_dcr.c also.
+* Added functionality from the SEG driver to allow for
+* one file pair.
+* 1.00a ecm 01/02/07 Incorporated changes from testing with multiple DCR
+* masters, discovered and fixed several concurrency
+* issues.
+* 1.00a ecm 01/24/07 update for new coding standard.
+* </pre>
+*
+* @internal
+*
+* The C functions which subsequently call into either the assembly code or into
+* the provided table of functions are required since the registers assigned to
+* the calling and return from functions are strictly defined in the ABI and that
+* definition is used in the low-level functions directly. The use of macros is
+* not recommended since the temporary registers in the ABI are defined but there
+* is no way to force the compiler to use a specific register in a block of code.
+*
+*****************************************************************************/
+
+/***************************** Include Files ********************************/
+
+#include <asm/dcr.h>
+#include <asm/reg.h>
+
+#include "xstatus.h"
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xio_dcr.h"
+
+/************************** Constant Definitions ****************************/
+
+/*
+ * base address defines for each of the four possible DCR base
+ * addresses a processor can have
+ */
+#define XDCR_0_BASEADDR 0x000
+#define XDCR_1_BASEADDR 0x100
+#define XDCR_2_BASEADDR 0x200
+#define XDCR_3_BASEADDR 0x300
+
+
+#define MAX_DCR_REGISTERS 4096
+/* highest valid DCR register index; parenthesized so the macro expands
+ * safely inside larger expressions (e.g. 2 * MAX_DCR_REGISTER) */
+#define MAX_DCR_REGISTER (MAX_DCR_REGISTERS - 1)
+#define MIN_DCR_REGISTER 0
+
+/**************************** Type Definitions ******************************/
+
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/************************** Variable Definitions ****************************/
+
+
+
+/************************** Function Prototypes *****************************/
+
+/*****************************************************************************/
+/**
+*
+* Outputs value provided to specified register defined in the header file.
+*
+* @param DcrRegister is the intended destination DCR register
+* @param Data is the value to be placed into the specified DCR register
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+void XIo_DcrOut(u32 DcrRegister, u32 Data)
+{
+	/*
+	 * Assert validates the register number
+	 */
+	XASSERT_VOID(DcrRegister < MAX_DCR_REGISTERS);
+
+	/*
+	 * pass the call on to the proper function; this simple interface
+	 * always uses the processor's first DCR base (XDCR_0_BASEADDR)
+	 */
+	XIo_mDcrIndirectAddrWriteReg(XDCR_0_BASEADDR, DcrRegister, Data);
+}
+
+/*****************************************************************************/
+/**
+*
+* Reads value from specified register.
+*
+* @param DcrRegister is the intended source DCR register
+*
+* @return
+*
+* Contents of the specified DCR register.
+*
+* @note
+*
+* None.
+*
+****************************************************************************/
+u32 XIo_DcrIn(u32 DcrRegister)
+{
+	/*
+	 * Assert validates the register number
+	 */
+	XASSERT_NONVOID(DcrRegister < MAX_DCR_REGISTERS);
+
+	/*
+	 * pass the call on to the proper function; this simple interface
+	 * always uses the processor's first DCR base (XDCR_0_BASEADDR)
+	 */
+	return (XIo_mDcrIndirectAddrReadReg(XDCR_0_BASEADDR, DcrRegister));
+}
+
+/*****************************************************************************/
+/**
+*
+* Reads the value of the specified register using the indirect access method.
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param DcrRegister is the intended destination DCR register
+*
+* @return
+*
+* Contents of the specified DCR register.
+*
+* @note
+*
+* Uses the indirect addressing method available in V5 with PPC440.
+*
+****************************************************************************/
+u32 XIo_DcrReadReg(u32 DcrBase, u32 DcrRegister)
+{
+	/*
+	 * Dispatch on the DCR base using the named base-address constants;
+	 * an unrecognized base falls back to base 0 (same as the original
+	 * duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		return XIo_mDcrIndirectAddrReadReg(XDCR_1_BASEADDR,
+						   DcrRegister);
+	case XDCR_2_BASEADDR:
+		return XIo_mDcrIndirectAddrReadReg(XDCR_2_BASEADDR,
+						   DcrRegister);
+	case XDCR_3_BASEADDR:
+		return XIo_mDcrIndirectAddrReadReg(XDCR_3_BASEADDR,
+						   DcrRegister);
+	case XDCR_0_BASEADDR:
+	default:
+		return XIo_mDcrIndirectAddrReadReg(XDCR_0_BASEADDR,
+						   DcrRegister);
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Writes the value to the specified register using the indirect access method.
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param DcrRegister is the intended destination DCR register
+* @param Data is the value to be placed into the specified DCR register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* Uses the indirect addressing method available in V5 with PPC440.
+*
+****************************************************************************/
+void XIo_DcrWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data)
+{
+	/*
+	 * Dispatch on the DCR base using the named base-address constants;
+	 * an unrecognized base falls back to base 0 (same as the original
+	 * duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		XIo_mDcrIndirectAddrWriteReg(XDCR_1_BASEADDR, DcrRegister,
+					     Data);
+		return;
+	case XDCR_2_BASEADDR:
+		XIo_mDcrIndirectAddrWriteReg(XDCR_2_BASEADDR, DcrRegister,
+					     Data);
+		return;
+	case XDCR_3_BASEADDR:
+		XIo_mDcrIndirectAddrWriteReg(XDCR_3_BASEADDR, DcrRegister,
+					     Data);
+		return;
+	case XDCR_0_BASEADDR:
+	default:
+		XIo_mDcrIndirectAddrWriteReg(XDCR_0_BASEADDR, DcrRegister,
+					     Data);
+		return;
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Explicitly acquires and release DCR lock--Auto-Lock is disabled.
+* Reads the value of the specified register using the indirect access method.
+* This function is provided because the most common usecase is to enable
+* Auto-Lock. Checking for Auto-Lock in every indirect access would defeat the
+* purpose of having Auto-Lock.
+* Auto-Lock can only be enable/disabled in hardware.
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param DcrRegister is the intended destination DCR register
+*
+* @return
+*
+* Contents of the specified DCR register.
+*
+* @note
+*
+* Uses the indirect addressing method available in V5 with PPC440.
+*
+****************************************************************************/
+u32 XIo_DcrLockAndReadReg(u32 DcrBase, u32 DcrRegister)
+{
+	unsigned int rVal;
+
+	/*
+	 * Each case must end in a break: the original switch fell through,
+	 * so a matched case cascaded into every case below it (and the
+	 * default), performing redundant lock/read/unlock cycles on the
+	 * other DCR bases and always returning the base-0 read instead of
+	 * the requested one. An unrecognized base falls back to base 0,
+	 * matching the sibling XIo_DcrLockAndWriteReg().
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		XIo_mDcrLock(XDCR_1_BASEADDR);
+		rVal = XIo_mDcrIndirectAddrReadReg(XDCR_1_BASEADDR,
+						   DcrRegister);
+		XIo_mDcrUnlock(XDCR_1_BASEADDR);
+		break;
+	case XDCR_2_BASEADDR:
+		XIo_mDcrLock(XDCR_2_BASEADDR);
+		rVal = XIo_mDcrIndirectAddrReadReg(XDCR_2_BASEADDR,
+						   DcrRegister);
+		XIo_mDcrUnlock(XDCR_2_BASEADDR);
+		break;
+	case XDCR_3_BASEADDR:
+		XIo_mDcrLock(XDCR_3_BASEADDR);
+		rVal = XIo_mDcrIndirectAddrReadReg(XDCR_3_BASEADDR,
+						   DcrRegister);
+		XIo_mDcrUnlock(XDCR_3_BASEADDR);
+		break;
+	case XDCR_0_BASEADDR:
+	default:
+		XIo_mDcrLock(XDCR_0_BASEADDR);
+		rVal = XIo_mDcrIndirectAddrReadReg(XDCR_0_BASEADDR,
+						   DcrRegister);
+		XIo_mDcrUnlock(XDCR_0_BASEADDR);
+		break;
+	}
+	return rVal;
+}
+
+/*****************************************************************************/
+/**
+*
+* Explicitly acquires and release DCR lock--Auto-Lock is disabled.
+* Writes the value to the specified register using the indirect access method.
+* This function is provided because the most common usecase is to enable
+* Auto-Lock. Checking for Auto-Lock in every indirect access would defeat the
+* purpose of having Auto-Lock.
+* Auto-Lock can only be enable/disabled in hardware.
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param DcrRegister is the intended destination DCR register
+* @param Data is the value to be placed into the specified DCR register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* Uses the indirect addressing method available in V5 with PPC440.
+*
+****************************************************************************/
+void XIo_DcrLockAndWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data)
+{
+	/*
+	 * Lock the bus, perform the indirect write, then unlock, using the
+	 * named base-address constants; an unrecognized base falls back to
+	 * base 0 (same as the original duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		XIo_mDcrLock(XDCR_1_BASEADDR);
+		XIo_mDcrIndirectAddrWriteReg(XDCR_1_BASEADDR, DcrRegister,
+					     Data);
+		XIo_mDcrUnlock(XDCR_1_BASEADDR);
+		return;
+	case XDCR_2_BASEADDR:
+		XIo_mDcrLock(XDCR_2_BASEADDR);
+		XIo_mDcrIndirectAddrWriteReg(XDCR_2_BASEADDR, DcrRegister,
+					     Data);
+		XIo_mDcrUnlock(XDCR_2_BASEADDR);
+		return;
+	case XDCR_3_BASEADDR:
+		XIo_mDcrLock(XDCR_3_BASEADDR);
+		XIo_mDcrIndirectAddrWriteReg(XDCR_3_BASEADDR, DcrRegister,
+					     Data);
+		XIo_mDcrUnlock(XDCR_3_BASEADDR);
+		return;
+	case XDCR_0_BASEADDR:
+	default:
+		XIo_mDcrLock(XDCR_0_BASEADDR);
+		XIo_mDcrIndirectAddrWriteReg(XDCR_0_BASEADDR, DcrRegister,
+					     Data);
+		XIo_mDcrUnlock(XDCR_0_BASEADDR);
+		return;
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Read APU UDI DCR via indirect addressing.
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param UDInum is the desired APU UDI register
+*
+* @return
+*
+* Contents of the specified APU register.
+*
+* @note
+*
+* Uses the indirect addressing method available in V5 with PPC440.
+*
+****************************************************************************/
+u32 XIo_DcrReadAPUUDIReg(u32 DcrBase, short UDInum)
+{
+	/*
+	 * Dispatch on the DCR base using the named base-address constants;
+	 * an unrecognized base falls back to base 0 (same as the original
+	 * duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		return XIo_mDcrIndirectAddrReadAPUUDIReg(XDCR_1_BASEADDR,
+							 UDInum);
+	case XDCR_2_BASEADDR:
+		return XIo_mDcrIndirectAddrReadAPUUDIReg(XDCR_2_BASEADDR,
+							 UDInum);
+	case XDCR_3_BASEADDR:
+		return XIo_mDcrIndirectAddrReadAPUUDIReg(XDCR_3_BASEADDR,
+							 UDInum);
+	case XDCR_0_BASEADDR:
+	default:
+		return XIo_mDcrIndirectAddrReadAPUUDIReg(XDCR_0_BASEADDR,
+							 UDInum);
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Writes the value to the APU UDI DCR using the indirect access method.
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param UDInum is the intended destination APU register
+* @param Data is the value to be placed into the specified APU register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* Uses the indirect addressing method available in V5 with PPC440.
+*
+****************************************************************************/
+void XIo_DcrWriteAPUUDIReg(u32 DcrBase, short UDInum, u32 Data)
+{
+	/*
+	 * Dispatch on the DCR base using the named base-address constants;
+	 * an unrecognized base falls back to base 0 (same as the original
+	 * duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		XIo_mDcrIndirectAddrWriteAPUUDIReg(XDCR_1_BASEADDR, UDInum,
+						   Data);
+		return;
+	case XDCR_2_BASEADDR:
+		XIo_mDcrIndirectAddrWriteAPUUDIReg(XDCR_2_BASEADDR, UDInum,
+						   Data);
+		return;
+	case XDCR_3_BASEADDR:
+		XIo_mDcrIndirectAddrWriteAPUUDIReg(XDCR_3_BASEADDR, UDInum,
+						   Data);
+		return;
+	case XDCR_0_BASEADDR:
+	default:
+		XIo_mDcrIndirectAddrWriteAPUUDIReg(XDCR_0_BASEADDR, UDInum,
+						   Data);
+		return;
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Locks DCR bus via the Global Status/Control register.
+*
+* @param DcrBase is the base of the block of DCR registers
+*
+* @return
+*
+* None
+*
+* @note
+*
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared. The internal PPC440 can clear both timeout bits but an
+* external DCR master can only clear the external DCR master's timeout bit.
+*
+* Only available in V5 with PPC440.
+*
+****************************************************************************/
+void XIo_DcrLock(u32 DcrBase)
+{
+	/*
+	 * Dispatch on the DCR base using the named base-address constants;
+	 * an unrecognized base falls back to base 0 (same as the original
+	 * duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		XIo_mDcrLock(XDCR_1_BASEADDR);
+		return;
+	case XDCR_2_BASEADDR:
+		XIo_mDcrLock(XDCR_2_BASEADDR);
+		return;
+	case XDCR_3_BASEADDR:
+		XIo_mDcrLock(XDCR_3_BASEADDR);
+		return;
+	case XDCR_0_BASEADDR:
+	default:
+		XIo_mDcrLock(XDCR_0_BASEADDR);
+		return;
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Unlocks DCR bus via the Global Status/Control register.
+*
+* @param DcrBase is the base of the block of DCR registers
+*
+* @return
+*
+* None
+*
+* @note
+*
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared. The internal PPC440 can clear both timeout bits but an
+* external DCR master can only clear the external DCR master's timeout bit.
+*
+* Only available in V5 with PPC440.
+*
+****************************************************************************/
+void XIo_DcrUnlock(u32 DcrBase)
+{
+	/*
+	 * Dispatch on the DCR base using the named base-address constants;
+	 * an unrecognized base falls back to base 0 (same as the original
+	 * duplicated default case).
+	 */
+	switch (DcrBase) {
+	case XDCR_1_BASEADDR:
+		XIo_mDcrUnlock(XDCR_1_BASEADDR);
+		return;
+	case XDCR_2_BASEADDR:
+		XIo_mDcrUnlock(XDCR_2_BASEADDR);
+		return;
+	case XDCR_3_BASEADDR:
+		XIo_mDcrUnlock(XDCR_3_BASEADDR);
+		return;
+	case XDCR_0_BASEADDR:
+	default:
+		XIo_mDcrUnlock(XDCR_0_BASEADDR);
+		return;
+	}
+}
--- /dev/null
+/* $Id: xio_dcr.h,v 1.8 2007/01/24 17:00:16 meinelte Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xio_dcr.h
+*
+* The DCR I/O access functions.
+*
+* @note
+*
+* These access functions are specific to the PPC440 CPU. Changes might be
+* necessary for other members of the IBM PPC Family.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ecm 10/18/05 First release
+* Need to verify opcodes for mt/mfdcr remain the same.
+* 1.00a ecm 11/09/06 Modified from the PPC405 version to use the indirect
+* addressing that is available in the PPC440 block in V5.
+* Removed the jump table structure in xio_dcr.c also.
+* Added functionality from the SEG driver to allow for
+* one file pair.
+* 1.00a ecm 01/02/07 Incorporated changes from testing with multiple DCR
+* masters, discovered and fixed several concurrency
+* issues.
+* 1.00a ecm 01/24/07 update for new coding standard.
+* </pre>
+*
+* @internal
+*
+* This code WILL NOT FUNCTION on the PPC405 based architectures, V2P and V4.
+*
+******************************************************************************/
+
+#ifndef XDCRIO_H /* prevent circular inclusions */
+#define XDCRIO_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+#include "xbasic_types.h"
+
+/************************** Constant Definitions *****************************/
+/*
+ * 256 internal DCR registers
+ * Base address: 2 most significant bits of 10-bit addr taken from
+ * the C_DCRBASEADDR parameter of the processor block.
+ * Offset: 8 least significant bits
+ */
+/* register base addresses */
+
+#define XDCR_APU_BASE 0x04
+#define XDCR_MIB_BASE 0x10
+#define XDCR_XB_BASE 0x20
+#define XDCR_PLBS0_BASE 0x34
+#define XDCR_PLBS1_BASE 0x44
+#define XDCR_PLBM_BASE 0x54
+#define XDCR_DMA0_BASE 0x80
+#define XDCR_DMA1_BASE 0x98
+#define XDCR_DMA2_BASE 0xB0
+#define XDCR_DMA3_BASE 0xC8
+
+/* register offsets */
+/* global registers 0x00-0x02 */
+
+#define XDCR_IDA_ADDR 0x00
+#define XDCR_IDA_ACC 0x01
+#define XDCR_CTRLCFGSTAT 0x02
+
+/* Auxiliary Processor Unit Controller (APU) 0x04-0x05 */
+
+#define XDCR_APU_UDI (XDCR_APU_BASE+0x00)
+#define XDCR_APU_CTRL (XDCR_APU_BASE+0x01)
+
+/* Memory Interface Bridge (MIB) 0x10-0x13 */
+
+#define XDCR_MIB_CTRL (XDCR_MIB_BASE+0x00)
+#define XDCR_MIB_RCON (XDCR_MIB_BASE+0x01)
+#define XDCR_MIB_BCON (XDCR_MIB_BASE+0x02)
+
+/* Crossbar (XB) 0x20-0x33 */
+
+#define XDCR_XB_IST (XDCR_XB_BASE+0x00)
+#define XDCR_XB_IMASK (XDCR_XB_BASE+0x01)
+#define XDCR_XB_ARBCFGX (XDCR_XB_BASE+0x03)
+#define XDCR_XB_FIFOSTX (XDCR_XB_BASE+0x04)
+#define XDCR_XB_SMSTX (XDCR_XB_BASE+0x05)
+#define XDCR_XB_MISCX (XDCR_XB_BASE+0x06)
+#define XDCR_XB_ARBCFGM (XDCR_XB_BASE+0x08)
+#define XDCR_XB_FIFOSTM (XDCR_XB_BASE+0x09)
+#define XDCR_XB_SMSTM (XDCR_XB_BASE+0x0A)
+#define XDCR_XB_MISCM (XDCR_XB_BASE+0x0B)
+#define XDCR_XB_TMPL0MAP (XDCR_XB_BASE+0x0D)
+#define XDCR_XB_TMPL1MAP (XDCR_XB_BASE+0x0E)
+#define XDCR_XB_TMPL2MAP (XDCR_XB_BASE+0x0F)
+#define XDCR_XB_TMPL3MAP (XDCR_XB_BASE+0x10)
+#define XDCR_XB_TMPLSEL (XDCR_XB_BASE+0x11)
+
+/* PLB Slave DCR offsets only */
+
+#define XDCR_PLBS_CFG 0x00
+#define XDCR_PLBS_SEARU 0x02
+#define XDCR_PLBS_SEARL 0x03
+#define XDCR_PLBS_SESR 0x04
+#define XDCR_PLBS_MISCST 0x05
+#define XDCR_PLBS_PLBERRST 0x06
+#define XDCR_PLBS_SMST 0x07
+#define XDCR_PLBS_MISC 0x08
+#define XDCR_PLBS_CMDSNIFF 0x09
+#define XDCR_PLBS_CMDSNIFFA 0x0A
+#define XDCR_PLBS_TMPL0MAP 0x0C
+#define XDCR_PLBS_TMPL1MAP 0x0D
+#define XDCR_PLBS_TMPL2MAP 0x0E
+#define XDCR_PLBS_TMPL3MAP 0x0F
+
+/* PLB Slave 0 (PLBS0) 0x34-0x43 */
+
+#define XDCR_PLBS0_CFG (XDCR_PLBS0_BASE+0x00)
+#define XDCR_PLBS0_CNT (XDCR_PLBS0_BASE+0x01)
+#define XDCR_PLBS0_SEARU (XDCR_PLBS0_BASE+0x02)
+#define XDCR_PLBS0_SEARL (XDCR_PLBS0_BASE+0x03)
+#define XDCR_PLBS0_SESR (XDCR_PLBS0_BASE+0x04)
+#define XDCR_PLBS0_MISCST (XDCR_PLBS0_BASE+0x05)
+#define XDCR_PLBS0_PLBERRST (XDCR_PLBS0_BASE+0x06)
+#define XDCR_PLBS0_SMST (XDCR_PLBS0_BASE+0x07)
+#define XDCR_PLBS0_MISC (XDCR_PLBS0_BASE+0x08)
+#define XDCR_PLBS0_CMDSNIFF (XDCR_PLBS0_BASE+0x09)
+#define XDCR_PLBS0_CMDSNIFFA (XDCR_PLBS0_BASE+0x0A)
+#define XDCR_PLBS0_TMPL0MAP (XDCR_PLBS0_BASE+0x0C)
+#define XDCR_PLBS0_TMPL1MAP (XDCR_PLBS0_BASE+0x0D)
+#define XDCR_PLBS0_TMPL2MAP (XDCR_PLBS0_BASE+0x0E)
+#define XDCR_PLBS0_TMPL3MAP (XDCR_PLBS0_BASE+0x0F)
+
+/* PLB Slave 1 (PLBS1) 0x44-0x53 */
+
+#define XDCR_PLBS1_CFG (XDCR_PLBS1_BASE+0x00)
+#define XDCR_PLBS1_CNT (XDCR_PLBS1_BASE+0x01)
+#define XDCR_PLBS1_SEARU (XDCR_PLBS1_BASE+0x02)
+#define XDCR_PLBS1_SEARL (XDCR_PLBS1_BASE+0x03)
+#define XDCR_PLBS1_SESR (XDCR_PLBS1_BASE+0x04)
+#define XDCR_PLBS1_MISCST (XDCR_PLBS1_BASE+0x05)
+#define XDCR_PLBS1_PLBERRST (XDCR_PLBS1_BASE+0x06)
+#define XDCR_PLBS1_SMST (XDCR_PLBS1_BASE+0x07)
+#define XDCR_PLBS1_MISC (XDCR_PLBS1_BASE+0x08)
+#define XDCR_PLBS1_CMDSNIFF (XDCR_PLBS1_BASE+0x09)
+#define XDCR_PLBS1_CMDSNIFFA (XDCR_PLBS1_BASE+0x0A)
+#define XDCR_PLBS1_TMPL0MAP (XDCR_PLBS1_BASE+0x0C)
+#define XDCR_PLBS1_TMPL1MAP (XDCR_PLBS1_BASE+0x0D)
+#define XDCR_PLBS1_TMPL2MAP (XDCR_PLBS1_BASE+0x0E)
+#define XDCR_PLBS1_TMPL3MAP (XDCR_PLBS1_BASE+0x0F)
+
+/* PLB Master (PLBM) 0x54-0x5F */
+
+#define XDCR_PLBM_CFG (XDCR_PLBM_BASE+0x00)
+#define XDCR_PLBM_CNT (XDCR_PLBM_BASE+0x01)
+#define XDCR_PLBM_FSEARU (XDCR_PLBM_BASE+0x02)
+#define XDCR_PLBM_FSEARL (XDCR_PLBM_BASE+0x03)
+#define XDCR_PLBM_FSESR (XDCR_PLBM_BASE+0x04)
+#define XDCR_PLBM_MISCST (XDCR_PLBM_BASE+0x05)
+#define XDCR_PLBM_PLBERRST (XDCR_PLBM_BASE+0x06)
+#define XDCR_PLBM_SMST (XDCR_PLBM_BASE+0x07)
+#define XDCR_PLBM_MISC (XDCR_PLBM_BASE+0x08)
+#define XDCR_PLBM_CMDSNIFF (XDCR_PLBM_BASE+0x09)
+#define XDCR_PLBM_CMDSNIFFA (XDCR_PLBM_BASE+0x0A)
+
+/* DMA Controller DCR offsets only */
+#define XDCR_DMA_TXNXTDESCPTR 0x00
+#define XDCR_DMA_TXCURBUFADDR 0x01
+#define XDCR_DMA_TXCURBUFLEN 0x02
+#define XDCR_DMA_TXCURDESCPTR 0x03
+#define XDCR_DMA_TXTAILDESCPTR 0x04
+#define XDCR_DMA_TXCHANNELCTRL 0x05
+#define XDCR_DMA_TXIRQ 0x06
+#define XDCR_DMA_TXSTATUS 0x07
+#define XDCR_DMA_RXNXTDESCPTR 0x08
+#define XDCR_DMA_RXCURBUFADDR 0x09
+#define XDCR_DMA_RXCURBUFLEN 0x0A
+#define XDCR_DMA_RXCURDESCPTR 0x0B
+#define XDCR_DMA_RXTAILDESCPTR 0x0C
+#define XDCR_DMA_RXCHANNELCTRL 0x0D
+#define XDCR_DMA_RXIRQ 0x0E
+#define XDCR_DMA_RXSTATUS 0x0F
+#define XDCR_DMA_CTRL 0x10
+
+/* DMA Controller 0 (DMA0) 0x80-0x90 */
+
+#define XDCR_DMA0_TXNXTDESCPTR (XDCR_DMA0_BASE+0x00)
+#define XDCR_DMA0_TXCURBUFADDR (XDCR_DMA0_BASE+0x01)
+#define XDCR_DMA0_TXCURBUFLEN (XDCR_DMA0_BASE+0x02)
+#define XDCR_DMA0_TXCURDESCPTR (XDCR_DMA0_BASE+0x03)
+#define XDCR_DMA0_TXTAILDESCPTR (XDCR_DMA0_BASE+0x04)
+#define XDCR_DMA0_TXCHANNELCTRL (XDCR_DMA0_BASE+0x05)
+#define XDCR_DMA0_TXIRQ (XDCR_DMA0_BASE+0x06)
+#define XDCR_DMA0_TXSTATUS (XDCR_DMA0_BASE+0x07)
+#define XDCR_DMA0_RXNXTDESCPTR (XDCR_DMA0_BASE+0x08)
+#define XDCR_DMA0_RXCURBUFADDR (XDCR_DMA0_BASE+0x09)
+#define XDCR_DMA0_RXCURBUFLEN (XDCR_DMA0_BASE+0x0A)
+#define XDCR_DMA0_RXCURDESCPTR (XDCR_DMA0_BASE+0x0B)
+#define XDCR_DMA0_RXTAILDESCPTR (XDCR_DMA0_BASE+0x0C)
+#define XDCR_DMA0_RXCHANNELCTRL (XDCR_DMA0_BASE+0x0D)
+#define XDCR_DMA0_RXIRQ (XDCR_DMA0_BASE+0x0E)
+#define XDCR_DMA0_RXSTATUS (XDCR_DMA0_BASE+0x0F)
+#define XDCR_DMA0_CTRL (XDCR_DMA0_BASE+0x10)
+
+/* DMA Controller 1 (DMA1) 0x98-0xA8 */
+
+#define XDCR_DMA1_TXNXTDESCPTR (XDCR_DMA1_BASE+0x00)
+#define XDCR_DMA1_TXCURBUFADDR (XDCR_DMA1_BASE+0x01)
+#define XDCR_DMA1_TXCURBUFLEN (XDCR_DMA1_BASE+0x02)
+#define XDCR_DMA1_TXCURDESCPTR (XDCR_DMA1_BASE+0x03)
+#define XDCR_DMA1_TXTAILDESCPTR (XDCR_DMA1_BASE+0x04)
+#define XDCR_DMA1_TXCHANNELCTRL (XDCR_DMA1_BASE+0x05)
+#define XDCR_DMA1_TXIRQ (XDCR_DMA1_BASE+0x06)
+#define XDCR_DMA1_TXSTATUS (XDCR_DMA1_BASE+0x07)
+#define XDCR_DMA1_RXNXTDESCPTR (XDCR_DMA1_BASE+0x08)
+#define XDCR_DMA1_RXCURBUFADDR (XDCR_DMA1_BASE+0x09)
+#define XDCR_DMA1_RXCURBUFLEN (XDCR_DMA1_BASE+0x0A)
+#define XDCR_DMA1_RXCURDESCPTR (XDCR_DMA1_BASE+0x0B)
+#define XDCR_DMA1_RXTAILDESCPTR (XDCR_DMA1_BASE+0x0C)
+#define XDCR_DMA1_RXCHANNELCTRL (XDCR_DMA1_BASE+0x0D)
+#define XDCR_DMA1_RXIRQ (XDCR_DMA1_BASE+0x0E)
+#define XDCR_DMA1_RXSTATUS (XDCR_DMA1_BASE+0x0F)
+#define XDCR_DMA1_CTRL (XDCR_DMA1_BASE+0x10)
+
+/* DMA Controller 2 (DMA2) 0xB0-0xC0 */
+
+#define XDCR_DMA2_TXNXTDESCPTR (XDCR_DMA2_BASE+0x00)
+#define XDCR_DMA2_TXCURBUFADDR (XDCR_DMA2_BASE+0x01)
+#define XDCR_DMA2_TXCURBUFLEN (XDCR_DMA2_BASE+0x02)
+#define XDCR_DMA2_TXCURDESCPTR (XDCR_DMA2_BASE+0x03)
+#define XDCR_DMA2_TXTAILDESCPTR (XDCR_DMA2_BASE+0x04)
+#define XDCR_DMA2_TXCHANNELCTRL (XDCR_DMA2_BASE+0x05)
+#define XDCR_DMA2_TXIRQ (XDCR_DMA2_BASE+0x06)
+#define XDCR_DMA2_TXSTATUS (XDCR_DMA2_BASE+0x07)
+#define XDCR_DMA2_RXNXTDESCPTR (XDCR_DMA2_BASE+0x08)
+#define XDCR_DMA2_RXCURBUFADDR (XDCR_DMA2_BASE+0x09)
+#define XDCR_DMA2_RXCURBUFLEN (XDCR_DMA2_BASE+0x0A)
+#define XDCR_DMA2_RXCURDESCPTR (XDCR_DMA2_BASE+0x0B)
+#define XDCR_DMA2_RXTAILDESCPTR (XDCR_DMA2_BASE+0x0C)
+#define XDCR_DMA2_RXCHANNELCTRL (XDCR_DMA2_BASE+0x0D)
+#define XDCR_DMA2_RXIRQ (XDCR_DMA2_BASE+0x0E)
+#define XDCR_DMA2_RXSTATUS (XDCR_DMA2_BASE+0x0F)
+#define XDCR_DMA2_CTRL (XDCR_DMA2_BASE+0x10)
+
+/* DMA Controller 3 (DMA3) 0xC8-0xD8 */
+
+#define XDCR_DMA3_TXNXTDESCPTR (XDCR_DMA3_BASE+0x00)
+#define XDCR_DMA3_TXCURBUFADDR (XDCR_DMA3_BASE+0x01)
+#define XDCR_DMA3_TXCURBUFLEN (XDCR_DMA3_BASE+0x02)
+#define XDCR_DMA3_TXCURDESCPTR (XDCR_DMA3_BASE+0x03)
+#define XDCR_DMA3_TXTAILDESCPTR (XDCR_DMA3_BASE+0x04)
+#define XDCR_DMA3_TXCHANNELCTRL (XDCR_DMA3_BASE+0x05)
+#define XDCR_DMA3_TXIRQ (XDCR_DMA3_BASE+0x06)
+#define XDCR_DMA3_TXSTATUS (XDCR_DMA3_BASE+0x07)
+#define XDCR_DMA3_RXNXTDESCPTR (XDCR_DMA3_BASE+0x08)
+#define XDCR_DMA3_RXCURBUFADDR (XDCR_DMA3_BASE+0x09)
+#define XDCR_DMA3_RXCURBUFLEN (XDCR_DMA3_BASE+0x0A)
+#define XDCR_DMA3_RXCURDESCPTR (XDCR_DMA3_BASE+0x0B)
+#define XDCR_DMA3_RXTAILDESCPTR (XDCR_DMA3_BASE+0x0C)
+#define XDCR_DMA3_RXCHANNELCTRL (XDCR_DMA3_BASE+0x0D)
+#define XDCR_DMA3_RXIRQ (XDCR_DMA3_BASE+0x0E)
+#define XDCR_DMA3_RXSTATUS (XDCR_DMA3_BASE+0x0F)
+#define XDCR_DMA3_CTRL (XDCR_DMA3_BASE+0x10)
+
+
+/**
+ * <pre>
+ * These are the bit defines for the Control, Configuration, and Status
+ * register (XDCR_CTRLCFGSTAT)
+ * @{
+ */
+#define XDCR_INT_MSTR_LOCK_MASK 0x80000000 /* Internal Master Bus Lock */
+#define XDCR_INT_MSTR_AUTO_LOCK_MASK 0x40000000 /* Internal Master Bus Auto Lock, RO */
+#define XDCR_EXT_MSTR_LOCK_MASK 0x20000000 /* External Master Bus Master Lock */
+#define XDCR_EXT_MSTR_AUTO_LOCK_MASK 0x10000000 /* External Master Bus Auto Lock, RO */
+#define XDCR_ENB_DCR_AUTO_LOCK_MASK 0x08000000 /* Enable Auto Bus Lock */
+#define XDCR_ENB_MSTR_ASYNC_MASK 0x04000000 /* External Master in Async Mode */
+#define XDCR_ENB_SLV_ASYNC_MASK 0x02000000 /* External Slave in Async Mode */
+#define XDCR_ENB_DCR_TIMEOUT_SUPP_MASK 0x01000000 /* Enable Timeout Support */
+#define XDCR_INT_MSTR_TIMEOUT_BIT 0x00000002 /* Internal Master Bus Timeout Occurred */
+#define XDCR_EXT_MSTR_TIMEOUT_BIT 0x00000001 /* External Master Bus Timeout Occurred */
+
+/*
+ * Mask to disable exceptions in PPC440 MSR
+ * Bit 14: Critical Interrupt Enable 0x00020000
+ * Bit 16: External Interrupt Enable 0x00008000
+ * Bit 20: Floating-point Exceptions Mode 0 0x00000800
+ * Bit 23: Floating-point Exceptions Mode 1 0x00000100
+ */
+#define XDCR_DISABLE_EXCEPTIONS 0xFFFD76FF
+#define XDCR_ALL_LOCK (XDCR_INT_MSTR_LOCK_MASK | XDCR_EXT_MSTR_LOCK_MASK)
+#define XDCR_ALL_TIMEOUT (XDCR_INT_MSTR_TIMEOUT_BIT | XDCR_EXT_MSTR_TIMEOUT_BIT)
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/******************************************************************************/
+/**
+* Reads the register at the specified DCR address.
+*
+*
+* @param DcrRegister is the intended source DCR register
+*
+* @return
+*
+* Contents of the specified DCR register.
+*
+* @note
+*
+* C-style signature:
+*	u32 XIo_mDcrReadReg(u32 DcrRegister)
+*
+*******************************************************************************/
+#define XIo_mDcrReadReg(DcrRegister) ({ mfdcr((DcrRegister)); })
+
+/******************************************************************************/
+/**
+* Writes the register at specified DCR address.
+*
+*
+* @param DcrRegister is the intended destination DCR register
+* @param Data is the value to be placed into the specified DCR register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* C-style signature:
+*	void XIo_mDcrWriteReg(u32 DcrRegister, u32 Data)
+*
+*******************************************************************************/
+#define XIo_mDcrWriteReg(DcrRegister, Data) ({ mtdcr((DcrRegister), (Data)); })
+
+/******************************************************************************/
+/**
+* Explicitly locks the DCR bus
+*
+* @param DcrBase is the base of the block of DCR registers
+*
+* @return
+*
+* None
+*
+* @note
+*
+* C-style signature:
+*	void XIo_mDcrLock(u32 DcrBase)
+*
+* Writes both Lock bits (XDCR_ALL_LOCK), but since a master cannot edit
+* another master's Lock bit only the caller's bit takes effect, which is
+* why the macro can be simplified this way.
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared; the timeout bits are therefore masked off here.
+*
+*******************************************************************************/
+#define XIo_mDcrLock(DcrBase) \
+({ \
+	mtdcr((DcrBase) | XDCR_CTRLCFGSTAT, \
+	(mfdcr((DcrBase) | XDCR_CTRLCFGSTAT) | XDCR_ALL_LOCK) & ~XDCR_ALL_TIMEOUT); \
+})
+
+/******************************************************************************/
+/**
+* Explicitly unlocks the DCR bus
+*
+* @param DcrBase is the base of the block of DCR registers
+*
+* @return
+*
+* None
+*
+* @note
+*
+* C-style signature:
+*	void XIo_mDcrUnlock(u32 DcrBase)
+*
+* Unsets either Lock bit. Since a master cannot edit another master's Lock bit,
+* the macro can be simplified.
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared.
+*
+*******************************************************************************/
+#define XIo_mDcrUnlock(DcrBase) \
+({ \
+	mtdcr((DcrBase) | XDCR_CTRLCFGSTAT, \
+	(mfdcr((DcrBase) | XDCR_CTRLCFGSTAT) & ~(XDCR_ALL_LOCK | XDCR_ALL_TIMEOUT))); \
+})
+
+/******************************************************************************/
+/**
+* Reads the APU UDI register at the specified APU address.
+*
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param UDInum is the intended source APU register
+*
+* @return
+*
+* Contents of the specified APU register.
+*
+* @note
+*
+* C-style signature:
+*	u32 XIo_mDcrReadAPUUDIReg(u32 DcrBase, u32 UDInum)
+*
+* Since reading an APU UDI DCR requires a dummy write to the same DCR,
+* the target UDI number is required. In order to make this operation atomic,
+* interrupts are disabled before and enabled after the DCR accesses.
+* Because an APU UDI access involves two DCR accesses, the DCR bus must be
+* locked to ensure that another master doesn't access the APU UDI register
+* at the same time.
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared.
+* Steps:
+* - save old MSR
+* - disable interrupts by writing mask to MSR
+* - acquire lock; since the PPC440 supports timeout wait, it will wait until
+*   it successfully acquires the DCR bus lock
+* - shift and mask the UDI number to its bit position of [22:25]
+* - add the DCR base address to the UDI number and perform the read
+* - release DCR bus lock
+* - restore MSR
+* - return value read
+*
+*******************************************************************************/
+#define XIo_mDcrReadAPUUDIReg(DcrBase, UDInum) \
+({ \
+	unsigned int rVal; \
+	unsigned int oldMSR = mfmsr(); \
+	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS); \
+	XIo_DcrLock((DcrBase)); \
+	mtdcr((DcrBase) | XDCR_APU_UDI, (((UDInum) << 6) & 0x000003c0) | 0x00000030); \
+	rVal = mfdcr((DcrBase) | XDCR_APU_UDI); \
+	XIo_DcrUnlock((DcrBase)); \
+	mtmsr(oldMSR); \
+	rVal; \
+})
+
+/******************************************************************************/
+/**
+* Writes the data to the APU UDI register at the specified APU address.
+*
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param UDInum is the intended source APU register
+* @param Data is the value to be placed into the specified APU register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* C-style signature:
+*	void XIo_mDcrWriteAPUUDIReg(u32 DcrBase, u32 UDInum, u32 Data)
+*
+* Since writing an APU UDI DCR requires a dummy write to the same DCR,
+* the target UDI number is required. In order to make this operation atomic,
+* interrupts are disabled before and enabled after the DCR accesses.
+* Because an APU UDI access involves two DCR accesses, the DCR bus must be
+* locked to ensure that another master doesn't access the APU UDI register
+* at the same time.
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared.
+* Steps:
+* - save old MSR
+* - disable interrupts by writing mask to MSR
+* - acquire lock, since the PPC440 supports timeout wait, it will wait until
+*   it successfully acquires the DCR bus lock
+* - shift and mask the UDI number to its bit position of [22:25]
+* - add DCR base address to UDI number offset and perform the write
+* - release DCR bus lock
+* - restore MSR
+*
+*******************************************************************************/
+#define XIo_mDcrWriteAPUUDIReg(DcrBase, UDInum, Data) \
+({ \
+	unsigned int oldMSR = mfmsr(); \
+	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS); \
+	XIo_DcrLock((DcrBase)); \
+	mtdcr((DcrBase) | XDCR_APU_UDI, (((UDInum) << 6) & 0x000003c0) | 0x00000030); \
+	mtdcr((DcrBase) | XDCR_APU_UDI, (Data)); \
+	XIo_DcrUnlock((DcrBase)); \
+	mtmsr(oldMSR); \
+})
+
+/******************************************************************************/
+/**
+* Reads the register at the specified DCR address using the indirect addressing
+* method.
+*
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param DcrRegister is the intended source DCR register
+*
+* @return
+*
+* Contents of the specified DCR register.
+*
+* @note
+*
+* C-style signature:
+*	u32 XIo_mDcrIndirectAddrReadReg(u32 DcrBase, u32 DcrRegister)
+*
+* Assumes auto-buslocking feature is ON.
+* In order to make this operation atomic, interrupts are disabled before
+* and enabled after the DCR accesses.
+*
+*******************************************************************************/
+#define XIo_mDcrIndirectAddrReadReg(DcrBase, DcrRegister) \
+({ \
+	unsigned int rVal; \
+	unsigned int oldMSR = mfmsr(); \
+	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS); \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ADDR, (DcrBase) | DcrRegister); \
+	rVal = XIo_mDcrReadReg((DcrBase) | XDCR_IDA_ACC); \
+	mtmsr(oldMSR); \
+	rVal; \
+})
+
+/******************************************************************************/
+/**
+* Writes the register at specified DCR address using the indirect addressing
+* method.
+*
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param DcrRegister is the intended destination DCR register
+* @param Data is the value to be placed into the specified DCR register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* C-style signature:
+* void XIo_mDcrIndirectAddrWriteReg(u32 DcrBase, u32 DcrRegister,
+* u32 Data)
+*
+* Assumes auto-buslocking feature is ON.
+* In order to make this operation atomic, interrupts are disabled before
+* and enabled after the DCR accesses.
+*
+*******************************************************************************/
+#define XIo_mDcrIndirectAddrWriteReg(DcrBase, DcrRegister, Data) \
+({ \
+	unsigned int oldMSR = mfmsr(); /* save MSR so it can be restored */ \
+	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS); /* mask exceptions: atomic section */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ADDR, (DcrBase) | (DcrRegister)); /* set indirect address */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ACC, (Data)); /* write through the access reg */ \
+	mtmsr(oldMSR); /* restore original MSR */ \
+})
+
+/******************************************************************************/
+/**
+* Reads the APU UDI register at the specified DCR address using the indirect
+* addressing method.
+*
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param UDInum is the intended source DCR register
+*
+* @return
+*
+* Contents of the specified APU register.
+*
+* @note
+*
+* C-style signature:
+* u32 XIo_mDcrIndirectAddrReadAPUUDIReg(u32 DcrBase, u32 UDInum)
+*
+* An indirect APU UDI read requires three DCR accesses:
+* 1) Indirect address reg write
+* 2) Indirect access reg write to specify the UDI number
+* 3) Indirect access reg read of the actual data
+* Since (2) unlocks the DCR bus, the DCR bus must be explicitly locked
+* instead of relying on the auto-lock feature.
+* In order to make this operation atomic, interrupts are disabled before
+* and enabled after the DCR accesses.
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared.
+*
+*******************************************************************************/
+#define XIo_mDcrIndirectAddrReadAPUUDIReg(DcrBase, UDInum) \
+({ \
+	unsigned int rVal; \
+	unsigned int oldMSR = mfmsr(); /* save MSR so it can be restored */ \
+	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS); /* mask exceptions: atomic section */ \
+	XIo_DcrLock((DcrBase)); /* explicit lock: 2nd access below releases the auto-lock */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ADDR, (DcrBase) | XDCR_APU_UDI); /* indirect address of UDI reg */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ACC, (((UDInum) << 6) & 0x000003c0) | 0x00000030); /* select UDI number */ \
+	rVal = XIo_mDcrReadReg((DcrBase) | XDCR_IDA_ACC); /* read the actual data */ \
+	XIo_DcrUnlock((DcrBase)); \
+	mtmsr(oldMSR); /* restore original MSR */ \
+	rVal; /* value of the statement expression */ \
+})
+
+/******************************************************************************/
+/**
+* Writes the APU UDI register at specified DCR address using the indirect
+* addressing method.
+*
+*
+* @param DcrBase is the base of the block of DCR registers
+* @param UDInum is the intended destination APU UDI register number
+* @param Data is the value to be placed into the specified DCR register
+*
+* @return
+*
+* None
+*
+* @note
+*
+* C-style signature:
+* void XIo_mDcrIndirectAddrWriteAPUUDIReg(u32 DcrBase, u32 UDInum, u32 Data)
+*
+* An indirect APU UDI write requires three DCR accesses:
+* 1) Indirect address reg write
+* 2) Indirect access reg write to specify the UDI number
+* 3) Indirect access reg write of the actual data
+* Since (2) unlocks the DCR bus, the DCR bus must be explicitly locked
+* instead of relying on the auto-lock feature.
+* In order to make this operation atomic, interrupts are disabled before
+* and enabled after the DCR accesses.
+* Care must be taken to not write a '1' to either timeout bit because
+* it will be cleared.
+*
+*******************************************************************************/
+#define XIo_mDcrIndirectAddrWriteAPUUDIReg(DcrBase, UDInum, Data) \
+({ \
+	unsigned int oldMSR = mfmsr(); /* save MSR so it can be restored */ \
+	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS); /* mask exceptions: atomic section */ \
+	XIo_DcrLock((DcrBase)); /* explicit lock: 2nd access below releases the auto-lock */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ADDR, (DcrBase) | XDCR_APU_UDI); /* indirect address of UDI reg */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ACC, (((UDInum) << 6) & 0x000003c0) | 0x00000030); /* select UDI number */ \
+	XIo_mDcrWriteReg((DcrBase) | XDCR_IDA_ACC, (Data)); /* write the actual data */ \
+	XIo_DcrUnlock((DcrBase)); \
+	mtmsr(oldMSR); /* restore original MSR */ \
+})
+
+/************************** Function Prototypes ******************************/
+void XIo_DcrOut(u32 DcrRegister, u32 Data);
+u32 XIo_DcrIn(u32 DcrRegister);
+
+u32 XIo_DcrReadReg(u32 DcrBase, u32 DcrRegister);
+void XIo_DcrWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data);
+u32 XIo_DcrLockAndReadReg(u32 DcrBase, u32 DcrRegister);
+void XIo_DcrLockAndWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data);
+
+void XIo_DcrWriteAPUUDIReg(u32 DcrBase, short UDInum, u32 Data);
+u32 XIo_DcrReadAPUUDIReg(u32 DcrBase, short UDInum);
+
+void XIo_DcrLock(u32 DcrBase);
+void XIo_DcrUnlock(u32 DcrBase);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xipif_v1_23_b.c,v 1.3 2004/11/15 20:31:35 xduan Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xipif_v1_23_b.c
+*
+* This file contains the implementation of the XIpIf component. The
+* XIpIf component encapsulates the IPIF, which is the standard interface
+* that IP must adhere to when connecting to a bus. The purpose of this
+* component is to encapsulate the IPIF processing such that maintainability
+* is increased. This component does not provide a lot of abstraction
+* from the details of the IPIF as it is considered a building block for
+* device drivers. A device driver designer must be familiar with the
+* details of the IPIF hardware to use this component.
+*
+* The IPIF hardware provides a building block for all hardware devices such
+* that each device does not need to reimplement these building blocks. The
+* IPIF contains other building blocks, such as FIFOs and DMA channels, which
+* are also common to many devices. These blocks are implemented as separate
+* hardware blocks and instantiated within the IPIF. The primary hardware of
+* the IPIF which is implemented by this software component is the interrupt
+* architecture. Since there are many blocks of a device which may generate
+* interrupts, all the interrupt processing is contained in the common part
+* of the device, the IPIF. This interrupt processing is for the device level
+* only and does not include any processing for the interrupt controller.
+*
+* A device is a mechanism such as an Ethernet MAC. The device is made
+* up of several parts which include an IPIF and the IP. The IPIF contains most
+* of the device infrastructure which is common to all devices, such as
+* interrupt processing, DMA channels, and FIFOs. The infrastructure may also
+* be referred to as IPIF internal blocks since they are part of the IPIF and
+* are separate blocks that can be selected based upon the needs of the device.
+* The IP of the device is the logic that is unique to the device and interfaces
+* to the IPIF of the device.
+*
+* In general, there are two levels of registers within the IPIF. The first
+* level, referred to as the device level, contains registers which are for the
+* entire device. The second level, referred to as the IP level, contains
+* registers which are specific to the IP of the device. The two levels of
+* registers are designed to be hierarchical such that the device level is
+* a more general register set above the more specific registers of the IP.
+* The IP level of registers provides functionality which is typically common
+* across all devices and allows IP designers to focus on the unique aspects
+* of the IP.
+*
+* The interrupt registers of the IPIF are parameterizable such that the only
+* the number of bits necessary for the device are implemented. The functions
+* of this component do not attempt to validate that the passed in arguments are
+* valid based upon the number of implemented bits. This is necessary to
+* maintain the level of performance required for the common components. Bits
+* of the registers are assigned starting at the least significant bit of the
+* registers.
+*
+* <b>Critical Sections</b>
+*
+* It is the responsibility of the device driver designer to use critical
+* sections as necessary when calling functions of the IPIF. This component
+* does not use critical sections and it does access registers using
+* read-modify-write operations. Calls to IPIF functions from a main thread
+* and from an interrupt context could produce unpredictable behavior such that
+* the caller must provide the appropriate critical sections.
+*
+* <b>Mutual Exclusion</b>
+*
+* The functions of the IPIF are not thread safe such that the caller of all
+* functions is responsible for ensuring mutual exclusion for an IPIF. Mutual
+* exclusion across multiple IPIF components is not necessary.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.23b jhl 02/27/01 Repartitioned to reduce size
+* 1.23b rpm 08/17/04 Doxygenated for inclusion in API documentation
+* 1.23b xd 10/27/04 Improve Doxygen format
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xipif_v1_23_b.h"
+#include "xio.h"
+
+/************************** Constant Definitions *****************************/
+
+/* the following constant is used to generate bit masks for register testing
+ * in the self test functions, it defines the starting bit mask that is to be
+ * shifted from the LSB to MSB in creating a register test mask
+ */
+#define XIIF_V123B_FIRST_BIT_MASK 1UL
+
+
+/* the following constant defines the maximum number of bits which may be
+ * used in the registers at the device and IP levels, this is based upon the
+ * number of bits available in the registers
+ */
+#define XIIF_V123B_MAX_REG_BIT_COUNT 32
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/************************** Function Prototypes ******************************/
+
+static int IpIntrSelfTest(u32 RegBaseAddress, u32 IpRegistersWidth);
+
+/*****************************************************************************/
+/**
+*
+* This function performs a self test on the specified IPIF component. Many
+* of the registers in the IPIF are tested to ensure proper operation. This
+* function is destructive because the IPIF is reset at the start of the test
+* and at the end of the test to ensure predictable results. The IPIF reset
+* also resets the entire device that uses the IPIF. This function exits with
+* all interrupts for the device disabled.
+*
+* @param RegBaseAddress is the base address of the device's IPIF registers
+*
+* @param IpRegistersWidth contains the number of bits in the IP interrupt
+* registers of the device. The hardware is parameterizable such that
+* only the number of bits necessary to support a device are implemented.
+* This value must be between 0 and 32 with 0 indicating there are no IP
+* interrupt registers used.
+*
+* @return
+*
+* A value of XST_SUCCESS indicates the test was successful with no errors.
+* Any one of the following error values may also be returned.
+* <br><br>
+* - XST_IPIF_RESET_REGISTER_ERROR The value of a register at reset was
+* not valid
+* <br><br>
+* - XST_IPIF_IP_STATUS_ERROR A write to the IP interrupt status
+* register did not read back correctly
+* <br><br>
+* - XST_IPIF_IP_ACK_ERROR One or more bits in the IP interrupt
+* status register did not reset when acked
+* <br><br>
+* - XST_IPIF_IP_ENABLE_ERROR The IP interrupt enable register
+* did not read back correctly based upon
+* what was written to it
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XIpIfV123b_SelfTest(u32 RegBaseAddress, u8 IpRegistersWidth)
+{
+	int Status;
+
+	/* the IP register width cannot exceed a 32 bit register */
+
+	XASSERT_NONVOID(IpRegistersWidth <= XIIF_V123B_MAX_REG_BIT_COUNT);
+
+	/* start from a known state: reset the IPIF, which also disables
+	 * interrupts globally
+	 */
+	XIIF_V123B_RESET(RegBaseAddress);
+
+	/* run the self test on the IP interrupt registers; a failure status
+	 * is propagated to the caller unchanged
+	 */
+	Status = IpIntrSelfTest(RegBaseAddress, IpRegistersWidth);
+	if (Status == XST_SUCCESS) {
+		/* no errors: reset again so the IPIF is left in a known
+		 * state on exit
+		 */
+		XIIF_V123B_RESET(RegBaseAddress);
+	}
+
+	return Status;
+}
+
+/*****************************************************************************
+*
+* Perform a self test on the IP interrupt registers of the IPIF. This
+* function modifies registers of the IPIF such that they are not guaranteed
+* to be in the same state when it returns. Any bits in the IP interrupt
+* status register which are set are assumed to be set by default after a reset
+* and are not tested in the test.
+*
+* @param RegBaseAddress is the base address of the device's IPIF registers
+*
+* @param IpRegistersWidth contains the number of bits in the IP interrupt
+* registers of the device. The hardware is parameterizable such that
+* only the number of bits necessary to support a device are implemented.
+* This value must be between 0 and 32 with 0 indicating there are no IP
+* interrupt registers used.
+*
+* @return
+*
+* A status indicating XST_SUCCESS if the test was successful. Otherwise, one
+* of the following values is returned.
+* - XST_IPIF_RESET_REGISTER_ERROR The value of a register at reset was
+* not valid
+* <br><br>
+* - XST_IPIF_IP_STATUS_ERROR A write to the IP interrupt status
+* register did not read back correctly
+* <br><br>
+* - XST_IPIF_IP_ACK_ERROR One or more bits in the IP status
+* register did not reset when acked
+* <br><br>
+* - XST_IPIF_IP_ENABLE_ERROR The IP interrupt enable register
+* did not read back correctly based upon
+* what was written to it
+* @note
+*
+* None.
+*
+******************************************************************************/
+static int IpIntrSelfTest(u32 RegBaseAddress, u32 IpRegistersWidth)
+{
+ /* ensure that the IP interrupt enable register is zero
+ * as it should be at reset, the interrupt status is dependent upon the
+ * IP such that its reset value is not known
+ */
+ if (XIIF_V123B_READ_IIER(RegBaseAddress) != 0) {
+ return XST_IPIF_RESET_REGISTER_ERROR;
+ }
+
+ /* if there are any used IP interrupts, then test all of the interrupt
+ * bits in all testable registers
+ */
+ if (IpRegistersWidth > 0) {
+ u32 BitCount;
+ u32 IpInterruptMask = XIIF_V123B_FIRST_BIT_MASK;
+ u32 Mask = XIIF_V123B_FIRST_BIT_MASK; /* bits assigned LSB to MSB */
+ u32 InterruptStatus;
+
+ /* generate the register masks to be used for IP register tests, the
+ * number of bits supported by the hardware is parameterizable such
+ * that only that number of bits are implemented in the registers, the
+ * bits are allocated starting at the LSB of the registers
+ */
+ for (BitCount = 1; BitCount < IpRegistersWidth; BitCount++) {
+ Mask = Mask << 1;
+ IpInterruptMask |= Mask;
+ }
+
+ /* get the current IP interrupt status register contents, any bits
+ * already set must default to 1 at reset in the device and these
+ * bits can't be tested in the following test, remove these bits from
+ * the mask that was generated for the test
+ */
+ InterruptStatus = XIIF_V123B_READ_IISR(RegBaseAddress);
+ IpInterruptMask &= ~InterruptStatus;
+
+ /* set the bits in the IP interrupt status register and verify them by
+ * reading the register again, all bits of the register are latched
+ */
+ XIIF_V123B_WRITE_IISR(RegBaseAddress, IpInterruptMask);
+ InterruptStatus = XIIF_V123B_READ_IISR(RegBaseAddress);
+ if ((InterruptStatus & IpInterruptMask) != IpInterruptMask)
+ {
+ return XST_IPIF_IP_STATUS_ERROR;
+ }
+
+ /* test to ensure that the bits set in the IP interrupt status register
+ * can be cleared by acknowledging them in the IP interrupt status
+ * register then read it again and verify it was cleared
+ */
+ XIIF_V123B_WRITE_IISR(RegBaseAddress, IpInterruptMask);
+ InterruptStatus = XIIF_V123B_READ_IISR(RegBaseAddress);
+ if ((InterruptStatus & IpInterruptMask) != 0) {
+ return XST_IPIF_IP_ACK_ERROR;
+ }
+
+ /* set the IP interrupt enable set register and then read the IP
+ * interrupt enable register and verify the interrupts were enabled
+ */
+ XIIF_V123B_WRITE_IIER(RegBaseAddress, IpInterruptMask);
+ if (XIIF_V123B_READ_IIER(RegBaseAddress) != IpInterruptMask) {
+ return XST_IPIF_IP_ENABLE_ERROR;
+ }
+
+ /* clear the IP interrupt enable register and then read the
+ * IP interrupt enable register and verify the interrupts were disabled
+ */
+ XIIF_V123B_WRITE_IIER(RegBaseAddress, 0);
+ if (XIIF_V123B_READ_IIER(RegBaseAddress) != 0) {
+ return XST_IPIF_IP_ENABLE_ERROR;
+ }
+ }
+ return XST_SUCCESS;
+}
--- /dev/null
+/* $Id: xipif_v1_23_b.h,v 1.5 2005/09/26 16:04:52 trujillo Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xipif_v1_23_b.h
+*
+* The XIpIf component encapsulates the IPIF, which is the standard interface
+* that IP must adhere to when connecting to a bus. The purpose of this
+* component is to encapsulate the IPIF processing such that maintainability
+* is increased. This component does not provide a lot of abstraction
+* from the details of the IPIF as it is considered a building block for
+* device drivers. A device driver designer must be familiar with the
+* details of the IPIF hardware to use this component.
+*
+* The IPIF hardware provides a building block for all hardware devices such
+* that each device does not need to reimplement these building blocks. The
+* IPIF contains other building blocks, such as FIFOs and DMA channels, which
+* are also common to many devices. These blocks are implemented as separate
+* hardware blocks and instantiated within the IPIF. The primary hardware of
+* the IPIF which is implemented by this software component is the interrupt
+* architecture. Since there are many blocks of a device which may generate
+* interrupts, all the interrupt processing is contained in the common part
+* of the device, the IPIF. This interrupt processing is for the device level
+* only and does not include any processing for the interrupt controller.
+*
+* A device is a mechanism such as an Ethernet MAC. The device is made
+* up of several parts which include an IPIF and the IP. The IPIF contains most
+* of the device infrastructure which is common to all devices, such as
+* interrupt processing, DMA channels, and FIFOs. The infrastructure may also
+* be referred to as IPIF internal blocks since they are part of the IPIF and
+* are separate blocks that can be selected based upon the needs of the device.
+* The IP of the device is the logic that is unique to the device and interfaces
+* to the IPIF of the device.
+*
+* In general, there are two levels of registers within the IPIF. The first
+* level, referred to as the device level, contains registers which are for the
+* entire device. The second level, referred to as the IP level, contains
+* registers which are specific to the IP of the device. The two levels of
+* registers are designed to be hierarchical such that the device level is
+* a more general register set above the more specific registers of the IP.
+* The IP level of registers provides functionality which is typically common
+* across all devices and allows IP designers to focus on the unique aspects
+* of the IP.
+*
+* <b>Critical Sections</b>
+*
+* It is the responsibility of the device driver designer to use critical
+* sections as necessary when calling functions of the IPIF. This component
+* does not use critical sections and it does access registers using
+* read-modify-write operations. Calls to IPIF functions from a main thread
+* and from an interrupt context could produce unpredictable behavior such that
+* the caller must provide the appropriate critical sections.
+*
+* <b>Mutual Exclusion</b>
+*
+* The functions of the IPIF are not thread safe such that the caller of all
+* functions is responsible for ensuring mutual exclusion for an IPIF. Mutual
+* exclusion across multiple IPIF components is not necessary.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ---------------------------------------------------------
+* 1.23b jhl 02/27/01 Repartitioned to minimize size
+* 1.23b rpm 07/16/04 Changed ifdef for circular inclusion to be more qualified
+* 1.23b rpm 08/17/04 Doxygenated for inclusion of API documentation
+* 1.23b xd 10/27/04 Improve Doxygen format
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XIPIF_V123B_H /* prevent circular inclusions */
+#define XIPIF_V123B_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xversion.h"
+
+/************************** Constant Definitions *****************************/
+
+/** @name Register Offsets
+ *
+ * The following constants define the register offsets for the registers of the
+ * IPIF, there are some holes in the memory map for reserved addresses to allow
+ * other registers to be added and still match the memory map of the interrupt
+ * controller registers
+ * @{
+ */
+#define XIIF_V123B_DISR_OFFSET 0UL /**< device interrupt status register */
+#define XIIF_V123B_DIPR_OFFSET 4UL /**< device interrupt pending register */
+#define XIIF_V123B_DIER_OFFSET 8UL /**< device interrupt enable register */
+#define XIIF_V123B_DIIR_OFFSET 24UL /**< device interrupt ID register */
+#define XIIF_V123B_DGIER_OFFSET 28UL /**< device global interrupt enable register */
+#define XIIF_V123B_IISR_OFFSET 32UL /**< IP interrupt status register */
+#define XIIF_V123B_IIER_OFFSET 40UL /**< IP interrupt enable register */
+#define XIIF_V123B_RESETR_OFFSET 64UL /**< reset register */
+/* @} */
+
+/**
+ * The value used for the reset register to reset the IPIF
+ */
+#define XIIF_V123B_RESET_MASK 0xAUL
+
+/**
+ * The following constant is used for the device global interrupt enable
+ * register, to enable all interrupts for the device, this is the only bit
+ * in the register
+ */
+#define XIIF_V123B_GINTR_ENABLE_MASK 0x80000000UL
+
+/**
+ * The mask to identify each internal IPIF error condition in the device
+ * registers of the IPIF. Interrupts are assigned in the register from LSB
+ * to the MSB
+ */
+#define XIIF_V123B_ERROR_MASK 1UL /**< LSB of the register */
+
+/** @name Interrupt IDs
+ *
+ * The interrupt IDs which identify each internal IPIF condition, this value
+ * must correlate with the mask constant for the error
+ * @{
+ */
+#define XIIF_V123B_ERROR_INTERRUPT_ID 0 /**< interrupt bit #, (LSB = 0) */
+#define XIIF_V123B_NO_INTERRUPT_ID 128 /**< no interrupts are pending */
+/* @} */
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/*****************************************************************************/
+/**
+*
+* Reset the IPIF component and hardware. This is a destructive operation that
+* could cause the loss of data since resetting the IPIF of a device also
+* resets the device using the IPIF and any blocks, such as FIFOs or DMA
+* channels, within the IPIF. All registers of the IPIF will contain their
+* reset value when this function returns.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+#define XIIF_V123B_RESET(RegBaseAddress) \
+	XIo_Out32((RegBaseAddress) + XIIF_V123B_RESETR_OFFSET, XIIF_V123B_RESET_MASK) /* destructive device reset */
+
+/*****************************************************************************/
+/**
+*
+* This macro sets the device interrupt status register to the value.
+* This register indicates the status of interrupt sources for a device
+* which contains the IPIF. The status is independent of whether interrupts
+* are enabled and could be used for polling a device at a higher level rather
+* than a more detailed level.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. With the exception of some internal IPIF
+* conditions, the contents of this register are not latched but indicate
+* the live status of the interrupt sources within the device. Writing any of
+* the non-latched bits of the register will have no effect on the register.
+*
+* For the latched bits of this register only, setting a bit which is zero
+* within this register causes an interrupt to generated. The device global
+* interrupt enable register and the device interrupt enable register must be set
+* appropriately to allow an interrupt to be passed out of the device. The
+* interrupt is cleared by writing to this register with the bits to be
+* cleared set to a one and all others to zero. This register implements a
+* toggle on write functionality meaning any bits which are set in the value
+* written cause the bits in the register to change to the opposite state.
+*
+* This function writes the specified value to the register such that
+* some bits may be set and others cleared. It is the caller's responsibility
+* to get the value of the register prior to setting the value to prevent a
+* destructive behavior.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Status contains the value to be written to the interrupt status
+* register of the device. The only bits which can be written are
+* the latched bits which contain the internal IPIF conditions. The
+* following values may be used to set the status register or clear an
+* interrupt condition.
+* - XIIF_V123B_ERROR_MASK Indicates a device error in the IPIF
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+#define XIIF_V123B_WRITE_DISR(RegBaseAddress, Status) \
+ XIo_Out32((RegBaseAddress) + XIIF_V123B_DISR_OFFSET, (Status)) /* latched bits toggle on write */
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the device interrupt status register contents.
+* This register indicates the status of interrupt sources for a device
+* which contains the IPIF. The status is independent of whether interrupts
+* are enabled and could be used for polling a device at a higher level.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. With the exception of some internal IPIF
+* conditions, the contents of this register are not latched but indicate
+* the live status of the interrupt sources within the device.
+*
+* For only the latched bits of this register, the interrupt may be cleared by
+* writing to these bits in the status register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* A status which contains the value read from the interrupt status register of
+* the device. The bit definitions are specific to the device with
+* the exception of the latched internal IPIF condition bits. The following
+* values may be used to detect internal IPIF conditions in the status.
+* <br><br>
+* - XIIF_V123B_ERROR_MASK Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_READ_DISR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIF_V123B_DISR_OFFSET) /* live status; only IPIF bits latched */
+
+/*****************************************************************************/
+/**
+*
+* This function sets the device interrupt enable register contents.
+* This register controls which interrupt sources of the device are allowed to
+* generate an interrupt. The device global interrupt enable register must also
+* be set appropriately for an interrupt to be passed out of the device.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. Setting a bit in this register enables that
+* interrupt source to generate an interrupt. Clearing a bit in this register
+* disables interrupt generation for that interrupt source.
+*
+* This function writes only the specified value to the register such that
+* some interrupts source may be enabled and others disabled. It is the
+* caller's responsibility to get the value of the interrupt enable register
+* prior to setting the value to prevent an destructive behavior.
+*
+* An interrupt source may not be enabled to generate an interrupt, but can
+* still be polled in the interrupt status register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Enable contains the value to be written to the interrupt
+* enable register of the device. The bit definitions are specific to
+* the device with the exception of the internal IPIF conditions. The
+* following values may be used to enable the internal IPIF condition
+* interrupts.
+* <br><br>
+* - XIIF_V123B_ERROR_MASK Indicates a device error in the IPIF
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* Signature: u32 XIIF_V123B_WRITE_DIER(u32 RegBaseAddress,
+* u32 Enable)
+*
+******************************************************************************/
+#define XIIF_V123B_WRITE_DIER(RegBaseAddress, Enable) \
+ XIo_Out32((RegBaseAddress) + XIIF_V123B_DIER_OFFSET, (Enable))
+
+/*****************************************************************************/
+/**
+*
+* This function gets the device interrupt enable register contents.
+* This register controls which interrupt sources of the device
+* are allowed to generate an interrupt. The device global interrupt enable
+* register and the device interrupt enable register must also be set
+* appropriately for an interrupt to be passed out of the device.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. Setting a bit in this register enables that
+* interrupt source to generate an interrupt if the global enable is set
+* appropriately. Clearing a bit in this register disables interrupt generation
+* for that interrupt source regardless of the global interrupt enable.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* The value read from the interrupt enable register of the device. The bit
+* definitions are specific to the device with the exception of the internal
+* IPIF conditions. The following values may be used to determine from the
+* value if the internal IPIF conditions interrupts are enabled.
+* <br><br>
+* - XIIF_V123B_ERROR_MASK Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_READ_DIER(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIF_V123B_DIER_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This function gets the device interrupt pending register contents.
+* This register indicates the pending interrupt sources, those that are waiting
+* to be serviced by the software, for a device which contains the IPIF.
+* An interrupt must be enabled in the interrupt enable register of the IPIF to
+* be pending.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device which contains the IPIF. With the exception of some internal IPIF
+* conditions, the contents of this register are not latched since the condition
+* is latched in the IP interrupt status register, by an internal block of the
+* IPIF such as a FIFO or DMA channel, or by the IP of the device. This register
+* is read only and is not latched, but it is necessary to acknowledge (clear)
+* the interrupt condition by performing the appropriate processing for the IP
+* or block within the IPIF.
+*
+* This register can be thought of as the contents of the interrupt status
+* register ANDed with the contents of the interrupt enable register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* The value read from the interrupt pending register of the device. The bit
+* definitions are specific to the device with the exception of the latched
+* internal IPIF condition bits. The following values may be used to detect
+* internal IPIF conditions in the value.
+* <br><br>
+* - XIIF_V123B_ERROR_MASK Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_READ_DIPR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIF_V123B_DIPR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the device interrupt ID for the highest priority interrupt
+* which is pending from the interrupt ID register. This function provides
+* priority resolution such that faster interrupt processing is possible.
+* Without priority resolution, it is necessary for the software to read the
+* interrupt pending register and then check each interrupt source to determine
+* if an interrupt is pending. Priority resolution becomes more important as the
+* number of interrupt sources becomes larger.
+*
+* Interrupt priorities are based upon the bit position of the interrupt in the
+* interrupt pending register with bit 0 being the highest priority. The
+* interrupt ID is the priority of the interrupt, 0 - 31, with 0 being the
+* highest priority. The interrupt ID register is live rather than latched such
+* that multiple calls to this function may not yield the same results. A
+* special value, outside of the interrupt priority range of 0 - 31, is
+* contained in the register which indicates that no interrupt is pending. This
+* may be useful for allowing software to continue processing interrupts in a
+* loop until there are no longer any interrupts pending.
+*
+* The interrupt ID is designed to allow a function pointer table to be used
+* in the software such that the interrupt ID is used as an index into that
+* table. The function pointer table could contain an instance pointer, such
+* as to DMA channel, and a function pointer to the function which handles
+* that interrupt. This design requires the interrupt processing of the device
+* driver to be partitioned into smaller more granular pieces based upon
+* hardware used by the device, such as DMA channels and FIFOs.
+*
+* It is not mandatory that this function be used by the device driver software.
+* It may choose to read the pending register and resolve the pending interrupt
+* priorities on its own.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* An interrupt ID, 0 - 31, which identifies the highest priority interrupt
+* which is pending. A value of XIIF_NO_INTERRUPT_ID indicates that there is
+* no interrupt pending. The following values may be used to identify the
+* interrupt ID for the internal IPIF interrupts.
+* <br><br>
+* - XIIF_V123B_ERROR_INTERRUPT_ID Indicates a device error in the IPIF
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_READ_DIIR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIF_V123B_DIIR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This function disables all interrupts for the device by writing to the global
+* interrupt enable register. This register provides the ability to disable
+* interrupts without any modifications to the interrupt enable register such
+* that it is minimal effort to restore the interrupts to the previous enabled
+* state. The corresponding function, XIpIf_GlobalIntrEnable, is provided to
+* restore the interrupts to the previous enabled state. This function is
+* designed to be used in critical sections of device drivers such that it is
+* not necessary to disable other device interrupts.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_GINTR_DISABLE(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XIIF_V123B_DGIER_OFFSET, 0)
+
+/*****************************************************************************/
+/**
+*
+* This function writes to the global interrupt enable register to enable
+* interrupts from the device. This register provides the ability to enable
+* interrupts without any modifications to the interrupt enable register such
+* that it is minimal effort to restore the interrupts to the previous enabled
+* state. This function does not enable individual interrupts as the interrupt
+* enable register must be set appropriately. This function is designed to be
+* used in critical sections of device drivers such that it is not necessary to
+* disable other device interrupts.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_GINTR_ENABLE(RegBaseAddress) \
+ XIo_Out32((RegBaseAddress) + XIIF_V123B_DGIER_OFFSET, \
+ XIIF_V123B_GINTR_ENABLE_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function determines if interrupts are enabled at the global level by
+* reading the global interrupt register. This register provides the ability to
+* disable interrupts without any modifications to the interrupt enable register
+* such that it is minimal effort to restore the interrupts to the previous
+* enabled state.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* TRUE if interrupts are enabled for the IPIF, FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_IS_GINTR_ENABLED(RegBaseAddress) \
+ (XIo_In32((RegBaseAddress) + XIIF_V123B_DGIER_OFFSET) == \
+ XIIF_V123B_GINTR_ENABLE_MASK)
+
+/*****************************************************************************/
+/**
+*
+* This function sets the IP interrupt status register to the specified value.
+* This register indicates the status of interrupt sources for the IP of the
+* device. The IP is defined as the part of the device that connects to the
+* IPIF. The status is independent of whether interrupts are enabled such that
+* the status register may also be polled when interrupts are not enabled.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IP. All bits of this register are latched. Setting a bit which is zero
+* within this register causes an interrupt to be generated. The device global
+* interrupt enable register and the device interrupt enable register must be set
+* appropriately to allow an interrupt to be passed out of the device. The
+* interrupt is cleared by writing to this register with the bits to be
+* cleared set to a one and all others to zero. This register implements a
+* toggle on write functionality meaning any bits which are set in the value
+* written cause the bits in the register to change to the opposite state.
+*
+* This function writes only the specified value to the register such that
+* some status bits may be set and others cleared. It is the caller's
+* responsibility to get the value of the register prior to setting the value
+* to prevent any destructive behavior.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Status contains the value to be written to the IP interrupt status
+* register. The bit definitions are specific to the device IP.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_WRITE_IISR(RegBaseAddress, Status) \
+ XIo_Out32((RegBaseAddress) + XIIF_V123B_IISR_OFFSET, (Status))
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the contents of the IP interrupt status register.
+* This register indicates the status of interrupt sources for the IP of the
+* device. The IP is defined as the part of the device that connects to the
+* IPIF. The status is independent of whether interrupts are enabled such
+* that the status register may also be polled when interrupts are not enabled.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* device. All bits of this register are latched. Writing a 1 to a bit within
+* this register causes an interrupt to be generated if enabled in the interrupt
+* enable register and the global interrupt enable is set. Since the status is
+* latched, each status bit must be acknowledged in order for the bit in the
+* status register to be updated. Each bit can be acknowledged by writing a
+* 0 to the bit in the status register.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* A status which contains the value read from the IP interrupt status register.
+* The bit definitions are specific to the device IP.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_READ_IISR(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIF_V123B_IISR_OFFSET)
+
+/*****************************************************************************/
+/**
+*
+* This macro sets the IP interrupt enable register contents. This register
+* controls which interrupt sources of the IP are allowed to generate an
+* interrupt. The global interrupt enable register and the device interrupt
+* enable register must also be set appropriately for an interrupt to be
+* passed out of the device containing the IPIF and the IP.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IP. Setting a bit in this register enables the interrupt source to generate
+* an interrupt. Clearing a bit in this register disables interrupt generation
+* for that interrupt source.
+*
+* This function writes only the specified value to the register such that
+* some interrupt sources may be enabled and others disabled. It is the
+* caller's responsibility to get the value of the interrupt enable register
+* prior to setting the value to prevent any destructive behavior.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @param Enable contains the value to be written to the IP interrupt enable
+* register. The bit definitions are specific to the device IP.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+#define XIIF_V123B_WRITE_IIER(RegBaseAddress, Enable) \
+ XIo_Out32((RegBaseAddress) + XIIF_V123B_IIER_OFFSET, (Enable))
+
+/*****************************************************************************/
+/**
+*
+* This macro gets the IP interrupt enable register contents. This register
+* controls which interrupt sources of the IP are allowed to generate an
+* interrupt. The global interrupt enable register and the device interrupt
+* enable register must also be set appropriately for an interrupt to be
+* passed out of the device containing the IPIF and the IP.
+*
+* Each bit of the register correlates to a specific interrupt source within the
+* IP. Setting a bit in this register enables the interrupt source to generate
+* an interrupt. Clearing a bit in this register disables interrupt generation
+* for that interrupt source.
+*
+* @param RegBaseAddress contains the base address of the IPIF registers.
+*
+* @return
+*
+* The contents read from the IP interrupt enable register. The bit definitions
+* are specific to the device IP.
+*
+* @note
+*
+* Signature: u32 XIIF_V123B_READ_IIER(u32 RegBaseAddress)
+*
+******************************************************************************/
+#define XIIF_V123B_READ_IIER(RegBaseAddress) \
+ XIo_In32((RegBaseAddress) + XIIF_V123B_IIER_OFFSET)
+
+/************************** Function Prototypes ******************************/
+
+/**
+ * Initialization Functions
+ */
+int XIpIfV123b_SelfTest(u32 RegBaseAddress, u8 IpRegistersWidth);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xlldma.c
+*
+* This file implements initialization and control related functions. For more
+* information on this driver, see xlldma.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 12/21/06 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+
+#include "xlldma.h"
+#include "xenv.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * This function initializes a DMA engine. This function must be called
+ * prior to using a DMA engine. Initialization of an engine includes setting
+ * up the register base address, setting up the instance data, and ensuring the
+ * hardware is in a quiescent state.
+ *
+ * @param InstancePtr is a pointer to the DMA engine instance to be worked on.
+ * @param BaseAddress is where the registers for this engine can be found.
+ *        If address translation is being used, then this parameter must
+ *        reflect the virtual base address.
+ * @return None.
+ *
+ *****************************************************************************/
+void XLlDma_Initialize(XLlDma * InstancePtr, u32 BaseAddress)
+{
+	/* Set up the instance data, starting from a known-zero state */
+	memset(InstancePtr, 0, sizeof(XLlDma));
+	InstancePtr->RegBase = BaseAddress;
+
+	/* Initialize the TX and RX ring structures; both rings start stopped */
+	InstancePtr->TxBdRing.RunState = XST_DMA_SG_IS_STOPPED;
+	InstancePtr->TxBdRing.ChanBase = BaseAddress + XLLDMA_TX_OFFSET;
+	InstancePtr->TxBdRing.IsRxChannel = 0;
+	InstancePtr->RxBdRing.RunState = XST_DMA_SG_IS_STOPPED;
+	InstancePtr->RxBdRing.ChanBase = BaseAddress + XLLDMA_RX_OFFSET;
+	InstancePtr->RxBdRing.IsRxChannel = 1;
+
+	/* Reset the device to force the quiescent hardware state, and return */
+	XLlDma_Reset(InstancePtr);
+}
+
+/*****************************************************************************/
+/**
+* Reset both TX and RX channels of a DMA engine.
+*
+* Any DMA transaction in progress aborts immediately. The DMA engine is in
+* stop state after the reset.
+*
+* @param InstancePtr is a pointer to the DMA engine instance to be worked on.
+*
+* @return None.
+*
+* @note
+* - If the hardware is not working properly, this function will enter
+* infinite loop and never return.
+* - After the reset, the Normal mode is enabled, and the overflow error
+* for both TX/RX channels are disabled.
+* - After the reset, the DMA engine is no longer in pausing state, if
+* the DMA engine is paused before the reset operation.
+* - After the reset, the coalescing count value and the delay timeout
+* value are both set to 1 for TX and RX channels.
+* - After the reset, all interrupts are disabled.
+*
+******************************************************************************/
+void XLlDma_Reset(XLlDma * InstancePtr)
+{
+	u32 IrqStatus;
+	XLlDma_BdRing *TxRingPtr, *RxRingPtr;
+
+	TxRingPtr = &XLlDma_mGetTxRing(InstancePtr);
+	RxRingPtr = &XLlDma_mGetRxRing(InstancePtr);
+
+	/* Save the locations of the current BDs both rings are working on
+	 * before the reset so later we can resume the rings smoothly.
+	 */
+	XLlDma_mBdRingSnapShotCurrBd(TxRingPtr);
+	XLlDma_mBdRingSnapShotCurrBd(RxRingPtr);
+
+	/* Start the reset process, then wait for completion */
+	XLlDma_mSetCr(InstancePtr, XLLDMA_DMACR_SW_RESET_MASK);
+
+	/* Busy-wait until hardware clears the self-clearing reset bit */
+	while ((XLlDma_mGetCr(InstancePtr) & XLLDMA_DMACR_SW_RESET_MASK)) {
+	}
+
+	/* Disable all interrupts after issuing the software reset */
+	XLlDma_mBdRingIntDisable(TxRingPtr, XLLDMA_CR_IRQ_ALL_EN_MASK);
+	XLlDma_mBdRingIntDisable(RxRingPtr, XLLDMA_CR_IRQ_ALL_EN_MASK);
+
+	/* Clear the interrupt registers of both channels, as the software
+	 * reset does not clear any register values. Skipping this would
+	 * leave stale interrupts asserted after the reset if any were
+	 * pending beforehand.
+	 */
+	IrqStatus = XLlDma_mBdRingGetIrq(TxRingPtr);
+	XLlDma_mBdRingAckIrq(TxRingPtr, IrqStatus);
+	IrqStatus = XLlDma_mBdRingGetIrq(RxRingPtr);
+	XLlDma_mBdRingAckIrq(RxRingPtr, IrqStatus);
+
+	/* Enable Normal mode, and disable overflow errors for both channels */
+	XLlDma_mSetCr(InstancePtr, XLLDMA_DMACR_TAIL_PTR_EN_MASK |
+		      XLLDMA_DMACR_RX_OVERFLOW_ERR_DIS_MASK |
+		      XLLDMA_DMACR_TX_OVERFLOW_ERR_DIS_MASK);
+
+	/* Set TX/RX channel interrupt coalescing: count = 1, delay = 1 */
+	XLlDma_BdRingSetCoalesce(TxRingPtr, 1, 1);
+	XLlDma_BdRingSetCoalesce(RxRingPtr, 1, 1);
+
+	TxRingPtr->RunState = XST_DMA_SG_IS_STOPPED;
+	RxRingPtr->RunState = XST_DMA_SG_IS_STOPPED;
+}
+
+/*****************************************************************************/
+/**
+* Pause DMA transactions on both channels. The DMA enters the pausing state
+* immediately. So if a DMA transaction is in progress, it will be left
+* unfinished and will be continued once the DMA engine is resumed
+* (see XLlDma_Resume()).
+*
+* @param InstancePtr is a pointer to the DMA engine instance to be worked on.
+*
+* @return None.
+*
+* @note
+* - If the hardware is not working properly, this function will enter
+* infinite loop and never return.
+* - After the DMA is paused, DMA channels still could accept more BDs
+* from software (see XLlDma_BdRingToHw()), but new BDs will not be
+* processed until the DMA is resumed (see XLlDma_Resume()).
+*
+*****************************************************************************/
+void XLlDma_Pause(XLlDma * InstancePtr)
+{
+	u32 RegValue;
+	XLlDma_BdRing *TxRingPtr, *RxRingPtr;
+
+	TxRingPtr = &XLlDma_mGetTxRing(InstancePtr);
+	RxRingPtr = &XLlDma_mGetRxRing(InstancePtr);
+
+	/* Do nothing if both channels already stopped */
+	if ((TxRingPtr->RunState == XST_DMA_SG_IS_STOPPED) &&
+	    (RxRingPtr->RunState == XST_DMA_SG_IS_STOPPED)) {
+		return;
+	}
+
+	/* Enable pause bits for both TX/RX channels */
+	RegValue = XLlDma_mGetCr(InstancePtr);
+	XLlDma_mSetCr(InstancePtr, RegValue | XLLDMA_DMACR_TX_PAUSE_MASK |
+		      XLLDMA_DMACR_RX_PAUSE_MASK);
+
+	/* Busy-wait until the Write Command Queue of the RX channel reports
+	 * empty (WRQ_EMPTY set), which indicates that all the write data
+	 * associated with the pending commands has been flushed. */
+	while (!(XLlDma_mBdRingGetIrq(RxRingPtr) & XLLDMA_IRQ_WRQ_EMPTY_MASK));
+
+	TxRingPtr->RunState = XST_DMA_SG_IS_STOPPED;
+	RxRingPtr->RunState = XST_DMA_SG_IS_STOPPED;
+}
+
+/*****************************************************************************/
+/**
+* Resume DMA transactions on both channels. Any interrupted DMA transaction
+* caused by DMA pause operation (see XLlDma_Pause()) and all committed
+* transactions after DMA is paused will be continued upon the return of this
+* function.
+*
+* @param InstancePtr is a pointer to the DMA engine instance to be worked on.
+*
+* @return None.
+*
+*****************************************************************************/
+void XLlDma_Resume(XLlDma * InstancePtr)
+{
+	u32 RegValue;
+	XLlDma_BdRing *TxRingPtr, *RxRingPtr;
+
+	TxRingPtr = &XLlDma_mGetTxRing(InstancePtr);
+	RxRingPtr = &XLlDma_mGetRxRing(InstancePtr);
+
+	/* Do nothing if both channels are already running */
+	if ((TxRingPtr->RunState == XST_DMA_SG_IS_STARTED) &&
+	    (RxRingPtr->RunState == XST_DMA_SG_IS_STARTED)) {
+		return;
+	}
+
+	/* Clear the pause bits for both the TX and RX channels */
+	RegValue = XLlDma_mGetCr(InstancePtr);
+	XLlDma_mSetCr(InstancePtr, RegValue & ~(XLLDMA_DMACR_TX_PAUSE_MASK |
+						XLLDMA_DMACR_RX_PAUSE_MASK));
+
+	TxRingPtr->RunState = XST_DMA_SG_IS_STARTED;
+	RxRingPtr->RunState = XST_DMA_SG_IS_STARTED;
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xlldma.h
+*
+* The Xilinx Local-Link Scatter Gather DMA driver. This driver supports Soft
+* DMA (SDMA) engines. Each SDMA engine contains two separate DMA channels (TX
+* and RX).
+*
+* This component is designed to be used as a basic building block for
+* designing a device driver. It provides registers accesses such that all
+* DMA processing can be maintained easier, but the device driver designer
+* must still understand all the details of the DMA channel.
+*
+* For a full description of DMA features, please see the hardware spec. This
+* driver supports the following features:
+*
+* - Scatter-Gather DMA (SGDMA)
+* - Interrupts
+* - Programmable interrupt coalescing for SGDMA
+* - Capable of using 32 bit addressing for buffer. (Hardware spec states
+* 36 Bit bus addressing, which includes Msb 4-bits of DMA address
+* configurable on each channel through the Channel Control Registers)
+* - APIs to manage Buffer Descriptors (BD) movement to and from the SGDMA
+* engine
+* - Virtual memory support
+*
+* <b>Transactions</b>
+*
+* To describe a DMA transaction in its simplest form, you need source address,
+* destination address, and the number of bytes to transfer. When using a DMA
+* receive channel, the source address is within some piece of IP Hardware and
+* doesn't require the application explicitly set it. Likewise with a transmit
+* channel and the destination address. So this leaves an application buffer
+* address and the number of bytes to transfer as the primary transaction
+* attributes. Other attributes include:
+*
+* - If the transaction occurs on a bus wider than 32 bits, what are the
+* highest order address bits.
+* - Does this transaction represent the start of a packet, or end of a
+* packet.
+*
+* The object used to describe a transaction is referred to as a Buffer
+* Descriptor (BD). The format of a BD closely matches that of the DMA hardware.
+* Many fields within the BD correspond directly with the same fields within the
+* hardware registers. See xlldmabd.h for a detailed description of and the API
+* for manipulation of these objects.
+*
+* <b>Scatter-Gather DMA</b>
+*
+* SGDMA allows the application to define a list of transactions in memory which
+* the hardware will process without further application intervention. During
+* this time, the application is free to continue adding more work to keep the
+* Hardware busy.
+*
+* Notification of completed transactions can be done either by polling the
+* hardware, or using interrupts that signal a transaction has completed or a
+* series of transactions have been completed.
+*
+* SGDMA processes whole packets. A packet is defined as a series of
+* data bytes that represent a message. SGDMA allows a packet of data to be
+* broken up into one or more transactions. For example, take an Ethernet IP
+* packet which consists of a 14 byte header followed by a 1 or more byte
+* payload. With SGDMA, the application may point a BD to the header and another
+* BD to the payload, then transfer them as a single message. This strategy can
+* make a TCP/IP stack more efficient by allowing it to keep packet headers and
+* data in different memory regions instead of assembling packets into
+* contiguous blocks of memory.
+*
+* <b>SGDMA Ring Management</b>
+*
+* The hardware expects BDs to be setup as a singly linked list. As a BD is
+* completed, the DMA engine will dereference BD.Next and load the next BD to
+* process. This driver uses a fixed buffer ring where all BDs are linked to the
+* next BD in adjacent memory. The last BD in the ring is linked to the first.
+*
+* Within the ring, the driver maintains four groups of BDs. Each group consists
+* of 0 or more adjacent BDs:
+*
+* - Free: Those BDs that can be allocated by the application with
+* XLlDma_BdRingAlloc(). These BDs are under driver control and may not be
+* modified by the application
+*
+* - Pre-process: Those BDs that have been allocated with
+* XLlDma_BdRingAlloc(). These BDs are under application control. The
+* application modifies these BDs in preparation for future DMA
+* transactions.
+*
+* - Hardware: Those BDs that have been enqueued to hardware with
+* XLlDma_BdRingToHw(). These BDs are under hardware control and may be in a
+* state of awaiting hardware processing, in process, or processed by
+* hardware. It is considered an error for the application to change BDs
+* while they are in this group. Doing so can cause data corruption and lead
+* to system instability.
+*
+* - Post-process: Those BDs that have been processed by hardware and have
+* been extracted from the work group with XLlDma_BdRingFromHw(). These BDs
+* are under application control. The application may access these BDs to
+* determine the result of DMA transactions. When the application is
+* finished, XLlDma_BdRingFree() should be called to place them back into
+* the Free group.
+*
+*
+* Normally BDs are moved in the following way:
+* <pre>
+*
+* XLlDma_BdRingAlloc() XLlDma_BdRingToHw()
+* Free ------------------------> Pre-process ----------------------> Hardware
+* |
+* /|\ |
+* | XLlDma_BdRingFree() XLlDma_BdRingFromHw() |
+* +--------------------------- Post-process <----------------------+
+*
+* </pre>
+*
+* The only exception to the flow above is that after BDs are moved from Free
+* group to the Pre-process group, the application may decide for whatever
+* reason that these BDs are not ready and cannot be given to hardware. In this
+* case these BDs can be moved back to the Free group using the
+* XLlDma_BdRingUnAlloc() function to keep the BD ring consistent and recover
+* from the error. See the comments of that function for details.
+*
+* <pre>
+*
+* XLlDma_BdRingUnAlloc()
+* Free <----------------------- Pre-process
+*
+* </pre>
+*
+* The API provides macros that allow BD list traversal. These macros should be
+* used with care as they do not understand where one group ends and another
+* begins.
+*
+* The driver does not cache or keep copies of any BD. When the application
+* modifies BDs returned by XLlDma_BdRingAlloc() or XLlDma_BdRingFromHw(), they
+* are modifying the same BD that hardware accesses.
+*
+* Certain pairs of list modification functions have usage restrictions. See
+* the function headers for XLlDma_BdRingAlloc() and XLlDma_BdRingFromHw() for
+* more information.
+*
+* <b>SGDMA Descriptor Ring Creation</b>
+*
+* During initialization, the function XLlDma_BdRingCreate() is used to set up
+* an application-supplied memory block to contain all BDs for the DMA channel.
+* This function takes as an argument the number of BDs to place in the list. To
+* arrive at this number, the application is given two methods of calculating
+* it.
+*
+* The first method assumes the application has a block of memory and they just
+* want to fit as many BDs as possible into it. The application must calculate
+* the number of BDs that will fit with XLlDma_mBdRingCntCalc(), then supply
+* that number into the list creation function.
+*
+* The second method allows the application to just supply the number directly.
+* The driver assumes the memory block is large enough to contain them all. To
+* double-check, the application should invoke XLlDma_mBdRingMemCalc() to verify
+* the memory block size is adequate.
+*
+* Once the list has been created, it can be used right away to perform DMA
+* transactions. However, there are optional steps that can be done to increase
+* throughput and decrease application code complexity by the use of
+* XLlDma_BdRingClone().
+*
+* BDs have several application accessible attributes that affect how DMA
+* transactions are carried out. Some of these attributes will probably be
+* constant at run-time. The cloning function can be used to copy a template BD
+* to every BD in the ring relieving the application of having to setup
+* transactions from scratch every time a BD is submitted to hardware.
+*
+* Ideally, the only transaction parameters that need to be set by application
+* should be: buffer address, bytes to transfer, and whether the BD is the
+* Start and/or End of a packet.
+*
+* <b>Interrupt Coalescing</b>
+*
+* SGDMA provides control over the frequency of interrupts. On a high speed link
+* significant processor overhead may be used servicing interrupts. Interrupt
+* coalescing provides two mechanisms that help control interrupt frequency:
+*
+* - The packet threshold counter will hold off interrupting the CPU until a
+* programmable number of packets have been processed by the engine.
+* - The packet waitbound timer is used to interrupt the CPU if after a
+* programmable amount of time after processing the last packet, no new
+* packets were processed.
+*
+* <b>Interrupt Service </b>
+*
+* This driver does not service interrupts. This is done typically by an
+* interrupt handler within a higher level driver/application that uses DMA.
+* This driver does provide an API to enable or disable specific interrupts.
+*
+* This interrupt handler provided by the higher level driver/application
+* !!!MUST!!! clear pending interrupts before handling the BDs processed by the
+* DMA. Otherwise the following corner case could raise some issue:
+*
+* - A packet is transmitted(/received) and asserts a TX(/RX) interrupt, and if
+* the interrupt handler deals with the BDs finished by the DMA before clearing
+* the interrupt, another packet could get transmitted(/received) and assert
+* the interrupt between when the BDs are taken care of and when the interrupt
+* clearing operation begins. The interrupt clearing operation will then clear
+* the interrupt raised by the second packet, and its corresponding BDs will
+* never be processed until a new interrupt occurs.
+*
+* Changing the sequence to "Clear interrupts before handle BDs" solves this
+* issue:
+*
+* - If the interrupt of the second packet is raised before the interrupt
+* clearing operation, the descriptors associated with the second packet must
+* have been finished by hardware and be ready for the handler to deal with,
+* and those descriptors will be processed along with the BDs of the first
+* packet during the handling of the interrupt asserted by the first packet.
+*
+* - if the interrupt of the second packet is asserted after the interrupt
+* clearing operation but its BDs are finished before the handler starts to
+* deal with BDs, the packet's buffer descriptors will be handled with
+* those of the first packet during the handling of the interrupt asserted
+* by the first packet.
+*
+* - Otherwise, the BDs of the second packet are not ready when the interrupt
+* handler starts to deal with the BDs of the first packet. Those BDs will
+* be handled the next time the interrupt handler gets invoked, as the
+* interrupt of the second packet is not cleared in the current pass and
+* thereby will cause the handler to get invoked again.
+*
+* Please note if the second case above occurs, the handler will find
+* NO buffer descriptor is finished by the hardware (i.e.,
+* XLlDma_BdRingFromHw() returns 0) during the handling of the interrupt
+* asserted by the second packet. This is valid and the application should NOT
+* consider this a hardware error; there is no need to reset the hardware.
+*
+* <b> Software Initialization </b>
+*
+* The application needs to do the following steps to prepare the DMA engine
+* to be ready to process DMA transactions:
+*
+* - DMA Initialization using XLlDma_Initialize() function. This step
+* initializes a driver instance for the given DMA engine and resets the
+* engine.
+* - BD Ring creation. A BD ring is needed per channel and can be built by
+* calling XLlDma_BdRingCreate(). A parameter passed to this function is the
+* number of BDs that fit in a given memory range, and XLlDma_mBdRingCntCalc()
+* helps calculate this value.
+* - (Optional) BD setup using a template. Once a BD ring is created, the
+* application could populate a template BD and then invoke
+* XLlDma_BdRingClone() to set the same attributes on all BDs on the BD ring.
+* This saves the application some effort to populate all fixed attributes of
+* each BD before passing it to the hardware.
+* - (RX channel only) Prepare BDs with attached data buffers and give them to
+* RX channel. First allocate BDs using XLlDma_BdRingAlloc(), then populate
+* data buffer address, data buffer size and the control word fields of each
+* allocated BD with valid values. Last call XLlDma_BdRingToHw() to give the
+* BDs to the channel.
+* - Enable interrupts if interrupt mode is chosen. The application is
+* responsible for setting up the interrupt system, which includes providing
+* and connecting interrupt handlers and call back functions, before
+* the interrupts are enabled.
+* - Start DMA channels: Call XLlDma_BdRingStart() to start a channel
+*
+* <b> How to start DMA transactions </b>
+*
+* RX channel is ready to start RX transactions once the initialization (see
+* Initialization section above) is finished. The DMA transactions are triggered
+* by the user IP (like Local Link TEMAC).
+*
+* Starting TX transactions needs some work. The application calls
+* XLlDma_BdRingAlloc() to allocate a BD list, then populates necessary
+* attributes of each allocated BD including data buffer address, data size,
+* and control word, and last passes those BDs to the TX channel
+* (see XLlDma_BdRingToHw()). The added BDs will be processed as soon as the
+* TX channel reaches them.
+*
+* For both channels, if the DMA engine is currently paused (see
+* XLlDma_Pause()), the newly added BDs will be accepted but not processed
+* until the DMA engine is resumed (see XLlDma_Resume()).
+*
+* <b> Software Post-Processing on completed DMA transactions </b>
+*
+* Some software post-processing is needed after DMA transactions are finished.
+*
+* If the interrupt system is set up and enabled, the DMA channels notify the
+* software of finished DMA transactions using interrupts. Otherwise the
+* application can poll the channels (see XLlDma_BdRingFromHw()).
+*
+* - Once BDs are finished by a channel, the application first needs to fetch
+* them from the channel (see XLlDma_BdRingFromHw()).
+* - On TX side, the application now could free the data buffers attached to
+* those BDs as the data in the buffers has been transmitted.
+* - On RX side, the application now could use the received data in the buffers
+* attached to those BDs
+* - For both channels, those BDs need to be freed back to the Free group (see
+* XLlDma_BdRingFree()) so they are allocatable for future transactions.
+* - On RX side, it is the application's responsibility for having BDs ready
+* to receive data at any time. Otherwise the RX channel will refuse to
+* accept any data once it runs out of RX BDs. As we just freed those hardware
+* completed BDs in the previous step, it is good timing to allocate them
+* back (see XLlDma_BdRingAlloc()), prepare them, and feed them to the RX
+* channel again (see XLlDma_BdRingToHw())
+*
+* <b> Examples </b>
+*
+* Two examples are provided with this driver to demonstrate the driver usage:
+* One for interrupt mode and one for polling mode.
+*
+* <b>Address Translation</b>
+*
+* When the BD list is setup with XLlDma_BdRingCreate(), a physical and
+* virtual address is supplied for the segment of memory containing the
+* descriptors. The driver will handle any translations internally. Subsequent
+* access of descriptors by the application is done in terms of their virtual
+* address.
+*
+* Any application data buffer address attached to a BD must be a physical
+* address. The application is responsible for calculating the physical address
+* before assigning it to the buffer address field in the BD.
+*
+* <b>Cache Coherency</b>
+*
+* This driver expects all application buffers attached to BDs to be in cache
+* coherent memory. Buffers for transmit MUST be flushed from the cache before
+* passing the associated BD to this driver. Buffers for receive MUST be
+* invalidated before passing the associated BD to this driver.
+*
+* <b>Alignment</b>
+*
+* For BDs:
+*
+* Minimum alignment is defined by the constant XLLDMA_BD_MINIMUM_ALIGNMENT.
+* This is the smallest alignment allowed by both hardware and software for them
+* to properly work. Other than XLLDMA_BD_MINIMUM_ALIGNMENT, multiples of the
+* constant are the only valid alignments for BDs.
+*
+* If the descriptor ring is to be placed in cached memory, alignment also MUST
+* be at least the processor's cache-line size. If this requirement is not met
+* then system instability will result. This is also true if the length of a BD
+* is longer than one cache-line, in which case multiple cache-lines are needed
+* to accommodate each BD.
+*
+* Aside from the initial creation of the descriptor ring (see
+* XLlDma_BdRingCreate()), there are no other run-time checks for proper
+* alignment.
+*
+* For application data buffers:
+*
+* Application data buffers may reside on any alignment.
+*
+* <b>Reset After Stopping</b>
+*
+* This driver is designed to allow for stop-reset-start cycles of the DMA
+* hardware while keeping the BD list intact. When restarted after a reset, this
+* driver will point the DMA engine to where it left off after stopping it.
+*
+* <b>Limitations</b>
+*
+* This driver only supports Normal mode (i.e., Tail Descriptor Pointer mode).
+* In this mode write of a Tail Descriptor Pointer register (which is done in
+* XLlDma_BdRingStart() and XLlDma_BdRingToHw()) starts DMA transactions.
+*
+* Legacy mode is NOT supported by this driver.
+*
+* This driver does not have any mechanism for mutual exclusion. It is up to the
+* application to provide this protection.
+*
+* <b>Hardware Defaults & Exclusive Use</b>
+*
+* During initialization, this driver will override the following hardware
+* default settings. If desired, the application may change these settings back
+* to their hardware defaults:
+*
+* - Normal mode (Tail Descriptor Pointer mode) will be enabled.
+* - Interrupt coalescing timer and counter overflow errors will be disabled
+* (XLLDMA_DMACR_RX_OVERFLOW_ERR_DIS_MASK and TX_OVERFLOW_ERR_DIS_MASK will
+* be set to 1). These two items control interrupt "overflow" behavior.
+* When enabled, the hardware may signal an error if interrupts are not
+* processed fast enough even though packets were correctly processed. This
+* error is triggered when certain internal counters overflow. The driver
+* disables this feature so no such error will be reported.
+*
+* The driver requires exclusive use of the following hardware features. If any
+* are changed by the application then the driver will not operate properly:
+*
+* - XLLDMA_DMACR_TAIL_PTR_ENABLE_MASK. The driver controls this bit
+* in the DMACR register.
+* - XLLDMA_BD_STSCTRL_COMPLETED_MASK. The driver controls this bit in each BD
+* - XLLDMA_NDESC_OFFSET. The driver controls this register
+* - XLLDMA_DMACR_SW_RESET_MASK. The driver controls this bit in the DMACR
+* register
+*
+* <b>BUS Interface</b>
+*
+* The constant CONFIG_XILINX_LLDMA_USE_DCR (see xlldma_hw.h) is used
+* to inform the driver the type of the BUS the DMA device is on. If
+* the DMA device is on DCR BUS, CONFIG_XILINX_LLDMA_USE_DCR must be
+* defined as a compiler option used in the Makefile BEFORE this driver
+* is compiled; Otherwise, the constant must NOT be defined.
+*
+* <b>User-IP Specific Definition</b>
+*
+* This driver relies on two User-IP (like Local-Link TEMAC) specific constants
+* (see xlldma_userip.h) to work properly:
+*
+* - XLLDMA_USR_APPWORD_OFFSET defines a user word the User-IP always updates
+* in the RX Buffer Descriptors (BD) during <b>ALL</b> Receive transactions.
+* This driver uses XLLDMA_BD_USR4_OFFSET as the default value of this
+* constant.
+*
+* - XLLDMA_USR_APPWORD_INITVALUE defines the value the DMA driver uses to
+* populate the XLLDMA_USR_APPWORD_OFFSET field in any RX BD before giving
+* the BD to the RX channel for a receive transaction. It must be ensured
+* that the User-IP will always populate a different value into the
+* XLLDMA_USR_APPWORD_OFFSET field during any receive transaction. Failing
+* to do so will cause the DMA driver to work improperly. This driver uses
+* 0xFFFFFFFF as the default value of this constant.
+*
+* If the User-IP uses different setting, the correct setting must be defined as
+* compiler options used in the Makefile BEFORE this driver is compiled. In
+* either case the default definition of the constants in this driver will be
+* discarded.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 12/21/06 First release
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XLLDMA_H /* prevent circular inclusions */
+#define XLLDMA_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xlldma_bd.h"
+#include "xlldma_bdring.h"
+#include "xlldma_userip.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions *****************************/
+
+#define XLLDMA_NO_CHANGE 0xFFFF /* Used as API argument */
+#define XLLDMA_ALL_BDS 0xFFFFFFFF /* Used as API argument */
+
+/**************************** Type Definitions *******************************/
+
+
+/**
+ * The XLlDma driver instance data. An instance must be allocated for each DMA
+ * engine in use. Each DMA engine includes a TX channel and a RX channel.
+ */
+typedef struct XLlDma {
+ u32 RegBase; /**< Virtual base address of DMA engine */
+ XLlDma_BdRing TxBdRing; /**< BD container management for TX channel */
+ XLlDma_BdRing RxBdRing; /**< BD container management for RX channel */
+
+} XLlDma;
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/****************************************************************************/
+/**
+* Retrieve the TX ring object. This object can be used in the various Ring
+* API functions.
+*
+* @param InstancePtr is the DMA engine to operate on.
+*
+* @return TxBdRing object
+*
+* @note
+* C-style signature:
+* XLlDma_BdRing XLlDma_mGetTxRing(XLlDma* InstancePtr)
+*
+*****************************************************************************/
+#define XLlDma_mGetTxRing(InstancePtr) ((InstancePtr)->TxBdRing)
+
+
+/****************************************************************************/
+/**
+* Retrieve the RX ring object. This object can be used in the various Ring
+* API functions.
+*
+* @param InstancePtr is the DMA engine to operate on.
+*
+* @return RxBdRing object
+*
+* @note
+* C-style signature:
+* XLlDma_BdRing XLlDma_mGetRxRing(XLlDma* InstancePtr)
+*
+*****************************************************************************/
+#define XLlDma_mGetRxRing(InstancePtr) ((InstancePtr)->RxBdRing)
+
+
+/****************************************************************************/
+/**
+* Retrieve the contents of the DMA engine control register
+* (XLLDMA_DMACR_OFFSET).
+*
+* @param InstancePtr is the DMA engine instance to operate on.
+*
+* @return Current contents of the DMA engine control register.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mGetCr(XLlDma* InstancePtr)
+*
+*****************************************************************************/
+#define XLlDma_mGetCr(InstancePtr) \
+ XLlDma_mReadReg((InstancePtr)->RegBase, XLLDMA_DMACR_OFFSET)
+
+
+/****************************************************************************/
+/**
+* Set the contents of the DMA engine control register (XLLDMA_DMACR_OFFSET).
+* This control register affects both DMA channels.
+*
+* @param InstancePtr is the DMA engine instance to operate on.
+* @param Data is the data to write to the DMA engine control register.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mSetCr(XLlDma* InstancePtr, u32 Data)
+*
+*****************************************************************************/
+#define XLlDma_mSetCr(InstancePtr, Data) \
+ XLlDma_mWriteReg((InstancePtr)->RegBase, XLLDMA_DMACR_OFFSET, (Data))
+
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Initialization and control functions in xlldma.c
+ */
+void XLlDma_Initialize(XLlDma * InstancePtr, u32 BaseAddress);
+void XLlDma_Reset(XLlDma * InstancePtr);
+void XLlDma_Pause(XLlDma * InstancePtr);
+void XLlDma_Resume(XLlDma * InstancePtr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file xlldma_bd.h
+ *
+ * This header provides operations to manage buffer descriptors (BD) in support
+ * of Local-Link scatter-gather DMA (see xlldma.h).
+ *
+ * The API exported by this header defines abstracted macros that allow the
+ * application to read/write specific BD fields.
+ *
+ * <b>Buffer Descriptors</b>
+ *
+ * A buffer descriptor defines a DMA transaction (see "Transaction"
+ * section in xlldma.h). The macros defined by this header file allow access
+ * to most fields within a BD to tailor a DMA transaction according to
+ * application and hardware requirements. See the hardware IP DMA spec for
+ * more information on BD fields and how they affect transfers.
+ *
+ * The XLlDma_Bd structure defines a BD. The organization of this structure is
+ * driven mainly by the hardware for use in scatter-gather DMA transfers.
+ *
+ * <b>Accessor Macros</b>
+ *
+ * Most of the BD attributes can be accessed through macro functions defined
+ * here in this API. Words such as XLLDMA_BD_USR1_OFFSET (see xlldma_hw.h)
+ * should be accessed using XLlDma_mBdRead() and XLlDma_mBdWrite() as defined
+ * in xlldma_hw.h. The USR words are implementation dependent. For example,
+ * they may implement checksum offloading fields for Ethernet devices. Accessor
+ * macros may be defined in the device specific API to get at this data.
+ *
+ * <b>Performance</b>
+ *
+ * BDs are typically in a non-cached memory space. Limiting I/O to BDs can
+ * improve overall performance of the DMA channel.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a xd 12/21/06 First release
+ * </pre>
+ *
+ *****************************************************************************/
+
+#ifndef XLLDMA_BD_H /* prevent circular inclusions */
+#define XLLDMA_BD_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xenv.h"
+#include "xlldma_hw.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * The XLlDma_Bd is the type for buffer descriptors (BDs).
+ */
+typedef u32 XLlDma_Bd[XLLDMA_BD_NUM_WORDS];
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+*
+* Read the given Buffer Descriptor word.
+*
+* @param BaseAddress is the base address of the BD to read
+* @param Offset is the word offset to be read
+*
+* @return The 32-bit value of the field
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRead(u32 BaseAddress, u32 Offset)
+*
+******************************************************************************/
+#define XLlDma_mBdRead(BaseAddress, Offset) \
+ (*(u32*)((u32)(BaseAddress) + (u32)(Offset)))
+
+
+/*****************************************************************************/
+/**
+*
+* Write the given Buffer Descriptor word.
+*
+* @param BaseAddress is the base address of the BD to write
+* @param Offset is the word offset to be written
+* @param Data is the 32-bit value to write to the field
+*
+* @return None.
+*
+* @note
+* C-style signature:
+* void XLlDma_mBdWrite(u32 BaseAddress, u32 RegOffset, u32 Data)
+*
+******************************************************************************/
+#define XLlDma_mBdWrite(BaseAddress, Offset, Data) \
+ (*(u32*)((u32)(BaseAddress) + (u32)(Offset)) = (Data))
+
+
+/*****************************************************************************/
+/**
+ * Zero out all BD fields
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Nothing
+ *
+ * @note
+ * C-style signature:
+ * void XLlDma_mBdClear(XLlDma_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdClear(BdPtr) \
+ memset((BdPtr), 0, sizeof(XLlDma_Bd))
+
+
+/*****************************************************************************/
+/**
+ * Set the BD's STS/CTRL field. The word containing STS/CTRL also contains the
+ * USR0 field. USR0 will not be modified. This operation requires a read-
+ * modify-write operation. If it is wished to set both STS/CTRL and USR0 with
+ * a single write operation, then use XLlDma_mBdWrite(BdPtr,
+ * XLLDMA_BD_STSCTRL_USR0_OFFSET, Data).
+ *
+ * @param BdPtr is the BD to operate on
+ * @param Data is the value to write to STS/CTRL. Or 0 or more
+ * XLLDMA_BD_STSCTRL_*** values defined in xlldma_hw.h to create a
+ * valid value for this parameter
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlDma_mBdSetStsCtrl(XLlDma_Bd* BdPtr, u32 Data)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdSetStsCtrl(BdPtr, Data) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET, \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET) \
+ & XLLDMA_BD_STSCTRL_USR0_MASK) | \
+ ((Data) & XLLDMA_BD_STSCTRL_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the word containing the BD's STS/CTRL field. This word also
+ * contains the USR0 field.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Word at offset XLLDMA_BD_DMASR_OFFSET. Use XLLDMA_BD_STSCTRL_***
+ * values defined in xlldma_hw.h to interpret this returned value
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlDma_mBdGetStsCtrl(XLlDma_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdGetStsCtrl(BdPtr) \
+ XLlDma_mBdRead((BdPtr), XLLDMA_BD_STSCTRL_USR0_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Set transfer length in bytes for the given BD. The length must be set each
+ * time a BD is submitted to hardware.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param LenBytes is the number of bytes to transfer.
+ *
+ * @note
+ * C-style signature:
+ * void XLlDma_mBdSetLength(XLlDma_Bd* BdPtr, u32 LenBytes)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdSetLength(BdPtr, LenBytes) \
+ XLlDma_mBdWrite((BdPtr), XLLDMA_BD_BUFL_OFFSET, (LenBytes))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD length field.
+ *
+ * For TX channels, the returned value is the same as that written with
+ * XLlDma_mBdSetLength().
+ *
+ * For RX channels, the returned value is what was written by the DMA engine
+ * after processing the BD. This value represents the number of bytes
+ * processed.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @return Bytes processed by hardware or set by XLlDma_mBdSetLength().
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlDma_mBdGetLength(XLlDma_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdGetLength(BdPtr) \
+ XLlDma_mBdRead((BdPtr), XLLDMA_BD_BUFL_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Set the ID field of the given BD. The ID is an arbitrary piece of data the
+ * application can associate with a specific BD.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param Id is a 32 bit quantity to set in the BD
+ *
+ * @note
+ * C-style signature:
+ * void XLlDma_mBdSetId(XLlDma_Bd* BdPtr, u32 Id)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdSetId(BdPtr, Id) \
+ (XLlDma_mBdWrite((BdPtr), XLLDMA_BD_ID_OFFSET, (u32)(Id)))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the ID field of the given BD previously set with XLlDma_mBdSetId.
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+ * u32 XLlDma_mBdGetId(XLlDma_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdGetId(BdPtr) (XLlDma_mBdRead((BdPtr), XLLDMA_BD_ID_OFFSET))
+
+
+/*****************************************************************************/
+/**
+ * Set the BD's buffer address.
+ *
+ * @param BdPtr is the BD to operate on
+ * @param Addr is the address to set
+ *
+ * @note
+ * C-style signature:
+ * void XLlDma_mBdSetBufAddr(XLlDma_Bd* BdPtr, u32 Addr)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdSetBufAddr(BdPtr, Addr) \
+ (XLlDma_mBdWrite((BdPtr), XLLDMA_BD_BUFA_OFFSET, (u32)(Addr)))
+
+
+/*****************************************************************************/
+/**
+ * Get the BD's buffer address
+ *
+ * @param BdPtr is the BD to operate on
+ *
+ * @note
+ * C-style signature:
+* u32 XLlDma_mBdGetBufAddr(XLlDma_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XLlDma_mBdGetBufAddr(BdPtr) \
+ (XLlDma_mBdRead((BdPtr), XLLDMA_BD_BUFA_OFFSET))
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xlldma_bdring.c
+*
+* This file implements buffer descriptor ring related functions. For more
+* information on this driver, see xlldma.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 12/21/06 First release
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+
+#include "xlldma.h"
+#include "xenv.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/******************************************************************************
+ * Define methods to flush and invalidate cache for BDs should they be
+ * located in cached memory. These macros may NOPs if the underlying
+ * XCACHE_FLUSH_DCACHE_RANGE and XCACHE_INVALIDATE_DCACHE_RANGE macros are not
+ * implemented or they do nothing.
+ *****************************************************************************/
+#ifdef XCACHE_FLUSH_DCACHE_RANGE
+# define XLLDMA_CACHE_FLUSH(BdPtr) \
+ XCACHE_FLUSH_DCACHE_RANGE((BdPtr), XLLDMA_BD_HW_NUM_BYTES)
+#else
+# define XLLDMA_CACHE_FLUSH(BdPtr)
+#endif
+
+#ifdef XCACHE_INVALIDATE_DCACHE_RANGE
+# define XLLDMA_CACHE_INVALIDATE(BdPtr) \
+ XCACHE_INVALIDATE_DCACHE_RANGE((BdPtr), XLLDMA_BD_HW_NUM_BYTES)
+#else
+# define XLLDMA_CACHE_INVALIDATE(BdPtr)
+#endif
+
+/******************************************************************************
+ * Compute the virtual address of a descriptor from its physical address
+ *
+ * @param BdPtr is the physical address of the BD
+ *
+ * @returns Virtual address of BdPtr
+ *
+ * @note Assumes BdPtr is always a valid BD in the ring
+ * @note RingPtr is an implicit parameter; it must be in scope where expanded
+ *****************************************************************************/
+#define XLLDMA_PHYS_TO_VIRT(BdPtr) \
+ ((u32)(BdPtr) + (RingPtr->FirstBdAddr - RingPtr->FirstBdPhysAddr))
+
+/******************************************************************************
+ * Compute the physical address of a descriptor from its virtual address
+ *
+ * @param BdPtr is the virtual address of the BD
+ *
+ * @returns Physical address of BdPtr
+ *
+ * @note Assumes BdPtr is always a valid BD in the ring
+ * @note RingPtr is an implicit parameter; it must be in scope where expanded
+ *****************************************************************************/
+#define XLLDMA_VIRT_TO_PHYS(BdPtr) \
+ ((u32)(BdPtr) - (RingPtr->FirstBdAddr - RingPtr->FirstBdPhysAddr))
+
+/******************************************************************************
+ * Move the BdPtr argument ahead an arbitrary number of BDs wrapping around
+ * to the beginning of the ring if needed.
+ *
+ * A wraparound must occur if the new BdPtr is greater than
+ * the high address in the ring OR if the new BdPtr crosses the 0xFFFFFFFF
+ * to 0 boundary.
+ *
+ * @param RingPtr is the ring BdPtr appears in
+ * @param BdPtr on input is the starting BD position and on output is the
+ * final BD position
+ * @param NumBd is the number of BD spaces to increment
+ *
+ *****************************************************************************/
+#define XLLDMA_RING_SEEKAHEAD(RingPtr, BdPtr, NumBd) \
+ { \
+ u32 Addr = (u32)(BdPtr); \
+ \
+ Addr += ((RingPtr)->Separation * (NumBd)); \
+ if ((Addr > (RingPtr)->LastBdAddr) || ((u32)(BdPtr) > Addr))\
+ { \
+ Addr -= (RingPtr)->Length; \
+ } \
+ \
+ (BdPtr) = (XLlDma_Bd*)Addr; \
+ }
+
+/******************************************************************************
+ * Move the BdPtr argument backwards an arbitrary number of BDs wrapping
+ * around to the end of the ring if needed.
+ *
+ * A wraparound must occur if the new BdPtr is less than
+ * the base address in the ring OR if the new BdPtr crosses the 0xFFFFFFFF
+ * to 0 boundary.
+ *
+ * @param RingPtr is the ring BdPtr appears in
+ * @param BdPtr on input is the starting BD position and on output is the
+ * final BD position
+ * @param NumBd is the number of BD spaces to decrement
+ *
+ *****************************************************************************/
+#define XLLDMA_RING_SEEKBACK(RingPtr, BdPtr, NumBd) \
+ { \
+ u32 Addr = (u32)(BdPtr); \
+ \
+ Addr -= ((RingPtr)->Separation * (NumBd)); \
+ if ((Addr < (RingPtr)->FirstBdAddr) || ((u32)(BdPtr) < Addr)) \
+ { \
+ Addr += (RingPtr)->Length; \
+ } \
+ \
+ (BdPtr) = (XLlDma_Bd*)Addr; \
+ }
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * Using a memory segment allocated by the caller, create and setup the BD list
+ * for the given SGDMA ring.
+ *
+ * @param RingPtr is the descriptor ring instance to be worked on.
+ * @param PhysAddr is the physical base address of application memory region.
+ * @param VirtAddr is the virtual base address of the application memory
+ * region. If address translation is not being utilized, then VirtAddr
+ * should be equivalent to PhysAddr.
+ * @param Alignment governs the byte alignment of individual BDs. This function
+ * will enforce a minimum alignment of XLLDMA_BD_MINIMUM_ALIGNMENT bytes
+ * with no maximum as long as it is specified as a power of 2.
+ * @param BdCount is the number of BDs to setup in the application memory
+ * region. It is assumed the region is large enough to contain the BDs.
+ * Refer to the "SGDMA Ring Creation" section in xlldma.h for more
+ * information. The minimum valid value for this parameter is 1.
+ *
+ * @return
+ *
+ * - XST_SUCCESS if initialization was successful
+ * - XST_NO_FEATURE if the provided instance is a non SGDMA type of DMA
+ * channel.
+ * - XST_INVALID_PARAM under any of the following conditions: 1) PhysAddr
+ * and/or VirtAddr are not aligned to the given Alignment parameter;
+ * 2) Alignment parameter does not meet minimum requirements or is not a
+ * power of 2 value; 3) BdCount is 0.
+ * - XST_DMA_SG_LIST_ERROR if the memory segment containing the list spans
+ * over address 0x00000000 in virtual address space.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingCreate(XLlDma_BdRing * RingPtr, u32 PhysAddr,
+ u32 VirtAddr, u32 Alignment, unsigned BdCount)
+{
+ unsigned i;
+ u32 BdVirtAddr;
+ u32 BdPhysAddr;
+
+ /* In case there is a failure prior to creating list, make sure the
+ * following attributes are 0 to prevent calls to other SG functions
+ * from doing anything
+ */
+ RingPtr->AllCnt = 0;
+ RingPtr->FreeCnt = 0;
+ RingPtr->HwCnt = 0;
+ RingPtr->PreCnt = 0;
+ RingPtr->PostCnt = 0;
+
+ /* Make sure Alignment parameter meets minimum requirements */
+ if (Alignment < XLLDMA_BD_MINIMUM_ALIGNMENT) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Make sure Alignment is a power of 2 */
+ if ((Alignment - 1) & Alignment) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Make sure PhysAddr and VirtAddr are on same Alignment */
+ if ((PhysAddr % Alignment) || (VirtAddr % Alignment)) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* BdCount must be at least 1 */
+ if (BdCount == 0) {
+ return (XST_INVALID_PARAM);
+ }
+
+ /* Compute how many bytes will be between the start of adjacent BDs */
+ RingPtr->Separation =
+ (sizeof(XLlDma_Bd) + (Alignment - 1)) & ~(Alignment - 1);
+
+ /* Must make sure the ring doesn't span address 0x00000000. If it does,
+ * then the next/prev BD traversal macros will fail.
+ */
+ if (VirtAddr > (VirtAddr + (RingPtr->Separation * BdCount) - 1)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Initial ring setup:
+ * - Clear the entire space
+ * - Setup each BD's next pointer with the physical address of the
+ * next BD
+ * - Set each BD's DMA complete status bit
+ */
+ memset((void *) VirtAddr, 0, (RingPtr->Separation * BdCount));
+
+ BdVirtAddr = VirtAddr;
+ BdPhysAddr = PhysAddr + RingPtr->Separation;
+ for (i = 1; i < BdCount; i++) {
+ XLlDma_mBdWrite(BdVirtAddr, XLLDMA_BD_NDESC_OFFSET, BdPhysAddr);
+ XLlDma_mBdWrite(BdVirtAddr, XLLDMA_BD_STSCTRL_USR0_OFFSET,
+ XLLDMA_BD_STSCTRL_COMPLETED_MASK);
+ XLLDMA_CACHE_FLUSH(BdVirtAddr);
+ BdVirtAddr += RingPtr->Separation;
+ BdPhysAddr += RingPtr->Separation;
+ }
+
+ /* At the end of the ring, link the last BD back to the top */
+ XLlDma_mBdWrite(BdVirtAddr, XLLDMA_BD_NDESC_OFFSET, PhysAddr);
+ XLLDMA_CACHE_FLUSH(BdVirtAddr);
+
+ /* Setup and initialize pointers and counters */
+ RingPtr->RunState = XST_DMA_SG_IS_STOPPED;
+ RingPtr->FirstBdAddr = VirtAddr;
+ RingPtr->FirstBdPhysAddr = PhysAddr;
+ RingPtr->LastBdAddr = BdVirtAddr;
+ RingPtr->Length = RingPtr->LastBdAddr - RingPtr->FirstBdAddr +
+ RingPtr->Separation;
+ RingPtr->AllCnt = BdCount;
+ RingPtr->FreeCnt = BdCount;
+ RingPtr->FreeHead = (XLlDma_Bd *) VirtAddr;
+ RingPtr->PreHead = (XLlDma_Bd *) VirtAddr;
+ RingPtr->HwHead = (XLlDma_Bd *) VirtAddr;
+ RingPtr->HwTail = (XLlDma_Bd *) VirtAddr;
+ RingPtr->PostHead = (XLlDma_Bd *) VirtAddr;
+ RingPtr->BdaRestart = (XLlDma_Bd *) PhysAddr;
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Clone the given BD into every BD in the ring. Except for
+ * XLLDMA_BD_NDESC_OFFSET, every field of the source BD is replicated in every
+ * BD in the ring.
+ *
+ * This function can be called only when all BDs are in the free group such as
+ * they are immediately after creation of the ring. This prevents modification
+ * of BDs while they are in use by hardware or the application.
+ *
+ * @param RingPtr is the descriptor ring instance to be worked on.
+ * @param SrcBdPtr is the source BD template to be cloned into the list.
+ *
+ * @return
+ * - XST_SUCCESS if the list was modified.
+ * - XST_DMA_SG_NO_LIST if a list has not been created.
+ * - XST_DMA_SG_LIST_ERROR if some of the BDs in this channel are under
+ * hardware or application control.
+ * - XST_DEVICE_IS_STARTED if the DMA channel has not been stopped.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingClone(XLlDma_BdRing * RingPtr, XLlDma_Bd * SrcBdPtr)
+{
+ unsigned i;
+ u32 CurBd;
+ u32 Save;
+ XLlDma_Bd TmpBd;
+
+ /* Can't do this function if there isn't a ring */
+ if (RingPtr->AllCnt == 0) {
+ return (XST_DMA_SG_NO_LIST);
+ }
+
+ /* Can't do this function with the channel running */
+ if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
+ return (XST_DEVICE_IS_STARTED);
+ }
+
+ /* Can't do this function with some of the BDs in use */
+ if (RingPtr->FreeCnt != RingPtr->AllCnt) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+
+ /* Make a copy of the template then modify it by setting complete bit
+ * in status/control field
+ */
+ memcpy(&TmpBd, SrcBdPtr, sizeof(XLlDma_Bd));
+ Save = XLlDma_mBdRead(&TmpBd, XLLDMA_BD_STSCTRL_USR0_OFFSET);
+ Save |= XLLDMA_BD_STSCTRL_COMPLETED_MASK;
+ XLlDma_mBdWrite(&TmpBd, XLLDMA_BD_STSCTRL_USR0_OFFSET, Save);
+
+ /* Starting from the top of the ring, save BD.Next, overwrite the
+ * entire BD with the template, then restore BD.Next
+ */
+ for (i = 0, CurBd = RingPtr->FirstBdAddr;
+ i < RingPtr->AllCnt; i++, CurBd += RingPtr->Separation) {
+ Save = XLlDma_mBdRead(CurBd, XLLDMA_BD_NDESC_OFFSET);
+ memcpy((void *) CurBd, (void *) &TmpBd, sizeof(XLlDma_Bd));
+ XLlDma_mBdWrite(CurBd, XLLDMA_BD_NDESC_OFFSET, Save);
+ XLLDMA_CACHE_FLUSH(CurBd);
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Allow DMA transactions to commence on the given channels if descriptors are
+ * ready to be processed.
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ *
+ * @return
+ * - XST_SUCCESS if the channel was started.
+ * - XST_DMA_SG_NO_LIST if the channel has no initialized BD ring.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingStart(XLlDma_BdRing * RingPtr)
+{
+ /* BD list has yet to be created for this channel */
+ if (RingPtr->AllCnt == 0) {
+ return (XST_DMA_SG_NO_LIST);
+ }
+
+ /* Do nothing if already started */
+ if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
+ return (XST_SUCCESS);
+ }
+
+ /* Sync hardware and driver with the last unprocessed BD or the 1st BD
+ * in the ring if this is the first time starting the channel
+ */
+ XLlDma_mWriteReg(RingPtr->ChanBase, XLLDMA_CDESC_OFFSET,
+ (u32) RingPtr->BdaRestart);
+
+ /* Note as started */
+ RingPtr->RunState = XST_DMA_SG_IS_STARTED;
+
+ /* If there are unprocessed BDs then we want the channel to begin
+ * processing right away
+ */
+ if (RingPtr->HwCnt > 0) {
+ XLLDMA_CACHE_INVALIDATE(RingPtr->HwTail);
+
+ if ((XLlDma_mBdRead(RingPtr->HwTail,
+ XLLDMA_BD_STSCTRL_USR0_OFFSET) &
+ XLLDMA_BD_STSCTRL_COMPLETED_MASK) == 0) {
+ XLlDma_mWriteReg(RingPtr->ChanBase,
+ XLLDMA_TDESC_OFFSET,
+ XLLDMA_VIRT_TO_PHYS(RingPtr->HwTail));
+ }
+ }
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Set interrupt coalescing parameters for the given descriptor ring channel.
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param Counter sets the packet counter on the channel. Valid range is
+ * 1..255, or XLLDMA_NO_CHANGE to leave this setting unchanged.
+ * @param Timer sets the waitbound timer on the channel. Valid range is
+ * 1..255, or XLLDMA_NO_CHANGE to leave this setting unchanged. LSB is
+ * in units of 1 / (local link clock).
+ *
+ * @return
+ * - XST_SUCCESS if interrupt coalescing settings updated
+ * - XST_FAILURE if Counter or Timer parameters are out of range
+ *****************************************************************************/
+int XLlDma_BdRingSetCoalesce(XLlDma_BdRing * RingPtr, u32 Counter, u32 Timer)
+{
+ u32 Cr = XLlDma_mReadReg(RingPtr->ChanBase, XLLDMA_CR_OFFSET); /* current CR */
+
+ if (Counter != XLLDMA_NO_CHANGE) {
+ if ((Counter == 0) || (Counter > 0xFF)) {
+ return (XST_FAILURE);
+ }
+
+ Cr = (Cr & ~XLLDMA_CR_IRQ_COUNT_MASK) |
+ (Counter << XLLDMA_CR_IRQ_COUNT_SHIFT);
+ Cr |= XLLDMA_CR_LD_IRQ_CNT_MASK; /* latch new value into hardware */
+ }
+
+ if (Timer != XLLDMA_NO_CHANGE) {
+ if ((Timer == 0) || (Timer > 0xFF)) {
+ return (XST_FAILURE);
+ }
+
+ Cr = (Cr & ~XLLDMA_CR_IRQ_TIMEOUT_MASK) |
+ (Timer << XLLDMA_CR_IRQ_TIMEOUT_SHIFT);
+ Cr |= XLLDMA_CR_LD_IRQ_CNT_MASK; /* latch new value into hardware */
+ }
+
+ XLlDma_mWriteReg(RingPtr->ChanBase, XLLDMA_CR_OFFSET, Cr);
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Retrieve current interrupt coalescing parameters from the given descriptor
+ * ring channel.
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param CounterPtr points to a memory location where the current packet
+ * counter will be written.
+ * @param TimerPtr points to a memory location where the current waitbound
+ * timer will be written.
+ *****************************************************************************/
+void XLlDma_BdRingGetCoalesce(XLlDma_BdRing * RingPtr,
+ u32 *CounterPtr, u32 *TimerPtr)
+{
+ u32 Cr = XLlDma_mReadReg(RingPtr->ChanBase, XLLDMA_CR_OFFSET); /* current CR */
+
+ *CounterPtr =
+ ((Cr & XLLDMA_CR_IRQ_COUNT_MASK) >> XLLDMA_CR_IRQ_COUNT_SHIFT);
+ *TimerPtr =
+ ((Cr & XLLDMA_CR_IRQ_TIMEOUT_MASK) >>
+ XLLDMA_CR_IRQ_TIMEOUT_SHIFT);
+}
+
+
+/*****************************************************************************/
+/**
+ * Reserve locations in the BD ring. The set of returned BDs may be modified in
+ * preparation for future DMA transactions. Once the BDs are ready to be
+ * submitted to hardware, the application must call XLlDma_BdRingToHw() in the
+ * same order which they were allocated here. Example:
+ *
+ * <pre>
+ * NumBd = 2;
+ * Status = XLlDma_BdRingAlloc(MyRingPtr, NumBd, &MyBdSet);
+ *
+ * if (Status != XST_SUCCESS)
+ * {
+ * // Not enough BDs available for the request
+ * }
+ *
+ * CurBd = MyBdSet;
+ * for (i=0; i<NumBd; i++)
+ * {
+ * // Prepare CurBd.....
+ *
+ * // Onto next BD
+ * CurBd = XLlDma_mBdRingNext(MyRingPtr, CurBd);
+ * }
+ *
+ * // Give list to hardware
+ * Status = XLlDma_BdRingToHw(MyRingPtr, NumBd, MyBdSet);
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be allocated and given to hardware in the correct sequence:
+ * <pre>
+ * // Legal
+ * XLlDma_BdRingAlloc(MyRingPtr, NumBd1, &MySet1);
+ * XLlDma_BdRingToHw(MyRingPtr, NumBd1, MySet1);
+ *
+ * // Legal
+ * XLlDma_BdRingAlloc(MyRingPtr, NumBd1, &MySet1);
+ * XLlDma_BdRingAlloc(MyRingPtr, NumBd2, &MySet2);
+ * XLlDma_BdRingToHw(MyRingPtr, NumBd1, MySet1);
+ * XLlDma_BdRingToHw(MyRingPtr, NumBd2, MySet2);
+ *
+ * // Not legal
+ * XLlDma_BdRingAlloc(MyRingPtr, NumBd1, &MySet1);
+ * XLlDma_BdRingAlloc(MyRingPtr, NumBd2, &MySet2);
+ * XLlDma_BdRingToHw(MyRingPtr, NumBd2, MySet2);
+ * XLlDma_BdRingToHw(MyRingPtr, NumBd1, MySet1);
+ * </pre>
+ *
+ * Use the API defined in xlldmabd.h to modify individual BDs. Traversal of the
+ * BD set can be done using XLlDma_mBdRingNext() and XLlDma_mBdRingPrev().
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param NumBd is the number of BDs to allocate
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ * for modification.
+ *
+ * @return
+ * - XST_SUCCESS if the requested number of BDs was returned in the BdSetPtr
+ * parameter.
+ * - XST_FAILURE if there were not enough free BDs to satisfy the request.
+ *
+ * @note This function should not be preempted by another XLlDma_BdRing
+ * function call that modifies the BD space. It is the caller's
+ * responsibility to provide a mutual exclusion mechanism.
+ *
+ * @note Do not modify more BDs than the number requested with the NumBd
+ * parameter. Doing so will lead to data corruption and system
+ * instability.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingAlloc(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd ** BdSetPtr)
+{
+ /* Enough free BDs available for the request? */
+ if (RingPtr->FreeCnt < NumBd) {
+ return (XST_FAILURE);
+ }
+
+ /* Set the return argument and move FreeHead forward */
+ *BdSetPtr = RingPtr->FreeHead;
+ XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->FreeHead, NumBd);
+ RingPtr->FreeCnt -= NumBd;
+ RingPtr->PreCnt += NumBd;
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Fully or partially undo an XLlDma_BdRingAlloc() operation. Use this function
+ * if all the BDs allocated by XLlDma_BdRingAlloc() could not be transferred to
+ * hardware with XLlDma_BdRingToHw().
+ *
+ * This function helps out in situations when an unrelated error occurs after
+ * BDs have been allocated but before they have been given to hardware.
+ *
+ * This function is not the same as XLlDma_BdRingFree(). The Free function
+ * returns BDs to the free list after they have been processed by hardware,
+ * while UnAlloc returns them before being processed by hardware.
+ *
+ * There are two scenarios where this function can be used. Full UnAlloc or
+ * Partial UnAlloc. A Full UnAlloc means all the BDs Alloc'd will be returned:
+ *
+ * <pre>
+ * Status = XLlDma_BdRingAlloc(MyRingPtr, 10, &BdPtr);
+ * .
+ * .
+ * if (Error)
+ * {
+ * Status = XLlDma_BdRingUnAlloc(MyRingPtr, 10, &BdPtr);
+ * }
+ * </pre>
+ *
+ * A partial UnAlloc means some of the BDs Alloc'd will be returned:
+ *
+ * <pre>
+ * Status = XLlDma_BdRingAlloc(MyRingPtr, 10, &BdPtr);
+ * BdsLeft = 10;
+ * CurBdPtr = BdPtr;
+ *
+ * while (BdsLeft)
+ * {
+ * if (Error)
+ * {
+ * Status = XLlDma_BdRingUnAlloc(MyRingPtr, BdsLeft, CurBdPtr);
+ * }
+ *
+ * CurBdPtr = XLlDma_mBdRingNext(MyRingPtr, CurBdPtr);
+ * BdsLeft--;
+ * }
+ * </pre>
+ *
+ * A partial UnAlloc must include the last BD in the list that was Alloc'd.
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param NumBd is the number of BDs to unallocate
+ * @param BdSetPtr points to the first of the BDs to be returned.
+ *
+ * @return
+ * - XST_SUCCESS if the BDs were unallocated.
+ * - XST_FAILURE if NumBd parameter was greater than the number of BDs in the
+ * preprocessing state.
+ *
+ * @note This function should not be preempted by another XLlDma ring function
+ * call that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingUnAlloc(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd * BdSetPtr)
+{
+ /* Enough BDs in the preprocessing state for the request? */
+ if (RingPtr->PreCnt < NumBd) {
+ return (XST_FAILURE);
+ }
+
+ /* Move FreeHead backward, returning the BDs to the free group */
+ XLLDMA_RING_SEEKBACK(RingPtr, RingPtr->FreeHead, NumBd);
+ RingPtr->FreeCnt += NumBd;
+ RingPtr->PreCnt -= NumBd;
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Enqueue a set of BDs to hardware that were previously allocated by
+ * XLlDma_BdRingAlloc(). Once this function returns, the argument BD set goes
+ * under hardware control. Any changes made to these BDs after this point will
+ * corrupt the BD list leading to data corruption and system instability.
+ *
+ * The set will be rejected if the last BD of the set does not mark the end of
+ * a packet.
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param NumBd is the number of BDs in the set.
+ * @param BdSetPtr is the first BD of the set to commit to hardware.
+ *
+ * @return
+ * - XST_SUCCESS if the set of BDs was accepted and enqueued to hardware
+ * - XST_FAILURE if the set of BDs was rejected because the first BD
+ * did not have its start-of-packet bit set, the last BD did not have
+ * its end-of-packet bit set, or any one of the BD set has 0 as length
+ * value
+ * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
+ * XLlDma_BdRingAlloc()
+ *
+ * @note This function should not be preempted by another XLlDma ring function
+ * call that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingToHw(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd * BdSetPtr)
+{
+ XLlDma_Bd *CurBdPtr;
+ unsigned i;
+ u32 BdStsCr;
+
+ /* If the commit set is empty, do nothing */
+ if (NumBd == 0) {
+ return (XST_SUCCESS);
+ }
+
+ /* Make sure we are in sync with XLlDma_BdRingAlloc() */
+ if ((RingPtr->PreCnt < NumBd) || (RingPtr->PreHead != BdSetPtr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ CurBdPtr = BdSetPtr;
+ BdStsCr = XLlDma_mBdRead(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET);
+
+ /* The first BD should have been marked as start-of-packet */
+ if (!(BdStsCr & XLLDMA_BD_STSCTRL_SOP_MASK)) {
+ return (XST_FAILURE);
+ }
+
+ /* For each BD being submitted except the last one, clear the completed
+ * bit and stop_on_end bit in the status word
+ */
+ for (i = 0; i < NumBd - 1; i++) {
+
+ /* Make sure the length value in the BD is non-zero. */
+ if (XLlDma_mBdGetLength(CurBdPtr) == 0) {
+ return (XST_FAILURE);
+ }
+
+ BdStsCr &=
+ ~(XLLDMA_BD_STSCTRL_COMPLETED_MASK |
+ XLLDMA_BD_STSCTRL_SOE_MASK);
+ XLlDma_mBdWrite(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET,
+ BdStsCr);
+
+ /* In RX channel case, the current BD should have the
+ * XLLDMA_USERIP_APPWORD_OFFSET initialized to
+ * XLLDMA_USERIP_APPWORD_INITVALUE
+ */
+ if (RingPtr->IsRxChannel) {
+ XLlDma_mBdWrite(CurBdPtr, XLLDMA_USERIP_APPWORD_OFFSET,
+ XLLDMA_USERIP_APPWORD_INITVALUE);
+ }
+
+ /* Flush the current BD so DMA core could see the updates */
+ XLLDMA_CACHE_FLUSH(CurBdPtr);
+
+ CurBdPtr = XLlDma_mBdRingNext(RingPtr, CurBdPtr);
+ BdStsCr =
+ XLlDma_mBdRead(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET);
+ }
+
+ /* The last BD should have end-of-packet bit set */
+ if (!(BdStsCr & XLLDMA_BD_STSCTRL_EOP_MASK)) {
+ return (XST_FAILURE);
+ }
+
+ /* Make sure the length value in the last BD is non-zero. */
+ if (XLlDma_mBdGetLength(CurBdPtr) == 0) {
+ return (XST_FAILURE);
+ }
+
+ /* The last BD should also have the completed and stop-on-end bits
+ * cleared
+ */
+ BdStsCr &=
+ ~(XLLDMA_BD_STSCTRL_COMPLETED_MASK |
+ XLLDMA_BD_STSCTRL_SOE_MASK);
+ XLlDma_mBdWrite(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET, BdStsCr);
+
+ /* In RX channel case, the last BD should have the
+ * XLLDMA_USERIP_APPWORD_OFFSET initialized to
+ * XLLDMA_USERIP_APPWORD_INITVALUE
+ */
+ if (RingPtr->IsRxChannel) {
+ XLlDma_mBdWrite(CurBdPtr, XLLDMA_USERIP_APPWORD_OFFSET,
+ XLLDMA_USERIP_APPWORD_INITVALUE);
+ }
+
+ /* Flush the last BD so DMA core could see the updates */
+ XLLDMA_CACHE_FLUSH(CurBdPtr);
+
+ /* This set has completed pre-processing, adjust ring pointers and
+ * counters
+ */
+ XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->PreHead, NumBd);
+ RingPtr->PreCnt -= NumBd;
+ RingPtr->HwTail = CurBdPtr; /* last BD of the committed set */
+ RingPtr->HwCnt += NumBd;
+
+ /* If it was enabled, tell the engine to begin processing */
+ if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
+ XLlDma_mWriteReg(RingPtr->ChanBase, XLLDMA_TDESC_OFFSET,
+ XLLDMA_VIRT_TO_PHYS(RingPtr->HwTail));
+ }
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Returns a set of BD(s) that have been processed by hardware. The returned
+ * BDs may be examined by the application to determine the outcome of the DMA
+ * transactions. Once the BDs have been examined, the application must call
+ * XLlDma_BdRingFree() in the same order which they were retrieved here.
+ *
+ * Example:
+ *
+ * <pre>
+ * NumBd = XLlDma_BdRingFromHw(MyRingPtr, XLLDMA_ALL_BDS, &MyBdSet);
+ *
+ * if (NumBd == 0)
+ * {
+ * // hardware has nothing ready for us yet
+ * }
+ *
+ * CurBd = MyBdSet;
+ * for (i=0; i<NumBd; i++)
+ * {
+ * // Examine CurBd for post processing.....
+ *
+ * // Onto next BD
+ * CurBd = XLlDma_mBdRingNext(MyRingPtr, CurBd);
+ * }
+ *
+ * XLlDma_BdRingFree(MyRingPtr, NumBd, MyBdSet); // Return the list
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be retrieved from hardware and freed in the correct sequence:
+ * <pre>
+ * // Legal
+ * XLlDma_BdRingFromHw(MyRingPtr, NumBd1, &MySet1);
+ * XLlDma_BdRingFree(MyRingPtr, NumBd1, MySet1);
+ *
+ * // Legal
+ * XLlDma_BdRingFromHw(MyRingPtr, NumBd1, &MySet1);
+ * XLlDma_BdRingFromHw(MyRingPtr, NumBd2, &MySet2);
+ * XLlDma_BdRingFree(MyRingPtr, NumBd1, MySet1);
+ * XLlDma_BdRingFree(MyRingPtr, NumBd2, MySet2);
+ *
+ * // Not legal
+ * XLlDma_BdRingFromHw(MyRingPtr, NumBd1, &MySet1);
+ * XLlDma_BdRingFromHw(MyRingPtr, NumBd2, &MySet2);
+ * XLlDma_BdRingFree(MyRingPtr, NumBd2, MySet2);
+ * XLlDma_BdRingFree(MyRingPtr, NumBd1, MySet1);
+ * </pre>
+ *
+ * If hardware has partially completed a packet spanning multiple BDs, then
+ * none of the BDs for that packet will be included in the results.
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param BdLimit is the maximum number of BDs to return in the set. Use
+ * XLLDMA_ALL_BDS to return all BDs that have been processed.
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ * for examination.
+ *
+ * @return
+ * The number of BDs processed by hardware. A value of 0 indicates that no
+ * data is available. No more than BdLimit BDs will be returned.
+ *
+ * @note Treat BDs returned by this function as read-only.
+ *
+ * @note This function should not be preempted by another XLlDma ring function
+ * call that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+unsigned XLlDma_BdRingFromHw(XLlDma_BdRing * RingPtr, unsigned BdLimit,
+ XLlDma_Bd ** BdSetPtr)
+{
+ XLlDma_Bd *CurBdPtr;
+ unsigned BdCount;
+ unsigned BdPartialCount;
+ u32 BdStsCr;
+ u32 UserIpAppWord;
+
+ CurBdPtr = RingPtr->HwHead;
+ BdCount = 0;
+ BdPartialCount = 0;
+
+ /* If no BDs in work group, then there's nothing to search */
+ if (RingPtr->HwCnt == 0) {
+ *BdSetPtr = NULL;
+ return (0);
+ }
+
+ /* Starting at HwHead, keep moving forward in the list until:
+ * - A BD is encountered with its completed bit clear in the status
+ * word which means hardware has not completed processing of that
+ * BD.
+ * - A BD is encountered with its XLLDMA_USERIP_APPWORD_OFFSET field
+ * with value XLLDMA_USERIP_APPWORD_INITVALUE which means hardware
+ * has not completed updating the BD structure.
+ * - RingPtr->HwTail is reached
+ * - The number of requested BDs has been processed
+ */
+ while (BdCount < BdLimit) {
+ /* Read the status */
+ XLLDMA_CACHE_INVALIDATE(CurBdPtr);
+ BdStsCr = XLlDma_mBdRead(CurBdPtr,
+ XLLDMA_BD_STSCTRL_USR0_OFFSET);
+
+ /* If the hardware still hasn't processed this BD then we are
+ * done
+ */
+ if (!(BdStsCr & XLLDMA_BD_STSCTRL_COMPLETED_MASK)) {
+ break;
+ }
+
+ /* In RX channel case, check if XLLDMA_USERIP_APPWORD_OFFSET
+ * field of the BD has been updated. If not, RX channel has
+ * not completed updating the BD structure and we delay
+ * the processing of this BD to next time
+ */
+ if (RingPtr->IsRxChannel) {
+ UserIpAppWord = XLlDma_mBdRead(CurBdPtr,
+ XLLDMA_USERIP_APPWORD_OFFSET);
+ if (UserIpAppWord == XLLDMA_USERIP_APPWORD_INITVALUE) {
+ break;
+ }
+ }
+
+
+ BdCount++;
+
+ /* Hardware has processed this BD so check the "last" bit. If
+ * it is clear, then there are more BDs for the current packet.
+ * Keep a count of these partial packet BDs.
+ */
+ if (BdStsCr & XLLDMA_BD_STSCTRL_EOP_MASK) {
+ BdPartialCount = 0;
+ }
+ else {
+ BdPartialCount++;
+ }
+
+ /* Reached the end of the work group */
+ if (CurBdPtr == RingPtr->HwTail) {
+ break;
+ }
+
+ /* Move on to next BD in work group */
+ CurBdPtr = XLlDma_mBdRingNext(RingPtr, CurBdPtr);
+ }
+
+ /* Subtract off any partial packet BDs found */
+ BdCount -= BdPartialCount;
+
+ /* If BdCount is non-zero then BDs were found to return. Set return
+ * parameters, update pointers and counters, return success
+ */
+ if (BdCount) {
+ *BdSetPtr = RingPtr->HwHead;
+ RingPtr->HwCnt -= BdCount;
+ RingPtr->PostCnt += BdCount;
+ XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->HwHead, BdCount);
+ return (BdCount);
+ }
+ else {
+ *BdSetPtr = NULL;
+ return (0);
+ }
+}
+
+
+/*****************************************************************************/
+/**
+ * Frees a set of BDs that had been previously retrieved with
+ * XLlDma_BdRingFromHw().
+ *
+ * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
+ * @param NumBd is the number of BDs to free.
+ * @param BdSetPtr is the head of a list of BDs returned by
+ * XLlDma_BdRingFromHw().
+ *
+ * @return
+ * - XST_SUCCESS if the set of BDs was freed.
+ * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
+ * XLlDma_BdRingFromHw().
+ *
+ * @note This function should not be preempted by another XLlDma function call
+ * that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ * @internal
+ * This Interrupt handler provided by application MUST clear pending
+ * interrupts before handling them by calling the call back. Otherwise
+ * the following corner case could raise some issue:
+ *
+ * - A packet was transmitted and asserted an TX interrupt, and if
+ * this interrupt handler calls the call back before clears the
+ * interrupt, another packet could get transmitted (and assert the
+ * interrupt) between when the call back function returned and when
+ * the interrupt clearing operation begins, and the interrupt
+ * clearing operation will clear the interrupt raised by the second
+ * packet and will never process its corresponding buffer descriptors
+ * until a new interrupt occurs.
+ *
+ * Changing the sequence to "Clear interrupts, then handle" solves this
+ * issue. If the interrupt raised by the second packet is before the
+ * interrupt clearing operation, the descriptors associated with
+ * the second packet must have been finished by hardware and ready for
+ * the handling by the call back; otherwise, the interrupt raised by
+ * the second packet is after the interrupt clearing operation,
+ * the packet's buffer descriptors will be handled by the call back in
+ * current pass, if the descriptors are finished before the call back
+ * is invoked, or next pass otherwise.
+ *
+ * Please note that if the second packet is handled by the call back
+ * in current pass, the next pass could find no buffer descriptor
+ * finished by the hardware. (i.e., XLlDma_BdRingFromHw() returns 0).
+ * As XLlDma_BdRingFromHw() and XLlDma_BdRingFree() are used in pair,
+ * XLlDma_BdRingFree() covers this situation by checking if the BD
+ * list to free is empty
+ *****************************************************************************/
+int XLlDma_BdRingFree(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd * BdSetPtr)
+{
+ /* If the BD Set to free is empty, return immediately with value
+ * XST_SUCCESS. See the @internal comment block above for detailed
+ * information
+ */
+ if (NumBd == 0) {
+ return XST_SUCCESS;
+ }
+
+ /* Make sure we are in sync with XLlDma_BdRingFromHw() */
+ if ((RingPtr->PostCnt < NumBd) || (RingPtr->PostHead != BdSetPtr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Update pointers and counters */
+ RingPtr->FreeCnt += NumBd;
+ RingPtr->PostCnt -= NumBd;
+ XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->PostHead, NumBd);
+
+ return (XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+ * Check the internal data structures of the BD ring for the provided channel.
+ * The following checks are made:
+ *
+ * - Is the BD ring linked correctly in physical address space.
+ * - Do the internal pointers point to BDs in the ring.
+ * - Do the internal counters add up.
+ *
+ * The channel should be stopped prior to calling this function.
+ *
+ * @param RingPtr is a pointer to the descriptor ring to be worked on.
+ *
+ * @return
+ * - XST_SUCCESS if no errors were found.
+ * - XST_DMA_SG_NO_LIST if the ring has not been created.
+ * - XST_IS_STARTED if the channel is not stopped.
+ * - XST_DMA_SG_LIST_ERROR if a problem is found with the internal data
+ * structures. If this value is returned, the channel should be reset to
+ * avoid data corruption or system instability.
+ *
+ * @note This function should not be preempted by another XLlDma ring function
+ * call that modifies the BD space. It is the caller's responsibility to
+ * provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+int XLlDma_BdRingCheck(XLlDma_BdRing * RingPtr)
+{
+ u32 AddrV, AddrP; /* current virtual / expected physical BD address */
+ unsigned i;
+
+ /* Is the list created: AllCnt is non-zero only after the ring has
+ * been set up */
+ if (RingPtr->AllCnt == 0) {
+ return (XST_DMA_SG_NO_LIST);
+ }
+
+ /* Can't check if channel is running */
+ if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
+ return (XST_IS_STARTED);
+ }
+
+ /* RunState doesn't make sense: it must be either IS_STARTED or
+ * IS_STOPPED, so any other value indicates corruption */
+ else if (RingPtr->RunState != XST_DMA_SG_IS_STOPPED) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Verify internal pointers point to correct memory space: each group
+ * head must lie within [FirstBdAddr, LastBdAddr] */
+ AddrV = (u32) RingPtr->FreeHead;
+ if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->PreHead;
+ if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->HwHead;
+ if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->HwTail;
+ if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ AddrV = (u32) RingPtr->PostHead;
+ if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Verify internal counters add up: every BD must belong to exactly
+ * one of the four groups (hw, pre-work, free, post-work) */
+ if ((RingPtr->HwCnt + RingPtr->PreCnt + RingPtr->FreeCnt +
+ RingPtr->PostCnt) != RingPtr->AllCnt) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Verify BDs are linked correctly: walk the ring through virtual
+ * addresses while tracking the physical address each BD's next
+ * pointer should hold */
+ AddrV = RingPtr->FirstBdAddr;
+ AddrP = RingPtr->FirstBdPhysAddr + RingPtr->Separation;
+ for (i = 1; i < RingPtr->AllCnt; i++) {
+ XLLDMA_CACHE_INVALIDATE(AddrV);
+ /* Check next pointer for this BD. It should equal to the
+ * physical address of next BD
+ */
+ if (XLlDma_mBdRead(AddrV, XLLDMA_BD_NDESC_OFFSET) != AddrP) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* Move on to next BD */
+ AddrV += RingPtr->Separation;
+ AddrP += RingPtr->Separation;
+ }
+
+ XLLDMA_CACHE_INVALIDATE(AddrV);
+ /* Last BD should point back to the beginning of ring */
+ if (XLlDma_mBdRead(AddrV, XLLDMA_BD_NDESC_OFFSET) !=
+ RingPtr->FirstBdPhysAddr) {
+ return (XST_DMA_SG_LIST_ERROR);
+ }
+
+ /* No problems found */
+ return (XST_SUCCESS);
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xlldma_bdring.h
+*
+* This file contains DMA channel related structure and constant definition
+* as well as function prototypes. Each DMA channel is managed by a Buffer
+* Descriptor ring, and so XLlDma_BdRing is chosen as the symbol prefix used in
+* this file. See xlldma.h for more information.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 12/21/06 First release
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XLLDMA_BDRING_H /* prevent circular inclusions */
+#define XLLDMA_BDRING_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xlldma_hw.h"
+#include "xlldma_bd.h"
+
+/** Container structure for descriptor storage control. If address translation
+ * is enabled, then all addresses and pointers excluding FirstBdPhysAddr are
+ * expressed in terms of the virtual address.
+ */
+typedef struct {
+ u32 ChanBase; /**< Virtual base address of channel registers
+ */
+ u32 IsRxChannel; /**< Is this a receive channel ? */
+ u32 FirstBdPhysAddr; /**< Physical address of 1st BD in list */
+ u32 FirstBdAddr; /**< Virtual address of 1st BD in list */
+ u32 LastBdAddr; /**< Virtual address of last BD in the list */
+ u32 Length; /**< Total size of ring in bytes */
+ u32 RunState; /**< Flag to indicate channel is started;
+ either XST_DMA_SG_IS_STARTED or
+ XST_DMA_SG_IS_STOPPED */
+ u32 Separation; /**< Number of bytes between the starting
+ address of adjacent BDs */
+ XLlDma_Bd *FreeHead; /**< First BD in the free group */
+ XLlDma_Bd *PreHead; /**< First BD in the pre-work group */
+ XLlDma_Bd *HwHead; /**< First BD in the work group */
+ XLlDma_Bd *HwTail; /**< Last BD in the work group */
+ XLlDma_Bd *PostHead; /**< First BD in the post-work group */
+ XLlDma_Bd *BdaRestart; /**< BD to load when channel is started */
+ u32 FreeCnt; /**< Number of allocatable BDs in free group */
+ u32 PreCnt; /**< Number of BDs in pre-work group */
+ u32 HwCnt; /**< Number of BDs in work group */
+ u32 PostCnt; /**< Number of BDs in post-work group */
+ u32 AllCnt; /**< Total Number of BDs for channel; equals
+ FreeCnt + PreCnt + HwCnt + PostCnt */
+} XLlDma_BdRing;
+
+/*****************************************************************************/
+/**
+* Use this macro at initialization time to determine how many BDs will fit
+* within the given memory constraints.
+*
+* The results of this macro can be provided to XLlDma_BdRingCreate().
+*
+* @param Alignment specifies what byte alignment the BDs must fall on and
+* must be a power of 2 to get an accurate calculation (32, 64, 128,...)
+* @param Bytes is the number of bytes to be used to store BDs.
+*
+* @return Number of BDs that can fit in the given memory area
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingCntCalc(u32 Alignment, u32 Bytes)
+*
+******************************************************************************/
+#define XLlDma_mBdRingCntCalc(Alignment, Bytes) \
+ (u32)((Bytes)/((sizeof(XLlDma_Bd)+((Alignment)-1))&~((Alignment)-1)))
+
+
+/*****************************************************************************/
+/**
+* Use this macro at initialization time to determine how many bytes of memory
+* are required to contain a given number of BDs at a given alignment.
+*
+* @param Alignment specifies what byte alignment the BDs must fall on. This
+* parameter must be a power of 2 to get an accurate calculation (32, 64,
+* 128,...)
+* @param NumBd is the number of BDs to calculate memory size requirements for
+*
+* @return The number of bytes of memory required to create a BD list with the
+* given memory constraints.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingMemCalc(u32 Alignment, u32 NumBd)
+*
+******************************************************************************/
+#define XLlDma_mBdRingMemCalc(Alignment, NumBd) \
+ (u32)((sizeof(XLlDma_Bd)+((Alignment)-1))&~((Alignment)-1))*(NumBd)
+
+
+/****************************************************************************/
+/**
+* Return the total number of BDs allocated by this channel with
+* XLlDma_BdRingCreate().
+*
+* @param RingPtr is the BD ring to operate on.
+*
+* @return The total number of BDs allocated for this channel.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingGetCnt(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingGetCnt(RingPtr) ((RingPtr)->AllCnt)
+
+
+/****************************************************************************/
+/**
+* Return the number of BDs allocatable with XLlDma_BdRingAlloc() for pre-
+* processing.
+*
+* @param RingPtr is the BD ring to operate on.
+*
+* @return The number of BDs currently allocatable.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingGetFreeCnt(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingGetFreeCnt(RingPtr) ((RingPtr)->FreeCnt)
+
+
+/****************************************************************************/
+/**
+* Snap shot the latest BD a BD ring is processing.
+*
+* @param RingPtr is the BD ring to operate on.
+*
+* @return None
+*
+* @note
+* C-style signature:
+* void XLlDma_mBdRingSnapShotCurrBd(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingSnapShotCurrBd(RingPtr) \
+ { \
+ (RingPtr)->BdaRestart = \
+ (XLlDma_Bd *)XLlDma_mReadReg((RingPtr)->ChanBase, \
+ XLLDMA_CDESC_OFFSET); \
+ }
+
+
+/****************************************************************************/
+/**
+* Return the next BD in the ring.
+*
+* @param RingPtr is the BD ring to operate on.
+* @param BdPtr is the current BD.
+*
+* @return The next BD in the ring relative to the BdPtr parameter.
+*
+* @note
+* C-style signature:
+* XLlDma_Bd *XLlDma_mBdRingNext(XLlDma_BdRing* RingPtr, XLlDma_Bd *BdPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingNext(RingPtr, BdPtr) \
+ (((u32)(BdPtr) >= (RingPtr)->LastBdAddr) ? \
+ (XLlDma_Bd*)(RingPtr)->FirstBdAddr : \
+ (XLlDma_Bd*)((u32)(BdPtr) + (RingPtr)->Separation))
+
+
+/****************************************************************************/
+/**
+* Return the previous BD in the ring.
+*
+* @param InstancePtr is the DMA channel to operate on.
+* @param BdPtr is the current BD.
+*
+* @return The previous BD in the ring relative to the BdPtr parameter.
+*
+* @note
+* C-style signature:
+* XLlDma_Bd *XLlDma_mBdRingPrev(XLlDma_BdRing* RingPtr, XLlDma_Bd *BdPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingPrev(RingPtr, BdPtr) \
+ (((u32)(BdPtr) <= (RingPtr)->FirstBdAddr) ? \
+ (XLlDma_Bd*)(RingPtr)->LastBdAddr : \
+ (XLlDma_Bd*)((u32)(BdPtr) - (RingPtr)->Separation))
+
+/****************************************************************************/
+/**
+* Retrieve the contents of the channel status register XLLDMA_SR_OFFSET
+*
+* @param RingPtr is the channel instance to operate on.
+*
+* @return Current contents of SR_OFFSET
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingGetSr(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingGetSr(RingPtr) \
+ XLlDma_mReadReg((RingPtr)->ChanBase, XLLDMA_SR_OFFSET)
+
+
+/****************************************************************************/
+/**
+* Retrieve the contents of the channel control register XLLDMA_CR_OFFSET
+*
+* @param RingPtr is the channel instance to operate on.
+*
+* @return Current contents of CR_OFFSET
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingGetCr(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingGetCr(RingPtr) \
+ XLlDma_mReadReg((RingPtr)->ChanBase, XLLDMA_CR_OFFSET)
+
+
+/****************************************************************************/
+/**
+* Set the contents of the channel control register XLLDMA_CR_OFFSET. This
+* register does not affect the other DMA channel.
+*
+* @param RingPtr is the channel instance to operate on.
+* @param Data is the data to write to CR_OFFSET
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingSetCr(XLlDma_BdRing* RingPtr, u32 Data)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingSetCr(RingPtr, Data) \
+ XLlDma_mWriteReg((RingPtr)->ChanBase, XLLDMA_CR_OFFSET, (Data))
+
+
+/****************************************************************************/
+/**
+* Check if the current DMA channel is busy with a DMA operation.
+*
+* @param RingPtr is the channel instance to operate on.
+*
+* @return TRUE if the DMA is busy. FALSE otherwise
+*
+* @note
+* C-style signature:
+* XBoolean XLlDma_mBdRingBusy(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingBusy(RingPtr) \
+ ((XLlDma_mReadReg((RingPtr)->ChanBase, XLLDMA_SR_OFFSET) \
+ & XLLDMA_SR_ENGINE_BUSY_MASK) ? TRUE : FALSE)
+
+
+/****************************************************************************/
+/**
+* Set interrupt enable bits for a channel. This operation will modify the
+* XLLDMA_CR_OFFSET register.
+*
+* @param RingPtr is the channel instance to operate on.
+* @param Mask consists of the interrupt signals to enable. They are formed by
+* OR'ing one or more of the following bitmasks together:
+* XLLDMA_CR_IRQ_EN_MASK, XLLDMA_CR_IRQ_ERROR_EN_MASK,
+* XLLDMA_CR_IRQ_DELAY_EN_MASK, XLLDMA_CR_IRQ_COALESCE_EN_MASK and
+* XLLDMA_CR_IRQ_ALL_EN_MASK. Bits not specified in the mask are not
+* affected.
+*
+* @note
+* C-style signature:
+* void XLlDma_mBdRingIntEnable(XLlDma_BdRing* RingPtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingIntEnable(RingPtr, Mask) \
+ { \
+ u32 Reg = XLlDma_mReadReg((RingPtr)->ChanBase, \
+ XLLDMA_CR_OFFSET); \
+ Reg |= ((Mask) & XLLDMA_CR_IRQ_ALL_EN_MASK); \
+ XLlDma_mWriteReg((RingPtr)->ChanBase, XLLDMA_CR_OFFSET, Reg);\
+ }
+
+
+/****************************************************************************/
+/**
+* Clear interrupt enable bits for a channel. This operation will modify the
+* XLLDMA_CR_OFFSET register.
+*
+* @param RingPtr is the channel instance to operate on.
+* @param Mask consists of the interrupt signals to disable. They are formed
+* by OR'ing one or more of the following bitmasks together:
+* XLLDMA_CR_IRQ_EN_MASK, XLLDMA_CR_IRQ_ERROR_EN_MASK,
+* XLLDMA_CR_IRQ_DELAY_EN_MASK, XLLDMA_CR_IRQ_COALESCE_EN_MASK and
+* XLLDMA_CR_IRQ_ALL_EN_MASK. Bits not specified in the mask are not
+* affected.
+*
+* @note
+* C-style signature:
+* void XLlDma_mBdRingIntDisable(XLlDma_BdRing* RingPtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingIntDisable(RingPtr, Mask) \
+ { \
+ u32 Reg = XLlDma_mReadReg((RingPtr)->ChanBase, \
+ XLLDMA_CR_OFFSET); \
+ Reg &= ~((Mask) & XLLDMA_CR_IRQ_ALL_EN_MASK); \
+ XLlDma_mWriteReg((RingPtr)->ChanBase, XLLDMA_CR_OFFSET, Reg);\
+ }
+
+
+/****************************************************************************/
+/**
+* Get enabled interrupts of a channel.
+*
+* @param RingPtr is the channel instance to operate on.
+* @return Enabled interrupts of a channel. Use XLLDMA_CR_IRQ_* defined in
+* xlldma_hw.h to interpret this returned value.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingIntGetEnabled(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingIntGetEnabled(RingPtr) \
+ (XLlDma_mReadReg((RingPtr)->ChanBase, XLLDMA_CR_OFFSET) \
+ & XLLDMA_CR_IRQ_ALL_EN_MASK)
+
+
+/****************************************************************************/
+/**
+* Retrieve the contents of the channel's IRQ register XDMACR_IRQ_OFFSET. This
+* operation can be used to see which interrupts are pending.
+*
+* @param RingPtr is the channel instance to operate on.
+*
+* @return Current contents of the IRQ_OFFSET register. Use XLLDMA_IRQ_***
+* values defined in xlldma_hw.h to interpret the returned value.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingGetIrq(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingGetIrq(RingPtr) \
+ XLlDma_mReadReg((RingPtr)->ChanBase, XLLDMA_IRQ_OFFSET)
+
+
+/****************************************************************************/
+/**
+* Acknowledge asserted interrupts.
+*
+* @param RingPtr is the channel instance to operate on.
+* @param Mask are the interrupt signals to acknowledge and are made by Or'ing
+* one or more of the following bits: XLLDMA_IRQ_ERROR_MASK,
+* XLLDMA_IRQ_DELAY_MASK, XLLDMA_IRQ_COALESCE_MASK, XLLDMA_IRQ_ALL_MASK.
+* Any mask bit set for an unasserted interrupt has no effect.
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mBdRingAckIrq(XLlDma_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XLlDma_mBdRingAckIrq(RingPtr, Mask) \
+ XLlDma_mWriteReg((RingPtr)->ChanBase, XLLDMA_IRQ_OFFSET,\
+ (Mask) & XLLDMA_IRQ_ALL_MASK)
+
+/************************* Function Prototypes ******************************/
+
+/*
+ * Descriptor ring functions xlldma_bdring.c
+ */
+int XLlDma_BdRingCreate(XLlDma_BdRing * RingPtr, u32 PhysAddr,
+ u32 VirtAddr, u32 Alignment, unsigned BdCount);
+int XLlDma_BdRingCheck(XLlDma_BdRing * RingPtr);
+int XLlDma_BdRingClone(XLlDma_BdRing * RingPtr, XLlDma_Bd * SrcBdPtr);
+int XLlDma_BdRingAlloc(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd ** BdSetPtr);
+int XLlDma_BdRingUnAlloc(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd * BdSetPtr);
+int XLlDma_BdRingToHw(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd * BdSetPtr);
+unsigned XLlDma_BdRingFromHw(XLlDma_BdRing * RingPtr, unsigned BdLimit,
+ XLlDma_Bd ** BdSetPtr);
+int XLlDma_BdRingFree(XLlDma_BdRing * RingPtr, unsigned NumBd,
+ XLlDma_Bd * BdSetPtr);
+int XLlDma_BdRingStart(XLlDma_BdRing * RingPtr);
+int XLlDma_BdRingSetCoalesce(XLlDma_BdRing * RingPtr, u32 Counter, u32 Timer);
+void XLlDma_BdRingGetCoalesce(XLlDma_BdRing * RingPtr,
+ u32 *CounterPtr, u32 *TimerPtr);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xlldma_hw.h
+*
+* This header file contains identifiers and register-level driver functions (or
+* macros) that can be used to access the Local-Link Scatter-Gather Direct
+* Memory Access (LLDMA) device.
+*
+* For more information about the operation of this device, see the hardware
+* specification and documentation in the higher level driver xlldma.h source
+* code file.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 12/21/06 First release
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XLLDMA_HW_H /* prevent circular inclusions */
+#define XLLDMA_HW_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+
+/** @name Device Bus Type definition The constant
+ * CONFIG_XILINX_LLDMA_USE_DCR is used to inform this driver the type
+ * of the BUS the DMA device is on. If the DMA core is on DCR BUS
+ * using indirect addressing, which currently only happens on V5FX,
+ * then this option must be set. On other architectures where dma
+ * ports are accessed through memory mapped io, this must not be set.
+ *@{
+ */
+#ifdef CONFIG_XILINX_LLDMA_USE_DCR
+#include "xio_dcr.h"
+#else
+#include "xio.h"
+#endif
+/*@}*/
+
+/************************** Constant Definitions *****************************/
+
+/** @name Buffer Descriptor Alignment
+ * @{
+ */
+#define XLLDMA_BD_MINIMUM_ALIGNMENT 0x40 /**< Minimum byte alignment
+ requirement for descriptors to
+ satisfy both hardware/software
+ needs */
+/*@}*/
+
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit.
+ */
+
+#ifdef CONFIG_XILINX_LLDMA_USE_DCR
+
+/* DMA core is on DCR BUS */
+
+/** @name Device registers for DCR based systems.
+ * Offsets defined in DCR address space. TX and RX channels consist of
+ * identical registers
+ * @{
+ */
+#define XLLDMA_TX_OFFSET 0x00000000 /**< TX channel registers base
+ offset [0..7] */
+#define XLLDMA_RX_OFFSET 0x00000008 /**< RX channel registers base
+ offset [8..F] */
+#define XLLDMA_DMACR_OFFSET 0x00000010 /**< DMA control register */
+
+/* This set of registers are applicable for both channels. Add
+ * XLLDMA_TX_OFFSET to get to TX channel, and XLLDMA_RX_OFFSET to get to RX
+ * channel
+ */
+#define XLLDMA_NDESC_OFFSET 0x00000000 /**< Next descriptor pointer */
+#define XLLDMA_BUFA_OFFSET 0x00000001 /**< Current buffer address */
+#define XLLDMA_BUFL_OFFSET 0x00000002 /**< Current buffer length */
+#define XLLDMA_CDESC_OFFSET 0x00000003 /**< Current descriptor pointer */
+#define XLLDMA_TDESC_OFFSET 0x00000004 /**< Tail descriptor pointer */
+#define XLLDMA_CR_OFFSET 0x00000005 /**< Channel control */
+#define XLLDMA_IRQ_OFFSET 0x00000006 /**< Interrupt register */
+#define XLLDMA_SR_OFFSET 0x00000007 /**< Status */
+/*@}*/
+
+#else /* Non-DCR interface is used */
+
+/** @name Device registers for Non-DCR based systems.
+ * Offsets defined in Non-DCR address space. TX and RX channels consist of
+ * identical registers
+ * @{
+ */
+#define XLLDMA_TX_OFFSET 0x00000000 /**< TX channel registers base
+ offset */
+#define XLLDMA_RX_OFFSET 0x00000020 /**< RX channel registers base
+ offset */
+#define XLLDMA_DMACR_OFFSET 0x00000040 /**< DMA control register */
+
+/* This set of registers are applicable for both channels. Add
+ * XLLDMA_TX_OFFSET to get to TX channel, and XLLDMA_RX_OFFSET to get to RX
+ * channel
+ */
+#define XLLDMA_NDESC_OFFSET 0x00000000 /**< Next descriptor pointer */
+#define XLLDMA_BUFA_OFFSET 0x00000004 /**< Current buffer address */
+#define XLLDMA_BUFL_OFFSET 0x00000008 /**< Current buffer length */
+#define XLLDMA_CDESC_OFFSET 0x0000000C /**< Current descriptor pointer */
+#define XLLDMA_TDESC_OFFSET 0x00000010 /**< Tail descriptor pointer */
+#define XLLDMA_CR_OFFSET 0x00000014 /**< Channel control */
+#define XLLDMA_IRQ_OFFSET 0x00000018 /**< Interrupt register */
+#define XLLDMA_SR_OFFSET 0x0000001C /**< Status */
+
+/*@}*/
+
+#endif /* #ifdef CONFIG_XILINX_LLDMA_USE_DCR */
+
+/** @name Buffer Descriptor register offsets
+ * USR fields are defined by higher level IP. For example, checksum offload
+ * setup for EMAC type devices. The 1st 8 words are utilized by hardware. Any
+ * words after the 8th are for software use only.
+ * @{
+ */
+#define XLLDMA_BD_NDESC_OFFSET 0x00 /**< Next descriptor pointer */
+#define XLLDMA_BD_BUFA_OFFSET 0x04 /**< Buffer address */
+#define XLLDMA_BD_BUFL_OFFSET 0x08 /**< Buffer length */
+#define XLLDMA_BD_STSCTRL_USR0_OFFSET 0x0C /**< Status and Control and
+ hardware implementation
+ specific */
+#define XLLDMA_BD_USR1_OFFSET 0x10 /**< Hardware implementation
+ specific */
+#define XLLDMA_BD_USR2_OFFSET 0x14 /**< Hardware implementation
+ specific */
+#define XLLDMA_BD_USR3_OFFSET 0x18 /**< Hardware implementation
+ specific */
+#define XLLDMA_BD_USR4_OFFSET 0x1C /**< Hardware implementation
+ specific */
+#define XLLDMA_BD_ID_OFFSET 0x20 /**< Software application use */
+
+#define XLLDMA_BD_NUM_WORDS 9 /**< Number of 32-bit words that
+ make up a full BD */
+#define XLLDMA_BD_HW_NUM_WORDS 8 /**< Number of 32-bit words that
+ make up the hardware
+ accessible portion of a BD */
+#define XLLDMA_BD_HW_NUM_BYTES 32 /**< Number of bytes that make up
+ the hardware accessible
+ portion of a BD */
+/*@}*/
+
+
+/* Register masks. The following constants define bit locations of various
+ * control bits in the registers. Constants are not defined for those registers
+ * that have a single bit field representing all 32 bits. For further
+ * information on the meaning of the various bit masks, refer to the hardware
+ * spec.
+ */
+
+
+/** @name Bitmasks of XLLDMA_TX_CR_OFFSET and XLLDMA_RX_CR_OFFSET registers
+ * @{
+ */
+#define XLLDMA_CR_IRQ_TIMEOUT_MASK 0xFF000000 /**< Interrupt coalesce
+ waitbound timeout */
+#define XLLDMA_CR_IRQ_COUNT_MASK 0x00FF0000 /**< Interrupt coalesce
+ count threshold */
+#define XLLDMA_CR_MSB_ADDR_MASK 0x0000F000 /**< MSB address of DMA
+ buffers and descriptors
+ for 36 bit
+ addressing */
+#define XLLDMA_CR_APP_EN_MASK 0x00000800 /**< Application data mask
+ enable */
+#define XLLDMA_CR_USE_1_BIT_CNT_MASK 0x00000400 /**< Turn 4 and 2 bit
+ interrupt counters into
+ 1 bit counters */
+#define XLLDMA_CR_USE_INT_ON_END_MASK 0x00000200 /**< Use interrupt-on-end */
+#define XLLDMA_CR_LD_IRQ_CNT_MASK 0x00000100 /**< Load IRQ_COUNT */
+#define XLLDMA_CR_IRQ_EN_MASK 0x00000080 /**< Master interrupt
+ enable */
+#define XLLDMA_CR_IRQ_ERROR_EN_MASK 0x00000004 /**< Enable error
+ interrupt */
+#define XLLDMA_CR_IRQ_DELAY_EN_MASK 0x00000002 /**< Enable coalesce delay
+ interrupt */
+#define XLLDMA_CR_IRQ_COALESCE_EN_MASK 0x00000001 /**< Enable coalesce count
+ interrupt */
+#define XLLDMA_CR_IRQ_ALL_EN_MASK 0x00000087 /**< All interrupt enable
+ bits */
+
+/* Shift constants for selected masks */
+#define XLLDMA_CR_IRQ_TIMEOUT_SHIFT 24
+#define XLLDMA_CR_IRQ_COUNT_SHIFT 16
+#define XLLDMA_CR_MSB_ADDR_SHIFT 12
+
+/*@}*/
+
+
+/** @name Bitmasks of XLLDMA_TX_IRQ_OFFSET & XLLDMA_RX_IRQ_OFFSET registers
+ * @{
+ */
+#define XLLDMA_IRQ_WRQ_EMPTY_MASK 0x00004000 /**< Write Command Queue
+ Empty -- RX channel
+ Only */
+#define XLLDMA_IRQ_COALESCE_COUNTER_MASK 0x00003C00 /**< Coalesce IRQ 4 bit
+ counter */
+#define XLLDMA_IRQ_DELAY_COUNTER_MASK 0x00000300 /**< Coalesce delay IRQ 2
+ bit counter */
+#define XLLDMA_IRQ_PLB_RD_ERROR_MASK 0x00000010 /**< PLB Read Error IRQ */
+#define XLLDMA_IRQ_PLB_WR_ERROR_MASK 0x00000008 /**< PLB Write Error IRQ */
+#define XLLDMA_IRQ_ERROR_MASK 0x00000004 /**< Error IRQ */
+#define XLLDMA_IRQ_DELAY_MASK 0x00000002 /**< Coalesce delay IRQ */
+#define XLLDMA_IRQ_COALESCE_MASK 0x00000001 /**< Coalesce threshold
+ IRQ */
+#define XLLDMA_IRQ_ALL_ERR_MASK 0x0000001C /**< All error interrupt */
+#define XLLDMA_IRQ_ALL_MASK 0x0000001F /**< All interrupt bits */
+
+/* Shift constants for selected masks */
+#define XLLDMA_IRQ_COALESCE_COUNTER_SHIFT 10
+#define XLLDMA_IRQ_DELAY_COUNTER_SHIFT 8
+
+/*@}*/
+
+
+/** @name Bitmasks of XLLDMA_TX_SR_OFFSET and XLLDMA_RX_SR_OFFSET registers
+ * @{
+ */
+#define XLLDMA_SR_IRQ_ON_END_MASK 0x00000040 /**< IRQ on end has occurred */
+#define XLLDMA_SR_STOP_ON_END_MASK 0x00000020 /**< Stop on end has occurred */
+#define XLLDMA_SR_COMPLETED_MASK 0x00000010 /**< BD completed */
+#define XLLDMA_SR_SOP_MASK 0x00000008 /**< Current BD has SOP set */
+#define XLLDMA_SR_EOP_MASK 0x00000004 /**< Current BD has EOP set */
+#define XLLDMA_SR_ENGINE_BUSY_MASK 0x00000002 /**< Channel is busy */
+/*@}*/
+
+
+/** @name Bitmasks associated with XLLDMA_DMACR_OFFSET register
+ * @{
+ */
+#define XLLDMA_DMACR_TX_PAUSE_MASK 0x20000000 /**< Pause TX channel
+ */
+#define XLLDMA_DMACR_RX_PAUSE_MASK 0x10000000 /**< Pause RX channel
+ */
+#define XLLDMA_DMACR_PLB_ERR_DIS_MASK 0x00000020 /**< Disable PLB
+ error detection
+ */
+#define XLLDMA_DMACR_RX_OVERFLOW_ERR_DIS_MASK 0x00000010 /**< Disable error
+ when 2 or 4 bit
+ coalesce counter
+ overflows */
+#define XLLDMA_DMACR_TX_OVERFLOW_ERR_DIS_MASK 0x00000008 /**< Disable error
+ when 2 or 4 bit
+ coalesce counter
+ overflows */
+#define XLLDMA_DMACR_TAIL_PTR_EN_MASK 0x00000004 /**< Enable use of
+ tail pointer
+ register */
+#define XLLDMA_DMACR_EN_ARB_HOLD_MASK 0x00000002 /**< Enable
+ arbitration
+ hold */
+#define XLLDMA_DMACR_SW_RESET_MASK 0x00000001 /**< Assert Software
+ reset for both
+ channels */
+/*@}*/
+
+
+/** @name Bitmasks of XLLDMA_BD_STSCTRL_USR0_OFFSET descriptor word
+ * @{
+ */
+#define XLLDMA_BD_STSCTRL_ERROR_MASK 0x80000000 /**< DMA error */
+#define XLLDMA_BD_STSCTRL_IOE_MASK 0x40000000 /**< Interrupt on end */
+#define XLLDMA_BD_STSCTRL_SOE_MASK 0x20000000 /**< Stop on end */
+#define XLLDMA_BD_STSCTRL_COMPLETED_MASK 0x10000000 /**< DMA completed */
+#define XLLDMA_BD_STSCTRL_SOP_MASK 0x08000000 /**< Start of packet */
+#define XLLDMA_BD_STSCTRL_EOP_MASK 0x04000000 /**< End of packet */
+#define XLLDMA_BD_STSCTRL_BUSY_MASK 0x02000000 /**< DMA channel busy */
+
+#define XLLDMA_BD_STSCTRL_MASK 0xFF000000 /**< Status/Control field
+ */
+#define XLLDMA_BD_STSCTRL_USR0_MASK 0x00FFFFFF /**< User field #0 */
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+#ifdef CONFIG_XILINX_LLDMA_USE_DCR
+
+/* DCR interface is used */
+
+#define XLlDma_In32 XIo_DcrIn
+#define XLlDma_Out32 XIo_DcrOut
+
+#else
+
+/* Non-DCR interface is used */
+
+#define XLlDma_In32 XIo_In32
+#define XLlDma_Out32 XIo_Out32
+
+#endif
+
+/*****************************************************************************/
+/**
+*
+* Read the given register.
+*
+* @param BaseAddress is the base address of the device
+* @param RegOffset is the register offset to be read
+*
+* @return The 32-bit value of the register
+*
+* @note
+* C-style signature:
+* u32 XLlDma_mReadReg(u32 BaseAddress, u32 RegOffset)
+*
+******************************************************************************/
+#define XLlDma_mReadReg(BaseAddress, RegOffset) \
+ XLlDma_In32((BaseAddress) + (RegOffset))
+
+/*****************************************************************************/
+/**
+*
+* Write the given register.
+*
+* @param BaseAddress is the base address of the device
+* @param RegOffset is the register offset to be written
+* @param Data is the 32-bit value to write to the register
+*
+* @return None.
+*
+* @note
+* C-style signature:
+* void XLlDma_mWriteReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+*
+******************************************************************************/
+#define XLlDma_mWriteReg(BaseAddress, RegOffset, Data) \
+ XLlDma_Out32((BaseAddress) + (RegOffset), (Data))
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2007 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xlldma_userip.h
+*
+* This file is for the User-IP core (like Local-Link TEMAC) to define constants
+* that are the User-IP core specific. DMA driver requires the constants to work
+* correctly. Two constants must be defined in this file:
+*
+* - XLLDMA_USERIP_APPWORD_OFFSET:
+*
+* This constant defines a user word the User-IP always updates in the RX
+* Buffer Descriptors (BD) during any Receive transaction.
+*
+* The DMA driver initializes this chosen user word of any RX BD to the
+* pre-defined value (see XLLDMA_USERIP_APPWORD_INITVALUE below) before
+* giving it to the RX channel. The DMA relies on its update (by the
+* User-IP) to ensure the BD has been completed by the RX channel besides
+* checking the COMPLETE bit in XLLDMA_BD_STSCTRL_USR0_OFFSET field (see
+* xlldma_hw.h).
+*
+* The only valid options for this constant are XLLDMA_BD_USR1_OFFSET,
+* XLLDMA_BD_USR2_OFFSET, XLLDMA_BD_USR3_OFFSET and XLLDMA_BD_USR4_OFFSET.
+*
+* If the User-IP does not update any of the option fields above, the DMA
+* driver will not work properly.
+*
+* - XLLDMA_USERIP_APPWORD_INITVALUE:
+*
+* This constant defines the value the DMA driver uses to populate the
+* XLLDMA_USERIP_APPWORD_OFFSET field (see above) in any RX BD before giving
+* the BD to the RX channel for receive transaction.
+*
+* It must be ensured that the User-IP will always populate a different
+* value from this constant into the XLLDMA_USERIP_APPWORD_OFFSET field at
+* the end of any receive transaction. Failing to do so will cause the
+* DMA driver to work improperly.
+*
+* If the User-IP uses a different setting, the correct setting must be defined as
+* a compiler option used in the Makefile. In either case the default
+* definition of the constants in this file will be discarded.
+*
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 02/21/07 First release
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XLLDMA_USERIP_H /* prevent circular inclusions */
+#define XLLDMA_USERIP_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xlldma_hw.h"
+
+/************************** Constant Definitions *****************************/
+
+#ifndef XLLDMA_USERIP_APPWORD_OFFSET
+#define XLLDMA_USERIP_APPWORD_OFFSET XLLDMA_BD_USR4_OFFSET
+#endif
+
+#ifndef XLLDMA_USERIP_APPWORD_INITVALUE
+#define XLLDMA_USERIP_APPWORD_INITVALUE 0xFFFFFFFF
+#endif
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file llfifo.c
+ *
+ * The Xilinx local link FIFO driver component. This driver supports the
+ * Xilinx xps_ll_fifo core.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 10/13/06 First release
+ * </pre>
+ ******************************************************************************/
+
+
+/***************************** Include Files *********************************/
+
+#include <linux/string.h>
+
+#include "xllfifo_hw.h"
+#include "xllfifo.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions *****************************/
+
+#define FIFO_WIDTH_BYTES 4
+
+/*
+ * Implementation Notes:
+ *
+ * This Fifo driver makes use of a byte streamer driver (xstreamer.h). The code
+ * is structured like so:
+ *
+ * +--------------------+
+ * | llfifo |
+ * | +----------------+
+ * | | +--------------+
+ * | | | xstreamer |
+ * | | +--------------+
+ * | +----------------+
+ * | |
+ * +--------------------+
+ *
+ * Initialization
+ * At initialization time this driver (llfifo) sets up the streamer objects to
+ * use routines in this driver (llfifo) to perform the actual I/O to the H/W
+ * FIFO core.
+ *
+ * Operation
+ * Once the streamer objects are set up, the API routines in this driver, just
+ * call through to the streamer driver to perform the read/write operations.
+ * The streamer driver will eventually make calls back into the routines (which
+ * reside in this driver) given at initialization to perform the actual I/O.
+ *
+ * Interrupts
+ * Interrupts are handled in the OS/Application layer above this driver.
+ */
+
+xdbg_stmnt(u32 _xllfifo_rr_value;)
+xdbg_stmnt(u32 _xllfifo_ipie_value;)
+xdbg_stmnt(u32 _xllfifo_ipis_value;)
+
+/****************************************************************************/
+/*
+*
+* XLlFifo_RxGetWord reads one 32 bit word from the FIFO specified by
+* <i>InstancePtr</i>.
+*
+* XLlFifo_RxGetLen or XLlFifo_iRxGetLen must be called before calling
+* XLlFifo_RxGetWord. Otherwise, the hardware will raise an <i>Over Read
+* Exception</i>.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_RxGetWord returns the 32 bit word read from the FIFO.
+*
+* @note
+* C-style signature:
+* u32 XLlFifo_RxGetWord(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_RxGetWord(InstancePtr) \
+ XLlFifo_ReadReg((InstancePtr)->BaseAddress, XLLF_RDFD_OFFSET)
+
+/****************************************************************************/
+/*
+*
+* XLlFifo_TxPutWord writes the 32 bit word, <i>Word</i> to the FIFO specified by
+* <i>InstancePtr</i>.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_TxPutWord(XLlFifo *InstancePtr, u32 Word)
+*
+*****************************************************************************/
+#define XLlFifo_TxPutWord(InstancePtr, Word) \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_TDFD_OFFSET, \
+ (Word))
+
+/*****************************************************************************/
+/*
+*
+* XLlFifo_iRxOccupancy returns the number of 32-bit words available (occupancy)
+* to be read from the receive channel of the FIFO, specified by
+* <i>InstancePtr</i>.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_iRxOccupancy returns the occupancy count in 32-bit words for
+* the specified FIFO.
+*
+******************************************************************************/
+static u32 XLlFifo_iRxOccupancy(XLlFifo *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr);
+
+ return XLlFifo_ReadReg(InstancePtr->BaseAddress,
+ XLLF_RDFO_OFFSET);
+}
+
+/*****************************************************************************/
+/*
+*
+* XLlFifo_iRxGetLen notifies the hardware that the program is ready to receive the
+* next frame from the receive channel of the FIFO specified by <i>InstancePtr</i>.
+*
+* Note that the program must first call XLlFifo_iRxGetLen before pulling data
+* out of the receive channel of the FIFO with XLlFifo_Read.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_iRxGetLen returns the number of bytes available in the next
+* frame.
+*
+******************************************************************************/
+static u32 XLlFifo_iRxGetLen(XLlFifo *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr);
+
+ return XLlFifo_ReadReg(InstancePtr->BaseAddress,
+ XLLF_RLF_OFFSET);
+}
+
+/*****************************************************************************/
+/*
+*
+* XLlFifo_iRead_Aligned reads, <i>WordCount</i>, words from the FIFO referenced by
+* <i>InstancePtr</i> to the block of memory, referenced by <i>BufPtr</i>.
+*
+* XLlFifo_iRead_Aligned assumes that <i>BufPtr</i> is already aligned according
+* to the following hardware limitations:
+* ppc - aligned on 32 bit boundaries to avoid performance penalties
+* from unaligned exception handling.
+* microblaze - aligned on 32 bit boundaries as microblaze does not handle
+* unaligned transfers.
+*
+* Care must be taken to ensure that the number of words read with one or more
+* calls to XLlFifo_Read() does not exceed the number of bytes (rounded up to
+* the nearest whole 32 bit word) available given from the last call to
+* XLlFifo_RxGetLen().
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param BufPtr specifies the memory address to place the data read.
+*
+* @param WordCount specifies the number of 32 bit words to read.
+*
+* @return XLlFifo_iRead_Aligned always returns XST_SUCCESS. Error handling is
+* otherwise handled through hardware exceptions and interrupts.
+*
+* @note
+*
+* C Signature: int XLlFifo_iRead_Aligned(XLlFifo *InstancePtr,
+* void *BufPtr, unsigned WordCount);
+*
+******************************************************************************/
+/* static */ int XLlFifo_iRead_Aligned(XLlFifo *InstancePtr, void *BufPtr,
+ unsigned WordCount)
+{
+ unsigned WordsRemaining = WordCount;
+ u32 *BufPtrIdx = BufPtr;
+
+ xdbg_printf(XDBG_DEBUG_FIFO_RX, "XLlFifo_iRead_Aligned: start\n");
+ XASSERT_NONVOID(InstancePtr);
+ XASSERT_NONVOID(BufPtr);
+	/* assert buffer is 32 bit aligned */
+ XASSERT_NONVOID(((unsigned)BufPtr & 0x3) == 0x0);
+ xdbg_printf(XDBG_DEBUG_FIFO_RX, "XLlFifo_iRead_Aligned: after asserts\n");
+
+ while (WordsRemaining) {
+/* xdbg_printf(XDBG_DEBUG_FIFO_RX,
+ "XLlFifo_iRead_Aligned: WordsRemaining: %d\n",
+ WordsRemaining);
+*/
+ *BufPtrIdx = XLlFifo_RxGetWord(InstancePtr);
+ BufPtrIdx++;
+ WordsRemaining--;
+ }
+ xdbg_printf(XDBG_DEBUG_FIFO_RX,
+ "XLlFifo_iRead_Aligned: returning SUCCESS\n");
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/*
+*
+* XLlFifo_iTxVacancy returns the number of unused 32 bit words available
+* (vacancy) in the send channel of the FIFO, specified by <i>InstancePtr</i>.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_iTxVacancy returns the vacancy count in 32-bit words for
+* the specified FIFO.
+*
+*****************************************************************************/
+static u32 XLlFifo_iTxVacancy(XLlFifo *InstancePtr)
+{
+ XASSERT_NONVOID(InstancePtr);
+
+ return XLlFifo_ReadReg(InstancePtr->BaseAddress,
+ XLLF_TDFV_OFFSET);
+}
+
+/*****************************************************************************/
+/*
+*
+* XLlFifo_iTxSetLen begins a hardware transfer of data out of the transmit
+* channel of the FIFO, specified by <i>InstancePtr</i>. <i>Bytes</i> specifies the number
+* of bytes in the frame to transmit.
+*
+* Note that <i>Bytes</i> (rounded up to the nearest whole 32 bit word) must be the same
+* number of words just written using one or more calls to
+* XLlFifo_iWrite_Aligned()
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param Bytes specifies the number of bytes to transmit.
+*
+* @return N/A
+*
+******************************************************************************/
+static void XLlFifo_iTxSetLen(XLlFifo *InstancePtr, u32 Bytes)
+{
+ XASSERT_VOID(InstancePtr);
+
+ XLlFifo_WriteReg(InstancePtr->BaseAddress, XLLF_TLF_OFFSET,
+ Bytes);
+}
+
+/*****************************************************************************/
+/*
+*
+* XLlFifo_iWrite_Aligned writes, <i>WordCount</i>, words to the FIFO referenced by
+* <i>InstancePtr</i> from the block of memory, referenced by <i>BufPtr</i>.
+*
+* XLlFifo_iWrite_Aligned assumes that <i>BufPtr</i> is already aligned according
+* to the following hardware limitations:
+* ppc - aligned on 32 bit boundaries to avoid performance penalties
+* from unaligned exception handling.
+* microblaze - aligned on 32 bit boundaries as microblaze does not handle
+* unaligned transfers.
+*
+* Care must be taken to ensure that the number of words written with one or
+* more calls to XLlFifo_iWrite_Aligned() matches the number of bytes (rounded up
+* to the nearest whole 32 bit word) given in the next call to
+* XLlFifo_iTxSetLen().
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param BufPtr specifies the memory address of the data to write.
+*
+* @param WordCount specifies the number of 32 bit words to write.
+*
+* @return XLlFifo_iWrite_Aligned always returns XST_SUCCESS. Error handling is
+* otherwise handled through hardware exceptions and interrupts.
+*
+* @note
+*
+* C Signature: int XLlFifo_iWrite_Aligned(XLlFifo *InstancePtr,
+* void *BufPtr, unsigned WordCount);
+*
+******************************************************************************/
+/* static */ int XLlFifo_iWrite_Aligned(XLlFifo *InstancePtr, void *BufPtr,
+ unsigned WordCount)
+{
+ unsigned WordsRemaining = WordCount;
+ u32 *BufPtrIdx = BufPtr;
+
+ xdbg_printf(XDBG_DEBUG_FIFO_TX,
+ "XLlFifo_iWrite_Aligned: Inst: %p; Buff: %p; Count: %d\n",
+ InstancePtr, BufPtr, WordCount);
+ XASSERT_NONVOID(InstancePtr);
+ XASSERT_NONVOID(BufPtr);
+	/* assert buffer is 32 bit aligned */
+ XASSERT_NONVOID(((unsigned)BufPtr & 0x3) == 0x0);
+
+ xdbg_printf(XDBG_DEBUG_FIFO_TX,
+ "XLlFifo_iWrite_Aligned: WordsRemaining: %d\n",
+ WordsRemaining);
+ while (WordsRemaining) {
+ XLlFifo_TxPutWord(InstancePtr, *BufPtrIdx);
+ BufPtrIdx++;
+ WordsRemaining--;
+ }
+
+ xdbg_printf(XDBG_DEBUG_FIFO_TX,
+ "XLlFifo_iWrite_Aligned: returning SUCCESS\n");
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* XLlFifo_Initialize initializes an XPS_ll_Fifo device along with the
+* <i>InstancePtr</i> that references it.
+*
+* @param InstancePtr references the memory instance to be associated with
+* the FIFO device upon initialization.
+*
+* @param BaseAddress is the processor address used to access the
+* base address of the Fifo device.
+*
+* @return N/A
+*
+******************************************************************************/
+void XLlFifo_Initialize(XLlFifo *InstancePtr, u32 BaseAddress)
+{
+ XASSERT_VOID(InstancePtr);
+ XASSERT_VOID(BaseAddress);
+
+ /* Clear instance memory */
+ memset(InstancePtr, 0, sizeof(XLlFifo));
+
+ /*
+ * We don't care about the physical base address, just copy the
+ * processor address over it.
+ */
+ InstancePtr->BaseAddress = BaseAddress;
+
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ XLlFifo_TxReset(InstancePtr);
+ XLlFifo_RxReset(InstancePtr);
+
+ XStrm_RxInitialize(&(InstancePtr->RxStreamer), FIFO_WIDTH_BYTES,
+ (void *)InstancePtr,
+ (XStrm_XferFnType)XLlFifo_iRead_Aligned,
+ (XStrm_GetLenFnType)XLlFifo_iRxGetLen,
+ (XStrm_GetOccupancyFnType)XLlFifo_iRxOccupancy);
+
+ XStrm_TxInitialize(&(InstancePtr->TxStreamer), FIFO_WIDTH_BYTES,
+ (void *)InstancePtr,
+ (XStrm_XferFnType)XLlFifo_iWrite_Aligned,
+ (XStrm_SetLenFnType)XLlFifo_iTxSetLen,
+ (XStrm_GetVacancyFnType)XLlFifo_iTxVacancy);
+}
+
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+ *
+ * @file llfifo.h
+ *
+ * The Xilinx Dual Channel Fifo driver component. This driver supports the
+ * Virtex-5(TM) and Virtex-4(TM) XPS_ll_Fifo.
+ *
+ * For a full description of the FIFO features, please see the HW spec. This driver
+ * supports the following features:
+ * - Memory mapped access to host interface registers
+ * - API for polled frame transfers
+ * - API for interrupt driven frame transfers
+ * - Virtual memory support
+ * - Full duplex operation
+ *
+ * <h2>Driver Description</h2>
+ *
+ * This driver enables higher layer software to access the XPS_llFifo core
+ * using any alignment in the data buffers.
+ *
+ * This driver supports send and receive channels in the same instance
+ * structure in the same fashion as the hardware core.
+ *
+ * <h2>Initialization</h2>
+ *
+ * An instance of this driver is initialized using a call to Initialize().
+ *
+ * <h2>Usage</h2>
+ *
+ * It is fairly simple to use the API provided by this FIFO driver. The
+ * only somewhat tricky part is that the calling code must correctly call
+ * a couple routines in the right sequence for receive and transmit.
+ *
+ * This sequence is described here. Check the routine functional
+ * descriptions for information on how to use a specific API routine.
+ *
+ * <h3>Receive</h3>
+ *
+ * A frame is received by using the following sequence:<br>
+ * 1) call XLlFifo_RxGetLen() to get the length of the next incoming frame<br>
+ * 2) call XLlFifo_Read() one or more times to read the number of bytes
+ * reported by XLlFifo_RxGetLen().<br>
+ *
+ * For example:
+ * <pre>
+ * frame_len = XLlFifo_RxGetLen(&RxInstance);
+ * while (frame_len) {
+ * unsigned bytes = min(sizeof(buffer), frame_len);
+ * XLlFifo_Read(&RxInstance, buffer, bytes);
+ * // ********
+ * // do something with buffer here
+ * // ********
+ * frame_len -= bytes;
+ * }
+ * </pre>
+ *
+ * This FIFO hardware core does <b>not</b> support a sequence where the
+ * calling code calls RxGetLen() twice in a row and then receive the data
+ * for two frames. Each frame must be read in by calling RxGetLen() just
+ * prior to reading the data.
+ *
+ * <h3>Transmit</h3>
+ * A frame is transmitted by using the following sequence:<br>
+ * 1) call XLlFifo_Write() one or more times to write all the of bytes in
+ * the next frame.<br>
+ * 2) call XLlFifo_TxSetLen() to begin the transmission of frame just
+ * written.<br>
+ *
+ * For example:
+ * <pre>
+ * frame_left = frame_len;
+ * while (frame_left) {
+ * unsigned bytes = min(sizeof(buffer), frame_left);
+ * XLlFifo_Write(&TxInstance, buffer, bytes);
+ * // ********
+ * // do something here to refill buffer
+ * // ********
+ * }
+ *       XLlFifo_TxSetLen(&TxInstance, frame_len);
+ * </pre>
+ *
+ * This FIFO hardware core does <b>not</b> support a sequence where the
+ * calling code writes the data for two frames and then calls TxSetLen()
+ * twice in a row. Each frame must be written by writing the data for one
+ * frame and then calling TxSetLen().
+ *
+ * <h2>Interrupts</h2>
+ * This driver does not handle interrupts from the FIFO hardware. The
+ * software layer above may make use of the interrupts by setting up its
+ * own handlers for the interrupts.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 10/12/06 First release
+ * </pre>
+ *
+ *****************************************************************************/
+#ifndef XLLFIFO_H /* prevent circular inclusions */
+#define XLLFIFO_H /* by using preprocessor symbols */
+
+/* force C linkage */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xllfifo_hw.h"
+#include "xstreamer.h"
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * This typedef defines a run-time instance of an XLlFifo device.
+ */
+typedef struct XLlFifo {
+ u32 BaseAddress; /**< BaseAddress is the physical base address of the
+ * device's registers
+ */
+
+ u32 IsReady; /**< IsReady is non-zero if the driver instance
+ * has been initialized.
+ */
+ XStrm_RxFifoStreamer RxStreamer; /**< RxStreamer is the byte streamer
+ * instance for the receive channel.
+ */
+ XStrm_TxFifoStreamer TxStreamer; /**< TxStreamer is the byte streamer
+ * instance for the transmit channel.
+ */
+} XLlFifo;
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_Reset resets both the Tx and Rx channels and the local link interface
+* of the FIFO specified by <i>InstancePtr</i>. XLlFifo_Reset also sends a
+* reset pulse to the downstream device (e.g. TEMAC). XLlFifo_Reset drops any
+* bytes in the FIFO not yet retrieved. XLlFifo_Reset drops any bytes in the FIFO
+* not yet transmitted.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_Reset(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_Reset(InstancePtr) \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_LLR_OFFSET, \
+ XLLF_LLR_RESET_MASK)
+
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_Status returns a bit mask of the interrupt status register (ISR)
+* for the FIFO specified by <i>InstancePtr</i>. XLlFifo_Status can be used
+* to query the status of the FIFO without having to have interrupts enabled.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return	XLlFifo_Status returns a bit mask of the status conditions.
+* The mask will be a set of bitwise or'd values from the
+* <code>XLLF_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+*		u32 XLlFifo_Status(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_Status(InstancePtr) \
+ XLlFifo_ReadReg((InstancePtr)->BaseAddress, XLLF_ISR_OFFSET)
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_IntEnable enables the interrupts specified in <i>Mask</i> for the
+* FIFO specified by <i>InstancePtr</i>. The corresponding interrupt for each bit
+* set to 1 in <i>Mask</i>, will be enabled.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param Mask contains a bit mask of the interrupts to enable. The mask
+* can be formed using a set of bitwise or'd values from the
+* <code>XLLF_INT_*_MASK</code> preprocessor symbols.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_IntEnable(XLlFifo *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlFifo_IntEnable(InstancePtr, Mask) \
+{ \
+ u32 Reg = XLlFifo_ReadReg((InstancePtr)->BaseAddress, \
+ XLLF_IER_OFFSET); \
+ Reg |= ((Mask) & XLLF_INT_ALL_MASK); \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_IER_OFFSET, \
+ Reg); \
+}
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_IntDisable disables the interrupts specified in <i>Mask</i> for the
+* FIFO specified by <i>InstancePtr</i>. The corresponding interrupt for each bit
+* set to 1 in <i>Mask</i>, will be disabled. In other words, XLlFifo_IntDisable
+* uses the "set a bit to clear it" scheme.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param Mask contains a bit mask of the interrupts to disable. The mask
+* can be formed using a set of bitwise or'd values from the
+* <code>XLLF_INT_*_MASK</code> preprocessor symbols.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_IntDisable(XLlFifo *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlFifo_IntDisable(InstancePtr, Mask) \
+{ \
+ u32 Reg = XLlFifo_ReadReg((InstancePtr)->BaseAddress, \
+ XLLF_IER_OFFSET); \
+ Reg &= ~((Mask) & XLLF_INT_ALL_MASK); \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_IER_OFFSET, \
+ Reg); \
+}
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_IntPending returns a bit mask of the pending interrupts for the
+* FIFO specified by <i>InstancePtr</i>. Each bit set to 1 in the return value
+* represents a pending interrupt.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_IntPending returns a bit mask of the interrupts that are
+* pending. The mask will be a set of bitwise or'd values from the
+* <code>XLLF_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* u32 XLlFifo_IntPending(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#ifdef DEBUG
+extern u32 _xllfifo_ipie_value;
+extern u32 _xllfifo_ipis_value;
+#define XLlFifo_IntPending(InstancePtr) \
+ (_xllfifo_ipie_value = XLlFifo_ReadReg( \
+ (InstancePtr)->BaseAddress, XLLF_IER_OFFSET), \
+ _xllfifo_ipis_value = XLlFifo_ReadReg( \
+ (InstancePtr)->BaseAddress, XLLF_ISR_OFFSET), \
+ (_xllfifo_ipie_value & _xllfifo_ipis_value))
+#else
+#define XLlFifo_IntPending(InstancePtr) \
+ (XLlFifo_ReadReg((InstancePtr)->BaseAddress, XLLF_IER_OFFSET) & \
+ XLlFifo_ReadReg((InstancePtr)->BaseAddress, XLLF_ISR_OFFSET))
+#endif
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_IntClear clears pending interrupts specified in <i>Mask</i> for the
+* FIFO specified by <i>InstancePtr</i>. The corresponding pending interrupt for
+* each bit set to 1 in <i>Mask</i>, will be cleared. In other words,
+* XLlFifo_IntClear uses the "set a bit to clear it" scheme.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param Mask contains a bit mask of the pending interrupts to clear. The
+* mask can be formed using a set of bitwise or'd values from the
+* <code>XLLF_INT_*_MASK</code> preprocessor symbols.
+*
+* @note
+* C-style signature:
+* void XLlFifo_IntClear(XLlFifo *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XLlFifo_IntClear(InstancePtr, Mask) \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_ISR_OFFSET, \
+ ((Mask) & XLLF_INT_ALL_MASK))
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_RxReset resets the receive channel of the FIFO specified by
+* <i>InstancePtr</i>. XLlFifo_RxReset drops any bytes in the FIFO not yet
+* retrieved.
+*
+* The calling software may want to test for the completion of the reset by
+* reading the interrupt status (IS) register and testing for the Rx Reset
+* complete (RRC) bit.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_RxReset(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_RxReset(InstancePtr) \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_RDFR_OFFSET, \
+ XLLF_RDFR_RESET_MASK)
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_IsRxEmpty returns true if the receive channel of the FIFO, specified
+* by <i>InstancePtr</i>, is empty.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_IsRxEmpty returns TRUE when the receive channel of the
+* FIFO is empty. Otherwise, XLlFifo_IsRxEmpty returns FALSE.
+*
+* @note
+* C-style signature:
+* int XLlFifo_IsRxEmpty(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_IsRxEmpty(InstancePtr) \
+ ((XStrm_IsRxInternalEmpty(&((InstancePtr)->RxStreamer)) && \
+ ((XLlFifo_ReadReg((InstancePtr)->BaseAddress, \
+ XLLF_RDFO_OFFSET) == 0))) \
+ ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+*
+* XLlFifo_RxOccupancy returns the number of 32-bit words available (occupancy) to
+* be read from the receive channel of the FIFO, specified by <i>InstancePtr</i>.
+*
+* The xps_ll_fifo core uses the same fifo to store data values and frame length
+* values. Upon initialization, the XLlFifo_RxOccupancy will give the value of
+* 1, which means one length value (a reserved fifo location) and no data
+* values.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_RxOccupancy returns the occupancy count for the specified
+* packet FIFO.
+*
+* @note
+*
+* C Signature: u32 XLlFifo_RxOccupancy(XLlFifo *InstancePtr)
+*
+******************************************************************************/
+#define XLlFifo_RxOccupancy(InstancePtr) \
+ XStrm_RxOccupancy(&((InstancePtr)->RxStreamer))
+
+/*****************************************************************************/
+/**
+*
+* XLlFifo_RxGetLen notifies the hardware that the program is ready to receive
+* the next frame from the receive channel of the FIFO, specified by
+* <i>InstancePtr</i>.
+*
+* Note that the program must first call XLlFifo_RxGetLen before pulling data
+* out of the receive channel of the FIFO with XLlFifo_Read.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_RxGetLen returns the number of bytes available in the next
+* frame.
+*
+* @note
+*
+* C Signature: u32 XLlFifo_RxGetLen(XLlFifo *InstancePtr)
+*
+******************************************************************************/
+#define XLlFifo_RxGetLen(InstancePtr) \
+ XStrm_RxGetLen(&((InstancePtr)->RxStreamer))
+
+/*****************************************************************************/
+/**
+*
+* XLlFifo_Read reads <i>Bytes</i> bytes from the receive channel of the FIFO
+* referenced by <i>InstancePtr</i> to the block of memory, referenced by
+* <i>BufPtr</i>.
+*
+* Care must be taken to ensure that the number of bytes read with one or more
+* calls to XLlFifo_Read() does not exceed the number of bytes available given
+* from the last call to XLlFifo_RxGetLen().
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param BufPtr specifies the memory address to place the data read.
+*
+* @param Bytes specifies the number of bytes to read.
+*
+* @return N/A
+*
+* @note
+* Error handling is handled through hardware exceptions and interrupts.
+*
+* C Signature: void XLlFifo_Read(XLlFifo *InstancePtr, void *BufPtr, unsigned Bytes)
+*
+******************************************************************************/
+#define XLlFifo_Read(InstancePtr, BufPtr, Bytes) \
+ XStrm_Read(&((InstancePtr)->RxStreamer), (BufPtr), (Bytes))
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_TxReset resets the transmit channel of the FIFO specified by
+* <i>InstancePtr</i>. XLlFifo_TxReset drops any bytes in the FIFO not yet
+* transmitted.
+*
+* The calling software may want to test for the completion of the reset by
+* reading the interrupt status (IS) register and testing for the Tx Reset
+* complete (TRC) bit.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_TxReset(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_TxReset(InstancePtr) \
+ XLlFifo_WriteReg((InstancePtr)->BaseAddress, XLLF_TDFR_OFFSET, \
+ XLLF_TDFR_RESET_MASK)
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_IsTxDone returns true if the transmission in the transmit channel
+* of the FIFO, specified by <i>InstancePtr</i>, is complete. XLlFifo_IsTxDone
+* works only if the TC bit in the IS register is cleared before sending a
+* frame.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_IsTxDone returns TRUE when the transmit channel of the
+* FIFO is complete. Otherwise, XLlFifo_IsTxDone returns FALSE.
+*
+* @note
+* C-style signature:
+* int XLlFifo_IsTxDone(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_IsTxDone(InstancePtr) \
+ ((XLlFifo_ReadReg((InstancePtr)->BaseAddress, XLLF_ISR_OFFSET) & \
+ XLLF_INT_TC_MASK) \
+ ? TRUE : FALSE)
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_TxVacancy returns the number of unused 32 bit words available
+* (vacancy) in the send channel of the FIFO specified by <i>InstancePtr</i>.
+*
+* The xps_ll_fifo core uses the same fifo to store data values and frame length
+* values. Upon initialization, the XLlFifo_TxVacancy will give the value of
+* FIFO_WIDTH - 1, which means one length value used (a reserved fifo location)
+* and no data values yet present.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XLlFifo_TxVacancy returns the vacancy count in 32-bit words for
+* the specified FIFO.
+*
+* @note
+* C-style signature:
+* u32 XLlFifo_TxVacancy(XLlFifo *InstancePtr)
+*
+*****************************************************************************/
+#define XLlFifo_TxVacancy(InstancePtr) \
+ XStrm_TxVacancy(&((InstancePtr)->TxStreamer))
+
+/*****************************************************************************/
+/**
+*
+* XLlFifo_TxSetLen begins a hardware transfer of <i>Bytes</i> bytes out of the
+* transmit channel of the FIFO specified by <i>InstancePtr</i>.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param Bytes specifies the frame length in bytes.
+*
+* @return N/A
+*
+* @note
+*
+* C Signature: void XLlFifo_TxSetLen(XLlFifo *InstancePtr, u32 Bytes)
+*
+******************************************************************************/
+#define XLlFifo_TxSetLen(InstancePtr, Bytes) \
+ XStrm_TxSetLen(&((InstancePtr)->TxStreamer), (Bytes))
+
+/*****************************************************************************/
+/**
+*
+* XLlFifo_Write writes <i>Bytes</i> bytes of the block of memory, referenced by
+* <i>BufPtr</i>, to the transmit channel of the FIFO referenced by
+* <i>InstancePtr</i>.
+*
+* Care must be taken to ensure that the number of bytes written with one or
+* more calls to XLlFifo_Write() matches the number of bytes given in the next
+* call to XLlFifo_TxSetLen().
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param BufPtr specifies the memory address of data to write.
+*
+* @param Bytes specifies the number of bytes to write.
+*
+* @return N/A
+*
+* @note
+* Error handling is handled through hardware exceptions and interrupts.
+*
+* C Signature: void XLlFifo_Write(XLlFifo *InstancePtr, void *BufPtr, unsigned Bytes)
+*
+******************************************************************************/
+#define XLlFifo_Write(InstancePtr, BufPtr, Bytes) \
+ XStrm_Write(&((InstancePtr)->TxStreamer), (BufPtr), (Bytes))
+
+
+/************************** Function Prototypes ******************************/
+/*
+ * Initialization functions in xllfifo.c
+ */
+void XLlFifo_Initialize(XLlFifo *InstancePtr, u32 BaseAddress);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* XLLFIFO_H end of preprocessor protection symbols */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2004-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file llfifo_hw.h
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the xps_ll_fifo core.
+* High-level driver functions are defined in xllfifo.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a jvb 10/16/06 First release.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XLLFIFO_HW_H /* prevent circular inclusions */
+#define XLLFIFO_HW_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xdebug.h"
+
+/************************** Constant Definitions *****************************/
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit.
+ */
+
+/** @name Registers
+ * @{
+ */
+#define XLLF_ISR_OFFSET 0x00000000 /**< Interrupt Status */
+#define XLLF_IER_OFFSET 0x00000004 /**< Interrupt Enable */
+
+#define XLLF_TDFR_OFFSET 0x00000008 /**< Transmit Reset */
+#define XLLF_TDFV_OFFSET 0x0000000c /**< Transmit Vacancy */
+#define XLLF_TDFD_OFFSET 0x00000010 /**< Transmit Data */
+#define XLLF_TLF_OFFSET 0x00000014 /**< Transmit Length */
+
+#define XLLF_RDFR_OFFSET 0x00000018 /**< Receive Reset */
+#define XLLF_RDFO_OFFSET 0x0000001c /**< Receive Occupancy */
+#define XLLF_RDFD_OFFSET 0x00000020 /**< Receive Data */
+#define XLLF_RLF_OFFSET 0x00000024 /**< Receive Length */
+#define XLLF_LLR_OFFSET 0x00000028 /**< Local Link Reset */
+
+/*@}*/
+
+/* Register masks. The following constants define bit locations of various
+ * control bits in the registers. Constants are not defined for those registers
+ * that have a single bit field representing all 32 bits. For further
+ * information on the meaning of the various bit masks, refer to the HW spec.
+ */
+
+/** @name Interrupt bits
+ * These bits are associated with the XLLF_IER_OFFSET and XLLF_ISR_OFFSET
+ * registers.
+ * @{
+ */
+#define XLLF_INT_RPURE_MASK 0x80000000 /**< Receive under-read */
+#define XLLF_INT_RPORE_MASK 0x40000000 /**< Receive over-read */
+#define XLLF_INT_RPUE_MASK 0x20000000 /**< Receive underrun (empty) */
+#define XLLF_INT_TPOE_MASK 0x10000000 /**< Transmit overrun */
+#define XLLF_INT_TC_MASK 0x08000000 /**< Transmit complete */
+#define XLLF_INT_RC_MASK 0x04000000 /**< Receive complete */
+#define XLLF_INT_TSE_MASK 0x02000000 /**< Transmit length mismatch */
+#define XLLF_INT_TRC_MASK 0x01000000 /**< Transmit reset complete */
+#define XLLF_INT_RRC_MASK 0x00800000 /**< Receive reset complete */
+#define XLLF_INT_ALL_MASK 0xff800000 /**< All the ints */
+#define XLLF_INT_ERROR_MASK 0xf2000000 /**< Error status ints */
+#define XLLF_INT_RXERROR_MASK 0xe0000000 /**< Receive Error status ints */
+#define XLLF_INT_TXERROR_MASK 0x12000000 /**< Transmit Error status ints */
+/*@}*/
+
+/** @name Reset register values
+ * These bits are associated with the XLLF_TDFR_OFFSET and XLLF_RDFR_OFFSET
+ * reset registers.
+ * @{
+ */
+#define XLLF_RDFR_RESET_MASK 0x000000a5 /**< receive reset value */
+#define XLLF_TDFR_RESET_MASK 0x000000a5 /**< Transmit reset value */
+#define XLLF_LLR_RESET_MASK 0x000000a5 /**< Local Link reset value */
+/*@}*/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/**** debug macros ****/
+#define XLlFifo_reg_name(RegOffset) \
+ (((RegOffset) == XLLF_ISR_OFFSET) ? "ISR": \
+ ((RegOffset) == XLLF_IER_OFFSET) ? "IER": \
+ ((RegOffset) == XLLF_TDFR_OFFSET) ? "TDFR {tx reset}": \
+ ((RegOffset) == XLLF_TDFV_OFFSET) ? "TDFV {tx vacancy}": \
+ ((RegOffset) == XLLF_TDFD_OFFSET) ? "TDFD {tx data}": \
+ ((RegOffset) == XLLF_TLF_OFFSET) ? "TLF {tx length}": \
+ ((RegOffset) == XLLF_RDFR_OFFSET) ? "RDFR {rx reset}": \
+ ((RegOffset) == XLLF_RDFO_OFFSET) ? "RDFO {rx occupancy}": \
+ ((RegOffset) == XLLF_RDFD_OFFSET) ? "RDFD {rx data}": \
+ ((RegOffset) == XLLF_RLF_OFFSET) ? "RLF {rx length}": \
+ "unknown")
+
+#define XLlFifo_print_reg_o(BaseAddress, RegOffset, Value) \
+ xdbg_printf(XDBG_DEBUG_FIFO_REG, "0x%08x -> %s(0x%08x)\n", (Value), \
+ XLlFifo_reg_name(RegOffset), \
+ (RegOffset) + (BaseAddress))
+
+#define XLlFifo_print_reg_i(BaseAddress, RegOffset, Value) \
+ xdbg_printf(XDBG_DEBUG_FIFO_REG, "%s(0x%08x) -> 0x%08x\n", \
+ XLlFifo_reg_name(RegOffset), \
+ (RegOffset) + (BaseAddress), (Value))
+/**** end debug macros ****/
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_ReadReg returns the value of the register at the offset,
+* <i>RegOffset</i>, from the memory mapped base address, <i>BaseAddress</i>.
+*
+* @param BaseAddress specifies the base address of the device.
+*
+* @param RegOffset specifies the offset from BaseAddress.
+*
+* @return XLlFifo_ReadReg returns the value of the specified register.
+*
+* @note
+* C-style signature:
+* u32 XLlFifo_ReadReg(u32 BaseAddress, u32 RegOffset)
+*
+*****************************************************************************/
+#ifdef DEBUG
+extern u32 _xllfifo_rr_value;
+#define XLlFifo_ReadReg(BaseAddress, RegOffset) \
+	((((RegOffset) > 0x24) ? xdbg_printf(XDBG_DEBUG_ERROR, \
+		"XLlFifo_ReadReg: Woah! wrong reg addr: 0x%08x\n", \
+		(RegOffset)) : 0), \
+	_xllfifo_rr_value = XIo_In32((BaseAddress) + (RegOffset)), \
+	XLlFifo_print_reg_i((BaseAddress), (RegOffset), _xllfifo_rr_value), \
+	_xllfifo_rr_value)
+#else
+#define XLlFifo_ReadReg(BaseAddress, RegOffset) \
+	(XIo_In32((BaseAddress) + (RegOffset)))
+#endif
+
+/****************************************************************************/
+/**
+*
+* XLlFifo_WriteReg writes the value, <i>Value</i>, to the register at the
+* offset, <i>RegOffset</i>, from the memory mapped base address,
+* <i>BaseAddress</i>.
+*
+* @param BaseAddress specifies the base address of the device.
+*
+* @param RegOffset specifies the offset from BaseAddress.
+*
+* @param Value is value to write to the register.
+*
+* @return N/A
+*
+* @note
+* C-style signature:
+* void XLlFifo_WriteReg(u32 BaseAddress, u32 RegOffset, u32 Value)
+*
+*****************************************************************************/
+#ifdef DEBUG
+#define XLlFifo_WriteReg(BaseAddress, RegOffset, Value) \
+	((((RegOffset) > 0x24) ? xdbg_printf(XDBG_DEBUG_ERROR, \
+		"XLlFifo_WriteReg: Woah! wrong reg addr: 0x%08x\n", \
+		(RegOffset)) : 0), \
+	XLlFifo_print_reg_o((BaseAddress), (RegOffset), (Value)), \
+	(XIo_Out32((BaseAddress) + (RegOffset), (Value))))
+#else
+#define XLlFifo_WriteReg(BaseAddress, RegOffset, Value) \
+	((XIo_Out32((BaseAddress) + (RegOffset), (Value))))
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* XLLFIFO_HW_H end of protection macro */
--- /dev/null
+/* $Id: xpacket_fifo_l_v2_00_a.c,v 1.1 2006/12/13 14:22:53 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xpacket_fifo_l_v2_00_a.c
+*
+* Contains low-level (Level 0) functions for the XPacketFifoV200a driver.
+* See xpacket_fifo_v2_00_a.h for information about the high-level (Level 1)
+* driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ------------------------------------------------------
+* 2.00a rpm 10/22/03 First release. Moved most of Level 1 driver functions
+* into this layer.
+* 2.00a rmm 02/24/04 Added L0WriteDRE function.
+* 2.00a xd 10/27/04 Changed comments to support doxygen for API
+* documentation.
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xpacket_fifo_l_v2_00_a.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************* Variable Definitions ******************************/
+
+
+/************************** Function Prototypes ******************************/
+
+static int Write32(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount);
+
+static int Write64(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount);
+
+static int Read32(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount);
+
+static int Read64(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount);
+
+
+/*****************************************************************************/
+/**
+*
+* Read data from a FIFO and puts it into a specified buffer. The packet FIFO is
+* currently 32 or 64 bits wide such that an input buffer which is a series of
+* bytes is filled from the FIFO a word at a time. If the requested byte count
+* is not a multiple of 32/64 bit words, it is necessary for this function to
+* format the remaining 32/64 bit word from the FIFO into a series of bytes in
+* the buffer. There may be up to 3/7 extra bytes which must be extracted from
+* the last word of the FIFO and put into the buffer.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer to write the data into. This
+* buffer must be 32 bit aligned or an alignment exception could be
+* generated. Since this buffer is a byte buffer, the data is assumed to
+* be endian independent.
+*
+* @param ByteCount contains the number of bytes to read from the FIFO. This
+* number of bytes must be present in the FIFO or an error will be
+* returned.
+*
+* @return
+*
+* XST_SUCCESS indicates the operation was successful. If the number of
+* bytes specified by the byte count is not present in the FIFO
+* XST_PFIFO_LACK_OF_DATA is returned.
+* <br><br>
+* If the function was successful, the specified buffer is modified to contain
+* the bytes which were removed from the FIFO.
+*
+* @note
+*
+* Note that the exact number of bytes which are present in the FIFO is
+* not known by this function. It can only check for a number of 32/64 bit
+* words such that if the byte count specified is incorrect, but is still
+* possible based on the number of words in the FIFO, up to 3/7 garbage bytes
+* may be present at the end of the buffer.
+* <br><br>
+* This function assumes that if the device consuming data from the FIFO is
+* a byte device, the order of the bytes to be consumed is from the most
+* significant byte to the least significant byte of a 32/64 bit word removed
+* from the FIFO.
+*
+******************************************************************************/
+int XPacketFifoV200a_L0Read(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ u32 Width;
+ int Result = XST_FIFO_ERROR;
+
+ /* determine the width of the FIFO
+ */
+ Width = XIo_In32(RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_FIFO_WIDTH_MASK;
+
+ if ((Width == XPF_V200A_FIFO_WIDTH_LEGACY_TYPE) ||
+ (Width == XPF_V200A_FIFO_WIDTH_32BITS_TYPE)) {
+ Result = Read32(RegBaseAddress, DataBaseAddress, BufferPtr,
+ ByteCount);
+ }
+ else if (Width == XPF_V200A_FIFO_WIDTH_64BITS_TYPE) {
+ Result = Read64(RegBaseAddress, DataBaseAddress, BufferPtr,
+ ByteCount);
+ }
+
+ return Result;
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Write data into a packet FIFO. The packet FIFO is currently 32 or 64 bits
+* wide such that an input buffer which is a series of bytes must be written
+* into the FIFO a word at a time. If the buffer is not a multiple of 32 bit
+* words, it is necessary for this function to format the remaining bytes into
+* a single 32 bit word to be inserted into the FIFO. This is necessary to
+* avoid any accesses past the end of the buffer.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer that data is to be read from
+* and written into the FIFO. Since this buffer is a byte buffer, the
+* data is assumed to be endian independent. This buffer must be 32 bit
+* aligned or an alignment exception could be generated.
+*
+* @param ByteCount contains the number of bytes to read from the buffer and to
+* write to the FIFO.
+*
+* @return
+*
+* XST_SUCCESS is returned if the operation succeeded. If there is not enough
+* room in the FIFO to hold the specified bytes, XST_PFIFO_NO_ROOM is
+* returned.
+*
+* @note
+*
+* This function assumes that if the device inserting data into the FIFO is
+* a byte device, the order of the bytes in each 32/64 bit word is from the most
+* significant byte to the least significant byte.
+*
+******************************************************************************/
+int XPacketFifoV200a_L0Write(u32 RegBaseAddress,
+ u32 DataBaseAddress, u8 *BufferPtr, u32 ByteCount)
+{
+ u32 Width;
+ int Result = XST_FIFO_ERROR;
+
+
+ /* determine the width of the FIFO
+ */
+ Width = XIo_In32(RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_FIFO_WIDTH_MASK;
+
+ if ((Width == XPF_V200A_FIFO_WIDTH_LEGACY_TYPE) ||
+ (Width == XPF_V200A_FIFO_WIDTH_32BITS_TYPE)) {
+ Result = Write32(RegBaseAddress, DataBaseAddress, BufferPtr,
+ ByteCount);
+ }
+ else if (Width == XPF_V200A_FIFO_WIDTH_64BITS_TYPE) {
+ Result = Write64(RegBaseAddress, DataBaseAddress, BufferPtr,
+ ByteCount);
+ }
+
+ return Result;
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Write data into a packet FIFO configured for the Data Realignment Engine
+* (DRE). A packet FIFO channel configured in this way will accept any
+* combination of byte, half-word, or word writes. The DRE will shift the data
+* into the correct byte lane.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer that data is to be read from
+* and written into the FIFO. Since this buffer is a byte buffer, the
+* data is assumed to be endian independent. There are no alignment
+* restrictions.
+*
+* @param ByteCount contains the number of bytes to read from the buffer and to
+* write to the FIFO.
+*
+* @return
+*
+* XST_SUCCESS is returned if the operation succeeded. If there is not enough
+* room in the FIFO to hold the specified bytes, XST_PFIFO_NO_ROOM is
+* returned.
+*
+* @note
+*
+* This function assumes that if the device inserting data into the FIFO is
+* a byte device, the order of the bytes in each 32/64 bit word is from the most
+* significant byte to the least significant byte.
+*
+******************************************************************************/
+int XPacketFifoV200a_L0WriteDre(u32 RegBaseAddress,
+ u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ u32 FifoRoomLeft;
+ u32 BytesLeft;
+ u32 Width;
+
+ /* calculate how many slots are left in the FIFO
+ */
+ FifoRoomLeft =
+ XIo_In32(RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET)
+ & XPF_V200A_COUNT_MASK;
+
+ /* determine the width of the FIFO
+ */
+ Width = XIo_In32(RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_FIFO_WIDTH_MASK;
+
+ /* from the width, determine how many bytes can be written to the FIFO
+ */
+ if ((Width == XPF_V200A_FIFO_WIDTH_LEGACY_TYPE) ||
+ (Width == XPF_V200A_FIFO_WIDTH_32BITS_TYPE)) {
+ FifoRoomLeft *= 4;
+ }
+ else if (Width == XPF_V200A_FIFO_WIDTH_64BITS_TYPE) {
+ FifoRoomLeft *= 8;
+ }
+
+ /* Make sure there's enough room in the FIFO */
+ if (FifoRoomLeft < ByteCount) {
+ return XST_PFIFO_NO_ROOM;
+ }
+
+ /* Determine the number of bytes to write until 32 bit alignment is
+ * reached, then write those bytes to the FIFO one byte at a time
+ */
+ BytesLeft = (unsigned) BufferPtr % sizeof(u32);
+ ByteCount -= BytesLeft;
+ while (BytesLeft--) {
+ XIo_Out8(DataBaseAddress, *BufferPtr++);
+ }
+
+ /* Write as many 32 bit words as we can */
+ BytesLeft = ByteCount;
+ while (BytesLeft >= sizeof(u32)) {
+ XIo_Out32(DataBaseAddress, *(u32 *) BufferPtr);
+ BufferPtr += sizeof(u32);
+ BytesLeft -= sizeof(u32);
+ }
+
+ /* Write remaining bytes */
+ while (BytesLeft--) {
+ XIo_Out8(DataBaseAddress, *BufferPtr++);
+ }
+
+ return XST_SUCCESS;
+
+}
+
+/*****************************************************************************/
+/**
+*
+* Read data from a FIFO and puts it into a specified buffer. The packet FIFO
+* is 32 bits wide such that an input buffer which is a series of bytes is
+* filled from the FIFO a word at a time. If the requested byte count is not
+* a multiple of 32 bit words, it is necessary for this function to format the
+* remaining 32 bit word from the FIFO into a series of bytes in the buffer.
+* There may be up to 3 extra bytes which must be extracted from the last word
+* of the FIFO and put into the buffer.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer to write the data into. This
+* buffer must be 32 bit aligned or an alignment exception could be
+* generated. Since this buffer is a byte buffer, the data is assumed to
+* be endian independent.
+*
+* @param ByteCount contains the number of bytes to read from the FIFO. This
+* number of bytes must be present in the FIFO or an error will be
+* returned.
+*
+* @return
+*
+* XST_SUCCESS indicates the operation was successful. If the number of
+* bytes specified by the byte count is not present in the FIFO
+* XST_PFIFO_LACK_OF_DATA is returned.
+* <br><br>
+* If the function was successful, the specified buffer is modified to contain
+* the bytes which were removed from the FIFO.
+*
+* @note
+*
+* Note that the exact number of bytes which are present in the FIFO is
+* not known by this function. It can only check for a number of 32 bit
+* words such that if the byte count specified is incorrect, but is still
+* possible based on the number of words in the FIFO, up to 3 garbage bytes
+* may be present at the end of the buffer.
+* <br><br>
+* This function assumes that if the device consuming data from the FIFO is
+* a byte device, the order of the bytes to be consumed is from the most
+* significant byte to the least significant byte of a 32 bit word removed
+* from the FIFO.
+*
+******************************************************************************/
+static int Read32(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ u32 FifoCount;
+ u32 WordCount;
+ u32 ExtraByteCount;
+ u32 *WordBuffer = (u32 *) BufferPtr;
+
+ /* get the count of how many 32 bit words are in the FIFO, if there
+ * aren't enough words to satisfy the request, return an error
+ */
+
+ FifoCount =
+ XIo_In32(RegBaseAddress +
+ XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_COUNT_MASK;
+
+ if ((FifoCount * XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT) < ByteCount) {
+ return XST_PFIFO_LACK_OF_DATA;
+ }
+
+ /* calculate the number of words to read from the FIFO before the word
+ * containing the extra bytes, and calculate the number of extra bytes
+ * the extra bytes are defined as those at the end of the buffer when
+ * the buffer does not end on a 32 bit boundary
+ */
+ WordCount = ByteCount / XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT;
+ ExtraByteCount = ByteCount % XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT;
+
+ /* Read the 32 bit words from the FIFO for all the buffer except the
+ * last word which contains the extra bytes, the following code assumes
+ * that the buffer is 32 bit aligned, otherwise an alignment exception
+ * could be generated
+ */
+ for (FifoCount = 0; FifoCount < WordCount; FifoCount++) {
+ WordBuffer[FifoCount] = XIo_In32(DataBaseAddress);
+ }
+
+ /* if there are extra bytes to handle, read the last word from the FIFO
+ * and insert the extra bytes into the buffer
+ */
+ if (ExtraByteCount > 0) {
+ u32 LastWord;
+ u8 *WordPtr;
+ u8 *ExtraBytesBuffer = (u8 *) (WordBuffer + WordCount);
+
+ /* get the last word from the FIFO for the extra bytes */
+
+ LastWord = XIo_In32(DataBaseAddress);
+
+ /* one extra byte in the last word, put the byte into the next
+ * location of the buffer, bytes in a word of the FIFO are ordered
+ * from most significant byte to least
+ */
+ WordPtr = (u8 *) &LastWord;
+ if (ExtraByteCount == 1) {
+ ExtraBytesBuffer[0] = WordPtr[0];
+ }
+
+ /* two extra bytes in the last word, put each byte into the next
+ * two locations of the buffer
+ */
+ else if (ExtraByteCount == 2) {
+ ExtraBytesBuffer[0] = WordPtr[0];
+ ExtraBytesBuffer[1] = WordPtr[1];
+ }
+ /* three extra bytes in the last word, put each byte into the next
+ * three locations of the buffer
+ */
+ else if (ExtraByteCount == 3) {
+ ExtraBytesBuffer[0] = WordPtr[0];
+ ExtraBytesBuffer[1] = WordPtr[1];
+ ExtraBytesBuffer[2] = WordPtr[2];
+ }
+ }
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Read data from a FIFO and puts it into a specified buffer. The packet FIFO
+* is 64 bits wide such that an input buffer which is a series of bytes is
+* filled from the FIFO a word at a time. If the requested byte count is not
+* a multiple of 64 bit words, it is necessary for this function to format the
+* remaining 64 bit word from the FIFO into a series of bytes in the buffer.
+* There may be up to 7 extra bytes which must be extracted from the last word
+* of the FIFO and put into the buffer.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer to write the data into. This
+* buffer must be 32 bit aligned or an alignment exception could be
+* generated. Since this buffer is a byte buffer, the data is assumed to
+* be endian independent.
+*
+* @param ByteCount contains the number of bytes to read from the FIFO. This
+* number of bytes must be present in the FIFO or an error will be
+* returned.
+*
+* @return
+*
+* XST_SUCCESS indicates the operation was successful. If the number of
+* bytes specified by the byte count is not present in the FIFO
+* XST_PFIFO_LACK_OF_DATA is returned.
+* <br><br>
+* If the function was successful, the specified buffer is modified to contain
+* the bytes which were removed from the FIFO.
+*
+* @note
+*
+* Note that the exact number of bytes which are present in the FIFO is
+* not known by this function. It can only check for a number of 64 bit
+* words such that if the byte count specified is incorrect, but is still
+* possible based on the number of words in the FIFO, up to 7 garbage bytes
+* may be present at the end of the buffer.
+* <br><br>
+* This function assumes that if the device consuming data from the FIFO is
+* a byte device, the order of the bytes to be consumed is from the most
+* significant byte to the least significant byte of a 64 bit word removed
+* from the FIFO.
+*
+******************************************************************************/
+static int Read64(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ u32 FifoCount;
+ u32 WordCount;
+ u32 ExtraByteCount;
+ u32 *WordBuffer = (u32 *) BufferPtr;
+
+ /* get the count of how many 64 bit words are in the FIFO, if there
+ * aren't enough words to satisfy the request, return an error
+ */
+
+ FifoCount =
+ XIo_In32(RegBaseAddress +
+ XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_COUNT_MASK;
+
+ /* NOTE(review): this treats the occupancy count as a number of 64-bit
+ * FIFO words (8 bytes each) — confirm the count units for a 64-bit
+ * wide FIFO against the packet FIFO IP specification
+ */
+ if ((FifoCount * XPF_V200A_64BIT_FIFO_WIDTH_BYTE_COUNT) < ByteCount) {
+ return XST_PFIFO_LACK_OF_DATA;
+ }
+
+ /* calculate the number of words to read from the FIFO before the word
+ * containing the extra bytes, and calculate the number of extra bytes
+ * the extra bytes are defined as those at the end of the buffer when
+ * the buffer does not end on a 32 bit boundary
+ */
+ WordCount = ByteCount / XPF_V200A_64BIT_FIFO_WIDTH_BYTE_COUNT;
+ ExtraByteCount = ByteCount % XPF_V200A_64BIT_FIFO_WIDTH_BYTE_COUNT;
+
+ /* Read the 64 bit words from the FIFO for all the buffer except the
+ * last word which contains the extra bytes, the following code assumes
+ * that the buffer is 32 bit aligned, otherwise an alignment exception
+ * could be generated. The MSWord must be read first followed by the
+ * LSWord
+ */
+ for (FifoCount = 0; FifoCount < WordCount; FifoCount++) {
+ WordBuffer[(FifoCount * 2)] = XIo_In32(DataBaseAddress);
+ WordBuffer[(FifoCount * 2) + 1] = XIo_In32(DataBaseAddress + 4);
+ }
+
+ /* if there are extra bytes to handle, read the last word from the FIFO
+ * and insert the extra bytes into the buffer
+ */
+ if (ExtraByteCount > 0) {
+ u32 MSLastWord;
+ u32 LSLastWord;
+ u8 *WordPtr;
+ u8 *ExtraBytesBuffer = (u8 *) (WordBuffer + (WordCount * 2));
+ u8 Index = 0;
+
+ /* get the last word from the FIFO for the extra bytes */
+
+ MSLastWord = XIo_In32(DataBaseAddress);
+ LSLastWord = XIo_In32(DataBaseAddress + 4);
+
+ /* four or more extra bytes in the last word, put the byte into
+ * the next location of the buffer, bytes in a word of the FIFO
+ * are ordered from most significant byte to least
+ */
+ WordPtr = (u8 *) &MSLastWord;
+ if (ExtraByteCount >= 4) {
+ ExtraBytesBuffer[Index] = WordPtr[0];
+ ExtraBytesBuffer[Index + 1] = WordPtr[1];
+ ExtraBytesBuffer[Index + 2] = WordPtr[2];
+ ExtraBytesBuffer[Index + 3] = WordPtr[3];
+ ExtraByteCount = ExtraByteCount - 4;
+ /* WordPtr still aliases MSLastWord, which now holds the
+ * LS word, so the remainder cases below read LS bytes
+ */
+ MSLastWord = LSLastWord;
+ Index = 4;
+ }
+
+ /* one extra byte in the last word, put the byte into the next
+ * location of the buffer, bytes in a word of the FIFO are
+ * ordered from most significant byte to least
+ */
+ if (ExtraByteCount == 1) {
+ ExtraBytesBuffer[Index] = WordPtr[0];
+ }
+
+ /* two extra bytes in the last word, put each byte into the next
+ * two locations of the buffer
+ */
+ else if (ExtraByteCount == 2) {
+ ExtraBytesBuffer[Index] = WordPtr[0];
+ ExtraBytesBuffer[Index + 1] = WordPtr[1];
+ }
+ /* three extra bytes in the last word, put each byte into the next
+ * three locations of the buffer
+ */
+ else if (ExtraByteCount == 3) {
+ ExtraBytesBuffer[Index] = WordPtr[0];
+ ExtraBytesBuffer[Index + 1] = WordPtr[1];
+ ExtraBytesBuffer[Index + 2] = WordPtr[2];
+ }
+ }
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Write data into a 32 bit packet FIFO. The packet FIFO is 32 bits wide in this
+* function call such that an input buffer which is a series of bytes must be
+* written into the FIFO a word at a time. If the buffer is not a multiple of
+* 32 bit words, it is necessary for this function to format the remaining bytes
+* into a single 32 bit word to be inserted into the FIFO. This is necessary to
+* avoid any accesses past the end of the buffer.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer that data is to be read from
+* and written into the FIFO. Since this buffer is a byte buffer, the
+* data is assumed to be endian independent. This buffer must be 32 bit
+* aligned or an alignment exception could be generated.
+* @param ByteCount contains the number of bytes to read from the buffer and to
+* write to the FIFO.
+*
+* @return
+*
+* XST_SUCCESS is returned if the operation succeeded. If there is not enough
+* room in the FIFO to hold the specified bytes, XST_PFIFO_NO_ROOM is
+* returned.
+*
+* @note
+*
+* This function assumes that if the device inserting data into the FIFO is
+* a byte device, the order of the bytes in each 32 bit word is from the most
+* significant byte to the least significant byte.
+*
+******************************************************************************/
+static int Write32(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ u32 FifoCount;
+ u32 WordCount;
+ u32 ExtraByteCount;
+ u32 *WordBuffer = (u32 *) BufferPtr;
+
+ /* get the count of how many words may be inserted into the FIFO
+ * (for a write FIFO the count register reports vacancy)
+ */
+
+ FifoCount =
+ XIo_In32(RegBaseAddress +
+ XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_COUNT_MASK;
+
+ /* Calculate the number of 32 bit words required to insert the
+ * specified number of bytes in the FIFO and determine the number
+ * of extra bytes if the buffer length is not a multiple of 32 bit
+ * words
+ */
+
+ WordCount = ByteCount / XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT;
+ ExtraByteCount = ByteCount % XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT;
+
+ /* take into account the extra bytes in the total word count */
+
+ if (ExtraByteCount > 0) {
+ WordCount++;
+ }
+
+ /* if there's not enough room in the FIFO to hold the specified
+ * number of bytes, then indicate an error.
+ */
+ if (FifoCount < WordCount) {
+ return XST_PFIFO_NO_ROOM;
+ }
+
+ /* readjust the word count to not take into account the extra bytes */
+
+ if (ExtraByteCount > 0) {
+ WordCount--;
+ }
+
+ /* Write all the bytes of the buffer which can be written as 32 bit
+ * words into the FIFO, waiting to handle the extra bytes separately
+ */
+ for (FifoCount = 0; FifoCount < WordCount; FifoCount++) {
+ XIo_Out32(DataBaseAddress, WordBuffer[FifoCount]);
+ }
+
+ /* if there are extra bytes to handle, extract them from the buffer
+ * and create a 32 bit word and write it to the FIFO
+ */
+ if (ExtraByteCount > 0) {
+ u32 LastWord = 0;
+ u8 *WordPtr;
+ u8 *ExtraBytesBuffer = (u8 *) (WordBuffer + WordCount);
+
+ /* one extra byte in the buffer, put the byte into the last word
+ * to be inserted into the FIFO, perform this processing inline
+ * rather than in a loop to help performance
+ */
+ WordPtr = (u8 *) &LastWord;
+ if (ExtraByteCount == 1) {
+ WordPtr[0] = ExtraBytesBuffer[0];
+ }
+
+ /* two extra bytes in the buffer, put each byte into the last word
+ * to be inserted into the FIFO
+ */
+ else if (ExtraByteCount == 2) {
+ WordPtr[0] = ExtraBytesBuffer[0];
+ WordPtr[1] = ExtraBytesBuffer[1];
+ }
+
+ /* three extra bytes in the buffer, put each byte into the last
+ * word to be inserted into the FIFO
+ */
+ else if (ExtraByteCount == 3) {
+ WordPtr[0] = ExtraBytesBuffer[0];
+ WordPtr[1] = ExtraBytesBuffer[1];
+ WordPtr[2] = ExtraBytesBuffer[2];
+ }
+
+ /* write the last 32 bit word to the FIFO and return with
+ * no errors; unused tail bytes of LastWord remain zero
+ */
+
+ XIo_Out32(DataBaseAddress, LastWord);
+ }
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Write data into a 64 bit packet FIFO. The packet FIFO is 64 bits wide in this
+* function call such that an input buffer which is a series of bytes must be
+* written into the FIFO a word at a time. If the buffer is not a multiple of
+* 64 bit words, it is necessary for this function to format the remaining bytes
+* into two 32 bit words to be inserted into the FIFO. This is necessary to
+* avoid any accesses past the end of the buffer.
+*
+* @param RegBaseAddress is the base address of the FIFO registers.
+*
+* @param DataBaseAddress is the base address of the FIFO keyhole.
+*
+* @param BufferPtr points to the memory buffer that data is to be read from
+* and written into the FIFO. Since this buffer is a byte buffer, the
+* data is assumed to be endian independent. This buffer must be 32 bit
+* aligned or an alignment exception could be generated.
+*
+* @param ByteCount contains the number of bytes to read from the buffer and to
+* write to the FIFO.
+*
+* @return
+*
+* XST_SUCCESS is returned if the operation succeeded. If there is not enough
+* room in the FIFO to hold the specified bytes, XST_PFIFO_NO_ROOM is
+* returned.
+*
+* @note
+*
+* This function assumes that if the device inserting data into the FIFO is
+* a byte device, the order of the bytes in each 64 bit word is from the most
+* significant byte to the least significant byte.
+*
+******************************************************************************/
+static int Write64(u32 RegBaseAddress, u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ u32 FifoCount;
+ u32 WordCount;
+ u32 ExtraByteCount;
+ u32 *WordBuffer = (u32 *) BufferPtr;
+
+ /* get the count of how many words may be inserted into the FIFO
+ * (for a write FIFO the count register reports vacancy)
+ */
+
+ FifoCount =
+ XIo_In32(RegBaseAddress +
+ XPF_V200A_COUNT_STATUS_REG_OFFSET) &
+ XPF_V200A_COUNT_MASK;
+
+ /* Calculate the number of 64 bit words required to insert the
+ * specified number of bytes in the FIFO and determine the number
+ * of extra bytes if the buffer length is not a multiple of 64 bit
+ * words
+ */
+
+ WordCount = ByteCount / XPF_V200A_64BIT_FIFO_WIDTH_BYTE_COUNT;
+ ExtraByteCount = ByteCount % XPF_V200A_64BIT_FIFO_WIDTH_BYTE_COUNT;
+
+ /* take into account the extra bytes in the total word count */
+
+ if (ExtraByteCount > 0) {
+ WordCount++;
+ }
+
+ /* if there's not enough room in the FIFO to hold the specified
+ * number of bytes, then indicate an error.
+ * NOTE(review): this compares the vacancy count directly against a
+ * count of 64-bit words — confirm the count units for a 64-bit wide
+ * FIFO against the packet FIFO IP specification
+ */
+ if (FifoCount < WordCount) {
+ return XST_PFIFO_NO_ROOM;
+ }
+
+ /* readjust the word count to not take into account the extra bytes */
+
+ if (ExtraByteCount > 0) {
+ WordCount--;
+ }
+
+ /* Write all the bytes of the buffer which can be written as 32 bit
+ * words into the FIFO, waiting to handle the extra bytes separately
+ * The MSWord must be written first followed by the LSWord
+ */
+ for (FifoCount = 0; FifoCount < WordCount; FifoCount++) {
+ XIo_Out32(DataBaseAddress, WordBuffer[(FifoCount * 2)]);
+ XIo_Out32(DataBaseAddress + 4, WordBuffer[(FifoCount * 2) + 1]);
+ }
+
+ /* if there are extra bytes to handle, extract them from the buffer
+ * and create two 32 bit words and write to the FIFO
+ */
+ if (ExtraByteCount > 0) {
+
+ u32 MSLastWord = 0;
+ u32 LSLastWord = 0;
+ u8 Index = 0;
+ u8 *WordPtr;
+ u8 *ExtraBytesBuffer = (u8 *) (WordBuffer + (WordCount * 2));
+
+ /* four extra bytes in the buffer, put the bytes into the last word
+ * to be inserted into the FIFO, perform this processing inline
+ * rather than in a loop to help performance
+ */
+ WordPtr = (u8 *) &MSLastWord;
+
+ if (ExtraByteCount >= 4) {
+ WordPtr[0] = ExtraBytesBuffer[Index];
+ WordPtr[1] = ExtraBytesBuffer[Index + 1];
+ WordPtr[2] = ExtraBytesBuffer[Index + 2];
+ WordPtr[3] = ExtraBytesBuffer[Index + 3];
+ ExtraByteCount = ExtraByteCount - 4;
+ /* remaining bytes (if any) go into the LS word */
+ WordPtr = (u8 *) &LSLastWord;
+ Index = 4;
+ }
+
+ /* one extra byte in the buffer, put the byte into the last word
+ * to be inserted into the FIFO, perform this processing inline
+ * rather than in a loop to help performance
+ */
+ if (ExtraByteCount == 1) {
+ WordPtr[0] = ExtraBytesBuffer[Index];
+ }
+
+ /* two extra bytes in the buffer, put each byte into the last word
+ * to be inserted into the FIFO
+ */
+ else if (ExtraByteCount == 2) {
+ WordPtr[0] = ExtraBytesBuffer[Index];
+ WordPtr[1] = ExtraBytesBuffer[Index + 1];
+ }
+
+ /* three extra bytes in the buffer, put each byte into the last
+ * word to be inserted into the FIFO
+ */
+ else if (ExtraByteCount == 3) {
+ WordPtr[0] = ExtraBytesBuffer[Index];
+ WordPtr[1] = ExtraBytesBuffer[Index + 1];
+ WordPtr[2] = ExtraBytesBuffer[Index + 2];
+ }
+
+ /* write the last 64 bit word to the FIFO and return with no errors
+ * The MSWord must be written first followed by the LSWord
+ */
+ XIo_Out32(DataBaseAddress, MSLastWord);
+ XIo_Out32(DataBaseAddress + 4, LSLastWord);
+ }
+
+ return XST_SUCCESS;
+}
--- /dev/null
+/* $Id: xpacket_fifo_l_v2_00_a.h,v 1.1 2006/12/13 14:23:01 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2003-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xpacket_fifo_l_v2_00_a.h
+*
+* This header file contains identifiers and low-level (Level 0) driver
+* functions (or macros) that can be used to access the FIFO. High-level driver
+* (Level 1) functions are defined in xpacket_fifo_v2_00_a.h.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- ------------------------------------------------------
+* 2.00a rpm 10/22/03 First release. Moved most of Level 1 driver functions
+* into this layer.
+* 2.00a rmm 02/24/04 Added L0WriteDre function.
+* 2.00a xd 10/27/04 Changed comments to support doxygen for API
+* documentation.
+* </pre>
+*
+*****************************************************************************/
+#ifndef XPACKET_FIFO_L_V200A_H /* prevent circular inclusions */
+#define XPACKET_FIFO_L_V200A_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions *****************************/
+
+/** @name FIFO types
+ *
+ * These constants specify the FIFO type and are mutually exclusive
+ * @{
+ */
+#define XPF_V200A_READ_FIFO_TYPE 0 /**< a read FIFO */
+#define XPF_V200A_WRITE_FIFO_TYPE 1 /**< a write FIFO */
+/* @} */
+
+/** @name Register offsets
+ *
+ * These constants define the offsets to each of the registers from the
+ * register base address, each of the constants are a number of bytes
+ * @{
+ */
+#define XPF_V200A_RESET_REG_OFFSET 0UL /**< Reset register */
+#define XPF_V200A_MODULE_INFO_REG_OFFSET 0UL /**< MIR register */
+#define XPF_V200A_COUNT_STATUS_REG_OFFSET 4UL /**< Count/Status register */
+/* @} */
+
+/**
+ * This constant is used with the Reset Register.
+ * NOTE(review): 0x0A appears to be the reset key value expected by the
+ * hardware — confirm against the packet FIFO IP specification.
+ */
+#define XPF_V200A_RESET_FIFO_MASK 0x0000000A
+
+/** @name Occupancy/Vacancy Count Register constants
+ * @{
+ */
+/** Constant used with the Occupancy/Vacancy Count Register. This
+ * register also contains FIFO status
+ */
+#define XPF_V200A_COUNT_MASK 0x00FFFFFF
+#define XPF_V200A_DEADLOCK_MASK 0x20000000
+#define XPF_V200A_ALMOST_EMPTY_FULL_MASK 0x40000000
+#define XPF_V200A_EMPTY_FULL_MASK 0x80000000
+#define XPF_V200A_VACANCY_SCALED_MASK 0x10000000
+/* @} */
+
+/**
+ * This constant is used to mask the Width field
+ */
+#define XPF_V200A_FIFO_WIDTH_MASK 0x0E000000
+
+/** @name Width field
+ * @{
+ */
+/** Constant used with the Width field */
+#define XPF_V200A_FIFO_WIDTH_LEGACY_TYPE 0x00000000
+#define XPF_V200A_FIFO_WIDTH_8BITS_TYPE 0x02000000
+#define XPF_V200A_FIFO_WIDTH_16BITS_TYPE 0x04000000
+#define XPF_V200A_FIFO_WIDTH_32BITS_TYPE 0x06000000
+#define XPF_V200A_FIFO_WIDTH_64BITS_TYPE 0x08000000
+#define XPF_V200A_FIFO_WIDTH_128BITS_TYPE 0x0A000000
+#define XPF_V200A_FIFO_WIDTH_256BITS_TYPE 0x0C000000
+#define XPF_V200A_FIFO_WIDTH_512BITS_TYPE 0x0E000000
+/* @} */
+
+/** @name FIFO word width
+ * @{
+ */
+/** Width of a FIFO word, in bytes */
+#define XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT 4
+#define XPF_V200A_64BIT_FIFO_WIDTH_BYTE_COUNT 8
+/* @} */
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Function Prototypes ******************************/
+
+/* Low-level (Level 0) FIFO access functions; see the file-level comment
+ * above for the Level 0 / Level 1 driver split.
+ */
+int XPacketFifoV200a_L0Read(u32 RegBaseAddress,
+ u32 DataBaseAddress,
+ u8 *ReadBufferPtr, u32 ByteCount);
+
+int XPacketFifoV200a_L0Write(u32 RegBaseAddress,
+ u32 DataBaseAddress,
+ u8 *WriteBufferPtr, u32 ByteCount);
+
+/* Write through the Data Realignment Engine; no buffer alignment required */
+int XPacketFifoV200a_L0WriteDre(u32 RegBaseAddress,
+ u32 DataBaseAddress,
+ u8 *BufferPtr, u32 ByteCount);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xpacket_fifo_v2_00_a.c,v 1.1 2006/12/13 14:23:11 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2003 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xpacket_fifo_v2_00_a.c
+*
+* Contains functions for the XPacketFifoV200a component. See
+* xpacket_fifo_v2_00_a.h for more information about the component.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 2.00a ecm 12/30/02 First release
+* 2.00a rmm 05/14/03 Fixed diab compiler warnings
+* 2.00a rpm 10/22/03 Created and made use of Level 0 driver
+* 2.00a rmm 02/24/04 Added WriteDRE function.
+* 2.00a xd 10/27/04 Changed comments to support doxygen for API
+* documentation.
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xio.h"
+#include "xstatus.h"
+#include "xpacket_fifo_v2_00_a.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************* Variable Definitions ******************************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/*****************************************************************************/
+/**
+*
+* This function initializes a packet FIFO. Initialization resets the
+* FIFO such that it's empty and ready to use.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @param RegBaseAddress contains the base address of the registers for
+* the packet FIFO.
+*
+* @param DataBaseAddress contains the base address of the data for
+* the packet FIFO.
+*
+* @return Always returns XST_SUCCESS.
+*
+* @note None.
+*
+******************************************************************************/
+int XPacketFifoV200a_Initialize(XPacketFifoV200a * InstancePtr,
+ u32 RegBaseAddress, u32 DataBaseAddress)
+{
+ /* assert to verify input arguments are valid */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+
+ /* initialize the component variables to the specified state */
+
+ InstancePtr->RegBaseAddress = RegBaseAddress;
+ InstancePtr->DataBaseAddress = DataBaseAddress;
+ InstancePtr->IsReady = XCOMPONENT_IS_READY;
+
+ /* reset the FIFO such that it's empty and ready to use and indicate the
+ * initialization was successful, note that the is ready variable must be
+ * set prior to calling the reset function to prevent an assert
+ * (NOTE(review): the XPF_V200A_RESET macro itself contains no assert —
+ * this comment may be stale; verify)
+ */
+ XPF_V200A_RESET(InstancePtr);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function performs a self-test on the specified packet FIFO. The self
+* test resets the FIFO and reads a register to determine if it is the correct
+* reset value. This test is destructive in that any data in the FIFO will
+* be lost.
+*
+* @param InstancePtr is a pointer to the packet FIFO to be operated on.
+*
+* @param FifoType specifies the type of FIFO, read or write, for the self test.
+* The FIFO type is specified by the values XPF_V200A_READ_FIFO_TYPE or
+* XPF_V200A_WRITE_FIFO_TYPE.
+*
+* @return
+*
+* XST_SUCCESS is returned if the selftest is successful, or
+* XST_PFIFO_BAD_REG_VALUE indicating that the value read back from the
+* occupancy/vacancy count register after a reset does not match the
+* specified reset value.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XPacketFifoV200a_SelfTest(XPacketFifoV200a * InstancePtr, u32 FifoType)
+{
+ u32 Register;
+
+ /* assert to verify valid input arguments */
+
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID((FifoType == XPF_V200A_READ_FIFO_TYPE) ||
+ (FifoType == XPF_V200A_WRITE_FIFO_TYPE));
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* reset the FIFO and then check to make sure the occupancy/vacancy
+ * register contents are correct for a reset condition
+ */
+ XPF_V200A_RESET(InstancePtr);
+
+ Register = XIo_In32(InstancePtr->RegBaseAddress +
+ XPF_V200A_COUNT_STATUS_REG_OFFSET);
+
+ /* check the value of the register to ensure that it's correct for the
+ * specified FIFO type since both FIFO types reset to empty, but a bit
+ * in the register changes definition based upon FIFO type
+ */
+
+ if (FifoType == XPF_V200A_READ_FIFO_TYPE) {
+ /* check the register value for a read FIFO which should be empty */
+
+ if ((Register & ~(XPF_V200A_FIFO_WIDTH_MASK)) !=
+ XPF_V200A_EMPTY_FULL_MASK) {
+ return XST_PFIFO_BAD_REG_VALUE;
+ }
+ }
+ else {
+ /* check the register value for a write FIFO which should not be full
+ * on reset
+ */
+ if (((Register & ~(XPF_V200A_FIFO_WIDTH_MASK) &
+ XPF_V200A_EMPTY_FULL_MASK)) != 0) {
+ return XST_PFIFO_BAD_REG_VALUE;
+ }
+ }
+
+ /* check the register value for the proper FIFO width; first clear the
+ * empty/full status bit so only the width field is examined below
+ */
+
+ Register &= ~XPF_V200A_EMPTY_FULL_MASK;
+
+ if (((Register & XPF_V200A_FIFO_WIDTH_MASK) !=
+ XPF_V200A_FIFO_WIDTH_LEGACY_TYPE) &&
+ ((Register & XPF_V200A_FIFO_WIDTH_MASK) !=
+ XPF_V200A_FIFO_WIDTH_32BITS_TYPE) &&
+ ((Register & XPF_V200A_FIFO_WIDTH_MASK) !=
+ XPF_V200A_FIFO_WIDTH_64BITS_TYPE)) {
+ return XST_PFIFO_BAD_REG_VALUE;
+ }
+
+ /* the test was successful */
+
+ return XST_SUCCESS;
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Read data from a FIFO and put it into a specified buffer. This function
+* invokes the Level 0 driver function to read the FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @param BufferPtr points to the memory buffer to write the data into. This
+* buffer must be 32 bit aligned or an alignment exception could be
+* generated. Since this buffer is a byte buffer, the data is assumed to
+* be endian independent.
+*
+* @param ByteCount contains the number of bytes to read from the FIFO. This
+* number of bytes must be present in the FIFO or an error will be
+* returned.
+*
+* @return
+* - XST_SUCCESS if the operation was successful
+* <br><br>
+* - XST_PFIFO_LACK_OF_DATA if the number of bytes specified by the byte count
+* is not present in the FIFO.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XPacketFifoV200a_Read(XPacketFifoV200a * InstancePtr,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ /* assert to verify valid input arguments including 32 bit alignment of
+ * the buffer pointer; 4-byte alignment is the Level 0 requirement even
+ * for wider FIFOs (see the L0 read documentation)
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufferPtr != NULL);
+ XASSERT_NONVOID(((u32) BufferPtr &
+ (XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT - 1)) == 0);
+ XASSERT_NONVOID(ByteCount != 0);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* delegate the actual transfer to the Level 0 driver */
+ return XPacketFifoV200a_L0Read(InstancePtr->RegBaseAddress,
+ InstancePtr->DataBaseAddress,
+ BufferPtr, ByteCount);
+}
+
+/*****************************************************************************/
+/**
+*
+* Write data into a packet FIFO. This function invokes the Level 0 driver
+* function to write the FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @param BufferPtr points to the memory buffer that data is to be read from
+* and written into the FIFO. Since this buffer is a byte buffer, the
+* data is assumed to be endian independent. This buffer must be 32 bit
+* aligned or an alignment exception could be generated.
+*
+* @param ByteCount contains the number of bytes to read from the buffer and to
+* write to the FIFO.
+*
+* @return
+* - XST_SUCCESS is returned if the operation succeeded.
+* <br><br>
+* - XST_PFIFO_NO_ROOM is returned if there is not enough room in the FIFO to
+* hold the specified bytes.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XPacketFifoV200a_Write(XPacketFifoV200a * InstancePtr,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ /* assert to verify valid input arguments including 32 bit alignment of
+ * the buffer pointer
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufferPtr != NULL);
+ XASSERT_NONVOID(((u32) BufferPtr &
+ (XPF_V200A_32BIT_FIFO_WIDTH_BYTE_COUNT - 1)) == 0);
+ XASSERT_NONVOID(ByteCount != 0);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+
+ /* delegate the actual transfer to the Level 0 driver */
+ return XPacketFifoV200a_L0Write(InstancePtr->RegBaseAddress,
+ InstancePtr->DataBaseAddress,
+ BufferPtr, ByteCount);
+}
+
+
+/*****************************************************************************/
+/**
+*
+* Write data into a packet FIFO configured with the Data Realignment engine
+* (DRE). There are no alignment restrictions. The FIFO can be written on any
+* byte boundary. The FIFO must be at least 32 bits wide.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @param BufferPtr points to the memory buffer that data is to be read from
+* and written into the FIFO. Since this buffer is a byte buffer, the
+* data is assumed to be endian independent.
+*
+* @param ByteCount contains the number of bytes to read from the buffer and to
+* write to the FIFO.
+*
+* @return
+*
+* XST_SUCCESS is returned if the operation succeeded. If there is not enough
+* room in the FIFO to hold the specified bytes, XST_PFIFO_NO_ROOM is
+* returned.
+*
+* @note
+*
+* This function assumes that if the device inserting data into the FIFO is
+* a byte device, the order of the bytes in each 32/64 bit word is from the most
+* significant byte to the least significant byte.
+*
+******************************************************************************/
+int XPacketFifoV200a_WriteDre(XPacketFifoV200a * InstancePtr,
+ u8 *BufferPtr, u32 ByteCount)
+{
+ /* assert to verify valid input arguments; unlike Read/Write there is
+ * no alignment assert here because the DRE imposes no alignment
+ * restrictions (see the function header above)
+ */
+ XASSERT_NONVOID(InstancePtr != NULL);
+ XASSERT_NONVOID(BufferPtr != NULL);
+ XASSERT_NONVOID(ByteCount != 0);
+ XASSERT_NONVOID(InstancePtr->IsReady == XCOMPONENT_IS_READY);
+
+ /* delegate the actual transfer to the Level 0 driver */
+ return XPacketFifoV200a_L0WriteDre(InstancePtr->RegBaseAddress,
+ InstancePtr->DataBaseAddress,
+ BufferPtr, ByteCount);
+}
--- /dev/null
+/* $Id: xpacket_fifo_v2_00_a.h,v 1.1 2006/12/13 14:23:19 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xpacket_fifo_v2_00_a.h
+*
+* This component is a common component because its primary purpose is to
+* prevent code duplication in drivers. A driver which must handle a packet
+* FIFO uses this component rather than directly manipulating a packet FIFO.
+*
+* A FIFO is a device which has dual port memory such that one user may be
+* inserting data into the FIFO while another is consuming data from the FIFO.
+* A packet FIFO is designed for use with packet protocols such as Ethernet and
+* ATM. It is typically only used with devices when DMA and/or Scatter Gather
+* is used. It differs from a nonpacket FIFO in that it does not provide any
+* interrupts for thresholds of the FIFO such that it is less useful without
+* DMA.
+*
+* @note
+*
+* This component has the capability to generate an interrupt when an error
+* condition occurs. It is the user's responsibility to provide the interrupt
+* processing to handle the interrupt. This component provides the ability to
+* determine if that interrupt is active, a deadlock condition, and the ability
+* to reset the FIFO to clear the condition. In this condition, the device which
+* is using the FIFO should also be reset to prevent other problems. This error
+* condition could occur as a normal part of operation if the size of the FIFO
+* is not setup correctly. See the hardware IP specification for more details.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -----------------------------------------------
+* 2.00a ecm 12/30/02 First release
+* 2.00a rpm 10/22/03 Created and made use of Level 0 driver
+* 2.00a rmm 02/24/04 Added WriteDre function.
+* 2.00a xd 10/27/04 Changed comments to support doxygen for API
+* documentation.
+* </pre>
+*
+*****************************************************************************/
+#ifndef XPACKET_FIFO_V200A_H /* prevent circular inclusions */
+#define XPACKET_FIFO_V200A_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xpacket_fifo_l_v2_00_a.h"
+
+/************************** Constant Definitions *****************************/
+
+/* See the low-level header file for constant definitions */
+
+/**************************** Type Definitions *******************************/
+
+/**
+ * The XPacketFifo driver instance data. The driver is required to allocate a
+ * variable of this type for every packet FIFO in the device.
+ */
+typedef struct {
+ u32 RegBaseAddress; /**< Base address of registers */
+ u32 IsReady; /**< Set to XCOMPONENT_IS_READY by Initialize */
+ u32 DataBaseAddress;/**< Base address of data for FIFOs */
+} XPacketFifoV200a;
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+*
+* Reset the specified packet FIFO. Resetting a FIFO will cause any data
+* contained in the FIFO to be lost.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return None.
+*
+* @note C Signature: void XPF_V200A_RESET(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_RESET(InstancePtr) \
+ XIo_Out32((InstancePtr)->RegBaseAddress + XPF_V200A_RESET_REG_OFFSET, XPF_V200A_RESET_FIFO_MASK);
+
+
+/*****************************************************************************/
+/**
+*
+* Get the occupancy count for a read packet FIFO and the vacancy count for a
+* write packet FIFO. These counts indicate the number of 32-bit words
+* contained (occupancy) in the FIFO or the number of 32-bit words available
+* to write (vacancy) in the FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return The occupancy or vacancy count for the specified packet FIFO.
+*
+* @note
+*
+* C Signature: u32 XPF_V200A_GET_COUNT(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_GET_COUNT(InstancePtr) \
+ (XIo_In32((InstancePtr)->RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) & \
+ XPF_V200A_COUNT_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Determine if the specified packet FIFO is almost empty. Almost empty is
+* defined for a read FIFO when there is only one data word in the FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return
+*
+* TRUE if the packet FIFO is almost empty, FALSE otherwise.
+*
+* @note
+*
+* C Signature: u32 XPF_V200A_IS_ALMOST_EMPTY(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_IS_ALMOST_EMPTY(InstancePtr) \
+ (XIo_In32((InstancePtr)->RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) & \
+ XPF_V200A_ALMOST_EMPTY_FULL_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Determine if the specified packet FIFO is almost full. Almost full is
+* defined for a write FIFO when there is only one available data word in the
+* FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return
+*
+* TRUE if the packet FIFO is almost full, FALSE otherwise.
+*
+* @note
+*
+* C Signature: u32 XPF_V200A_IS_ALMOST_FULL(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_IS_ALMOST_FULL(InstancePtr) \
+ (XIo_In32((InstancePtr)->RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) & \
+ XPF_V200A_ALMOST_EMPTY_FULL_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Determine if the specified packet FIFO is empty. This applies only to a
+* read FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return
+*
+* TRUE if the packet FIFO is empty, FALSE otherwise.
+*
+* @note
+*
+* C Signature: u32 XPF_V200A_IS_EMPTY(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_IS_EMPTY(InstancePtr) \
+ (XIo_In32((InstancePtr)->RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) & \
+ XPF_V200A_EMPTY_FULL_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Determine if the specified packet FIFO is full. This applies only to a
+* write FIFO.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return
+*
+* TRUE if the packet FIFO is full, FALSE otherwise.
+*
+* @note
+*
+* C Signature: u32 XPF_V200A_IS_FULL(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_IS_FULL(InstancePtr) \
+ (XIo_In32((InstancePtr)->RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) & \
+ XPF_V200A_EMPTY_FULL_MASK)
+
+
+/*****************************************************************************/
+/**
+*
+* Determine if the specified packet FIFO is deadlocked. This condition occurs
+* when the FIFO is full and empty at the same time and is caused by a packet
+* being written to the FIFO which exceeds the total data capacity of the FIFO.
+* It occurs because of the mark/restore features of the packet FIFO which allow
+* retransmission of a packet. The software should reset the FIFO and any devices
+* using the FIFO when this condition occurs.
+*
+* @param InstancePtr contains a pointer to the FIFO to operate on.
+*
+* @return
+*
+* TRUE if the packet FIFO is deadlocked, FALSE otherwise.
+*
+* @note
+*
+* This component has the capability to generate an interrupt when an error
+* condition occurs. It is the user's responsibility to provide the interrupt
+* processing to handle the interrupt. This function provides the ability to
+* determine if a deadlock condition exists, and the ability to reset the FIFO to
+* clear the condition.
+* <br><br>
+* In this condition, the device which is using the FIFO should also be reset
+* to prevent other problems. This error condition could occur as a normal part
+* of operation if the size of the FIFO is not setup correctly.
+* <br><br>
+* C Signature: u32 XPF_V200A_IS_DEADLOCKED(XPacketFifoV200a *InstancePtr)
+*
+******************************************************************************/
+#define XPF_V200A_IS_DEADLOCKED(InstancePtr) \
+ (XIo_In32((InstancePtr)->RegBaseAddress + XPF_V200A_COUNT_STATUS_REG_OFFSET) & \
+ XPF_V200A_DEADLOCK_MASK)
+
+
+/************************** Function Prototypes ******************************/
+
+/**
+ * Standard functions
+ */
+int XPacketFifoV200a_Initialize(XPacketFifoV200a * InstancePtr,
+ u32 RegBaseAddress, u32 DataBaseAddress);
+int XPacketFifoV200a_SelfTest(XPacketFifoV200a * InstancePtr, u32 FifoType);
+
+/**
+ * Data functions
+ */
+int XPacketFifoV200a_Read(XPacketFifoV200a * InstancePtr,
+ u8 *ReadBufferPtr, u32 ByteCount);
+int XPacketFifoV200a_Write(XPacketFifoV200a * InstancePtr,
+ u8 *WriteBufferPtr, u32 ByteCount);
+int XPacketFifoV200a_WriteDre(XPacketFifoV200a * InstancePtr,
+ u8 *WriteBufferPtr, u32 ByteCount);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: xstatus.h,v 1.1 2006/12/13 14:23:26 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xstatus.h
+*
+* This file contains Xilinx software status codes. Status codes have their
+* own data type called int. These codes are used throughout the Xilinx
+* device drivers.
+*
+******************************************************************************/
+
+#ifndef XSTATUS_H /* prevent circular inclusions */
+#define XSTATUS_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+/************************** Constant Definitions *****************************/
+
+/*********************** Common statuses 0 - 500 *****************************/
+
+#define XST_SUCCESS 0L
+#define XST_FAILURE 1L
+#define XST_DEVICE_NOT_FOUND 2L
+#define XST_DEVICE_BLOCK_NOT_FOUND 3L
+#define XST_INVALID_VERSION 4L
+#define XST_DEVICE_IS_STARTED 5L
+#define XST_DEVICE_IS_STOPPED 6L
+#define XST_FIFO_ERROR 7L /* an error occurred during an
+ operation with a FIFO such as
+ an underrun or overrun, this
+ error requires the device to
+ be reset */
+#define XST_RESET_ERROR 8L /* an error occurred which requires
+ the device to be reset */
+#define XST_DMA_ERROR 9L /* a DMA error occurred, this error
+ typically requires the device
+ using the DMA to be reset */
+#define XST_NOT_POLLED 10L /* the device is not configured for
+ polled mode operation */
+#define XST_FIFO_NO_ROOM 11L /* a FIFO did not have room to put
+ the specified data into */
+#define XST_BUFFER_TOO_SMALL 12L /* the buffer is not large enough
+ to hold the expected data */
+#define XST_NO_DATA 13L /* there was no data available */
+#define XST_REGISTER_ERROR 14L /* a register did not contain the
+ expected value */
+#define XST_INVALID_PARAM 15L /* an invalid parameter was passed
+ into the function */
+#define XST_NOT_SGDMA 16L /* the device is not configured for
+ scatter-gather DMA operation */
+#define XST_LOOPBACK_ERROR 17L /* a loopback test failed */
+#define XST_NO_CALLBACK 18L /* a callback has not yet been
+ registered */
+#define XST_NO_FEATURE 19L /* device is not configured with
+ the requested feature */
+#define XST_NOT_INTERRUPT 20L /* device is not configured for
+ interrupt mode operation */
+#define XST_DEVICE_BUSY 21L /* device is busy */
+#define XST_ERROR_COUNT_MAX 22L /* the error counters of a device
+ have maxed out */
+#define XST_IS_STARTED 23L /* used when part of device is
+ already started i.e.
+ sub channel */
+#define XST_IS_STOPPED 24L /* used when part of device is
+ already stopped i.e.
+ sub channel */
+#define XST_DATA_LOST 26L /* driver defined error */
+#define XST_RECV_ERROR 27L /* generic receive error */
+#define XST_SEND_ERROR 28L /* generic transmit error */
+#define XST_NOT_ENABLED 29L /* a requested service is not
+ available because it has not
+ been enabled */
+
+/***************** Utility Component statuses 401 - 500 *********************/
+
+#define XST_MEMTEST_FAILED 401L /* memory test failed */
+
+
+/***************** Common Components statuses 501 - 1000 *********************/
+
+/********************* Packet Fifo statuses 501 - 510 ************************/
+
+#define XST_PFIFO_LACK_OF_DATA 501L /* not enough data in FIFO */
+#define XST_PFIFO_NO_ROOM 502L /* not enough room in FIFO */
+#define XST_PFIFO_BAD_REG_VALUE 503L /* self test, a register value
+ was invalid after reset */
+#define XST_PFIFO_ERROR 504L /* generic packet FIFO error */
+#define XST_PFIFO_DEADLOCK 505L /* packet FIFO is reporting
+ * empty and full simultaneously
+ */
+
+/************************** DMA statuses 511 - 530 ***************************/
+
+#define XST_DMA_TRANSFER_ERROR 511L /* self test, DMA transfer
+ failed */
+#define XST_DMA_RESET_REGISTER_ERROR 512L /* self test, a register value
+ was invalid after reset */
+#define XST_DMA_SG_LIST_EMPTY 513L /* scatter gather list contains
+ no buffer descriptors ready
+ to be processed */
+#define XST_DMA_SG_IS_STARTED 514L /* scatter gather not stopped */
+#define XST_DMA_SG_IS_STOPPED 515L /* scatter gather not running */
+#define XST_DMA_SG_LIST_FULL 517L /* all the buffer descriptors of
+ the scatter gather list are
+ being used */
+#define XST_DMA_SG_BD_LOCKED 518L /* the scatter gather buffer
+ descriptor which is to be
+ copied over in the scatter
+ list is locked */
+#define XST_DMA_SG_NOTHING_TO_COMMIT 519L /* no buffer descriptors have been
+ put into the scatter gather
+ list to be committed */
+#define XST_DMA_SG_COUNT_EXCEEDED 521L /* the packet count threshold
+ specified was larger than the
+ total # of buffer descriptors
+ in the scatter gather list */
+#define XST_DMA_SG_LIST_EXISTS 522L /* the scatter gather list has
+ already been created */
+#define XST_DMA_SG_NO_LIST 523L /* no scatter gather list has
+ been created */
+#define XST_DMA_SG_BD_NOT_COMMITTED 524L /* the buffer descriptor which was
+ being started was not committed
+ to the list */
+#define XST_DMA_SG_NO_DATA 525L /* the buffer descriptor to start
+ has already been used by the
+ hardware so it can't be reused
+ */
+#define XST_DMA_SG_LIST_ERROR 526L /* general purpose list access
+ error */
+#define XST_DMA_BD_ERROR 527L /* general buffer descriptor
+ error */
+
+/************************** IPIF statuses 531 - 550 ***************************/
+
+#define XST_IPIF_REG_WIDTH_ERROR 531L /* an invalid register width
+ was passed into the function */
+#define XST_IPIF_RESET_REGISTER_ERROR 532L /* the value of a register at
+ reset was not valid */
+#define XST_IPIF_DEVICE_STATUS_ERROR 533L /* a write to the device interrupt
+ status register did not read
+ back correctly */
+#define XST_IPIF_DEVICE_ACK_ERROR 534L /* the device interrupt status
+ register did not reset when
+ acked */
+#define XST_IPIF_DEVICE_ENABLE_ERROR 535L /* the device interrupt enable
+ register was not updated when
+ other registers changed */
+#define XST_IPIF_IP_STATUS_ERROR 536L /* a write to the IP interrupt
+ status register did not read
+ back correctly */
+#define XST_IPIF_IP_ACK_ERROR 537L /* the IP interrupt status register
+ did not reset when acked */
+#define XST_IPIF_IP_ENABLE_ERROR 538L /* IP interrupt enable register was
+ not updated correctly when other
+ registers changed */
+#define XST_IPIF_DEVICE_PENDING_ERROR 539L /* The device interrupt pending
+ register did not indicate the
+ expected value */
+#define XST_IPIF_DEVICE_ID_ERROR 540L /* The device interrupt ID register
+ did not indicate the expected
+ value */
+#define XST_IPIF_ERROR 541L /* generic ipif error */
+
+/****************** Device specific statuses 1001 - 4095 *********************/
+
+/********************* Ethernet statuses 1001 - 1050 *************************/
+
+#define XST_EMAC_MEMORY_SIZE_ERROR 1001L /* Memory space is not big enough
+ * to hold the minimum number of
+ * buffers or descriptors */
+#define XST_EMAC_MEMORY_ALLOC_ERROR 1002L /* Memory allocation failed */
+#define XST_EMAC_MII_READ_ERROR 1003L /* MII read error */
+#define XST_EMAC_MII_BUSY 1004L /* An MII operation is in progress */
+#define XST_EMAC_OUT_OF_BUFFERS 1005L /* Adapter is out of buffers */
+#define XST_EMAC_PARSE_ERROR 1006L /* Invalid adapter init string */
+#define XST_EMAC_COLLISION_ERROR 1007L /* Excess deferral or late
+ * collision on polled send */
+
+/*********************** UART statuses 1051 - 1075 ***************************/
+#define XST_UART
+
+#define XST_UART_INIT_ERROR 1051L
+#define XST_UART_START_ERROR 1052L
+#define XST_UART_CONFIG_ERROR 1053L
+#define XST_UART_TEST_FAIL 1054L
+#define XST_UART_BAUD_ERROR 1055L
+#define XST_UART_BAUD_RANGE 1056L
+
+
+/************************ IIC statuses 1076 - 1100 ***************************/
+
+#define XST_IIC_SELFTEST_FAILED 1076 /* self test failed */
+#define XST_IIC_BUS_BUSY 1077 /* bus found busy */
+#define XST_IIC_GENERAL_CALL_ADDRESS 1078 /* mastersend attempted with */
+ /* general call address */
+#define XST_IIC_STAND_REG_RESET_ERROR 1079 /* A non parameterizable reg */
+ /* value after reset not valid */
+#define XST_IIC_TX_FIFO_REG_RESET_ERROR 1080 /* Tx fifo included in design */
+ /* value after reset not valid */
+#define XST_IIC_RX_FIFO_REG_RESET_ERROR 1081 /* Rx fifo included in design */
+ /* value after reset not valid */
+#define XST_IIC_TBA_REG_RESET_ERROR 1082 /* 10 bit addr incl in design */
+ /* value after reset not valid */
+#define XST_IIC_CR_READBACK_ERROR 1083 /* Read of the control register */
+ /* didn't return value written */
+#define XST_IIC_DTR_READBACK_ERROR 1084 /* Read of the data Tx reg */
+ /* didn't return value written */
+#define XST_IIC_DRR_READBACK_ERROR 1085 /* Read of the data Receive reg */
+ /* didn't return value written */
+#define XST_IIC_ADR_READBACK_ERROR 1086 /* Read of the data Tx reg */
+ /* didn't return value written */
+#define XST_IIC_TBA_READBACK_ERROR 1087 /* Read of the 10 bit addr reg */
+ /* didn't return written value */
+#define XST_IIC_NOT_SLAVE 1088 /* The device isn't a slave */
+
+/*********************** ATMC statuses 1101 - 1125 ***************************/
+
+#define XST_ATMC_ERROR_COUNT_MAX 1101L /* the error counters in the ATM
+ controller hit the max value
+ which requires the statistics
+ to be cleared */
+
+/*********************** Flash statuses 1126 - 1150 **************************/
+
+#define XST_FLASH_BUSY 1126L /* Flash is erasing or programming */
+#define XST_FLASH_READY 1127L /* Flash is ready for commands */
+#define XST_FLASH_ERROR 1128L /* Flash had detected an internal
+ error. Use XFlash_DeviceControl
+ to retrieve device specific codes */
+#define XST_FLASH_ERASE_SUSPENDED 1129L /* Flash is in suspended erase state */
+#define XST_FLASH_WRITE_SUSPENDED 1130L /* Flash is in suspended write state */
+#define XST_FLASH_PART_NOT_SUPPORTED 1131L /* Flash type not supported by
+ driver */
+#define XST_FLASH_NOT_SUPPORTED 1132L /* Operation not supported */
+#define XST_FLASH_TOO_MANY_REGIONS 1133L /* Too many erase regions */
+#define XST_FLASH_TIMEOUT_ERROR 1134L /* Programming or erase operation
+ aborted due to a timeout */
+#define XST_FLASH_ADDRESS_ERROR 1135L /* Accessed flash outside its
+ addressable range */
+#define XST_FLASH_ALIGNMENT_ERROR 1136L /* Write alignment error */
+#define XST_FLASH_BLOCKING_CALL_ERROR 1137L /* Couldn't return immediately from
+ write/erase function with
+ XFL_NON_BLOCKING_WRITE/ERASE
+ option cleared */
+#define XST_FLASH_CFI_QUERY_ERROR 1138L /* Failed to query the device */
+
+/*********************** SPI statuses 1151 - 1175 ****************************/
+
+#define XST_SPI_MODE_FAULT 1151 /* master was selected as slave */
+#define XST_SPI_TRANSFER_DONE 1152 /* data transfer is complete */
+#define XST_SPI_TRANSMIT_UNDERRUN 1153 /* slave underruns transmit register */
+#define XST_SPI_RECEIVE_OVERRUN 1154 /* device overruns receive register */
+#define XST_SPI_NO_SLAVE 1155 /* no slave has been selected yet */
+#define XST_SPI_TOO_MANY_SLAVES 1156 /* more than one slave is being
+ * selected */
+#define XST_SPI_NOT_MASTER 1157 /* operation is valid only as master */
+#define XST_SPI_SLAVE_ONLY 1158 /* device is configured as slave-only */
+#define XST_SPI_SLAVE_MODE_FAULT 1159 /* slave was selected while disabled */
+
+/********************** OPB Arbiter statuses 1176 - 1200 *********************/
+
+#define XST_OPBARB_INVALID_PRIORITY 1176 /* the priority registers have either
+ * one master assigned to two or more
+ * priorities, or one master not
+ * assigned to any priority
+ */
+#define XST_OPBARB_NOT_SUSPENDED 1177 /* an attempt was made to modify the
+ * priority levels without first
+ * suspending the use of priority
+ * levels
+ */
+#define XST_OPBARB_PARK_NOT_ENABLED 1178 /* bus parking by id was enabled but
+ * bus parking was not enabled
+ */
+#define XST_OPBARB_NOT_FIXED_PRIORITY 1179 /* the arbiter must be in fixed
+ * priority mode to allow the
+ * priorities to be changed
+ */
+
+/************************ Intc statuses 1201 - 1225 **************************/
+
+#define XST_INTC_FAIL_SELFTEST 1201 /* self test failed */
+#define XST_INTC_CONNECT_ERROR 1202 /* interrupt already in use */
+
+/********************** TmrCtr statuses 1226 - 1250 **************************/
+
+#define XST_TMRCTR_TIMER_FAILED 1226 /* self test failed */
+
+/********************** WdtTb statuses 1251 - 1275 ***************************/
+
+#define XST_WDTTB_TIMER_FAILED 1251L
+
+/********************** PlbArb statuses 1276 - 1300 **************************/
+
+#define XST_PLBARB_FAIL_SELFTEST 1276L
+
+/********************** Plb2Opb statuses 1301 - 1325 *************************/
+
+#define XST_PLB2OPB_FAIL_SELFTEST 1301L
+
+/********************** Opb2Plb statuses 1326 - 1350 *************************/
+
+#define XST_OPB2PLB_FAIL_SELFTEST 1326L
+
+/********************** SysAce statuses 1351 - 1360 **************************/
+
+#define XST_SYSACE_NO_LOCK 1351L /* No MPU lock has been granted */
+
+/********************** PCI Bridge statuses 1361 - 1375 **********************/
+
+#define XST_PCI_INVALID_ADDRESS 1361L
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/*
+* @file xstreamer.c
+*
+* See xstreamer.h for a description on how to use this driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a jvb 10/13/06 First release - based on Robert McGee's streaming packet
+* fifo driver.
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xstreamer.h"
+
+/*
+ * Implementation Notes
+ *
+ * --- Receive ---
+ *
+ * The basic algorithm for receiving bytes through this byte streamer copies a
+ * fifo key-hole width chunk from the fifo into a holding buffer and then doles
+ * out the bytes from the holding buffer. In some cases, when the buffer given
+ * happens to already be aligned in memory, this algorithm will bypass the
+ * holding buffer.
+ *
+ * Here is a picture to depict this process:
+ *
+ * Initial state: holding buffer
+ * +--------------+
+ * | <empty> |
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * during XStrm_Read():
+ * first holding buffer fill: holding buffer
+ * +--------------+
+ * |////<full>////|
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * first holding buffer read: holding buffer
+ * read unread
+ * +--------------+
+ * | |///////|
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * ...
+ *
+ * last holding buffer read: holding buffer
+ * +--------------+
+ * | <empty> |
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * repeat this process ^^^
+ *
+ *
+ * --- Transmit ---
+ *
+ * The basic algorithm for transmitting bytes through this byte streamer copies
+ * bytes into a holding buffer and then writes the holding buffer into the fifo
+ * when it is full. In some cases, when the buffer given happens to already be
+ * aligned in memory, this algorithm will bypass the holding buffer.
+ *
+ * Here is a picture to depict this process:
+ *
+ * Initial state: holding buffer
+ * +--------------+
+ * | <empty> |
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * during XStrm_Write():
+ * first holding buffer write: holding buffer
+ * writen empty
+ * +--------------+
+ * |//////| |
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * ...
+ * last holding buffer write: holding buffer
+ * +--------------+
+ * |////<full>////|
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * holding buffer flush: holding buffer
+ * +--------------+
+ * | <empty> |
+ * +--------------+
+ * ^
+ * |
+ * index
+ *
+ * repeat this process ^^^
+ */
+
+#ifndef min
+#define min(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+/*****************************************************************************/
+/*
+*
+* XStrm_RxInitialize initializes the XStrm_RxFifoStreamer object referenced by
+* <i>InstancePtr</i>.
+*
+* @param InstancePtr references the tx streamer on which to operate.
+*
+* @param FifoWidth specifies the FIFO keyhole size in bytes.
+*
+* @param FifoInstance references the FIFO driver instance that this streamer
+* object should use to transfer data into the actual fifo.
+*
+* @param ReadFn specifies a routine to use to read data from the actual
+* FIFO. It is assumed that this read routine will handle only reads
+* from an aligned buffer. (Otherwise, why are we using this streamer
+* driver?)
+*
+* @param GetLenFn specifies a routine to use to initiate a receive on the
+* actual FIFO.
+*
+* @param GetOccupancyFn specifies a routine to use to retrieve the occupancy
+* in the actual FIFO. The true occupancy value needs to come through
+* this streamer driver because it holds some of the bytes.
+*
+* @return N/A
+*
+******************************************************************************/
+void XStrm_RxInitialize(XStrm_RxFifoStreamer * InstancePtr,
+ unsigned FifoWidth, void *FifoInstance,
+ XStrm_XferFnType ReadFn,
+ XStrm_GetLenFnType GetLenFn,
+ XStrm_GetOccupancyFnType GetOccupancyFn)
+{
+ /* Verify arguments */
+ XASSERT_VOID(InstancePtr != NULL);
+
+ InstancePtr->HeadIndex = FifoWidth;
+ InstancePtr->FifoWidth = FifoWidth;
+ InstancePtr->FifoInstance = FifoInstance;
+ InstancePtr->ReadFn = ReadFn;
+ InstancePtr->GetLenFn = GetLenFn;
+ InstancePtr->GetOccupancyFn = GetOccupancyFn;
+}
+
+/*****************************************************************************/
+/*
+*
+* XStrm_TxInitialize initializes the XStrm_TxFifoStreamer object referenced by
+* <i>InstancePtr</i>.
+*
+* @param InstancePtr references the tx streamer on which to operate.
+*
+* @param FifoWidth specifies the FIFO keyhole size in bytes.
+*
+* @param FifoInstance references the FIFO driver instance that this streamer
+* object should use to transfer data into the actual fifo.
+*
+* @param WriteFn specifies a routine to use to write data into the actual
+* FIFO. It is assumed that this write routine will handle only writes
+* from an aligned buffer. (Otherwise, why are we using this streamer
+* driver?)
+*
+* @param SetLenFn specifies a routine to use to initiate a transmit on the
+* actual FIFO.
+*
+* @param GetVacancyFn specifies a routine to use to retrieve the vacancy in
+* the actual FIFO. The true vacancy value needs to come through this
+* streamer driver because it holds some of the bytes.
+*
+* @return N/A
+*
+******************************************************************************/
+void XStrm_TxInitialize(XStrm_TxFifoStreamer * InstancePtr, unsigned FifoWidth,
+ void *FifoInstance, XStrm_XferFnType WriteFn,
+ XStrm_SetLenFnType SetLenFn,
+ XStrm_GetVacancyFnType GetVacancyFn)
+{
+ /* Verify arguments */
+ XASSERT_VOID(InstancePtr != NULL);
+
+ InstancePtr->TailIndex = 0;
+ InstancePtr->FifoWidth = FifoWidth;
+ InstancePtr->FifoInstance = FifoInstance;
+ InstancePtr->WriteFn = WriteFn;
+ InstancePtr->SetLenFn = SetLenFn;
+ InstancePtr->GetVacancyFn = GetVacancyFn;
+}
+
+/*****************************************************************************/
+/*
+*
+* XStrm_RxGetLen notifies the hardware that the program is ready to receive the
+* next frame from the receive channel of the FIFO, specified by
+* <i>InstancePtr</i>.
+*
+* Note that the program must first call XStrm_RxGetLen before pulling data
+* out of the receive channel of the FIFO with XStrm_Read.
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @return XStrm_RxGetLen returns the number of bytes available in the next
+* frame.
+*
+******************************************************************************/
+u32 XStrm_RxGetLen(XStrm_RxFifoStreamer * InstancePtr)
+{
+ u32 len;
+
+ InstancePtr->HeadIndex = InstancePtr->FifoWidth;
+ len = (*InstancePtr->GetLenFn) (InstancePtr->FifoInstance);
+ InstancePtr->FrmByteCnt = len;
+ return len;
+}
+
+/*****************************************************************************/
+/*
+*
+* XStrm_Read reads <i>Bytes</i> bytes from the FIFO specified by
+* <i>InstancePtr</i> to the block of memory, referenced by <i>BufPtr</i>.
+*
+* Care must be taken to ensure that the number of bytes read with one or more
+* calls to XStrm_Read() does not exceed the number of bytes available given
+* from the last call to XStrm_RxGetLen().
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param BufPtr specifies the memory address to place the data read.
+*
+* @param Bytes specifies the number of bytes to read.
+*
+* @return N/A
+*
+******************************************************************************/
+void XStrm_Read(XStrm_RxFifoStreamer * InstancePtr, void *BufPtr,
+ unsigned Bytes)
+{
+ u8 *DestPtr = (u8 *) BufPtr;
+ unsigned BytesRemaining = Bytes;
+ unsigned FifoWordsToXfer;
+ unsigned PartialBytes;
+ unsigned i;
+
+ while (BytesRemaining) {
+ xdbg_printf(XDBG_DEBUG_FIFO_RX,
+ "XStrm_Read: BytesRemaining: %d\n", BytesRemaining);
+ /* Case 1: There are bytes in the holding buffer
+ *
+ * 1) Read the bytes from the holding buffer to the target buffer.
+ * 2) Loop back around and handle the rest of the transfer.
+ */
+ if (InstancePtr->HeadIndex != InstancePtr->FifoWidth) {
+ xdbg_printf(XDBG_DEBUG_FIFO_RX,
+ "XStrm_Read: Case 1: InstancePtr->HeadIndex [%d] != InstancePtr->FifoWidth [%d]\n",
+ InstancePtr->HeadIndex,
+ InstancePtr->FifoWidth);
+ i = InstancePtr->HeadIndex;
+
+ PartialBytes = min(BytesRemaining,
+ InstancePtr->FifoWidth -
+ InstancePtr->HeadIndex);
+ InstancePtr->HeadIndex += PartialBytes;
+ BytesRemaining -= PartialBytes;
+ InstancePtr->FrmByteCnt -= PartialBytes;
+ while (PartialBytes--) {
+ *DestPtr = InstancePtr->AlignedBuffer.bytes[i];
+ i++;
+ DestPtr++;
+ }
+ }
+ /* Case 2: There are no more bytes in the holding buffer and
+ * the target buffer is 32 bit aligned and
+ * the number of bytes remaining to transfer is greater
+ * than or equal to the fifo width.
+ *
+ * 1) We can go fast by reading a long string of fifo words right out
+ * of the fifo into the target buffer.
+ * 2) Loop back around to transfer the last few bytes.
+ */
+ else if ((((unsigned) DestPtr & 3) == 0) &&
+ (BytesRemaining >= InstancePtr->FifoWidth)) {
+ xdbg_printf(XDBG_DEBUG_FIFO_RX,
+ "XStrm_Read: Case 2: DestPtr: %p, BytesRemaining: %d, InstancePtr->FifoWidth: %d\n",
+ DestPtr, BytesRemaining,
+ InstancePtr->FifoWidth);
+ FifoWordsToXfer =
+ BytesRemaining / InstancePtr->FifoWidth;
+
+ (*(InstancePtr->ReadFn)) (InstancePtr->FifoInstance,
+ DestPtr, FifoWordsToXfer);
+ DestPtr += FifoWordsToXfer * InstancePtr->FifoWidth;
+ BytesRemaining -=
+ FifoWordsToXfer * InstancePtr->FifoWidth;
+ InstancePtr->FrmByteCnt -=
+ FifoWordsToXfer * InstancePtr->FifoWidth;
+ }
+ /* Case 3: There are no more bytes in the holding buffer and
+ * the number of bytes remaining to transfer is less than
+ * the fifo width or
+ * things just don't line up.
+ *
+ * 1) Fill the holding buffer.
+ * 2) Loop back around and handle the rest of the transfer.
+ */
+ else {
+ xdbg_printf(XDBG_DEBUG_FIFO_RX, "XStrm_Read: Case 3\n");
+ /*
+ * At the tail end, read one fifo word into the local holding
+ * buffer and loop back around to take care of the transfer.
+ */
+ (*InstancePtr->ReadFn) (InstancePtr->FifoInstance,
+ &(InstancePtr->AlignedBuffer.
+ bytes[0]), 1);
+ InstancePtr->HeadIndex = 0;
+ }
+ }
+}
+
+/*****************************************************************************/
+/*
+*
+* XStrm_TxSetLen flushes to the FIFO, specified by <i>InstancePtr</i>, any
+* bytes remaining in internal buffers and begins a hardware transfer of data
+* out of the transmit channel of the FIFO. <i>Bytes</i> specifies the number
+* of bytes in the frame to transmit.
+*
+* @param InstancePtr references the FIFO Streamer on which to operate.
+*
+* @param Bytes specifies the frame length in bytes.
+*
+* @return N/A
+*
+******************************************************************************/
+void XStrm_TxSetLen(XStrm_TxFifoStreamer * InstancePtr, u32 Bytes)
+{
+	/*
+	 * First flush what's in the holding buffer
+	 */
+	if (InstancePtr->TailIndex != 0) {
+		/* TailIndex != 0 means the holding buffer still stages bytes
+		 * that have not reached the FIFO; WriteFn transfers whole
+		 * FIFO words, so push exactly one word out.
+		 */
+		(*InstancePtr->WriteFn) (InstancePtr->FifoInstance,
+					 &(InstancePtr->AlignedBuffer.bytes[0]),
+					 1);
+		InstancePtr->TailIndex = 0;
+	}
+
+	/*
+	 * Kick off the hw write
+	 */
+	(*(InstancePtr)->SetLenFn) (InstancePtr->FifoInstance, Bytes);
+}
+
+/*****************************************************************************/
+/*
+*
+* XStrm_Write writes <i>Bytes</i> bytes of the block of memory, referenced by
+* <i>BufPtr</i>, to the transmit channel of the FIFO referenced by
+* <i>InstancePtr</i>.
+*
+* Care must be taken to ensure that the number of bytes written with one or
+* more calls to XStrm_Write() matches the number of bytes given in the next
+* call to XStrm_TxSetLen().
+*
+* @param InstancePtr references the FIFO on which to operate.
+*
+* @param BufPtr specifies the memory address of data to write.
+*
+* @param Bytes specifies the number of bytes to write.
+*
+* @return N/A
+*
+******************************************************************************/
+void XStrm_Write(XStrm_TxFifoStreamer * InstancePtr, void *BufPtr,
+		 unsigned Bytes)
+{
+	u8 *SrcPtr = (u8 *) BufPtr;	/* byte cursor into caller's buffer */
+	unsigned BytesRemaining = Bytes;	/* bytes left to transfer */
+	unsigned FifoWordsToXfer;	/* whole FIFO words moved in case 2 */
+	unsigned PartialBytes;	/* bytes staged via the holding buffer */
+	unsigned i;	/* byte index into AlignedBuffer */
+
+	while (BytesRemaining) {
+		xdbg_printf(XDBG_DEBUG_FIFO_TX,
+			    "XStrm_Write: BytesRemaining: %d\n",
+			    BytesRemaining);
+		/* Case 1: The holding buffer is full
+		 *
+		 * 1) Write it to the fifo.
+		 * 2) Fall through to transfer more bytes in this iteration.
+		 */
+		if (InstancePtr->TailIndex == InstancePtr->FifoWidth) {
+			xdbg_printf(XDBG_DEBUG_FIFO_TX,
+				    "XStrm_Write: (case 1) TailIndex: %d; FifoWidth: %d; WriteFn: %p\n",
+				    InstancePtr->TailIndex,
+				    InstancePtr->FifoWidth,
+				    InstancePtr->WriteFn);
+			(*InstancePtr->WriteFn) (InstancePtr->FifoInstance,
+						 &(InstancePtr->AlignedBuffer.
+						   bytes[0]), 1);
+			InstancePtr->TailIndex = 0;
+		}
+		/* Case 2: There are no bytes in the holding buffer and
+		 *         the target buffer is 32 bit aligned and
+		 *         the number of bytes remaining to transfer is greater
+		 *         than or equal to the fifo width.
+		 *
+		 * 1) We can go fast by writing a long string of fifo words right out
+		 *    of the source buffer into the fifo.
+		 * 2) Loop back around to transfer the last few bytes.
+		 */
+		/* Note this is deliberately 'if', not 'else if': when case 1
+		 * just emptied the holding buffer (TailIndex == 0) the fast
+		 * path can run in this same iteration.  Alignment is tested
+		 * on the low two address bits of SrcPtr.
+		 */
+		if ((InstancePtr->TailIndex == 0) &&
+		    (BytesRemaining >= InstancePtr->FifoWidth) &&
+		    (((unsigned) SrcPtr & 3) == 0)) {
+			FifoWordsToXfer =
+			    BytesRemaining / InstancePtr->FifoWidth;
+
+			xdbg_printf(XDBG_DEBUG_FIFO_TX,
+				    "XStrm_Write: (case 2) TailIndex: %d; BytesRemaining: %d; FifoWidth: %d; SrcPtr: %p;\n InstancePtr: %p; WriteFn: %p (XLlFifo_iWrite_Aligned: %p),\nFifoWordsToXfer: %d (BytesRemaining: %d)\n",
+				    InstancePtr->TailIndex, BytesRemaining,
+				    InstancePtr->FifoWidth, SrcPtr, InstancePtr,
+				    InstancePtr->WriteFn,
+				    XLlFifo_iWrite_Aligned, FifoWordsToXfer,
+				    BytesRemaining);
+
+			(*InstancePtr->WriteFn) (InstancePtr->FifoInstance,
+						 SrcPtr, FifoWordsToXfer);
+			SrcPtr += FifoWordsToXfer * InstancePtr->FifoWidth;
+			BytesRemaining -=
+			    FifoWordsToXfer * InstancePtr->FifoWidth;
+			xdbg_printf(XDBG_DEBUG_FIFO_TX,
+				    "XStrm_Write: (end case 2) TailIndex: %d; BytesRemaining: %d; SrcPtr: %p\n",
+				    InstancePtr->TailIndex, BytesRemaining,
+				    SrcPtr);
+		}
+		/* Case 3: The alignment of the "galaxies" didn't occur in
+		 *         Case 2 above, so we must pump the bytes through the
+		 *         holding buffer.
+		 *
+		 * 1) Write bytes from the source buffer to the holding buffer
+		 * 2) Loop back around and handle the rest of the transfer.
+		 */
+		else {
+			/* Stage bytes into AlignedBuffer starting at the
+			 * current TailIndex; the buffer is flushed either by
+			 * case 1 on a later iteration or by XStrm_TxSetLen().
+			 */
+			i = InstancePtr->TailIndex;
+
+			PartialBytes =
+			    min(BytesRemaining,
+				InstancePtr->FifoWidth -
+				InstancePtr->TailIndex);
+			BytesRemaining -= PartialBytes;
+			InstancePtr->TailIndex += PartialBytes;
+			while (PartialBytes--) {
+				xdbg_printf(XDBG_DEBUG_FIFO_TX,
+					    "XStrm_Write: (case 3) PartialBytes: %d\n",
+					    PartialBytes);
+				InstancePtr->AlignedBuffer.bytes[i] = *SrcPtr;
+				i++;
+				SrcPtr++;
+			}
+		}
+	}
+}
--- /dev/null
+/* $Id: */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2005-2006 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/*
+ *
+ * @file xstreamer.h
+ *
+ * The Xilinx byte streamer for packet FIFOs.
+ *
+ * <h2>Driver Description</h2>
+ *
+ * This driver enables higher layer software to access a hardware FIFO using
+ * any alignment in the data buffers while preserving alignment for the hardware
+ * FIFO access.
+ *
+ * This driver treats send and receive channels separately, using different
+ * types of instance objects for each.
+ *
+ * This driver makes use of another FIFO driver to access the specific FIFO
+ * hardware through use of the routines passed into the Tx/RxInitialize
+ * routines.
+ *
+ * <h2>Initialization</h2>
+ *
+ * Send and receive channels are initialized separately. The receive channel is
+ * initialized using XStrm_RxInitialize(). The send channel is initialized
+ * using XStrm_TxInitialize().
+ *
+ *
+ * <h2>Usage</h2>
+ * It is fairly simple to use the API provided by this byte streamer
+ * driver. The only somewhat tricky part is that the calling code must
+ * correctly call a couple routines in the right sequence for receive and
+ * transmit.
+ *
+ * This sequence is described here. Check the routine functional
+ * descriptions for information on how to use a specific API routine.
+ *
+ * <h3>Receive</h3>
+ * A frame is received by using the following sequence:<br>
+ * 1) call XStrm_RxGetLen() to get the length of the next incoming frame.<br>
+ * 2) call XStrm_Read() one or more times to read the number of bytes
+ * reported by XStrm_RxGetLen().<br>
+ *
+ * For example:
+ * <pre>
+ * frame_len = XStrm_RxGetLen(&RxInstance);
+ * while (frame_len) {
+ * unsigned bytes = min(sizeof(buffer), frame_len);
+ * XStrm_Read(&RxInstance, buffer, bytes);
+ * // do something with buffer here
+ * frame_len -= bytes;
+ * }
+ * </pre>
+ *
+ * Other restrictions on the sequence of API calls may apply depending on
+ * the specific FIFO driver used by this byte streamer driver.
+ *
+ * <h3>Transmit</h3>
+ * A frame is transmitted by using the following sequence:<br>
+ * 1) call XStrm_Write() one or more times to write all the of bytes in
+ * the next frame.<br>
+ * 2) call XStrm_TxSetLen() to begin the transmission of frame just
+ * written.<br>
+ *
+ * For example:
+ * <pre>
+ * frame_left = frame_len;
+ * while (frame_left) {
+ *     unsigned bytes = min(sizeof(buffer), frame_left);
+ *     XStrm_Write(&TxInstance, buffer, bytes);
+ *     // do something here to refill buffer
+ *     frame_left -= bytes;
+ * }
+ * XStrm_TxSetLen(&TxInstance, frame_len);
+ * </pre>
+ *
+ * Other restrictions on the sequence of API calls may apply depending on
+ * the specific FIFO driver used by this byte streamer driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver Who Date Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a jvb 10/12/06 First release
+ * </pre>
+ *
+ *****************************************************************************/
+#ifndef XSTREAMER_H /* prevent circular inclusions */
+#define XSTREAMER_H /* by using preprocessor symbols */
+
+/* force C linkage */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+#include "xdebug.h"
+
+/*
+ * key hole size in 32 bit words
+ */
+#define LARGEST_FIFO_KEYHOLE_SIZE_WORDS 4
+
+/*
+ * This union is used simply to force a 32bit alignment on the
+ * buffer. Only the 'bytes' member is really used.
+ */
+union XStrm_AlignedBufferType {
+ u32 _words[LARGEST_FIFO_KEYHOLE_SIZE_WORDS];
+ char bytes[LARGEST_FIFO_KEYHOLE_SIZE_WORDS * 4];
+};
+
+typedef int(*XStrm_XferFnType) (void *FifoInstance, void *BufPtr,
+ unsigned WordCount);
+typedef u32 (*XStrm_GetLenFnType) (void *FifoInstance);
+typedef void (*XStrm_SetLenFnType) (void *FifoInstance,
+ u32 ByteCount);
+typedef u32 (*XStrm_GetOccupancyFnType) (void *FifoInstance);
+typedef u32 (*XStrm_GetVacancyFnType) (void *FifoInstance);
+
+/**
+ * This typedef defines a run-time instance of a receive byte-streamer.
+ */
+typedef struct XStrm_RxFifoStreamer {
+ union XStrm_AlignedBufferType AlignedBuffer;
+ unsigned HeadIndex; /**< HeadIndex is the index to the AlignedBuffer
+ * as bytes.
+ */
+ unsigned FifoWidth; /**< FifoWidth is the FIFO key hole width in bytes.
+ */
+ unsigned FrmByteCnt; /**< FrmByteCnt is the number of bytes in the next
+ * Frame.
+ */
+ void *FifoInstance; /**< FifoInstance is the FIFO driver instance to
+ * pass to ReadFn, GetLenFn, and GetOccupancyFn
+ * routines.
+ */
+ XStrm_XferFnType ReadFn; /**< ReadFn is the routine the streamer
+ * uses to receive bytes from the Fifo.
+ */
+ XStrm_GetLenFnType GetLenFn; /**< GetLenFn is the routine the streamer
+ * uses to initiate receive operations
+ * on the FIFO.
+ */
+ XStrm_GetOccupancyFnType GetOccupancyFn; /**< GetOccupancyFn is the
+ * routine the streamer uses
+ * to get the occupancy from
+ * the FIFO.
+ */
+} XStrm_RxFifoStreamer;
+
+/**
+ * This typedef defines a run-time instance of a transmit byte-streamer.
+ */
+typedef struct XStrm_TxFifoStreamer {
+ union XStrm_AlignedBufferType AlignedBuffer;
+ unsigned TailIndex; /**< TailIndex is the index to the AlignedBuffer
+ * as bytes
+ */
+ unsigned FifoWidth; /**< FifoWidth is the FIFO key hole width in bytes.
+ */
+
+ void *FifoInstance; /**< FifoInstance is the FIFO driver instance to
+ * pass to WriteFn, SetLenFn, and GetVacancyFn
+ * routines.
+ */
+ XStrm_XferFnType WriteFn; /**< WriteFn is the routine the streamer
+ * uses to transmit bytes to the Fifo.
+ */
+ XStrm_SetLenFnType SetLenFn; /**< SetLenFn is the routine the streamer
+ * uses to initiate transmit operations
+ * on the FIFO.
+ */
+	XStrm_GetVacancyFnType GetVacancyFn;  /**< GetVacancyFn is the routine
+ * the streamer uses to get the
+ * vacancy from the FIFO.
+ */
+} XStrm_TxFifoStreamer;
+
+/*****************************************************************************/
+/*
+*
+* XStrm_TxVacancy returns the number of unused 32-bit words available (vacancy)
+* between the streamer, specified by <i>InstancePtr</i>, and the FIFO this
+* streamer is using.
+*
+* @param InstancePtr references the streamer on which to operate.
+*
+* @return XStrm_TxVacancy returns the vacancy count in number of 32 bit words.
+*
+* @note
+*
+* C Signature: u32 XStrm_TxVacancy(XStrm_TxFifoStreamer *InstancePtr)
+*
+* The amount of bytes in the holding buffer (rounded up to whole 32-bit words)
+* is subtracted from the vacancy value of FIFO this streamer is using. This is
+* to ensure the caller can write the number words given in the return value and
+* not overflow the FIFO.
+*
+******************************************************************************/
+#define XStrm_TxVacancy(InstancePtr) \
+ (((*(InstancePtr)->GetVacancyFn)((InstancePtr)->FifoInstance)) - \
+ (((InstancePtr)->TailIndex + 3) / 4))
+
+/*****************************************************************************/
+/*
+*
+* XStrm_RxOccupancy returns the number of 32-bit words available (occupancy) to
+* be read from the streamer, specified by <i>InstancePtr</i>, and the FIFO this
+* streamer is using.
+*
+* @param InstancePtr references the streamer on which to operate.
+*
+* @return XStrm_RxOccupancy returns the occupancy count in number of 32 bit
+* words.
+*
+* @note
+*
+* C Signature: u32 XStrm_RxOccupancy(XStrm_RxFifoStreamer *InstancePtr)
+*
+* The amount of bytes in the holding buffer (rounded up to whole 32-bit words)
+* is added to the occupancy value of FIFO this streamer is using. This is to
+* ensure the caller will get a little more accurate occupancy value.
+*
+******************************************************************************/
+#ifdef DEBUG
+extern u32 _xstrm_ro_value;
+extern u32 _xstrm_buffered;
+#define XStrm_RxOccupancy(InstancePtr) \
+ (_xstrm_ro_value = ((*(InstancePtr)->GetOccupancyFn)((InstancePtr)->FifoInstance)), \
+ xdbg_printf(XDBG_DEBUG_FIFO_RX, "reg: %d; frmbytecnt: %d\n", \
+ _xstrm_ro_value, (InstancePtr)->FrmByteCnt), \
+ (((InstancePtr)->FrmByteCnt) ? \
+ _xstrm_buffered = ((InstancePtr)->FifoWidth - (InstancePtr)->HeadIndex) : \
+ 0), \
+ xdbg_printf(XDBG_DEBUG_FIFO_RX, "buffered_bytes: %d\n", _xstrm_buffered), \
+ xdbg_printf(XDBG_DEBUG_FIFO_RX, "buffered (rounded): %d\n", _xstrm_buffered), \
+ (_xstrm_ro_value + _xstrm_buffered))
+#else
+#define XStrm_RxOccupancy(InstancePtr) \
+ ( \
+ ((*(InstancePtr)->GetOccupancyFn)((InstancePtr)->FifoInstance)) + \
+ ( \
+ ((InstancePtr)->FrmByteCnt) ? \
+ ((InstancePtr)->FifoWidth - (InstancePtr)->HeadIndex) : \
+ 0 \
+ ) \
+ )
+#endif
+
+/****************************************************************************/
+/*
+*
+* XStrm_IsRxInternalEmpty returns true if the streamer, specified by
+* <i>InstancePtr</i>, is not holding any bytes in its internal buffers. Note
+* that this routine does not reflect information about the state of the
+* FIFO used by this streamer.
+*
+* @param InstancePtr references the streamer on which to operate.
+*
+* @return XStrm_IsRxInternalEmpty returns TRUE when the streamer is not
+* holding any bytes in its internal buffers. Otherwise,
+* XStrm_IsRxInternalEmpty returns FALSE.
+*
+* @note
+* C-style signature:
+* int XStrm_IsRxInternalEmpty(XStrm_RxFifoStreamer *InstancePtr)
+*
+*****************************************************************************/
+#define XStrm_IsRxInternalEmpty(InstancePtr) \
+ (((InstancePtr)->HeadIndex == (InstancePtr)->FifoWidth) ? TRUE : FALSE)
+
+void XStrm_RxInitialize(XStrm_RxFifoStreamer *InstancePtr,
+ unsigned FifoWidth, void *FifoInstance,
+ XStrm_XferFnType ReadFn,
+ XStrm_GetLenFnType GetLenFn,
+ XStrm_GetOccupancyFnType GetOccupancyFn);
+
+void XStrm_TxInitialize(XStrm_TxFifoStreamer *InstancePtr,
+ unsigned FifoWidth, void *FifoInstance,
+ XStrm_XferFnType WriteFn,
+ XStrm_SetLenFnType SetLenFn,
+ XStrm_GetVacancyFnType GetVacancyFn);
+
+void XStrm_TxSetLen(XStrm_TxFifoStreamer *InstancePtr, u32 Bytes);
+void XStrm_Write(XStrm_TxFifoStreamer *InstancePtr, void *BufPtr,
+ unsigned bytes);
+
+u32 XStrm_RxGetLen(XStrm_RxFifoStreamer *InstancePtr);
+void XStrm_Read(XStrm_RxFifoStreamer *InstancePtr, void *BufPtr,
+ unsigned bytes);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* XSTREAMER_H end of preprocessor protection symbols */
--- /dev/null
+/* $Id: xversion.c,v 1.1 2006/12/13 14:23:34 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xversion.c
+*
+* This file contains the implementation of the XVersion component. This
+* component represents a version ID. It is encapsulated within a component
+* so that its type and implementation can change without affecting users of
+* it.
+*
+* The version is formatted as X.YYZ where X = 0 - 9, Y = 00 - 99, Z = a - z
+* X is the major revision, YY is the minor revision, and Z is the
+* compatibility revision.
+*
+* Packed versions are also utilized for the configuration ROM such that
+* memory is minimized. A packed version consumes only 16 bits and is
+* formatted as follows.
+*
+* <pre>
+* Revision Range Bit Positions
+*
+* Major Revision 0 - 9 Bits 15 - 12
+* Minor Revision 0 - 99 Bits 11 - 5
+* Compatibility Revision   a - z     Bits 4 - 0
+*
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 11/03/04 Improved support for doxygen.
+* </pre>
+*
+******************************************************************************/
+
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xversion.h"
+
+/************************** Constant Definitions *****************************/
+
+/* the following constants define the masks and shift values to allow the
+ * revisions to be packed and unpacked, a packed version is packed into a 16
+ * bit value in the following format, XXXXYYYYYYYZZZZZ, where XXXX is the
+ * major revision, YYYYYYY is the minor revision, and ZZZZZ is the compatability
+ * revision
+ */
+#define XVE_MAJOR_SHIFT_VALUE 12
+#define XVE_MINOR_ONLY_MASK 0x0FE0
+#define XVE_MINOR_SHIFT_VALUE 5
+#define XVE_COMP_ONLY_MASK 0x001F
+
+/* the following constants define the specific characters of a version string
+ * for each character of the revision, a version string is in the following
+ * format, "X.YYZ" where X is the major revision (0 - 9), YY is the minor
+ * revision (00 - 99), and Z is the compatability revision (a - z)
+ */
+#define XVE_MAJOR_CHAR 0 /* major revision 0 - 9 */
+#define XVE_MINOR_TENS_CHAR 2 /* minor revision tens 0 - 9 */
+#define XVE_MINOR_ONES_CHAR 3 /* minor revision ones 0 - 9 */
+#define XVE_COMP_CHAR 4 /* compatability revision a - z */
+#define XVE_END_STRING_CHAR 5
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+static u32 IsVersionStringValid(s8 *StringPtr);
+
+/*****************************************************************************/
+/**
+*
+* Unpacks a packed version into the specified version. Versions are packed
+* into the configuration ROM to reduce the amount of storage. A packed version
+* is a binary format as opposed to a non-packed version which is implemented
+* as a string.
+*
+* @param InstancePtr points to the version to unpack the packed version into.
+* @param PackedVersion contains the packed version to unpack.
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void XVersion_UnPack(XVersion * InstancePtr, u16 PackedVersion)
+{
+	/* not implemented yet since CROM related */
+	/* NOTE(review): stub — both parameters are intentionally unused and
+	 * *InstancePtr is left unmodified until configuration-ROM support
+	 * is implemented.
+	 */
+}
+
+/*****************************************************************************/
+/**
+*
+* Packs a version into the specified packed version. Versions are packed into
+* the configuration ROM to reduce the amount of storage.
+*
+* @param InstancePtr points to the version to pack.
+* @param PackedVersionPtr points to the packed version which will receive
+* the new packed version.
+*
+* @return
+*
+* A status, XST_SUCCESS, indicating the packing was accomplished
+* successfully, or an error, XST_INVALID_VERSION, indicating the specified
+* input version was not valid such that the pack did not occur
+* <br><br>
+* The packed version pointed to by PackedVersionPtr is modified with the new
+* packed version if the status indicates success.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XVersion_Pack(XVersion * InstancePtr, u16 *PackedVersionPtr)
+{
+	/* not implemented yet since CROM related */
+	/* NOTE(review): stub — always reports success and leaves
+	 * *PackedVersionPtr unmodified until configuration-ROM support
+	 * is implemented.
+	 */
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Determines if two versions are equal.
+*
+* @param InstancePtr points to the first version to be compared.
+* @param VersionPtr points to a second version to be compared.
+*
+* @return
+*
+* TRUE if the versions are equal, FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+u32 XVersion_IsEqual(XVersion * InstancePtr, XVersion * VersionPtr)
+{
+	u8 *Version1 = (u8 *) InstancePtr;
+	u8 *Version2 = (u8 *) VersionPtr;
+	int Index;
+
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(VersionPtr != NULL);
+
+	/* check each byte of the versions to see if they are the same,
+	 * return at any point a byte differs between them
+	 */
+	/* NOTE(review): this compares all sizeof(XVersion) bytes, including
+	 * any bytes after the string terminator — confirm callers always
+	 * populate versions via XVersion_Copy/FromString so those bytes match.
+	 */
+	for (Index = 0; Index < sizeof(XVersion); Index++) {
+		if (Version1[Index] != Version2[Index]) {
+			return FALSE;
+		}
+	}
+
+	/* No byte was found to be different between the versions, so indicate
+	 * the versions are equal
+	 */
+	return TRUE;
+}
+
+/*****************************************************************************/
+/**
+*
+* Converts a version to a null terminated string.
+*
+* @param InstancePtr points to the version to convert.
+* @param StringPtr points to the string which will be the result of the
+* conversion. This does not need to point to a null terminated
+* string as an input, but must point to storage which is an adequate
+* amount to hold the result string.
+*
+* @return
+*
+* The null terminated string is inserted at the location pointed to by
+* StringPtr if the status indicates success.
+*
+* @note
+*
+* It is necessary for the caller to have already allocated the storage to
+* contain the string. The amount of memory necessary for the string is
+* specified in the version header file.
+*
+******************************************************************************/
+void XVersion_ToString(XVersion * InstancePtr, s8 *StringPtr)
+{
+	/* assert to verify input arguments */
+
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(StringPtr != NULL);
+
+	/* since version is implemented as a string, just copy the specified
+	 * input into the specified output
+	 */
+	/* StringPtr must provide at least sizeof(XVersion) bytes of storage;
+	 * the copy moves the whole fixed-size array, terminator included
+	 * (assuming the source version is NUL-terminated).
+	 */
+	XVersion_Copy(InstancePtr, (XVersion *) StringPtr);
+}
+
+/*****************************************************************************/
+/**
+*
+* Initializes a version from a null terminated string. Since the string may not
+* be a format which is compatible with the version, an error could occur.
+*
+* @param InstancePtr points to the version which is to be initialized.
+* @param StringPtr points to a null terminated string which will be
+* converted to a version. The format of the string must match the
+*		version string format which is X.YYZ where X = 0 - 9, YY = 00 - 99,
+* Z = a - z.
+*
+* @return
+*
+* A status, XST_SUCCESS, indicating the conversion was accomplished
+* successfully, or XST_INVALID_VERSION indicating the version string format
+* was not valid.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+int XVersion_FromString(XVersion * InstancePtr, s8 *StringPtr)
+{
+	/* assert to verify input arguments */
+
+	XASSERT_NONVOID(InstancePtr != NULL);
+	XASSERT_NONVOID(StringPtr != NULL);
+
+	/* if the version string specified is not valid, return an error */
+
+	if (!IsVersionStringValid(StringPtr)) {
+		return XST_INVALID_VERSION;
+	}
+
+	/* copy the specified string into the specified version and indicate the
+	 * conversion was successful
+	 */
+	/* XVersion_Copy(source, destination): StringPtr is the source here */
+	XVersion_Copy((XVersion *) StringPtr, InstancePtr);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* Copies the contents of a version to another version.
+*
+* @param InstancePtr points to the version which is the source of data for
+* the copy operation.
+* @param VersionPtr points to another version which is the destination of
+* the copy operation.
+*
+* @return
+*
+* None.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+void XVersion_Copy(XVersion * InstancePtr, XVersion * VersionPtr)
+{
+	u8 *Source = (u8 *) InstancePtr;
+	u8 *Destination = (u8 *) VersionPtr;
+	int Index;
+
+	/* assert to verify input arguments */
+
+	/* the initializers above only cast the pointers, they do not
+	 * dereference them, so asserting after them is still safe
+	 */
+	XASSERT_VOID(InstancePtr != NULL);
+	XASSERT_VOID(VersionPtr != NULL);
+
+	/* copy each byte of the source version to the destination version */
+
+	for (Index = 0; Index < sizeof(XVersion); Index++) {
+		Destination[Index] = Source[Index];
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* Determines if the specified version is valid.
+*
+* @param StringPtr points to the string to be validated.
+*
+* @return
+*
+* TRUE if the version string is a valid format, FALSE otherwise.
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+static u32 IsVersionStringValid(s8 *StringPtr)
+{
+	/* if the input string is not a valid format, "X.YYZ" where X = 0 - 9,
+	 * YY = 00 - 99, and Z = a - z, then indicate it's not valid
+	 */
+	/* The '.' separator at index 1 (XVE_MAJOR_CHAR + 1) is part of the
+	 * format and must be validated too; without this check strings such
+	 * as "1x23b" would be accepted.
+	 */
+	if ((StringPtr[XVE_MAJOR_CHAR] < '0') ||
+	    (StringPtr[XVE_MAJOR_CHAR] > '9') ||
+	    (StringPtr[XVE_MAJOR_CHAR + 1] != '.') ||
+	    (StringPtr[XVE_MINOR_TENS_CHAR] < '0') ||
+	    (StringPtr[XVE_MINOR_TENS_CHAR] > '9') ||
+	    (StringPtr[XVE_MINOR_ONES_CHAR] < '0') ||
+	    (StringPtr[XVE_MINOR_ONES_CHAR] > '9') ||
+	    (StringPtr[XVE_COMP_CHAR] < 'a') ||
+	    (StringPtr[XVE_COMP_CHAR] > 'z')) {
+		return FALSE;
+	}
+
+	return TRUE;
+}
--- /dev/null
+/* $Id: xversion.h,v 1.1 2006/12/13 14:23:41 imanuilov Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+* @file xversion.h
+*
+* This file contains the interface for the XVersion component. This
+* component represents a version ID. It is encapsulated within a component
+* so that its type and implementation can change without affecting users of
+* it.
+*
+* The version is formatted as X.YYZ where X = 0 - 9, Y = 00 - 99, Z = a - z
+* X is the major revision, YY is the minor revision, and Z is the
+* compatibility revision.
+*
+* Packed versions are also utilized for the configuration ROM such that
+* memory is minimized. A packed version consumes only 16 bits and is
+* formatted as follows.
+*
+* <pre>
+* Revision Range Bit Positions
+*
+* Major Revision 0 - 9 Bits 15 - 12
+* Minor Revision 0 - 99 Bits 11 - 5
+* Compatibility Revision   a - z     Bits 4 - 0
+*
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a xd 11/03/04 Improved support for doxygen.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XVERSION_H /* prevent circular inclusions */
+#define XVERSION_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xbasic_types.h"
+#include "xstatus.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+/* the following data type is used to hold a null terminated version string
+ * consisting of the following format, "X.YYZ"
+ */
+typedef s8 XVersion[6];
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+void XVersion_UnPack(XVersion * InstancePtr, u16 PackedVersion);
+
+int XVersion_Pack(XVersion * InstancePtr, u16 *PackedVersion);
+
+u32 XVersion_IsEqual(XVersion * InstancePtr, XVersion * VersionPtr);
+
+void XVersion_ToString(XVersion * InstancePtr, s8 *StringPtr);
+
+int XVersion_FromString(XVersion * InstancePtr, s8 *StringPtr);
+
+void XVersion_Copy(XVersion * InstancePtr, XVersion * VersionPtr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
#include "internal.h"
--- /dev/null
+/*
+ * include/asm-microblaze/a.out.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_A_OUT_H
+#define _ASM_A_OUT_H
+
+#endif /* _ASM_A_OUT_H */
--- /dev/null
+/*
+ * include/asm-microblaze/atomic.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <asm/system.h> /* local_irq_XXX and friends */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#define atomic_inc(v) (atomic_add_return(1, (v)))
+#define atomic_dec(v) (atomic_sub_return(1, (v)))
+
+#define atomic_add(i, v) (atomic_add_return(i, (v)))
+#define atomic_sub(i, v) (atomic_sub_return(i, (v)))
+
+#define atomic_inc_return(v) (atomic_add_return(1, (v)))
+#define atomic_dec_return(v) (atomic_sub_return(1, (v)))
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc_not_zero(v) (atomic_add_unless((v), 1, 0))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+
+/*
+ * Compare-and-exchange: if *v equals 'old', store 'new' into *v.
+ * Returns the value of *v observed before the (attempted) store.
+ * Atomicity is provided by masking local interrupts, as elsewhere in
+ * this file (note: interrupt masking does not provide SMP atomicity).
+ */
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/*
+ * Add 'a' to *v unless the current value is 'u'.
+ * Returns non-zero if the addition was performed (the value seen was
+ * not 'u'), zero otherwise.  Loops, retrying with the freshly observed
+ * value, whenever atomic_cmpxchg() reports a concurrent modification.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+ c = old;
+ return c != u;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *addr &= ~mask;
+ local_irq_restore(flags);
+}
+
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ int val;
+
+ local_irq_save(flags);
+ val = v->counter;
+ v->counter = val += i;
+ local_irq_restore(flags);
+
+ return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t * v)
+{
+ return atomic_add_return(-i, v);
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+/* FIXME */
+#include <asm-generic/atomic.h>
+
+#endif /* _ASM_ATOMIC_H */
--- /dev/null
+/*
+ * include/asm-microblaze/auxvec.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_AUXVEC_H
+#define _ASM_AUXVEC_H
+
+#endif /* _ASM_AUXVEC_H */
--- /dev/null
+#ifndef _MICROBLAZE_BITOPS_H
+#define _MICROBLAZE_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+#include <linux/autoconf.h>
+#include <asm/byteorder.h> /* swab32 */
+#include <asm/system.h> /* save_flags */
+
+/* JW FIXME quick workaround to use generic bitops.h */
+#if 1
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+#include <asm-generic/bitops.h>
+#else
+
+#ifdef __KERNEL__
+/*
+ * Function prototypes to keep gcc -Wall happy
+ */
+
+/*
+ * The __ functions are not atomic
+ */
+
+extern void set_bit(int nr, volatile void * addr);
+extern void __set_bit(int nr, volatile void * addr);
+
+extern void clear_bit(int nr, volatile void * addr);
+extern void __clear_bit(int nr, volatile void * addr);
+
+extern void change_bit(int nr, volatile void * addr);
+extern void __change_bit(int nr, volatile void * addr);
+extern int test_and_set_bit(int nr, volatile void * addr);
+extern int __test_and_set_bit(int nr, volatile void * addr);
+extern int test_and_clear_bit(int nr, volatile void * addr);
+extern int __test_and_clear_bit(int nr, volatile void * addr);
+extern int test_and_change_bit(int nr, volatile void * addr);
+extern int __test_and_change_bit(int nr, volatile void * addr);
+extern int __constant_test_bit(int nr, const volatile void * addr);
+extern int __test_bit(int nr, const volatile void * addr);
+extern int find_first_zero_bit(void * addr, unsigned size);
+extern int find_next_zero_bit (const void * addr, int size, int offset);
+extern int find_next_bit(const unsigned long *addr, int size, int offset);
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+extern __inline__ unsigned long ffz(unsigned long word)
+{
+ unsigned long result = 0;
+
+ while(word & 1) {
+ result++;
+ word >>= 1;
+ }
+ return result;
+}
+
+
+extern __inline__ void set_bit(int nr, volatile void * addr)
+{
+ int * a = (int *) addr;
+ int mask;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a |= mask;
+ local_irq_restore(flags);
+}
+
+extern __inline__ void __set_bit(int nr, volatile void * addr)
+{
+ int * a = (int *) addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a |= mask;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+extern int kflg;
+extern int intdeb;
+extern void printkaa(int *);
+
+extern __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ int * a = (int *) addr;
+ int mask;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a &= ~mask;
+ local_irq_restore(flags);
+}
+
+extern __inline__ void __clear_bit(int nr, volatile void * addr)
+{
+ int * a = (int *) addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a &= ~mask;
+}
+
+
+extern __inline__ void change_bit(int nr, volatile void * addr)
+{
+ int mask;
+ unsigned long flags;
+ unsigned long *ADDR = (unsigned long *) addr;
+
+ ADDR += nr >> 5;
+ mask = 1 << (nr & 31);
+ local_irq_save(flags);
+ *ADDR ^= mask;
+ local_irq_restore(flags);
+}
+
+extern __inline__ void __change_bit(int nr, volatile void * addr)
+{
+ int mask;
+ unsigned long *ADDR = (unsigned long *) addr;
+
+ ADDR += nr >> 5;
+ mask = 1 << (nr & 31);
+ *ADDR ^= mask;
+}
+
+extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *) addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *) addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ return retval;
+}
+
+extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *) addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *) addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ return retval;
+}
+
+extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *) addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *) addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ return retval;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+extern __inline__ int __test_bit(int nr, const volatile void * addr)
+{
+ int * a = (int *) addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ return ((mask & *a) != 0);
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)))
+
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
+
+extern __inline__ int find_next_zero_bit (const void * addr, int size, int offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (32-offset);
+ if (size < 32)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size & ~31UL) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL >> size;
+found_middle:
+ return result + ffz(tmp);
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+
+#define ffs(x) generic_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+
+extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ unsigned long flags;
+ volatile unsigned char *ADDR = (unsigned char *) addr;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ local_irq_save(flags);
+ retval = (mask & *ADDR) != 0;
+ *ADDR |= mask;
+ local_irq_restore(flags);
+ return retval;
+}
+
+extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ unsigned long flags;
+ volatile unsigned char *ADDR = (unsigned char *) addr;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ local_irq_save(flags);
+ retval = (mask & *ADDR) != 0;
+ *ADDR &= ~mask;
+ local_irq_restore(flags);
+ return retval;
+}
+
+extern __inline__ int ext2_test_bit(int nr, volatile void * addr)
+{
+ int mask;
+ const volatile unsigned char *ADDR = (const unsigned char *) addr;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ return ((mask & *ADDR) != 0);
+}
+
+#define ext2_find_first_zero_bit(addr, size) \
+ ext2_find_next_zero_bit((addr), (size), 0)
+
+extern __inline__ unsigned long ext2_find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if(offset) {
+ /* We hold the little endian value in tmp, but then the
+ * shift is illegal. So we could keep a big endian value
+ * in tmp, like this:
+ *
+ * tmp = __swab32(*(p++));
+ * tmp |= ~0UL >> (32-offset);
+ *
+ * but this would decrease performance, so we change the
+ * shift:
+ */
+ tmp = *(p++);
+ tmp |= __swab32(~0UL >> (32-offset));
+ if(size < 32)
+ goto found_first;
+ if(~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while(size & ~31UL) {
+ if(~(tmp = *(p++)))
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if(!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ /* tmp is little endian, so we would have to swab the shift,
+ * see above. But then we have to swab tmp below for ffz, so
+ * we might as well do this here.
+ */
+ return result + ffz(__swab32(tmp) | (~0UL << size));
+found_middle:
+ return result + ffz(__swab32(tmp));
+}
+
+
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static inline int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
+
+#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
+#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
+#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
+
+
+/* Bitmap functions for the minix filesystem. */
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+/**
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+#elif BITS_PER_LONG == 32
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif
+#endif /* _MICROBLAZE_BITOPS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/bug.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_BUG_H
+#define _ASM_BUG_H
+
+#include <linux/kernel.h>
+#include <asm-generic/bug.h>
+
+#endif /* _ASM_BUG_H */
--- /dev/null
+/*
+ * include/asm-microblaze/bugs.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_BUGS_H
+#define _ASM_BUGS_H
+
+static inline void check_bugs(void)
+{
+ /* nothing to do */
+}
+
+#endif /* _ASM_BUGS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/byteorder.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#include <asm/types.h>
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+# define __SWAB_64_THRU_32__
+#endif
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _ASM_BYTEORDER_H */
--- /dev/null
+/*
+ * include/asm-microblaze/cache.h -- Cache operations
+ *
+ * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#ifndef __MICROBLAZE_CACHE_H__
+#define __MICROBLAZE_CACHE_H__
+
+#include <asm/xparameters.h>
+#include <asm/registers.h>
+#include <asm/cpuinfo.h>
+#include <linux/autoconf.h>
+
+#ifndef L1_CACHE_BYTES
+/* word-granular cache in microblaze */
+#define L1_CACHE_BYTES 4
+/* must satisfy L1_CACHE_BYTES == (1 << L1_CACHE_SHIFT); 1 << 2 == 4 */
+#define L1_CACHE_SHIFT 2
+#endif
+
+/* Assumes that caches are, in fact, enabled. */
+static inline void __enable_icache(void) {
+ if(cpuinfo->use_msr_instr) {
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop;" \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory");
+ } else {
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop;" \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory", "r12");
+ }
+}
+
+/* Assumes that caches are, in fact, enabled. */
+static inline void __disable_icache(void) {
+ if(cpuinfo->use_msr_instr) {
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop;" \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory");
+ } else {
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop;" \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory", "r12");
+ }
+}
+
+/* Assumes that caches are, in fact, enabled. */
+static inline void __invalidate_icache(unsigned int addr) {
+ __asm__ __volatile__ (" \
+ wic %0, r0" \
+ : \
+ : "r" (addr));
+}
+
+static inline void enable_icache(void) {
+ if(cpuinfo->use_icache) __enable_icache();
+}
+
+/* Assumes that caches are, in fact, enabled. */
+static inline void __enable_dcache(void) {
+ if(cpuinfo->use_msr_instr) {
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop;" \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory");
+ } else {
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop;" \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory", "r12");
+ }
+}
+
+/* Assumes that caches are, in fact, enabled. */
+static inline void __disable_dcache(void) {
+ if(cpuinfo->use_msr_instr) {
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop;" \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory");
+ } else {
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop;" \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory", "r12");
+ }
+}
+
+/* Assumes that caches are, in fact, enabled. */
+static inline void __invalidate_dcache(unsigned int addr) {
+ __asm__ __volatile__ (" \
+ wdc %0, r0" \
+ : \
+ : "r" (addr));
+}
+
+static inline void enable_dcache(void) {
+ if(cpuinfo->use_dcache) __enable_dcache();
+}
+
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+
+#define UNCACHED_SHADOW_MASK (DDR_SDRAM_HIGHADDR + 1 - DDR_SDRAM_BASEADDR)
+
+#endif
+
+#endif /* __MICROBLAZE_CACHE_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/cacheflush.h
+ *
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * based on v850 version which was
+ * Copyright (C) 2001,02,03 NEC Electronics Corporation
+ * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
+ * Copyright (C) 2000 Lineo, David McCullough <davidm@lineo.com>
+ * Copyright (C) 2001 Lineo, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#ifndef __MICROBLAZE_CACHEFLUSH_H__
+#define __MICROBLAZE_CACHEFLUSH_H__
+#include <linux/kernel.h> /* For min/max macros */
+#include <linux/mm.h> /* For min/max macros */
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/xparameters.h>
+
+/*
+ * Cache handling functions.
+ * Microblaze has a write-through data cache, meaning that the data cache
+ * never needs to be flushed.
+ */
+#define flush_cache_all() do { } while(0)
+#define flush_cache_mm(mm) do { } while(0)
+#define flush_cache_range(mm, start, end) do { } while(0)
+#define flush_cache_page(vma, vmaddr) do { } while(0)
+#define flush_page_to_ram(page) do { } while(0)
+#define flush_dcache_page(page) do { } while(0)
+#define flush_dcache_range(start, end) do { } while(0)
+#define flush_icache_range(start, end) do { } while(0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while(0)
+#define flush_icache_page(vma,pg) do { } while(0)
+#define flush_icache() do { } while(0)
+#define flush_cache_sigtramp(vaddr) do { } while(0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while(0)
+#define flush_dcache_mmap_unlock(mapping) do { } while(0)
+
+void __invalidate_icache_all (void);
+void __invalidate_icache_range (unsigned long start, unsigned long end);
+void __invalidate_dcache_all (void);
+void __invalidate_dcache_range (unsigned long start, unsigned long end);
+
+#define invalidate_cache_all() __invalidate_icache_all(); __invalidate_dcache_all()
+#define invalidate_dcache() __invalidate_dcache_all()
+#define invalidate_icache() __invalidate_icache_all()
+
+#if XPAR_MICROBLAZE_0_DCACHE_USE_FSL == 1
+#define invalidate_dcache_range(start, end) __invalidate_dcache_all()
+#define invalidate_icache_range(start, end) __invalidate_icache_all()
+#else
+#define invalidate_dcache_range(start, end) __invalidate_dcache_range(start,end)
+#define invalidate_icache_range(start, end) __invalidate_icache_range(start,end)
+#endif
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
+
+
+#endif /* __MICROBLAZE_CACHEFLUSH_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_CHECKSUM_H
+#define _ASM_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <asm/string.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
+ int len, int sum, int *csum_err);
+
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ */
+extern unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl);
+
+/*
+ * Fold a partial checksum
+ */
+static inline unsigned int csum_fold(unsigned int sum)
+{
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return ~sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline unsigned int
+csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
+ unsigned short proto, unsigned int sum)
+{
+ __asm__ ("add %0, %4, %1\n\t"
+ "addc %0, %4, %2\n\t"
+ "addc %0, %4, %3\n\t"
+ "addc %0, %4, r0\n\t"
+ : "=d" (sum)
+ : "d" (saddr), "d" (daddr), "d" (len + proto),
+ "0"(sum));
+
+ return sum;
+}
+
+static inline unsigned short int
+csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
+ unsigned short proto, unsigned int sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+
+#endif /* _ASM_CHECKSUM_H */
--- /dev/null
+/*
+ * include/asm-microblaze/clinkage.h -- Macros to reflect C symbol-naming conventions
+ *
+ * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001,2002 NEC Corporatione
+ * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * Written by Miles Bader <miles@gnu.org>
+ * Microblaze port by John Williams
+ */
+
+#ifndef __MICROBLAZE_CLINKAGE_H__
+#define __MICROBLAZE_CLINKAGE_H__
+
+#define __ASSEMBLY__
+
+#include <linux/linkage.h>
+
+#define C_SYMBOL_NAME(name) name
+#define C_ENTRY(name) .globl name; .align 4; name
+#define C_END(name)
+
+#endif /* __MICROBLAZE_CLINKAGE_H__ */
--- /dev/null
+/*
+ * Generic support for querying CPU info
+ *
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+#ifndef _ASM_CPUINFO_H
+#define _ASM_CPUINFO_H
+
+struct cpuinfo {
+ /* Core CPU configuration */
+ int use_barrel;
+ int use_divider;
+ int use_mult;
+ int use_fpu;
+ int use_exception;
+ int use_mul_64;
+ int use_msr_instr;
+ int use_pcmp_instr;
+
+ int ver_code;
+
+ /* CPU caches */
+ int use_icache;
+ int icache_tagbits;
+ int icache_write;
+ int icache_line;
+ int icache_size;
+ unsigned long icache_base;
+ unsigned long icache_high;
+
+ int use_dcache;
+ int dcache_tagbits;
+ int dcache_write;
+ int dcache_line;
+ int dcache_size;
+ unsigned long dcache_base;
+ unsigned long dcache_high;
+
+ /* Bus connections */
+ int use_dopb;
+ int use_iopb;
+ int use_dlmb;
+ int use_ilmb;
+ int num_fsl;
+
+ /* CPU interrupt line info */
+ int irq_edge;
+ int irq_positive;
+
+ int area_optimised;
+
+ /* HW support for CPU exceptions */
+ int opcode_0_illegal;
+ int exc_unaligned;
+ int exc_ill_opcode;
+ int exc_iopb;
+ int exc_dopb;
+ int exc_div_zero;
+ int exc_fpu;
+
+ /* HW debug support */
+ int hw_debug;
+ int num_pc_brk;
+ int num_rd_brk;
+ int num_wr_brk;
+
+ /* FPGA family */
+ int fpga_family_code;
+};
+
+/* Declare a global instance of the cpuinfo_ops */
+extern struct cpuinfo *cpuinfo;
+
+/* fwd declarations of the various CPUinfo populators */
+void setup_cpuinfo(void);
+
+void set_cpuinfo_static(struct cpuinfo *ci);
+void set_cpuinfo_pvr_partial(struct cpuinfo *ci);
+void set_cpuinfo_pvr_full(struct cpuinfo *ci);
+
+#endif
+
--- /dev/null
+/*
+ * include/asm-microblaze/cputime.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_CPUTIME_H
+#define _ASM_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* _ASM_CPUTIME_H */
--- /dev/null
+/*
+ * include/asm-microblaze/current.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_CURRENT_H
+#define _ASM_CURRENT_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Dedicate r31 to keeping the current task pointer
+ */
+register struct task_struct *current asm("r31");
+
+#define get_current() current
+
+#endif
+
+#endif /* _ASM_CURRENT_H */
--- /dev/null
+/*
+ * include/asm-microblaze/delay.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <asm/param.h>
+
+/*
+ * Spin for 'loops' iterations of a two-instruction decrement-and-branch
+ * loop (addi/bneid with a nop filling the branch delay slot).
+ */
+static inline void __delay(unsigned long loops)
+{
+ asm volatile ("# __delay \n\t" \
+ "1: addi %0, %0, -1 \t\n" \
+ "bneid %0, 1b \t\n" \
+ "nop \t\n"
+ : "=r" (loops)
+ : "0" (loops));
+}
+
+/*
+ * Delay for approximately 'usec' microseconds.  4295 is ~2^32 / 10^6,
+ * so (usec * 4295 * HZ * loops_per_jiffy) >> 32 converts microseconds
+ * into __delay() iterations (loops_per_jiffy * HZ = loops per second)
+ * using a 64-bit multiply instead of a division.
+ */
+static inline void udelay(unsigned long usec)
+{
+ unsigned long long tmp = usec;
+ unsigned long loops = (tmp * 4295 * HZ * loops_per_jiffy) >> 32;
+ __delay(loops);
+}
+
+#endif /* _ASM_DELAY_H */
--- /dev/null
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_MICROBLAZE_DEVICE_H
+#define _ASM_MICROBLAZE_DEVICE_H
+
+struct dma_mapping_ops;
+struct device_node;
+
+struct dev_archdata {
+ /* Optional pointer to an OF device node */
+ struct device_node *of_node;
+
+ /* DMA operations on that device */
+ struct dma_mapping_ops *dma_ops;
+ void *dma_data;
+
+ /* NUMA node if applicable */
+ int numa_node;
+};
+
+#endif /* _ASM_MICROBLAZE_DEVICE_H */
+
+
--- /dev/null
+/*
+ * include/asm-microblaze/div64.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_DIV64_H
+#define _ASM_DIV64_H
+
+#include <asm-generic/div64.h>
+
+#endif /* _ASM_DIV64_H */
--- /dev/null
+/*
+ * include/asm-microblaze/dma-mapping.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __ASM_MICROBLAZE_DMA_MAPPING_H
+#define __ASM_MICROBLAZE_DMA_MAPPING_H
+
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/cpuinfo.h>
+
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ int direction);
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ /* Fix when needed. I really don't know of any limitations */
+ return 1;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+ return 0;
+}
+
+/*
+ * dma_map_single can't fail as it is implemented now.
+ */
+static inline int dma_mapping_error(dma_addr_t addr)
+{
+ return 0;
+}
+
+/**
+ * dma_alloc_coherent - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, unbuffered memory for a device for
+ * performing DMA. This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp);
+
+/**
+ * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_coherent().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * during and after this call executing are illegal.
+ */
+extern void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle);
+
+/**
+ * dma_map_single - map a single buffer for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @cpu_addr: CPU direct mapped address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_single() or dma_sync_single().
+ */
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_cache_sync(dev, cpu_addr, size, direction);
+ return virt_to_bus(cpu_addr);
+}
+
+/**
+ * dma_unmap_single - unmap a single buffer previously mapped
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+
+}
+
+/**
+ * dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page() or dma_sync_single().
+ */
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_map_single(dev, page_address(page) + offset,
+ size, direction);
+}
+
+/**
+ * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_unmap_single(dev, dma_address, size, direction);
+}
+
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+/*
+ * Map a scatterlist for streaming DMA: record each entry's bus address
+ * and flush/invalidate the CPU cache over its buffer. Returns the
+ * number of entries mapped (always nents here; no coalescing is done).
+ */
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+ char *virt;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for_each_sg(sgl, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ /* 'sg' already points at entry i; indexing sg[i] here
+ * would write past the current entry (and is wrong for
+ * chained scatterlists). */
+ sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+ virt = page_address(sg_page(sg)) + sg->offset;
+ dma_cache_sync(dev, virt, sg->length, direction);
+ }
+
+ return nents;
+}
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a set of streaming mode DMA translations.
+ * Again, CPU read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+
+}
+
+/**
+ * dma_sync_single_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a single streaming mode DMA
+ * translation after a transfer.
+ *
+ * If you perform a dma_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the DMA mapping,
+ * you must call this function before doing so. At the next point you
+ * give the DMA address back to the card, you must first perform a
+ * dma_sync_single_for_device, and then the device again owns the
+ * buffer.
+ */
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ /* just sync everything, that's all the pci API can do */
+ dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as dma_sync_single_for_* but for a scatter-gather list,
+ * same rules and usage.
+ */
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for_each_sg(sgl, sg, nents, i)
+ dma_cache_sync(dev, page_address(sg_page(sg)) + sg->offset,
+ sg->length, direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for_each_sg(sgl, sg, nents, i)
+ dma_cache_sync(dev, page_address(sg_page(sg)) + sg->offset,
+ sg->length, direction);
+}
+
+/* Now for the API extensions over the pci_ one */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
+{
+ return 1;
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+ return cpuinfo->dcache_line;
+}
+
+#endif /* __ASM_MICROBLAZE_DMA_MAPPING_H */
--- /dev/null
+/*
+ * include/asm-microblaze/dma.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+/* we don't have dma address limit. define it as zero to be
+ * unlimited. */
+#define MAX_DMA_ADDRESS (0)
+
+#define ISA_DMA_THRESHOLD (0)
+
+
+#endif /* _ASM_DMA_H */
--- /dev/null
+/*
+ * include/asm-microblaze/elf.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_ELF_H
+#define _ASM_ELF_H
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+
+#endif /* _ASM_ELF_H */
--- /dev/null
+/*
+ * include/asm-microblaze/emergency-restart.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
--- /dev/null
+/*
+ * include/asm-microblaze/entry.h -- Definitions used by low-level
+ * trap handlers
+ *
+ * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#ifndef __MICROBLAZE_ENTRY_H__
+#define __MICROBLAZE_ENTRY_H__
+
+
+#include <asm/percpu.h>
+#include <asm/ptrace.h>
+
+/* These are per-cpu variables required in entry.S, among other
+ places */
+
+DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
+DECLARE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
+DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
+DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
+DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
+DECLARE_PER_CPU(unsigned int, SYSCALL_SAVE); /* Saved syscall number */
+
+#endif /* __MICROBLAZE_ENTRY_H__ */
+
--- /dev/null
+/*
+ * include/asm-microblaze/errno.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_ERRNO_H
+#define _ASM_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif /* _ASM_ERRNO_H */
--- /dev/null
+/*
+ * Preliminary support for HW exception handing for Microblaze
+ *
+ * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+#ifndef _ASM_EXCEPTIONS_H
+#define _ASM_EXCEPTIONS_H
+
+#include <asm/xparameters.h>
+
+/* Some helper macros to make the exception handler code cleaner */
+
+/* Are there *any* HW exceptions enabled? */
+#define MICROBLAZE_EXCEPTIONS_ENABLED \
+ (XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS || \
+ XPAR_MICROBLAZE_0_ILL_OPCODE_EXCEPTION || \
+ XPAR_MICROBLAZE_0_IOPB_BUS_EXCEPTION || \
+ XPAR_MICROBLAZE_0_DOPB_BUS_EXCEPTION || \
+ XPAR_MICROBLAZE_0_DIV_ZERO_EXCEPTION || \
+ XPAR_MICROBLAZE_0_FPU_EXCEPTION)
+
+/* Are there any HW exceptions *other than* the unaligned exception? */
+#define OTHER_EXCEPTIONS_ENABLED \
+ (XPAR_MICROBLAZE_0_ILL_OPCODE_EXCEPTION || \
+ XPAR_MICROBLAZE_0_IOPB_BUS_EXCEPTION || \
+ XPAR_MICROBLAZE_0_DOPB_BUS_EXCEPTION || \
+ XPAR_MICROBLAZE_0_DIV_ZERO_EXCEPTION || \
+ XPAR_MICROBLAZE_0_FPU_EXCEPTION)
+
+#ifndef __ASSEMBLY__
+
+extern void initialize_exception_handlers(void);
+
+/* Macros to enable and disable HW exceptions in the MSR */
+/* Define MSR enable bit for HW exceptions */
+#define HWEX_MSR_BIT (1 << 8)
+
+#if MICROBLAZE_EXCEPTIONS_ENABLED
+
+#if XPAR_MICROBLAZE_0_USE_MSR_INSTR
+#define __enable_hw_exceptions() \
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop;" \
+ : \
+ : "i" (HWEX_MSR_BIT) \
+ : "memory")
+
+#define __disable_hw_exceptions() \
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop;" \
+ : \
+ : "i" (HWEX_MSR_BIT) \
+ : "memory")
+
+
+#else /* !XPAR_MICROBLAZE_0_USE_MSR_INSTR */
+#define __enable_hw_exceptions() \
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop;" \
+ : \
+ : "i" (HWEX_MSR_BIT) \
+ : "memory", "r12")
+
+#define __disable_hw_exceptions() \
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop;" \
+ : \
+ : "i" (HWEX_MSR_BIT) \
+ : "memory", "r12")
+
+
+#endif /* XPAR_MICROBLAZE_0_USE_MSR_INSTR */
+
+#else /* MICROBLAZE_EXCEPTIONS_ENABLED */
+#define __enable_hw_exceptions()
+#define __disable_hw_exceptions()
+#endif /* MICROBLAZE_EXCEPTIONS_ENABLED */
+
+#endif /*__ASSEMBLY__ */
+
+
+#endif
--- /dev/null
+#ifndef _MICROBLAZE_FCNTL_H
+#define _MICROBLAZE_FCNTL_H
+
+#define O_DIRECTORY 040000 /* must be a directory */
+#define O_NOFOLLOW 0100000 /* don't follow links */
+#define O_LARGEFILE 0200000
+#define O_DIRECT 0400000 /* direct disk access hint */
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _MICROBLAZE_FCNTL_H */
--- /dev/null
+/*
+ * include/asm-microblaze/flat.h -- uClinux flat-format executables
+ */
+
+#ifndef __MICROBLAZE_FLAT_H__
+#define __MICROBLAZE_FLAT_H__
+
+#include <asm/unaligned.h>
+
+#define flat_stack_align(sp) /* nothing needed */
+#define flat_argvp_envp_on_stack() 0
+#define flat_old_ram_flag(flags) (flags)
+#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
+
+/*
+ * Microblaze works a little differently from other arches, because
+ * of the MICROBLAZE_64 reloc type. Here, a 32 bit address is split
+ * over two instructions, an 'imm' instruction which provides the top
+ * 16 bits, then the instruction "proper" which provides the low 16
+ * bits.
+ */
+
+/*
+ * Crack open a symbol reference and extract the address to be
+ * relocated. rp is a potentially unaligned pointer to the
+ * reference
+ */
+
+static inline unsigned long
+flat_get_addr_from_rp(unsigned long *rp, unsigned long relval, unsigned long flags, unsigned long *p)
+{
+ unsigned long addr;
+ (void)flags;
+
+ /* Is it a split 64/32 reference? */
+ if(relval & 0x80000000)
+ {
+ /* Grab the two halves of the reference */
+ unsigned long val_hi, val_lo;
+
+ val_hi = get_unaligned(rp);
+ val_lo = get_unaligned(rp+1);
+
+ /* Crack the address out */
+ addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff);
+ }
+ else
+ {
+ /* Get the address straight out */
+ addr = get_unaligned(rp);
+ }
+
+ return addr;
+}
+
+/*
+ * Insert an address into the symbol reference at rp. rp is potentially
+ * unaligned.
+ */
+
+static inline void
+flat_put_addr_at_rp(unsigned long *rp, unsigned long addr, unsigned long relval)
+{
+ /* Is this a split 64/32 reloc? */
+ if(relval & 0x80000000)
+ {
+ /* Get the two "halves" */
+ unsigned long val_hi = get_unaligned(rp);
+ unsigned long val_lo = get_unaligned(rp + 1);
+
+ /* insert the address */
+ val_hi = (val_hi & 0xffff0000) | addr >> 16;
+ val_lo = (val_lo & 0xffff0000) | (addr & 0xffff);
+
+ /* store the two halves back into memory */
+ put_unaligned(val_hi, rp);
+ put_unaligned(val_lo, rp+1);
+ }
+ else
+ {
+ /* Put it straight in, no messing around */
+ put_unaligned(addr, rp);
+ }
+}
+
+#define flat_get_relocate_addr(rel) (rel & 0x7fffffff)
+#define flat_set_persistent(relval, p) 0
+
+#endif /* __MICROBLAZE_FLAT_H__ */
--- /dev/null
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <asm-generic/futex.h>
+
+#endif
+
--- /dev/null
+/*
+ * include/asm-microblaze/hardirq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_HARDIRQ_H
+#define _ASM_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/irq.h>
+#include <asm/irq.h>
+#include <asm/current.h>
+#include <asm/ptrace.h>
+
+/* should be defined in each interrupt controller driver */
+extern unsigned int get_irq(struct pt_regs *regs);
+
+typedef struct {
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+void ack_bad_irq(unsigned int irq);
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#endif /* _ASM_HARDIRQ_H */
--- /dev/null
+/*
+ * include/asm-microblaze/hw_irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
+{
+ /* Nothing to do */
+}
+
+#endif /* _ASM_HW_IRQ_H */
--- /dev/null
+/*
+ * include/asm-microblaze/io.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+static inline unsigned char __raw_readb(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)addr;
+}
+static inline unsigned short __raw_readw(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *)addr;
+}
+static inline unsigned int __raw_readl(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *)addr;
+}
+static inline unsigned long __raw_readq(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long __force *)addr;
+}
+static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *)addr = v;
+}
+static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *)addr = v;
+}
+static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *)addr = v;
+}
+static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+{
+ *(volatile unsigned long __force *)addr = v;
+}
+
+/*
+ * read (readb, readw, readl, readq) and write (writeb, writew,
+ * writel, writeq) accessors are for PCI and thus little endian.
+ * Linux 2.4 for Microblaze had this wrong.
+ */
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)addr;
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+ return le16_to_cpu(*(volatile unsigned short __force *)addr);
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+ return le32_to_cpu(*(volatile unsigned int __force *)addr);
+}
+static inline void writeb(unsigned char v, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *)addr = v;
+}
+static inline void writew(unsigned short v, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *)addr = cpu_to_le16(v);
+}
+static inline void writel(unsigned int v, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *)addr = cpu_to_le32(v);
+}
+
+/* ioread and iowrite variants. These are for now the same as the __raw_
+ * variants of the accessors. We might check for endianness in the future.
+ */
+#define ioread8(addr) __raw_readb((u8 *)(addr))
+#define ioread16(addr) __raw_readw((u16 *)(addr))
+#define ioread32(addr) __raw_readl((u32 *)(addr))
+#define iowrite8(v, addr) __raw_writeb((u8)(v), (u8 *)(addr))
+#define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr))
+#define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr))
+
+/* These are the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl, the "string" versions
+ * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
+ * inb_p/inw_p/...
+ * The macros don't do byte-swapping.
+ */
+#define inb(port) readb((u8 *)((port)))
+#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)))
+#define inw(port) readw((u16 *)((port)))
+#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)))
+#define inl(port) readl((u32 *)((port)))
+#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
+
+#define inb_p(port) inb((port))
+#define outb_p(val, port) outb((val), (port))
+#define inw_p(port) inw((port))
+#define outw_p(val, port) outw((val), (port))
+#define inl_p(port) inl((port))
+#define outl_p(val, port) outl((val), (port))
+
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa((unsigned long)address);
+}
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define page_to_bus page_to_phys
+#define bus_to_page phys_to_page
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline void * phys_to_virt(unsigned long address)
+{
+ return (void *)__va(address);
+}
+
+
+static inline void *__ioremap(unsigned long address, unsigned long size,
+ unsigned long flags)
+{
+ return (void *)address;
+}
+
+
+#define IO_SPACE_LIMIT ~(0UL)
+
+#define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
+#define iounmap(addr) ((void)0)
+#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+/*
+ * The PPC out_be/in_be macros
+ */
+
+/*
+ * Big Endian
+ */
+#define out_be32(a,v) __raw_writel((v),(a))
+#define out_be16(a,v) __raw_writew((v),(a))
+
+#define in_be32(a) __raw_readl(a)
+#define in_be16(a) __raw_readw(a)
+
+/*
+ * Little endian accessors (value byte-swapped on this big-endian CPU
+ * before/after the raw access).
+ */
+
+/* No trailing semicolon in the macro body: a stray ';' would break
+ * "if (...) out_le32(a,v); else ..." and differs from the siblings. */
+#define out_le32(a,v) __raw_writel(__cpu_to_le32(v),(a))
+#define out_le16(a,v) __raw_writew(__cpu_to_le16(v),(a))
+
+#define in_le32(a) __le32_to_cpu(__raw_readl(a))
+#define in_le16(a) __le16_to_cpu(__raw_readw(a))
+
+/* Byte ops */
+#define out_8(a,v) __raw_writeb((v),(a))
+#define in_8(a) __raw_readb(a)
+
+#endif /* _ASM_IO_H */
--- /dev/null
+#ifndef _MICROBLAZE_IOCTL_H
+#define _MICROBLAZE_IOCTL_H
+
+
+/*
+ * this was copied from the alpha as it's a bit cleaner there.
+ * -- Cort
+ */
+
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
+ * And this turns out useful to catch old ioctl numbers in header
+ * files for us.
+ */
+#define _IOC_NONE 1U
+#define _IOC_READ 2U
+#define _IOC_WRITE 4U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+ ((sizeof(t) == sizeof(t[1]) && \
+ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+ sizeof(t) : __invalid_size_argument_for_IOC)
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode them.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* various drivers, such as the pcmcia stuff, need these... */
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _MICROBLAZE_IOCTL_H */
--- /dev/null
+/*
+ * include/asm-microblaze/ioctls.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_IOCTLS_H
+#define _ASM_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+#define FIOQSIZE 0x545E
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif /* _ASM_IOCTLS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/ipcbuf.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_IPCBUF_H
+#define _ASM_IPCBUF_H
+
+/*
+ * The user_ipc_perm structure for the microblaze architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+
+#endif /* _ASM_IPCBUF_H */
--- /dev/null
+/*
+ * include/asm-microblaze/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+struct device_node;
+
+/* FIXME */
+#define NR_IRQS 32
+
+#define NO_IRQ NR_IRQS
+
+#ifdef CONFIG_OF
+/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
+ * @device: Device node of the device whose interrupt is to be mapped
+ * @index: Index of the interrupt to map
+ *
+ * This function is a wrapper that chains of_irq_map_one() and
+ * irq_create_of_mapping() to make things easier to callers
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index);
+#endif
+
+static inline int irq_canonicalize(int irq)
+{
+ return (irq);
+}
+
+struct pt_regs;
+extern void do_IRQ(struct pt_regs *regs);
+
+#endif /* _ASM_IRQ_H */
--- /dev/null
+#include <asm-generic/irq_regs.h>
--- /dev/null
+#include <asm-generic/kdebug.h>
--- /dev/null
+/*
+ * include/asm-microblaze/kmap_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_TYPE_NR,
+};
+
+#endif /* _ASM_KMAP_TYPES_H */
--- /dev/null
+/*
+ * include/asm-microblaze/linkage.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_LINKAGE_H
+#define _ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif /* _ASM_LINKAGE_H */
--- /dev/null
+#ifndef _PPC64_LMB_H
+#define _PPC64_LMB_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <asm/prom.h>
+
+#define MAX_LMB_REGIONS 128
+
+struct lmb_property {
+ unsigned long base;
+ unsigned long size;
+};
+
+struct lmb_region {
+ unsigned long cnt;
+ unsigned long size;
+ struct lmb_property region[MAX_LMB_REGIONS+1];
+};
+
+struct lmb {
+ unsigned long debug;
+ unsigned long rmo_size;
+ struct lmb_region memory;
+ struct lmb_region reserved;
+};
+
+extern struct lmb lmb;
+
+extern void __init lmb_init(void);
+extern void __init lmb_analyze(void);
+extern long __init lmb_add(unsigned long base, unsigned long size);
+extern long __init lmb_reserve(unsigned long base, unsigned long size);
+extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
+extern unsigned long __init lmb_alloc_base(unsigned long size,
+ unsigned long align, unsigned long max_addr);
+extern unsigned long __init __lmb_alloc_base(unsigned long size,
+ unsigned long align, unsigned long max_addr);
+extern unsigned long __init lmb_phys_mem_size(void);
+extern unsigned long __init lmb_end_of_DRAM(void);
+extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
+
+extern void lmb_dump_all(void);
+
+static inline unsigned long
+lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
+{
+ return type->region[region_nr].size;
+}
+static inline unsigned long
+lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
+{
+ return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
+}
+static inline unsigned long
+lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
+{
+ return type->region[region_nr].base >> PAGE_SHIFT;
+}
+static inline unsigned long
+lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
+{
+ return lmb_start_pfn(type, region_nr) +
+ lmb_size_pages(type, region_nr);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_LMB_H */
--- /dev/null
+#ifndef __MB_LOCAL_H
+#define __MB_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* __MB_LOCAL_H */
--- /dev/null
+/*
+ * include/asm-microblaze/mman.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_MMAN_H
+#define _ASM_MMAN_H
+
+#include <asm-generic/mman.h>
+
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+#define MAP_LOCKED 0x2000 /* pages are locked */
+#define MAP_NORESERVE 0x4000 /* don't check for reservations */
+#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+#endif /* _ASM_MMAN_H */
--- /dev/null
+/*
+ * include/asm-microblaze/mmu.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_MMU_H
+#define _ASM_MMU_H
+
+typedef struct {
+ struct vm_list_struct *vmlist;
+ unsigned long end_brk;
+} mm_context_t;
+
+#endif /* _ASM_MMU_H */
--- /dev/null
+/*
+ * include/asm-microblaze/mmu_context.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#define init_new_context(tsk, mm) ({ 0; })
+
+#define enter_lazy_tlb(mm, tsk) do {} while(0)
+#define change_mm_context(old, ctx, _pml4) do {} while(0)
+#define destroy_context(mm) do {} while(0)
+#define deactivate_mm(tsk, mm) do {} while(0)
+#define switch_mm(prev, next, tsk) do {} while(0)
+#define activate_mm(prev, next) do {} while(0)
+
+
+#endif /* _ASM_MMU_CONTEXT_H */
--- /dev/null
+/*
+ * include/asm-microblaze/module.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+typedef struct { volatile int counter; } module_t;
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+
+#endif /* _ASM_MODULE_H */
--- /dev/null
+/*
+ * include/asm-microblaze/msgbuf.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_MSGBUF_H
+#define _ASM_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for the MicroBlaze architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _ASM_MSGBUF_H */
--- /dev/null
+/*
+ * include/asm-microblaze/mutex.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#include <asm-generic/mutex-dec.h>
+
+#endif /* _ASM_MUTEX_H */
--- /dev/null
+/*
+ * include/asm-microblaze/namei.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_NAMEI_H
+#define _ASM_NAMEI_H
+
+#ifdef __KERNEL__
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+#define __emul_prefix() NULL
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_NAMEI_H */
--- /dev/null
+#ifndef _ASM_MICROBLAZE_OF_DEVICE_H
+#define _ASM_MICROBLAZE_OF_DEVICE_H
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+
+/*
+ * The of_device is a kind of "base class" that is a superset of
+ * struct device for use by devices attached to an OF node and
+ * probed using OF properties.
+ */
+struct of_device
+{
+ struct device_node *node; /* to be obsoleted */
+ u64 dma_mask; /* DMA mask */
+ struct device dev; /* Generic device interface */
+};
+
+extern ssize_t of_device_get_modalias(struct of_device *ofdev,
+ char *str, ssize_t len);
+extern int of_device_uevent(struct device *dev,
+ char **envp, int num_envp, char *buffer, int buffer_size);
+
+/* This is just here during the transition */
+#include <linux/of_device.h>
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_MICROBLAZE_OF_DEVICE_H */
--- /dev/null
+#ifndef _ASM_MICROBLAZE_OF_PLATFORM_H
+#define _ASM_MICROBLAZE_OF_PLATFORM_H
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This is just here during the transition */
+#include <linux/of_platform.h>
+
+/* Platform drivers register/unregister */
+extern int of_register_platform_driver(struct of_platform_driver *drv);
+extern void of_unregister_platform_driver(struct of_platform_driver *drv);
+
+/* Platform devices and busses creation */
+extern struct of_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+/* pseudo "matches" value to not do deep probe */
+#define OF_NO_DEEP_PROBE ((struct of_device_id *)-1)
+
+extern int of_platform_bus_probe(struct device_node *root,
+ struct of_device_id *matches,
+ struct device *parent);
+
+extern struct of_device *of_find_device_by_phandle(phandle ph);
+
+#endif /* _ASM_MICROBLAZE_OF_PLATFORM_H */
--- /dev/null
+/*
+ * include/asm-microblaze/page.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <linux/autoconf.h>
+#include <linux/pfn.h>
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT (12)
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#include <asm/setup.h>
+#include <asm/xparameters.h>
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_user_page(vto, vfrom, vaddr, topg) memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long ste[64];} pmd_t;
+typedef struct { pmd_t pue[1]; } pud_t;
+typedef struct { pud_t pge[1]; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).ste[0])
+#define pud_val(x) ((x).pue[0])
+#define pgd_val(x) ((x).pge[0])
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+
+#define PAGE_OFFSET (XPAR_ERAM_START)
+
+/**
+ * Conversions for virtual address, physical address, pfn, and struct
+ * page are defined in the following files.
+ *
+ * virt -+
+ * | asm-microblaze/page.h
+ * phys -+
+ * | linux/pfn.h
+ * pfn -+
+ * | asm-generic/memory_model.h
+ * page -+
+ *
+ */
+
+#define __pa(vaddr) ((unsigned long) (vaddr))
+#define __va(paddr) ((void *) (paddr))
+
+#define phys_to_pfn(phys) (PFN_DOWN(phys))
+#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+
+#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
+#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
+
+#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+
+#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+
+// wgr check this...
+//#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
+#define pfn_valid(pfn) ((pfn) >= PFN_UP(XPAR_ERAM_START) && \
+ (pfn) <= PFN_DOWN((XPAR_ERAM_START+XPAR_ERAM_SIZE-1)))
+#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/page.h>
+
+#endif /* _ASM_PAGE_H */
--- /dev/null
+/*
+ * include/asm-microblaze/param.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_PARAM_H
+#define _ASM_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ 100 /* internal timer frequency */
+# define USER_HZ 100 /* for user interfaces in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
+#endif
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _ASM_PARAM_H */
--- /dev/null
+#ifdef __KERNEL__
+#ifndef _ASM_PCI_BRIDGE_H
+#define _ASM_PCI_BRIDGE_H
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pci_io_base returns the memory address at which you can access
+ * the I/O space for PCI bus number `bus' (or NULL on error).
+ */
+extern void __iomem *pci_bus_io_base(unsigned int bus);
+extern unsigned long pci_bus_io_base_phys(unsigned int bus);
+extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
+
+/* Allocate a new PCI host bridge structure */
+extern struct pci_controller* pcibios_alloc_controller(void);
+
+/* Helper function for setting up resources */
+extern void pci_init_resource(struct resource *res, unsigned long start,
+ unsigned long end, int flags, char *name);
+
+/* Get the PCI host controller for a bus */
+extern struct pci_controller* pci_bus_to_hose(int bus);
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node* node);
+
+/* Fill up host controller resources from the OF node */
+extern void
+pci_process_bridge_OF_ranges(struct pci_controller *hose,
+ struct device_node *dev, int primary);
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+ int index; /* PCI domain number */
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+ int bus_offset;
+
+ void __iomem *io_base_virt;
+ unsigned long io_base_phys;
+
+ /* Some machines (PReP) have a non 1:1 mapping of
+ * the PCI memory space in the CPU bus space
+ */
+ unsigned long pci_mem_offset;
+
+ struct pci_ops *ops;
+ volatile unsigned int __iomem *cfg_addr;
+ volatile void __iomem *cfg_data;
+ /*
+ * If set, indirect method will set the cfg_type bit as
+ * needed to generate type 1 configuration transactions.
+ */
+ int set_cfg_type;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+
+ /* Host bridge I/O and Memory space
+ * Used for BAR placement algorithms
+ */
+ struct resource io_space;
+ struct resource mem_space;
+};
+
+/* These are used for config access before all the PCI probing
+ has been done. */
+int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
+ int where, u8 *val);
+int early_read_config_word(struct pci_controller *hose, int bus, int dev_fn,
+ int where, u16 *val);
+int early_read_config_dword(struct pci_controller *hose, int bus, int dev_fn,
+ int where, u32 *val);
+int early_write_config_byte(struct pci_controller *hose, int bus, int dev_fn,
+ int where, u8 val);
+int early_write_config_word(struct pci_controller *hose, int bus, int dev_fn,
+ int where, u16 val);
+int early_write_config_dword(struct pci_controller *hose, int bus, int dev_fn,
+ int where, u32 val);
+
+extern void setup_indirect_pci_nomap(struct pci_controller* hose,
+ void __iomem *cfg_addr, void __iomem *cfg_data);
+extern void setup_indirect_pci(struct pci_controller* hose,
+ u32 cfg_addr, u32 cfg_data);
+extern void setup_grackle(struct pci_controller *hose);
+
+extern unsigned char common_swizzle(struct pci_dev *, unsigned char *);
+
+/*
+ * The following code swizzles for exactly one bridge. The routine
+ * common_swizzle below handles multiple bridges. But there are a
+ * some boards that don't follow the PCI spec's suggestion so we
+ * break this piece out separately.
+ */
+static inline unsigned char bridge_swizzle(unsigned char pin,
+ unsigned char idsel)
+{
+ return (((pin-1) + idsel) % 4) + 1;
+}
+
+/*
+ * The following macro is used to lookup irqs in a standard table
+ * format for those PPC systems that do not already have PCI
+ * interrupts properly routed.
+ */
+/* FIXME - double check this */
+#define PCI_IRQ_TABLE_LOOKUP \
+({ long _ctl_ = -1; \
+ if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \
+ _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \
+ _ctl_; })
+
+/*
+ * Scan the buses below a given PCI host bridge and assign suitable
+ * resources to all devices found.
+ */
+extern int pciauto_bus_scan(struct pci_controller *, int);
+
+#endif
+#endif /* __KERNEL__ */
--- /dev/null
+#ifndef __MICROBLAZE_PCI_H
+#define __MICROBLAZE_PCI_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+#include <asm-generic/pci-dma-compat.h>
+
+struct pci_dev;
+
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER 0
+#define IOBASE_MEMORY 1
+#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers
+ */
+extern int pci_assign_all_busses;
+
+#define pcibios_assign_all_busses() (pci_assign_all_busses)
+#define pcibios_scan_all_fns(a, b) 0
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+extern inline void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+extern inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+extern unsigned long pci_resource_to_bus(struct pci_dev *pdev, struct resource *res);
+
+/*
+ * The PCI bus bridge can translate addresses issued by the processor(s)
+ * into a different address on the PCI bus. On 32-bit cpus, we assume
+ * this mapping is 1-1, but on 64-bit systems it often isn't.
+ *
+ * Obsolete ! Drivers should now use pci_resource_to_bus
+ */
+extern unsigned long phys_to_bus(unsigned long pa);
+extern unsigned long pci_phys_to_bus(unsigned long pa, int busnr);
+extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
+
+/* The PCI address space does equal the physical memory
+ * address space. The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+
+/*
+ * At present there are very few 32-bit PPC machines that can have
+ * memory above the 4GB point, and we don't support that.
+ */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* Return the index of the PCI controller for device PDEV. */
+#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
+
+/* Set the name of the bus as it appears in /proc/bus/pci */
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 0;
+}
+
+/* Map a range of PCI memory or I/O space for a device into user space */
+int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+#define HAVE_PCI_MMAP 1
+
+extern void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res);
+
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region);
+
+extern int pcibios_add_platform_entries(struct pci_dev *dev);
+
+struct file;
+extern pgprot_t pci_phys_mem_access_prot(struct file *file,
+ unsigned long offset,
+ unsigned long size,
+ pgprot_t prot);
+
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc,
+ u64 *start, u64 *end);
+
+
+#endif /* __KERNEL__ */
+
+#endif /* __MICROBLAZE_PCI_H */
--- /dev/null
+/*
+ * include/asm-microblaze/percpu.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_PERCPU_H
+#define _ASM_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_PERCPU_H */
--- /dev/null
+/*
+ * include/asm-microblaze/pgalloc.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#define check_pgt_cache() do {} while(0)
+
+#endif /* _ASM_PGALLOC_H */
--- /dev/null
+/*
+ * include/asm-microblaze/pgtable.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#define pgd_present(pgd) (1) /* pages are always present on NO_MM */
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr) (1)
+#define pmd_offset(a, b) ((void *) 0)
+
+#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
+
+#define __swp_type(x) (0)
+#define __swp_offset(x) (0)
+#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#ifndef __ASSEMBLY__
+static inline int pte_file(pte_t pte) { return 0; }
+#endif
+
+#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
+
+#define swapper_pg_dir ((pgd_t *) NULL)
+
+#define pgtable_cache_init() do {} while(0)
+
+#define arch_enter_lazy_cpu_mode() do {} while (0)
+#define arch_leave_lazy_cpu_mode() do {} while (0)
+#define arch_flush_lazy_cpu_mode() do {} while (0)
+
+#endif /* _ASM_PGTABLE_H */
--- /dev/null
+/*
+ * include/asm-microblaze/poll.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_POLL_H
+#define _ASM_POLL_H
+
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND 0x0100
+
+#define POLLMSG 0x0400
+#define POLLREMOVE 0x0800
+#define POLLRDHUP 0x2000
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif /* _ASM_POLL_H */
--- /dev/null
+/*
+ * include/asm-microblaze/posix_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef __MICROBLAZE_POSIX_TYPES_H__
+#define __MICROBLAZE_POSIX_TYPES_H__
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+typedef unsigned short __kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+ int val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+ int __val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#undef __FD_SET
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+
+#undef __FD_CLR
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+
+#undef __FD_ISSET
+#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
+
+#undef __FD_ZERO
+#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+
+#endif /* __MICROBLAZE_POSIX_TYPES_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/processor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+
+/*
+ * User space process size: memory size
+ *
+ * TASK_SIZE on MMU cpu is usually 1GB. However, on no-MMU arch, both
+ * user processes and the kernel is on the same memory region. They
+ * both share the memory space and that is limited by the amount of
+ * physical memory. thus, we set TASK_SIZE == amount of total memory.
+ */
+
+#define TASK_SIZE (0x81000000 - 0x80000000)
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's. We won't be using it
+ */
+#define TASK_UNMAPPED_BASE 0
+
+struct task_struct;
+
+/* thread_struct is gone. use thread_info instead. */
+struct thread_struct { };
+#define INIT_THREAD { }
+
+/* Do necessary setup to start up a newly executed thread. */
+static inline void start_thread(struct pt_regs *regs,
+ unsigned long pc,
+ unsigned long usp)
+{
+ regs->pc = pc;
+ regs->r1 = usp;
+ regs->kernel_mode = 0;
+}
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Free all resources held by a thread. */
+static inline void exit_thread(void)
+{
+}
+
+extern unsigned long thread_saved_pc(struct task_struct *t);
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+/* FIXME */
+#define cpu_relax() do {} while(0)
+#define prepare_to_copy(tsk) do {} while(0)
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+#define task_pt_regs(tsk) (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
+
+
+#define KSTK_EIP(tsk) (0)
+#define KSTK_ESP(tsk) (0)
+
+#endif /* _ASM_PROCESSOR_H */
--- /dev/null
+#ifndef _MICROBLAZE_PROM_H
+#define _MICROBLAZE_PROM_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+
+#define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l))
+#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
+#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
+
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full paths and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names, and is fully page aligned and
+ * self contained in a page, so that it can be kept around by the kernel,
+ * each property name appears only once in this page (cheap compression)
+ *
+ * the mem_rsvmap contains a map of reserved ranges of physical memory,
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0
+ */
+struct boot_param_header
+{
+ u32 magic; /* magic word OF_DT_HEADER */
+ u32 totalsize; /* total size of DT block */
+ u32 off_dt_struct; /* offset to structure */
+ u32 off_dt_strings; /* offset to strings */
+ u32 off_mem_rsvmap; /* offset to memory reserve map */
+ u32 version; /* format version */
+ u32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ u32 dt_strings_size; /* size of the DT strings block */
+ /* version 17 fields below */
+ u32 dt_struct_size; /* size of the DT structure block */
+};
+
+
+
+typedef u32 phandle;
+typedef u32 ihandle;
+
+struct property {
+ char *name;
+ int length;
+ void *value;
+ struct property *next;
+};
+
+struct device_node {
+ const char *name;
+ const char *type;
+ phandle node;
+ phandle linux_phandle;
+ char *full_name;
+
+ struct property *properties;
+ struct property *deadprops; /* removed properties */
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ struct device_node *next; /* next device of same type */
+ struct device_node *allnext; /* next in list of all nodes */
+ struct proc_dir_entry *pde; /* this node's proc directory */
+ struct kref kref;
+ unsigned long _flags;
+ void *data;
+};
+
+extern struct device_node *of_chosen;
+
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return test_bit(flag, &n->_flags);
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+ set_bit(flag, &n->_flags);
+}
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
+{
+ dn->pde = de;
+}
+
+
+extern struct device_node *of_find_all_nodes(struct device_node *prev);
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+
+/* For scanning the flat device-tree at boot time */
+extern int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data);
+extern void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size);
+extern int __init of_flat_dt_is_compatible(unsigned long node, const char *name);
+extern unsigned long __init of_get_flat_dt_root(void);
+
+/* For updating the device tree at runtime */
+extern void of_attach_node(struct device_node *);
+extern void of_detach_node(struct device_node *);
+
+/* Other Prototypes */
+extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
+#define device_is_compatible(d, c) of_device_is_compatible((d), (c))
+extern int machine_is_compatible(const char *compat);
+extern void print_properties(struct device_node *node);
+extern int prom_n_intr_cells(struct device_node* np);
+extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
+extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop);
+
+extern struct resource *request_OF_resource(struct device_node* node,
+ int index, const char* name_postfix);
+extern int release_OF_resource(struct device_node* node, int index);
+
+
+/*
+ * OF address retrieval & translation
+ */
+
+
+/* Helper to read a big number; size is in cells (not bytes) */
+static inline u64 of_read_number(const u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | *(cell++);
+ return r;
+}
+
+/* Like of_read_number, but we want an unsigned long result */
+#ifdef CONFIG_PPC32
+static inline unsigned long of_read_ulong(const u32 *cell, int size)
+{
+ return cell[size-1];
+}
+#else
+#define of_read_ulong(cell, size) of_read_number(cell, size)
+#endif
+
+/* Translate an OF address block into a CPU physical address
+ */
+extern u64 of_translate_address(struct device_node *np, const u32 *addr);
+
+/* Extract an address from a device, returns the region size and
+ * the address space flags too. The PCI version uses a BAR number
+ * instead of an absolute index
+ */
+extern const u32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags);
+extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags);
+
+/* Get an address as a resource. Note that if your address is
+ * a PIO address, the conversion will fail if the physical address
+ * can't be internally converted to an IO token with
+ * pci_address_to_pio(), that is because it's either called too early
+ * or it can't be matched to any host bridge IO space
+ */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size);
+
+extern void kdump_move_device_tree(void);
+
+/* CPU OF node matching */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+
+/* Get the MAC address */
+extern const void *of_get_mac_address(struct device_node *np);
+
+/*
+ * OF interrupt mapping
+ */
+
+/* This structure is returned when an interrupt is mapped. The controller
+ * field needs to be put() after use
+ */
+
+#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
+
+struct of_irq {
+ struct device_node *controller; /* Interrupt controller node */
+ u32 size; /* Specifier size */
+ u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
+};
+
+/**
+ * of_irq_map_init - Initialize the irq remapper
+ * @flags: flags defining workarounds to enable
+ *
+ * Some machines have bugs in the device-tree which require certain workarounds
+ * to be applied. Call this before any interrupt mapping attempts to enable
+ * those workarounds.
+ */
+#define OF_IMAP_OLDWORLD_MAC 0x00000001
+#define OF_IMAP_NO_PHANDLE 0x00000002
+
+extern void of_irq_map_init(unsigned int flags);
+
+/**
+ * of_irq_map_raw - Low level interrupt tree parsing
+ * @parent: the device interrupt parent
+ * @intspec: interrupt specifier ("interrupts" property of the device)
+ * @ointsize: size of the passed in interrupt specifier
+ * @addr: address specifier (start of "reg" property of the device)
+ * @out_irq: structure of_irq filled by this function
+ *
+ * Returns 0 on success and a negative number on error
+ *
+ * This function is a low-level interrupt tree walking function. It
+ * can be used to do a partial walk with synthesized reg and interrupts
+ * properties, for example when resolving PCI interrupts when no device
+ * node exist for the parent.
+ *
+ */
+
+extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
+ u32 ointsize, const u32 *addr,
+ struct of_irq *out_irq);
+
+
+/**
+ * of_irq_map_one - Resolve an interrupt for a device
+ * @device: the device whose interrupt is to be resolved
+ * @index: index of the interrupt to resolve
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves an interrupt, walking the tree, for a given
+ * device-tree node. It's the high level pendant to of_irq_map_raw().
+ * It also implements the workarounds for OldWorld Macs.
+ */
+extern int of_irq_map_one(struct device_node *device, int index,
+ struct of_irq *out_irq);
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+struct pci_dev;
+extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+
+extern int of_irq_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+
+/**
+ * of_iomap - Maps the memory mapped IO for a given device_node
+ * @device: the device whose io range will be mapped
+ * @index: index of the io range
+ *
+ * Returns a pointer to the mapped memory
+ */
+extern void __iomem *of_iomap(struct device_node *device, int index);
+
+/*
+ * NB: This is here while we transition from using asm/prom.h
+ * to linux/of.h
+ */
+#include <linux/of.h>
+
+#endif /* __KERNEL__ */
+#endif /* _MICROBLAZE_PROM_H */
--- /dev/null
+/*
+ * include/asm-microblaze/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+typedef unsigned long microblaze_reg_t;
+
+struct pt_regs {
+ microblaze_reg_t r0;
+ microblaze_reg_t r1;
+ microblaze_reg_t r2;
+ microblaze_reg_t r3;
+ microblaze_reg_t r4;
+ microblaze_reg_t r5;
+ microblaze_reg_t r6;
+ microblaze_reg_t r7;
+ microblaze_reg_t r8;
+ microblaze_reg_t r9;
+ microblaze_reg_t r10;
+ microblaze_reg_t r11;
+ microblaze_reg_t r12;
+ microblaze_reg_t r13;
+ microblaze_reg_t r14;
+ microblaze_reg_t r15;
+ microblaze_reg_t r16;
+ microblaze_reg_t r17;
+ microblaze_reg_t r18;
+ microblaze_reg_t r19;
+ microblaze_reg_t r20;
+ microblaze_reg_t r21;
+ microblaze_reg_t r22;
+ microblaze_reg_t r23;
+ microblaze_reg_t r24;
+ microblaze_reg_t r25;
+ microblaze_reg_t r26;
+ microblaze_reg_t r27;
+ microblaze_reg_t r28;
+ microblaze_reg_t r29;
+ microblaze_reg_t r30;
+ microblaze_reg_t r31;
+ microblaze_reg_t pc;
+ microblaze_reg_t msr;
+ microblaze_reg_t ear;
+ microblaze_reg_t esr;
+ microblaze_reg_t fsr;
+ int kernel_mode;
+};
+
+#define kernel_mode(regs) ((regs)->kernel_mode)
+#define user_mode(regs) (!kernel_mode(regs))
+
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+
+extern void show_regs(struct pt_regs *);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PTRACE_H */
--- /dev/null
+/*
+ * Support for the MicroBlaze PVR (Processor Version Register)
+ *
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+#ifndef _ASM_PVR_H
+#define _ASM_PVR_H
+
+#define PVR_MSR_BIT 0x400
+
+struct pvr_s {
+ unsigned pvr[16];
+};
+
+/* The following taken from Xilinx's standalone BSP pvr.h */
+
+/* Basic PVR mask */
+#define PVR0_PVR_FULL_MASK 0x80000000
+#define PVR0_USE_BARREL_MASK 0x40000000
+#define PVR0_USE_DIV_MASK 0x20000000
+#define PVR0_USE_HW_MUL_MASK 0x10000000
+#define PVR0_USE_FPU_MASK 0x08000000
+#define PVR0_USE_EXCEPTION_MASK 0x04000000
+#define PVR0_USE_ICACHE_MASK 0x02000000
+#define PVR0_USE_DCACHE_MASK 0x01000000
+#define PVR0_VERSION_MASK 0x0000FF00
+#define PVR0_USER1_MASK 0x000000FF
+
+/* User 2 PVR mask */
+#define PVR1_USER2_MASK 0xFFFFFFFF
+
+/* Configuration PVR masks */
+#define PVR2_D_OPB_MASK 0x80000000
+#define PVR2_D_LMB_MASK 0x40000000
+#define PVR2_I_OPB_MASK 0x20000000
+#define PVR2_I_LMB_MASK 0x10000000
+#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
+#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
+#define PVR2_USE_MSR_INSTR 0x00020000
+#define PVR2_USE_PCMP_INSTR 0x00010000
+#define PVR2_AREA_OPTIMISED 0x00008000
+#define PVR2_USE_BARREL_MASK 0x00004000
+#define PVR2_USE_DIV_MASK 0x00002000
+#define PVR2_USE_HW_MUL_MASK 0x00001000
+#define PVR2_USE_FPU_MASK 0x00000800
+#define PVR2_USE_MUL64_MASK 0x00000400
+#define PVR2_OPCODE_0x0_ILLEGAL_MASK 0x00000040
+#define PVR2_UNALIGNED_EXCEPTION_MASK 0x00000020
+#define PVR2_ILL_OPCODE_EXCEPTION_MASK 0x00000010
+#define PVR2_IOPB_BUS_EXCEPTION_MASK 0x00000008
+#define PVR2_DOPB_BUS_EXCEPTION_MASK 0x00000004
+#define PVR2_DIV_ZERO_EXCEPTION_MASK 0x00000002
+#define PVR2_FPU_EXCEPTION_MASK 0x00000001
+
+/* Debug and exception PVR masks */
+#define PVR3_DEBUG_ENABLED_MASK 0x80000000
+#define PVR3_NUMBER_OF_PC_BRK_MASK 0x1E000000
+#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000
+#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000
+#define PVR3_FSL_LINKS_MASK 0x00000380
+
+/* ICache config PVR masks */
+#define PVR4_USE_ICACHE_MASK 0x80000000
+#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000
+#define PVR4_ICACHE_USE_FSL_MASK 0x02000000
+#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000
+#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000
+#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000
+
+/* DCache config PVR masks */
+#define PVR5_USE_DCACHE_MASK 0x80000000
+#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000
+#define PVR5_DCACHE_USE_FSL_MASK 0x02000000
+#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000
+#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000
+#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000
+
+/* ICache base address PVR mask */
+#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF
+
+/* ICache high address PVR mask */
+#define PVR7_ICACHE_HIGHADDR_MASK 0xFFFFFFFF
+
+/* DCache base address PVR mask */
+#define PVR8_DCACHE_BASEADDR_MASK 0xFFFFFFFF
+
+/* DCache high address PVR mask */
+#define PVR9_DCACHE_HIGHADDR_MASK 0xFFFFFFFF
+
+/* Target family PVR mask */
+#define PVR10_TARGET_FAMILY_MASK 0xFF000000
+
+/* MSR Reset value PVR mask */
+#define PVR11_MSR_RESET_VALUE_MASK 0x000007FF
+
+
+/* PVR access macros */
+#define PVR_IS_FULL(pvr_s) (pvr_s.pvr[0] & PVR0_PVR_FULL_MASK)
+#define PVR_USE_BARREL(pvr_s) (pvr_s.pvr[0] & PVR0_USE_BARREL_MASK)
+#define PVR_USE_DIV(pvr_s) (pvr_s.pvr[0] & PVR0_USE_DIV_MASK)
+#define PVR_USE_HW_MUL(pvr_s) (pvr_s.pvr[0] & PVR0_USE_HW_MUL_MASK)
+#define PVR_USE_FPU(pvr_s) (pvr_s.pvr[0] & PVR0_USE_FPU_MASK)
+#define PVR_USE_ICACHE(pvr_s) (pvr_s.pvr[0] & PVR0_USE_ICACHE_MASK)
+#define PVR_USE_DCACHE(pvr_s) (pvr_s.pvr[0] & PVR0_USE_DCACHE_MASK)
+#define PVR_VERSION(pvr_s) ((pvr_s.pvr[0] & PVR0_VERSION_MASK) >> 8)
+#define PVR_USER1(pvr_s) (pvr_s.pvr[0] & PVR0_USER1_MASK)
+
+#define PVR_USER2(pvr_s) (pvr_s.pvr[1] & PVR1_USER2_MASK)
+
+#define PVR_D_OPB(pvr_s) (pvr_s.pvr[2] & PVR2_D_OPB_MASK)
+#define PVR_D_LMB(pvr_s) (pvr_s.pvr[2] & PVR2_D_LMB_MASK)
+#define PVR_I_OPB(pvr_s) (pvr_s.pvr[2] & PVR2_I_OPB_MASK)
+#define PVR_I_LMB(pvr_s) (pvr_s.pvr[2] & PVR2_I_LMB_MASK)
+#define PVR_INTERRUPT_IS_EDGE(pvr_s) (pvr_s.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
+#define PVR_EDGE_IS_POSITIVE(pvr_s) (pvr_s.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
+#define PVR_USE_MSR_INSTR(pvr_s) (pvr_s.pvr[2] & PVR2_USE_MSR_INSTR)
+#define PVR_USE_PCMP_INSTR(pvr_s) (pvr_s.pvr[2] & PVR2_USE_PCMP_INSTR)
+#define PVR_AREA_OPTIMISED(pvr_s) (pvr_s.pvr[2] & PVR2_AREA_OPTIMISED)
+#define PVR_USE_MUL64(pvr_s) (pvr_s.pvr[2] & PVR2_USE_MUL64_MASK)
+#define PVR_OPCODE_0x0_ILLEGAL(pvr_s) (pvr_s.pvr[2] & PVR2_OPCODE_0x0_ILLEGAL_MASK)
+#define PVR_UNALIGNED_EXCEPTION(pvr_s) (pvr_s.pvr[2] & PVR2_UNALIGNED_EXCEPTION_MASK)
+#define PVR_ILL_OPCODE_EXCEPTION(pvr_s) (pvr_s.pvr[2] & PVR2_ILL_OPCODE_EXCEPTION_MASK)
+#define PVR_IOPB_BUS_EXCEPTION(pvr_s) (pvr_s.pvr[2] & PVR2_IOPB_BUS_EXCEPTION_MASK)
+#define PVR_DOPB_BUS_EXCEPTION(pvr_s) (pvr_s.pvr[2] & PVR2_DOPB_BUS_EXCEPTION_MASK)
+#define PVR_DIV_ZERO_EXCEPTION(pvr_s) (pvr_s.pvr[2] & PVR2_DIV_ZERO_EXCEPTION_MASK)
+#define PVR_FPU_EXCEPTION(pvr_s) (pvr_s.pvr[2] & PVR2_FPU_EXCEPTION_MASK)
+
+#define PVR_DEBUG_ENABLED(pvr_s) (pvr_s.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
+#define PVR_NUMBER_OF_PC_BRK(pvr_s) ((pvr_s.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
+#define PVR_NUMBER_OF_RD_ADDR_BRK(pvr_s) ((pvr_s.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
+#define PVR_NUMBER_OF_WR_ADDR_BRK(pvr_s) ((pvr_s.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
+#define PVR_FSL_LINKS(pvr_s) ((pvr_s.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
+
+#define PVR_ICACHE_ADDR_TAG_BITS(pvr_s) ((pvr_s.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_ICACHE_USE_FSL(pvr_s) (pvr_s.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
+#define PVR_ICACHE_ALLOW_WR(pvr_s) (pvr_s.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
+#define PVR_ICACHE_LINE_LEN(pvr_s) (1 << ((pvr_s.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
+#define PVR_ICACHE_BYTE_SIZE(pvr_s) (1 << ((pvr_s.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
+
+#define PVR_DCACHE_ADDR_TAG_BITS(pvr_s) ((pvr_s.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_DCACHE_USE_FSL(pvr_s) (pvr_s.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
+#define PVR_DCACHE_ALLOW_WR(pvr_s) (pvr_s.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+#define PVR_DCACHE_LINE_LEN(pvr_s) (1 << ((pvr_s.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
+#define PVR_DCACHE_BYTE_SIZE(pvr_s) (1 << ((pvr_s.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+
+
+#define PVR_ICACHE_BASEADDR(pvr_s) (pvr_s.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
+#define PVR_ICACHE_HIGHADDR(pvr_s) (pvr_s.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+
+#define PVR_DCACHE_BASEADDR(pvr_s) (pvr_s.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
+#define PVR_DCACHE_HIGHADDR(pvr_s) (pvr_s.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
+
+#define PVR_TARGET_FAMILY(pvr_s) ((pvr_s.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
+
+#define PVR_MSR_RESET_VALUE(pvr_s) (pvr_s.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
+
+int cpu_has_pvr(void);
+void get_pvr(struct pvr_s *pvr);
+
+#endif
+
--- /dev/null
+/*
+ * include/asm-microblaze/registers.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_REGISTERS_H
+#define _ASM_REGISTERS_H
+
+#define MSR_BE (1<<0)
+#define MSR_IE (1<<1)
+#define MSR_C (1<<2)
+#define MSR_BIP (1<<3)
+#define MSR_FSL (1<<4)
+#define MSR_ICE (1<<5)
+#define MSR_DZ (1<<6)
+#define MSR_DCE (1<<7)
+#define MSR_EE (1<<8)
+#define MSR_EIP (1<<9)
+#define MSR_CC (1<<31)
+
+#endif /* _ASM_REGISTERS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/resource.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_RESOURCE_H
+#define _ASM_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif /* _ASM_RESOURCE_H */
--- /dev/null
+/*
+ * include/asm-microblaze/scatterlist.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SCATTERLIST_H
+#define _ASM_SCATTERLIST_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/dma.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+
+ /* For TCE support */
+ dma_addr_t dma_address;
+ u32 dma_length;
+};
+
+#endif
+
+#endif
--- /dev/null
+/*
+ * include/asm-microblaze/sections.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SECTIONS_H
+#define _ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _ssbss[], _esbss[];
+extern unsigned long __ivt_start[], __ivt_end[];
+
+#endif /* _ASM_SECTIONS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/segment.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ */
+
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
+ */
+#define KERNEL_DS ((mm_segment_t){0})
+#define USER_DS KERNEL_DS
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) do { current_thread_info()->addr_limit = (x); } while(0)
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SEGMENT_H */
--- /dev/null
+/*
+ * include/asm-microblaze/semaphore.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SEMAPHORE_H
+#define _ASM_SEMAPHORE_H
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+struct semaphore {
+ atomic_t count;
+ int sleepers;
+ wait_queue_head_t wait;
+};
+
+#define __SEMAPHORE_INITIALIZER(name,n) \
+{ \
+ .count = ATOMIC_INIT(n), \
+ .sleepers = 0, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+}
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+static inline void sema_init (struct semaphore *sem, int val)
+{
+ atomic_set(&sem->count, val);
+ sem->sleepers = 0;
+ init_waitqueue_head(&sem->wait);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+extern spinlock_t semaphore_wake_lock;
+
+static inline void down(struct semaphore * sem)
+{
+ might_sleep();
+
+ if (atomic_sub_return(1, &sem->count) < 0)
+ __down(sem);
+}
+
+static inline int down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ if (atomic_sub_return(1, &sem->count) < 0)
+ ret = __down_interruptible(sem);
+ return ret;
+}
+
+static inline int down_trylock(struct semaphore * sem)
+{
+ int ret = 0;
+
+ if (atomic_sub_return(1, &sem->count) < 0)
+ ret = __down_trylock(sem);
+ return ret;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ */
+static inline void up(struct semaphore * sem)
+{
+ if (atomic_add_return(1, &sem->count) <= 0)
+ __up(sem);
+}
+
+#endif /* _ASM_SEMAPHORE_H */
--- /dev/null
+/*
+ * include/asm-microblaze/sembuf.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SEMBUF_H
+#define _ASM_SEMBUF_H
+
+/*
+ * The semid64_ds structure for the microblaze architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+
+#endif /* _ASM_SEMBUF_H */
--- /dev/null
+/*
+ * include/asm-microblaze/setup.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SETUP_H
+#define _ASM_SETUP_H
+
+#include <linux/init.h>
+
+#define COMMAND_LINE_SIZE 256
+
+int setup_early_printk(char *opt);
+
+void __init setup_memory(void);
+
+#endif /* _ASM_SETUP_H */
--- /dev/null
+/*
+ * include/asm-microblaze/shmbuf.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SHMBUF_H
+#define _ASM_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for the microblaze architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_SHMBUF_H */
--- /dev/null
+/*
+ * include/asm-microblaze/shmparam.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SHMPARAM_H
+#define _ASM_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_SHMPARAM_H */
--- /dev/null
+/*
+ * include/asm-microblaze/sigcontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SIGCONTEXT_H
+#define _ASM_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+struct sigcontext {
+ struct pt_regs regs;
+ unsigned long oldmask;
+};
+
+#endif /* _ASM_SIGCONTEXT_H */
--- /dev/null
+/*
+ * include/asm-microblaze/siginfo.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SIGINFO_H
+#define _ASM_SIGINFO_H
+
+#include <linux/types.h>
+#include <asm-generic/siginfo.h>
+
+#endif /* _ASM_SIGINFO_H */
--- /dev/null
+/*
+ * include/asm-microblaze/signal.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * Authors:
+ * Yasushi SHOJI <yashi@atmark-techno.com>
+ * Tetsuya OHKAWA <tetsuya@atmark-techno.com>
+ */
+
+#ifndef __MICROBLAZE_SIGNAL_H__
+#define __MICROBLAZE_SIGNAL_H__
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifdef __KERNEL__
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#else /* !__KERNEL__ */
+
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/* SIGLOST would share value 29 with SIGIO; left disabled, as on most arches. */
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifndef __ASSEMBLY__
+#include <asm-generic/signal.h>
+
+#ifdef __KERNEL__
+
+/* Legacy layout used by the old sys_sigaction() interface. */
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+/* Kernel-internal wrapper; keeps struct sigaction itself ABI-clean. */
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+#else /* !__KERNEL__ */
+
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+
+/* Alternate signal stack descriptor, used by sigaltstack(2). */
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+
+#include <asm/sigcontext.h>
+#undef __HAVE_ARCH_SIG_BITOPS
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __MICROBLAZE_SIGNAL_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/socket.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+#ifndef __MICROBLAZE_SOCKET_H__
+#define __MICROBLAZE_SOCKET_H__
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+/* NOTE: 32/33 appear out of numeric order here, presumably to keep the
+ * values in sync with other architectures — confirm against asm-generic. */
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+
+#define SO_PEERSEC 31
+#define SO_PASSSEC 34
+
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+#endif /* __MICROBLAZE_SOCKET_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/sockios.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SOCKIOS_H
+#define _ASM_SOCKIOS_H
+
+#include <asm/ioctl.h>
+
+/* Socket-level ioctl request numbers (same values as the generic set). */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+#endif /* _ASM_SOCKIOS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/stat.h -- microblaze stat structure
+ *
+ * Copyright (C) 2001,02,03 NEC Electronics Corporation
+ * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * Written by Miles Bader <miles@gnu.org>
+ */
+
+#ifndef __MICROBLAZE_STAT_H__
+#define __MICROBLAZE_STAT_H__
+
+#include <asm/posix_types.h>
+
+/* 32-bit stat layout exposed to userspace; field order is ABI and must
+ * not change. */
+struct stat {
+ unsigned int st_dev;
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int st_rdev;
+ long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+/* Large-file variant used by the stat64()/fstat64() family. */
+struct stat64 {
+ unsigned long long st_dev;
+ unsigned long __unused1;
+
+ unsigned long long st_ino;
+
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned int st_uid;
+ unsigned int st_gid;
+
+ unsigned long long st_rdev;
+ unsigned long __unused3;
+
+ long long st_size;
+ unsigned long st_blksize;
+
+ unsigned long st_blocks; /* No. of 512-byte blocks allocated */
+ unsigned long __unused4; /* future possible st_blocks high bits */
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+
+ unsigned long __unused8;
+};
+
+#endif /* __MICROBLAZE_STAT_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/statfs.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_STATFS_H
+#define _ASM_STATFS_H
+
+/* microblaze uses the asm-generic statfs layout unchanged. */
+#include <asm-generic/statfs.h>
+
+#endif /* _ASM_STATFS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/string.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_STRING_H
+#define _ASM_STRING_H
+
+/* Arch supplies its own mem* routines; these macros tell lib/string.c
+ * not to emit the generic versions. Implementations live elsewhere in
+ * the arch tree. */
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * memmove(void *,const void *,__kernel_size_t);
+
+#endif /* _ASM_STRING_H */
--- /dev/null
+/*
+ * include/asm-microblaze/system.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_SYSTEM_H
+#define _ASM_SYSTEM_H
+
+#include <asm/xparameters.h>
+#include <asm/registers.h>
+
+struct task_struct;
+struct thread_info;
+
+/* Low-level context switch; returns the task that was previously running. */
+extern struct task_struct * _switch_to(struct thread_info *prev, struct thread_info *next);
+
+#define switch_to(prev, next, last) \
+ do { \
+ (last) = _switch_to(task_thread_info(prev), task_thread_info(next)); \
+ } while(0)
+
+/*
+ * IRQ control. When the core is configured with the optional msrset/msrclr
+ * instructions they are used directly (msrclr writes the pre-clear MSR value
+ * into its destination register, giving local_irq_save in one instruction).
+ * Otherwise MSR is read-modify-written via rmsr; the trailing nop after
+ * "mts rmsr" presumably covers the write latency — confirm against the
+ * MicroBlaze reference guide.
+ */
+#if XPAR_MICROBLAZE_0_USE_MSR_INSTR
+
+#define local_irq_save(flags) \
+ do { \
+ asm volatile ("# local_irq_save \n\t" \
+ "msrclr %0, %1 \n\t" \
+ : "=r"(flags) \
+ : "i"(MSR_IE) \
+ : "memory"); \
+ } while(0)
+
+#define local_irq_disable() \
+ do { \
+ asm volatile ("# local_irq_disable \n\t" \
+ "msrclr r0, %0 \n\t" \
+ : \
+ : "i"(MSR_IE) \
+ : "memory"); \
+ } while(0)
+
+#define local_irq_enable() \
+ do { \
+ asm volatile ("# local_irq_enable \n\t" \
+ "msrset r0, %0 \n\t" \
+ : \
+ : "i"(MSR_IE) \
+ : "memory"); \
+ } while(0)
+
+#else /* XPAR_MICROBLAZE_0_USE_MSR_INSTR == 0 */
+
+#define local_irq_save(flags) \
+ do { \
+ register unsigned tmp; \
+ asm volatile ("# local_irq_save \n\t" \
+ "mfs %0, rmsr \n\t" \
+ "andi %1, %0, %2 \n\t" \
+ "mts rmsr, %1 \n\t" \
+ "nop \n\t" \
+ : "=r"(flags), "=r" (tmp) \
+ : "i"(~MSR_IE) \
+ : "memory"); \
+ } while(0)
+
+#define local_irq_disable() \
+ do { \
+ register unsigned tmp; \
+ asm volatile ("# local_irq_disable \n\t" \
+ "mfs %0, rmsr \n\t" \
+ "andi %0, %0, %1 \n\t" \
+ "mts rmsr, %0 \n\t" \
+ "nop \n\t" \
+ : "=r"(tmp) \
+ : "i"(~MSR_IE) \
+ : "memory"); \
+ } while(0)
+
+#define local_irq_enable() \
+ do { \
+ register unsigned tmp; \
+ asm volatile ("# local_irq_enable \n\t" \
+ "mfs %0, rmsr \n\t" \
+ "ori %0, %0, %1 \n\t" \
+ "mts rmsr, %0 \n\t" \
+ "nop \n\t" \
+ : "=r"(tmp) \
+ : "i"(MSR_IE) \
+ : "memory"); \
+ } while(0)
+
+#endif /* XPAR_MICROBLAZE_0_USE_MSR_INSTR */
+
+#define local_save_flags(flags) \
+ do { \
+ asm volatile ("# local_save_flags \n\t" \
+ "mfs %0, rmsr \n\t" \
+ : "=r"(flags) \
+ : \
+ : "memory"); \
+ } while(0)
+
+#define local_irq_restore(flags) \
+ do { \
+ asm volatile ("# local_irq_restore \n\t" \
+ "mts rmsr, %0 \n\t" \
+ : \
+ : "r"(flags) \
+ : "memory"); \
+ } while(0)
+
+/* IRQs are disabled when the MSR interrupt-enable bit is clear. */
+static inline int irqs_disabled(void)
+{
+ unsigned long flags;
+
+ local_save_flags(flags);
+ return ((flags & MSR_IE) == 0);
+}
+
+
+
+#define smp_read_barrier_depends() do {} while(0)
+#define read_barrier_depends() do {} while(0)
+
+#define nop() asm volatile ("nop")
+#define mb() barrier()
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+
+/* UP-only xchg: atomicity is provided by disabling interrupts around the
+ * load/store pair. Unsupported sizes resolve to the undefined symbol
+ * __bad_xchg, turning misuse into a link-time error. */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+ extern void __bad_xchg(volatile void *, int);
+ unsigned long ret;
+ unsigned long flags;
+
+ switch (size) {
+ case 1:
+ local_irq_save(flags);
+ ret = *(volatile unsigned char *)ptr;
+ *(volatile unsigned char *)ptr = x;
+ local_irq_restore(flags);
+ break;
+
+ case 4:
+ local_irq_save(flags);
+ ret = *(volatile unsigned long *)ptr;
+ *(volatile unsigned long *)ptr = x;
+ local_irq_restore(flags);
+ break;
+ default:
+ __bad_xchg(ptr, size), ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+
+#endif /* _ASM_SYSTEM_H */
--- /dev/null
+/*
+ * include/asm-microblaze/termbits.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_TERMBITS_H
+#define _ASM_TERMBITS_H
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+/* Number of control characters in c_cc[]. */
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* Kernel-internal termios: adds explicit input/output speed fields. */
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+
+/* c_cc characters */
+
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_TERMBITS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/termios.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_TERMIOS_H
+#define _ASM_TERMIOS_H
+
+#include <linux/string.h>
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+/* Terminal window size, reported/set via TIOCGWINSZ/TIOCSWINSZ. */
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+#ifdef __KERNEL__
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+#endif
+
+/* Modem lines */
+
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* Line disciplines */
+
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+#define N_X25 6 /* X.25 async */
+#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964 9 /* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC 13 /* synchronous HDLC */
+#define N_SYNC_PPP 14
+#define N_HCI 15 /* Bluetooth HCI UART */
+
+#include <asm-generic/termios.h>
+
+#endif /* _ASM_TERMIOS_H */
--- /dev/null
+/*
+ * include/asm-microblaze/thread_info.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ * FIXME -- need review
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+/* we have 8k stack */
+#define THREAD_SHIFT 13
+#define THREAD_SIZE (1 << THREAD_SHIFT)
+
+#ifndef __ASSEMBLY__
+# include <asm/processor.h>
+# include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+
+/* Saved register state for a sleeping task; layout must stay in sync
+ * with the assembly offsets used by the context-switch code. */
+struct cpu_context {
+ __u32 sp;
+ __u32 r2;
+ /* dedicated registers */
+ __u32 r13;
+ __u32 r14;
+ __u32 r15;
+ __u32 r16;
+ __u32 r17;
+ __u32 r18;
+ /* non-volatile registers */
+ __u32 r19;
+ __u32 r20;
+ __u32 r21;
+ __u32 r22;
+ __u32 r23;
+ __u32 r24;
+ __u32 r25;
+ __u32 r26;
+ __u32 r27;
+ __u32 r28;
+ __u32 r29;
+ __u32 r30;
+ /* special purpose registers */
+ __u32 msr;
+ __u32 ear;
+ __u32 esr;
+ __u32 fsr;
+};
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
+ mm_segment_t addr_limit; /* thread address space */
+ struct restart_block restart_block;
+
+ struct cpu_context cpu_context;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("r1");
+
+ /* the stack is THREAD_SIZE-aligned, so masking the stack pointer
+ * yields the thread_info at the base of the stack */
+ return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
+}
+
+/* thread information allocation: order-1 (two pages = 8k, matching THREAD_SIZE) */
+#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
+#define TIF_IRET 5 /* return with iret */
+#define TIF_MEMDIE 6
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_IRET (1<<TIF_IRET)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+
+#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_THREAD_INFO_H */
--- /dev/null
+/*
+ * include/asm-microblaze/timex.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ * FIXME -- need review
+ */
+
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#define CLOCK_TICK_RATE 1000 /* Timer input freq. */
+
+typedef unsigned long cycles_t;
+
+/* No cycle counter is read: always returns 0. */
+#define get_cycles() (0)
+
+#endif /* _ASM_TIMEX_H */
--- /dev/null
+/*
+ * include/asm-microblaze/tlb.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_TLB_H
+#define _ASM_TLB_H
+
+/* nommu configuration: nothing to flush; asm-generic supplies the rest. */
+#define tlb_flush(tlb) do {} while(0)
+
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_TLB_H */
--- /dev/null
+/*
+ * include/asm-microblaze/tlbflush.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_TLBFLUSH_H
+#define _ASM_TLBFLUSH_H
+
+/* No MMU/TLB: reaching any of these paths is a bug, hence BUG(). */
+#define flush_tlb() BUG()
+#define flush_tlb_all() BUG()
+#define flush_tlb_mm(mm) BUG()
+#define flush_tlb_page(vma,addr) BUG()
+#define flush_tlb_range(mm,start,end) BUG()
+#define flush_tlb_pgtables(mm,start,end) BUG()
+#define flush_tlb_kernel_range(start, end) BUG()
+
+#endif /* _ASM_TLBFLUSH_H */
--- /dev/null
+/*
+ * NOTE(review): this file appears to be copied verbatim from the powerpc
+ * topology.h. The include guard has been renamed so it cannot collide with
+ * the real powerpc header; the NUMA/SMP sections below are unreachable on
+ * this architecture (CONFIG_NUMA/CONFIG_SMP are not enabled) and should
+ * eventually be pruned to just the asm-generic include.
+ */
+#ifndef _ASM_MICROBLAZE_TOPOLOGY_H
+#define _ASM_MICROBLAZE_TOPOLOGY_H
+#ifdef __KERNEL__
+
+
+struct sys_device;
+struct device_node;
+
+#ifdef CONFIG_NUMA
+
+#include <asm/mmzone.h>
+
+static inline int cpu_to_node(int cpu)
+{
+ return numa_cpu_lookup_table[cpu];
+}
+
+#define parent_node(node) (node)
+
+static inline cpumask_t node_to_cpumask(int node)
+{
+ return numa_cpumask_lookup_table[node];
+}
+
+static inline int node_to_first_cpu(int node)
+{
+ cpumask_t tmp;
+ tmp = node_to_cpumask(node);
+ return first_cpu(tmp);
+}
+
+int of_node_to_nid(struct device_node *device);
+
+struct pci_bus;
+#ifdef CONFIG_PCI
+extern int pcibus_to_node(struct pci_bus *bus);
+#else
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+ return -1;
+}
+#endif
+
+#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL : \
+ node_to_cpumask(pcibus_to_node(bus)) \
+ )
+
+/* sched_domains SD_NODE_INIT (inherited unchanged from the PPC64 copy) */
+#define SD_NODE_INIT (struct sched_domain) { \
+ .span = CPU_MASK_NONE, \
+ .parent = NULL, \
+ .child = NULL, \
+ .groups = NULL, \
+ .min_interval = 8, \
+ .max_interval = 32, \
+ .busy_factor = 32, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 3, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_EXEC \
+ | SD_BALANCE_NEWIDLE \
+ | SD_WAKE_IDLE \
+ | SD_SERIALIZE \
+ | SD_WAKE_BALANCE, \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .nr_balance_failed = 0, \
+}
+
+extern void __init dump_numa_cpu_topology(void);
+
+extern int sysfs_add_device_to_node(struct sys_device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid);
+
+#else
+
+static inline int of_node_to_nid(struct device_node *device)
+{
+ return 0;
+}
+
+static inline void dump_numa_cpu_topology(void) {}
+
+static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_device_from_node(struct sys_device *dev,
+ int nid)
+{
+}
+
+
+#include <asm-generic/topology.h>
+
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_SMP
+#include <asm/cputable.h>
+#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
+
+#ifdef CONFIG_PPC64
+#include <asm/smp.h>
+
+#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_MICROBLAZE_TOPOLOGY_H */
--- /dev/null
+/*
+ * include/asm-microblaze/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef __signed__ char s8;
+typedef unsigned char u8;
+
+typedef __signed__ short s16;
+typedef unsigned short u16;
+
+typedef __signed__ int s32;
+typedef unsigned int u32;
+
+typedef __signed__ long long s64;
+typedef unsigned long long u64;
+
+
+/* microblaze is a 32-bit architecture */
+#define BITS_PER_LONG 32
+
+/* Dma addresses are 32-bits wide. */
+
+typedef u32 dma_addr_t;
+
+#endif /* __KERNEL__ */
+#endif
+
+#endif /* _ASM_TYPES_H */
--- /dev/null
+/*
+ * include/asm-microblaze/uaccess.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/segment.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+
+/* Check against bounds of physical memory.
+ * Returns nonzero when [addr, addr+size) falls outside external RAM
+ * (nommu: user and kernel share one address space, so this is the only
+ * check we can make). Valid range is [START, START+SIZE), so the end
+ * comparison is strict ">": a buffer ending exactly at the top of RAM
+ * is still valid. NOTE(review): addr + size can wrap for huge sizes --
+ * worth an overflow check. */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
+{
+ return ((addr < XPAR_ERAM_START) ||
+ ((addr + size) > (XPAR_ERAM_START + XPAR_ERAM_SIZE)));
+}
+
+#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (unsigned long)(size))
+
+#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
+/* Parameter was previously misspelled "add", leaving the body's "addr"
+ * unbound at expansion time. */
+#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
+
+extern inline int bad_user_access_length (void)
+{
+ return 0;
+}
+
+/* nommu: user pointers are plain pointers, so get/put are direct accesses. */
+#define __get_user(var, ptr) \
+ ({ \
+ int __gu_err = 0; \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ (var) = *(ptr); \
+ break; \
+ case 8: \
+ memcpy((void *) &(var), (ptr), 8); \
+ break; \
+ default: \
+ (var) = 0; \
+ __gu_err = __get_user_bad (); \
+ break; \
+ } \
+ __gu_err; \
+ })
+#define __get_user_bad() (bad_user_access_length (), (-EFAULT))
+
+#define __put_user(var, ptr) \
+ ({ \
+ int __pu_err = 0; \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ *(ptr) = (var); \
+ break; \
+ case 8: { \
+ typeof(*(ptr)) __pu_val = var; \
+ memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
+ } \
+ break; \
+ default: \
+ __pu_err = __put_user_bad (); \
+ break; \
+ } \
+ __pu_err; \
+ })
+#define __put_user_bad() (bad_user_access_length (), (-EFAULT))
+
+#define put_user(x, ptr) __put_user(x, ptr)
+#define get_user(x, ptr) __get_user(x, ptr)
+
+/* Straight memcpy, always "succeeds" (returns 0 bytes uncopied):
+ * with no MMU these accesses cannot fault. */
+#define copy_to_user(to,from,n) (memcpy(to, from, n), 0)
+#define copy_from_user(to,from,n) (memcpy(to, from, n), 0)
+
+#define __copy_to_user(to,from,n) (copy_to_user(to,from,n))
+#define __copy_from_user(to,from,n) (copy_from_user(to,from,n))
+#define __copy_to_user_inatomic(to,from,n) (__copy_to_user(to,from,n))
+#define __copy_from_user_inatomic(to,from,n) (__copy_from_user(to,from,n))
+
+/*
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @to: Destination address, in user space.
+ * @n: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+static inline __kernel_size_t
+__clear_user(void __user *addr, __kernel_size_t size)
+{
+ memset((void *)addr, 0, size);
+ return 0;
+}
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+static inline unsigned long clear_user(void *addr, unsigned long size)
+{
+ if (access_ok(VERIFY_WRITE, addr, size))
+ size = __clear_user(addr, size);
+ return size;
+}
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+
+extern long strncpy_from_user(char *dst, const char *src, long count);
+extern long strnlen_user(const char *src, long count);
+
+#endif /* _ASM_UACCESS_H */
--- /dev/null
+#ifndef __MICROBLAZE_UCONTEXT_H__
+#define __MICROBLAZE_UCONTEXT_H__
+
+#include <asm/sigcontext.h>
+
+/* User context saved across signal delivery (uc_link chains nested
+ * contexts; uc_mcontext holds the machine register state). */
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* __MICROBLAZE_UCONTEXT_H__ */
--- /dev/null
+/*
+ * include/asm-microblaze/unaligned.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_UNALIGNED_H
+#define _ASM_UNALIGNED_H
+
+/* No arch-specific handling: delegate to the generic accessors. */
+#include <asm-generic/unaligned.h>
+
+#endif /* _ASM_UNALIGNED_H */
--- /dev/null
+/*
+ * include/asm-microblaze/unistd.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ */
+
+#ifndef _ASM_UNISTD_H
+#define _ASM_UNISTD_H
+
+/* Syscall numbering appears to follow the i386 table; x86-only slots
+ * (vm86old, modify_ldt, iopl, ioperm, ...) are kept as placeholders so
+ * the numbers stay aligned. */
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86old 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_vm86 166
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188 /* some people actually want streams */
+#define __NR_putpmsg 189 /* some people actually want streams */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_madvise1 219 /* delete when C lib stub is removed */
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+/* 222 and 223 are unused */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+#define __NR_set_thread_area 243
+#define __NR_get_thread_area 244
+#define __NR_io_setup 245
+#define __NR_io_destroy 246
+#define __NR_io_getevents 247
+#define __NR_io_submit 248
+#define __NR_io_cancel 249
+#define __NR_fadvise64 250
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
+#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+#define __NR_epoll_create 254
+#define __NR_epoll_ctl 255
+#define __NR_epoll_wait 256
+#define __NR_remap_file_pages 257
+#define __NR_set_tid_address 258
+#define __NR_timer_create 259
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_statfs64 268
+#define __NR_fstatfs64 269
+#define __NR_tgkill 270
+#define __NR_utimes 271
+#define __NR_fadvise64_64 272
+#define __NR_vserver 273
+#define __NR_mbind 274
+#define __NR_get_mempolicy 275
+#define __NR_set_mempolicy 276
+#define __NR_mq_open 277
+#define __NR_mq_unlink (__NR_mq_open+1)
+#define __NR_mq_timedsend (__NR_mq_open+2)
+#define __NR_mq_timedreceive (__NR_mq_open+3)
+#define __NR_mq_notify (__NR_mq_open+4)
+#define __NR_mq_getsetattr (__NR_mq_open+5)
+#define __NR_kexec_load 283
+#define __NR_waitid 284
+/* #define __NR_sys_setaltroot 285 */
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_fstatat64 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
+#define __NR_unshare 310
+#define __NR_set_robust_list 311
+#define __NR_get_robust_list 312
+#define __NR_splice 313
+#define __NR_sync_file_range 314
+#define __NR_tee 315
+#define __NR_vmsplice 316
+
+#define NR_syscalls 317
+
+/*
+ * User-visible error numbers are in the range -1 to -128: see
+ * <asm-generic/errno.h>.
+ *
+ * Both i386 and microblaze use the generic version of errno.h, and the
+ * generic errno.h defines more than 128 error numbers, so it is not
+ * clear that the -128 bound above still holds.
+ *
+ * The following code is taken from the MicroBlaze 2.4 port.
+ */
+/*
+ * Turn a raw syscall result into the C library convention: values in
+ * [-125, -1] are treated as errors, so store the positive error code
+ * in errno and return -1; otherwise return the result unchanged.
+ */
+#define __syscall_return(type, res) \
+do { \
+ /* user-visible error numbers are in the range -1 - -125: \
+ see <asm-microblaze/errno.h> */ \
+ if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+ errno = -(res); \
+ res = -1; \
+ } \
+ return (type) (res); \
+} while (0)
+
+/*
+ * _syscallN(type, name, ...): expand to a C wrapper for system call
+ * 'name' taking N arguments.  Convention used by these stubs:
+ * arguments are placed in r5-r10, the syscall number in r12, the
+ * kernel is entered with 'brki r14, 0x8', and the result comes back
+ * in r3.  All listed registers plus the condition codes are clobbered.
+ */
+
+/* Wrapper for a syscall taking no arguments. */
+#define _syscall0(type, name) \
+type name (void) \
+{ \
+ long __ret; \
+ asm volatile ("addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+/* Wrapper for a syscall taking one argument (in r5). */
+#define _syscall1(type, name, type1, arg1) \
+type name (type1 arg1) \
+{ \
+ long __ret; \
+ asm volatile ("addk r5, r0, %2 \n\t" \
+ "addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name), \
+ "r" ((long)arg1) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+/* Wrapper for a syscall taking two arguments (r5, r6). */
+#define _syscall2(type, name, type1, arg1, type2, arg2) \
+type name (type1 arg1, type2 arg2) \
+{ \
+ long __ret; \
+ asm volatile ("addk r5, r0, %2 \n\t" \
+ "addk r6, r0, %3 \n\t" \
+ "addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name), \
+ "r" ((long)arg1), \
+ "r" ((long)arg2) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+/* Wrapper for a syscall taking three arguments (r5-r7). */
+#define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
+type name (type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ long __ret; \
+ asm volatile ("addk r5, r0, %2 \n\t" \
+ "addk r6, r0, %3 \n\t" \
+ "addk r7, r0, %4 \n\t" \
+ "addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name), \
+ "r" ((long)arg1), \
+ "r" ((long)arg2), \
+ "r" ((long)arg3) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+/* Wrapper for a syscall taking four arguments (r5-r8). */
+#define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ long __ret; \
+ asm volatile ("addk r5, r0, %2 \n\t" \
+ "addk r6, r0, %3 \n\t" \
+ "addk r7, r0, %4 \n\t" \
+ "addk r8, r0, %5 \n\t" \
+ "addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name), \
+ "r" ((long)arg1), \
+ "r" ((long)arg2), \
+ "r" ((long)arg3), \
+ "r" ((long)arg4) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+/* Wrapper for a syscall taking five arguments (r5-r9). */
+#define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ long __ret; \
+ asm volatile ("addk r5, r0, %2 \n\t" \
+ "addk r6, r0, %3 \n\t" \
+ "addk r7, r0, %4 \n\t" \
+ "addk r8, r0, %5 \n\t" \
+ "addk r9, r0, %6 \n\t" \
+ "addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name), \
+ "r" ((long)arg1), \
+ "r" ((long)arg2), \
+ "r" ((long)arg3), \
+ "r" ((long)arg4), \
+ "r" ((long)arg5) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+/* Wrapper for a syscall taking six arguments (r5-r10). */
+#define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5, type6, arg6) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+ long __ret; \
+ asm volatile ("addk r5, r0, %2 \n\t" \
+ "addk r6, r0, %3 \n\t" \
+ "addk r7, r0, %4 \n\t" \
+ "addk r8, r0, %5 \n\t" \
+ "addk r9, r0, %6 \n\t" \
+ "addk r10, r0, %7 \n\t" \
+ "addik r12, r0, %1 \n\t" \
+ "brki r14, 0x8 \n\t" \
+ "addk %0, r3, r0 \n\t" \
+ : "=r" (__ret) \
+ : "i" (__NR_##name), \
+ "r" ((long)arg1), \
+ "r" ((long)arg2), \
+ "r" ((long)arg3), \
+ "r" ((long)arg4), \
+ "r" ((long)arg5), \
+ "r" ((long)arg6) \
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12", "r14", "cc"); \
+ __syscall_return (type, __ret); \
+}
+
+#ifdef __KERNEL_SYSCALLS__
+
+/* In-kernel execve helper, exposed only to __KERNEL_SYSCALLS__ users. */
+extern long execve(const char *filename, char **argv, char **envp);
+
+#endif /* __KERNEL_SYSCALLS__ */
+
+#ifdef __KERNEL__
+/*
+ * Feature selectors telling generic kernel code which compatibility
+ * syscall implementations this architecture wants.  The commented-out
+ * entries are deliberately not enabled for this port.
+ */
+#define __ARCH_WANT_IPC_PARSE_VERSION
+//#define __ARCH_WANT_OLD_READDIR
+//#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+//#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+//#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+
+#endif /* _ASM_UNISTD_H */
--- /dev/null
+/*
+ * include/asm-microblaze/user.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * Authors:
+ * Yasushi SHOJI <yashi@atmark-techno.com>
+ * Tetsuya OHKAWA <tetsuya@atmark-techno.com>
+ */
+
+#ifndef _ASM_USER_H
+#define _ASM_USER_H
+
+/* Intentionally empty: nothing is defined for this port here.
+   NOTE(review): confirm no caller expects a 'struct user' from this
+   header before relying on it. */
+
+#endif /* _ASM_USER_H */
--- /dev/null
+/* $Id: xbasic_types.h,v 1.1.2.1 2007/03/13 15:45:51 akondratenko Exp $ */
+/******************************************************************************
+*
+* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+* FOR A PARTICULAR PURPOSE.
+*
+* (c) Copyright 2002-2004 Xilinx Inc.
+* All rights reserved.
+*
+******************************************************************************/
+/*****************************************************************************/
+/**
+*
+* @file xbasic_types.h
+*
+* This file contains basic types for Xilinx software IP. These types do not
+* follow the standard naming convention with respect to using the component
+* name in front of each name because they are considered to be primitives.
+*
+* @note
+*
+* This file contains items which are architecture dependent.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver Who Date Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a rmm 12/14/01 First release
+* rmm 05/09/03 Added "xassert always" macros to rid ourselves of diab
+* compiler warnings
+* 1.00a rpm 11/07/03 Added XNullHandler function as a stub interrupt handler
+* 1.00a rpm 07/21/04 Added XExceptionHandler typedef for processor exceptions
+* 1.00a xd 11/03/04 Improved support for doxygen.
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XBASIC_TYPES_H /* prevent circular inclusions */
+#define XBASIC_TYPES_H /* by using protection macros */
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+
+/************************** Constant Definitions *****************************/
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define XCOMPONENT_IS_READY 0x11111111 /**< component has been initialized */
+#define XCOMPONENT_IS_STARTED 0x22222222 /**< component has been started */
+
+/* the following constants and declarations are for unit test purposes and are
+ * designed to be used in test applications.
+ */
+#define XTEST_PASSED 0
+#define XTEST_FAILED 1
+
+#define XASSERT_NONE 0 /**< no assert has occurred */
+#define XASSERT_OCCURRED 1 /**< an assert condition failed */
+
+/* Global assert state and the handler invoked with (__FILE__, __LINE__)
+   when an assert fires; see the XASSERT_* macros below. */
+extern unsigned int XAssertStatus;
+extern void XAssert(char *, int);
+
+/**************************** Type Definitions *******************************/
+/**
+ * This data type defines an interrupt handler for a device.
+ * The argument points to the instance of the component
+ */
+typedef void (*XInterruptHandler)(void *InstancePtr);
+
+/**
+ * This data type defines an exception handler for a processor.
+ * The argument points to the instance of the component
+ */
+typedef void (*XExceptionHandler)(void *InstancePtr);
+
+/**
+ * This data type defines a callback to be invoked when an
+ * assert occurs. The callback is invoked only when asserts are enabled
+ */
+typedef void (*XAssertCallback)(char* FilenamePtr, int LineNumber);
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+#ifndef NDEBUG
+
+/*****************************************************************************/
+/**
+* This assert macro is to be used for functions that do not return anything
+* (void). This in conjunction with the XWaitInAssert boolean can be used to
+* accommodate tests so that asserts which fail allow execution to continue.
+*
+* @param expression is the expression to evaluate. If it evaluates to
+* false, the assert occurs.
+*
+* @return Returns void unless the XWaitInAssert variable is true, in which
+* case no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+/* NOTE(review): expands to a bare brace block rather than
+   do { ... } while (0), so 'if (c) XASSERT_VOID(e); else ...'
+   will not parse as intended. */
+#define XASSERT_VOID(expression) \
+{ \
+ if (expression) \
+ { \
+ XAssertStatus = XASSERT_NONE; \
+ } \
+ else \
+ { \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return; \
+ } \
+}
+
+/*****************************************************************************/
+/**
+* This assert macro is to be used for functions that do return a value. This in
+* conjunction with the XWaitInAssert boolean can be used to accommodate tests so
+* that asserts which fail allow execution to continue.
+*
+* @param expression is the expression to evaluate. If it evaluates to false,
+* the assert occurs.
+*
+* @return Returns 0 unless the XWaitInAssert variable is true, in which case
+* no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+/* NOTE(review): expands to a bare brace block rather than
+   do { ... } while (0), so an unbraced if/else around this macro
+   will not parse as intended. */
+#define XASSERT_NONVOID(expression) \
+{ \
+ if (expression) \
+ { \
+ XAssertStatus = XASSERT_NONE; \
+ } \
+ else \
+ { \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return 0; \
+ } \
+}
+
+/*****************************************************************************/
+/**
+* Always assert. This assert macro is to be used for functions that do not
+* return anything (void). Use for instances where an assert should always
+* occur.
+*
+* @return Returns void unless the XWaitInAssert variable is true, in which case
+* no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+/* NOTE(review): bare brace block, not do { } while (0); same
+   dangling-else hazard as the conditional assert macros. */
+#define XASSERT_VOID_ALWAYS() \
+{ \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return; \
+}
+
+/*****************************************************************************/
+/**
+* Always assert. This assert macro is to be used for functions that do return
+* a value. Use for instances where an assert should always occur.
+*
+* @return Returns void unless the XWaitInAssert variable is true, in which case
+* no return is made and an infinite loop is entered.
+*
+* @note None.
+*
+******************************************************************************/
+/* NOTE(review): bare brace block, not do { } while (0); same
+   dangling-else hazard as the conditional assert macros. */
+#define XASSERT_NONVOID_ALWAYS() \
+{ \
+ XAssert(__FILE__, __LINE__); \
+ XAssertStatus = XASSERT_OCCURRED; \
+ return 0; \
+}
+
+
+#else
+
+/* NDEBUG defined: all asserts compile away to nothing. */
+#define XASSERT_VOID(expression)
+#define XASSERT_VOID_ALWAYS()
+#define XASSERT_NONVOID(expression)
+#define XASSERT_NONVOID_ALWAYS()
+#endif
+
+/************************** Function Prototypes ******************************/
+
+void XAssertSetCallback(XAssertCallback Routine);
+void XNullHandler(void *NullParameter);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+
--- /dev/null
+
+/*******************************************************************
+*
+* CAUTION: This file is automatically generated by libgen.
+* Version: Xilinx EDK 8.2.02 EDK_Im_Sp2.4
+* DO NOT EDIT.
+*
+* Copyright (c) 2005 Xilinx, Inc. All rights reserved.
+*
+* Description: Driver parameters
+*
+*******************************************************************/
+
+#define STDIN_BASEADDRESS 0x40600000
+#define STDOUT_BASEADDRESS 0x40600000
+
+/******************************************************************/
+
+
+/* Definitions for peripheral DLMB_CNTLR */
+#define XPAR_DLMB_CNTLR_BASEADDR 0x00000000
+#define XPAR_DLMB_CNTLR_HIGHADDR 0x00001FFF
+
+
+/* Definitions for peripheral ILMB_CNTLR */
+#define XPAR_ILMB_CNTLR_BASEADDR 0x00000000
+#define XPAR_ILMB_CNTLR_HIGHADDR 0x00001FFF
+
+
+/******************************************************************/
+
+/* Definitions for driver OPBARB */
+#define XPAR_XOPBARB_NUM_INSTANCES 1
+
+/* Definitions for peripheral MB_OPB */
+#define XPAR_MB_OPB_BASEADDR 0xFFFFFFFF
+#define XPAR_MB_OPB_HIGHADDR 0x00000000
+#define XPAR_MB_OPB_DEVICE_ID 0
+#define XPAR_MB_OPB_NUM_MASTERS 2
+
+
+/******************************************************************/
+
+/* Definitions for driver UARTLITE */
+#define XPAR_XUARTLITE_NUM_INSTANCES 2
+
+/* Definitions for peripheral DEBUG_MODULE */
+#define XPAR_DEBUG_MODULE_BASEADDR 0x41400000
+#define XPAR_DEBUG_MODULE_HIGHADDR 0x4140FFFF
+#define XPAR_DEBUG_MODULE_DEVICE_ID 0
+#define XPAR_DEBUG_MODULE_BAUDRATE 0
+#define XPAR_DEBUG_MODULE_USE_PARITY 0
+#define XPAR_DEBUG_MODULE_ODD_PARITY 0
+#define XPAR_DEBUG_MODULE_DATA_BITS 0
+
+
+/* Definitions for peripheral RS232_UART */
+#define XPAR_RS232_UART_BASEADDR 0x40600000
+#define XPAR_RS232_UART_HIGHADDR 0x4060FFFF
+#define XPAR_RS232_UART_DEVICE_ID 1
+#define XPAR_RS232_UART_BAUDRATE 9600
+#define XPAR_RS232_UART_USE_PARITY 0
+#define XPAR_RS232_UART_ODD_PARITY 0
+#define XPAR_RS232_UART_DATA_BITS 8
+
+
+/******************************************************************/
+
+
+/* Definitions for peripheral DDR_SDRAM_64MX32 */
+#define XPAR_DDR_SDRAM_64MX32_MEM0_BASEADDR 0x24000000
+#define XPAR_DDR_SDRAM_64MX32_MEM0_HIGHADDR 0x27FFFFFF
+
+
+/******************************************************************/
+
+/* Definitions for driver EMAC */
+#define XPAR_XEMAC_NUM_INSTANCES 1
+
+/* Definitions for peripheral ETHERNET_MAC */
+#define XPAR_ETHERNET_MAC_BASEADDR 0x40C00000
+#define XPAR_ETHERNET_MAC_HIGHADDR 0x40C0FFFF
+#define XPAR_ETHERNET_MAC_DEVICE_ID 0
+#define XPAR_ETHERNET_MAC_ERR_COUNT_EXIST 1
+#define XPAR_ETHERNET_MAC_DMA_PRESENT 1
+#define XPAR_ETHERNET_MAC_MII_EXIST 1
+#define XPAR_ETHERNET_MAC_CAM_EXIST 0
+#define XPAR_ETHERNET_MAC_JUMBO_EXIST 0
+#define XPAR_ETHERNET_MAC_TX_DRE_TYPE 0
+#define XPAR_ETHERNET_MAC_RX_DRE_TYPE 0
+#define XPAR_ETHERNET_MAC_TX_INCLUDE_CSUM 0
+#define XPAR_ETHERNET_MAC_RX_INCLUDE_CSUM 0
+
+
+/******************************************************************/
+
+/* Definitions for driver TMRCTR */
+#define XPAR_XTMRCTR_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_TIMER_1 */
+#define XPAR_OPB_TIMER_1_BASEADDR 0x41C00000
+#define XPAR_OPB_TIMER_1_HIGHADDR 0x41C0FFFF
+#define XPAR_OPB_TIMER_1_DEVICE_ID 0
+
+
+/******************************************************************/
+
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS 3
+#define XPAR_XINTC_HAS_IPR 1
+#define XPAR_XINTC_USE_DCR 0
+/* Definitions for driver INTC */
+#define XPAR_XINTC_NUM_INSTANCES 1
+
+/* Definitions for peripheral OPB_INTC_0 */
+#define XPAR_OPB_INTC_0_BASEADDR 0x41200000
+#define XPAR_OPB_INTC_0_HIGHADDR 0x4120FFFF
+#define XPAR_OPB_INTC_0_DEVICE_ID 0
+#define XPAR_OPB_INTC_0_KIND_OF_INTR 0x00000004
+
+
+/******************************************************************/
+
+#define XPAR_INTC_SINGLE_BASEADDR 0x41200000
+#define XPAR_INTC_SINGLE_HIGHADDR 0x4120FFFF
+#define XPAR_INTC_SINGLE_DEVICE_ID XPAR_OPB_INTC_0_DEVICE_ID
+#define XPAR_OPB_TIMER_1_INTERRUPT_MASK 0X000001
+#define XPAR_OPB_INTC_0_OPB_TIMER_1_INTERRUPT_INTR 0
+#define XPAR_ETHERNET_MAC_IP2INTC_IRPT_MASK 0X000002
+#define XPAR_OPB_INTC_0_ETHERNET_MAC_IP2INTC_IRPT_INTR 1
+#define XPAR_RS232_UART_INTERRUPT_MASK 0X000004
+#define XPAR_OPB_INTC_0_RS232_UART_INTERRUPT_INTR 2
+
+/******************************************************************/
+
+#define XPAR_CPU_CORE_CLOCK_FREQ_HZ 100000000
+
+/******************************************************************/
+
+
+/* Definitions for peripheral MICROBLAZE_0 */
+#define XPAR_MICROBLAZE_0_SCO 0
+#define XPAR_MICROBLAZE_0_DATA_SIZE 32
+#define XPAR_MICROBLAZE_0_DYNAMIC_BUS_SIZING 1
+#define XPAR_MICROBLAZE_0_D_OPB 1
+#define XPAR_MICROBLAZE_0_D_LMB 1
+#define XPAR_MICROBLAZE_0_I_OPB 1
+#define XPAR_MICROBLAZE_0_I_LMB 1
+#define XPAR_MICROBLAZE_0_USE_MSR_INSTR 1
+#define XPAR_MICROBLAZE_0_USE_PCMP_INSTR 1
+#define XPAR_MICROBLAZE_0_USE_BARREL 1
+#define XPAR_MICROBLAZE_0_USE_DIV 1
+#define XPAR_MICROBLAZE_0_USE_HW_MUL 1
+#define XPAR_MICROBLAZE_0_USE_FPU 0
+#define XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS 0
+#define XPAR_MICROBLAZE_0_ILL_OPCODE_EXCEPTION 0
+#define XPAR_MICROBLAZE_0_IOPB_BUS_EXCEPTION 0
+#define XPAR_MICROBLAZE_0_DOPB_BUS_EXCEPTION 0
+#define XPAR_MICROBLAZE_0_DIV_ZERO_EXCEPTION 0
+#define XPAR_MICROBLAZE_0_FPU_EXCEPTION 0
+#define XPAR_MICROBLAZE_0_PVR 0
+#define XPAR_MICROBLAZE_0_PVR_USER1 0x00
+#define XPAR_MICROBLAZE_0_PVR_USER2 0x00000000
+#define XPAR_MICROBLAZE_0_DEBUG_ENABLED 1
+#define XPAR_MICROBLAZE_0_NUMBER_OF_PC_BRK 2
+#define XPAR_MICROBLAZE_0_NUMBER_OF_RD_ADDR_BRK 0
+#define XPAR_MICROBLAZE_0_NUMBER_OF_WR_ADDR_BRK 0
+#define XPAR_MICROBLAZE_0_INTERRUPT_IS_EDGE 0
+#define XPAR_MICROBLAZE_0_EDGE_IS_POSITIVE 1
+#define XPAR_MICROBLAZE_0_RESET_MSR 0x00000000
+#define XPAR_MICROBLAZE_0_OPCODE_0X0_ILLEGAL 0
+#define XPAR_MICROBLAZE_0_FSL_LINKS 0
+#define XPAR_MICROBLAZE_0_FSL_DATA_SIZE 32
+#define XPAR_MICROBLAZE_0_ICACHE_BASEADDR 0x00000000
+#define XPAR_MICROBLAZE_0_ICACHE_HIGHADDR 0x3FFFFFFF
+#define XPAR_MICROBLAZE_0_USE_ICACHE 0
+#define XPAR_MICROBLAZE_0_ALLOW_ICACHE_WR 1
+#define XPAR_MICROBLAZE_0_ADDR_TAG_BITS 0
+#define XPAR_MICROBLAZE_0_CACHE_BYTE_SIZE 8192
+#define XPAR_MICROBLAZE_0_ICACHE_USE_FSL 1
+#define XPAR_MICROBLAZE_0_ICACHE_LINE_LEN 4
+#define XPAR_MICROBLAZE_0_DCACHE_BASEADDR 0x00000000
+#define XPAR_MICROBLAZE_0_DCACHE_HIGHADDR 0x3FFFFFFF
+#define XPAR_MICROBLAZE_0_USE_DCACHE 0
+#define XPAR_MICROBLAZE_0_ALLOW_DCACHE_WR 1
+#define XPAR_MICROBLAZE_0_DCACHE_ADDR_TAG 0
+#define XPAR_MICROBLAZE_0_DCACHE_BYTE_SIZE 8192
+#define XPAR_MICROBLAZE_0_DCACHE_USE_FSL 1
+#define XPAR_MICROBLAZE_0_DCACHE_LINE_LEN 4
+
+/******************************************************************/
+
--- /dev/null
+/********************************************************************
+ *
+ * CAUTION: This file is automatically generated by libgen.
+ * Version: Xilinx EDK 8.2.02 EDK_Im_Sp2.4
+ *
+ * Copyright (c) 2007 Xilinx, Inc. All rights reserved.
+ *
+ * Description: sp3e Driver Parameters
+ *
+ * NOTE:
+ * Mappings at the end of the file have been added manually.
+ *
+ ********************************************************************/
+
+/* System Clock Frequency */
+#define XPAR_CPU_CLOCK_FREQ 66666667
+
+/* Processor ability to generate exceptions when in delay slot */
+#define XPAR_MICROBLAZE_0_EX_HANDLE_DELAY_SLOT 0
+
+/* Definitions for MICROBLAZE_0 */
+#define XPAR_MICROBLAZE_0_INSTANCE "microblaze_0"
+#define XPAR_MICROBLAZE_0_FAMILY "spartan3e"
+#define XPAR_MICROBLAZE_0_D_OPB 1
+#define XPAR_MICROBLAZE_0_D_LMB 1
+#define XPAR_MICROBLAZE_0_I_OPB 1
+#define XPAR_MICROBLAZE_0_I_LMB 1
+#define XPAR_MICROBLAZE_0_USE_BARREL 1
+#define XPAR_MICROBLAZE_0_USE_DIV 1
+#define XPAR_MICROBLAZE_0_USE_HW_MUL 1
+#define XPAR_MICROBLAZE_0_USE_FPU 0
+#define XPAR_MICROBLAZE_0_USE_MSR_INSTR 1
+#define XPAR_MICROBLAZE_0_USE_PCMP_INSTR 1
+#define XPAR_MICROBLAZE_0_UNALIGNED_EXCEPTIONS 1
+#define XPAR_MICROBLAZE_0_ILL_OPCODE_EXCEPTION 1
+#define XPAR_MICROBLAZE_0_IOPB_BUS_EXCEPTION 1
+#define XPAR_MICROBLAZE_0_DOPB_BUS_EXCEPTION 1
+#define XPAR_MICROBLAZE_0_DIV_ZERO_EXCEPTION 1
+#define XPAR_MICROBLAZE_0_FPU_EXCEPTION 1
+#define XPAR_MICROBLAZE_0_DEBUG_ENABLED 1
+#define XPAR_MICROBLAZE_0_NUMBER_OF_PBRK 2
+#define XPAR_MICROBLAZE_0_NUMBER_OF_RD_ADDR_BRK 0
+#define XPAR_MICROBLAZE_0_NUMBER_OF_WR_ADDR_BRK 0
+#define XPAR_MICROBLAZE_0_INTERRUPT_IS_EDGE 0
+#define XPAR_MICROBLAZE_0_EDGE_IS_POSITIVE 1
+#define XPAR_MICROBLAZE_0_FSL_LINKS 1
+#define XPAR_MICROBLAZE_0_FSL_DATA_SIZE 32
+#define XPAR_MICROBLAZE_0_ICACHE_BASEADDR 0x24000000
+#define XPAR_MICROBLAZE_0_ICACHE_HIGHADDR 0x25FFFFFF
+#define XPAR_MICROBLAZE_0_USE_ICACHE 1
+#define XPAR_MICROBLAZE_0_ALLOW_ICACHE_WR 1
+#define XPAR_MICROBLAZE_0_ADDR_TAG_BITS 14
+#define XPAR_MICROBLAZE_0_CACHE_BYTE_SIZE 2048
+#define XPAR_MICROBLAZE_0_ICACHE_USE_FSL 1
+#define XPAR_MICROBLAZE_0_DCACHE_BASEADDR 0x24000000
+#define XPAR_MICROBLAZE_0_DCACHE_HIGHADDR 0x25FFFFFF
+#define XPAR_MICROBLAZE_0_USE_DCACHE 1
+#define XPAR_MICROBLAZE_0_ALLOW_DCACHE_WR 1
+#define XPAR_MICROBLAZE_0_DCACHE_ADDR_TAG 12
+#define XPAR_MICROBLAZE_0_DCACHE_BYTE_SIZE 8192
+#define XPAR_MICROBLAZE_0_DCACHE_USE_FSL 1
+#define XPAR_MICROBLAZE_0_HW_VER "4.00.b"
+
+/* Definitions for LMB_BRAM_IF_CNTLR_0 */
+#define XPAR_LMB_BRAM_IF_CNTLR_0_INSTANCE "dlmb_cntlr"
+#define XPAR_LMB_BRAM_IF_CNTLR_0_BASEADDR 0x00000000
+#define XPAR_LMB_BRAM_IF_CNTLR_0_HIGHADDR 0x00001FFF
+#define XPAR_LMB_BRAM_IF_CNTLR_0_MASK 0x45000000
+#define XPAR_LMB_BRAM_IF_CNTLR_0_LMB_AWIDTH 32
+#define XPAR_LMB_BRAM_IF_CNTLR_0_LMB_DWIDTH 32
+#define XPAR_LMB_BRAM_IF_CNTLR_0_HW_VER "1.00.b"
+
+/* Definitions for LMB_BRAM_IF_CNTLR_1 */
+#define XPAR_LMB_BRAM_IF_CNTLR_1_INSTANCE "ilmb_cntlr"
+#define XPAR_LMB_BRAM_IF_CNTLR_1_BASEADDR 0x00000000
+#define XPAR_LMB_BRAM_IF_CNTLR_1_HIGHADDR 0x00001FFF
+#define XPAR_LMB_BRAM_IF_CNTLR_1_MASK 0x45000000
+#define XPAR_LMB_BRAM_IF_CNTLR_1_LMB_AWIDTH 32
+#define XPAR_LMB_BRAM_IF_CNTLR_1_LMB_DWIDTH 32
+#define XPAR_LMB_BRAM_IF_CNTLR_1_HW_VER "1.00.b"
+
+/* Definitions for OPB_V20_0 */
+#define XPAR_OPB_V20_0_INSTANCE "mb_opb"
+#define XPAR_OPB_V20_0_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_V20_0_HIGHADDR 0x00000000
+#define XPAR_OPB_V20_0_OPB_AWIDTH 32
+#define XPAR_OPB_V20_0_OPB_DWIDTH 32
+#define XPAR_OPB_V20_0_NUM_MASTERS 2
+#define XPAR_OPB_V20_0_NUM_SLAVES 15
+#define XPAR_OPB_V20_0_USE_LUT_OR 1
+#define XPAR_OPB_V20_0_EXT_RESET_HIGH 1
+#define XPAR_OPB_V20_0_DYNAM_PRIORITY 0
+#define XPAR_OPB_V20_0_PARK 0
+#define XPAR_OPB_V20_0_PROINTRFCE 0
+#define XPAR_OPB_V20_0_REG_GRANTS 1
+#define XPAR_OPB_V20_0_DEV_BLK_ID 0
+#define XPAR_OPB_V20_0_DEV_MIR_ENABLE 0
+#define XPAR_OPB_V20_0_HW_VER "1.10.c"
+
+/* Definitions for OPB_MDM_0 */
+#define XPAR_OPB_MDM_0_INSTANCE "debug_module"
+#define XPAR_OPB_MDM_0_BASEADDR 0x41400000
+#define XPAR_OPB_MDM_0_HIGHADDR 0x4140FFFF
+#define XPAR_OPB_MDM_0_OPB_DWIDTH 32
+#define XPAR_OPB_MDM_0_OPB_AWIDTH 32
+#define XPAR_OPB_MDM_0_FAMILY "spartan3e"
+#define XPAR_OPB_MDM_0_MB_DBG_PORTS 1
+#define XPAR_OPB_MDM_0_USE_UART 1
+#define XPAR_OPB_MDM_0_UART_WIDTH 8
+#define XPAR_OPB_MDM_0_WRITE_FSL_PORTS 0
+#define XPAR_OPB_MDM_0_HW_VER "2.00.a"
+
+/* Definitions for OPB_UARTLITE_0 */
+#define XPAR_OPB_UARTLITE_0_INSTANCE "RS232_DTE"
+#define XPAR_OPB_UARTLITE_0_BASEADDR 0x40600000
+#define XPAR_OPB_UARTLITE_0_HIGHADDR 0x4060FFFF
+#define XPAR_OPB_UARTLITE_0_OPB_DWIDTH 32
+#define XPAR_OPB_UARTLITE_0_OPB_AWIDTH 32
+#define XPAR_OPB_UARTLITE_0_DATA_BITS 8
+#define XPAR_OPB_UARTLITE_0_CLK_FREQ 66666667
+#define XPAR_OPB_UARTLITE_0_BAUDRATE 9600
+#define XPAR_OPB_UARTLITE_0_USE_PARITY 0
+#define XPAR_OPB_UARTLITE_0_ODD_PARITY 0
+#define XPAR_OPB_UARTLITE_0_HW_VER "1.00.b"
+
+/* Definitions for OPB_GPIO_0 */
+#define XPAR_OPB_GPIO_0_INSTANCE "LEDs_6Bit"
+#define XPAR_OPB_GPIO_0_BASEADDR 0x40000000
+#define XPAR_OPB_GPIO_0_HIGHADDR 0x4000FFFF
+#define XPAR_OPB_GPIO_0_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_0_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_0_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_0_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_0_GPIO_WIDTH 6
+#define XPAR_OPB_GPIO_0_ALL_INPUTS 0
+#define XPAR_OPB_GPIO_0_INTERRUPT_PRESENT 0
+#define XPAR_OPB_GPIO_0_IS_BIDIR 0
+#define XPAR_OPB_GPIO_0_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_0_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_0_IS_DUAL 0
+#define XPAR_OPB_GPIO_0_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_0_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_0_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_0_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_0_HW_VER "3.01.b"
+
+/* Definitions for OPB_GPIO_1 */
+#define XPAR_OPB_GPIO_1_INSTANCE "LEDs_1Bit"
+#define XPAR_OPB_GPIO_1_BASEADDR 0x40020000
+#define XPAR_OPB_GPIO_1_HIGHADDR 0x4002FFFF
+#define XPAR_OPB_GPIO_1_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_1_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_1_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_1_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_1_GPIO_WIDTH 1
+#define XPAR_OPB_GPIO_1_ALL_INPUTS 0
+#define XPAR_OPB_GPIO_1_INTERRUPT_PRESENT 0
+#define XPAR_OPB_GPIO_1_IS_BIDIR 0
+#define XPAR_OPB_GPIO_1_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_1_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_1_IS_DUAL 0
+#define XPAR_OPB_GPIO_1_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_1_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_1_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_1_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_1_HW_VER "3.01.b"
+
+/* Definitions for OPB_GPIO_2 */
+#define XPAR_OPB_GPIO_2_INSTANCE "DIP_Switches_4Bit"
+#define XPAR_OPB_GPIO_2_BASEADDR 0x40040000
+#define XPAR_OPB_GPIO_2_HIGHADDR 0x4004FFFF
+#define XPAR_OPB_GPIO_2_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_2_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_2_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_2_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_2_GPIO_WIDTH 4
+#define XPAR_OPB_GPIO_2_ALL_INPUTS 1
+#define XPAR_OPB_GPIO_2_INTERRUPT_PRESENT 0
+#define XPAR_OPB_GPIO_2_IS_BIDIR 0
+#define XPAR_OPB_GPIO_2_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_2_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_2_IS_DUAL 0
+#define XPAR_OPB_GPIO_2_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_2_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_2_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_2_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_2_HW_VER "3.01.b"
+
+/* Definitions for OPB_GPIO_3 */
+#define XPAR_OPB_GPIO_3_INSTANCE "Buttons_3Bit"
+#define XPAR_OPB_GPIO_3_BASEADDR 0x40060000
+#define XPAR_OPB_GPIO_3_HIGHADDR 0x4006FFFF
+#define XPAR_OPB_GPIO_3_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_3_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_3_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_3_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_3_GPIO_WIDTH 3
+#define XPAR_OPB_GPIO_3_ALL_INPUTS 1
+#define XPAR_OPB_GPIO_3_INTERRUPT_PRESENT 0
+#define XPAR_OPB_GPIO_3_IS_BIDIR 0
+#define XPAR_OPB_GPIO_3_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_3_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_3_IS_DUAL 0
+#define XPAR_OPB_GPIO_3_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_3_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_3_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_3_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_3_HW_VER "3.01.b"
+
+/* Definitions for OPB_GPIO_4 */
+#define XPAR_OPB_GPIO_4_INSTANCE "Character_LCD_2x16"
+#define XPAR_OPB_GPIO_4_BASEADDR 0x40080000
+#define XPAR_OPB_GPIO_4_HIGHADDR 0x4008FFFF
+#define XPAR_OPB_GPIO_4_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_4_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_4_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_4_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_4_GPIO_WIDTH 7
+#define XPAR_OPB_GPIO_4_ALL_INPUTS 0
+#define XPAR_OPB_GPIO_4_INTERRUPT_PRESENT 0
+#define XPAR_OPB_GPIO_4_IS_BIDIR 1
+#define XPAR_OPB_GPIO_4_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_4_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_4_IS_DUAL 0
+#define XPAR_OPB_GPIO_4_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_4_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_4_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_4_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_4_HW_VER "3.01.b"
+
+/* Definitions for OPB_GPIO_5 */
+#define XPAR_OPB_GPIO_5_INSTANCE "J4_IO_4Bit"
+#define XPAR_OPB_GPIO_5_BASEADDR 0x400A0000
+#define XPAR_OPB_GPIO_5_HIGHADDR 0x400AFFFF
+#define XPAR_OPB_GPIO_5_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_5_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_5_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_5_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_5_GPIO_WIDTH 4
+#define XPAR_OPB_GPIO_5_ALL_INPUTS 0
+#define XPAR_OPB_GPIO_5_INTERRUPT_PRESENT 0
+#define XPAR_OPB_GPIO_5_IS_BIDIR 0
+#define XPAR_OPB_GPIO_5_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_5_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_5_IS_DUAL 0
+#define XPAR_OPB_GPIO_5_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_5_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_5_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_5_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_5_HW_VER "3.01.b"
+
+/* Definitions for OPB_GPIO_6 */
+#define XPAR_OPB_GPIO_6_INSTANCE "Rotary_Encoder"
+#define XPAR_OPB_GPIO_6_BASEADDR 0x400C0000
+#define XPAR_OPB_GPIO_6_HIGHADDR 0x400CFFFF
+#define XPAR_OPB_GPIO_6_USER_ID_CODE 3
+#define XPAR_OPB_GPIO_6_OPB_AWIDTH 32
+#define XPAR_OPB_GPIO_6_OPB_DWIDTH 32
+#define XPAR_OPB_GPIO_6_FAMILY "spartan3e"
+#define XPAR_OPB_GPIO_6_GPIO_WIDTH 3
+#define XPAR_OPB_GPIO_6_ALL_INPUTS 1
+#define XPAR_OPB_GPIO_6_INTERRUPT_PRESENT 1
+#define XPAR_OPB_GPIO_6_IS_BIDIR 0
+#define XPAR_OPB_GPIO_6_DOUT_DEFAULT 0x00000000
+#define XPAR_OPB_GPIO_6_TRI_DEFAULT 0xFFFFFFFF
+#define XPAR_OPB_GPIO_6_IS_DUAL 0
+#define XPAR_OPB_GPIO_6_ALL_INPUTS_2 0
+#define XPAR_OPB_GPIO_6_IS_BIDIR_2 1
+#define XPAR_OPB_GPIO_6_DOUT_DEFAULT_2 0x00000000
+#define XPAR_OPB_GPIO_6_TRI_DEFAULT_2 0xFFFFFFFF
+#define XPAR_OPB_GPIO_6_HW_VER "3.01.b"
+
+/* Definitions for OPB_EMC_0 */
+#define XPAR_OPB_EMC_0_INSTANCE "FLASH_16Mx8"
+#define XPAR_OPB_EMC_0_NUM_BANKS_MEM 1
+#define XPAR_OPB_EMC_0_INCLUDE_BURST 0
+#define XPAR_OPB_EMC_0_INCLUDE_NEGEDGE_IOREGS 0
+#define XPAR_OPB_EMC_0_FAMILY "spartan3e"
+#define XPAR_OPB_EMC_0_MEM0_BASEADDR 0x21000000
+#define XPAR_OPB_EMC_0_MEM0_HIGHADDR 0x21FFFFFF
+#define XPAR_OPB_EMC_0_MEM1_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_EMC_0_MEM1_HIGHADDR 0x00000000
+#define XPAR_OPB_EMC_0_MEM2_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_EMC_0_MEM2_HIGHADDR 0x00000000
+#define XPAR_OPB_EMC_0_MEM3_BASEADDR 0xFFFFFFFF
+#define XPAR_OPB_EMC_0_MEM3_HIGHADDR 0x00000000
+#define XPAR_OPB_EMC_0_MEM0_WIDTH 8
+#define XPAR_OPB_EMC_0_MEM1_WIDTH 32
+#define XPAR_OPB_EMC_0_MEM2_WIDTH 32
+#define XPAR_OPB_EMC_0_MEM3_WIDTH 32
+#define XPAR_OPB_EMC_0_MAX_MEM_WIDTH 8
+#define XPAR_OPB_EMC_0_INCLUDE_DATAWIDTH_MATCHING_0 1
+#define XPAR_OPB_EMC_0_INCLUDE_DATAWIDTH_MATCHING_1 1
+#define XPAR_OPB_EMC_0_INCLUDE_DATAWIDTH_MATCHING_2 1
+#define XPAR_OPB_EMC_0_INCLUDE_DATAWIDTH_MATCHING_3 1
+#define XPAR_OPB_EMC_0_SYNCH_MEM_0 0
+#define XPAR_OPB_EMC_0_SYNCH_PIPEDELAY_0 2
+#define XPAR_OPB_EMC_0_TCEDV_PS_MEM_0 110000
+#define XPAR_OPB_EMC_0_TAVDV_PS_MEM_0 110000
+#define XPAR_OPB_EMC_0_THZCE_PS_MEM_0 35000
+#define XPAR_OPB_EMC_0_THZOE_PS_MEM_0 7000
+#define XPAR_OPB_EMC_0_TWPS_MEM_0 110000
+#define XPAR_OPB_EMC_0_TWP_PS_MEM_0 70000
+#define XPAR_OPB_EMC_0_TLZWE_PS_MEM_0 15000
+#define XPAR_OPB_EMC_0_SYNCH_MEM_1 0
+#define XPAR_OPB_EMC_0_SYNCH_PIPEDELAY_1 2
+#define XPAR_OPB_EMC_0_TCEDV_PS_MEM_1 15000
+#define XPAR_OPB_EMC_0_TAVDV_PS_MEM_1 15000
+#define XPAR_OPB_EMC_0_THZCE_PS_MEM_1 7000
+#define XPAR_OPB_EMC_0_THZOE_PS_MEM_1 7000
+#define XPAR_OPB_EMC_0_TWPS_MEM_1 15000
+#define XPAR_OPB_EMC_0_TWP_PS_MEM_1 12000
+#define XPAR_OPB_EMC_0_TLZWE_PS_MEM_1 0
+#define XPAR_OPB_EMC_0_SYNCH_MEM_2 0
+#define XPAR_OPB_EMC_0_SYNCH_PIPEDELAY_2 2
+#define XPAR_OPB_EMC_0_TCEDV_PS_MEM_2 15000
+#define XPAR_OPB_EMC_0_TAVDV_PS_MEM_2 15000
+#define XPAR_OPB_EMC_0_THZCE_PS_MEM_2 7000
+#define XPAR_OPB_EMC_0_THZOE_PS_MEM_2 7000
+#define XPAR_OPB_EMC_0_TWPS_MEM_2 15000
+#define XPAR_OPB_EMC_0_TWP_PS_MEM_2 12000
+#define XPAR_OPB_EMC_0_TLZWE_PS_MEM_2 0
+#define XPAR_OPB_EMC_0_SYNCH_MEM_3 0
+#define XPAR_OPB_EMC_0_SYNCH_PIPEDELAY_3 2
+#define XPAR_OPB_EMC_0_TCEDV_PS_MEM_3 15000
+#define XPAR_OPB_EMC_0_TAVDV_PS_MEM_3 15000
+#define XPAR_OPB_EMC_0_THZCE_PS_MEM_3 7000
+#define XPAR_OPB_EMC_0_THZOE_PS_MEM_3 7000
+#define XPAR_OPB_EMC_0_TWPS_MEM_3 15000
+#define XPAR_OPB_EMC_0_TWP_PS_MEM_3 12000
+#define XPAR_OPB_EMC_0_TLZWE_PS_MEM_3 0
+#define XPAR_OPB_EMC_0_OPB_DWIDTH 32
+#define XPAR_OPB_EMC_0_OPB_AWIDTH 32
+#define XPAR_OPB_EMC_0_OPB_CLK_PERIOD_PS 14999
+#define XPAR_OPB_EMC_0_HW_VER "2.00.a"
+
+/* Definitions for MCH_OPB_DDR_0 */
+#define XPAR_MCH_OPB_DDR_0_INSTANCE "DDR_SDRAM_32Mx16"
+#define XPAR_MCH_OPB_DDR_0_FAMILY "spartan3e"
+#define XPAR_MCH_OPB_DDR_0_REG_DIMM 0
+#define XPAR_MCH_OPB_DDR_0_NUM_BANKS_MEM 1
+#define XPAR_MCH_OPB_DDR_0_NUM_CLK_PAIRS 1
+#define XPAR_MCH_OPB_DDR_0_DDR_ASYNSUPPORT 1
+#define XPAR_MCH_OPB_DDR_0_EXTRA_TSU 0
+#define XPAR_MCH_OPB_DDR_0_USE_OPEN_ROW_MNGT 0
+#define XPAR_MCH_OPB_DDR_0_INCLUDE_DDR_PIPE 1
+#define XPAR_MCH_OPB_DDR_0_NUM_CHANNELS 2
+#define XPAR_MCH_OPB_DDR_0_PRIORITY_MODE 0
+#define XPAR_MCH_OPB_DDR_0_INCLUDE_OPB_IPIF 1
+#define XPAR_MCH_OPB_DDR_0_INCLUDE_OPB_BURST_SUPPORT 0
+#define XPAR_MCH_OPB_DDR_0_INCLUDE_TIMEOUT_CNTR 0
+#define XPAR_MCH_OPB_DDR_0_TIMEOUT 16
+#define XPAR_MCH_OPB_DDR_0_MCH_OPB_DWIDTH 32
+#define XPAR_MCH_OPB_DDR_0_MCH_OPB_AWIDTH 32
+#define XPAR_MCH_OPB_DDR_0_MCH_OPB_CLK_PERIOD_PS 14999
+#define XPAR_MCH_OPB_DDR_0_DDR_TMRD 15000
+#define XPAR_MCH_OPB_DDR_0_DDR_TWR 15000
+#define XPAR_MCH_OPB_DDR_0_DDR_TWTR 1
+#define XPAR_MCH_OPB_DDR_0_DDR_TRAS 40000
+#define XPAR_MCH_OPB_DDR_0_DDR_TRC 65000
+#define XPAR_MCH_OPB_DDR_0_DDR_TRFC 75000
+#define XPAR_MCH_OPB_DDR_0_DDR_TRCD 20000
+#define XPAR_MCH_OPB_DDR_0_DDR_TRRD 15000
+#define XPAR_MCH_OPB_DDR_0_DDR_TREFI 7800000
+#define XPAR_MCH_OPB_DDR_0_DDR_TRP 20000
+#define XPAR_MCH_OPB_DDR_0_DDR_TXSR 80000
+#define XPAR_MCH_OPB_DDR_0_DDR_CAS_LAT 2
+#define XPAR_MCH_OPB_DDR_0_DDR_DWIDTH 16
+#define XPAR_MCH_OPB_DDR_0_DDR_AWIDTH 13
+#define XPAR_MCH_OPB_DDR_0_DDR_COL_AWIDTH 10
+#define XPAR_MCH_OPB_DDR_0_DDR_BANK_AWIDTH 2
+#define XPAR_MCH_OPB_DDR_0_MCH0_PROTOCOL 0
+#define XPAR_MCH_OPB_DDR_0_MCH0_ACCESSBUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH0_RDDATABUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH1_PROTOCOL 0
+#define XPAR_MCH_OPB_DDR_0_MCH1_ACCESSBUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH1_RDDATABUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH2_PROTOCOL 0
+#define XPAR_MCH_OPB_DDR_0_MCH2_ACCESSBUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH2_RDDATABUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH3_PROTOCOL 0
+#define XPAR_MCH_OPB_DDR_0_MCH3_ACCESSBUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_MCH3_RDDATABUF_DEPTH 16
+#define XPAR_MCH_OPB_DDR_0_XCL0_LINESIZE 4
+#define XPAR_MCH_OPB_DDR_0_XCL0_WRITEXFER 1
+#define XPAR_MCH_OPB_DDR_0_XCL1_LINESIZE 4
+#define XPAR_MCH_OPB_DDR_0_XCL1_WRITEXFER 1
+#define XPAR_MCH_OPB_DDR_0_XCL2_LINESIZE 4
+#define XPAR_MCH_OPB_DDR_0_XCL2_WRITEXFER 1
+#define XPAR_MCH_OPB_DDR_0_XCL3_LINESIZE 4
+#define XPAR_MCH_OPB_DDR_0_XCL3_WRITEXFER 1
+#define XPAR_MCH_OPB_DDR_0_MEM0_BASEADDR 0x24000000
+#define XPAR_MCH_OPB_DDR_0_MEM0_HIGHADDR 0x27FFFFFF
+#define XPAR_MCH_OPB_DDR_0_MEM1_BASEADDR 0xFFFFFFFF
+#define XPAR_MCH_OPB_DDR_0_MEM1_HIGHADDR 0x00000000
+#define XPAR_MCH_OPB_DDR_0_MEM2_BASEADDR 0xFFFFFFFF
+#define XPAR_MCH_OPB_DDR_0_MEM2_HIGHADDR 0x00000000
+#define XPAR_MCH_OPB_DDR_0_MEM3_BASEADDR 0xFFFFFFFF
+#define XPAR_MCH_OPB_DDR_0_MEM3_HIGHADDR 0x00000000
+#define XPAR_MCH_OPB_DDR_0_SIM_INIT_TIME_PS 100000000
+#define XPAR_MCH_OPB_DDR_0_HW_VER "1.00.c"
+
+/* Definitions for OPB_ETHERNET_0 */
+#define XPAR_OPB_ETHERNET_0_INSTANCE "Ethernet_MAC"
+#define XPAR_OPB_ETHERNET_0_DEV_BLK_ID 1
+#define XPAR_OPB_ETHERNET_0_DEV_MIR_ENABLE 1
+#define XPAR_OPB_ETHERNET_0_BASEADDR 0x40C00000
+#define XPAR_OPB_ETHERNET_0_HIGHADDR 0x40C0FFFF
+#define XPAR_OPB_ETHERNET_0_RESET_PRESENT 1
+#define XPAR_OPB_ETHERNET_0_INCLUDE_DEV_PENCODER 1
+#define XPAR_OPB_ETHERNET_0_DMA_PRESENT 1
+#define XPAR_OPB_ETHERNET_0_DMA_INTR_COALESCE 1
+#define XPAR_OPB_ETHERNET_0_OPB_AWIDTH 32
+#define XPAR_OPB_ETHERNET_0_OPB_DWIDTH 32
+#define XPAR_OPB_ETHERNET_0_OPB_CLK_PERIOD_PS 14999
+#define XPAR_OPB_ETHERNET_0_FAMILY "spartan3e"
+#define XPAR_OPB_ETHERNET_0_IPIF_RDFIFO_DEPTH 32768
+#define XPAR_OPB_ETHERNET_0_IPIF_WRFIFO_DEPTH 32768
+#define XPAR_OPB_ETHERNET_0_MIIM_CLKDVD 0x0000001F
+#define XPAR_OPB_ETHERNET_0_SOURCE_ADDR_INSERT_EXIST 1
+#define XPAR_OPB_ETHERNET_0_PAD_INSERT_EXIST 1
+#define XPAR_OPB_ETHERNET_0_FCS_INSERT_EXIST 1
+#define XPAR_OPB_ETHERNET_0_MAFIFO_DEPTH 64
+#define XPAR_OPB_ETHERNET_0_MAFIFO_BRAM_1_SRL_0 0
+#define XPAR_OPB_ETHERNET_0_HALF_DUPLEX_EXIST 1
+#define XPAR_OPB_ETHERNET_0_ERR_COUNT_EXIST 1
+#define XPAR_OPB_ETHERNET_0_CAM_EXIST 0
+#define XPAR_OPB_ETHERNET_0_CAM_BRAM_0_SRL_1 1
+#define XPAR_OPB_ETHERNET_0_JUMBO_EXIST 0
+#define XPAR_OPB_ETHERNET_0_MII_EXIST 1
+#define XPAR_OPB_ETHERNET_0_TX_DRE_TYPE 0
+#define XPAR_OPB_ETHERNET_0_RX_DRE_TYPE 0
+#define XPAR_OPB_ETHERNET_0_TX_INCLUDE_CSUM 0
+#define XPAR_OPB_ETHERNET_0_RX_INCLUDE_CSUM 0
+#define XPAR_OPB_ETHERNET_0_HW_VER "1.04.a"
+
+/* Definitions for OPB_TIMER_0 */
+#define XPAR_OPB_TIMER_0_INSTANCE "opb_timer_1"
+#define XPAR_OPB_TIMER_0_FAMILY "spartan3e"
+#define XPAR_OPB_TIMER_0_COUNT_WIDTH 32
+#define XPAR_OPB_TIMER_0_ONE_TIMER_ONLY 1
+#define XPAR_OPB_TIMER_0_TRIG0_ASSERT 1
+#define XPAR_OPB_TIMER_0_TRIG1_ASSERT 1
+#define XPAR_OPB_TIMER_0_GEN0_ASSERT 1
+#define XPAR_OPB_TIMER_0_GEN1_ASSERT 1
+#define XPAR_OPB_TIMER_0_OPB_AWIDTH 32
+#define XPAR_OPB_TIMER_0_OPB_DWIDTH 32
+#define XPAR_OPB_TIMER_0_BASEADDR 0x41C00000
+#define XPAR_OPB_TIMER_0_HIGHADDR 0x41C0FFFF
+#define XPAR_OPB_TIMER_0_HW_VER "1.00.b"
+
+/* Definitions for OPB_INTC_0 */
+#define XPAR_OPB_INTC_0_INSTANCE "opb_intc_0"
+#define XPAR_OPB_INTC_0_FAMILY "spartan3e"
+#define XPAR_OPB_INTC_0_Y 0
+#define XPAR_OPB_INTC_0_X 0
+#define XPAR_OPB_INTC_0_U_SET "intc"
+#define XPAR_OPB_INTC_0_OPB_AWIDTH 32
+#define XPAR_OPB_INTC_0_OPB_DWIDTH 32
+#define XPAR_OPB_INTC_0_BASEADDR 0x41200000
+#define XPAR_OPB_INTC_0_HIGHADDR 0x4120FFFF
+#define XPAR_OPB_INTC_0_NUM_INTR_INPUTS 5
+#define XPAR_OPB_INTC_0_KIND_OF_INTR 0x00000010
+#define XPAR_OPB_INTC_0_KIND_OF_EDGE 0x00000010
+#define XPAR_OPB_INTC_0_KIND_OF_LVL 0x0000000F
+#define XPAR_OPB_INTC_0_HAS_IPR 1
+#define XPAR_OPB_INTC_0_HAS_SIE 1
+#define XPAR_OPB_INTC_0_HAS_CIE 1
+#define XPAR_OPB_INTC_0_HAS_IVR 1
+#define XPAR_OPB_INTC_0_IRQ_IS_LEVEL 1
+#define XPAR_OPB_INTC_0_IRQ_ACTIVE 1
+#define XPAR_OPB_INTC_0_HW_VER "1.00.c"
+
+/* Definitions for OPB_COLOR_VIDEO_CTRL_0 */
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_INSTANCE "opb_color_video_ctrl_0"
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_BASEADDR 0x45000000
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_HIGHADDR 0x45FFFFFF
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_VIDEO_MODE 6
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_BM_MODE 0
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_CHAR_MODE 1
+#define XPAR_OPB_COLOR_VIDEO_CTRL_0_HW_VER "1.00.a"
+
+/* Interrupt settings */
+#define XPAR_OPB_INTC_0_FSL_PS2_0_IRQ 0
+#define XPAR_OPB_INTC_0_OPB_TIMER_0_IRQ 1
+#define XPAR_OPB_INTC_0_OPB_ETHERNET_0_IRQ 2
+#define XPAR_OPB_INTC_0_OPB_GPIO_6_IRQ 3
+#define XPAR_OPB_INTC_0_OPB_UARTLITE_0_IRQ 4
+
+/* Peripheral counts */
+#define XPAR_OPB_V20_NUM_INSTANCES 1
+#define XPAR_FSL_PS2_NUM_INSTANCES 1
+#define XPAR_LMB_BRAM_IF_CNTLR_NUM_INSTANCES 2
+#define XPAR_OPB_EMC_NUM_INSTANCES 1
+#define XPAR_OPB_MDM_NUM_INSTANCES 1
+#define XPAR_OPB_GPIO_NUM_INSTANCES 7
+#define XPAR_OPB_ETHERNET_NUM_INSTANCES 1
+#define XPAR_MCH_OPB_DDR_NUM_INSTANCES 1
+#define XPAR_OPB_TIMER_NUM_INSTANCES 1
+#define XPAR_OPB_COLOR_VIDEO_CTRL_NUM_INSTANCES 1
+#define XPAR_OPB_UARTLITE_NUM_INSTANCES 1
+#define XPAR_OPB_INTC_NUM_INSTANCES 1
+
+/******************************************************************/
+
+/* MEMORY MAP */
+#define XPAR_ERAM_START XPAR_MCH_OPB_DDR_0_MEM0_BASEADDR
+#define XPAR_ERAM_SIZE ((XPAR_MCH_OPB_DDR_0_MEM0_HIGHADDR) - (XPAR_MCH_OPB_DDR_0_MEM0_BASEADDR) +1)
+
+/* OPB TIMER */
+#define XPAR_TIMER_0_BASEADDR XPAR_OPB_TIMER_0_BASEADDR
+#define XPAR_TIMER_0_IRQ XPAR_OPB_INTC_0_OPB_TIMER_0_IRQ
+
--- /dev/null
+#ifndef _ASM_XPARAMETERS_H
+#define _ASM_XPARAMETERS_H
+
+#ifdef __KERNEL__
+
+#if defined (CONFIG_SP3E)
+
+#include <asm/xparameters-sp3e.h>
+
+#elif defined(CONFIG_ML401)
+
+#include <asm/xparameters-ml401.h>
+
+#else
+
+#include <asm/xparameters_bsp.h>
+
+#endif
+
+#define DDR_SDRAM_BASEADDR (XPAR_ERAM_START)
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+#define DDR_SDRAM_HIGHADDR (XPAR_ERAM_START + (XPAR_ERAM_SIZE)/2 - 1)
+#else
+#define DDR_SDRAM_HIGHADDR (XPAR_ERAM_START + XPAR_ERAM_SIZE - 1)
+#endif
+
+#define XPAR_CPU_FREQ (XPAR_CPU_CLOCK_FREQ)
+#define XPAR_TIMER_INTR (CONFIG_XILINX_TIMER_0_IRQ)
+
+/* Old LW BSP */
+#ifdef XPAR_OPB_INTC_0_BASEADDR
+#define XPAR_INTC_0_BASEADDR (XPAR_OPB_INTC_0_BASEADDR)
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS (XPAR_OPB_INTC_0_NUM_INTR_INPUTS)
+#define XPAR_INTC_0_KIND_OF_INTR (XPAR_OPB_INTC_0_KIND_OF_INTR)
+#endif
+
+#ifdef XPAR_OPB_UARTLITE_0_BASEADDR
+#define XPAR_UARTLITE_0_BASEADDR (XPAR_OPB_UARTLITE_0_BASEADDR)
+#define XPAR_UARTLITE_0_IRQ (XPAR_OPB_INTC_0_OPB_UARTLITE_0_IRQ)
+#endif
+
+#ifdef XPAR_OPB_SPI_0_BASEADDR
+#define XPAR_SPI_0_BASEADDR (XPAR_OPB_SPI_0_BASEADDR)
+#define XPAR_SPI_0_HIGHADDR (XPAR_OPB_SPI_0_HIGHADDR)
+#define XPAR_SPI_0_DEVICE_ID (0)
+#define XPAR_SPI_0_FIFO_EXIST (XPAR_OPB_SPI_0_FIFO_EXIST)
+#define XPAR_SPI_0_SPI_SLAVE_ONLY (XPAR_OPB_SPI_0_SPI_SLAVE_ONLY)
+#define XPAR_SPI_0_NUM_SS_BITS (XPAR_OPB_SPI_0_NUM_SS_BITS)
+#define XPAR_INTC_0_SPI_0_VEC_ID (XPAR_OPB_INTC_0_OPB_SPI_0_IRQ)
+#endif
+
+#ifdef XPAR_OPB_ETHERNET_0_BASEADDR
+#define XPAR_XEMAC_NUM_INSTANCES (XPAR_OPB_ETHERNET_NUM_INSTANCES)
+#define XPAR_EMAC_0_BASEADDR (XPAR_OPB_ETHERNET_0_BASEADDR)
+#define XPAR_EMAC_0_HIGHADDR (XPAR_OPB_ETHERNET_0_HIGHADDR)
+#define XPAR_EMAC_0_DEVICE_ID (0)
+#define XPAR_EMAC_0_ERR_COUNT_EXIST (XPAR_OPB_ETHERNET_0_ERR_COUNT_EXIST)
+#define XPAR_EMAC_0_DMA_PRESENT (XPAR_OPB_ETHERNET_0_DMA_PRESENT)
+#define XPAR_EMAC_0_MII_EXIST (XPAR_OPB_ETHERNET_0_MII_EXIST)
+#define XPAR_EMAC_0_CAM_EXIST (XPAR_OPB_ETHERNET_0_CAM_EXIST)
+#define XPAR_EMAC_0_JUMBO_EXIST (XPAR_OPB_ETHERNET_0_JUMBO_EXIST)
+#define XPAR_EMAC_0_TX_DRE_TYPE (XPAR_OPB_ETHERNET_0_TX_DRE_TYPE)
+#define XPAR_EMAC_0_RX_DRE_TYPE (XPAR_OPB_ETHERNET_0_RX_DRE_TYPE)
+#define XPAR_EMAC_0_TX_INCLUDE_CSUM (XPAR_OPB_ETHERNET_0_TX_INCLUDE_CSUM)
+#define XPAR_EMAC_0_RX_INCLUDE_CSUM (XPAR_OPB_ETHERNET_0_RX_INCLUDE_CSUM)
+#define XPAR_INTC_0_EMAC_0_VEC_ID (XPAR_OPB_INTC_0_OPB_ETHERNET_0_IRQ)
+#define XPAR_EMAC_0_IRQ (XPAR_OPB_INTC_0_OPB_ETHERNET_0_IRQ)
+#endif
+
+#ifdef XPAR_OPB_EMC_0_MEM0_BASEADDR
+#define XPAR_FLASH_BASEADDR (XPAR_OPB_EMC_0_MEM0_BASEADDR)
+#define XPAR_FLASH_HIGHADDR (XPAR_OPB_EMC_0_MEM0_HIGHADDR)
+#define XPAR_FLASH_BUSWIDTH 1
+#endif
+
+#ifdef XPAR_OPB_GPIO_0_BASEADDR
+#define XPAR_GPIO_0_BASEADDR (XPAR_OPB_GPIO_0_BASEADDR)
+#define XPAR_GPIO_0_HIGHADDR (XPAR_OPB_GPIO_0_HIGHADDR)
+#define XPAR_GPIO_0_DEVICE_ID (0)
+#define XPAR_GPIO_0_IS_DUAL (XPAR_OPB_GPIO_0_IS_DUAL)
+#define XPAR_GPIO_0_INTERRUPT_PRESENT (XPAR_OPB_GPIO_0_INTERRUPT_PRESENT)
+#define XPAR_GPIO_0_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_0_IRQ)
+#endif
+
+#ifdef XPAR_OPB_GPIO_1_BASEADDR
+#define XPAR_GPIO_1_BASEADDR (XPAR_OPB_GPIO_1_BASEADDR)
+#define XPAR_GPIO_1_HIGHADDR (XPAR_OPB_GPIO_1_HIGHADDR)
+#define XPAR_GPIO_1_DEVICE_ID (1)
+#define XPAR_GPIO_1_IS_DUAL (XPAR_OPB_GPIO_1_IS_DUAL)
+#define XPAR_GPIO_1_INTERRUPT_PRESENT (XPAR_OPB_GPIO_1_INTERRUPT_PRESENT)
+#define XPAR_GPIO_1_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_1_IRQ)
+#endif
+
+#ifdef XPAR_OPB_GPIO_2_BASEADDR
+#define XPAR_GPIO_2_BASEADDR (XPAR_OPB_GPIO_2_BASEADDR)
+#define XPAR_GPIO_2_HIGHADDR (XPAR_OPB_GPIO_2_HIGHADDR)
+#define XPAR_GPIO_2_DEVICE_ID (2)
+#define XPAR_GPIO_2_IS_DUAL (XPAR_OPB_GPIO_2_IS_DUAL)
+#define XPAR_GPIO_2_INTERRUPT_PRESENT (XPAR_OPB_GPIO_2_INTERRUPT_PRESENT)
+#define XPAR_GPIO_2_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_2_IRQ)
+#endif
+
+#ifdef XPAR_OPB_GPIO_3_BASEADDR
+#define XPAR_GPIO_3_BASEADDR (XPAR_OPB_GPIO_3_BASEADDR)
+#define XPAR_GPIO_3_HIGHADDR (XPAR_OPB_GPIO_3_HIGHADDR)
+#define XPAR_GPIO_3_DEVICE_ID (3)
+#define XPAR_GPIO_3_IS_DUAL (XPAR_OPB_GPIO_3_IS_DUAL)
+#define XPAR_GPIO_3_INTERRUPT_PRESENT (XPAR_OPB_GPIO_3_INTERRUPT_PRESENT)
+#define XPAR_GPIO_3_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_3_IRQ)
+#endif
+
+#ifdef XPAR_OPB_GPIO_4_BASEADDR
+#define XPAR_GPIO_4_BASEADDR (XPAR_OPB_GPIO_4_BASEADDR)
+#define XPAR_GPIO_4_HIGHADDR (XPAR_OPB_GPIO_4_HIGHADDR)
+#define XPAR_GPIO_4_DEVICE_ID (4)
+#define XPAR_GPIO_4_IS_DUAL (XPAR_OPB_GPIO_4_IS_DUAL)
+#define XPAR_GPIO_4_INTERRUPT_PRESENT (XPAR_OPB_GPIO_4_INTERRUPT_PRESENT)
+#define XPAR_GPIO_4_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_4_IRQ)
+#endif
+
+#ifdef XPAR_OPB_GPIO_5_BASEADDR
+#define XPAR_GPIO_5_BASEADDR (XPAR_OPB_GPIO_5_BASEADDR)
+#define XPAR_GPIO_5_HIGHADDR (XPAR_OPB_GPIO_5_HIGHADDR)
+#define XPAR_GPIO_5_DEVICE_ID (5)
+#define XPAR_GPIO_5_IS_DUAL (XPAR_OPB_GPIO_5_IS_DUAL)
+#define XPAR_GPIO_5_INTERRUPT_PRESENT (XPAR_OPB_GPIO_5_INTERRUPT_PRESENT)
+#define XPAR_GPIO_5_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_5_IRQ)
+#endif
+
+#ifdef XPAR_OPB_GPIO_6_BASEADDR
+#define XPAR_GPIO_6_BASEADDR (XPAR_OPB_GPIO_6_BASEADDR)
+#define XPAR_GPIO_6_HIGHADDR (XPAR_OPB_GPIO_6_HIGHADDR)
+#define XPAR_GPIO_6_DEVICE_ID (6)
+#define XPAR_GPIO_6_IS_DUAL (XPAR_OPB_GPIO_6_IS_DUAL)
+#define XPAR_GPIO_6_INTERRUPT_PRESENT (XPAR_OPB_GPIO_6_INTERRUPT_PRESENT)
+#define XPAR_GPIO_6_IRQ (XPAR_OPB_INTC_0_OPB_GPIO_6_IRQ)
+#endif
+
+#ifdef XPAR_OPB_ETHERNETLITE_0_BASEADDR
+#define XPAR_ETHERNETLITE_0_BASEADDR (XPAR_OPB_ETHERNETLITE_0_BASEADDR)
+#define XPAR_ETHERNETLITE_0_HIGHADDR (XPAR_OPB_ETHERNETLITE_0_HIGHADDR)
+#define XPAR_ETHERNETLITE_0_IRQ (XPAR_OPB_INTC_0_OPB_ETHERNETLITE_0_IRQ)
+#define XPAR_ETHERNETLITE_0_TX_PING_PONG (XPAR_OPB_ETHERNETLITE_0_TX_PING_PONG)
+#define XPAR_ETHERNETLITE_0_RX_PING_PONG (XPAR_OPB_ETHERNETLITE_0_RX_PING_PONG)
+#define XPAR_XEMACLITE_NUM_INSTANCES 1
+#endif
+
+/* New LW BSP */
+#ifdef XPAR_INTC_0_INSTANCE
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS (XPAR_INTC_0_NUM_INTR_INPUTS)
+#endif
+
+/* New-style BSP: derive the EMAC interrupt vector id from the BSP IRQ.
+ * (Was guarded by misspelled XPAR_EMAC_0_INSTACE, which no BSP defines,
+ * so the mapping could never take effect.) */
+#ifdef XPAR_EMAC_0_INSTANCE
+#define XPAR_INTC_0_EMAC_0_VEC_ID (XPAR_EMAC_0_IRQ)
+#endif
+
+
+/* EDK 9.2 */
+#ifdef XPAR_XPS_TIMER_0_BASEADDR
+#define XPAR_TIMER_0_BASEADDR XPAR_XPS_TIMER_0_BASEADDR
+#define XPAR_TIMER_0_HIGHADDR XPAR_XPS_TIMER_0_HIGHADDR
+#define XPAR_TIMER_0_IRQ XPAR_XPS_TIMER_0_IRQ
+#endif
+
+#ifdef XPAR_XPS_INTC_0_BASEADDR
+#define XPAR_INTC_0_BASEADDR (XPAR_XPS_INTC_0_BASEADDR)
+#define XPAR_INTC_MAX_NUM_INTR_INPUTS (XPAR_XPS_INTC_0_NUM_INTR_INPUTS)
+#define XPAR_INTC_0_KIND_OF_INTR (XPAR_XPS_INTC_0_KIND_OF_INTR)
+#endif
+
+#ifdef XPAR_XPS_UARTLITE_0_BASEADDR
+#define XPAR_UARTLITE_0_BASEADDR (XPAR_XPS_UARTLITE_0_BASEADDR)
+#define XPAR_UARTLITE_0_IRQ (XPAR_XPS_UARTLITE_0_IRQ)
+#endif
+
+#ifdef XPAR_XPS_SPI_0_BASEADDR
+#define XPAR_SPI_0_BASEADDR (XPAR_XPS_SPI_0_BASEADDR)
+#define XPAR_SPI_0_HIGHADDR (XPAR_XPS_SPI_0_HIGHADDR)
+#define XPAR_SPI_0_DEVICE_ID (0)
+#define XPAR_SPI_0_FIFO_EXIST (XPAR_XPS_SPI_0_FIFO_EXIST)
+/* NOTE(review): hardcoded to 1 here, unlike the OPB path which forwards the
+ * BSP-generated value (XPAR_OPB_SPI_0_SPI_SLAVE_ONLY) -- confirm intentional. */
+#define XPAR_SPI_0_SPI_SLAVE_ONLY (1)
+#define XPAR_SPI_0_NUM_SS_BITS (XPAR_XPS_SPI_0_NUM_SS_BITS)
+#define XPAR_INTC_0_SPI_0_VEC_ID (XPAR_XPS_SPI_0_IRQ)
+#endif
+
+#ifdef XPAR_XPS_ETHERNETLITE_0_BASEADDR
+#define XPAR_ETHERNETLITE_0_BASEADDR (XPAR_XPS_ETHERNETLITE_0_BASEADDR)
+#define XPAR_ETHERNETLITE_0_HIGHADDR (XPAR_XPS_ETHERNETLITE_0_HIGHADDR)
+#define XPAR_ETHERNETLITE_0_IRQ (XPAR_XPS_ETHERNETLITE_0_IRQ)
+#define XPAR_ETHERNETLITE_0_TX_PING_PONG (XPAR_XPS_ETHERNETLITE_0_TX_PING_PONG)
+#define XPAR_ETHERNETLITE_0_RX_PING_PONG (XPAR_XPS_ETHERNETLITE_0_RX_PING_PONG)
+#define XPAR_XEMACLITE_NUM_INSTANCES 1
+#endif
+
+#ifdef XPAR_XPS_LL_TEMAC_0_BASEADDR
+/* LocalLink TYPE Enumerations */
+#define XPAR_LL_FIFO 1
+#define XPAR_LL_DMA 2
+
+#define XPAR_LLTEMAC_0_INSTANCE XPAR_XPS_LL_TEMAC_0_INSTANCE
+#define XPAR_LLTEMAC_0_SUBFAMILY XPAR_XPS_LL_TEMAC_0_SUBFAMILY
+#define XPAR_LLTEMAC_0_RESERVED XPAR_XPS_LL_TEMAC_0_RESERVED
+#define XPAR_LLTEMAC_0_SPLB_NATIVE_DWIDTH XPAR_XPS_LL_TEMAC_0_SPLB_NATIVE_DWIDTH
+#define XPAR_LLTEMAC_0_FAMILY XPAR_XPS_LL_TEMAC_0_FAMILY
+#define XPAR_LLTEMAC_0_BASEADDR XPAR_XPS_LL_TEMAC_0_BASEADDR
+#define XPAR_LLTEMAC_0_HIGHADDR XPAR_XPS_LL_TEMAC_0_HIGHADDR
+#define XPAR_LLTEMAC_0_SPLB_DWIDTH XPAR_XPS_LL_TEMAC_0_SPLB_DWIDTH
+#define XPAR_LLTEMAC_0_SPLB_AWIDTH XPAR_XPS_LL_TEMAC_0_SPLB_AWIDTH
+#define XPAR_LLTEMAC_0_SPLB_NUM_MASTERS XPAR_XPS_LL_TEMAC_0_SPLB_NUM_MASTERS
+#define XPAR_LLTEMAC_0_SPLB_MID_WIDTH XPAR_XPS_LL_TEMAC_0_SPLB_MID_WIDTH
+#define XPAR_LLTEMAC_0_SPLB_P2P XPAR_XPS_LL_TEMAC_0_SPLB_P2P
+#define XPAR_LLTEMAC_0_BUS2CORE_CLK_RATIO XPAR_XPS_LL_TEMAC_0_BUS2CORE_CLK_RATIO
+#define XPAR_LLTEMAC_0_TEMAC_TYPE XPAR_XPS_LL_TEMAC_0_TEMAC_TYPE
+#define XPAR_LLTEMAC_0_INCLUDE_IO XPAR_XPS_LL_TEMAC_0_INCLUDE_IO
+#define XPAR_LLTEMAC_0_PHY_TYPE XPAR_XPS_LL_TEMAC_0_PHY_TYPE
+#define XPAR_LLTEMAC_0_TEMAC1_ENABLED XPAR_XPS_LL_TEMAC_0_TEMAC1_ENABLED
+#define XPAR_LLTEMAC_0_TEMAC0_PHYADDR XPAR_XPS_LL_TEMAC_0_TEMAC0_PHYADDR
+#define XPAR_LLTEMAC_0_TEMAC1_PHYADDR XPAR_XPS_LL_TEMAC_0_TEMAC1_PHYADDR
+#define XPAR_LLTEMAC_0_TEMAC0_TXFIFO XPAR_XPS_LL_TEMAC_0_TEMAC0_TXFIFO
+#define XPAR_LLTEMAC_0_TEMAC0_RXFIFO XPAR_XPS_LL_TEMAC_0_TEMAC0_RXFIFO
+#define XPAR_LLTEMAC_0_TEMAC1_TXFIFO XPAR_XPS_LL_TEMAC_0_TEMAC1_TXFIFO
+#define XPAR_LLTEMAC_0_TEMAC1_RXFIFO XPAR_XPS_LL_TEMAC_0_TEMAC1_RXFIFO
+#define XPAR_LLTEMAC_0_TEMAC0_TXCSUM XPAR_XPS_LL_TEMAC_0_TEMAC0_TXCSUM
+#define XPAR_LLTEMAC_0_TEMAC0_RXCSUM XPAR_XPS_LL_TEMAC_0_TEMAC0_RXCSUM
+#define XPAR_LLTEMAC_0_TEMAC1_TXCSUM XPAR_XPS_LL_TEMAC_0_TEMAC1_TXCSUM
+#define XPAR_LLTEMAC_0_TEMAC1_RXCSUM XPAR_XPS_LL_TEMAC_0_TEMAC1_RXCSUM
+#define XPAR_LLTEMAC_0_HW_VER XPAR_XPS_LL_TEMAC_0_HW_VER
+#define XPAR_LLTEMAC_0_SPLB_CLK_PERIOD_PS XPAR_XPS_LL_TEMAC_0_SPLB_CLK_PERIOD_PS
+#endif
+
+#ifdef XPAR_OPB_INTC_0_FSL_PS2_0_IRQ
+#define XPAR_FSL_PS2_IRQ (XPAR_OPB_INTC_0_FSL_PS2_0_IRQ)
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_XPARAMETERS_H */
--- /dev/null
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_GENERIC_H
+#define _ASM_POWERPC_DCR_GENERIC_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+/* Discriminator telling which backend a generic dcr_host_t wraps. */
+enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID};
+
+/* Generic DCR host: a tagged union over the MMIO and native host types,
+ * used when a kernel is built with both CONFIG_PPC_DCR_NATIVE and
+ * CONFIG_PPC_DCR_MMIO enabled. */
+typedef struct {
+	enum host_type_t type;	/* selects which union member is valid */
+	union {
+		dcr_host_mmio_t mmio;
+		dcr_host_native_t native;
+	} host;
+} dcr_host_t;
+
+extern bool dcr_map_ok_generic(dcr_host_t host);
+
+extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n,
+ unsigned int dcr_c);
+extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c);
+
+extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n);
+
+extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_GENERIC_H */
+
+
void __iomem *token;
unsigned int stride;
unsigned int base;
-} dcr_host_t;
+} dcr_host_mmio_t;
-#define DCR_MAP_OK(host) ((host).token != NULL)
+/* An MMIO DCR mapping is valid iff a non-NULL token was obtained. */
+static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host)
+{
+	return host.token != NULL;
+}
-extern dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
- unsigned int dcr_c);
-extern void dcr_unmap(dcr_host_t host, unsigned int dcr_c);
+extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int dcr_c);
+extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c);
-static inline u32 dcr_read(dcr_host_t host, unsigned int dcr_n)
+/* Read DCR number dcr_n through the MMIO window; the byte offset is
+ * (base + dcr_n) scaled by the mapping's stride. */
+static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n)
{
	return in_be32(host.token + ((host.base + dcr_n) * host.stride));
}
-static inline void dcr_write(dcr_host_t host, unsigned int dcr_n, u32 value)
+/* Write value to DCR number dcr_n through the MMIO window (same
+ * base/stride addressing as dcr_read_mmio). */
+static inline void dcr_write_mmio(dcr_host_mmio_t host,
+ unsigned int dcr_n,
+ u32 value)
{
	out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
}
typedef struct {
unsigned int base;
-} dcr_host_t;
+} dcr_host_native_t;
-#define DCR_MAP_OK(host) (1)
+/* Native DCR access needs no mapping step, so a native host is always
+ * considered valid. */
+static inline bool dcr_map_ok_native(dcr_host_native_t host)
+{
+	return true;	/* bool function: use true, not the integer literal 1 */
+}
-#define dcr_map(dev, dcr_n, dcr_c) ((dcr_host_t){ .base = (dcr_n) })
-#define dcr_unmap(host, dcr_c) do {} while (0)
-#define dcr_read(host, dcr_n) mfdcr(dcr_n + host.base)
-#define dcr_write(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
+#define dcr_map_native(dev, dcr_n, dcr_c) \
+ ((dcr_host_native_t){ .base = (dcr_n) })
+#define dcr_unmap_native(host, dcr_c) do {} while (0)
+#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
+#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
/* Device Control Registers */
void __mtdcr(int reg, unsigned int val);
#ifndef _ASM_POWERPC_DCR_H
#define _ASM_POWERPC_DCR_H
#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_DCR
#ifdef CONFIG_PPC_DCR_NATIVE
#include <asm/dcr-native.h>
-#else
+#endif
+
+#ifdef CONFIG_PPC_DCR_MMIO
#include <asm/dcr-mmio.h>
#endif
+
+/* Indirection layer for providing both NATIVE and MMIO support. */
+
+#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
+
+#include <asm/dcr-generic.h>
+
+#define DCR_MAP_OK(host) dcr_map_ok_generic(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value)
+
+#else
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+typedef dcr_host_native_t dcr_host_t;
+#define DCR_MAP_OK(host) dcr_map_ok_native(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value)
+#else
+typedef dcr_host_mmio_t dcr_host_t;
+#define DCR_MAP_OK(host) dcr_map_ok_mmio(host)
+#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c)
+#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c)
+#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n)
+#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value)
+#endif
+
+#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
+
/*
* On CONFIG_PPC_MERGE, we have additional helpers to read the DCR
* base from the device-tree
#endif /* CONFIG_PPC_MERGE */
#endif /* CONFIG_PPC_DCR */
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_H */
#elif defined(CONFIG_44x)
#include <asm/ibm44x.h>
+#ifdef CONFIG_XILINX_ML5XX
+#define NR_IRQS 32
+#else
#define NR_UIC_IRQS 32
#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
+#endif /* ifdef CONFIG_XILINX_ML5XX */
#elif defined(CONFIG_8xx)
* PPC44x definitions
*
* Matt Porter <mporter@kernel.crashing.org>
- *
+ *
* Copyright 2002-2005 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
#define NR_BOARD_IRQS 0
#endif
+#ifdef CONFIG_XILINX_ML5XX
+#define _IO_BASE 0
+#define _ISA_MEM_BASE 0
+#define PCI_DRAM_OFFSET 0
+#else
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
#define PCI_DRAM_OFFSET pci_dram_offset
+#endif
/* TLB entry offset/size used for pinning kernel lowmem */
#define PPC44x_PIN_SHIFT 28
#define PPC_PIN_SIZE (1 << PPC44x_PIN_SHIFT)
/* Lowest TLB slot consumed by the default pinned TLBs */
+#ifdef CONFIG_SERIAL_TEXT_DEBUG
+/* Reserve one more pinned TLB for the UART mapping */
+#define PPC44x_LOW_SLOT 62
+#else
#define PPC44x_LOW_SLOT 63
+#endif
/*
* Least significant 32-bits and extended real page number (ERPN) of
#define UART0_PHYS_IO_BASE 0xf0000200
#elif defined(CONFIG_440EP)
#define UART0_PHYS_IO_BASE 0xe0000000
+#elif defined(CONFIG_XILINX_ML5XX) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+#include <platforms/4xx/xparameters/xparameters.h>
+#define UART0_PHYS_IO_BASE XPAR_UARTNS550_0_BASEADDR
#else
#define UART0_PHYS_ERPN 1
#define UART0_PHYS_IO_BASE 0x40000200
#define PPC44x_PCICFG_PAGE 0x0000000c00000000ULL
#define PPC44x_PCIIO_PAGE PPC44x_PCICFG_PAGE
#define PPC44x_PCIMEM_PAGE 0x0000000d00000000ULL
-#elif defined(CONFIG_440EP)
+#elif defined(CONFIG_440EP) || defined(CONFIG_XILINX_ML5XX)
#define PPC44x_IO_PAGE 0x0000000000000000ULL
#define PPC44x_PCICFG_PAGE 0x0000000000000000ULL
#define PPC44x_PCIIO_PAGE PPC44x_PCICFG_PAGE
/* IO_BASE is for PCI I/O.
- * ISA not supported, just here to resolve copilation.
+ * ISA not supported, just here to resolve compilation.
*/
#ifndef _IO_BASE
#elif defined(CONFIG_44x)
+
#if defined(CONFIG_BAMBOO)
#include <platforms/4xx/bamboo.h>
#endif
#include <platforms/4xx/taishan.h>
#endif
+#if defined(CONFIG_XILINX_VIRTEX)
+#include <platforms/4xx/virtex.h>
+#endif
+
#ifndef __ASSEMBLY__
+
+/* ppc4xx_init() is called from platform_init() in xilinx_ml5.c.
+ * TODO: confirm whether ML5 boards actually require this call.
+ */
+void ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7);
#ifdef CONFIG_40x
/*
* The "residual" board information structure the boot loader passes
#define PPC44x_TLB_U2 0x00002000 /* User 2 */
#define PPC44x_TLB_U3 0x00001000 /* User 3 */
#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */
+#ifdef CONFIG_XILINX_DISABLE_44x_CACHE
+#define PPC44x_XILINX_TLB_I 0x00000400 /* Caching is inhibited */
+#else
+#define PPC44x_XILINX_TLB_I 0x00000000 /* Caching is uninhibited */
+#endif
#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
#ifndef __ASM_SERIAL_H__
#define __ASM_SERIAL_H__
+/* FIXME (wgr): temporary hack -- pull the debug UART base address from
+ * the generated xparameters.h instead of a proper platform header. */
+#if defined(CONFIG_XILINX_ML5XX) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+#include <platforms/4xx/xparameters/xparameters.h>
+#endif
#if defined(CONFIG_EV64260)
#include <platforms/ev64260.h>
extern unsigned tb_ticks_per_jiffy;
extern unsigned tb_to_us;
extern unsigned tb_last_stamp;
+extern unsigned us_to_tb;
extern unsigned long disarm_decr[NR_CPUS];
extern void to_tm(int tim, struct rtc_time * tm);
extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
+extern const struct consw xil_con; /* XILINX text console */
extern const struct consw newport_con; /* SGI Newport console */
extern const struct consw prom_con; /* SPARC PROM console */
--- /dev/null
+/*
+ * include/linux/xilinx_devices.h
+ *
+ * Definitions for any platform device related flags or structures for
+ * Xilinx EDK IPs
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#ifdef __KERNEL__
+#ifndef _XILINX_DEVICE_H_
+#define _XILINX_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+/*- 10/100 Mb Ethernet Controller IP (XEMAC) -*/
+
+/* Platform data for the Xilinx XEMAC 10/100 Mb Ethernet controller.
+ * device_flags carries XEMAC_HAS_* bits; dma_mode takes one of the
+ * XEMAC_DMA_* values defined below.  The has_*/dre/csum fields are
+ * boolean hardware-capability indicators from the EDK configuration. */
+struct xemac_platform_data {
+ u32 device_flags;
+ u32 dma_mode;
+ u32 has_mii;
+ u32 has_err_cnt;
+ u32 has_cam;
+ u32 has_jumbo;
+ u32 tx_dre;
+ u32 rx_dre;
+ u32 tx_hw_csum;
+ u32 rx_hw_csum;
+ u8 mac_addr[6]; /* factory-assigned station address */
+};
+
+/* Flags related to XEMAC device features */
+#define XEMAC_HAS_ERR_COUNT 0x00000001
+#define XEMAC_HAS_MII 0x00000002
+#define XEMAC_HAS_CAM 0x00000004
+#define XEMAC_HAS_JUMBO 0x00000008
+
+/* Possible DMA modes supported by XEMAC */
+#define XEMAC_DMA_NONE 1
+#define XEMAC_DMA_SIMPLE 2 /* simple 2 channel DMA */
+#define XEMAC_DMA_SGDMA 3 /* scatter gather DMA */
+
+/*- 10/100 Mb Ethernet Controller IP (XEMACLITE) -*/
+/* Platform data for the Xilinx XEMACLITE controller; the ping-pong
+ * fields indicate whether the second (ping-pong) TX/RX buffer is
+ * present in the hardware build. */
+struct xemaclite_platform_data {
+ u32 tx_ping_pong;
+ u32 rx_ping_pong;
+ u8 mac_addr[6]; /* factory-assigned station address */
+};
+
+/*- 10/100/1000 Mb Ethernet Controller IP (XTEMAC) -*/
+
+/* Platform data for the Xilinx XTEMAC 10/100/1000 Mb controller.
+ * dma_mode takes one of the XTEMAC_DMA_* values defined below.
+ * NOTE(review): the leading fields exist only when the hardware was
+ * generated with XPAR_TEMAC_0_INCLUDE_RX_CSUM, so the struct layout
+ * depends on that xparameters.h macro -- all users must see the same
+ * definition; confirm this is intended. */
+struct xtemac_platform_data {
+#ifdef XPAR_TEMAC_0_INCLUDE_RX_CSUM
+ u8 tx_dre;
+ u8 rx_dre;
+ u8 tx_csum;
+ u8 rx_csum;
+ u8 phy_type;
+#endif
+ u8 dma_mode;
+ u32 rx_pkt_fifo_depth;
+ u32 tx_pkt_fifo_depth;
+ u16 mac_fifo_depth;
+ u8 dcr_host; /* nonzero when the device is reached over the DCR bus */
+ u8 dre;
+
+ u8 mac_addr[6]; /* factory-assigned station address */
+};
+
+/* Possible DMA modes supported by XTEMAC */
+#define XTEMAC_DMA_NONE 1
+#define XTEMAC_DMA_SIMPLE 2 /* simple 2 channel DMA */
+#define XTEMAC_DMA_SGDMA 3 /* scatter gather DMA */
+
+
+/* LLTEMAC platform data */
+struct xlltemac_platform_data {
+ u8 tx_csum;
+ u8 rx_csum;
+ u8 phy_type;
+ u8 dcr_host;
+ u8 ll_dev_type;
+ u32 ll_dev_baseaddress;
+ u32 ll_dev_dma_rx_irq;
+ u32 ll_dev_dma_tx_irq;
+ u32 ll_dev_fifo_irq;
+
+ u8 mac_addr[6];
+};
+
+/* SPI Controller IP */
+struct xspi_platform_data {
+ s16 bus_num;
+ u16 num_chipselect;
+ u32 speed_hz;
+};
+
+/*- GPIO -*/
+
+/* Flags related to XGPIO device features */
+#define XGPIO_IS_DUAL 0x00000001
+
+#endif /* _XILINX_DEVICE_H_ */
+#endif /* __KERNEL__ */
mmput(mm);
return len;
}
+
+/*
+ * sys_memory_ok - check that the user range [addr, addr + size) lies
+ * entirely within a single mapped VMA of the current process.
+ *
+ * Returns 1 when there is no mm (kernel thread) or when one VMA covers
+ * the whole range, 0 otherwise.
+ */
+asmlinkage int sys_memory_ok(unsigned long addr, unsigned long size)
+{
+ struct vm_area_struct *vma;
+ int ret;
+ struct mm_struct *mm = current->mm;
+
+ if (!mm)
+  return 1;
+
+ /* Reject ranges that wrap around the top of the address space. */
+ if (addr + size < addr)
+  return 0;
+
+ /* A read lock is sufficient: we only look the VMA up, nothing is
+  * modified, so don't take the write lock. */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+ /* find_vma() only guarantees vm_end > addr; addr can still fall in a
+  * hole below the returned VMA, so vm_start must be checked too. */
+ ret = vma != NULL && vma->vm_start <= addr && vma->vm_end >= addr + size;
+ up_read(&mm->mmap_sem);
+ return ret;
+}
initramfs_data.cpio.gz
initramfs_list
include
+initramfs_data.S
+bin2asm
# Configuration for initramfs
#
+config INITRAMFS_NO_CHECK
+ depends on MICROBLAZE
+ bool "Assume a valid initramfs image exists. Don't check."
+ help
+ Enabling this option will cause the kernel to assume a valid
+ initramfs image exists in the kernel image. The kernel will skip the
+ check for initramfs during the boot time which will substantially
+ speed up the boot process on slow systems.
+
config INITRAMFS_SOURCE
string "Initramfs source file(s)"
default ""
#####
# Generate the initramfs cpio archive
-hostprogs-y := gen_init_cpio
+hostprogs-y := gen_init_cpio bin2asm
initramfs := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh
ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
$(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d)
$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.gz.d
$(call if_changed,initfs)
+$(obj)/initramfs_data.S: $(srctree)/usr/initramfs_data.S.in $(obj)/bin2asm $(obj)/initramfs_data.cpio.gz
+ @cat $(srctree)/usr/initramfs_data.S.in >$(obj)/initramfs_data.S
+ @$(obj)/bin2asm $(obj)/initramfs_data.cpio.gz >>$(obj)/initramfs_data.S
--- /dev/null
+/*
+ * usr/bin2asm.c
+ *
+ * Copyright 2007 Xilinx, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+
+/*
+ * Convert the binary file named in argv[1] to GAS ".byte" directives on
+ * stdout.  Exits with status 1 (and a message on stderr) on usage or
+ * open errors; the original silent "return -1" gave an undiagnosed
+ * exit status of 255.
+ */
+int main(int argc, char *argv[])
+{
+ FILE *in_file;
+ int in_byte;
+
+ if (argc != 2) {
+  fprintf(stderr, "usage: %s <binary file>\n", argv[0]);
+  return 1;
+ }
+
+ if ((in_file = fopen(argv[1], "rb")) == NULL) {
+  fprintf(stderr, "%s: cannot open %s\n", argv[0], argv[1]);
+  return 1;
+ }
+
+ printf("\n/* The following code was "
+  "automatically generated by bin2asm. Do not edit. */\n\n");
+
+ /* One ".byte" directive per input byte. */
+ while ((in_byte = fgetc(in_file)) != EOF)
+  printf("\t.byte 0x%X\n", in_byte);
+
+ printf("\n/* End of the generated code */\n");
+
+ fclose(in_file);
+ return 0;
+}
* Hard link support by Luciano Rocha
*/
+#ifdef __CYGWIN32__
+#undef PATH_MAX
+#define PATH_MAX 259
+#endif
+
#define xstr(s) #s
#define str(s) xstr(s)
goto error;
}
+#ifdef __CYGWIN32__
+ file = open (location, O_RDONLY | O_BINARY);
+#else
file = open (location, O_RDONLY);
+#endif
if (file < 0) {
fprintf (stderr, "File %s could not be opened for reading\n", location);
goto error;
*/
.section .init.ramfs,"a"
-.incbin "usr/initramfs_data.cpio.gz"