If configured as two channels, one is to transmit to the device and another
is to receive from the device.
+Xilinx AXI CDMA engine: it performs transfers between a memory-mapped source
+address and a memory-mapped destination address.
+
Required properties:
-- compatible: Should be "xlnx,axi-dma-1.00.a"
+- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
+ "xlnx,axi-cdma-1.00.a""
- #dma-cells: Should be <1>, see "dmas" property below
- - reg: Should contain VDMA registers location and length.
+ - reg: Should contain DMA registers location and length.
+- xlnx,addrwidth: Should be the vdma addressing size in bits (e.g. 32 bits).
+- dma-ranges: Should be specified as <dma_addr cpu_addr max_len>.
- dma-channel child node: Should have at least one channel and can have up to
two channels per device. This node specifies the properties of each
DMA channel (see child node properties below).
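For illustration, a device node using the properties above might look like the
following sketch. This is an editorial example, not part of this patch: the
unit addresses, interrupt specifiers, dma-ranges values, and channel
compatible strings are placeholder assumptions.

    axi_dma_0: dma@40400000 {
        compatible = "xlnx,axi-dma-1.00.a";
        #dma-cells = <1>;
        reg = <0x40400000 0x10000>;
        xlnx,addrwidth = <32>;
        dma-ranges = <0x00000000 0x00000000 0x40000000>;
        /* one channel to transmit to the device (MM2S),
         * one to receive from it (S2MM)
         */
        dma-channel@40400000 {
            compatible = "xlnx,axi-dma-mm2s-channel";
            interrupts = <0 59 4>;
        };
        dma-channel@40400030 {
            compatible = "xlnx,axi-dma-s2mm-channel";
            interrupts = <0 58 4>;
        };
    };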
--- /dev/null
- CONFIG_MTD_NAND_PL35X=y
+CONFIG_LOCALVERSION="-xilinx"
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_BUG is not set
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_ZYNQ=y
+CONFIG_PL310_ERRATA_588369=y
+CONFIG_PL310_ERRATA_727915=y
+CONFIG_PL310_ERRATA_769419=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_ARM_ERRATA_754327=y
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_ARM_ERRATA_775420=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_REALLOC_ENABLE_AUTO=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_ZYNQ_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_SRAM=y
+CONFIG_XILINX_TRAFGEN=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_NETDEVICES=y
+CONFIG_MACB=y
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_E1000E=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_XILINX_PS_EMAC=y
+CONFIG_MARVELL_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_XILINX_DEVCFG=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQ_QSPI=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_UCD9200=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7604=y
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+CONFIG_DRM=y
+CONFIG_DRM_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_ADI=y
+CONFIG_SND_SOC_ADI_AXI_I2S=y
+CONFIG_SND_SOC_ADI_AXI_SPDIF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ZERO=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_DMADEVICES=y
+CONFIG_PL330_DMA=y
+CONFIG_XILINX_DMA_ENGINES=y
+CONFIG_XILINX_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=y
+CONFIG_UIO_XILINX_APM=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
__smp_cross_call = fn;
}
-static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_WAKEUP, "CPU wakeup interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_COMPLETION, "completion interrupts"),
+struct ipi {
+ const char *desc;
+ void (*handler)(void);
+};
+
+static void ipi_cpu_stop(void);
+static void ipi_complete(void);
+
+#define IPI_DESC_STRING_IPI_WAKEUP "CPU wakeup interrupts"
+#define IPI_DESC_STRING_IPI_TIMER "Timer broadcast interrupts"
+#define IPI_DESC_STRING_IPI_RESCHEDULE "Rescheduling interrupts"
+#define IPI_DESC_STRING_IPI_CALL_FUNC "Function call interrupts"
+#define IPI_DESC_STRING_IPI_CPU_STOP "CPU stop interrupts"
+#define IPI_DESC_STRING_IPI_IRQ_WORK "IRQ work interrupts"
+#define IPI_DESC_STRING_IPI_COMPLETION "completion interrupts"
+
+#define IPI_DESC_STR(x) IPI_DESC_STRING_ ## x
+
+static const char *ipi_desc_strings[] __tracepoint_string =
+ {
+ [IPI_WAKEUP] = IPI_DESC_STR(IPI_WAKEUP),
+ [IPI_TIMER] = IPI_DESC_STR(IPI_TIMER),
+ [IPI_RESCHEDULE] = IPI_DESC_STR(IPI_RESCHEDULE),
+ [IPI_CALL_FUNC] = IPI_DESC_STR(IPI_CALL_FUNC),
+ [IPI_CPU_STOP] = IPI_DESC_STR(IPI_CPU_STOP),
+ [IPI_IRQ_WORK] = IPI_DESC_STR(IPI_IRQ_WORK),
- [IPI_COMPLETION] = IPI_DESC_STR(IPI_COMPLETION)
++ [IPI_COMPLETION] = IPI_DESC_STR(IPI_COMPLETION),
+ };
+
+
+static void tick_receive_broadcast_local(void)
+{
+ tick_receive_broadcast();
+}
+
+static struct ipi ipi_types[NR_IPI] = {
+#define S(x, f) [x].desc = IPI_DESC_STR(x), [x].handler = f
+ S(IPI_WAKEUP, NULL),
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ S(IPI_TIMER, tick_receive_broadcast_local),
+#endif
+ S(IPI_RESCHEDULE, scheduler_ipi),
+ S(IPI_CALL_FUNC, generic_smp_call_function_interrupt),
+ S(IPI_CPU_STOP, ipi_cpu_stop),
+#ifdef CONFIG_IRQ_WORK
+ S(IPI_IRQ_WORK, irq_work_run),
+#endif
+ S(IPI_COMPLETION, ipi_complete),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise(target, ipi_desc_strings[ipinr]);
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
++ trace_ipi_raise_rcuidle(target, ipi_desc_strings[ipinr]);
__smp_cross_call(target, ipinr);
}
--- /dev/null
- CONFIG_MTD_NAND_ARASAN=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_XILINX_NWL=y
+CONFIG_NR_CPUS=8
+CONFIG_HOTPLUG_CPU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CMA=y
+CONFIG_COMPAT=y
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE=m
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_REG_RELAX_NO_IR=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_REGULATOR=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=128
+CONFIG_ARM_CCI400_PMU=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_XILINX_JESD204B=y
+CONFIG_XILINX_JESD204B_PHY=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_MACB_EXT_BD=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_AT803X_PHY=y
+CONFIG_AMD_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_BCM7XXX_PHY=y
+CONFIG_BCM87XX_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_STE10XP=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_USB_USBNET=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SPI=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_MAX310X=y
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_I2C_XILINX=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_POWER_RESET_LTC2952=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_MAX20751=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+CONFIG_DRM_XILINX=y
+CONFIG_FB_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XILINX_DP=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_EDAC_ZYNQMP_OCM=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_DMADEVICES=y
+CONFIG_XILINX_DMA_ENGINES=y
+CONFIG_XILINX_DPDMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_XILINX_APM=y
+CONFIG_STAGING=y
+CONFIG_COMMON_CLK_SI570=y
+# CONFIG_COMMON_CLK_XGENE is not set
+CONFIG_ARM_SMMU=y
+CONFIG_SOC_XILINX_ZYNQMP=y
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_XILINX_INTC=y
+CONFIG_PHY_XILINX_ZYNQMP=y
+CONFIG_FPGA=y
+CONFIG_FPGA_MGR_ZYNQMP_FPGA=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_HW is not set
#endif /* CONFIG_IOMMU_DMA */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- struct iommu_ops *iommu, bool coherent)
+ const struct iommu_ops *iommu, bool coherent)
{
- if (!dev->archdata.dma_ops)
- dev->archdata.dma_ops = &swiotlb_dma_ops;
+ if (!acpi_disabled && !dev->archdata.dma_ops)
+ dev->archdata.dma_ops = dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
+obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
obj-$(CONFIG_H8300) += h8300/
+ obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
--
++
Say Y here if you enabled MMP ADMA, otherwise say N.
config MPC512X_DMA
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_XILINX) += xilinx/
+obj-$(CONFIG_DRM_ZOCL) += zocl/
+obj-$(CONFIG_DRM_XYLON) += xylon/
obj-$(CONFIG_DRM_IMX) += imx/
+ obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
--- /dev/null
- /* allow disable vblank */
- drm->vblank_disable_allowed = 1;
-
+/*
+ * Xilinx DRM KMS support for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_connector.h"
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_gem.h"
+
+#define DRIVER_NAME "xilinx_drm"
+#define DRIVER_DESC "Xilinx DRM KMS support for Xilinx"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xilinx_drm_fbdev_vres = 2;
+module_param_named(fbdev_vres, xilinx_drm_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/*
+ * TODO: The possible pipeline configurations are numerous with Xilinx soft IPs.
+ * It's not too bad for now, but a more proper approach (Common Display
+ * Framework, or some internal abstraction) should be considered when it
+ * reaches the point that such a thing is required.
+ */
+
+struct xilinx_drm_private {
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ struct drm_fb_helper *fb;
+ struct platform_device *pdev;
+ bool is_master;
+};
+
+/**
+ * struct xilinx_video_format_desc - Xilinx Video IP video format description
+ * @name: Xilinx video format name
+ * @depth: color depth
+ * @bpp: bits per pixel
+ * @xilinx_format: xilinx format code
+ * @drm_format: drm format code
+ */
+struct xilinx_video_format_desc {
+ const char *name;
+ unsigned int depth;
+ unsigned int bpp;
+ unsigned int xilinx_format;
+ uint32_t drm_format;
+};
+
+static const struct xilinx_video_format_desc xilinx_video_formats[] = {
+ { "yuv420", 16, 16, XILINX_VIDEO_FORMAT_YUV420, DRM_FORMAT_YUV420 },
+ { "uvy422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_UYVY },
+ { "vuy422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_VYUY },
+ { "yuv422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_YUYV },
+ { "yvu422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_YVYU },
+ { "yuv444", 24, 24, XILINX_VIDEO_FORMAT_YUV444, DRM_FORMAT_YUV444 },
+ { "nv12", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV12 },
+ { "nv21", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV21 },
+ { "nv16", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV16 },
+ { "nv61", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV61 },
+ { "abgr1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR1555 },
+ { "argb1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB1555 },
+ { "rgba4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA4444 },
+ { "bgra4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA4444 },
+ { "bgr565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGR565 },
+ { "rgb565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGB565 },
+ { "bgr888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_BGR888 },
+ { "rgb888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_RGB888 },
+ { "xbgr8888", 24, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_XBGR8888 },
+ { "xrgb8888", 24, 32, XILINX_VIDEO_FORMAT_XRGB, DRM_FORMAT_XRGB8888 },
+ { "abgr8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR8888 },
+ { "argb8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB8888 },
+ { "bgra8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA8888 },
+ { "rgba8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA8888 },
+};
+
+/**
+ * xilinx_drm_check_format - Check if the given format is supported
+ * @drm: DRM device
+ * @fourcc: format fourcc
+ *
+ * Check if the given format @fourcc is supported by the current pipeline
+ *
+ * Return: true if the format is supported, or false
+ */
+bool xilinx_drm_check_format(struct drm_device *drm, uint32_t fourcc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_check_format(private->crtc, fourcc);
+}
+
+/**
+ * xilinx_drm_get_format - Get the current device format
+ * @drm: DRM device
+ *
+ * Get the current format of the pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+uint32_t xilinx_drm_get_format(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_get_format(private->crtc);
+}
+
+/**
+ * xilinx_drm_get_align - Get the alignment value for pitch
+ * @drm: DRM object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_get_align(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_get_align(private->crtc);
+}
+
+void xilinx_drm_set_config(struct drm_device *drm, struct drm_mode_set *set)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ if (private && private->fb)
+ xilinx_drm_fb_set_config(private->fb, set);
+}
+
+/* poll changed handler */
+static void xilinx_drm_output_poll_changed(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_fb_hotplug_event(private->fb);
+}
+
+static const struct drm_mode_config_funcs xilinx_drm_mode_config_funcs = {
+ .fb_create = xilinx_drm_fb_create,
+ .output_poll_changed = xilinx_drm_output_poll_changed,
+};
+
+/* enable vblank */
+static int xilinx_drm_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_enable_vblank(private->crtc);
+
+ return 0;
+}
+
+/* disable vblank */
+static void xilinx_drm_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_disable_vblank(private->crtc);
+}
+
+/* initialize mode config */
+static void xilinx_drm_mode_config_init(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width =
+ xilinx_drm_crtc_get_max_width(private->crtc);
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &xilinx_drm_mode_config_funcs;
+}
+
+/* convert xilinx format to drm format by code */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, uint32_t *drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->xilinx_format == xilinx_format) {
+ *drm_format = format->drm_format;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Unknown Xilinx video format: %d\n", xilinx_format);
+
+ return -EINVAL;
+}
+
+/* convert xilinx format to drm format by name */
+int xilinx_drm_format_by_name(const char *name, uint32_t *drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (strcmp(format->name, name) == 0) {
+ *drm_format = format->drm_format;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Unknown Xilinx video format: %s\n", name);
+
+ return -EINVAL;
+}
+
+/* get bpp of given format */
+unsigned int xilinx_drm_format_bpp(uint32_t drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->bpp;
+ }
+
+ return 0;
+}
+
+/* get color depth of given format */
+unsigned int xilinx_drm_format_depth(uint32_t drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->depth;
+ }
+
+ return 0;
+}
+
+/* load xilinx drm */
+static int xilinx_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct xilinx_drm_private *private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct device_node *encoder_node;
+ struct platform_device *pdev = drm->platformdev;
+ unsigned int bpp, align, i = 0;
+ int ret;
+
+ private = devm_kzalloc(drm->dev, sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ drm_mode_config_init(drm);
+
+ /* create a xilinx crtc */
+ private->crtc = xilinx_drm_crtc_create(drm);
+ if (IS_ERR(private->crtc)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx crtc\n");
+ ret = PTR_ERR(private->crtc);
+ goto err_out;
+ }
+
+ while ((encoder_node = of_parse_phandle(drm->dev->of_node,
+ "xlnx,encoder-slave", i))) {
+ encoder = xilinx_drm_encoder_create(drm, encoder_node);
+ of_node_put(encoder_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx encoder\n");
+ ret = PTR_ERR(encoder);
+ goto err_out;
+ }
+
+ connector = xilinx_drm_connector_create(drm, encoder, i);
+ if (IS_ERR(connector)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx connector\n");
+ ret = PTR_ERR(connector);
+ goto err_out;
+ }
+
+ i++;
+ }
+
+ if (i == 0) {
+ DRM_ERROR("failed to get an encoder slave node\n");
+ return -ENODEV;
+ }
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto err_out;
+ }
+
+ /* enable irq to enable vblank feature */
+ drm->irq_enabled = 1;
+
+ drm->dev_private = private;
+ private->drm = drm;
+
+ /* initialize xilinx framebuffer */
+ bpp = xilinx_drm_format_bpp(xilinx_drm_crtc_get_format(private->crtc));
+ align = xilinx_drm_crtc_get_align(private->crtc);
+ private->fb = xilinx_drm_fb_init(drm, bpp, 1, 1, align,
+ xilinx_drm_fbdev_vres);
+ if (IS_ERR(private->fb)) {
+ DRM_ERROR("failed to initialize drm cma fb\n");
+ ret = PTR_ERR(private->fb);
+ goto err_fb;
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ /* set up mode config for xilinx */
+ xilinx_drm_mode_config_init(drm);
+
+ drm_helper_disable_unused_functions(drm);
+
+ platform_set_drvdata(pdev, private);
+
+ return 0;
+
+err_fb:
+ drm_vblank_cleanup(drm);
+err_out:
+ drm_mode_config_cleanup(drm);
+ if (ret == -EPROBE_DEFER)
+ DRM_INFO("load() is defered & will be called again\n");
+ return ret;
+}
+
+/* unload xilinx drm */
+static int xilinx_drm_unload(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ drm_vblank_cleanup(drm);
+
+ drm_kms_helper_poll_fini(drm);
+
+ xilinx_drm_fb_fini(private->fb);
+
+ drm_mode_config_cleanup(drm);
+
+ return 0;
+}
+
+int xilinx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xilinx_drm_private *private = dev->dev_private;
+
+ if (!(drm_is_primary_client(file) && !file->minor->master) &&
+ capable(CAP_SYS_ADMIN)) {
+ file->is_master = 1;
+ private->is_master = true;
+ }
+
+ return 0;
+}
+
+/* preclose */
+static void xilinx_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ /* cancel pending page flip request */
+ xilinx_drm_crtc_cancel_page_flip(private->crtc, file);
+
+ if (private->is_master) {
+ private->is_master = false;
+ file->is_master = 0;
+ }
+}
+
+/* restore the default mode when xilinx drm is released */
+static void xilinx_drm_lastclose(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_restore(private->crtc);
+
+ xilinx_drm_fb_restore_mode(private->fb);
+}
+
+static int xilinx_drm_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ return 0;
+}
+
+static const struct file_operations xilinx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xilinx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_PRIME,
+ .load = xilinx_drm_load,
+ .unload = xilinx_drm_unload,
+ .open = xilinx_drm_open,
+ .preclose = xilinx_drm_preclose,
+ .lastclose = xilinx_drm_lastclose,
+ .set_busid = xilinx_drm_set_busid,
+
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = xilinx_drm_enable_vblank,
+ .disable_vblank = xilinx_drm_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xilinx_drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .fops = &xilinx_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#if defined(CONFIG_PM_SLEEP)
+/* suspend xilinx drm */
+static int xilinx_drm_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_kms_helper_poll_disable(drm);
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector,
+ DRM_MODE_DPMS_SUSPEND);
+
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+/* resume xilinx drm */
+static int xilinx_drm_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ if (connector->funcs->dpms) {
+ int dpms = connector->dpms;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->funcs->dpms(connector, dpms);
+ }
+ }
+ drm_kms_helper_poll_enable_locked(drm);
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops xilinx_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_pm_suspend, xilinx_drm_pm_resume)
+};
+
+/* init xilinx drm platform */
+static int xilinx_drm_platform_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&xilinx_drm_driver, pdev);
+}
+
+/* exit xilinx drm platform */
+static int xilinx_drm_platform_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_put_dev(private->drm);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_drm_of_match[] = {
+ { .compatible = "xlnx,drm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_of_match);
+
+static struct platform_driver xilinx_drm_private_driver = {
+ .probe = xilinx_drm_platform_probe,
+ .remove = xilinx_drm_platform_remove,
+ .driver = {
+ .name = "xilinx-drm",
+ .pm = &xilinx_drm_pm_ops,
+ .of_match_table = xilinx_drm_of_match,
+ },
+};
+
+module_platform_driver(xilinx_drm_private_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
- obj = drm_gem_object_lookup(drm, file_priv,
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+
+struct xilinx_drm_fb {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj[4];
+};
+
+struct xilinx_drm_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct xilinx_drm_fb *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+ struct drm_display_mode old_mode;
+ bool mode_backup;
+};
+
+static inline struct xilinx_drm_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xilinx_drm_fbdev, fb_helper);
+}
+
+static inline struct xilinx_drm_fb *to_fb(struct drm_framebuffer *base_fb)
+{
+ return container_of(base_fb, struct xilinx_drm_fb, base);
+}
+
+static void xilinx_drm_fb_destroy(struct drm_framebuffer *base_fb)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (fb->obj[i])
+ drm_gem_object_unreference_unlocked(&fb->obj[i]->base);
+
+ drm_framebuffer_cleanup(base_fb);
+ kfree(fb);
+}
+
+static int xilinx_drm_fb_create_handle(struct drm_framebuffer *base_fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ return drm_gem_handle_create(file_priv, &fb->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs xilinx_drm_fb_funcs = {
+ .destroy = xilinx_drm_fb_destroy,
+ .create_handle = xilinx_drm_fb_create_handle,
+};
+
+/**
+ * xilinx_drm_fb_alloc - Allocate a xilinx_drm_fb
+ * @drm: DRM object
+ * @mode_cmd: drm_mode_fb_cmd2 struct
+ * @obj: pointers for returned drm_gem_cma_objects
+ * @num_planes: number of planes to be allocated
+ *
+ * This function is based on drm_fb_cma_alloc().
+ *
+ * Return: a xilinx_drm_fb object, or ERR_PTR.
+ */
+static struct xilinx_drm_fb *
+xilinx_drm_fb_alloc(struct drm_device *drm,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_cma_object **obj, unsigned int num_planes)
+{
+ struct xilinx_drm_fb *fb;
+ int ret;
+ int i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(drm, &fb->base, &xilinx_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("Failed to initialize framebuffer: %d\n", ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * xilinx_drm_fb_get_gem_obj - Get CMA GEM object for framebuffer
+ * @base_fb: the framebuffer
+ * @plane: which plane
+ *
+ * This function is based on drm_fb_cma_get_gem_obj().
+ *
+ * Return: a CMA GEM object for the given framebuffer, or NULL if not available.
+ */
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ if (plane >= 4)
+ return NULL;
+
+ return fb->obj[plane];
+}
+
+int xilinx_drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_set *modeset;
+ int ret = 0;
+ int i;
+
+ if (oops_in_progress)
+ return -EBUSY;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ ret = drm_mode_set_config_internal(modeset);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * xilinx_drm_fb_set_config - synchronize resolution changes with fbdev
+ * @fb_helper: fb helper structure
+ * @set: mode set configuration
+ */
+void xilinx_drm_fb_set_config(struct drm_fb_helper *fb_helper,
+ struct drm_mode_set *set)
+{
+ if (fb_helper && set) {
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+
+ if (fbdev && fb_helper->crtc_info &&
+ fb_helper->crtc_info[0].mode_set.mode && set->mode) {
+ if (!fbdev->mode_backup) {
+ fbdev->old_mode =
+ *fb_helper->crtc_info[0].mode_set.mode;
+ fbdev->mode_backup = true;
+ }
+ drm_mode_copy(fb_helper->crtc_info[0].mode_set.mode,
+ set->mode);
+ }
+ }
+}
+
+int
+xilinx_drm_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ unsigned int i;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set;
+ struct drm_crtc *crtc;
+
+ mode_set = &fb_helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static struct fb_ops xilinx_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = xilinx_drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xilinx_drm_fb_ioctl,
+};
+
+/**
+ * xilinx_drm_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @sizes: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xilinx_drm_fbdev_create(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *base_fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel,
+ fbdev->align);
+ mode_cmd.pixel_format = xilinx_drm_get_format(drm);
+
+ mode_cmd.height *= fbdev->vres_mult;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ DRM_ERROR("Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ fbdev->fb = xilinx_drm_fb_alloc(drm, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev->fb)) {
+ DRM_ERROR("Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ base_fb = &fbdev->fb->base;
+ fb_helper->fb = base_fb;
+ fb_helper->fbdev = fbi;
+
+ fbi->par = fb_helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xilinx_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("Failed to allocate color map.\n");
+ goto err_xilinx_drm_fb_destroy;
+ }
+
+ drm_fb_helper_fill_fix(fbi, base_fb->pitches[0], base_fb->depth);
+ drm_fb_helper_fill_var(fbi, fb_helper, base_fb->width, base_fb->height);
+ fbi->var.yres = base_fb->height / fbdev->vres_mult;
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * base_fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_xilinx_drm_fb_destroy:
+ drm_framebuffer_unregister_private(base_fb);
+ xilinx_drm_fb_destroy(base_fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
+static struct drm_fb_helper_funcs xilinx_drm_fb_helper_funcs = {
+ .fb_probe = xilinx_drm_fbdev_create,
+};
+
+/**
+ * xilinx_drm_fb_init - Allocate and initializes the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @num_crtc: number of CRTCs
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or an ERR_PTR.
+ */
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, unsigned int preferred_bpp,
+ unsigned int num_crtc, unsigned int max_conn_count,
+ unsigned int align, unsigned int vres_mult)
+{
+ struct xilinx_drm_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ DRM_ERROR("Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev->vres_mult = vres_mult;
+
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xilinx_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper, num_crtc, max_conn_count);
+ if (ret < 0) {
+ DRM_ERROR("Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_ERROR("Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+
+ }
+
+ drm_helper_disable_unused_functions(drm);
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_ERROR("Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * xilinx_drm_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+
+ if (fbdev->fb_helper.fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fbdev->fb_helper.fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(&fbdev->fb->base);
+ xilinx_drm_fb_destroy(&fbdev->fb->base);
+ }
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xilinx_drm_fb_restore_mode - Restores initial framebuffer mode
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_restore_mode() and usually called
+ * from the Xilinx DRM driver's lastclose callback.
+ */
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper)
+{
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+
+ /* restore old display mode */
+ if (fb_helper && fbdev && fbdev->mode_backup &&
+ fb_helper->crtc_info &&
+ fb_helper->crtc_info[0].mode_set.mode) {
+ drm_mode_copy(fb_helper->crtc_info[0].mode_set.mode,
+ &(fbdev->old_mode));
+ fbdev->mode_backup = false;
+ }
+
+ if (fb_helper)
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+}
+
+/**
+ * xilinx_drm_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer for the given mode @mode_cmd. It is
+ * intended to be used as the fb_create callback function of
+ * drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or ERR_PTR.
+ */
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct xilinx_drm_fb *fb;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
++ obj = drm_gem_object_lookup(file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i] + width *
+ drm_format_plane_cpp(mode_cmd->pixel_format, i) +
+ mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb = xilinx_drm_fb_alloc(drm, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_unreference;
+ }
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->base.depth,
+ &fb->base.bits_per_pixel);
+ if (!fb->base.bits_per_pixel)
+ fb->base.bits_per_pixel =
+ xilinx_drm_format_bpp(mode_cmd->pixel_format);
+
+ return &fb->base;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
+
+
+/**
+ * xilinx_drm_fb_hotplug_event - Poll for hotplug events
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_hotplug_event() and usually called
+ * from the Xilinx DRM driver's output_poll_changed callback.
+ */
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ if (fb_helper) {
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+
+ if (fbdev)
+ fbdev->mode_backup = false;
+ }
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+}
--- /dev/null
- dev->vblank_disable_allowed = 1;
+/*
+ * Xylon DRM driver functions
+ *
+ * Copyright (C) 2014 Xylon d.o.o.
+ * Author: Davor Joja <davor.joja@logicbricks.com>
+ *
+ * Based on Xilinx DRM driver.
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "xylon_connector.h"
+#include "xylon_crtc.h"
+#include "xylon_drv.h"
+#include "xylon_encoder.h"
+#include "xylon_fb.h"
+#include "xylon_fbdev.h"
+#include "xylon_irq.h"
+
+#define DEVICE_NAME "logicvc"
+
+#define DRIVER_NAME "xylon-drm"
+#define DRIVER_DESCRIPTION "Xylon DRM driver for logiCVC IP core"
+#define DRIVER_VERSION "1.1"
+#define DRIVER_DATE "20140701"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static int xylon_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct xylon_drm_device *xdev;
+ unsigned int bpp;
+ int ret;
+
+ xdev = devm_kzalloc(dev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+ xdev->dev = dev;
+
+ dev->dev_private = xdev;
+
+ drm_mode_config_init(dev);
+
+ drm_kms_helper_poll_init(dev);
+
+ xdev->crtc = xylon_drm_crtc_create(dev);
+ if (IS_ERR(xdev->crtc)) {
+ DRM_ERROR("failed create xylon crtc\n");
+ ret = PTR_ERR(xdev->crtc);
+ goto err_out;
+ }
+
+ xylon_drm_mode_config_init(dev);
+
+ xdev->encoder = xylon_drm_encoder_create(dev);
+ if (IS_ERR(xdev->encoder)) {
+ DRM_ERROR("failed create xylon encoder\n");
+ ret = PTR_ERR(xdev->encoder);
+ goto err_out;
+ }
+
+ xdev->connector = xylon_drm_connector_create(dev, xdev->encoder);
+ if (IS_ERR(xdev->connector)) {
+ DRM_ERROR("failed create xylon connector\n");
+ ret = PTR_ERR(xdev->connector);
+ goto err_out;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret) {
+ DRM_ERROR("failed initialize vblank\n");
+ goto err_out;
+ }
+
+ ret = xylon_drm_irq_install(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed install irq\n");
+ goto err_irq;
+ }
+
+ ret = xylon_drm_crtc_get_param(xdev->crtc, &bpp,
+ XYLON_DRM_CRTC_BUFF_BPP);
+ if (ret) {
+ DRM_ERROR("failed get bpp\n");
+ goto err_fbdev;
+ }
+ xdev->fbdev = xylon_drm_fbdev_init(dev, bpp, 1, 1);
+ if (IS_ERR(xdev->fbdev)) {
+ DRM_ERROR("failed initialize fbdev\n");
+ ret = PTR_ERR(xdev->fbdev);
+ goto err_fbdev;
+ }
+
+ drm_helper_disable_unused_functions(dev);
+
+ platform_set_drvdata(pdev, xdev);
+
+ return 0;
+
+err_fbdev:
+ xylon_drm_irq_uninstall(dev);
+err_irq:
+ drm_vblank_cleanup(dev);
+err_out:
+ drm_mode_config_cleanup(dev);
+
+ if (ret == -EPROBE_DEFER)
+ DRM_INFO("driver load deferred, will be called again\n");
+
+ return ret;
+}
+
+static int xylon_drm_unload(struct drm_device *dev)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_irq_uninstall(dev);
+
+ drm_vblank_cleanup(dev);
+
+ drm_kms_helper_poll_fini(dev);
+
+ xylon_drm_fbdev_fini(xdev->fbdev);
+
+ drm_mode_config_cleanup(dev);
+
+ return 0;
+}
+
+static void xylon_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_crtc_cancel_page_flip(xdev->crtc, file);
+}
+
+static void xylon_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+}
+
+static void xylon_drm_lastclose(struct drm_device *dev)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_crtc_properties_restore(xdev->crtc);
+
+ xylon_drm_fbdev_restore_mode(xdev->fbdev);
+}
+
+static int xylon_drm_vblank_enable(struct drm_device *dev, int crtc)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_crtc_vblank(xdev->crtc, true);
+
+ return 0;
+}
+
+static void xylon_drm_vblank_disable(struct drm_device *dev, int crtc)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_crtc_vblank(xdev->crtc, false);
+}
+
+static int xylon_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ struct xylon_drm_device *xdev = dev->dev_private;
+ unsigned int buff_width;
+ int ret;
+
+ ret = xylon_drm_crtc_get_param(xdev->crtc, &buff_width,
+ XYLON_DRM_CRTC_BUFF_WIDTH);
+ if (ret)
+ return ret;
+
+ args->pitch = buff_width * DIV_ROUND_UP(args->bpp, 8);
+ args->size = (u64)(buff_width * DIV_ROUND_UP(args->bpp, 8) *
+ args->height);
+
+ cma_obj = drm_gem_cma_create(dev, (unsigned int)args->size);
+ if (IS_ERR(cma_obj))
+ return PTR_ERR(cma_obj);
+
+ gem_obj = &cma_obj->base;
+
+ ret = drm_gem_handle_create(file_priv, gem_obj, &args->handle);
+ if (ret)
+ goto err_handle_create;
+
+ drm_gem_object_unreference_unlocked(gem_obj);
+
+ return PTR_ERR_OR_ZERO(cma_obj);
+
+err_handle_create:
+ drm_gem_cma_free_object(gem_obj);
+
+ return ret;
+}
+
+static const struct file_operations xylon_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xylon_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = xylon_drm_load,
+ .unload = xylon_drm_unload,
+ .preclose = xylon_drm_preclose,
+ .postclose = xylon_drm_postclose,
+ .lastclose = xylon_drm_lastclose,
+
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = xylon_drm_vblank_enable,
+ .disable_vblank = xylon_drm_vblank_disable,
+
+ .irq_preinstall = xylon_drm_irq_preinst,
+ .irq_postinstall = xylon_drm_irq_postinst,
+ .irq_uninstall = xylon_drm_irq_uninst,
+ .irq_handler = xylon_drm_irq_handler,
+
+ .gem_free_object = drm_gem_cma_free_object,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .dumb_create = xylon_drm_gem_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ .fops = &xylon_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESCRIPTION,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int __maybe_unused xylon_drm_pm_suspend(struct device *dev)
+{
+ struct xylon_drm_device *xdev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(xdev->dev);
+ drm_helper_connector_dpms(xdev->connector, DRM_MODE_DPMS_SUSPEND);
+
+ return 0;
+}
+
+static int __maybe_unused xylon_drm_pm_resume(struct device *dev)
+{
+ struct xylon_drm_device *xdev = dev_get_drvdata(dev);
+
+ drm_helper_connector_dpms(xdev->connector, DRM_MODE_DPMS_ON);
+ drm_kms_helper_poll_enable(xdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xylon_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xylon_drm_pm_suspend, xylon_drm_pm_resume)
+ SET_RUNTIME_PM_OPS(xylon_drm_pm_suspend, xylon_drm_pm_resume, NULL)
+};
+
+static int xylon_drm_platform_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&xylon_drm_driver, pdev);
+}
+
+static int xylon_drm_platform_remove(struct platform_device *pdev)
+{
+ struct xylon_drm_device *xdev = platform_get_drvdata(pdev);
+
+ drm_put_dev(xdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id xylon_drm_of_match[] = {
+ { .compatible = "xylon,drm-1.00.a", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xylon_drm_of_match);
+
+static struct platform_driver xylon_drm_platform_driver = {
+ .probe = xylon_drm_platform_probe,
+ .remove = xylon_drm_platform_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &xylon_drm_pm_ops,
+ .of_match_table = xylon_drm_of_match,
+ },
+};
+
+module_platform_driver(xylon_drm_platform_driver);
+
+MODULE_AUTHOR("Xylon d.o.o.");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL v2");
--- /dev/null
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+/*
+ * Xylon DRM driver fb functions
+ *
+ * Copyright (C) 2014 Xylon d.o.o.
+ * Author: Davor Joja <davor.joja@logicbricks.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+
+#include "xylon_crtc.h"
+#include "xylon_drv.h"
+#include "xylon_fb.h"
+#include "xylon_fbdev.h"
+
+#define fb_to_xylon_drm_fb(x) container_of(x, struct xylon_drm_fb, fb)
+
+struct xylon_drm_fb {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj;
+};
+
+static void xylon_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *obj;
+ struct xylon_drm_fb *xfb = fb_to_xylon_drm_fb(fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ obj = xfb->obj;
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+
+ kfree(xfb);
+}
+
+static int xylon_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct xylon_drm_fb *xfb = fb_to_xylon_drm_fb(fb);
+
+ return drm_gem_handle_create(file_priv, xfb->obj, handle);
+}
+
+static struct drm_framebuffer_funcs xylon_fb_funcs = {
+ .destroy = xylon_drm_fb_destroy,
+ .create_handle = xylon_drm_fb_create_handle,
+};
+
+struct drm_framebuffer *xylon_drm_fb_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct drm_framebuffer *fb;
+ struct xylon_drm_fb *xfb;
+ int ret;
+
+ xfb = kzalloc(sizeof(*xfb), GFP_KERNEL);
+ if (!xfb) {
+ DRM_ERROR("failed to allocate framebuffer\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ xfb->obj = obj;
+
+ fb = &xfb->fb;
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &xylon_fb_funcs);
+ if (ret) {
+ DRM_ERROR("failed framebuffer init\n");
+ goto err;
+ }
+
+ return fb;
+
+err:
+ xylon_drm_fb_destroy(fb);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *xylon_drm_fb_get_gem_obj(struct drm_framebuffer *fb)
+{
+ struct xylon_drm_fb *xfb = fb_to_xylon_drm_fb(fb);
+
+ return xfb->obj;
+}
+
+static struct drm_framebuffer *
+xylon_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct xylon_drm_device *xdev = dev->dev_private;
+ bool res;
+
+ res = xylon_drm_crtc_check_format(xdev->crtc, mode_cmd->pixel_format);
+ if (!res) {
+ DRM_ERROR("unsupported pixel format %08x\n",
+ mode_cmd->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
++ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-EINVAL);
+
+ return xylon_drm_fb_init(dev, mode_cmd, obj);
+}
+
+static void xylon_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_fbdev_hotplug_event(xdev->fbdev);
+}
+
+static const struct drm_mode_config_funcs xylon_drm_mode_config_funcs = {
+ .fb_create = xylon_drm_fb_create,
+ .output_poll_changed = xylon_drm_output_poll_changed,
+};
+
+void xylon_drm_mode_config_init(struct drm_device *dev)
+{
+ struct xylon_drm_device *xdev = dev->dev_private;
+
+ xylon_drm_crtc_get_fix_parameters(xdev->crtc);
+
+ dev->mode_config.funcs = &xylon_drm_mode_config_funcs;
+}
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
/* Broken Clock divider zero in controller */
#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
- #define SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN (1<<17)
-
- /*
- * When internal clock is disabled, a delay is needed before modifying the
- * SD clock frequency or enabling back the internal clock.
- */
- #define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
+/* Broken Clock between 19MHz-25MHz */
++#define SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN (1<<16)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
--- /dev/null
- #include <linux/of_mtd.h>
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define DRIVER_NAME "arasan_nfc"
+#define EVNT_TIMEOUT 1000
+#define STATUS_TIMEOUT 2000
+#define ANFC_PM_TIMEOUT 1000 /* ms */
+
+#define PKT_OFST 0x00
+#define MEM_ADDR1_OFST 0x04
+#define MEM_ADDR2_OFST 0x08
+#define CMD_OFST 0x0C
+#define PROG_OFST 0x10
+#define INTR_STS_EN_OFST 0x14
+#define INTR_SIG_EN_OFST 0x18
+#define INTR_STS_OFST 0x1C
+#define READY_STS_OFST 0x20
+#define DMA_ADDR1_OFST 0x24
+#define FLASH_STS_OFST 0x28
+#define DATA_PORT_OFST 0x30
+#define ECC_OFST 0x34
+#define ECC_ERR_CNT_OFST 0x38
+#define ECC_SPR_CMD_OFST 0x3C
+#define ECC_ERR_CNT_1BIT_OFST 0x40
+#define ECC_ERR_CNT_2BIT_OFST 0x44
+#define DMA_ADDR0_OFST 0x50
+#define DATA_INTERFACE_REG 0x6C
+
+#define PKT_CNT_SHIFT 12
+
+#define ECC_ENABLE BIT(31)
+#define DMA_EN_MASK GENMASK(27, 26)
+#define DMA_ENABLE 0x2
+#define DMA_EN_SHIFT 26
+#define PAGE_SIZE_MASK GENMASK(25, 23)
+#define PAGE_SIZE_SHIFT 23
+#define PAGE_SIZE_512 0
+#define PAGE_SIZE_1K 5
+#define PAGE_SIZE_2K 1
+#define PAGE_SIZE_4K 2
+#define PAGE_SIZE_8K 3
+#define PAGE_SIZE_16K 4
+#define CMD2_SHIFT 8
+#define ADDR_CYCLES_SHIFT 28
+
+#define XFER_COMPLETE BIT(2)
+#define READ_READY BIT(1)
+#define WRITE_READY BIT(0)
+#define MBIT_ERROR BIT(3)
+#define ERR_INTRPT BIT(4)
+
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define ONFI_STATUS_FAIL BIT(0)
+#define ONFI_STATUS_READY BIT(6)
+
+#define PG_ADDR_SHIFT 16
+#define BCH_MODE_SHIFT 25
+#define BCH_EN_SHIFT 27
+#define ECC_SIZE_SHIFT 16
+
+#define MEM_ADDR_MASK GENMASK(7, 0)
+#define BCH_MODE_MASK GENMASK(27, 25)
+
+#define CS_MASK GENMASK(31, 30)
+#define CS_SHIFT 30
+
+#define PAGE_ERR_CNT_MASK GENMASK(16, 8)
+#define PKT_ERR_CNT_MASK GENMASK(7, 0)
+
+#define NVDDR_MODE BIT(9)
+#define NVDDR_TIMING_MODE_SHIFT 3
+
+#define ONFI_ID_LEN 8
+#define TEMP_BUF_SIZE 512
+#define NVDDR_MODE_PACKET_SIZE 8
+#define SDR_MODE_PACKET_SIZE 4
+
+/**
+ * struct anfc_ecc_matrix - Defines ecc information storage format
+ * @pagesize: Page size in bytes.
+ * @codeword_size: Code word size information.
+ * @eccbits: Number of ecc bits.
+ * @bch: Bch / Hamming mode enable/disable.
+ * @eccsize: Ecc size information.
+ */
+struct anfc_ecc_matrix {
+ u32 pagesize;
+ u32 codeword_size;
+ u8 eccbits;
+ u8 bch;
+ u16 eccsize;
+};
+
+static const struct anfc_ecc_matrix ecc_matrix[] = {
+ {512, 512, 1, 0, 0x3},
+ {512, 512, 4, 1, 0x7},
+ {512, 512, 8, 1, 0xD},
+ /* 2K byte page */
+ {2048, 512, 1, 0, 0xC},
+ {2048, 512, 4, 1, 0x1A},
+ {2048, 512, 8, 1, 0x34},
+ {2048, 512, 12, 1, 0x4E},
+ {2048, 1024, 24, 1, 0x54},
+ /* 4K byte page */
+ {4096, 512, 1, 0, 0x18},
+ {4096, 512, 4, 1, 0x34},
+ {4096, 512, 8, 1, 0x68},
+ {4096, 512, 12, 1, 0x9C},
+ {4096, 1024, 24, 1, 0xA8},
+ /* 8K byte page */
+ {8192, 512, 1, 0, 0x30},
+ {8192, 512, 4, 1, 0x68},
+ {8192, 512, 8, 1, 0xD0},
+ {8192, 512, 12, 1, 0x138},
+ {8192, 1024, 24, 1, 0x150},
+ /* 16K byte page */
+ {16384, 512, 1, 0, 0x60},
+ {16384, 512, 4, 1, 0xD0},
+ {16384, 512, 8, 1, 0x1A0},
+ {16384, 512, 12, 1, 0x270},
+ {16384, 1024, 24, 1, 0x2A0}
+};
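
/*
 * Worked example of how the table above is used: a 2 KiB-page device that
 * advertises 8-bit correction over 512-byte codewords matches the entry
 * {2048, 512, 8, 1, 0x34}, i.e. BCH mode with 0x34 (52) ECC bytes per page.
 * anfc_ecc_init() below performs exactly this lookup.
 */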
+
+/**
+ * struct anfc - Defines the Arasan NAND flash driver instance
+ * @chip: NAND chip information structure.
+ * @dev: Pointer to the device structure.
+ * @base: Virtual address of the NAND flash device.
+ * @curr_cmd: Current command issued.
+ * @clk_sys: Pointer to the system clock.
+ * @clk_flash: Pointer to the flash clock.
+ * @dma: Dma enable/disable.
+ * @bch: Bch / Hamming mode enable/disable.
+ * @err: Error identifier.
+ * @iswriteoob: Identifies if oob write operation is required.
+ * @buf: Buffer used for read/write byte operations.
+ * @raddr_cycles: Row address cycle information.
+ * @caddr_cycles: Column address cycle information.
+ * @irq: irq number
+ * @pktsize: Packet size for read / write operation.
+ * @bufshift: Variable used for indexing buffer operation
+ * @rdintrmask: Interrupt mask value for read operation.
+ * @num_cs: Number of chip selects in use.
+ * @spktsize: Packet size in ddr mode for status operation.
+ * @bufrdy: Completion event for buffer ready.
+ * @xfercomp: Completion event for transfer complete.
+ * @ecclayout: Ecc layout object
+ */
+struct anfc {
+ struct nand_chip chip;
+ struct device *dev;
+
+ void __iomem *base;
+ int curr_cmd;
+ struct clk *clk_sys;
+ struct clk *clk_flash;
+
+ bool dma;
+ bool bch;
+ bool err;
+ bool iswriteoob;
+
+ u8 buf[TEMP_BUF_SIZE];
+
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+
+ int irq;
+ u32 pktsize;
+ u32 bufshift;
+ u32 rdintrmask;
+ u32 num_cs;
+ u32 spktsize;
+
+ struct completion bufrdy;
+ struct completion xfercomp;
+ struct nand_ecclayout ecclayout;
+};
+
+static inline struct anfc *to_anfc(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct anfc, chip);
+}
+
+static u8 anfc_page(u32 pagesize)
+{
+ switch (pagesize) {
+ case 512:
+ return PAGE_SIZE_512;
+ case 2048:
+ return PAGE_SIZE_2K;
+ case 4096:
+ return PAGE_SIZE_4K;
+ case 8192:
+ return PAGE_SIZE_8K;
+ case 16384:
+ return PAGE_SIZE_16K;
+ case 1024:
+ return PAGE_SIZE_1K;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline void anfc_enable_intrs(struct anfc *nfc, u32 val)
+{
+ writel(val, nfc->base + INTR_STS_EN_OFST);
+ writel(val, nfc->base + INTR_SIG_EN_OFST);
+}
+
+static int anfc_wait_for_event(struct anfc *nfc, u32 event)
+{
+ struct completion *comp;
+ int ret;
+
+ if (event == XFER_COMPLETE)
+ comp = &nfc->xfercomp;
+ else
+ comp = &nfc->bufrdy;
+
+ ret = wait_for_completion_timeout(comp, msecs_to_jiffies(EVNT_TIMEOUT));
+
+ return ret;
+}
+
+static inline void anfc_setpktszcnt(struct anfc *nfc, u32 pktsize,
+ u32 pktcount)
+{
+ writel(pktsize | (pktcount << PKT_CNT_SHIFT), nfc->base + PKT_OFST);
+}
+
+static inline void anfc_set_eccsparecmd(struct anfc *nfc, u8 cmd1, u8 cmd2)
+{
+ writel(cmd1 | (cmd2 << CMD2_SHIFT) |
+ (nfc->caddr_cycles << ADDR_CYCLES_SHIFT),
+ nfc->base + ECC_SPR_CMD_OFST);
+}
+
+static void anfc_setpagecoladdr(struct anfc *nfc, u32 page, u16 col)
+{
+ u32 val;
+
+ writel(col | (page << PG_ADDR_SHIFT), nfc->base + MEM_ADDR1_OFST);
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val = (val & ~MEM_ADDR_MASK) |
+ ((page >> PG_ADDR_SHIFT) & MEM_ADDR_MASK);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+}
+
+static void anfc_prepare_cmd(struct anfc *nfc, u8 cmd1, u8 cmd2,
+ u8 dmamode, u32 pagesize, u8 addrcycles)
+{
+ u32 regval;
+
+ regval = cmd1 | (cmd2 << CMD2_SHIFT);
+ if (dmamode && nfc->dma)
+ regval |= DMA_ENABLE << DMA_EN_SHIFT;
+ if (addrcycles)
+ regval |= addrcycles << ADDR_CYCLES_SHIFT;
+ if (pagesize)
+ regval |= anfc_page(pagesize) << PAGE_SIZE_SHIFT;
+ writel(regval, nfc->base + CMD_OFST);
+}
+
+static int anfc_device_ready(struct mtd_info *mtd,
+ struct nand_chip *chip)
+{
+ u8 status;
+ unsigned long timeout = jiffies + STATUS_TIMEOUT;
+
+ do {
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, 0, 0);
+ status = chip->read_byte(mtd);
+ if (status & ONFI_STATUS_READY) {
+ if (status & ONFI_STATUS_FAIL)
+ return NAND_STATUS_FAIL;
+ break;
+ }
+ cpu_relax();
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout)) {
+ pr_err("%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int anfc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct anfc *nfc = to_anfc(mtd);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ if (nfc->dma)
+ nfc->rdintrmask = XFER_COMPLETE;
+ else
+ nfc->rdintrmask = READ_READY;
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int anfc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct anfc *nfc = to_anfc(mtd);
+
+ nfc->iswriteoob = true;
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ nfc->iswriteoob = false;
+
+ return 0;
+}
+
+static void anfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ u32 i, pktcount, buf_rd_cnt = 0, pktsize;
+ u32 *bufptr = (u32 *)buf;
+ struct anfc *nfc = to_anfc(mtd);
+ dma_addr_t paddr = 0;
+
+ if (nfc->curr_cmd == NAND_CMD_READ0) {
+ pktsize = nfc->pktsize;
+ pktcount = DIV_ROUND_UP(mtd->writesize, pktsize);
+ } else {
+ pktsize = len;
+ pktcount = 1;
+ }
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (nfc->dma) {
+ paddr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, paddr)) {
+ dev_err(nfc->dev, "Read buffer mapping error");
+ return;
+ }
+ writel(lower_32_bits(paddr), nfc->base + DMA_ADDR0_OFST);
+ writel(upper_32_bits(paddr), nfc->base + DMA_ADDR1_OFST);
+ anfc_enable_intrs(nfc, nfc->rdintrmask);
+ writel(PROG_PGRD, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, paddr, len, DMA_FROM_DEVICE);
+ return;
+ }
+
+ anfc_enable_intrs(nfc, nfc->rdintrmask);
+ writel(PROG_PGRD, nfc->base + PROG_OFST);
+
+ while (buf_rd_cnt < pktcount) {
+
+ anfc_wait_for_event(nfc, READ_READY);
+ buf_rd_cnt++;
+
+ if (buf_rd_cnt == pktcount)
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+
+ for (i = 0; i < pktsize / 4; i++)
+ bufptr[i] = readl(nfc->base + DATA_PORT_OFST);
+
+ bufptr += (pktsize / 4);
+
+ if (buf_rd_cnt < pktcount)
+ anfc_enable_intrs(nfc, nfc->rdintrmask);
+ }
+
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static void anfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ u32 buf_wr_cnt = 0, pktcount = 1, i, pktsize;
+ u32 *bufptr = (u32 *)buf;
+ struct anfc *nfc = to_anfc(mtd);
+ dma_addr_t paddr = 0;
+
+ if (nfc->iswriteoob) {
+ pktsize = len;
+ pktcount = 1;
+ } else {
+ pktsize = nfc->pktsize;
+ pktcount = mtd->writesize / pktsize;
+ }
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (nfc->dma) {
+ paddr = dma_map_single(nfc->dev, (void *)buf, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, paddr)) {
+ dev_err(nfc->dev, "Write buffer mapping error");
+ return;
+ }
+ writel(lower_32_bits(paddr), nfc->base + DMA_ADDR0_OFST);
+ writel(upper_32_bits(paddr), nfc->base + DMA_ADDR1_OFST);
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(PROG_PGPROG, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, paddr, len, DMA_TO_DEVICE);
+ return;
+ }
+
+ anfc_enable_intrs(nfc, WRITE_READY);
+ writel(PROG_PGPROG, nfc->base + PROG_OFST);
+
+ while (buf_wr_cnt < pktcount) {
+ anfc_wait_for_event(nfc, WRITE_READY);
+
+ buf_wr_cnt++;
+ if (buf_wr_cnt == pktcount)
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+
+ for (i = 0; i < (pktsize / 4); i++)
+ writel(bufptr[i], nfc->base + DATA_PORT_OFST);
+
+ bufptr += (pktsize / 4);
+
+ if (buf_wr_cnt < pktcount)
+ anfc_enable_intrs(nfc, WRITE_READY);
+ }
+
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static int anfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ u32 val;
+ struct anfc *nfc = to_anfc(mtd);
+
+ anfc_set_eccsparecmd(nfc, NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART);
+
+ val = readl(nfc->base + CMD_OFST);
+ val = val | ECC_ENABLE;
+ writel(val, nfc->base + CMD_OFST);
+
+ if (nfc->dma)
+ nfc->rdintrmask = XFER_COMPLETE;
+ else
+ nfc->rdintrmask = READ_READY;
+
+ if (!nfc->bch)
+ nfc->rdintrmask = MBIT_ERROR;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ val = readl(nfc->base + ECC_ERR_CNT_OFST);
+ if (nfc->bch) {
+ mtd->ecc_stats.corrected += val & PAGE_ERR_CNT_MASK;
+ } else {
+ val = readl(nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ mtd->ecc_stats.corrected += val;
+ val = readl(nfc->base + ECC_ERR_CNT_2BIT_OFST);
+ mtd->ecc_stats.failed += val;
+ /* Clear ecc error count register 1Bit, 2Bit */
+ writel(0x0, nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ writel(0x0, nfc->base + ECC_ERR_CNT_2BIT_OFST);
+ }
+ nfc->err = false;
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ return 0;
+}
+
+static int anfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ u32 val, i;
+ struct anfc *nfc = to_anfc(mtd);
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ anfc_set_eccsparecmd(nfc, NAND_CMD_RNDIN, 0);
+
+ val = readl(nfc->base + CMD_OFST);
+ val = val | ECC_ENABLE;
+ writel(val, nfc->base + CMD_OFST);
+
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ if (oob_required) {
+ anfc_device_ready(mtd, chip);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ if (nfc->dma)
+ nfc->rdintrmask = XFER_COMPLETE;
+ else
+ nfc->rdintrmask = READ_READY;
+ chip->read_buf(mtd, ecc_calc, mtd->oobsize);
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[eccpos[i]];
+ chip->ecc.write_oob(mtd, chip, page);
+ }
+
+ return 0;
+}
+
+static u8 anfc_read_byte(struct mtd_info *mtd)
+{
+ struct anfc *nfc = to_anfc(mtd);
+
+ return nfc->buf[nfc->bufshift++];
+}
+
+static void anfc_writefifo(struct anfc *nfc, u32 prog, u32 size, u8 *buf)
+{
+ u32 i, *bufptr = (u32 *)buf;
+
+ anfc_enable_intrs(nfc, WRITE_READY);
+
+ writel(prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc, WRITE_READY);
+
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ for (i = 0; i < size / 4; i++)
+ writel(bufptr[i], nfc->base + DATA_PORT_OFST);
+
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static void anfc_readfifo(struct anfc *nfc, u32 prog, u32 size)
+{
+ u32 i, *bufptr = (u32 *)&nfc->buf[0];
+
+ anfc_enable_intrs(nfc, READ_READY);
+
+ writel(prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc, READ_READY);
+
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+
+ for (i = 0; i < size / 4; i++)
+ bufptr[i] = readl(nfc->base + DATA_PORT_OFST);
+
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static int anfc_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ u32 oob_index, i, ecc_addr, regval, bchmode = 0;
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct anfc *nfc = to_anfc(mtd);
+ int found = -1;
+
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.read_page = anfc_read_page_hwecc;
+ nand_chip->ecc.write_page = anfc_write_page_hwecc;
+ nand_chip->ecc.write_oob = anfc_write_oob;
+ nand_chip->ecc.read_oob = anfc_read_oob;
+
+ for (i = 0; i < ARRAY_SIZE(ecc_matrix); i++) {
+ if ((ecc_matrix[i].pagesize == mtd->writesize) &&
+ (ecc_matrix[i].codeword_size >= nand_chip->ecc_step_ds)) {
+ if (ecc_matrix[i].eccbits >=
+ nand_chip->ecc_strength_ds) {
+ found = i;
+ break;
+ }
+ found = i;
+ }
+ }
+
+ if (found < 0) {
+ dev_err(nfc->dev, "ECC scheme not supported");
+ return 1;
+ }
+ if (ecc_matrix[found].bch) {
+ switch (ecc_matrix[found].eccbits) {
+ case 12:
+ bchmode = 0x1;
+ break;
+ case 8:
+ bchmode = 0x2;
+ break;
+ case 4:
+ bchmode = 0x3;
+ break;
+ case 24:
+ bchmode = 0x4;
+ break;
+ default:
+ bchmode = 0x0;
+ }
+ }
+
+ nand_chip->ecc.strength = ecc_matrix[found].eccbits;
+ nand_chip->ecc.size = ecc_matrix[found].codeword_size;
+ nand_chip->ecc.steps = ecc_matrix[found].pagesize /
+ ecc_matrix[found].codeword_size;
+ nand_chip->ecc.bytes = ecc_matrix[found].eccsize /
+ nand_chip->ecc.steps;
+ nfc->ecclayout.eccbytes = ecc_matrix[found].eccsize;
+ nfc->bch = ecc_matrix[found].bch;
+ oob_index = mtd->oobsize - nfc->ecclayout.eccbytes;
+ ecc_addr = mtd->writesize + oob_index;
+
+ for (i = 0; i < nfc->ecclayout.eccbytes; i++)
+ nfc->ecclayout.eccpos[i] = oob_index + i;
+
+ nfc->ecclayout.oobfree->offset = 2;
+ nfc->ecclayout.oobfree->length = oob_index -
+ nfc->ecclayout.oobfree->offset;
+
+ nand_chip->ecc.layout = &nfc->ecclayout;
+ regval = ecc_addr | (ecc_matrix[found].eccsize << ECC_SIZE_SHIFT) |
+ (ecc_matrix[found].bch << BCH_EN_SHIFT);
+ writel(regval, nfc->base + ECC_OFST);
+
+ regval = readl(nfc->base + MEM_ADDR2_OFST);
+ regval = (regval & ~(BCH_MODE_MASK)) | (bchmode << BCH_MODE_SHIFT);
+ writel(regval, nfc->base + MEM_ADDR2_OFST);
+
+ if (nand_chip->ecc_step_ds >= 1024)
+ nfc->pktsize = 1024;
+ else
+ nfc->pktsize = 512;
+
+ return 0;
+}
+
+static void anfc_cmd_function(struct mtd_info *mtd,
+ unsigned int cmd, int column, int page_addr)
+{
+ struct anfc *nfc = to_anfc(mtd);
+ bool wait = false, read = false;
+ u32 addrcycles, prog;
+ u32 *bufptr = (u32 *)&nfc->buf[0];
+
+ nfc->bufshift = 0;
+ nfc->curr_cmd = cmd;
+
+ if (page_addr == -1)
+ page_addr = 0;
+ if (column == -1)
+ column = 0;
+
+ switch (cmd) {
+ case NAND_CMD_RESET:
+ anfc_prepare_cmd(nfc, cmd, 0, 0, 0, 0);
+ prog = PROG_RST;
+ wait = true;
+ break;
+ case NAND_CMD_SEQIN:
+ addrcycles = nfc->raddr_cycles + nfc->caddr_cycles;
+ anfc_prepare_cmd(nfc, cmd, NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ break;
+ case NAND_CMD_READOOB:
+ column += mtd->writesize;
+ /* fall through - read the OOB area via the READ0 path */
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ addrcycles = nfc->raddr_cycles + nfc->caddr_cycles;
+ anfc_prepare_cmd(nfc, NAND_CMD_READ0, NAND_CMD_READSTART, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ break;
+ case NAND_CMD_RNDOUT:
+ anfc_prepare_cmd(nfc, cmd, NAND_CMD_RNDOUTSTART, 1,
+ mtd->writesize, 2);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ if (nfc->dma)
+ nfc->rdintrmask = XFER_COMPLETE;
+ else
+ nfc->rdintrmask = READ_READY;
+ break;
+ case NAND_CMD_PARAM:
+ anfc_prepare_cmd(nfc, cmd, 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ anfc_setpktszcnt(nfc, sizeof(struct nand_onfi_params), 1);
+ anfc_readfifo(nfc, PROG_RDPARAM,
+ sizeof(struct nand_onfi_params));
+ break;
+ case NAND_CMD_READID:
+ anfc_prepare_cmd(nfc, cmd, 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ anfc_setpktszcnt(nfc, ONFI_ID_LEN, 1);
+ anfc_readfifo(nfc, PROG_RDID, ONFI_ID_LEN);
+ break;
+ case NAND_CMD_ERASE1:
+ addrcycles = nfc->raddr_cycles;
+ prog = PROG_ERASE;
+ anfc_prepare_cmd(nfc, cmd, NAND_CMD_ERASE2, 0, 0, addrcycles);
+ column = page_addr & 0xffff;
+ page_addr = (page_addr >> PG_ADDR_SHIFT) & 0xffff;
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ wait = true;
+ break;
+ case NAND_CMD_STATUS:
+ anfc_prepare_cmd(nfc, cmd, 0, 0, 0, 0);
+ anfc_setpktszcnt(nfc, nfc->spktsize/4, 1);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ prog = PROG_STATUS;
+ wait = read = true;
+ break;
+ case NAND_CMD_GET_FEATURES:
+ anfc_prepare_cmd(nfc, cmd, 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ anfc_setpktszcnt(nfc, nfc->spktsize, 1);
+ anfc_readfifo(nfc, PROG_GET_FEATURE, 4);
+ break;
+ case NAND_CMD_SET_FEATURES:
+ anfc_prepare_cmd(nfc, cmd, 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, page_addr, column);
+ anfc_setpktszcnt(nfc, nfc->spktsize, 1);
+ break;
+ default:
+ return;
+ }
+
+ if (wait) {
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc, XFER_COMPLETE);
+ }
+
+ if (read)
+ bufptr[0] = readl(nfc->base + FLASH_STS_OFST);
+}
+
+static void anfc_select_chip(struct mtd_info *mtd, int num)
+{
+ u32 val;
+ int ret;
+ struct anfc *nfc = to_anfc(mtd);
+
+ if (num == -1) {
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return;
+ }
+
+ ret = pm_runtime_get_sync(nfc->dev);
+ if (ret < 0) {
+ dev_err(nfc->dev, "runtime_get_sync failed\n");
+ return;
+ }
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val = (val & ~(CS_MASK)) | (num << CS_SHIFT);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+}
+
+static irqreturn_t anfc_irq_handler(int irq, void *ptr)
+{
+ struct anfc *nfc = ptr;
+ u32 regval = 0, status;
+
+ status = readl(nfc->base + INTR_STS_OFST);
+ if (status & XFER_COMPLETE) {
+ complete(&nfc->xfercomp);
+ regval |= XFER_COMPLETE;
+ }
+
+ if (status & READ_READY) {
+ complete(&nfc->bufrdy);
+ regval |= READ_READY;
+ }
+
+ if (status & WRITE_READY) {
+ complete(&nfc->bufrdy);
+ regval |= WRITE_READY;
+ }
+
+ if (status & MBIT_ERROR) {
+ nfc->err = true;
+ complete(&nfc->bufrdy);
+ regval |= MBIT_ERROR;
+ }
+
+ if (regval) {
+ writel(regval, nfc->base + INTR_STS_OFST);
+ writel(0, nfc->base + INTR_STS_EN_OFST);
+ writel(0, nfc->base + INTR_SIG_EN_OFST);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int anfc_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+ int status;
+ struct anfc *nfc = to_anfc(mtd);
+
+ if (!chip->onfi_version || !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+ anfc_writefifo(nfc, PROG_SET_FEATURE, nfc->spktsize, subfeature_param);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+static int anfc_init_timing_mode(struct anfc *nfc)
+{
+ int mode, err;
+ unsigned int feature[2], regval, i;
+ struct nand_chip *chip = &nfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(&nfc->chip);
+
+ memset(&feature[0], 0, NVDDR_MODE_PACKET_SIZE);
+ mode = onfi_get_sync_timing_mode(chip);
+ /* Get nvddr timing modes */
+ mode = mode & 0xFF;
+ if (!mode) {
+ mode = onfi_get_async_timing_mode(&nfc->chip);
+ mode = fls(mode) - 1;
+ regval = mode;
+ } else {
+ mode = fls(mode) - 1;
+ regval = NVDDR_MODE | mode << NVDDR_TIMING_MODE_SHIFT;
+ mode |= ONFI_DATA_INTERFACE_NVDDR;
+ }
+
+ feature[0] = mode;
+ for (i = 0; i < nfc->num_cs; i++) {
+ chip->select_chip(mtd, i);
+ err = chip->onfi_set_features(mtd, chip,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ (uint8_t *)&feature[0]);
+ chip->select_chip(mtd, -1);
+ if (err)
+ return err;
+ }
+ writel(regval, nfc->base + DATA_INTERFACE_REG);
+
+ if (mode & ONFI_DATA_INTERFACE_NVDDR)
+ nfc->spktsize = NVDDR_MODE_PACKET_SIZE;
+
+ return 0;
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct anfc *nfc;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ struct resource *res;
+ int err;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+
+ nand_chip = &nfc->chip;
+ nand_set_controller_data(nand_chip, nfc);
+ mtd = nand_to_mtd(nand_chip);
+ mtd->owner = THIS_MODULE;
+ mtd->name = DRIVER_NAME;
+ nfc->dev = &pdev->dev;
+ mtd->dev.parent = &pdev->dev;
+ nand_set_flash_node(nand_chip, pdev->dev.of_node);
+
+ nand_chip->cmdfunc = anfc_cmd_function;
+ nand_chip->waitfunc = anfc_device_ready;
+ nand_chip->chip_delay = 30;
+ nand_chip->read_buf = anfc_read_buf;
+ nand_chip->write_buf = anfc_write_buf;
+ nand_chip->read_byte = anfc_read_byte;
+ nand_chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE;
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH;
+ nand_chip->select_chip = anfc_select_chip;
+ nand_chip->onfi_set_features = anfc_onfi_set_features;
+ nfc->dma = of_property_read_bool(pdev->dev.of_node,
+ "arasan,has-mdma");
+ nfc->num_cs = 1;
+ of_property_read_u32(pdev->dev.of_node, "num-cs", &nfc->num_cs);
+ platform_set_drvdata(pdev, nfc);
+ init_completion(&nfc->bufrdy);
+ init_completion(&nfc->xfercomp);
+ nfc->irq = platform_get_irq(pdev, 0);
+ if (nfc->irq < 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return -ENXIO;
+ }
+ err = devm_request_irq(&pdev->dev, nfc->irq, anfc_irq_handler,
+ 0, "arasannfc", nfc);
+ if (err)
+ return err;
+ nfc->clk_sys = devm_clk_get(&pdev->dev, "clk_sys");
+ if (IS_ERR(nfc->clk_sys)) {
+ dev_err(&pdev->dev, "sys clock not found.\n");
+ return PTR_ERR(nfc->clk_sys);
+ }
+
+ nfc->clk_flash = devm_clk_get(&pdev->dev, "clk_flash");
+ if (IS_ERR(nfc->clk_flash)) {
+ dev_err(&pdev->dev, "flash clock not found.\n");
+ return PTR_ERR(nfc->clk_flash);
+ }
+
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(nfc->clk_flash);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable flash clock.\n");
+ goto clk_dis_sys;
+ }
+
+ pm_runtime_set_autosuspend_delay(nfc->dev, ANFC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(nfc->dev);
+ pm_runtime_set_active(nfc->dev);
+ pm_runtime_enable(nfc->dev);
+
+ nfc->spktsize = SDR_MODE_PACKET_SIZE;
+ if (nand_scan_ident(mtd, nfc->num_cs, NULL)) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "nand_scan_ident for NAND failed\n");
+ goto clk_dis_all;
+ }
+ if (nand_chip->onfi_version) {
+ nfc->raddr_cycles = nand_chip->onfi_params.addr_cycles & 0xF;
+ nfc->caddr_cycles =
+ (nand_chip->onfi_params.addr_cycles >> 4) & 0xF;
+ } else {
+ /* For non-ONFI devices, configure the address cycles as 5 */
+ nfc->raddr_cycles = nfc->caddr_cycles = 5;
+ }
+
+ if (anfc_init_timing_mode(nfc)) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "timing mode init failed\n");
+ goto clk_dis_all;
+ }
+
+ if (anfc_ecc_init(mtd, &nand_chip->ecc)) {
+ err = -ENXIO;
+ goto clk_dis_all;
+ }
+
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "nand_scan_tail for NAND failed\n");
+ goto clk_dis_all;
+ }
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto clk_dis_all;
+
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+
+ return err;
+
+clk_dis_all:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(nfc->clk_flash);
+clk_dis_sys:
+ clk_disable_unprepare(nfc->clk_sys);
+
+ return err;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct anfc *nfc = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&nfc->chip);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_disable_unprepare(nfc->clk_sys);
+ clk_disable_unprepare(nfc->clk_flash);
+
+ nand_release(mtd);
+
+ return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+ { .compatible = "arasan,nfc-v3p10" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+static int anfc_suspend(struct device *dev)
+{
+ return pm_runtime_put_sync(dev);
+}
+
+static int anfc_resume(struct device *dev)
+{
+ return pm_runtime_get_sync(dev);
+}
+
+static int __maybe_unused anfc_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct anfc *nfc = platform_get_drvdata(pdev);
+
+ clk_disable(nfc->clk_sys);
+ clk_disable(nfc->clk_flash);
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct anfc *nfc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_enable(nfc->clk_sys);
+ if (ret) {
+ dev_err(dev, "Cannot enable sys clock.\n");
+ return ret;
+ }
+ ret = clk_enable(nfc->clk_flash);
+ if (ret) {
+ dev_err(dev, "Cannot enable flash clock.\n");
+ clk_disable(nfc->clk_sys);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops anfc_pm_ops = {
+ .resume = anfc_resume,
+ .suspend = anfc_suspend,
+ .runtime_resume = anfc_runtime_resume,
+ .runtime_suspend = anfc_runtime_suspend,
+};
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = anfc_ids,
+ .pm = &anfc_pm_ops,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
#define GEM_MTU_MIN_SIZE 68
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
+#define GEM_TX_PTPHDR_OFFSET 42
+#define GEM_RX_PTPHDR_OFFSET 28
- /*
- * Graceful stop timeouts in us. We should allow up to
+ /* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bp->pdev->name, bp->pdev->id);
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->pdev->dev;
+ bp->mii_bus->parent = &bp->dev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
MACB_BIT(HRESP));
}
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
/* Enable TX and RX */
- macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE) |
+ MACB_BIT(PTPUNI));
}
- /*
- * The hash address register is 64 bits long and takes up two
+ /* The hash address register is 64 bits long and takes up two
* locations in the memory map. The least significant bits are stored
* in EMAC_HSL and the most significant bits in EMAC_HSH.
*
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
regs_buff[12] = macb_or_gem_readl(bp, USRIO);
- if (macb_is_gem(bp)) {
+ if (macb_is_gem(bp))
regs_buff[13] = gem_readl(bp, DMACFG);
- }
}
-static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int macb_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
{
- struct macb *bp = netdev_priv(netdev);
-
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
-}
-
-static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
-
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
+ struct macb *bp = netdev_priv(dev);
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = bp->phc_index;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
__func__);
}
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Check whether the block lock bit got set.
+ * This indicates whether the 10G Ethernet IP
+ * is functioning normally.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ val, (val & XXV_RX_BLKLCK_MASK),
+ 10, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "%s: Block lock bit of XXV MAC did not get set",
+ __func__);
+ netdev_err(ndev, "Cross check the ref clock configuration for the MAC");
+ }
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_RDFR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_SRR,
+ XAXIFIFO_TXTS_RESET_MASK);
+#endif
+ }
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ if ((lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+
+ /* Enable Receive errors */
+ axienet_iow(lp, XAE_IE_OFFSET, XAE_INT_RECV_ERROR_MASK);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_FCS_STRIP;
+ lp->options |= XAE_OPTION_FCS_INSERT;
+ } else {
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ }
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
- */
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ lp->axienet_config->setoptions(ndev, lp->options);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
}
/**
--- /dev/null
- lp->ndev->trans_start = jiffies;
+/*
+ * Xilinx Ethernet: Linux driver for Ethernet.
+ *
+ * Author: Xilinx, Inc.
+ *
+ * 2010 (c) Xilinx, Inc. This file is licensed under the terms of the GNU
+ * General Public License version 2. This program is licensed "as is"
+ * without any warranty of any kind, whether express or implied.
+ *
+ * This is a driver for the Xilinx processor sub-system (PS) Ethernet device.
+ * It is mainly used with Linux 2.6.30 and above and does _not_ support the
+ * Linux 2.4 kernel, because certain newer kernel features (e.g. NAPI) are
+ * used in this driver.
+ *
+ * TODO:
+ * 1. Jumbo frames are not enabled, per the EP's spec. If that support is
+ *    added, update this driver and set MAX_MTU to 9000.
+ * 2. PTP slave mode: master/slave sync has so far been tested with 111 MHz
+ *    and 125 MHz PTP clocks. Other clock frequencies still need testing.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+/************************** Constant Definitions *****************************/
+
+/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
+#define DRIVER_NAME "xemacps"
+#define DRIVER_DESCRIPTION "Xilinx Tri-Mode Ethernet MAC driver"
+#define DRIVER_VERSION "1.00a"
+
+/* Transmission timeout is 3 seconds. */
+#define TX_TIMEOUT (3*HZ)
+
+/* Offset used to keep the RX skb IP header word-aligned */
+#define RX_IP_ALIGN_OFFSET 2
+
+/* DMA buffer descriptors must be aligned on an 8-byte boundary. */
+#define ALIGNMENT_BD 8
+
+/* Maximum value for hash bits. 2**6 */
+#define XEMACPS_MAX_HASH_BITS 64
+
+/* MDC clock division
+ * currently supporting 8, 16, 32, 48, 64, 96, 128, 224.
+ */
+enum { MDC_DIV_8 = 0, MDC_DIV_16, MDC_DIV_32, MDC_DIV_48,
+MDC_DIV_64, MDC_DIV_96, MDC_DIV_128, MDC_DIV_224 };
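
/*
 * Minimal sketch of how one of the divisors above could be selected; it
 * assumes the usual IEEE 802.3 limit of 2.5 MHz on MDC. The helper name and
 * the selection policy are illustrative, not taken from this driver.
 */
static inline int xemacps_example_pick_mdc_div(unsigned long pclk_hz)
{
	static const unsigned int div[] = { 8, 16, 32, 48, 64, 96, 128, 224 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(div); i++)
		if (pclk_hz / div[i] <= 2500000)
			return i;	/* index maps onto MDC_DIV_8..MDC_DIV_224 */

	return MDC_DIV_224;	/* best effort for very fast peripheral clocks */
}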
+
+/* Receive buffer size in bytes; a multiple of 64 between 64 and 10240 */
+#define XEMACPS_RX_BUF_SIZE 1536
+
+/* Receive buffer unit in bytes; this granularity is fixed by the HW */
+#define XEMACPS_RX_BUF_UNIT 64
+
+/* Default SEND and RECV buffer descriptors (BD) numbers.
+ * BD Space needed is (XEMACPS_SEND_BD_CNT+XEMACPS_RECV_BD_CNT)*8
+ */
+#undef DEBUG
+#define DEBUG
+
+#define XEMACPS_SEND_BD_CNT 256
+#define XEMACPS_RECV_BD_CNT 256
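
/*
 * With the defaults above this amounts to (256 + 256) * 8 = 4096 bytes of
 * buffer descriptor space, i.e. exactly one 4 KiB page for both rings.
 */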
+
+#define XEMACPS_NAPI_WEIGHT 64
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit. Names are self explained here.
+ */
+#define XEMACPS_NWCTRL_OFFSET 0x00000000 /* Network Control reg */
+#define XEMACPS_NWCFG_OFFSET 0x00000004 /* Network Config reg */
+#define XEMACPS_NWSR_OFFSET 0x00000008 /* Network Status reg */
+#define XEMACPS_USERIO_OFFSET 0x0000000C /* User IO reg */
+#define XEMACPS_DMACR_OFFSET 0x00000010 /* DMA Control reg */
+#define XEMACPS_TXSR_OFFSET 0x00000014 /* TX Status reg */
+#define XEMACPS_RXQBASE_OFFSET 0x00000018 /* RX Q Base address reg */
+#define XEMACPS_TXQBASE_OFFSET 0x0000001C /* TX Q Base address reg */
+#define XEMACPS_RXSR_OFFSET 0x00000020 /* RX Status reg */
+#define XEMACPS_ISR_OFFSET 0x00000024 /* Interrupt Status reg */
+#define XEMACPS_IER_OFFSET 0x00000028 /* Interrupt Enable reg */
+#define XEMACPS_IDR_OFFSET 0x0000002C /* Interrupt Disable reg */
+#define XEMACPS_IMR_OFFSET 0x00000030 /* Interrupt Mask reg */
+#define XEMACPS_PHYMNTNC_OFFSET 0x00000034 /* PHY Maintenance reg */
+#define XEMACPS_RXPAUSE_OFFSET 0x00000038 /* RX Pause Time reg */
+#define XEMACPS_TXPAUSE_OFFSET 0x0000003C /* TX Pause Time reg */
+#define XEMACPS_HASHL_OFFSET 0x00000080 /* Hash Low address reg */
+#define XEMACPS_HASHH_OFFSET 0x00000084 /* Hash High address reg */
+#define XEMACPS_LADDR1L_OFFSET 0x00000088 /* Specific1 addr low */
+#define XEMACPS_LADDR1H_OFFSET 0x0000008C /* Specific1 addr high */
+#define XEMACPS_LADDR2L_OFFSET 0x00000090 /* Specific2 addr low */
+#define XEMACPS_LADDR2H_OFFSET 0x00000094 /* Specific2 addr high */
+#define XEMACPS_LADDR3L_OFFSET 0x00000098 /* Specific3 addr low */
+#define XEMACPS_LADDR3H_OFFSET 0x0000009C /* Specific3 addr high */
+#define XEMACPS_LADDR4L_OFFSET 0x000000A0 /* Specific4 addr low */
+#define XEMACPS_LADDR4H_OFFSET 0x000000A4 /* Specific4 addr high */
+#define XEMACPS_MATCH1_OFFSET 0x000000A8 /* Type ID1 Match reg */
+#define XEMACPS_MATCH2_OFFSET 0x000000AC /* Type ID2 Match reg */
+#define XEMACPS_MATCH3_OFFSET 0x000000B0 /* Type ID3 Match reg */
+#define XEMACPS_MATCH4_OFFSET 0x000000B4 /* Type ID4 Match reg */
+#define XEMACPS_WOL_OFFSET 0x000000B8 /* Wake on LAN reg */
+#define XEMACPS_STRETCH_OFFSET 0x000000BC /* IPG Stretch reg */
+#define XEMACPS_SVLAN_OFFSET 0x000000C0 /* Stacked VLAN reg */
+#define XEMACPS_MODID_OFFSET 0x000000FC /* Module ID reg */
+#define XEMACPS_OCTTXL_OFFSET 0x00000100 /* Octets transmitted Low
+ reg */
+#define XEMACPS_OCTTXH_OFFSET 0x00000104 /* Octets transmitted High
+ reg */
+#define XEMACPS_TXCNT_OFFSET 0x00000108 /* Error-free Frames
+ transmitted counter */
+#define XEMACPS_TXBCCNT_OFFSET 0x0000010C /* Error-free Broadcast
+ Frames counter*/
+#define XEMACPS_TXMCCNT_OFFSET 0x00000110 /* Error-free Multicast
+ Frame counter */
+#define XEMACPS_TXPAUSECNT_OFFSET 0x00000114 /* Pause Frames Transmitted
+ Counter */
+#define XEMACPS_TX64CNT_OFFSET 0x00000118 /* Error-free 64 byte Frames
+ Transmitted counter */
+#define XEMACPS_TX65CNT_OFFSET 0x0000011C /* Error-free 65-127 byte
+ Frames Transmitted counter */
+#define XEMACPS_TX128CNT_OFFSET 0x00000120 /* Error-free 128-255 byte
+ Frames Transmitted counter */
+#define XEMACPS_TX256CNT_OFFSET 0x00000124 /* Error-free 256-511 byte
+ Frames transmitted counter */
+#define XEMACPS_TX512CNT_OFFSET 0x00000128 /* Error-free 512-1023 byte
+ Frames transmitted counter */
+#define XEMACPS_TX1024CNT_OFFSET 0x0000012C /* Error-free 1024-1518 byte
+ Frames transmitted counter */
+#define XEMACPS_TX1519CNT_OFFSET 0x00000130 /* Error-free larger than
+ 1519 byte Frames transmitted
+ Counter */
+#define XEMACPS_TXURUNCNT_OFFSET 0x00000134 /* TX under run error
+ Counter */
+#define XEMACPS_SNGLCOLLCNT_OFFSET 0x00000138 /* Single Collision Frame
+ Counter */
+#define XEMACPS_MULTICOLLCNT_OFFSET 0x0000013C /* Multiple Collision Frame
+ Counter */
+#define XEMACPS_EXCESSCOLLCNT_OFFSET 0x00000140 /* Excessive Collision Frame
+ Counter */
+#define XEMACPS_LATECOLLCNT_OFFSET 0x00000144 /* Late Collision Frame
+ Counter */
+#define XEMACPS_TXDEFERCNT_OFFSET 0x00000148 /* Deferred Transmission
+ Frame Counter */
+#define XEMACPS_CSENSECNT_OFFSET 0x0000014C /* Carrier Sense Error
+ Counter */
+#define XEMACPS_OCTRXL_OFFSET 0x00000150 /* Octets Received register
+ Low */
+#define XEMACPS_OCTRXH_OFFSET 0x00000154 /* Octets Received register
+ High */
+#define XEMACPS_RXCNT_OFFSET 0x00000158 /* Error-free Frames
+ Received Counter */
+#define XEMACPS_RXBROADCNT_OFFSET 0x0000015C /* Error-free Broadcast
+ Frames Received Counter */
+#define XEMACPS_RXMULTICNT_OFFSET 0x00000160 /* Error-free Multicast
+ Frames Received Counter */
+#define XEMACPS_RXPAUSECNT_OFFSET 0x00000164 /* Pause Frames
+ Received Counter */
+#define XEMACPS_RX64CNT_OFFSET 0x00000168 /* Error-free 64 byte Frames
+ Received Counter */
+#define XEMACPS_RX65CNT_OFFSET 0x0000016C /* Error-free 65-127 byte
+ Frames Received Counter */
+#define XEMACPS_RX128CNT_OFFSET 0x00000170 /* Error-free 128-255 byte
+ Frames Received Counter */
+#define XEMACPS_RX256CNT_OFFSET 0x00000174 /* Error-free 256-512 byte
+ Frames Received Counter */
+#define XEMACPS_RX512CNT_OFFSET 0x00000178 /* Error-free 512-1023 byte
+ Frames Received Counter */
+#define XEMACPS_RX1024CNT_OFFSET 0x0000017C /* Error-free 1024-1518 byte
+ Frames Received Counter */
+#define XEMACPS_RX1519CNT_OFFSET 0x00000180 /* Error-free 1519-max byte
+ Frames Received Counter */
+#define XEMACPS_RXUNDRCNT_OFFSET 0x00000184 /* Undersize Frames Received
+ Counter */
+#define XEMACPS_RXOVRCNT_OFFSET 0x00000188 /* Oversize Frames Received
+ Counter */
+#define XEMACPS_RXJABCNT_OFFSET 0x0000018C /* Jabbers Received
+ Counter */
+#define XEMACPS_RXFCSCNT_OFFSET 0x00000190 /* Frame Check Sequence
+ Error Counter */
+#define XEMACPS_RXLENGTHCNT_OFFSET 0x00000194 /* Length Field Error
+ Counter */
+#define XEMACPS_RXSYMBCNT_OFFSET 0x00000198 /* Symbol Error Counter */
+#define XEMACPS_RXALIGNCNT_OFFSET 0x0000019C /* Alignment Error
+ Counter */
+#define XEMACPS_RXRESERRCNT_OFFSET 0x000001A0 /* Receive Resource Error
+ Counter */
+#define XEMACPS_RXORCNT_OFFSET 0x000001A4 /* Receive Overrun */
+#define XEMACPS_RXIPCCNT_OFFSET 0x000001A8 /* IP header Checksum Error
+ Counter */
+#define XEMACPS_RXTCPCCNT_OFFSET 0x000001AC /* TCP Checksum Error
+ Counter */
+#define XEMACPS_RXUDPCCNT_OFFSET 0x000001B0 /* UDP Checksum Error
+ Counter */
+
+#define XEMACPS_1588S_OFFSET 0x000001D0 /* 1588 Timer Seconds */
+#define XEMACPS_1588NS_OFFSET 0x000001D4 /* 1588 Timer Nanoseconds */
+#define XEMACPS_1588ADJ_OFFSET 0x000001D8 /* 1588 Timer Adjust */
+#define XEMACPS_1588INC_OFFSET 0x000001DC /* 1588 Timer Increment */
+#define XEMACPS_PTPETXS_OFFSET 0x000001E0 /* PTP Event Frame
+ Transmitted Seconds */
+#define XEMACPS_PTPETXNS_OFFSET 0x000001E4 /* PTP Event Frame
+ Transmitted Nanoseconds */
+#define XEMACPS_PTPERXS_OFFSET 0x000001E8 /* PTP Event Frame Received
+ Seconds */
+#define XEMACPS_PTPERXNS_OFFSET 0x000001EC /* PTP Event Frame Received
+ Nanoseconds */
+#define XEMACPS_PTPPTXS_OFFSET 0x000001F0 /* PTP Peer Frame
+ Transmitted Seconds */
+#define XEMACPS_PTPPTXNS_OFFSET 0x000001F4 /* PTP Peer Frame
+ Transmitted Nanoseconds */
+#define XEMACPS_PTPPRXS_OFFSET 0x000001F8 /* PTP Peer Frame Received
+ Seconds */
+#define XEMACPS_PTPPRXNS_OFFSET 0x000001FC /* PTP Peer Frame Received
+ Nanoseconds */
+
+/* network control register bit definitions */
+#define XEMACPS_NWCTRL_FLUSH_DPRAM_MASK 0x00040000
+#define XEMACPS_NWCTRL_RXTSTAMP_MASK 0x00008000 /* RX Timestamp in CRC */
+#define XEMACPS_NWCTRL_ZEROPAUSETX_MASK 0x00001000 /* Transmit zero quantum
+ pause frame */
+#define XEMACPS_NWCTRL_PAUSETX_MASK 0x00000800 /* Transmit pause frame */
+#define XEMACPS_NWCTRL_HALTTX_MASK 0x00000400 /* Halt transmission
+ after current frame */
+#define XEMACPS_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */
+
+#define XEMACPS_NWCTRL_STATWEN_MASK 0x00000080 /* Enable writing to
+ stat counters */
+#define XEMACPS_NWCTRL_STATINC_MASK 0x00000040 /* Increment statistic
+ registers */
+#define XEMACPS_NWCTRL_STATCLR_MASK 0x00000020 /* Clear statistic
+ registers */
+#define XEMACPS_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
+#define XEMACPS_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
+#define XEMACPS_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
+#define XEMACPS_NWCTRL_LOOPEN_MASK 0x00000002 /* local loopback */
+
+/* name network configuration register bit definitions */
+#define XEMACPS_NWCFG_BADPREAMBEN_MASK 0x20000000 /* disable rejection of
+ non-standard preamble */
+#define XEMACPS_NWCFG_IPDSTRETCH_MASK 0x10000000 /* enable transmit IPG */
+#define XEMACPS_NWCFG_FCSIGNORE_MASK 0x04000000 /* disable rejection of
+ FCS error */
+#define XEMACPS_NWCFG_HDRXEN_MASK 0x02000000 /* RX half duplex */
+#define XEMACPS_NWCFG_RXCHKSUMEN_MASK 0x01000000 /* enable RX checksum
+ offload */
+#define XEMACPS_NWCFG_PAUSECOPYDI_MASK 0x00800000 /* Do not copy pause
+ Frames to memory */
+#define XEMACPS_NWCFG_MDC_SHIFT_MASK 18 /* shift bits for MDC */
+#define XEMACPS_NWCFG_MDCCLKDIV_MASK 0x001C0000 /* MDC Mask PCLK divisor */
+#define XEMACPS_NWCFG_FCSREM_MASK 0x00020000 /* Discard FCS from
+ received frames */
+#define XEMACPS_NWCFG_LENGTHERRDSCRD_MASK 0x00010000
+/* RX length error discard */
+#define XEMACPS_NWCFG_RXOFFS_MASK 0x0000C000 /* RX buffer offset */
+#define XEMACPS_NWCFG_PAUSEEN_MASK 0x00002000 /* Enable pause TX */
+#define XEMACPS_NWCFG_RETRYTESTEN_MASK 0x00001000 /* Retry test */
+#define XEMACPS_NWCFG_1000_MASK 0x00000400 /* Gigabit mode */
+#define XEMACPS_NWCFG_EXTADDRMATCHEN_MASK 0x00000200
+/* External address match enable */
+#define XEMACPS_NWCFG_UCASTHASHEN_MASK 0x00000080 /* Receive unicast hash
+ frames */
+#define XEMACPS_NWCFG_MCASTHASHEN_MASK 0x00000040 /* Receive multicast hash
+ frames */
+#define XEMACPS_NWCFG_BCASTDI_MASK 0x00000020 /* Do not receive
+ broadcast frames */
+#define XEMACPS_NWCFG_COPYALLEN_MASK 0x00000010 /* Copy all frames */
+
+#define XEMACPS_NWCFG_NVLANDISC_MASK 0x00000004 /* Receive only VLAN
+ frames */
+#define XEMACPS_NWCFG_FDEN_MASK 0x00000002 /* Full duplex */
+#define XEMACPS_NWCFG_100_MASK 0x00000001 /* 10 or 100 Mb/s */
+
+/* network status register bit definitions */
+#define XEMACPS_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */
+#define XEMACPS_NWSR_MDIO_MASK 0x00000002 /* Status of mdio_in */
+
+/* MAC address register word 1 mask */
+#define XEMACPS_LADDR_MACH_MASK 0x0000FFFF /* Address bits [47:32];
+ bits [31:0] are in BOTTOM */
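
/*
 * Sketch of how a MAC address maps onto the specific-address registers,
 * assuming the first four octets land in the BOTTOM (LADDRxL) register,
 * least significant byte first, and the last two in TOP (LADDRxH). The
 * helper name is illustrative only.
 */
static inline void xemacps_example_set_hwaddr(void __iomem *base, const u8 *a)
{
	writel_relaxed(a[0] | (a[1] << 8) | (a[2] << 16) | ((u32)a[3] << 24),
		       base + XEMACPS_LADDR1L_OFFSET);
	writel_relaxed(a[4] | (a[5] << 8), base + XEMACPS_LADDR1H_OFFSET);
}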
+
+/* DMA control register bit definitions */
+#define XEMACPS_DMACR_RXBUF_MASK 0x00FF0000 /* Mask bit for RX buffer
+ size */
+#define XEMACPS_DMACR_RXBUF_SHIFT 16 /* Shift bit for RX buffer
+ size */
+#define XEMACPS_DMACR_TCPCKSUM_MASK 0x00000800 /* enable/disable TX
+ checksum offload */
+#define XEMACPS_DMACR_TXSIZE_MASK 0x00000400 /* TX buffer memory size */
+#define XEMACPS_DMACR_RXSIZE_MASK 0x00000300 /* RX buffer memory size */
+#define XEMACPS_DMACR_ENDIAN_MASK 0x00000080 /* Endian configuration */
+#define XEMACPS_DMACR_BLENGTH_MASK 0x0000001F /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_INCR16 0x00000010 /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_INCR8 0x00000008 /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_INCR4 0x00000004 /* Buffer burst length */
+#define XEMACPS_DMACR_BLENGTH_SINGLE 0x00000002 /* Buffer burst length */
+
+/* transmit status register bit definitions */
+#define XEMACPS_TXSR_HRESPNOK_MASK 0x00000100 /* Transmit hresp not OK */
+#define XEMACPS_TXSR_COL1000_MASK 0x00000080 /* Collision Gbs mode */
+#define XEMACPS_TXSR_URUN_MASK 0x00000040 /* Transmit underrun */
+#define XEMACPS_TXSR_TXCOMPL_MASK 0x00000020 /* Transmit completed OK */
+#define XEMACPS_TXSR_BUFEXH_MASK 0x00000010 /* Transmit buffs exhausted
+ mid frame */
+#define XEMACPS_TXSR_TXGO_MASK 0x00000008 /* Status of go flag */
+#define XEMACPS_TXSR_RXOVR_MASK 0x00000004 /* Retry limit exceeded */
+#define XEMACPS_TXSR_COL100_MASK 0x00000002 /* Collision 10/100 mode */
+#define XEMACPS_TXSR_USEDREAD_MASK 0x00000001 /* TX buffer used bit set */
+
+#define XEMACPS_TXSR_ERROR_MASK (XEMACPS_TXSR_HRESPNOK_MASK | \
+ XEMACPS_TXSR_COL1000_MASK | \
+ XEMACPS_TXSR_URUN_MASK | \
+ XEMACPS_TXSR_BUFEXH_MASK | \
+ XEMACPS_TXSR_RXOVR_MASK | \
+ XEMACPS_TXSR_COL100_MASK | \
+ XEMACPS_TXSR_USEDREAD_MASK)
+
+/* receive status register bit definitions */
+#define XEMACPS_RXSR_HRESPNOK_MASK 0x00000008 /* Receive hresp not OK */
+#define XEMACPS_RXSR_RXOVR_MASK 0x00000004 /* Receive overrun */
+#define XEMACPS_RXSR_FRAMERX_MASK 0x00000002 /* Frame received OK */
+#define XEMACPS_RXSR_BUFFNA_MASK 0x00000001 /* RX buffer used bit set */
+
+#define XEMACPS_RXSR_ERROR_MASK (XEMACPS_RXSR_HRESPNOK_MASK | \
+ XEMACPS_RXSR_RXOVR_MASK | \
+ XEMACPS_RXSR_BUFFNA_MASK)
+
+/* interrupts bit definitions
+ * Bits definitions are same in XEMACPS_ISR_OFFSET,
+ * XEMACPS_IER_OFFSET, XEMACPS_IDR_OFFSET, and XEMACPS_IMR_OFFSET
+ */
+#define XEMACPS_IXR_PTPPSTX_MASK 0x02000000 /* PTP Psync transmitted */
+#define XEMACPS_IXR_PTPPDRTX_MASK 0x01000000 /* PTP Pdelay_req
+ transmitted */
+#define XEMACPS_IXR_PTPSTX_MASK 0x00800000 /* PTP Sync transmitted */
+#define XEMACPS_IXR_PTPDRTX_MASK 0x00400000 /* PTP Delay_req
+ transmitted */
+#define XEMACPS_IXR_PTPPSRX_MASK 0x00200000 /* PTP Psync received */
+#define XEMACPS_IXR_PTPPDRRX_MASK 0x00100000 /* PTP Pdelay_req
+ received */
+#define XEMACPS_IXR_PTPSRX_MASK 0x00080000 /* PTP Sync received */
+#define XEMACPS_IXR_PTPDRRX_MASK 0x00040000 /* PTP Delay_req received */
+#define XEMACPS_IXR_PAUSETX_MASK 0x00004000 /* Pause frame
+ transmitted */
+#define XEMACPS_IXR_PAUSEZERO_MASK 0x00002000 /* Pause time has reached
+ zero */
+#define XEMACPS_IXR_PAUSENZERO_MASK 0x00001000 /* Pause frame received */
+#define XEMACPS_IXR_HRESPNOK_MASK 0x00000800 /* hresp not ok */
+#define XEMACPS_IXR_RXOVR_MASK 0x00000400 /* Receive overrun
+ occurred */
+#define XEMACPS_IXR_TXCOMPL_MASK 0x00000080 /* Frame transmitted ok */
+#define XEMACPS_IXR_TXEXH_MASK 0x00000040 /* Transmit err occurred or
+ no buffers*/
+#define XEMACPS_IXR_RETRY_MASK 0x00000020 /* Retry limit exceeded */
+#define XEMACPS_IXR_URUN_MASK 0x00000010 /* Transmit underrun */
+#define XEMACPS_IXR_TXUSED_MASK 0x00000008 /* Tx buffer used bit read */
+#define XEMACPS_IXR_RXUSED_MASK 0x00000004 /* Rx buffer used bit read */
+#define XEMACPS_IXR_FRAMERX_MASK 0x00000002 /* Frame received ok */
+#define XEMACPS_IXR_MGMNT_MASK 0x00000001 /* PHY management complete */
+#define XEMACPS_IXR_ALL_MASK 0x00000CFE /* all interrupts in use */
+
+#define XEMACPS_IXR_TX_ERR_MASK (XEMACPS_IXR_TXEXH_MASK | \
+ XEMACPS_IXR_RETRY_MASK | \
+ XEMACPS_IXR_URUN_MASK | \
+ XEMACPS_IXR_TXUSED_MASK)
+
+#define XEMACPS_IXR_RX_ERR_MASK (XEMACPS_IXR_HRESPNOK_MASK | \
+ XEMACPS_IXR_RXUSED_MASK | \
+ XEMACPS_IXR_RXOVR_MASK)
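
/*
 * Illustrative sketch of the enable/acknowledge pattern implied by the
 * identical bit layout of ISR/IER/IDR/IMR noted above, assuming the status
 * register is write-one-to-clear: unmask sources through IER, then clear
 * whatever ISR reports once it has been handled. The helper name is
 * illustrative only.
 */
static inline void xemacps_example_irq_enable_and_ack(void __iomem *base)
{
	u32 status;

	writel_relaxed(XEMACPS_IXR_ALL_MASK, base + XEMACPS_IER_OFFSET);

	status = readl_relaxed(base + XEMACPS_ISR_OFFSET);
	writel_relaxed(status, base + XEMACPS_ISR_OFFSET);	/* acknowledge */
}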
+/* PHY Maintenance bit definitions */
+#define XEMACPS_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
+#define XEMACPS_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
+#define XEMACPS_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
+#define XEMACPS_PHYMNTNC_ADDR_MASK 0x0F800000 /* Address bits */
+#define XEMACPS_PHYMNTNC_REG_MASK 0x007C0000 /* register bits */
+#define XEMACPS_PHYMNTNC_DATA_MASK 0x0000FFFF /* data bits */
+#define XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
+#define XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */
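
/*
 * Sketch of how a clause-22 PHY read command is composed from the fields
 * above; the resulting word would be written to XEMACPS_PHYMNTNC_OFFSET and
 * the data read back from its low 16 bits once MDIO is idle. The helper name
 * is illustrative only.
 */
static inline u32 xemacps_example_mdio_read_cmd(u32 phy_addr, u32 phy_reg)
{
	return XEMACPS_PHYMNTNC_OP_MASK | XEMACPS_PHYMNTNC_OP_R_MASK |
	       (phy_addr << XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK) |
	       (phy_reg << XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK);
}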
+
+/* Wake on LAN bit definition */
+#define XEMACPS_WOL_MCAST_MASK 0x00080000
+#define XEMACPS_WOL_SPEREG1_MASK 0x00040000
+#define XEMACPS_WOL_ARP_MASK 0x00020000
+#define XEMACPS_WOL_MAGIC_MASK 0x00010000
+#define XEMACPS_WOL_ARP_ADDR_MASK 0x0000FFFF
+
+/* Buffer descriptor status words offset */
+#define XEMACPS_BD_ADDR_OFFSET 0x00000000 /**< word 0/addr of BDs */
+#define XEMACPS_BD_STAT_OFFSET 0x00000004 /**< word 1/status of BDs */
+
+/* Transmit buffer descriptor status words bit positions.
+ * Transmit buffer descriptor consists of two 32-bit registers,
+ * the first - word0 contains a 32-bit address pointing to the location of
+ * the transmit data.
+ * The following register - word1, consists of various information to
+ * control transmit process. After transmit, this is updated with status
+ * information, whether the frame was transmitted OK or why it had failed.
+ */
+#define XEMACPS_TXBUF_USED_MASK 0x80000000 /* Used bit. */
+#define XEMACPS_TXBUF_WRAP_MASK 0x40000000 /* Wrap bit, last
+ descriptor */
+#define XEMACPS_TXBUF_RETRY_MASK 0x20000000 /* Retry limit exceeded */
+#define XEMACPS_TXBUF_EXH_MASK 0x08000000 /* Buffers exhausted */
+#define XEMACPS_TXBUF_LAC_MASK 0x04000000 /* Late collision. */
+#define XEMACPS_TXBUF_NOCRC_MASK 0x00010000 /* No CRC */
+#define XEMACPS_TXBUF_LAST_MASK 0x00008000 /* Last buffer */
+#define XEMACPS_TXBUF_LEN_MASK 0x00003FFF /* Mask for length field */
+
+#define XEMACPS_TXBUF_ERR_MASK 0x3C000000 /* Mask for error bits */
+
+/* Receive buffer descriptor status words bit positions.
+ * Receive buffer descriptor consists of two 32-bit registers,
+ * the first - word0 contains a 32-bit word aligned address pointing to the
+ * address of the buffer. The lower two bits make up the wrap bit indicating
+ * the last descriptor and the ownership bit to indicate it has been used.
+ * The following register - word1, contains status information regarding why
+ * the frame was received (the filter match condition) as well as other
+ * useful info.
+ */
+#define XEMACPS_RXBUF_BCAST_MASK 0x80000000 /* Broadcast frame */
+#define XEMACPS_RXBUF_MULTIHASH_MASK 0x40000000 /* Multicast hashed frame */
+#define XEMACPS_RXBUF_UNIHASH_MASK 0x20000000 /* Unicast hashed frame */
+#define XEMACPS_RXBUF_EXH_MASK 0x08000000 /* buffer exhausted */
+#define XEMACPS_RXBUF_AMATCH_MASK 0x06000000 /* Specific address
+ matched */
+#define XEMACPS_RXBUF_IDFOUND_MASK 0x01000000 /* Type ID matched */
+#define XEMACPS_RXBUF_IDMATCH_MASK 0x00C00000 /* ID matched mask */
+#define XEMACPS_RXBUF_VLAN_MASK 0x00200000 /* VLAN tagged */
+#define XEMACPS_RXBUF_PRI_MASK 0x00100000 /* Priority tagged */
+#define XEMACPS_RXBUF_VPRI_MASK 0x000E0000 /* Vlan priority */
+#define XEMACPS_RXBUF_CFI_MASK 0x00010000 /* CFI frame */
+#define XEMACPS_RXBUF_EOF_MASK 0x00008000 /* End of frame. */
+#define XEMACPS_RXBUF_SOF_MASK 0x00004000 /* Start of frame. */
+#define XEMACPS_RXBUF_LEN_MASK 0x00001FFF /* Mask for length field */
+
+#define XEMACPS_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
+#define XEMACPS_RXBUF_NEW_MASK 0x00000001 /* Used bit */
+#define XEMACPS_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */
+
+#define XEAMCPS_GEN_PURPOSE_TIMER_LOAD 100 /* timeout value in msecs */
+
+#define XEMACPS_GMII2RGMII_FULLDPLX BMCR_FULLDPLX
+#define XEMACPS_GMII2RGMII_SPEED1000 BMCR_SPEED1000
+#define XEMACPS_GMII2RGMII_SPEED100 BMCR_SPEED100
+#define XEMACPS_GMII2RGMII_REG_NUM 0x10
+#define XEMACPS_MDIO_BUSY_TIMEOUT (1 * HZ)
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+#define NS_PER_SEC 1000000000ULL /* Nanoseconds per
+ second */
+/* Sum of Ethernet, IP and UDP header length */
+#define XEMACPS_TX_PTPHDR_OFFSET 42
+#define XEMACPS_RX_PTPHDR_OFFSET 28 /* Sum of IP and UDP header length */
+#define XEMACPS_IP_PROTO_OFFSET 9 /* Protocol field offset */
+#define XEMACPS_UDP_PORT_OFFSET 22 /* UDP dst port offset */
+#define XEMACPS_PTP_EVENT_PORT_NUM 0x13F /* Transport port for ptp */
+#define XEMACPS_PTP_CC_MULT (1 << 31)
+#endif
+
+#define xemacps_read(base, reg) \
+ readl_relaxed(((void __iomem *)(base)) + (reg))
+#define xemacps_write(base, reg, val) \
+ writel_relaxed((val), ((void __iomem *)(base)) + (reg))
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ size_t len;
+};
+
+/* DMA buffer descriptor structure. Each BD is two words */
+struct xemacps_bd {
+ u32 addr;
+ u32 ctrl;
+};
+
+/* Our private device data. */
+struct net_local {
+ void __iomem *baseaddr;
+ struct clk *devclk;
+ struct clk *aperclk;
+
+ struct device_node *phy_node;
+ struct device_node *gmii2rgmii_phy_node;
+ struct ring_info *tx_skb;
+ struct ring_info *rx_skb;
+
+ struct xemacps_bd *rx_bd;
+ struct xemacps_bd *tx_bd;
+
+ dma_addr_t rx_bd_dma; /* physical address */
+ dma_addr_t tx_bd_dma; /* physical address */
+
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u32 rx_bd_ci;
+
+ u32 tx_bd_freecnt;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ spinlock_t nwctrlreg_lock;
+ spinlock_t mdio_lock;
+
+ struct platform_device *pdev;
+ struct net_device *ndev; /* this device */
+ struct tasklet_struct tx_bdreclaim_tasklet;
+ struct workqueue_struct *txtimeout_handler_wq;
+ struct work_struct txtimeout_reinit;
+
+ struct napi_struct napi; /* napi information for device */
+ struct net_device_stats stats; /* Statistics for this device */
+
+ struct timer_list gen_purpose_timer; /* Used for stats update */
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ struct phy_device *gmii2rgmii_phy_dev;
+ phy_interface_t phy_interface;
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+ /* RX ip/tcp/udp checksum */
+ unsigned ip_summed;
+ unsigned int enetnum;
+ unsigned int lastrxfrmscntr;
+ unsigned int has_mdio;
+ bool timerready;
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ spinlock_t tmreg_lock;
+ int phc_index;
+ unsigned int tmr_add;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct timer_list time_keep;
+#endif
+};
+#define to_net_local(_nb) container_of(_nb, struct net_local,\
+ clk_rate_change_nb)
+
+static struct net_device_ops netdev_ops;
+
+/**
+ * xemacps_mdio_wait - Wait for the MDIO to be ready to use
+ * @lp: Pointer to the Emacps device private data
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request.
+ *
+ * Return: 0 for success or -ETIMEDOUT for a timeout
+ */
+static int xemacps_mdio_wait(struct net_local *lp)
+{
+ ulong timeout = jiffies + XEMACPS_MDIO_BUSY_TIMEOUT;
+ u32 regval;
+
+ /* Wait till the bus is free */
+ do {
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWSR_OFFSET);
+ if (regval & XEMACPS_NWSR_MDIOIDLE_MASK)
+ break;
+ else
+ cpu_relax();
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * xemacps_mdio_read - Read current value of phy register indicated by
+ * phyreg.
+ * @bus: mdio bus
+ * @mii_id: mii id
+ * @phyreg: phy register to be read
+ *
+ * Return: value read from the specified phy register, or -ETIMEDOUT on a
+ * timeout.
+ *
+ * note: This is for 802.3 clause 22 phy access. For 802.3 clause 45 phy
+ * access, clear bit 30, e.g. change XEMACPS_PHYMNTNC_OP_MASK to
+ * 0x00020000.
+ */
+static int xemacps_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int value;
+
+ pm_runtime_get_sync(&lp->pdev->dev);
+ spin_lock(&lp->mdio_lock);
+ if (xemacps_mdio_wait(lp))
+ goto timeout;
+
+ regval = XEMACPS_PHYMNTNC_OP_MASK;
+ regval |= XEMACPS_PHYMNTNC_OP_R_MASK;
+ regval |= (mii_id << XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK);
+ regval |= (phyreg << XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK);
+
+ xemacps_write(lp->baseaddr, XEMACPS_PHYMNTNC_OFFSET, regval);
+
+ /* wait for end of transfer */
+ if (xemacps_mdio_wait(lp))
+ goto timeout;
+
+ value = xemacps_read(lp->baseaddr, XEMACPS_PHYMNTNC_OFFSET) &
+ XEMACPS_PHYMNTNC_DATA_MASK;
+
+ spin_unlock(&lp->mdio_lock);
+ pm_runtime_put(&lp->pdev->dev);
+
+ return value;
+
+timeout:
+ spin_unlock(&lp->mdio_lock);
+ pm_runtime_put(&lp->pdev->dev);
+ return -ETIMEDOUT;
+}
+
+/**
+ * xemacps_mdio_write - Write passed in value to phy register indicated
+ * by phyreg.
+ * @bus: mdio bus
+ * @mii_id: mii id
+ * @phyreg: phy register to be configured.
+ * @value: value to be written to phy register.
+ * Return: 0 on success or -ETIMEDOUT on a timeout. The mii_bus write
+ * callback must return int, hence the return type.
+ *
+ * note: This is for 802.3 clause 22 phy access. For 802.3 clause 45 phy
+ * access, clear bit 30, e.g. change XEMACPS_PHYMNTNC_OP_MASK to
+ * 0x00020000.
+ */
+static int xemacps_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+ u16 value)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+
+ pm_runtime_get_sync(&lp->pdev->dev);
+ spin_lock(&lp->mdio_lock);
+ if (xemacps_mdio_wait(lp))
+ goto timeout;
+
+ regval = XEMACPS_PHYMNTNC_OP_MASK;
+ regval |= XEMACPS_PHYMNTNC_OP_W_MASK;
+ regval |= (mii_id << XEMACPS_PHYMNTNC_PHYAD_SHIFT_MASK);
+ regval |= (phyreg << XEMACPS_PHYMNTNC_PHREG_SHIFT_MASK);
+ regval |= value;
+
+ xemacps_write(lp->baseaddr, XEMACPS_PHYMNTNC_OFFSET, regval);
+
+ /* wait for end of transfer */
+ if (xemacps_mdio_wait(lp))
+ goto timeout;
+ spin_unlock(&lp->mdio_lock);
+ pm_runtime_put(&lp->pdev->dev);
+
+ return 0;
+
+timeout:
+ spin_unlock(&lp->mdio_lock);
+ pm_runtime_put(&lp->pdev->dev);
+ return -ETIMEDOUT;
+}
+
+/**
+ * xemacps_mdio_reset - MDIO bus reset callback. The MDIO layer
+ * (Documentation/networking/phy.txt) expects this hook, but this device
+ * has no MDIO reset, so provide a no-op implementation.
+ * @bus: mdio bus
+ *
+ * Return: Always 0
+ */
+static int xemacps_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemacps_set_freq - Set a clock to a new frequency
+ * @clk: Pointer to the clock to change
+ * @rate: New frequency in Hz
+ * @dev: Pointer to the struct device
+ */
+static void xemacps_set_freq(struct clk *clk, long rate, struct device *dev)
+{
+ rate = clk_round_rate(clk, rate);
+ if (rate < 0)
+ return;
+
+ dev_info(dev, "Set clk to %ld Hz\n", rate);
+ if (clk_set_rate(clk, rate))
+ dev_err(dev, "Setting new clock rate failed.\n");
+}
+
+/**
+ * xemacps_adjust_link - handles link status changes, such as speed,
+ * duplex, up/down, ...
+ * @ndev: network device
+ */
+static void xemacps_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *gmii2rgmii_phydev = lp->gmii2rgmii_phy_dev;
+ int status_change = 0;
+ u32 regval;
+ u16 gmii2rgmii_reg = 0;
+
+ if (phydev->link) {
+ if ((lp->speed != phydev->speed) ||
+ (lp->duplex != phydev->duplex)) {
+ regval = xemacps_read(lp->baseaddr,
+ XEMACPS_NWCFG_OFFSET);
+ regval &= ~(XEMACPS_NWCFG_FDEN_MASK |
+ XEMACPS_NWCFG_1000_MASK |
+ XEMACPS_NWCFG_100_MASK);
+
+ if (phydev->duplex) {
+ regval |= XEMACPS_NWCFG_FDEN_MASK;
+ gmii2rgmii_reg |= XEMACPS_GMII2RGMII_FULLDPLX;
+ }
+
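+ /* The MAC TX clock has to match the (R)GMII line rate:
+ * 125 MHz for 1000 Mb/s, 25 MHz for 100 Mb/s and
+ * 2.5 MHz for 10 Mb/s, hence the clock changes below.
+ */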
+ if (phydev->speed == SPEED_1000) {
+ regval |= XEMACPS_NWCFG_1000_MASK;
+ gmii2rgmii_reg |= XEMACPS_GMII2RGMII_SPEED1000;
+ xemacps_set_freq(lp->devclk, 125000000,
+ &lp->pdev->dev);
+ } else if (phydev->speed == SPEED_100) {
+ regval |= XEMACPS_NWCFG_100_MASK;
+ gmii2rgmii_reg |= XEMACPS_GMII2RGMII_SPEED100;
+ xemacps_set_freq(lp->devclk, 25000000,
+ &lp->pdev->dev);
+ } else if (phydev->speed == SPEED_10) {
+ xemacps_set_freq(lp->devclk, 2500000,
+ &lp->pdev->dev);
+ } else {
+ dev_err(&lp->pdev->dev,
+ "%s: unknown PHY speed %d\n",
+ __func__, phydev->speed);
+ return;
+ }
+ if (lp->timerready && (phydev->speed != SPEED_1000)) {
+ del_timer_sync(&(lp->gen_purpose_timer));
+ lp->timerready = false;
+ }
+
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET,
+ regval);
+
+ if (gmii2rgmii_phydev != NULL) {
+ xemacps_mdio_write(lp->mii_bus,
+ gmii2rgmii_phydev->mdio.addr,
+ XEMACPS_GMII2RGMII_REG_NUM,
+ gmii2rgmii_reg);
+ }
+
+ lp->speed = phydev->speed;
+ lp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != lp->link) {
+ lp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link)
+ dev_info(&lp->pdev->dev, "link up (%d/%s)\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "FULL" : "HALF");
+ else
+ dev_info(&lp->pdev->dev, "link down\n");
+ }
+}
+
+/**
+ * xemacps_mii_probe - probe mii bus, find the right bus_id to register
+ * phy callback function.
+ * @ndev: network interface device structure
+ * Return: 0 on success, negative value if error
+ */
+static int xemacps_mii_probe(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+
+ if (!lp->phy_node) {
+ dev_info(&lp->pdev->dev, "%s: no PHY setup\n", ndev->name);
+ return 0;
+ }
+
+ if (lp->gmii2rgmii_phy_node) {
+ phydev = of_phy_attach(lp->ndev,
+ lp->gmii2rgmii_phy_node,
+ 0, 0);
+ if (!phydev) {
+ dev_err(&lp->pdev->dev, "%s: no gmii to rgmii converter found\n",
+ ndev->name);
+ return -1;
+ }
+ lp->gmii2rgmii_phy_dev = phydev;
+ } else
+ lp->gmii2rgmii_phy_dev = NULL;
+
+ phydev = of_phy_connect(lp->ndev,
+ lp->phy_node,
+ &xemacps_adjust_link,
+ 0,
+ lp->phy_interface);
+
+ if (!phydev) {
+ dev_err(&lp->pdev->dev, "%s: no PHY found\n", ndev->name);
+ if (lp->gmii2rgmii_phy_dev) {
+ phy_disconnect(lp->gmii2rgmii_phy_dev);
+ lp->gmii2rgmii_phy_dev = NULL;
+ }
+ return -1;
+ }
+
+ dev_dbg(&lp->pdev->dev,
+ "GEM: phydev %p, phydev->phy_id 0x%x, phydev->addr 0x%x\n",
+ phydev, phydev->phy_id, phydev->mdio.addr);
+
+ phydev->supported &= (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phydev->advertising = phydev->supported;
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = -1;
+ lp->phy_dev = phydev;
+
+ phy_start(lp->phy_dev);
+
+ dev_dbg(&lp->pdev->dev, "phy_addr 0x%x, phy_id 0x%08x\n",
+ lp->phy_dev->mdio.addr, lp->phy_dev->phy_id);
+
+ dev_dbg(&lp->pdev->dev, "attach [%s] phy driver\n",
+ lp->phy_dev->drv->name);
+
+ return 0;
+}
+
+/**
+ * xemacps_mii_init - Initialize and register mii bus to network device
+ * @lp: local device instance pointer
+ * Return: 0 on success, negative value if error
+ */
+static int xemacps_mii_init(struct net_local *lp)
+{
+ int rc = -ENXIO, i;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+ struct device_node *npp;
+
+ lp->mii_bus = of_mdio_find_bus(np);
+ if (!lp->has_mdio && lp->mii_bus)
+ return 0;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (lp->mii_bus == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ lp->mii_bus->name = "XEMACPS mii bus";
+ lp->mii_bus->read = &xemacps_mdio_read;
+ lp->mii_bus->write = &xemacps_mdio_write;
+ lp->mii_bus->reset = &xemacps_mdio_reset;
+ lp->mii_bus->priv = lp;
+ lp->mii_bus->parent = &lp->ndev->dev;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+ npp = of_get_parent(np);
+ of_address_to_resource(npp, 0, &res);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+
+ if (lp->phy_node) {
+ if (of_mdiobus_register(lp->mii_bus, np))
+ goto err_out_free_mdiobus;
+ }
+
+ return 0;
+
+err_out_free_mdiobus:
+ mdiobus_free(lp->mii_bus);
+err_out:
+ return rc;
+}
+
+/**
+ * xemacps_update_hwaddr - Use the MAC address from the device registers
+ * if it is valid; otherwise fall back to a random MAC address.
+ * @lp: local device instance pointer
+ */
+static void xemacps_update_hwaddr(struct net_local *lp)
+{
+ u32 regvall;
+ u16 regvalh;
+ u8 addr[6];
+
+ regvall = xemacps_read(lp->baseaddr, XEMACPS_LADDR1L_OFFSET);
+ regvalh = xemacps_read(lp->baseaddr, XEMACPS_LADDR1H_OFFSET);
+ addr[0] = regvall & 0xFF;
+ addr[1] = (regvall >> 8) & 0xFF;
+ addr[2] = (regvall >> 16) & 0xFF;
+ addr[3] = (regvall >> 24) & 0xFF;
+ addr[4] = regvalh & 0xFF;
+ addr[5] = (regvalh >> 8) & 0xFF;
+
+ if (is_valid_ether_addr(addr)) {
+ ether_addr_copy(lp->ndev->dev_addr, addr);
+ } else {
+ dev_info(&lp->pdev->dev, "invalid address, use random\n");
+ eth_hw_addr_random(lp->ndev);
+ dev_info(&lp->pdev->dev,
+ "MAC updated %02x:%02x:%02x:%02x:%02x:%02x\n",
+ lp->ndev->dev_addr[0], lp->ndev->dev_addr[1],
+ lp->ndev->dev_addr[2], lp->ndev->dev_addr[3],
+ lp->ndev->dev_addr[4], lp->ndev->dev_addr[5]);
+ }
+}
+
+/**
+ * xemacps_set_hwaddr - Set device's MAC address from ndev->dev_addr
+ * @lp: local device instance pointer
+ */
+static void xemacps_set_hwaddr(struct net_local *lp)
+{
+ u32 regvall = 0;
+ u16 regvalh = 0;
+#ifdef __LITTLE_ENDIAN
+ regvall = cpu_to_le32(*((u32 *)lp->ndev->dev_addr));
+ regvalh = cpu_to_le16(*((u16 *)(lp->ndev->dev_addr + 4)));
+#endif
+#ifdef __BIG_ENDIAN
+ regvall = cpu_to_be32(*((u32 *)lp->ndev->dev_addr));
+ regvalh = cpu_to_be16(*((u16 *)(lp->ndev->dev_addr + 4)));
+#endif
+ /* LADDRXH has to be written later than LADDRXL to enable
+ * this address even if these 16 bits are zeros. */
+ xemacps_write(lp->baseaddr, XEMACPS_LADDR1L_OFFSET, regvall);
+ xemacps_write(lp->baseaddr, XEMACPS_LADDR1H_OFFSET, regvalh);
+#ifdef DEBUG
+ regvall = xemacps_read(lp->baseaddr, XEMACPS_LADDR1L_OFFSET);
+ regvalh = xemacps_read(lp->baseaddr, XEMACPS_LADDR1H_OFFSET);
+ dev_dbg(&lp->pdev->dev,
+ "MAC 0x%08x, 0x%08x, %02x:%02x:%02x:%02x:%02x:%02x\n",
+ regvall, regvalh,
+ (regvall & 0xff), ((regvall >> 8) & 0xff),
+ ((regvall >> 16) & 0xff), (regvall >> 24),
+ (regvalh & 0xff), (regvalh >> 8));
+#endif
+}
+
+/*
+ * xemacps_reset_hw - Helper function to reset the underlying hardware.
+ * This is called when we get into such deep trouble that we don't know
+ * how to handle otherwise.
+ * @lp: local device instance pointer
+ */
+static void xemacps_reset_hw(struct net_local *lp)
+{
+ u32 regisr;
+
+ /* make sure we have the buffer for ourselves */
+ wmb();
+
+ /* Clear statistic counters and keep mdio enabled as this mdio
+ * interface might be being reused by the other MAC.
+ */
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET,
+ XEMACPS_NWCTRL_STATCLR_MASK|XEMACPS_NWCTRL_MDEN_MASK);
+
+ /* Clear TX and RX status */
+ xemacps_write(lp->baseaddr, XEMACPS_TXSR_OFFSET, ~0UL);
+ xemacps_write(lp->baseaddr, XEMACPS_RXSR_OFFSET, ~0UL);
+
+ /* Disable all interrupts */
+ xemacps_write(lp->baseaddr, XEMACPS_IDR_OFFSET, ~0UL);
+ synchronize_irq(lp->ndev->irq);
+ regisr = xemacps_read(lp->baseaddr, XEMACPS_ISR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_ISR_OFFSET, regisr);
+}
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+/**
+ * xemacps_time_keep - Call timecounter_read every second to avoid counter overflow
+ * @_data: Contains the device instance pointer
+ * Return: None
+ */
+void xemacps_time_keep(unsigned long _data)
+{
+ struct net_local *lp = (struct net_local *)_data;
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->tmreg_lock, flags);
+ ns = timecounter_read(&lp->tc);
+ spin_unlock_irqrestore(&lp->tmreg_lock, flags);
+
+ mod_timer(&lp->time_keep, jiffies + HZ);
+}
+
+/**
+ * xemacps_ptp_read - Read timestamp information from the timer counters
+ * @lp: Local device instance pointer
+ * @ts: Timespec structure to hold the current time value
+ * Return: None
+ */
+static inline void xemacps_ptp_read(struct net_local *lp,
+ struct timespec *ts)
+{
+ ts->tv_sec = xemacps_read(lp->baseaddr, XEMACPS_1588S_OFFSET);
+ ts->tv_nsec = xemacps_read(lp->baseaddr, XEMACPS_1588NS_OFFSET);
+
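+ /* If the seconds register advanced between the two reads above, the
+ * nanoseconds value may have wrapped; sample it again in that case.
+ */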
+ if (ts->tv_sec < xemacps_read(lp->baseaddr, XEMACPS_1588S_OFFSET))
+ ts->tv_nsec = xemacps_read(lp->baseaddr, XEMACPS_1588NS_OFFSET);
+}
+
+/**
+ * xemacps_read_clock - Read raw cycle counter (to be used by time counter)
+ * @cc: Cyclecounter structure
+ * Return: Hw time stamp
+ */
+static cycle_t xemacps_read_clock(const struct cyclecounter *cc)
+{
+ struct net_local *lp = container_of(cc, struct net_local, cc);
+ u64 stamp;
+ struct timespec ts;
+
+ xemacps_ptp_read(lp, &ts);
+ stamp = ts.tv_sec * NS_PER_SEC + ts.tv_nsec;
+
+ return stamp;
+}
+
+/**
+ * xemacps_ptp_write - Write the current time value to the timer counters
+ * @lp: Local device instance pointer
+ * @ts: Timespec structure to hold the time value
+ * Return: None
+ */
+static inline void xemacps_ptp_write(struct net_local *lp,
+ const struct timespec *ts)
+{
+ xemacps_write(lp->baseaddr, XEMACPS_1588S_OFFSET, ts->tv_sec);
+ xemacps_write(lp->baseaddr, XEMACPS_1588NS_OFFSET, ts->tv_nsec);
+}
+
+/**
+ * xemacps_systim_to_hwtstamp - Convert system time value to hw timestamp
+ * @lp: Local device instance pointer
+ * @shhwtstamps: Timestamp structure to update
+ * @regval: Unsigned 64bit system time value.
+ * Return: None
+ */
+static void xemacps_systim_to_hwtstamp(struct net_local *lp,
+ struct skb_shared_hwtstamps *shhwtstamps,
+ u64 regval)
+{
+ u64 ns;
+
+ ns = timecounter_cyc2time(&lp->tc, regval);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * xemacps_rx_hwtstamp - Read rx timestamp from hw and update it to the skbuff
+ * @lp: Local device instance pointer
+ * @skb: Pointer to the socket buffer
+ * @msg_type: PTP message type
+ * Return: None
+ */
+static void xemacps_rx_hwtstamp(struct net_local *lp,
+ struct sk_buff *skb, unsigned msg_type)
+{
+ u32 sec, nsec;
+ u64 time64;
+
+ if (msg_type) {
+ /* PTP Peer Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPPRXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPPRXNS_OFFSET);
+ } else {
+ /* PTP Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPERXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPERXNS_OFFSET);
+ }
+ time64 = sec * NS_PER_SEC + nsec;
+ xemacps_systim_to_hwtstamp(lp, skb_hwtstamps(skb), time64);
+}
+
+/**
+ * xemacps_tx_hwtstamp - Read tx timestamp from hw and update it to the skbuff
+ * @lp: Local device instance pointer
+ * @skb: Pointer to the socket buffer
+ * @msg_type: PTP message type
+ * Return: None
+ */
+static void xemacps_tx_hwtstamp(struct net_local *lp,
+ struct sk_buff *skb, unsigned msg_type)
+{
+ u32 sec, nsec;
+ u64 time64;
+
+ if (msg_type) {
+ /* PTP Peer Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPPTXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPPTXNS_OFFSET);
+ } else {
+ /* PTP Event Frame packets */
+ sec = xemacps_read(lp->baseaddr, XEMACPS_PTPETXS_OFFSET);
+ nsec = xemacps_read(lp->baseaddr, XEMACPS_PTPETXNS_OFFSET);
+ }
+ time64 = sec * NS_PER_SEC + nsec;
+ xemacps_systim_to_hwtstamp(lp, skb_hwtstamps(skb), time64);
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
+}
+
+/**
+ * xemacps_ptp_enable - Select the mode of operation
+ * @ptp: PTP clock structure
+ * @rq: Requested feature to change
+ * @on: Whether to enable or disable the feature
+ * Return: Always returns -EOPNOTSUPP
+ */
+static int xemacps_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * xemacps_ptp_gettime - Get the current time from the timer counter
+ * @ptp: PTP clock structure
+ * @ts: Timespec structure to hold the current time value
+ * Return: Always returns zero
+ */
+static int xemacps_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct net_local *lp = container_of(ptp, struct net_local, ptp_caps);
+ u64 ns;
+ u32 remainder;
+
+ spin_lock_irqsave(&lp->tmreg_lock, flags);
+ ns = timecounter_read(&lp->tc);
+ spin_unlock_irqrestore(&lp->tmreg_lock, flags);
+ ts->tv_sec = div_u64_rem(ns, NS_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * xemacps_ptp_settime - Reset the timercounter to use new base value
+ * @ptp: PTP clock structure
+ * @ts: Timespec structure holding the new time value
+ * Return: Always returns zero
+ */
+static int xemacps_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct net_local *lp = container_of(ptp, struct net_local, ptp_caps);
+ u64 ns;
+
+ ns = ts->tv_sec * NS_PER_SEC;
+ ns += ts->tv_nsec;
+ spin_lock_irqsave(&lp->tmreg_lock, flags);
+ timecounter_init(&lp->tc, &lp->cc, ns);
+ spin_unlock_irqrestore(&lp->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xemacps_ptp_adjfreq - Adjust the clock frequency
+ * @ptp: PTP clock info structure
+ * @ppb: Frequency in parts per billion
+ * Return: Always returns zero
+ */
+static int xemacps_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 diff;
+ unsigned long flags;
+ int neg_adj = 0;
+ u32 mult = XEMACPS_PTP_CC_MULT;
+ struct net_local *lp = container_of(ptp, struct net_local, ptp_caps);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ diff = mult;
+ diff *= ppb;
+ diff = div_u64(diff, NS_PER_SEC);
+ spin_lock_irqsave(&lp->tmreg_lock, flags);
+ /*
+ * dummy read to set cycle_last in tc to now.
+ * So use adjusted mult to calculate when next call
+ * timercounter_read.
+ */
+ timecounter_read(&lp->tc);
+ lp->cc.mult = neg_adj ? mult - diff : mult + diff;
+ spin_unlock_irqrestore(&lp->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xemacps_ptp_adjtime - Adjust the timer counter value with delta
+ * @ptp: PTP clock info structure
+ * @delta: Delta value in nano seconds
+ * Return: Always returns zero
+ */
+static int xemacps_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct net_local *lp = container_of(ptp, struct net_local, ptp_caps);
+ u64 now;
+
+ spin_lock_irqsave(&lp->tmreg_lock, flags);
+ now = timecounter_read(&lp->tc);
+ now += delta;
+ timecounter_init(&lp->tc, &lp->cc, now);
+ spin_unlock_irqrestore(&lp->tmreg_lock, flags);
+ return 0;
+}
+
+/**
+ * xemacps_ptp_init - Initialize the clock and register with ptp sub system
+ * @lp: Local device instance pointer
+ * Return: None
+ */
+static void xemacps_ptp_init(struct net_local *lp)
+{
+ struct timespec now;
+ unsigned long rate;
+
+ lp->ptp_caps.owner = THIS_MODULE;
+ snprintf(lp->ptp_caps.name, 16, "zynq ptp");
+ lp->ptp_caps.n_alarm = 0;
+ lp->ptp_caps.n_ext_ts = 0;
+ lp->ptp_caps.n_per_out = 0;
+ lp->ptp_caps.pps = 0;
+ lp->ptp_caps.adjfreq = xemacps_ptp_adjfreq;
+ lp->ptp_caps.adjtime = xemacps_ptp_adjtime;
+ lp->ptp_caps.gettime64 = xemacps_ptp_gettime;
+ lp->ptp_caps.settime64 = xemacps_ptp_settime;
+ lp->ptp_caps.enable = xemacps_ptp_enable;
+
+ rate = clk_get_rate(lp->aperclk);
+
+ spin_lock_init(&lp->tmreg_lock);
+ init_timer(&lp->time_keep);
+ lp->time_keep.data = (unsigned long)lp;
+ lp->time_keep.function = xemacps_time_keep;
+ lp->time_keep.expires = jiffies + HZ;
+ add_timer(&lp->time_keep);
+
+ lp->ptp_caps.max_adj = rate;
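+ /* xemacps_read_clock() already returns nanoseconds, so with
+ * mult = 2^31 and shift = 31 the timecounter conversion below is 1:1;
+ * xemacps_ptp_adjfreq() then trims mult around this midpoint. The 1588
+ * increment register is loaded with the nanoseconds to add per clock
+ * tick, i.e. NS_PER_SEC / rate.
+ */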
+ memset(&lp->cc, 0, sizeof(lp->cc));
+ lp->cc.read = xemacps_read_clock;
+ lp->cc.mask = CLOCKSOURCE_MASK(64);
+ lp->cc.mult = XEMACPS_PTP_CC_MULT;
+ lp->cc.shift = 31;
+ lp->tmr_add = (NS_PER_SEC/rate);
+ xemacps_write(lp->baseaddr, XEMACPS_1588INC_OFFSET,
+ lp->tmr_add);
+ getnstimeofday(&now);
+ xemacps_ptp_write(lp, (const struct timespec *)&now);
+ timecounter_init(&lp->tc, &lp->cc,
+ ktime_to_ns(ktime_get_real()));
+ lp->ptp_clock = ptp_clock_register(&lp->ptp_caps, &lp->pdev->dev);
+ if (IS_ERR(lp->ptp_clock))
+ pr_err("ptp_clock_register failed\n");
+
+ lp->phc_index = ptp_clock_index(lp->ptp_clock);
+}
+
+/**
+ * xemacps_ptp_close - Disable the ptp interface
+ * @lp: Local device instance pointer
+ * Return: None
+ */
+static void xemacps_ptp_close(struct net_local *lp)
+{
+ /* Clear the time counters */
+ xemacps_write(lp->baseaddr, XEMACPS_1588NS_OFFSET, 0x0);
+ xemacps_write(lp->baseaddr, XEMACPS_1588S_OFFSET, 0x0);
+ xemacps_write(lp->baseaddr, XEMACPS_1588ADJ_OFFSET, 0x0);
+ xemacps_write(lp->baseaddr, XEMACPS_1588INC_OFFSET, 0x0);
+
+ del_timer(&lp->time_keep);
+ ptp_clock_unregister(lp->ptp_clock);
+
+ /* Initialize hwstamp config */
+ lp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+}
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+/**
+ * xemacps_rx - process received packets when napi called
+ * @lp: local device instance pointer
+ * @budget: NAPI budget
+ * Return: number of BDs processed
+ */
+static int xemacps_rx(struct net_local *lp, int budget)
+{
+ struct xemacps_bd *cur_p;
+ u32 len;
+ struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ u32 new_skb_baddr;
+ unsigned int numbdfree = 0;
+ u32 size = 0;
+ u32 packets = 0;
+ u32 regval;
+
+ cur_p = &lp->rx_bd[lp->rx_bd_ci];
+ regval = cur_p->addr;
+ rmb();
+ while (numbdfree < budget) {
+ if (!(regval & XEMACPS_RXBUF_NEW_MASK))
+ break;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_RXSR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_RXSR_OFFSET, regval);
+ if (regval & XEMACPS_RXSR_HRESPNOK_MASK) {
+ dev_err(&lp->pdev->dev, "RX error 0x%x\n", regval);
+ numbdfree = 0xFFFFFFFF;
+ break;
+ }
+
+ new_skb = netdev_alloc_skb(lp->ndev, XEMACPS_RX_BUF_SIZE);
+ if (new_skb == NULL) {
+ dev_err(&lp->ndev->dev, "no memory for new sk_buff\n");
+ break;
+ }
+ /* Get dma handle of skb->data */
+ new_skb_baddr = (u32) dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ XEMACPS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ /* the packet length */
+ len = cur_p->ctrl & XEMACPS_RXBUF_LEN_MASK;
+ rmb();
+ skb = lp->rx_skb[lp->rx_bd_ci].skb;
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[lp->rx_bd_ci].mapping,
+ lp->rx_skb[lp->rx_bd_ci].len,
+ DMA_FROM_DEVICE);
+
+ /* setup received skb and send it upstream */
+ skb_put(skb, len); /* Tell the skb how much data we got. */
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+
+ skb->ip_summed = lp->ip_summed;
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ if ((lp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) &&
+ (ntohs(skb->protocol) == ETH_P_IP)) {
+ u8 transport_proto, msg_type;
+ u16 dst_port;
+ /* While the GEM can timestamp PTP packets, it does
+ * not mark the RX descriptor to identify them. This
+ * is entirely the wrong place to be parsing UDP
+ * headers, but some minimal effort must be made.
+ * NOTE: the below parsing of ip_proto and dest_port
+ * depend on the use of Ethernet_II encapsulation,
+ * IPv4 without any options.
+ */
+ skb_copy_from_linear_data_offset(skb,
+ XEMACPS_IP_PROTO_OFFSET, &transport_poto, 1);
+ if (transport_proto == IPPROTO_UDP) {
+ skb_copy_from_linear_data_offset(skb,
+ XEMACPS_UDP_PORT_OFFSET, &dst_port, 2);
+ if (ntohs(dst_port) ==
+ XEMACPS_PTP_EVENT_PORT_NUM) {
+ skb_copy_from_linear_data_offset(skb,
+ XEMACPS_RX_PTPHDR_OFFSET,
+ &msg_type, 1);
+ xemacps_rx_hwtstamp(lp, skb,
+ msg_type & 0x2);
+ }
+ }
+ }
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+ size += len;
+ packets++;
+ netif_receive_skb(skb);
+
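+ /* Refill the descriptor with the new buffer: keep the wrap bit from
+ * the old address word, install the new buffer address, then clear
+ * the ctrl word and the used/new bit so the controller owns the BD
+ * again.
+ */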
+ cur_p->addr = (cur_p->addr & ~XEMACPS_RXBUF_ADD_MASK)
+ | (new_skb_baddr);
+ lp->rx_skb[lp->rx_bd_ci].skb = new_skb;
+ lp->rx_skb[lp->rx_bd_ci].mapping = new_skb_baddr;
+ lp->rx_skb[lp->rx_bd_ci].len = XEMACPS_RX_BUF_SIZE;
+
+ cur_p->ctrl = 0;
+ cur_p->addr &= (~XEMACPS_RXBUF_NEW_MASK);
+ wmb();
+
+ lp->rx_bd_ci++;
+ lp->rx_bd_ci = lp->rx_bd_ci % XEMACPS_RECV_BD_CNT;
+ cur_p = &lp->rx_bd[lp->rx_bd_ci];
+ regval = cur_p->addr;
+ rmb();
+ numbdfree++;
+ }
+ wmb();
+ lp->stats.rx_packets += packets;
+ lp->stats.rx_bytes += size;
+ return numbdfree;
+}
+
+/**
+ * xemacps_rx_poll - NAPI poll routine
+ * @napi: pointer to napi struct
+ * @budget: NAPI budget
+ * Return: number of BDs processed
+ */
+static int xemacps_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct net_local *lp = container_of(napi, struct net_local, napi);
+ int work_done = 0;
+ u32 count;
+
+ spin_lock(&lp->rx_lock);
+ while (1) {
+
+ count = xemacps_rx(lp, budget - work_done);
+ if (count == 0xFFFFFFFF) {
+ napi_complete(napi);
+ spin_unlock(&lp->rx_lock);
+ goto reset_hw;
+ }
+ work_done += count;
+ if (work_done >= budget)
+ break;
+
+ napi_complete(napi);
+ /* We disabled RX interrupts in interrupt service
+ * routine, now it is time to enable it back.
+ */
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IER_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+
+ /* If a packet has come in between the last check of the BD
+ * list and unmasking the interrupts, we may have missed the
+ * interrupt, so reschedule here.
+ */
+ if ((lp->rx_bd[lp->rx_bd_ci].addr & XEMACPS_RXBUF_NEW_MASK)
+ && napi_reschedule(napi)) {
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IDR_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+ continue;
+ }
+ break;
+ }
+ spin_unlock(&lp->rx_lock);
+ return work_done;
+reset_hw:
+ queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+ return 0;
+}
+
+/**
+ * xemacps_tx_poll - tx bd reclaim tasklet handler
+ * @data: pointer to network interface device structure
+ */
+static void xemacps_tx_poll(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+ struct xemacps_bd *cur_p;
+ u32 numbdsinhw;
+ struct ring_info *rp;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u32 txbdcount = 0;
+ bool isfrag = false;
+
+ numbdsinhw = XEMACPS_SEND_BD_CNT - lp->tx_bd_freecnt;
+ if (!numbdsinhw)
+ return;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_TXSR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_TXSR_OFFSET, regval);
+ dev_dbg(&lp->pdev->dev, "TX status 0x%x\n", regval);
+ if (regval & (XEMACPS_TXSR_HRESPNOK_MASK | XEMACPS_TXSR_BUFEXH_MASK))
+ dev_err(&lp->pdev->dev, "TX error 0x%x\n", regval);
+
+ cur_p = &lp->tx_bd[lp->tx_bd_ci];
+
+ while (numbdsinhw) {
+
+ if ((cur_p->ctrl & XEMACPS_TXBUF_USED_MASK) !=
+ XEMACPS_TXBUF_USED_MASK) {
+ if (isfrag == false)
+ break;
+ }
+ rp = &lp->tx_skb[lp->tx_bd_ci];
+ skb = rp->skb;
+ lp->stats.tx_bytes += cur_p->ctrl & XEMACPS_TXBUF_LEN_MASK;
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ u8 msg_type;
+ skb_copy_from_linear_data_offset(skb,
+ XEMACPS_TX_PTPHDR_OFFSET, &msg_type, 1);
+ xemacps_tx_hwtstamp(lp, skb, msg_type & 0x2);
+ }
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
+
+ dma_unmap_single(&lp->pdev->dev, rp->mapping, rp->len,
+ DMA_TO_DEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb(skb);
+ /* log tx completed packets, errors logs
+ * are in other error counters.
+ */
+ if (cur_p->ctrl & XEMACPS_TXBUF_LAST_MASK) {
+ lp->stats.tx_packets++;
+ isfrag = false;
+ } else {
+ isfrag = true;
+ }
+
+ /* Set used bit, preserve wrap bit; clear everything else. */
+ cur_p->ctrl |= XEMACPS_TXBUF_USED_MASK;
+ cur_p->ctrl &= (XEMACPS_TXBUF_USED_MASK |
+ XEMACPS_TXBUF_WRAP_MASK);
+ lp->tx_bd_ci++;
+ lp->tx_bd_ci = lp->tx_bd_ci % XEMACPS_SEND_BD_CNT;
+ cur_p = &lp->tx_bd[lp->tx_bd_ci];
+ numbdsinhw--;
+ txbdcount++;
+ }
+
+ spin_lock(&lp->tx_lock);
+ lp->tx_bd_freecnt += txbdcount;
+ spin_unlock(&lp->tx_lock);
+
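+ /* If descriptors are still owned by hardware, kick the transmitter:
+ * the controller can stop fetching descriptors after reading a used
+ * bit, so it is restarted here to be safe.
+ */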
+ if (numbdsinhw) {
+ spin_lock_irqsave(&lp->nwctrlreg_lock, flags);
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ regval |= XEMACPS_NWCTRL_STARTTX_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regval);
+ spin_unlock_irqrestore(&lp->nwctrlreg_lock, flags);
+ }
+
+ netif_wake_queue(ndev);
+}
+
+/**
+ * xemacps_interrupt - interrupt main service routine
+ * @irq: interrupt number
+ * @dev_id: pointer to a network device structure
+ * Return: IRQ_HANDLED or IRQ_NONE
+ */
+static irqreturn_t xemacps_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regisr;
+ u32 regctrl;
+
+ regisr = xemacps_read(lp->baseaddr, XEMACPS_ISR_OFFSET);
+ if (unlikely(!regisr))
+ return IRQ_NONE;
+
+ xemacps_write(lp->baseaddr, XEMACPS_ISR_OFFSET, regisr);
+
+ while (regisr) {
+ if (regisr & (XEMACPS_IXR_TXCOMPL_MASK |
+ XEMACPS_IXR_TX_ERR_MASK)) {
+ tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+ }
+
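+ /* A set RX used-bit interrupt means a frame arrived while no free
+ * buffer descriptor was available. Flush the packet buffer DPRAM so
+ * the stalled frame is dropped, then let NAPI drain the ring with
+ * further RX interrupts masked.
+ */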
+ if (regisr & XEMACPS_IXR_RXUSED_MASK) {
+ spin_lock(&lp->nwctrlreg_lock);
+ regctrl = xemacps_read(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET);
+ regctrl |= XEMACPS_NWCTRL_FLUSH_DPRAM_MASK;
+ xemacps_write(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET, regctrl);
+ spin_unlock(&lp->nwctrlreg_lock);
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IDR_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+ napi_schedule(&lp->napi);
+ }
+
+ if (regisr & XEMACPS_IXR_FRAMERX_MASK) {
+ xemacps_write(lp->baseaddr,
+ XEMACPS_IDR_OFFSET, XEMACPS_IXR_FRAMERX_MASK);
+ napi_schedule(&lp->napi);
+ }
+
+ regisr = xemacps_read(lp->baseaddr, XEMACPS_ISR_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_ISR_OFFSET, regisr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Free all packets presently in the descriptor rings.
+ */
+static void xemacps_clean_rings(struct net_local *lp)
+{
+ int i;
+
+ for (i = 0; i < XEMACPS_RECV_BD_CNT; i++) {
+ if (lp->rx_skb && lp->rx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[i].mapping,
+ lp->rx_skb[i].len,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb(lp->rx_skb[i].skb);
+ lp->rx_skb[i].skb = NULL;
+ lp->rx_skb[i].mapping = 0;
+ }
+ }
+
+ for (i = 0; i < XEMACPS_SEND_BD_CNT; i++) {
+ if (lp->tx_skb && lp->tx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb(lp->tx_skb[i].skb);
+ lp->tx_skb[i].skb = NULL;
+ lp->tx_skb[i].mapping = 0;
+ }
+ }
+}
+
+/**
+ * xemacps_descriptor_free - Free allocated TX and RX BDs
+ * @lp: local device instance pointer
+ */
+static void xemacps_descriptor_free(struct net_local *lp)
+{
+ int size;
+
+ xemacps_clean_rings(lp);
+
+ /* kfree(NULL) is safe, no need to check here */
+ kfree(lp->tx_skb);
+ lp->tx_skb = NULL;
+ kfree(lp->rx_skb);
+ lp->rx_skb = NULL;
+
+ size = XEMACPS_RECV_BD_CNT * sizeof(struct xemacps_bd);
+ if (lp->rx_bd) {
+ dma_free_coherent(&lp->pdev->dev, size,
+ lp->rx_bd, lp->rx_bd_dma);
+ lp->rx_bd = NULL;
+ }
+
+ size = XEMACPS_SEND_BD_CNT * sizeof(struct xemacps_bd);
+ if (lp->tx_bd) {
+ dma_free_coherent(&lp->pdev->dev, size,
+ lp->tx_bd, lp->tx_bd_dma);
+ lp->tx_bd = NULL;
+ }
+}
+
+/**
+ * xemacps_descriptor_init - Allocate both TX and RX BDs
+ * @lp: local device instance pointer
+ * Return: 0 on success, negative value if error
+ */
+static int xemacps_descriptor_init(struct net_local *lp)
+{
+ int size;
+ struct sk_buff *new_skb;
+ u32 new_skb_baddr;
+ u32 i;
+ struct xemacps_bd *cur_p;
+ u32 regval;
+
+ lp->tx_skb = NULL;
+ lp->rx_skb = NULL;
+ lp->rx_bd = NULL;
+ lp->tx_bd = NULL;
+
+ /* Reset the indexes which are used for accessing the BDs */
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
+ size = XEMACPS_SEND_BD_CNT * sizeof(struct ring_info);
+ lp->tx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->tx_skb)
+ goto err_out;
+ size = XEMACPS_RECV_BD_CNT * sizeof(struct ring_info);
+ lp->rx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->rx_skb)
+ goto err_out;
+
+ /*
+ * Set up RX buffer descriptors.
+ */
+
+ size = XEMACPS_RECV_BD_CNT * sizeof(struct xemacps_bd);
+ lp->rx_bd = dma_alloc_coherent(&lp->pdev->dev, size,
+ &lp->rx_bd_dma, GFP_KERNEL);
+ if (!lp->rx_bd)
+ goto err_out;
+ dev_dbg(&lp->pdev->dev, "RX ring %d bytes at 0x%x mapped %p\n",
+ size, lp->rx_bd_dma, lp->rx_bd);
+
+ for (i = 0; i < XEMACPS_RECV_BD_CNT; i++) {
+ cur_p = &lp->rx_bd[i];
+
+ new_skb = netdev_alloc_skb(lp->ndev, XEMACPS_RX_BUF_SIZE);
+ if (new_skb == NULL) {
+ dev_err(&lp->ndev->dev, "alloc_skb error %d\n", i);
+ goto err_out;
+ }
+
+ /* Get dma handle of skb->data */
+ new_skb_baddr = (u32) dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ XEMACPS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr))
+ goto err_out;
+
+ /* set wrap bit for last BD */
+ regval = (new_skb_baddr & XEMACPS_RXBUF_ADD_MASK);
+ if (i == XEMACPS_RECV_BD_CNT - 1)
+ regval |= XEMACPS_RXBUF_WRAP_MASK;
+ cur_p->addr = regval;
+ cur_p->ctrl = 0;
+ wmb();
+
+ lp->rx_skb[i].skb = new_skb;
+ lp->rx_skb[i].mapping = new_skb_baddr;
+ lp->rx_skb[i].len = XEMACPS_RX_BUF_SIZE;
+ }
+
+ /*
+ * Set up TX buffer descriptors.
+ */
+
+ size = XEMACPS_SEND_BD_CNT * sizeof(struct xemacps_bd);
+ lp->tx_bd = dma_alloc_coherent(&lp->pdev->dev, size,
+ &lp->tx_bd_dma, GFP_KERNEL);
+ if (!lp->tx_bd)
+ goto err_out;
+ dev_dbg(&lp->pdev->dev, "TX ring %d bytes at 0x%x mapped %p\n",
+ size, lp->tx_bd_dma, lp->tx_bd);
+
+ for (i = 0; i < XEMACPS_SEND_BD_CNT; i++) {
+ cur_p = &lp->tx_bd[i];
+ /* set wrap bit for last BD */
+ cur_p->addr = 0;
+ regval = XEMACPS_TXBUF_USED_MASK;
+ if (i == XEMACPS_SEND_BD_CNT - 1)
+ regval |= XEMACPS_TXBUF_WRAP_MASK;
+ cur_p->ctrl = regval;
+ }
+ wmb();
+
+ lp->tx_bd_freecnt = XEMACPS_SEND_BD_CNT;
+
+ dev_dbg(&lp->pdev->dev,
+ "lp->tx_bd %p lp->tx_bd_dma %p lp->tx_skb %p\n",
+ lp->tx_bd, (void *)lp->tx_bd_dma, lp->tx_skb);
+ dev_dbg(&lp->pdev->dev,
+ "lp->rx_bd %p lp->rx_bd_dma %p lp->rx_skb %p\n",
+ lp->rx_bd, (void *)lp->rx_bd_dma, lp->rx_skb);
+
+ return 0;
+
+err_out:
+ xemacps_descriptor_free(lp);
+ return -ENOMEM;
+}
+
+/**
+ * xemacps_init_hw - Initialize hardware to known good state
+ * @lp: local device instance pointer
+ */
+static void xemacps_init_hw(struct net_local *lp)
+{
+ u32 regval;
+
+ xemacps_reset_hw(lp);
+ xemacps_set_hwaddr(lp);
+
+ /* network configuration */
+ regval = 0;
+ regval |= XEMACPS_NWCFG_FDEN_MASK;
+ regval |= XEMACPS_NWCFG_RXCHKSUMEN_MASK;
+ regval |= XEMACPS_NWCFG_PAUSECOPYDI_MASK;
+ regval |= XEMACPS_NWCFG_FCSREM_MASK;
+ regval |= XEMACPS_NWCFG_PAUSEEN_MASK;
+ regval |= XEMACPS_NWCFG_100_MASK;
+ regval |= XEMACPS_NWCFG_HDRXEN_MASK;
+
+ regval |= (MDC_DIV_224 << XEMACPS_NWCFG_MDC_SHIFT_MASK);
+ if (lp->ndev->flags & IFF_PROMISC) /* copy all */
+ regval |= XEMACPS_NWCFG_COPYALLEN_MASK;
+ if (!(lp->ndev->flags & IFF_BROADCAST)) /* No broadcast */
+ regval |= XEMACPS_NWCFG_BCASTDI_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+
+ /* Init TX and RX DMA Q address */
+ xemacps_write(lp->baseaddr, XEMACPS_RXQBASE_OFFSET, lp->rx_bd_dma);
+ xemacps_write(lp->baseaddr, XEMACPS_TXQBASE_OFFSET, lp->tx_bd_dma);
+
+ /* DMACR configurations */
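+ /* The receive buffer size field is programmed in units of
+ * XEMACPS_RX_BUF_UNIT bytes, so round XEMACPS_RX_BUF_SIZE up to the
+ * next whole unit.
+ */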
+ regval = (((XEMACPS_RX_BUF_SIZE / XEMACPS_RX_BUF_UNIT) +
+ ((XEMACPS_RX_BUF_SIZE % XEMACPS_RX_BUF_UNIT) ? 1 : 0)) <<
+ XEMACPS_DMACR_RXBUF_SHIFT);
+ regval |= XEMACPS_DMACR_RXSIZE_MASK;
+ regval |= XEMACPS_DMACR_TXSIZE_MASK;
+ regval |= XEMACPS_DMACR_TCPCKSUM_MASK;
+#ifdef __LITTLE_ENDIAN
+ regval &= ~XEMACPS_DMACR_ENDIAN_MASK;
+#endif
+#ifdef __BIG_ENDIAN
+ regval |= XEMACPS_DMACR_ENDIAN_MASK;
+#endif
+ regval |= XEMACPS_DMACR_BLENGTH_INCR16;
+ xemacps_write(lp->baseaddr, XEMACPS_DMACR_OFFSET, regval);
+
+ /* Enable TX, RX and MDIO port */
+ regval = 0;
+ regval |= XEMACPS_NWCTRL_MDEN_MASK;
+ regval |= XEMACPS_NWCTRL_TXEN_MASK;
+ regval |= XEMACPS_NWCTRL_RXEN_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regval);
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ /* Initialize the ptp clock */
+ xemacps_ptp_init(lp);
+#endif
+
+ /* Enable interrupts */
+ regval = XEMACPS_IXR_ALL_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_IER_OFFSET, regval);
+}
+
+/**
+ * xemacps_resetrx_for_no_rxdata - Resets the Rx if there is no data
+ * for a while (presently 100 msecs)
+ * @data: Used for net_local instance pointer
+ */
+static void xemacps_resetrx_for_no_rxdata(unsigned long data)
+{
+ struct net_local *lp = (struct net_local *)data;
+ unsigned long regctrl;
+ unsigned long tempcntr;
+ unsigned long flags;
+
+ tempcntr = xemacps_read(lp->baseaddr, XEMACPS_RXCNT_OFFSET);
+ if ((!tempcntr) && (!(lp->lastrxfrmscntr))) {
+ spin_lock_irqsave(&lp->nwctrlreg_lock, flags);
+ regctrl = xemacps_read(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET);
+ regctrl &= (~XEMACPS_NWCTRL_RXEN_MASK);
+ xemacps_write(lp->baseaddr,
+ XEMACPS_NWCTRL_OFFSET, regctrl);
+ regctrl = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ regctrl |= (XEMACPS_NWCTRL_RXEN_MASK);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regctrl);
+ spin_unlock_irqrestore(&lp->nwctrlreg_lock, flags);
+ }
+ lp->lastrxfrmscntr = tempcntr;
+}
+
+/**
+ * xemacps_update_stats - Update the statistic structure entries from
+ * the corresponding emacps hardware statistic registers
+ * @data: Used for net_local instance pointer
+ */
+static void xemacps_update_stats(unsigned long data)
+{
+ struct net_local *lp = (struct net_local *)data;
+ struct net_device_stats *nstat = &lp->stats;
+ u32 cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXUNDRCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXOVRCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXJABCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXFCSCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_crc_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXLENGTHCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_length_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXALIGNCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_frame_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXRESERRCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_missed_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_RXORCNT_OFFSET);
+ nstat->rx_errors += cnt;
+ nstat->rx_fifo_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_TXURUNCNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->tx_fifo_errors += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_SNGLCOLLCNT_OFFSET);
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_MULTICOLLCNT_OFFSET);
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_EXCESSCOLLCNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->tx_aborted_errors += cnt;
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_LATECOLLCNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->collisions += cnt;
+
+ cnt = xemacps_read(lp->baseaddr, XEMACPS_CSENSECNT_OFFSET);
+ nstat->tx_errors += cnt;
+ nstat->tx_carrier_errors += cnt;
+}
+
+/**
+ * xemacps_gen_purpose_timerhandler - Timer handler that is called at regular
+ * intervals upon expiry of the gen_purpose_timer defined in net_local struct.
+ * @data: Used for net_local instance pointer
+ *
+ * This timer handler is used to update the statistics by calling the API
+ * xemacps_update_stats. The statistics register can typically overflow pretty
+ * quickly under heavy load conditions. This timer is used to periodically
+ * read the stats registers and update the corresponding stats structure
+ * entries. The stats registers when read reset to 0.
+ */
+static void xemacps_gen_purpose_timerhandler(unsigned long data)
+{
+ struct net_local *lp = (struct net_local *)data;
+
+ xemacps_update_stats(data);
+ xemacps_resetrx_for_no_rxdata(data);
+ mod_timer(&(lp->gen_purpose_timer),
+ jiffies + msecs_to_jiffies(XEAMCPS_GEN_PURPOSE_TIMER_LOAD));
+}
+
+/**
+ * xemacps_open - Called when a network device is made active
+ * @ndev: network interface device structure
+ * Return: 0 on success, negative value if error
+ *
+ * The open entry point is called when a network interface is made active
+ * by the system (IFF_UP). At this point all resources needed for transmit
+ * and receive operations are allocated, the interrupt handler is
+ * registered with OS, the watchdog timer is started, and the stack is
+ * notified that the interface is ready.
+ *
+ * note: on error, resources allocated before the failure must be
+ * released, otherwise they (e.g. memory) will leak.
+ */
+static int xemacps_open(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int rc;
+
+ dev_dbg(&lp->pdev->dev, "open\n");
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ rc = xemacps_descriptor_init(lp);
+ if (rc) {
+ dev_err(&lp->pdev->dev,
+ "Unable to allocate DMA memory, rc %d\n", rc);
+ return rc;
+ }
+
+ rc = pm_runtime_get_sync(&lp->pdev->dev);
+ if (rc < 0) {
+ dev_err(&lp->pdev->dev,
+ "pm_runtime_get_sync() failed, rc %d\n", rc);
+ goto err_free_rings;
+ }
+
+ napi_enable(&lp->napi);
+ xemacps_init_hw(lp);
+
+ setup_timer(&(lp->gen_purpose_timer), xemacps_gen_purpose_timerhandler,
+ (unsigned long)lp);
+ lp->timerready = true;
+
+ netif_carrier_off(ndev);
+ rc = xemacps_mii_probe(ndev);
+ if (rc != 0) {
+ dev_err(&lp->pdev->dev,
+ "%s mii_probe fail.\n", lp->mii_bus->name);
+ rc = -ENXIO;
+ goto err_pm_put;
+ }
+
+ mod_timer(&(lp->gen_purpose_timer),
+ jiffies + msecs_to_jiffies(XEAMCPS_GEN_PURPOSE_TIMER_LOAD));
+ netif_start_queue(ndev);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+ return 0;
+
+err_pm_put:
+ napi_disable(&lp->napi);
+ xemacps_reset_hw(lp);
+ if (lp->timerready) {
+ del_timer_sync(&(lp->gen_purpose_timer));
+ lp->timerready = false;
+ }
+ pm_runtime_put(&lp->pdev->dev);
+err_free_rings:
+ xemacps_descriptor_free(lp);
+
+ return rc;
+}
+
+/**
+ * xemacps_close - disable a network interface
+ * @ndev: network interface device structure
+ * Return: 0
+ *
+ * The close entry point is called when a network interface is de-activated
+ * by OS. The hardware is still under the driver control, but needs to be
+ * disabled. A global MAC reset is issued to stop the hardware, and all
+ * transmit and receive resources are freed.
+ */
+static int xemacps_close(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (lp->timerready)
+ del_timer_sync(&(lp->gen_purpose_timer));
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ if (lp->gmii2rgmii_phy_dev)
+ phy_disconnect(lp->gmii2rgmii_phy_dev);
+ netif_carrier_off(ndev);
+
+ xemacps_reset_hw(lp);
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ xemacps_ptp_close(lp);
+#endif
+ mdelay(500);
+ xemacps_descriptor_free(lp);
+
+ pm_runtime_put(&lp->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * xemacps_reinit_for_txtimeout - work queue scheduled for the tx timeout
+ * handling.
+ * @data: queue work structure
+ */
+static void xemacps_reinit_for_txtimeout(struct work_struct *data)
+{
+ struct net_local *lp = container_of(data, struct net_local,
+ txtimeout_reinit);
+ int rc;
+
+ netif_stop_queue(lp->ndev);
+ napi_disable(&lp->napi);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ spin_lock_bh(&lp->tx_lock);
+ xemacps_reset_hw(lp);
+ spin_unlock_bh(&lp->tx_lock);
+
+ if (lp->phy_dev)
+ phy_stop(lp->phy_dev);
+
+ xemacps_descriptor_free(lp);
+ rc = xemacps_descriptor_init(lp);
+ if (rc) {
+ dev_err(&lp->pdev->dev,
+ "Unable to allocate DMA memory, rc %d\n", rc);
+ return;
+ }
+
+ xemacps_init_hw(lp);
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = -1;
+
+ if (lp->phy_dev)
+ phy_start(lp->phy_dev);
+
+ napi_enable(&lp->napi);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+ netif_trans_update(lp->ndev);
+ netif_wake_queue(lp->ndev);
+}
+
+/**
+ * xemacps_tx_timeout - callback used when the transmitter has not made
+ * any progress for dev->watchdog ticks.
+ * @ndev: network interface device structure
+ */
+static void xemacps_tx_timeout(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ dev_err(&lp->pdev->dev, "transmit timeout %lu ms, reseting...\n",
+ TX_TIMEOUT * 1000UL / HZ);
+ queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+/**
+ * xemacps_clear_csum - Clear the csum field for transport protocols
+ * @skb: socket buffer
+ * @ndev: network interface device structure
+ * Return: 0 on success, other value if error
+ */
+static int xemacps_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+{
+ /* Only run for packets requiring a checksum. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+}
+
+/**
+ * unwind_tx_frag_mapping - unwind the tx fragment mapping
+ * @lp: driver control structure
+ * @fragcnt: fragment count
+ */
+static void unwind_tx_frag_mapping(struct net_local *lp, int fragcnt)
+{
+ struct xemacps_bd *cur_p;
+
+ for (; fragcnt > 0; fragcnt--) {
+ cur_p = &lp->tx_bd[lp->tx_bd_freecnt];
+ dma_unmap_single(&lp->pdev->dev, cur_p->addr,
+ (cur_p->ctrl & XEMACPS_TXBUF_LEN_MASK),
+ DMA_TO_DEVICE);
+ cur_p->ctrl |= XEMACPS_TXBUF_USED_MASK;
+ if (lp->tx_bd_freecnt)
+ lp->tx_bd_freecnt--;
+ else
+ lp->tx_bd_freecnt = XEMACPS_SEND_BD_CNT - 1;
+ }
+}
+
+/**
+ * xemacps_start_xmit - transmit a packet (called by kernel)
+ * @skb: socket buffer
+ * @ndev: network interface device structure
+ * Return: NETDEV_TX_OK on success or when the packet is dropped,
+ * NETDEV_TX_BUSY when no free descriptors are available
+ */
+static int xemacps_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ dma_addr_t mapping;
+ unsigned int nr_frags, len;
+ int i;
+ u32 regval;
+ void *virt_addr;
+ skb_frag_t *frag;
+ struct xemacps_bd *cur_p;
+ unsigned long flags;
+ u32 bd_tail;
+
+ nr_frags = skb_shinfo(skb)->nr_frags + 1;
+ if (nr_frags > lp->tx_bd_freecnt) {
+ netif_stop_queue(ndev); /* stop send queue */
+ return NETDEV_TX_BUSY;
+ }
+
+ if (xemacps_clear_csum(skb, ndev)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ bd_tail = lp->tx_bd_tail;
+ cur_p = &lp->tx_bd[bd_tail];
+ frag = &skb_shinfo(skb)->frags[0];
+
+ for (i = 0; i < nr_frags; i++) {
+ if (i == 0) {
+ len = skb_headlen(skb);
+ mapping = dma_map_single(&lp->pdev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ } else {
+ len = skb_frag_size(frag);
+ virt_addr = skb_frag_address(frag);
+ mapping = dma_map_single(&lp->pdev->dev, virt_addr,
+ len, DMA_TO_DEVICE);
+ frag++;
+ skb_get(skb);
+ }
+
+ if (dma_mapping_error(&lp->pdev->dev, mapping)) {
+ if (i)
+ unwind_tx_frag_mapping(lp, i);
+ goto dma_err;
+ }
+
+ lp->tx_skb[lp->tx_bd_tail].skb = skb;
+ lp->tx_skb[lp->tx_bd_tail].mapping = mapping;
+ lp->tx_skb[lp->tx_bd_tail].len = len;
+ cur_p->addr = mapping;
+
+ /* preserve critical status bits */
+ regval = cur_p->ctrl;
+ regval &= (XEMACPS_TXBUF_USED_MASK | XEMACPS_TXBUF_WRAP_MASK);
+ /* update length field */
+ regval |= ((regval & ~XEMACPS_TXBUF_LEN_MASK) | len);
+ /* commit second to last buffer to hardware */
+ if (i != 0)
+ regval &= ~XEMACPS_TXBUF_USED_MASK;
+ /* last fragment of this packet? */
+ if (i == (nr_frags - 1))
+ regval |= XEMACPS_TXBUF_LAST_MASK;
+ cur_p->ctrl = regval;
+
+ lp->tx_bd_tail++;
+ lp->tx_bd_tail = lp->tx_bd_tail % XEMACPS_SEND_BD_CNT;
+ cur_p = &(lp->tx_bd[lp->tx_bd_tail]);
+ }
+
+ /* commit first buffer to hardware -- do this after
+ * committing the other buffers to avoid an underrun */
+ cur_p = &lp->tx_bd[bd_tail];
+ regval = cur_p->ctrl;
+ regval &= ~XEMACPS_TXBUF_USED_MASK;
+ cur_p->ctrl = regval;
+
+ spin_lock_bh(&lp->tx_lock);
+ lp->tx_bd_freecnt -= nr_frags;
+ spin_unlock_bh(&lp->tx_lock);
+
+ spin_lock_irqsave(&lp->nwctrlreg_lock, flags);
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET,
+ (regval | XEMACPS_NWCTRL_STARTTX_MASK));
+ spin_unlock_irqrestore(&lp->nwctrlreg_lock, flags);
+
+ netif_trans_update(ndev);
+ return NETDEV_TX_OK;
+
+dma_err:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Get the MAC Address bit from the specified position
+ */
+static unsigned get_bit(u8 *mac, unsigned bit)
+{
+ unsigned byte;
+
+ byte = mac[bit / 8];
+ byte >>= (bit & 0x7);
+ byte &= 1;
+
+ return byte;
+}
+
+/*
+ * Calculate a GEM MAC Address hash index
+ */
+static unsigned calc_mac_hash(u8 *mac)
+{
+ int index_bit, mac_bit;
+ unsigned hash_index;
+
+ hash_index = 0;
+ mac_bit = 5;
+ for (index_bit = 5; index_bit >= 0; index_bit--) {
+ hash_index |= (get_bit(mac, mac_bit) ^
+ get_bit(mac, mac_bit + 6) ^
+ get_bit(mac, mac_bit + 12) ^
+ get_bit(mac, mac_bit + 18) ^
+ get_bit(mac, mac_bit + 24) ^
+ get_bit(mac, mac_bit + 30) ^
+ get_bit(mac, mac_bit + 36) ^
+ get_bit(mac, mac_bit + 42))
+ << index_bit;
+ mac_bit--;
+ }
+
+ return hash_index;
+}
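+
+/*
+ * Worked example (hand-computed from the hash definition documented for
+ * xemacps_set_hashtable() below, not read back from hardware): the IPv4
+ * all-hosts multicast MAC 01:00:5e:00:00:01 hashes to index 38, i.e. bit 6
+ * of the hash high register.
+ */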
+
+/**
+ * xemacps_set_hashtable - Add multicast addresses to the internal
+ * multicast-hash table. Called from xemacps_set_rx_mode().
+ * @ndev: network interface device structure
+ *
+ * The hash address register is 64 bits long and takes up two
+ * locations in the memory map. The least significant bits are stored
+ * in the hash low register (XEMACPS_HASHL) and the most significant
+ * bits in the hash high register (XEMACPS_HASHH).
+ *
+ * The unicast hash enable and the multicast hash enable bits in the
+ * network configuration register enable the reception of hash matched
+ * frames. The destination address is reduced to a 6 bit index into
+ * the 64 bit hash register using the following hash function. The
+ * hash function is an exclusive or of every sixth bit of the
+ * destination address.
+ *
+ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ *
+ * da[0] represents the least significant bit of the first byte
+ * received, that is, the multicast/unicast indicator, and da[47]
+ * represents the most significant bit of the last byte received. If
+ * the hash index, hi[n], points to a bit that is set in the hash
+ * register then the frame will be matched according to whether the
+ * frame is multicast or unicast. A multicast match will be signalled
+ * if the multicast hash enable bit is set, da[0] is 1 and the hash
+ * index points to a bit set in the hash register. A unicast match
+ * will be signalled if the unicast hash enable bit is set, da[0] is 0
+ * and the hash index points to a bit set in the hash register. To
+ * receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the
+ * network configuration register.
+ */
+static void xemacps_set_hashtable(struct net_device *ndev)
+{
+ struct netdev_hw_addr *curr;
+ u32 regvalh, regvall, hash_index;
+ u8 *mc_addr;
+ struct net_local *lp;
+
+ lp = netdev_priv(ndev);
+
+ regvalh = regvall = 0;
+
+ netdev_for_each_mc_addr(curr, ndev) {
+ if (!curr) /* end of list */
+ break;
+ mc_addr = curr->addr;
+ hash_index = calc_mac_hash(mc_addr);
+
+ if (hash_index >= XEMACPS_MAX_HASH_BITS) {
+ dev_err(&lp->pdev->dev,
+ "hash calculation out of range %d\n",
+ hash_index);
+ break;
+ }
+ if (hash_index < 32)
+ regvall |= (1 << hash_index);
+ else
+ regvalh |= (1 << (hash_index - 32));
+ }
+
+ xemacps_write(lp->baseaddr, XEMACPS_HASHL_OFFSET, regvall);
+ xemacps_write(lp->baseaddr, XEMACPS_HASHH_OFFSET, regvalh);
+}
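
For reference, here is a minimal standalone sketch (illustrative only, not part of the patch) of the hash function documented above. It mirrors get_bit()/calc_mac_hash() and prints the 6-bit index for one example multicast address; the address itself is arbitrary.

/*
 * Standalone illustration: compute the 6-bit GEM hash index for one
 * multicast address exactly as described in the xemacps_set_hashtable()
 * comment above.
 */
#include <stdio.h>

typedef unsigned char u8;

static unsigned get_da_bit(const u8 *mac, unsigned bit)
{
	return (mac[bit / 8] >> (bit & 0x7)) & 1;	/* da[bit] */
}

int main(void)
{
	/* 01:00:5e:00:00:fb - an example IPv4 multicast MAC */
	u8 mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned hash = 0;
	int i, j;

	/* hi[i] = da[i] ^ da[i+6] ^ da[i+12] ^ ... ^ da[i+42] */
	for (i = 0; i < 6; i++) {
		unsigned bit = 0;

		for (j = i; j < 48; j += 6)
			bit ^= get_da_bit(mac, j);
		hash |= bit << i;
	}

	/* The index selects one bit in the 64-bit HASHL/HASHH pair */
	printf("hash index = %u\n", hash);
	return 0;
}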
+
+/**
+ * xemacps_set_rx_mode - enable/disable promiscuous and multicast modes
+ * @ndev: network interface device structure
+ */
+static void xemacps_set_rx_mode(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCFG_OFFSET);
+
+ /* promisc mode */
+ if (ndev->flags & IFF_PROMISC)
+ regval |= XEMACPS_NWCFG_COPYALLEN_MASK;
+ if (!(ndev->flags & IFF_PROMISC))
+ regval &= ~XEMACPS_NWCFG_COPYALLEN_MASK;
+
+ /* All multicast mode */
+ if (ndev->flags & IFF_ALLMULTI) {
+ regval |= XEMACPS_NWCFG_MCASTHASHEN_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_HASHL_OFFSET, ~0UL);
+ xemacps_write(lp->baseaddr, XEMACPS_HASHH_OFFSET, ~0UL);
+ /* Specific multicast mode */
+ } else if ((ndev->flags & IFF_MULTICAST)
+ && (netdev_mc_count(ndev) > 0)) {
+ regval |= XEMACPS_NWCFG_MCASTHASHEN_MASK;
+ xemacps_set_hashtable(ndev);
+ /* Disable multicast mode */
+ } else {
+ xemacps_write(lp->baseaddr, XEMACPS_HASHL_OFFSET, 0x0);
+ xemacps_write(lp->baseaddr, XEMACPS_HASHH_OFFSET, 0x0);
+ regval &= ~XEMACPS_NWCFG_MCASTHASHEN_MASK;
+ }
+
+ /* broadcast mode */
+ if (ndev->flags & IFF_BROADCAST)
+ regval &= ~XEMACPS_NWCFG_BCASTDI_MASK;
+ /* No broadcast */
+ if (!(ndev->flags & IFF_BROADCAST))
+ regval |= XEMACPS_NWCFG_BCASTDI_MASK;
+
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+}
+
+/**
+ * xemacps_get_settings - get device specific settings.
+ * Usage: Issue "ethtool ethX" under linux prompt.
+ * @ndev: network device
+ * @ecmd: ethtool command structure
+ * Return: 0 on success, negative value if error.
+ */
+static int
+xemacps_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+/**
+ * xemacps_set_settings - set device specific settings.
+ * Usage: Issue "ethtool -s ethX speed 1000" under linux prompt
+ * to change speed
+ * @ndev: network device
+ * @ecmd: ethtool command structure
+ * Return: 0 on success, negative value if error.
+ */
+static int
+xemacps_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+/**
+ * xemacps_get_drvinfo - report driver information
+ * Usage: Issue "ethtool -i ethX" under linux prompt
+ * @ndev: network device
+ * @ed: device driver information structure
+ */
+static void
+xemacps_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ memset(ed, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(ed->driver, lp->pdev->dev.driver->name);
+ strcpy(ed->version, DRIVER_VERSION);
+}
+
+/**
+ * xemacps_get_ringparam - get device dma ring information.
+ * Usage: Issue "ethtool -g ethX" under linux prompt
+ * @ndev: network device
+ * @erp: ethtool ring parameter structure
+ */
+static void
+xemacps_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *erp)
+{
+ memset(erp, 0, sizeof(struct ethtool_ringparam));
+
+ erp->rx_max_pending = XEMACPS_RECV_BD_CNT;
+ erp->tx_max_pending = XEMACPS_SEND_BD_CNT;
+ erp->rx_pending = 0;
+ erp->tx_pending = 0;
+}
+
+/**
+ * xemacps_get_wol - get device wake on lan status
+ * Usage: Issue "ethtool ethX" under linux prompt
+ * @ndev: network device
+ * @ewol: wol status
+ */
+static void
+xemacps_get_wol(struct net_device *ndev, struct ethtool_wolinfo *ewol)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ ewol->supported = WAKE_MAGIC | WAKE_ARP | WAKE_UCAST | WAKE_MCAST;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_WOL_OFFSET);
+ if (regval & XEMACPS_WOL_MCAST_MASK)
+ ewol->wolopts |= WAKE_MCAST;
+ if (regval & XEMACPS_WOL_ARP_MASK)
+ ewol->wolopts |= WAKE_ARP;
+ if (regval & XEMACPS_WOL_SPEREG1_MASK)
+ ewol->wolopts |= WAKE_UCAST;
+ if (regval & XEMACPS_WOL_MAGIC_MASK)
+ ewol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * xemacps_set_wol - set device wake on lan configuration
+ * Usage: Issue "ethtool -s ethX wol u|m|b|g" under linux prompt to enable
+ * specified type of packet.
+ * Issue "ethtool -s ethX wol d" under linux prompt to disable this feature.
+ * @ndev: network device
+ * @ewol: wol status
+ * Return: 0 on success, negative value if not supported
+ */
+static int
+xemacps_set_wol(struct net_device *ndev, struct ethtool_wolinfo *ewol)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ if (ewol->wolopts & ~(WAKE_MAGIC | WAKE_ARP | WAKE_UCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_WOL_OFFSET);
+ regval &= ~(XEMACPS_WOL_MCAST_MASK | XEMACPS_WOL_ARP_MASK |
+ XEMACPS_WOL_SPEREG1_MASK | XEMACPS_WOL_MAGIC_MASK);
+
+ if (ewol->wolopts & WAKE_MAGIC)
+ regval |= XEMACPS_WOL_MAGIC_MASK;
+ if (ewol->wolopts & WAKE_ARP)
+ regval |= XEMACPS_WOL_ARP_MASK;
+ if (ewol->wolopts & WAKE_UCAST)
+ regval |= XEMACPS_WOL_SPEREG1_MASK;
+ if (ewol->wolopts & WAKE_MCAST)
+ regval |= XEMACPS_WOL_MCAST_MASK;
+
+ xemacps_write(lp->baseaddr, XEMACPS_WOL_OFFSET, regval);
+
+ return 0;
+}
+
+/**
+ * xemacps_get_pauseparam - get device pause status
+ * Usage: Issue "ethtool -a ethX" under linux prompt
+ * @ndev: network device
+ * @epauseparm: pause parameter
+ *
+ * note: hardware supports only tx flow control
+ */
+static void
+xemacps_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epauseparm)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ epauseparm->autoneg = 0;
+ epauseparm->rx_pause = 0;
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCFG_OFFSET);
+ epauseparm->tx_pause = regval & XEMACPS_NWCFG_PAUSEEN_MASK;
+}
+
+/**
+ * xemacps_set_pauseparam - set device pause parameter(flow control)
+ * Usage: Issue "ethtool -A ethX tx on|off" under linux prompt
+ * @ndev: network device
+ * @epauseparm: pause parameter
+ * Return: 0 on success, negative value if not supported
+ *
+ * note: hardware supports only tx flow control
+ */
+static int
+xemacps_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epauseparm)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+
+ if (netif_running(ndev)) {
+ dev_err(&lp->pdev->dev,
+ "Please stop the interface before applying configuration\n");
+ return -EFAULT;
+ }
+
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCFG_OFFSET);
+
+ if (epauseparm->tx_pause)
+ regval |= XEMACPS_NWCFG_PAUSEEN_MASK;
+ if (!(epauseparm->tx_pause))
+ regval &= ~XEMACPS_NWCFG_PAUSEEN_MASK;
+
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+ return 0;
+}
+
+/**
+ * xemacps_get_stats - get device statistic raw data in 64bit mode
+ * @ndev: network device
+ * Return: Pointer to network device statistic
+ */
+static struct net_device_stats
+*xemacps_get_stats(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct net_device_stats *nstat = &lp->stats;
+
+ xemacps_update_stats((unsigned long)lp);
+ return nstat;
+}
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+/**
+ * xemacps_get_ts_info - Get the interface timestamp capabilities
+ * @dev: Network device
+ * @info: Holds the interface timestamp capability info
+ * Return: Always return zero
+ */
+static int xemacps_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = lp->phc_index;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+#endif
+
+static struct ethtool_ops xemacps_ethtool_ops = {
+ .get_settings = xemacps_get_settings,
+ .set_settings = xemacps_set_settings,
+ .get_drvinfo = xemacps_get_drvinfo,
+ .get_link = ethtool_op_get_link, /* ethtool default */
+ .get_ringparam = xemacps_get_ringparam,
+ .get_wol = xemacps_get_wol,
+ .set_wol = xemacps_set_wol,
+ .get_pauseparam = xemacps_get_pauseparam,
+ .set_pauseparam = xemacps_set_pauseparam,
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ .get_ts_info = xemacps_get_ts_info,
+#endif
+};
+
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+static int xemacps_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct net_local *lp;
+ u32 regval;
+
+ lp = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if ((config.tx_type != HWTSTAMP_TX_OFF) &&
+ (config.tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ regval = xemacps_read(lp->baseaddr, XEMACPS_NWCTRL_OFFSET);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET,
+ (regval | XEMACPS_NWCTRL_RXTSTAMP_MASK));
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ config.tx_type = HWTSTAMP_TX_ON;
+ lp->hwtstamp_config = config;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif /* CONFIG_XILINX_PS_EMAC_HWTSTAMP */
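
A minimal userspace sketch (assumed, not part of the patch) of how an application would exercise this path: it fills a struct hwtstamp_config and issues SIOCSHWTSTAMP through an ifreq, which the driver services in xemacps_hwtstamp_ioctl() above. The interface name "eth0" is only an example.

/*
 * Hypothetical userspace example: request hardware timestamping via
 * SIOCSHWTSTAMP on an example interface.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr)) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* The driver may coerce rx_filter to HWTSTAMP_FILTER_ALL */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}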
+
+/**
+ * xemacps_ioctl - ioctl entry point
+ * @ndev: network device
+ * @rq: interface request ioctl
+ * @cmd: command code
+ *
+ * Called when user issues an ioctl request to the network device.
+ *
+ * Return: 0 on success, negative value if error
+ */
+static int xemacps_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(phydev, rq, cmd);
+#ifdef CONFIG_XILINX_PS_EMAC_HWTSTAMP
+ case SIOCSHWTSTAMP:
+ return xemacps_hwtstamp_ioctl(ndev, rq, cmd);
+#endif
+ default:
+ dev_info(&lp->pdev->dev, "ioctl %d not implemented.\n", cmd);
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * xemacps_probe - Platform driver probe
+ * @pdev: Pointer to platform device structure
+ *
+ * Return: 0 on success, negative value if error
+ */
+static int xemacps_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem = NULL;
+ struct resource *r_irq = NULL;
+ struct net_device *ndev;
+ struct net_local *lp;
+ u32 regval = 0;
+ int rc = -ENXIO;
+ const u8 *mac_address;
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r_mem || !r_irq) {
+ dev_err(&pdev->dev, "no IO resource defined.\n");
+ return -ENXIO;
+ }
+
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&pdev->dev, "etherdev allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ lp = netdev_priv(ndev);
+ lp->pdev = pdev;
+ lp->ndev = ndev;
+
+ spin_lock_init(&lp->tx_lock);
+ spin_lock_init(&lp->rx_lock);
+ spin_lock_init(&lp->nwctrlreg_lock);
+ spin_lock_init(&lp->mdio_lock);
+
+ lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->baseaddr)) {
+ rc = PTR_ERR(lp->baseaddr);
+ goto err_out_free_netdev;
+ }
+
+ dev_dbg(&lp->pdev->dev, "BASEADDRESS hw: %p virt: %p\n",
+ (void *)r_mem->start, lp->baseaddr);
+
+ ndev->irq = platform_get_irq(pdev, 0);
+
+ ndev->netdev_ops = &netdev_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->ethtool_ops = &xemacps_ethtool_ops;
+ ndev->base_addr = r_mem->start;
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+ netif_napi_add(ndev, &lp->napi, xemacps_rx_poll, XEMACPS_NAPI_WEIGHT);
+
+ lp->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_free_netdev;
+ }
+
+ if (ndev->irq == 54)
+ lp->enetnum = 0;
+ else
+ lp->enetnum = 1;
+
+ lp->aperclk = devm_clk_get(&pdev->dev, "aper_clk");
+ if (IS_ERR(lp->aperclk)) {
+ dev_err(&pdev->dev, "aper_clk clock not found.\n");
+ rc = PTR_ERR(lp->aperclk);
+ goto err_out_unregister_netdev;
+ }
+ lp->devclk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(lp->devclk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ rc = PTR_ERR(lp->devclk);
+ goto err_out_unregister_netdev;
+ }
+
+ rc = clk_prepare_enable(lp->aperclk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+ goto err_out_unregister_netdev;
+ }
+ rc = clk_prepare_enable(lp->devclk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto err_out_clk_dis_aper;
+ }
+
+ rc = of_property_read_u32(lp->pdev->dev.of_node, "xlnx,has-mdio",
+ &lp->has_mdio);
+ lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "phy-handle", 0);
+ lp->gmii2rgmii_phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "gmii2rgmii-phy-handle", 0);
+ rc = of_get_phy_mode(lp->pdev->dev.of_node);
+ if (rc < 0) {
+ dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+ goto err_out_clk_dis_all;
+ }
+
+ lp->phy_interface = rc;
+
+ /* Set MDIO clock divider */
+ regval = (MDC_DIV_224 << XEMACPS_NWCFG_MDC_SHIFT_MASK);
+ xemacps_write(lp->baseaddr, XEMACPS_NWCFG_OFFSET, regval);
+
+ regval = XEMACPS_NWCTRL_MDEN_MASK;
+ xemacps_write(lp->baseaddr, XEMACPS_NWCTRL_OFFSET, regval);
+
+ rc = xemacps_mii_init(lp);
+ if (rc) {
+ dev_err(&lp->pdev->dev, "error in xemacps_mii_init\n");
+ goto err_out_clk_dis_all;
+ }
+
+ mac_address = of_get_mac_address(lp->pdev->dev.of_node);
+ if (mac_address) {
+ ether_addr_copy(lp->ndev->dev_addr, mac_address);
+ xemacps_set_hwaddr(lp);
+ } else {
+ xemacps_update_hwaddr(lp);
+ }
+
+ tasklet_init(&lp->tx_bdreclaim_tasklet, xemacps_tx_poll,
+ (unsigned long) ndev);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+ lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ INIT_WORK(&lp->txtimeout_reinit, xemacps_reinit_for_txtimeout);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+ pdev->id, ndev->base_addr, ndev->irq);
+
+ rc = devm_request_irq(&pdev->dev, ndev->irq, &xemacps_interrupt, 0,
+ ndev->name, ndev);
+ if (rc) {
+ dev_err(&lp->pdev->dev, "Unable to request IRQ %p, error %d\n",
+ r_irq, rc);
+ goto err_out_clk_dis_all;
+ }
+
+ return 0;
+
+err_out_clk_dis_all:
+ clk_disable_unprepare(lp->devclk);
+err_out_clk_dis_aper:
+ clk_disable_unprepare(lp->aperclk);
+err_out_unregister_netdev:
+ unregister_netdev(ndev);
+err_out_free_netdev:
+ free_netdev(ndev);
+
+ return rc;
+}
+
+/**
+ * xemacps_remove - called when platform driver is unregistered
+ * @pdev: Pointer to the platform device structure
+ *
+ * Return: 0 on success
+ */
+static int xemacps_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp;
+
+ if (ndev) {
+ lp = netdev_priv(ndev);
+
+ if (lp->has_mdio) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ }
+ unregister_netdev(ndev);
+
+ if (!pm_runtime_suspended(&pdev->dev)) {
+ clk_disable_unprepare(lp->devclk);
+ clk_disable_unprepare(lp->aperclk);
+ } else {
+ clk_unprepare(lp->devclk);
+ clk_unprepare(lp->aperclk);
+ }
+
+ free_netdev(ndev);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * xemacps_suspend - Suspend event
+ * @device: Pointer to device structure
+ *
+ * Return: 0
+ */
+static int __maybe_unused xemacps_suspend(struct device *device)
+{
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ netif_device_detach(ndev);
+ if (!pm_runtime_suspended(device)) {
+ clk_disable(lp->devclk);
+ clk_disable(lp->aperclk);
+ }
+ return 0;
+}
+
+/**
+ * xemacps_resume - Resume after previous suspend
+ * @device: Pointer to device structure
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+static int __maybe_unused xemacps_resume(struct device *device)
+{
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (!pm_runtime_suspended(device)) {
+ int ret;
+
+ ret = clk_enable(lp->aperclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(lp->devclk);
+ if (ret) {
+ clk_disable(lp->aperclk);
+ return ret;
+ }
+ }
+ netif_device_attach(ndev);
+ return 0;
+}
+
+static int __maybe_unused xemacps_runtime_idle(struct device *dev)
+{
+ return pm_schedule_suspend(dev, 1);
+}
+
+static int __maybe_unused xemacps_runtime_resume(struct device *device)
+{
+ int ret;
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ ret = clk_enable(lp->aperclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(lp->devclk);
+ if (ret) {
+ clk_disable(lp->aperclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused xemacps_runtime_suspend(struct device *device)
+{
+ struct platform_device *pdev = container_of(device,
+ struct platform_device, dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(ndev);
+
+ clk_disable(lp->devclk);
+ clk_disable(lp->aperclk);
+ return 0;
+}
+
+static const struct dev_pm_ops xemacps_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xemacps_suspend, xemacps_resume)
+ SET_RUNTIME_PM_OPS(xemacps_runtime_suspend, xemacps_runtime_resume,
+ xemacps_runtime_idle)
+};
+#define XEMACPS_PM (&xemacps_dev_pm_ops)
+#else /* ! CONFIG_PM */
+#define XEMACPS_PM NULL
+#endif /* ! CONFIG_PM */
+
+static struct net_device_ops netdev_ops = {
+ .ndo_open = xemacps_open,
+ .ndo_stop = xemacps_close,
+ .ndo_start_xmit = xemacps_start_xmit,
+ .ndo_set_rx_mode = xemacps_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = xemacps_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_tx_timeout = xemacps_tx_timeout,
+ .ndo_get_stats = xemacps_get_stats,
+};
+
+static struct of_device_id xemacps_of_match[] = {
+ { .compatible = "xlnx,ps7-ethernet-1.00.a", },
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, xemacps_of_match);
+
+static struct platform_driver xemacps_driver = {
+ .probe = xemacps_probe,
+ .remove = xemacps_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xemacps_of_match,
+ .pm = XEMACPS_PM,
+ },
+};
+
+module_platform_driver(xemacps_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Ethernet driver");
+MODULE_LICENSE("GPL v2");
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+ #define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_MDI_CROSSOVER 5
+#define DP83867_MDI_CROSSOVER_AUTO 0b10
+#define DP83867_MDI_CROSSOVER_MDIX 0b01
+#define DP83867_PHYCTRL_SGMIIEN 0x0800
+#define DP83867_PHYCTRL_RXFIFO_SHIFT 12
+#define DP83867_PHYCTRL_TXFIFO_SHIFT 14
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
}
if (phy_interface_is_rgmii(phydev)) {
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+ (DP83867_MDI_CROSSOVER_AUTO << DP83867_MDI_CROSSOVER) |
+ (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+ } else {
+ phy_write(phydev, MII_BMCR,
+ (BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000));
+
+ cfg2 = phy_read(phydev, MII_DP83867_CFG2);
+ cfg2 &= MII_DP83867_CFG2_MASK;
+ cfg2 |= (MII_DP83867_CFG2_SPEEDOPT_10EN |
+ MII_DP83867_CFG2_SGMII_AUTONEGEN |
+ MII_DP83867_CFG2_SPEEDOPT_ENH |
+ MII_DP83867_CFG2_SPEEDOPT_CNT |
+ MII_DP83867_CFG2_SPEEDOPT_INTLOW);
+ phy_write(phydev, MII_DP83867_CFG2, cfg2);
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, 0x0);
+
+ phy_write(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCTRL_SGMIIEN |
+ (DP83867_MDI_CROSSOVER_MDIX << DP83867_MDI_CROSSOVER) |
+ (dp83867->fifo_depth << DP83867_PHYCTRL_RXFIFO_SHIFT) |
+ (dp83867->fifo_depth << DP83867_PHYCTRL_TXFIFO_SHIFT));
+ phy_write(phydev, MII_DP83867_BISCR, 0x0);
}
if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
Enable this to support the Broadcom Cygnus PCIe PHY.
If unsure, say N.
+config PHY_XILINX_ZYNQMP
+ tristate "Xilinx ZynqMP PHY driver"
+ depends on ARCH_ZYNQMP
+ select GENERIC_PHY
+ help
+ Enable this to support the ZynqMP High Speed Gigabit Transceiver
+ that is part of the ZynqMP SoC.
+
+ source "drivers/phy/tegra/Kconfig"
+
endmenu
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
- obj-$(CONFIG_PHY_BRCMSTB_SATA) += phy-brcmstb-sata.o
+ obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
+obj-$(CONFIG_PHY_XILINX_ZYNQMP) += phy-zynqmp.o
+
+ obj-$(CONFIG_ARCH_TEGRA) += tegra/
If unsure, say no.
+ config RESET_OXNAS
+ bool
+
+if RESET_CONTROLLER
+
+config ZYNQMP_RESET_CONTROLLER
+ bool "Xilinx ZYNQMP Reset Controller Support"
+ help
+ ZYNQMP Reset Controller support.
+
+ This framework is designed to abstract reset handling of devices
+ via SoC-internal reset controller modules.
+
+ If unsure, say no.
+
+endif
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ATH79) += reset-ath79.o
+ obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
+obj-$(CONFIG_ZYNQMP_RESET_CONTROLLER) += reset-zynqmp.o
};
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 1260
+
+ /**
+ * struct mtd_oob_region - oob region definition
+ * @offset: region offset
+ * @length: region length
+ *
+ * This structure describes a region of the OOB area, and is used
+ * to retrieve ECC or free bytes sections.
+ * Each section is defined by an offset within the OOB area and a
+ * length.
+ */
+ struct mtd_oob_region {
+ u32 offset;
+ u32 length;
+ };
+
/*
- * Internal ECC layout control structure. For historical reasons, there is a
- * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
- * for export to user-space via the ECCGETLAYOUT ioctl.
- * nand_ecclayout should be expandable in the future simply by the above macros.
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
+ * @ecc: function returning an ECC region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * ECC sections.
+ * @free: function returning a free region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * free sections.
*/
- struct nand_ecclayout {
- __u32 eccbytes;
- __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
- struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
+ struct mtd_ooblayout_ops {
+ int (*ecc)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+ int (*free)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
};
struct module; /* only needed for owner field in mtd_info */
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_entity_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_route: Return whether a route exists inside the entity between
+ * two given pads. Optional. If the operation isn't
+ * implemented all pads will be considered as connected.
+ *
+ * Note: these callbacks are called with the struct media_device
+ * @graph_mutex mutex held.
*/
struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_route)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
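
A minimal sketch of a has_route() implementation for a hypothetical entity in which sink pads 0 and 1 both route to source pad 2 (names and topology assumed; drivers with dynamic routing would consult their own routing state instead):

static bool example_has_route(struct media_entity *entity,
			      unsigned int pad0, unsigned int pad1)
{
	/* A pad is trivially routed to itself */
	if (pad0 == pad1)
		return true;

	/* Pads 0 and 1 each connect only to pad 2 */
	return pad0 == 2 || pad1 == 2;
}

static const struct media_entity_operations example_entity_ops = {
	.has_route = example_has_route,
};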
+ /**
+ * enum media_entity_type - Media entity type
+ *
+ * @MEDIA_ENTITY_TYPE_BASE:
+ * The entity isn't embedded in another subsystem structure.
+ * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE:
+ * The entity is embedded in a struct video_device instance.
+ * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV:
+ * The entity is embedded in a struct v4l2_subdev instance.
+ *
+ * Media entity objects are often not instantiated directly, but the media
+ * entity structure is inherited by (through embedding) other subsystem-specific
+ * structures. The media entity type identifies the type of the subclass
+ * structure that implements a media entity instance.
+ *
+ * This allows runtime type identification of media entities and safe casting to
+ * the correct object type. For instance, a media entity structure instance
+ * embedded in a v4l2_subdev structure instance will have the type
+ * MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a v4l2_subdev
+ * structure using the container_of() macro.
+ */
+ enum media_entity_type {
+ MEDIA_ENTITY_TYPE_BASE,
+ MEDIA_ENTITY_TYPE_VIDEO_DEVICE,
+ MEDIA_ENTITY_TYPE_V4L2_SUBDEV,
+ };
+
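
A short sketch of the safe downcast described above. The obj_type field name used for the check is an assumption here (helper names and fields vary by kernel version), so treat this as schematic rather than the exact API:

/* Recover the enclosing v4l2_subdev from its embedded media_entity. */
static struct v4l2_subdev *example_entity_to_subdev(struct media_entity *entity)
{
	/* obj_type is assumed; use your kernel's type-check helper */
	if (entity->obj_type != MEDIA_ENTITY_TYPE_V4L2_SUBDEV)
		return NULL;

	return container_of(entity, struct v4l2_subdev, entity);
}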
/**
* struct media_entity - A media entity graph object.
*