4 * Copyright (C) 2016 - 2017 Xilinx, Inc.
7 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
8 * IP. It exposes a char device interface in sysfs and supports file
9 * operations like open(), close() and ioctl().
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
25 #include <linux/cdev.h>
26 #include <linux/device.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
33 #include <linux/of_platform.h>
34 #include <linux/platform_device.h>
35 #include <linux/poll.h>
36 #include <linux/slab.h>
37 #include <linux/uaccess.h>
39 #include <uapi/misc/xilinx_sdfec.h>
41 #define DRIVER_NAME "xilinx_sdfec"
42 #define DRIVER_VERSION "0.3"
43 #define DRIVER_MAX_DEV BIT(MINORBITS)
45 static struct class *xsdfec_class;
46 static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
47 static dev_t xsdfec_devt;
49 /* Xilinx SDFEC Register Map */
50 #define XSDFEC_AXI_WR_PROTECT_ADDR (0x00000)
51 #define XSDFEC_CODE_WR_PROTECT_ADDR (0x00004)
52 #define XSDFEC_ACTIVE_ADDR (0x00008)
53 #define XSDFEC_AXIS_WIDTH_ADDR (0x0000c)
54 #define XSDFEC_AXIS_ENABLE_ADDR (0x00010)
55 #define XSDFEC_AXIS_ENABLE_MASK (0x0003F)
56 #define XSDFEC_FEC_CODE_ADDR (0x00014)
57 #define XSDFEC_ORDER_ADDR (0x00018)
59 /* Interrupt Status Register Bit Mask*/
60 #define XSDFEC_ISR_MASK (0x0003F)
61 /* Interrupt Status Register */
62 #define XSDFEC_ISR_ADDR (0x0001c)
63 /* Write Only - Interrupt Enable Register */
64 #define XSDFEC_IER_ADDR (0x00020)
65 /* Write Only - Interrupt Disable Register */
66 #define XSDFEC_IDR_ADDR (0x00024)
67 /* Read Only - Interrupt Mask Register */
68 #define XSDFEC_IMR_ADDR (0x00028)
70 /* Single Bit Errors */
71 #define XSDFEC_ECC_ISR_SBE (0x7FF)
72 /* Multi Bit Errors */
73 #define XSDFEC_ECC_ISR_MBE (0x3FF800)
74 /* ECC Interrupt Status Bit Mask */
75 #define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)
77 /* Multi Bit Error Postion */
78 #define XSDFEC_ECC_MULTI_BIT_POS (11)
79 #define XSDFEC_ERROR_MAX_THRESHOLD (100)
81 /* ECC Interrupt Status Register */
82 #define XSDFEC_ECC_ISR_ADDR (0x0002c)
83 /* Write Only - ECC Interrupt Enable Register */
84 #define XSDFEC_ECC_IER_ADDR (0x00030)
85 /* Write Only - ECC Interrupt Disable Register */
86 #define XSDFEC_ECC_IDR_ADDR (0x00034)
87 /* Read Only - ECC Interrupt Mask Register */
88 #define XSDFEC_ECC_IMR_ADDR (0x00038)
90 #define XSDFEC_BYPASS_ADDR (0x0003c)
91 #define XSDFEC_TEST_EMA_ADDR_BASE (0x00080)
92 #define XSDFEC_TEST_EMA_ADDR_HIGH (0x00089)
93 #define XSDFEC_TURBO_ADDR (0x00100)
94 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x02000)
95 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x021fc)
96 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x02004)
97 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x02200)
98 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x02008)
99 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x02204)
100 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x0200c)
101 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x02208)
104 * struct xsdfec_dev - Driver data for SDFEC
105 * @regs: device physical base address
106 * @dev: pointer to device struct
107 * @state: State of the SDFEC device
108 * @config: Configuration of the SDFEC device
109 * @intr_enabled: indicates IRQ enabled
110 * @wr_protect: indicates Write Protect enabled
111 * @isr_err_count: Count of ISR errors
112 * @cecc_count: Count of Correctable ECC errors (SBE)
113 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
114 * @reset_count: Count of Resets requested
115 * @open_count: Count of char device being opened
117 * @xsdfec_cdev: Character device handle
118 * @waitq: Driver wait queue
120 * This structure contains necessary state for SDFEC driver to operate
125 enum xsdfec_state state;
126 struct xsdfec_config config;
129 atomic_t isr_err_count;
132 atomic_t reset_count;
135 struct cdev xsdfec_cdev;
136 wait_queue_head_t waitq;
140 xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
142 if (xsdfec->wr_protect) {
143 dev_err(xsdfec->dev, "SDFEC in write protect");
148 "Writing 0x%x to offset 0x%x", value, addr);
149 iowrite32(value, xsdfec->regs + addr);
153 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
157 rval = ioread32(xsdfec->regs + addr);
159 "Read value = 0x%x from offset 0x%x",
164 #define XSDFEC_WRITE_PROTECT_ENABLE (1)
165 #define XSDFEC_WRITE_PROTECT_DISABLE (0)
167 xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
170 xsdfec_regwrite(xsdfec,
171 XSDFEC_CODE_WR_PROTECT_ADDR,
172 XSDFEC_WRITE_PROTECT_ENABLE);
173 xsdfec_regwrite(xsdfec,
174 XSDFEC_AXI_WR_PROTECT_ADDR,
175 XSDFEC_WRITE_PROTECT_ENABLE);
177 xsdfec_regwrite(xsdfec,
178 XSDFEC_AXI_WR_PROTECT_ADDR,
179 XSDFEC_WRITE_PROTECT_DISABLE);
180 xsdfec_regwrite(xsdfec,
181 XSDFEC_CODE_WR_PROTECT_ADDR,
182 XSDFEC_WRITE_PROTECT_DISABLE);
184 xsdfec->wr_protect = wr_pr;
188 xsdfec_dev_open(struct inode *iptr, struct file *fptr)
190 struct xsdfec_dev *xsdfec;
192 xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
196 /* Only one open per device at a time */
197 if (!atomic_dec_and_test(&xsdfec->open_count)) {
198 atomic_inc(&xsdfec->open_count);
202 fptr->private_data = xsdfec;
207 xsdfec_dev_release(struct inode *iptr, struct file *fptr)
209 struct xsdfec_dev *xsdfec;
211 xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
215 atomic_inc(&xsdfec->open_count);
219 #define XSDFEC_IS_ACTIVITY_SET (0x1)
221 xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
223 struct xsdfec_status status;
226 status.fec_id = xsdfec->config.fec_id;
227 status.state = xsdfec->state;
229 (xsdfec_regread(xsdfec,
230 XSDFEC_ACTIVE_ADDR) &
231 XSDFEC_IS_ACTIVITY_SET);
233 err = copy_to_user(arg, &status, sizeof(status));
235 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
236 __func__, xsdfec->config.fec_id);
243 xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
247 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
249 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
250 __func__, xsdfec->config.fec_id);
257 xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
263 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
265 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
266 if (mask_read & XSDFEC_ISR_MASK) {
268 "SDFEC enabling irq with IER failed");
273 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
275 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
276 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
278 "SDFEC disabling irq with IDR failed");
286 xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
292 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
293 XSDFEC_ECC_ISR_MASK);
294 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
295 if (mask_read & XSDFEC_ECC_ISR_MASK) {
297 "SDFEC enabling ECC irq with ECC IER failed");
302 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
303 XSDFEC_ECC_ISR_MASK);
304 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
305 if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
307 "SDFEC disable ECC irq with ECC IDR failed");
315 xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
317 struct xsdfec_irq irq;
320 err = copy_from_user(&irq, arg, sizeof(irq));
322 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
323 __func__, xsdfec->config.fec_id);
327 /* Setup tlast related IRQ */
328 if (irq.enable_isr) {
329 err = xsdfec_isr_enable(xsdfec, true);
334 /* Setup ECC related IRQ */
335 if (irq.enable_ecc_isr) {
336 err = xsdfec_ecc_isr_enable(xsdfec, true);
344 #define XSDFEC_TURBO_SCALE_MASK (0xF)
345 #define XSDFEC_TURBO_SCALE_BIT_POS (8)
347 xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
349 struct xsdfec_turbo turbo;
353 err = copy_from_user(&turbo, arg, sizeof(turbo));
355 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
356 __func__, xsdfec->config.fec_id);
360 /* Check to see what device tree says about the FEC codes */
361 if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
363 "%s: Unable to write Turbo to SDFEC%d check DT",
364 __func__, xsdfec->config.fec_id);
366 } else if (xsdfec->config.code == XSDFEC_CODE_INVALID) {
367 xsdfec->config.code = XSDFEC_TURBO_CODE;
370 if (xsdfec->wr_protect)
371 xsdfec_wr_protect(xsdfec, false);
373 turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
374 XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
375 xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
380 xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
383 struct xsdfec_turbo turbo_params;
386 if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
388 "%s: SDFEC%d is configured for LDPC, check DT",
389 __func__, xsdfec->config.fec_id);
393 reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
395 turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
396 XSDFEC_TURBO_SCALE_BIT_POS;
397 turbo_params.alg = reg_value & 0x1;
399 err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
401 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
402 __func__, xsdfec->config.fec_id);
409 #define XSDFEC_LDPC_REG_JUMP (0x10)
410 #define XSDFEC_REG0_N_MASK (0x0000FFFF)
411 #define XSDFEC_REG0_N_LSB (0)
412 #define XSDFEC_REG0_K_MASK (0x7fff0000)
413 #define XSDFEC_REG0_K_LSB (16)
415 xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
416 u32 n, u32 k, u32 offset)
420 /* Use only lower 16 bits */
421 if (n & ~XSDFEC_REG0_N_MASK)
422 dev_err(xsdfec->dev, "N value is beyond 16 bits");
423 n &= XSDFEC_REG0_N_MASK;
424 n <<= XSDFEC_REG0_N_LSB;
426 if (k & XSDFEC_REG0_K_MASK)
427 dev_err(xsdfec->dev, "K value is beyond 16 bits");
429 k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);
432 if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
433 > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
435 "Writing outside of LDPC reg0 space 0x%x",
436 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
437 (offset * XSDFEC_LDPC_REG_JUMP));
440 xsdfec_regwrite(xsdfec,
441 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
442 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
447 xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
449 struct xsdfec_ldpc_params *ldpc_params)
452 u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
453 (code_id * XSDFEC_LDPC_REG_JUMP);
455 if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
457 "Accessing outside of LDPC reg0 for code_id %d",
462 reg_value = xsdfec_regread(xsdfec, reg_addr);
464 ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;
466 ldpc_params->k = (reg_value >> XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK;
471 #define XSDFEC_REG1_PSIZE_MASK (0x000001ff)
472 #define XSDFEC_REG1_NO_PACKING_MASK (0x00000400)
473 #define XSDFEC_REG1_NO_PACKING_LSB (10)
474 #define XSDFEC_REG1_NM_MASK (0x000ff800)
475 #define XSDFEC_REG1_NM_LSB (11)
476 #define XSDFEC_REG1_BYPASS_MASK (0x00100000)
478 xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
479 u32 no_packing, u32 nm, u32 offset)
483 if (psize & ~XSDFEC_REG1_PSIZE_MASK)
484 dev_err(xsdfec->dev, "Psize is beyond 10 bits");
485 psize &= XSDFEC_REG1_PSIZE_MASK;
487 if (no_packing != 0 && no_packing != 1)
488 dev_err(xsdfec->dev, "No-packing bit register invalid");
489 no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
490 XSDFEC_REG1_NO_PACKING_MASK);
492 if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
493 dev_err(xsdfec->dev, "NM is beyond 10 bits");
494 nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
496 wdata = nm | no_packing | psize;
497 if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
498 > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
500 "Writing outside of LDPC reg1 space 0x%x",
501 XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
502 (offset * XSDFEC_LDPC_REG_JUMP));
505 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
506 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
511 xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
513 struct xsdfec_ldpc_params *ldpc_params)
516 u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
517 (code_id * XSDFEC_LDPC_REG_JUMP);
519 if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
521 "Accessing outside of LDPC reg1 for code_id %d",
526 reg_value = xsdfec_regread(xsdfec, reg_addr);
528 ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;
530 ldpc_params->no_packing = ((reg_value >> XSDFEC_REG1_NO_PACKING_LSB) &
531 XSDFEC_REG1_NO_PACKING_MASK);
533 ldpc_params->nm = (reg_value >> XSDFEC_REG1_NM_LSB) &
538 #define XSDFEC_REG2_NLAYERS_MASK (0x000001FF)
539 #define XSDFEC_REG2_NLAYERS_LSB (0)
540 #define XSDFEC_REG2_NNMQC_MASK (0x000FFE00)
541 #define XSDFEC_REG2_NMQC_LSB (9)
542 #define XSDFEC_REG2_NORM_TYPE_MASK (0x00100000)
543 #define XSDFEC_REG2_NORM_TYPE_LSB (20)
544 #define XSDFEC_REG2_SPECIAL_QC_MASK (0x00200000)
545 #define XSDFEC_REG2_SPEICAL_QC_LSB (21)
546 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x00400000)
547 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
548 #define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x01800000)
549 #define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
552 xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
553 u32 norm_type, u32 special_qc, u32 no_final_parity,
554 u32 max_schedule, u32 offset)
558 if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
559 XSDFEC_REG2_NLAYERS_LSB))
560 dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
561 nlayers &= XSDFEC_REG2_NLAYERS_MASK;
563 if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
564 dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
565 nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
568 dev_err(xsdfec->dev, "Norm type is invalid");
569 norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
570 XSDFEC_REG2_NORM_TYPE_MASK);
572 dev_err(xsdfec->dev, "Special QC in invalid");
573 special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
574 XSDFEC_REG2_SPECIAL_QC_MASK);
576 if (no_final_parity > 1)
577 dev_err(xsdfec->dev, "No final parity check invalid");
579 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
580 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
581 if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
582 XSDFEC_REG2_MAX_SCHEDULE_LSB))
583 dev_err(xsdfec->dev, "Max Schdule exceeds 2 bits");
584 max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
585 XSDFEC_REG2_MAX_SCHEDULE_MASK);
587 wdata = (max_schedule | no_final_parity | special_qc | norm_type |
590 if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
591 > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
593 "Writing outside of LDPC reg2 space 0x%x",
594 XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
595 (offset * XSDFEC_LDPC_REG_JUMP));
598 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
599 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
604 xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
606 struct xsdfec_ldpc_params *ldpc_params)
609 u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
610 (code_id * XSDFEC_LDPC_REG_JUMP);
612 if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
614 "Accessing outside of LDPC reg2 for code_id %d",
619 reg_value = xsdfec_regread(xsdfec, reg_addr);
621 ldpc_params->nlayers = ((reg_value >> XSDFEC_REG2_NLAYERS_LSB) &
622 XSDFEC_REG2_NLAYERS_MASK);
624 ldpc_params->nmqc = (reg_value >> XSDFEC_REG2_NMQC_LSB) &
625 XSDFEC_REG2_NNMQC_MASK;
627 ldpc_params->norm_type = ((reg_value >> XSDFEC_REG2_NORM_TYPE_LSB) &
628 XSDFEC_REG2_NORM_TYPE_MASK);
630 ldpc_params->special_qc = ((reg_value >> XSDFEC_REG2_SPEICAL_QC_LSB) &
631 XSDFEC_REG2_SPECIAL_QC_MASK);
633 ldpc_params->no_final_parity =
634 ((reg_value >> XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
635 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
637 ldpc_params->max_schedule =
638 ((reg_value >> XSDFEC_REG2_MAX_SCHEDULE_LSB) &
639 XSDFEC_REG2_MAX_SCHEDULE_MASK);
644 #define XSDFEC_REG3_LA_OFF_LSB (8)
645 #define XSDFEC_REG3_QC_OFF_LSB (16)
647 xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
648 u8 la_off, u16 qc_off, u32 offset)
652 wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
653 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
654 if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
655 (offset * XSDFEC_LDPC_REG_JUMP) >
656 XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
658 "Writing outside of LDPC reg3 space 0x%x",
659 XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
660 (offset * XSDFEC_LDPC_REG_JUMP));
663 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
664 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
669 xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
671 struct xsdfec_ldpc_params *ldpc_params)
674 u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
675 (code_id * XSDFEC_LDPC_REG_JUMP);
677 if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
679 "Accessing outside of LDPC reg3 for code_id %d",
684 reg_value = xsdfec_regread(xsdfec, reg_addr);
686 ldpc_params->qc_off = (reg_addr >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
687 ldpc_params->la_off = (reg_addr >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
688 ldpc_params->sc_off = (reg_addr & 0xFF);
693 #define XSDFEC_SC_TABLE_DEPTH (0x3fc)
694 #define XSDFEC_REG_WIDTH_JUMP (4)
696 xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
697 u32 *sc_ptr, u32 len)
702 * Writes that go beyond the length of
703 * Shared Scale(SC) table should fail
705 if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
706 dev_err(xsdfec->dev, "Write exceeds SC table length");
710 for (reg = 0; reg < len; reg++) {
711 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
712 (offset + reg) * XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
718 xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
719 u32 *sc_ptr, u32 len)
723 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
725 if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
726 dev_err(xsdfec->dev, "Access will exceed SC table length");
730 for (reg = 0; reg < len; reg++) {
731 reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
732 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
734 sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
740 #define XSDFEC_LA_TABLE_DEPTH (0xFFC)
742 xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
743 u32 *la_ptr, u32 len)
747 if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
748 dev_err(xsdfec->dev, "Write exceeds LA table length");
752 for (reg = 0; reg < len; reg++) {
753 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
754 (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
761 xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
762 u32 *la_ptr, u32 len)
766 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
768 if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
769 dev_err(xsdfec->dev, "Access will exceed LA table length");
773 for (reg = 0; reg < len; reg++) {
774 reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
775 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
777 la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
783 #define XSDFEC_QC_TABLE_DEPTH (0x7FFC)
785 xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
786 u32 offset, u32 *qc_ptr, u32 len)
790 if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
791 dev_err(xsdfec->dev, "Write exceeds QC table length");
795 for (reg = 0; reg < len; reg++) {
796 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
797 (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);
804 xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
805 u32 offset, u32 *qc_ptr, u32 len)
809 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
811 if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
812 dev_err(xsdfec->dev, "Access will exceed QC table length");
816 for (reg = 0; reg < len; reg++) {
817 reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
818 (offset + reg) * XSDFEC_REG_WIDTH_JUMP;
820 qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
827 xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
829 struct xsdfec_ldpc_params *ldpc;
832 ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
836 err = copy_from_user(ldpc, arg, sizeof(*ldpc));
839 "%s failed to copy from user for SDFEC%d",
840 __func__, xsdfec->config.fec_id);
843 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
845 "%s: Unable to write LDPC to SDFEC%d check DT",
846 __func__, xsdfec->config.fec_id);
849 /* Disable Write Protection before proceeding */
850 if (xsdfec->wr_protect)
851 xsdfec_wr_protect(xsdfec, false);
854 err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);
859 err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
860 ldpc->nm, ldpc->code_id);
865 err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
866 ldpc->norm_type, ldpc->special_qc,
867 ldpc->no_final_parity, ldpc->max_schedule,
873 err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
874 ldpc->la_off, ldpc->qc_off, ldpc->code_id);
878 /* Write Shared Codes */
879 err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
880 ldpc->sc_table, ldpc->nlayers);
884 err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
885 ldpc->la_table, ldpc->nlayers);
889 err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
890 ldpc->qc_table, ldpc->nqc);
903 xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
905 struct xsdfec_ldpc_params *ldpc_params;
908 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
910 "%s: SDFEC%d is configured for TURBO, check DT",
911 __func__, xsdfec->config.fec_id);
915 ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);
919 err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
922 "%s failed to copy from user for SDFEC%d",
923 __func__, xsdfec->config.fec_id);
927 err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,
932 err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,
937 err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,
942 err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,
948 * Collect the shared table values, needs to happen after reading
951 err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
952 ldpc_params->sc_table,
953 ldpc_params->nlayers);
957 err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
958 ldpc_params->la_table,
959 ldpc_params->nlayers);
963 err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
964 ldpc_params->qc_table,
969 err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
971 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
972 __func__, xsdfec->config.fec_id);
985 xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
987 bool order_out_of_range;
988 enum xsdfec_order order = *((enum xsdfec_order *)arg);
990 order_out_of_range = (order <= XSDFEC_INVALID_ORDER) ||
991 (order >= XSDFEC_ORDER_MAX);
992 if (order_out_of_range) {
994 "%s invalid order value %d for SDFEC%d",
995 __func__, order, xsdfec->config.fec_id);
999 /* Verify Device has not started */
1000 if (xsdfec->state == XSDFEC_STARTED) {
1001 dev_err(xsdfec->dev,
1002 "%s attempting to set Order while started for SDFEC%d",
1003 __func__, xsdfec->config.fec_id);
1007 xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (order - 1));
1009 xsdfec->config.order = order;
1015 xsdfec_set_bypass(struct xsdfec_dev *xsdfec, void __user *arg)
1017 unsigned long bypass = *((unsigned long *)arg);
1020 dev_err(xsdfec->dev,
1021 "%s invalid bypass value %ld for SDFEC%d",
1022 __func__, bypass, xsdfec->config.fec_id);
1026 /* Verify Device has not started */
1027 if (xsdfec->state == XSDFEC_STARTED) {
1028 dev_err(xsdfec->dev,
1029 "%s attempting to set bypass while started for SDFEC%d",
1030 __func__, xsdfec->config.fec_id);
1034 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, bypass);
1040 xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *is_active)
1044 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
1045 /* using a double ! operator instead of casting */
1046 *is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
1052 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
1054 u32 axis_width_field = 0;
1056 switch (axis_width_cfg) {
1058 axis_width_field = 0;
1061 axis_width_field = 1;
1064 axis_width_field = 2;
1068 return axis_width_field;
1072 xsdfec_translate_axis_words_cfg_val(
1073 enum xsdfec_axis_word_include axis_word_inc_cfg)
1075 u32 axis_words_field = 0;
1077 if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
1078 axis_word_inc_cfg == XSDFEC_IN_BLOCK)
1079 axis_words_field = 0;
1080 else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
1081 axis_words_field = 1;
1083 return axis_words_field;
1086 #define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
1087 #define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
1088 #define XSDFEC_AXIS_DIN_WORDS_LSB (2)
1089 #define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
1091 xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
1094 u32 dout_words_field;
1095 u32 dout_width_field;
1096 u32 din_words_field;
1097 u32 din_width_field;
1098 struct xsdfec_config *config = &xsdfec->config;
1100 /* translate config info to register values */
1102 xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
1104 xsdfec_translate_axis_width_cfg_val(config->dout_width);
1106 xsdfec_translate_axis_words_cfg_val(config->din_word_include);
1108 xsdfec_translate_axis_words_cfg_val(config->din_width);
1110 reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
1111 reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
1112 reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
1113 reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
1115 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
1120 static int xsdfec_start(struct xsdfec_dev *xsdfec)
1124 /* Verify Code is loaded */
1125 if (xsdfec->config.code == XSDFEC_CODE_INVALID) {
1126 dev_err(xsdfec->dev,
1127 "%s : set code before start for SDFEC%d",
1128 __func__, xsdfec->config.fec_id);
1131 regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
1133 if (regread != (xsdfec->config.code - 1)) {
1134 dev_err(xsdfec->dev,
1135 "%s SDFEC HW code does not match driver code, reg %d, code %d",
1136 __func__, regread, (xsdfec->config.code - 1));
1140 /* Verify Order has been set */
1141 if (xsdfec->config.order == XSDFEC_INVALID_ORDER) {
1142 dev_err(xsdfec->dev,
1143 "%s : set order before starting SDFEC%d",
1144 __func__, xsdfec->config.fec_id);
1148 /* Set AXIS enable */
1149 xsdfec_regwrite(xsdfec,
1150 XSDFEC_AXIS_ENABLE_ADDR,
1151 XSDFEC_AXIS_ENABLE_MASK);
1152 /* Write Protect Code and Registers */
1153 xsdfec_wr_protect(xsdfec, true);
1155 xsdfec->state = XSDFEC_STARTED;
1160 xsdfec_stop(struct xsdfec_dev *xsdfec)
1164 if (xsdfec->state != XSDFEC_STARTED)
1165 dev_err(xsdfec->dev, "Device not started correctly");
1166 /* Disable Write Protect */
1167 xsdfec_wr_protect(xsdfec, false);
1168 /* Disable AXIS_ENABLE register */
1169 regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
1170 regread &= (~XSDFEC_AXIS_ENABLE_MASK);
1171 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
1173 xsdfec->state = XSDFEC_STOPPED;
1178 * Reset will happen asynchronously
1179 * since there is no in-band reset register
1180 * Prepare driver for reset
1184 xsdfec_reset_req(struct xsdfec_dev *xsdfec)
1186 xsdfec->state = XSDFEC_INIT;
1187 xsdfec->config.order = XSDFEC_INVALID_ORDER;
1188 xsdfec->wr_protect = false;
1189 atomic_set(&xsdfec->isr_err_count, 0);
1190 atomic_set(&xsdfec->uecc_count, 0);
1191 atomic_set(&xsdfec->cecc_count, 0);
1192 atomic_inc(&xsdfec->reset_count);
1197 xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
1199 struct xsdfec_dev *xsdfec = fptr->private_data;
1200 void __user *arg = NULL;
1207 /* In failed state allow only reset and get status IOCTLs */
1208 if (xsdfec->state == XSDFEC_NEEDS_RESET &&
1209 (cmd != XSDFEC_RESET_REQ && cmd != XSDFEC_GET_STATUS)) {
1210 dev_err(xsdfec->dev,
1211 "SDFEC%d in failed state. Reset Required",
1212 xsdfec->config.fec_id);
1216 if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) {
1217 dev_err(xsdfec->dev, "Not a xilinx sdfec ioctl");
1221 /* check if ioctl argument is present and valid */
1222 if (_IOC_DIR(cmd) != _IOC_NONE) {
1223 arg = (void __user *)data;
1225 dev_err(xsdfec->dev, "xilinx sdfec ioctl argument is NULL Pointer");
1230 /* Access check of the argument if present */
1231 if (_IOC_DIR(cmd) & _IOC_READ)
1232 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
1233 else if (_IOC_DIR(cmd) & _IOC_WRITE)
1234 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
1237 dev_err(xsdfec->dev, "Invalid xilinx sdfec ioctl argument");
1242 case XSDFEC_START_DEV:
1243 rval = xsdfec_start(xsdfec);
1245 case XSDFEC_STOP_DEV:
1246 rval = xsdfec_stop(xsdfec);
1248 case XSDFEC_RESET_REQ:
1249 rval = xsdfec_reset_req(xsdfec);
1251 case XSDFEC_GET_STATUS:
1252 rval = xsdfec_get_status(xsdfec, arg);
1254 case XSDFEC_GET_CONFIG:
1255 rval = xsdfec_get_config(xsdfec, arg);
1257 case XSDFEC_SET_IRQ:
1258 rval = xsdfec_set_irq(xsdfec, arg);
1260 case XSDFEC_SET_TURBO:
1261 rval = xsdfec_set_turbo(xsdfec, arg);
1263 case XSDFEC_GET_TURBO:
1264 rval = xsdfec_get_turbo(xsdfec, arg);
1266 case XSDFEC_ADD_LDPC_CODE_PARAMS:
1267 rval = xsdfec_add_ldpc(xsdfec, arg);
1269 case XSDFEC_GET_LDPC_CODE_PARAMS:
1270 rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
1272 case XSDFEC_SET_ORDER:
1273 rval = xsdfec_set_order(xsdfec, arg);
1275 case XSDFEC_SET_BYPASS:
1276 rval = xsdfec_set_bypass(xsdfec, arg);
1278 case XSDFEC_IS_ACTIVE:
1279 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
1282 /* Should not get here */
1283 dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
1290 xsdfec_poll(struct file *file, poll_table *wait)
1293 struct xsdfec_dev *xsdfec = file->private_data;
1296 return POLLNVAL | POLLHUP;
1298 poll_wait(file, &xsdfec->waitq, wait);
1300 /* XSDFEC ISR detected an error */
1301 if (xsdfec->state == XSDFEC_NEEDS_RESET)
1302 mask = POLLIN | POLLRDNORM;
1304 mask = POLLPRI | POLLERR;
1309 static const struct file_operations xsdfec_fops = {
1310 .owner = THIS_MODULE,
1311 .open = xsdfec_dev_open,
1312 .release = xsdfec_dev_release,
1313 .unlocked_ioctl = xsdfec_dev_ioctl,
1314 .poll = xsdfec_poll,
1318 xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1320 struct device *dev = xsdfec->dev;
1321 struct device_node *node = dev->of_node;
1323 const char *fec_code;
1325 u32 din_word_include;
1327 u32 dout_word_include;
1329 rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1331 dev_err(dev, "xlnx,sdfec-code not in DT");
1335 if (!strcasecmp(fec_code, "ldpc")) {
1336 xsdfec->config.code = XSDFEC_LDPC_CODE;
1337 } else if (!strcasecmp(fec_code, "turbo")) {
1338 xsdfec->config.code = XSDFEC_TURBO_CODE;
1340 dev_err(xsdfec->dev, "Invalid Code in DT");
1344 rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
1347 dev_err(dev, "xlnx,sdfec-din-words not in DT");
1351 if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) {
1352 xsdfec->config.din_word_include = din_word_include;
1354 dev_err(xsdfec->dev, "Invalid DIN Words in DT");
1358 rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
1360 dev_err(dev, "xlnx,sdfec-din-width not in DT");
1364 switch (din_width) {
1365 /* Fall through and set for valid values */
1369 xsdfec->config.din_width = din_width;
1372 dev_err(xsdfec->dev, "Invalid DIN Width in DT");
1376 rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
1377 &dout_word_include);
1379 dev_err(dev, "xlnx,sdfec-dout-words not in DT");
1383 if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) {
1384 xsdfec->config.dout_word_include = dout_word_include;
1386 dev_err(xsdfec->dev, "Invalid DOUT Words in DT");
1390 rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
1392 dev_err(dev, "xlnx,sdfec-dout-width not in DT");
1396 switch (dout_width) {
1397 /* Fall through and set for valid values */
1401 xsdfec->config.dout_width = dout_width;
1404 dev_err(xsdfec->dev, "Invalid DOUT Width in DT");
1408 /* Write LDPC to CODE Register */
1409 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code - 1);
1411 xsdfec_cfg_axi_streams(xsdfec);
1417 xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)
1422 cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
1423 uecc = ecc_err & XSDFEC_ECC_ISR_MBE;
1425 uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
1426 atomic_add(hweight32(cecc), &xsdfec->cecc_count);
1428 if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1429 dev_err(xsdfec->dev,
1430 "Multi-bit error on xsdfec%d. Needs reset",
1431 xsdfec->config.fec_id);
1434 /* Clear ECC errors */
1435 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1439 xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)
1443 /* Update ISR error counts */
1444 isr_err_cnt = atomic_add_return(hweight32(isr_err),
1445 &xsdfec->isr_err_count);
1446 if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1447 dev_err(xsdfec->dev,
1448 "Tlast,or DIN_WORDS or DOUT_WORDS not correct");
1451 /* Clear ISR error status */
1452 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1456 xsdfec_reset_required(struct xsdfec_dev *xsdfec)
1458 xsdfec->state = XSDFEC_NEEDS_RESET;
1462 xsdfec_irq_thread(int irq, void *dev_id)
1464 struct xsdfec_dev *xsdfec = dev_id;
1465 irqreturn_t ret = IRQ_HANDLED;
1468 bool fatal_err = false;
1470 WARN_ON(xsdfec->irq != irq);
1472 /* Mask Interrupts */
1473 xsdfec_isr_enable(xsdfec, false);
1474 xsdfec_ecc_isr_enable(xsdfec, false);
1476 /* Read Interrupt Status Registers */
1477 ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1478 isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
1480 if (ecc_err & XSDFEC_ECC_ISR_MBE) {
1481 /* Multi-Bit Errors need Reset */
1482 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1483 xsdfec_reset_required(xsdfec);
1485 } else if (isr_err & XSDFEC_ISR_MASK) {
1487 * Tlast, DIN_WORDS and DOUT_WORDS related
1490 xsdfec_log_isr_errors(xsdfec, isr_err);
1491 xsdfec_reset_required(xsdfec);
1493 } else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
1494 /* Correctable ECC Errors */
1495 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1501 wake_up_interruptible(&xsdfec->waitq);
1503 /* Unmaks Interrupts */
1504 xsdfec_isr_enable(xsdfec, true);
1505 xsdfec_ecc_isr_enable(xsdfec, true);
1511 xsdfec_probe(struct platform_device *pdev)
1513 struct xsdfec_dev *xsdfec;
1515 struct device *dev_create;
1516 struct resource *res;
1518 bool irq_enabled = true;
1520 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1524 xsdfec->dev = &pdev->dev;
1525 xsdfec->config.fec_id = atomic_read(&xsdfec_ndevs);
1528 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1529 xsdfec->regs = devm_ioremap_resource(dev, res);
1530 if (IS_ERR(xsdfec->regs)) {
1531 dev_err(dev, "Unable to map resource");
1532 err = PTR_ERR(xsdfec->regs);
1533 goto err_xsdfec_dev;
1536 xsdfec->irq = platform_get_irq(pdev, 0);
1537 if (xsdfec->irq < 0) {
1538 dev_dbg(dev, "platform_get_irq failed");
1539 irq_enabled = false;
1542 err = xsdfec_parse_of(xsdfec);
1544 goto err_xsdfec_dev;
1546 /* Save driver private data */
1547 platform_set_drvdata(pdev, xsdfec);
1550 init_waitqueue_head(&xsdfec->waitq);
1551 /* Register IRQ thread */
1552 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1558 dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1559 goto err_xsdfec_dev;
1563 cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
1564 xsdfec->xsdfec_cdev.owner = THIS_MODULE;
1565 err = cdev_add(&xsdfec->xsdfec_cdev,
1566 MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id), 1);
1568 dev_err(dev, "cdev_add failed");
1570 goto err_xsdfec_dev;
1573 if (!xsdfec_class) {
1575 dev_err(dev, "xsdfec class not created correctly");
1576 goto err_xsdfec_cdev;
1579 dev_create = device_create(xsdfec_class, dev,
1580 MKDEV(MAJOR(xsdfec_devt),
1581 xsdfec->config.fec_id),
1582 xsdfec, "xsdfec%d", xsdfec->config.fec_id);
1583 if (IS_ERR(dev_create)) {
1584 dev_err(dev, "unable to create device");
1585 err = PTR_ERR(dev_create);
1586 goto err_xsdfec_cdev;
1589 atomic_set(&xsdfec->open_count, 1);
1590 dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->config.fec_id);
1591 atomic_inc(&xsdfec_ndevs);
1594 /* Failure cleanup */
1596 cdev_del(&xsdfec->xsdfec_cdev);
1602 xsdfec_remove(struct platform_device *pdev)
1604 struct xsdfec_dev *xsdfec;
1605 struct device *dev = &pdev->dev;
1607 xsdfec = platform_get_drvdata(pdev);
1611 if (!xsdfec_class) {
1612 dev_err(dev, "xsdfec_class is NULL");
1616 device_destroy(xsdfec_class,
1617 MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id));
1618 cdev_del(&xsdfec->xsdfec_cdev);
1619 atomic_dec(&xsdfec_ndevs);
1623 static const struct of_device_id xsdfec_of_match[] = {
1624 { .compatible = "xlnx,sd-fec-1.1", },
1625 { /* end of table */ }
1627 MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1629 static struct platform_driver xsdfec_driver = {
1631 .name = "xilinx-sdfec",
1632 .of_match_table = xsdfec_of_match,
1634 .probe = xsdfec_probe,
1635 .remove = xsdfec_remove,
1638 static int __init xsdfec_init_mod(void)
1642 xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
1643 if (IS_ERR(xsdfec_class)) {
1644 err = PTR_ERR(xsdfec_class);
1645 pr_err("%s : Unable to register xsdfec class", __func__);
1649 err = alloc_chrdev_region(&xsdfec_devt,
1650 0, DRIVER_MAX_DEV, DRIVER_NAME);
1652 pr_err("%s : Unable to get major number", __func__);
1653 goto err_xsdfec_class;
1656 err = platform_driver_register(&xsdfec_driver);
1658 pr_err("%s Unabled to register %s driver",
1659 __func__, DRIVER_NAME);
1660 goto err_xsdfec_drv;
1666 unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1668 class_destroy(xsdfec_class);
1672 static void __exit xsdfec_cleanup_mod(void)
1674 platform_driver_unregister(&xsdfec_driver);
1675 unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1676 class_destroy(xsdfec_class);
1677 xsdfec_class = NULL;
1680 module_init(xsdfec_init_mod);
1681 module_exit(xsdfec_cleanup_mod);
1683 MODULE_AUTHOR("Xilinx, Inc");
1684 MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
1685 MODULE_LICENSE("GPL");
1686 MODULE_VERSION(DRIVER_VERSION);