/*
 * Xilinx SDFEC
 *
 * Copyright (C) 2016 - 2017 Xilinx, Inc.
 *
 * Description:
 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a char device interface in sysfs and supports file
 * operations like open(), close() and ioctl().
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <uapi/misc/xilinx_sdfec.h>
41 #define DRIVER_NAME "xilinx_sdfec"
42 #define DRIVER_VERSION "0.3"
43 #define DRIVER_MAX_DEV BIT(MINORBITS)
45 static struct class *xsdfec_class;
46 static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
47 static dev_t xsdfec_devt;
/* Xilinx SDFEC Register Map */
#define XSDFEC_AXI_WR_PROTECT_ADDR		(0x00000)
#define XSDFEC_CODE_WR_PROTECT_ADDR		(0x00004)
#define XSDFEC_ACTIVE_ADDR			(0x00008)
#define XSDFEC_AXIS_WIDTH_ADDR			(0x0000c)
#define XSDFEC_AXIS_ENABLE_ADDR			(0x00010)
#define XSDFEC_AXIS_ENABLE_MASK			(0x0001F)
#define XSDFEC_FEC_CODE_ADDR			(0x00014)
#define XSDFEC_ORDER_ADDR			(0x00018)

/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK				(0x0003F)
/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR				(0x0001c)
/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR				(0x00020)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR				(0x00024)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR				(0x00028)

/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE			(0x7FF)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE			(0x3FF800)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK	(XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)

/* Multi Bit Error Position */
#define XSDFEC_ECC_MULTI_BIT_POS		(11)
/* Stop logging identical errors once this many have been counted */
#define XSDFEC_ERROR_MAX_THRESHOLD		(100)

/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR			(0x0002c)
/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR			(0x00030)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR			(0x00034)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR			(0x00038)

#define XSDFEC_BYPASS_ADDR			(0x0003c)
#define XSDFEC_TEST_EMA_ADDR_BASE		(0x00080)
#define XSDFEC_TEST_EMA_ADDR_HIGH		(0x00089)
#define XSDFEC_TURBO_ADDR			(0x00100)
#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE		(0x02000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH		(0x021fc)
#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE		(0x02004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH		(0x02200)
#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE		(0x02008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH		(0x02204)
#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE		(0x0200c)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH		(0x02208)
104 * struct xsdfec_dev - Driver data for SDFEC
105 * @regs: device physical base address
106 * @dev: pointer to device struct
107 * @state: State of the SDFEC device
108 * @config: Configuration of the SDFEC device
109 * @intr_enabled: indicates IRQ enabled
110 * @wr_protect: indicates Write Protect enabled
111 * @isr_err_count: Count of ISR errors
112 * @cecc_count: Count of Correctable ECC errors (SBE)
113 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
114 * @reset_count: Count of Resets requested
115 * @open_count: Count of char device being opened
117 * @xsdfec_cdev: Character device handle
118 * @sc_off: Shared Scale Table Offset
119 * @qc_off: Shared Circulant Table Offset
120 * @la_off: Shared Layer Table Offset
121 * @waitq: Driver wait queue
123 * This structure contains necessary state for SDFEC driver to operate
128 enum xsdfec_state state;
129 struct xsdfec_config config;
132 atomic_t isr_err_count;
135 atomic_t reset_count;
138 struct cdev xsdfec_cdev;
142 wait_queue_head_t waitq;
146 xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
148 if (xsdfec->wr_protect) {
149 dev_err(xsdfec->dev, "SDFEC in write protect");
154 "Writing 0x%x to offset 0x%x", value, addr);
155 iowrite32(value, xsdfec->regs + addr);
159 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
163 rval = ioread32(xsdfec->regs + addr);
165 "Read value = 0x%x from offset 0x%x",
170 #define XSDFEC_WRITE_PROTECT_ENABLE (1)
171 #define XSDFEC_WRITE_PROTECT_DISABLE (0)
173 xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
176 xsdfec_regwrite(xsdfec,
177 XSDFEC_CODE_WR_PROTECT_ADDR,
178 XSDFEC_WRITE_PROTECT_ENABLE);
179 xsdfec_regwrite(xsdfec,
180 XSDFEC_AXI_WR_PROTECT_ADDR,
181 XSDFEC_WRITE_PROTECT_ENABLE);
183 xsdfec_regwrite(xsdfec,
184 XSDFEC_AXI_WR_PROTECT_ADDR,
185 XSDFEC_WRITE_PROTECT_DISABLE);
186 xsdfec_regwrite(xsdfec,
187 XSDFEC_CODE_WR_PROTECT_ADDR,
188 XSDFEC_WRITE_PROTECT_DISABLE);
190 xsdfec->wr_protect = wr_pr;
194 xsdfec_dev_open(struct inode *iptr, struct file *fptr)
196 struct xsdfec_dev *xsdfec;
198 xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
202 /* Only one open per device at a time */
203 if (!atomic_dec_and_test(&xsdfec->open_count)) {
204 atomic_inc(&xsdfec->open_count);
208 fptr->private_data = xsdfec;
213 xsdfec_dev_release(struct inode *iptr, struct file *fptr)
215 struct xsdfec_dev *xsdfec;
217 xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
221 atomic_inc(&xsdfec->open_count);
225 #define XSDFEC_IS_ACTIVITY_SET (0x1)
227 xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
229 struct xsdfec_status status;
232 status.fec_id = xsdfec->config.fec_id;
233 status.state = xsdfec->state;
235 (xsdfec_regread(xsdfec,
236 XSDFEC_ACTIVE_ADDR) &
237 XSDFEC_IS_ACTIVITY_SET);
239 err = copy_to_user(arg, &status, sizeof(status));
241 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
242 __func__, xsdfec->config.fec_id);
249 xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
253 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
255 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
256 __func__, xsdfec->config.fec_id);
263 xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
269 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
271 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
272 if (mask_read & XSDFEC_ISR_MASK) {
274 "SDFEC enabling irq with IER failed");
279 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
281 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
282 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
284 "SDFEC disabling irq with IDR failed");
292 xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
298 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
299 XSDFEC_ECC_ISR_MASK);
300 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
301 if (mask_read & XSDFEC_ECC_ISR_MASK) {
303 "SDFEC enabling ECC irq with ECC IER failed");
308 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
309 XSDFEC_ECC_ISR_MASK);
310 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
311 if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
313 "SDFEC disable ECC irq with ECC IDR failed");
321 xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
323 struct xsdfec_irq irq;
326 err = copy_from_user(&irq, arg, sizeof(irq));
328 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
329 __func__, xsdfec->config.fec_id);
333 /* Setup tlast related IRQ */
334 if (irq.enable_isr) {
335 err = xsdfec_isr_enable(xsdfec, true);
340 /* Setup ECC related IRQ */
341 if (irq.enable_ecc_isr) {
342 err = xsdfec_ecc_isr_enable(xsdfec, true);
350 #define XSDFEC_TURBO_SCALE_MASK (0xF)
351 #define XSDFEC_TURBO_SCALE_BIT_POS (8)
353 xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
355 struct xsdfec_turbo turbo;
359 err = copy_from_user(&turbo, arg, sizeof(turbo));
361 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
362 __func__, xsdfec->config.fec_id);
366 /* Check to see what device tree says about the FEC codes */
367 if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
369 "%s: Unable to write Turbo to SDFEC%d check DT",
370 __func__, xsdfec->config.fec_id);
372 } else if (xsdfec->config.code == XSDFEC_CODE_INVALID) {
373 xsdfec->config.code = XSDFEC_TURBO_CODE;
376 if (xsdfec->wr_protect)
377 xsdfec_wr_protect(xsdfec, false);
379 turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
380 XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
381 xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
386 xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
389 struct xsdfec_turbo turbo_params;
392 if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
394 "%s: SDFEC%d is configured for LDPC, check DT",
395 __func__, xsdfec->config.fec_id);
399 reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
401 turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
402 XSDFEC_TURBO_SCALE_BIT_POS;
403 turbo_params.alg = reg_value & 0x1;
405 err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
407 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
408 __func__, xsdfec->config.fec_id);
415 #define XSDFEC_LDPC_REG_JUMP (0x10)
416 #define XSDFEC_REG0_N_MASK (0x0000FFFF)
417 #define XSDFEC_REG0_N_LSB (0)
418 #define XSDFEC_REG0_K_MASK (0x7fff0000)
419 #define XSDFEC_REG0_K_LSB (16)
421 xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
422 u32 n, u32 k, u32 offset)
426 /* Use only lower 16 bits */
427 if (n & ~XSDFEC_REG0_N_MASK)
428 dev_err(xsdfec->dev, "N value is beyond 16 bits");
429 n &= XSDFEC_REG0_N_MASK;
430 n <<= XSDFEC_REG0_N_LSB;
432 if (k & XSDFEC_REG0_K_MASK)
433 dev_err(xsdfec->dev, "K value is beyond 16 bits");
435 k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);
438 if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
439 > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
441 "Writing outside of LDPC reg0 space 0x%x",
442 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
443 (offset * XSDFEC_LDPC_REG_JUMP));
446 xsdfec_regwrite(xsdfec,
447 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
448 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
453 xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
455 struct xsdfec_ldpc_params *ldpc_params)
458 u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
459 (code_id * XSDFEC_LDPC_REG_JUMP);
461 if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
463 "Accessing outside of LDPC reg0 for code_id %d",
468 reg_value = xsdfec_regread(xsdfec, reg_addr);
470 ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;
472 ldpc_params->k = (reg_value >> XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK;
477 #define XSDFEC_REG1_PSIZE_MASK (0x000001ff)
478 #define XSDFEC_REG1_NO_PACKING_MASK (0x00000400)
479 #define XSDFEC_REG1_NO_PACKING_LSB (10)
480 #define XSDFEC_REG1_NM_MASK (0x000ff800)
481 #define XSDFEC_REG1_NM_LSB (11)
482 #define XSDFEC_REG1_BYPASS_MASK (0x00100000)
484 xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
485 u32 no_packing, u32 nm, u32 offset)
489 if (psize & ~XSDFEC_REG1_PSIZE_MASK)
490 dev_err(xsdfec->dev, "Psize is beyond 10 bits");
491 psize &= XSDFEC_REG1_PSIZE_MASK;
493 if (no_packing != 0 && no_packing != 1)
494 dev_err(xsdfec->dev, "No-packing bit register invalid");
495 no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
496 XSDFEC_REG1_NO_PACKING_MASK);
498 if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
499 dev_err(xsdfec->dev, "NM is beyond 10 bits");
500 nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
502 wdata = nm | no_packing | psize;
503 if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
504 > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
506 "Writing outside of LDPC reg1 space 0x%x",
507 XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
508 (offset * XSDFEC_LDPC_REG_JUMP));
511 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
512 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
517 xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
519 struct xsdfec_ldpc_params *ldpc_params)
522 u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
523 (code_id * XSDFEC_LDPC_REG_JUMP);
525 if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
527 "Accessing outside of LDPC reg1 for code_id %d",
532 reg_value = xsdfec_regread(xsdfec, reg_addr);
534 ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;
536 ldpc_params->no_packing = ((reg_value >> XSDFEC_REG1_NO_PACKING_LSB) &
537 XSDFEC_REG1_NO_PACKING_MASK);
539 ldpc_params->nm = (reg_value >> XSDFEC_REG1_NM_LSB) &
544 #define XSDFEC_REG2_NLAYERS_MASK (0x000001FF)
545 #define XSDFEC_REG2_NLAYERS_LSB (0)
546 #define XSDFEC_REG2_NNMQC_MASK (0x000FFE00)
547 #define XSDFEC_REG2_NMQC_LSB (9)
548 #define XSDFEC_REG2_NORM_TYPE_MASK (0x00100000)
549 #define XSDFEC_REG2_NORM_TYPE_LSB (20)
550 #define XSDFEC_REG2_SPECIAL_QC_MASK (0x00200000)
551 #define XSDFEC_REG2_SPEICAL_QC_LSB (21)
552 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x00400000)
553 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
554 #define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x01800000)
555 #define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
558 xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
559 u32 norm_type, u32 special_qc, u32 no_final_parity,
560 u32 max_schedule, u32 offset)
564 if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
565 XSDFEC_REG2_NLAYERS_LSB))
566 dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
567 nlayers &= XSDFEC_REG2_NLAYERS_MASK;
569 if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
570 dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
571 nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
574 dev_err(xsdfec->dev, "Norm type is invalid");
575 norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
576 XSDFEC_REG2_NORM_TYPE_MASK);
578 dev_err(xsdfec->dev, "Special QC in invalid");
579 special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
580 XSDFEC_REG2_SPECIAL_QC_MASK);
582 if (no_final_parity > 1)
583 dev_err(xsdfec->dev, "No final parity check invalid");
585 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
586 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
587 if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
588 XSDFEC_REG2_MAX_SCHEDULE_LSB))
589 dev_err(xsdfec->dev, "Max Schdule exceeds 2 bits");
590 max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
591 XSDFEC_REG2_MAX_SCHEDULE_MASK);
593 wdata = (max_schedule | no_final_parity | special_qc | norm_type |
596 if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
597 > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
599 "Writing outside of LDPC reg2 space 0x%x",
600 XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
601 (offset * XSDFEC_LDPC_REG_JUMP));
604 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
605 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
610 xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
612 struct xsdfec_ldpc_params *ldpc_params)
615 u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
616 (code_id * XSDFEC_LDPC_REG_JUMP);
618 if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
620 "Accessing outside of LDPC reg2 for code_id %d",
625 reg_value = xsdfec_regread(xsdfec, reg_addr);
627 ldpc_params->nlayers = ((reg_value >> XSDFEC_REG2_NLAYERS_LSB) &
628 XSDFEC_REG2_NLAYERS_MASK);
630 ldpc_params->nmqc = (reg_value >> XSDFEC_REG2_NMQC_LSB) &
631 XSDFEC_REG2_NNMQC_MASK;
633 ldpc_params->norm_type = ((reg_value >> XSDFEC_REG2_NORM_TYPE_LSB) &
634 XSDFEC_REG2_NORM_TYPE_MASK);
636 ldpc_params->special_qc = ((reg_value >> XSDFEC_REG2_SPEICAL_QC_LSB) &
637 XSDFEC_REG2_SPECIAL_QC_MASK);
639 ldpc_params->no_final_parity =
640 ((reg_value >> XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
641 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
643 ldpc_params->max_schedule =
644 ((reg_value >> XSDFEC_REG2_MAX_SCHEDULE_LSB) &
645 XSDFEC_REG2_MAX_SCHEDULE_MASK);
650 #define XSDFEC_REG3_LA_OFF_LSB (8)
651 #define XSDFEC_REG3_QC_OFF_LSB (16)
653 xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
654 u8 la_off, u16 qc_off, u32 offset)
658 wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
659 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
660 if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
661 (offset * XSDFEC_LDPC_REG_JUMP) >
662 XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
664 "Writing outside of LDPC reg3 space 0x%x",
665 XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
666 (offset * XSDFEC_LDPC_REG_JUMP));
669 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
670 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
675 xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
677 struct xsdfec_ldpc_params *ldpc_params)
680 u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
681 (code_id * XSDFEC_LDPC_REG_JUMP);
683 if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
685 "Accessing outside of LDPC reg3 for code_id %d",
690 reg_value = xsdfec_regread(xsdfec, reg_addr);
692 ldpc_params->qc_off = (reg_addr >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
693 ldpc_params->la_off = (reg_addr >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
694 ldpc_params->sc_off = (reg_addr & 0xFF);
699 #define XSDFEC_SC_TABLE_DEPTH (0x3fc)
700 #define XSDFEC_REG_WIDTH_JUMP (4)
702 xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
703 u32 *sc_ptr, u32 len)
708 * Writes that go beyond the length of
709 * Shared Scale(SC) table should fail
711 if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
712 dev_err(xsdfec->dev, "Write exceeds SC table length");
717 * sc_off tracks the points to the last written location
718 * in the Shared Scale(SC) table. Those shared codes might
719 * be in use. Updating them without quiescing the device
720 * can put the SDFEC device in an indeterminate state
722 if ((XSDFEC_REG_WIDTH_JUMP * offset) < xsdfec->sc_off) {
723 dev_err(xsdfec->dev, "Might write to in use shared SC code");
727 for (reg = 0; reg < len; reg++) {
728 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
729 (offset + reg) * XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
731 xsdfec->sc_off = reg + (XSDFEC_REG_WIDTH_JUMP * offset);
736 xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
737 u32 *sc_ptr, u32 len)
741 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
743 if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
744 dev_err(xsdfec->dev, "Access will exceed SC table length");
748 for (reg = 0; reg < len; reg++) {
749 reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
750 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
752 sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
758 #define XSDFEC_LA_TABLE_DEPTH (0xFFC)
760 xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
761 u32 *la_ptr, u32 len)
765 if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
766 dev_err(xsdfec->dev, "Write exceeds LA table length");
770 if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->la_off) {
771 dev_err(xsdfec->dev, "Might write to in use shared LA code");
775 for (reg = 0; reg < len; reg++) {
776 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
777 (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
780 xsdfec->la_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
785 xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
786 u32 *la_ptr, u32 len)
790 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
792 if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
793 dev_err(xsdfec->dev, "Access will exceed LA table length");
797 for (reg = 0; reg < len; reg++) {
798 reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
799 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
801 la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
807 #define XSDFEC_QC_TABLE_DEPTH (0x7FFC)
809 xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
810 u32 offset, u32 *qc_ptr, u32 len)
814 if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
815 dev_err(xsdfec->dev, "Write exceeds QC table length");
819 if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->qc_off) {
820 dev_err(xsdfec->dev, "Might write to in use shared LA code");
824 for (reg = 0; reg < len; reg++) {
825 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
826 (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);
829 xsdfec->qc_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
834 xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
835 u32 offset, u32 *qc_ptr, u32 len)
839 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
841 if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
842 dev_err(xsdfec->dev, "Access will exceed QC table length");
846 for (reg = 0; reg < len; reg++) {
847 reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
848 (offset + reg) * XSDFEC_REG_WIDTH_JUMP;
850 qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
857 xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
859 struct xsdfec_ldpc_params *ldpc;
862 ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
866 err = copy_from_user(ldpc, arg, sizeof(*ldpc));
869 "%s failed to copy from user for SDFEC%d",
870 __func__, xsdfec->config.fec_id);
873 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
875 "%s: Unable to write LDPC to SDFEC%d check DT",
876 __func__, xsdfec->config.fec_id);
879 xsdfec->config.code = XSDFEC_LDPC_CODE;
880 /* Disable Write Protection before proceeding */
881 if (xsdfec->wr_protect)
882 xsdfec_wr_protect(xsdfec, false);
885 err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);
890 err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
891 ldpc->nm, ldpc->code_id);
896 err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
897 ldpc->norm_type, ldpc->special_qc,
898 ldpc->no_final_parity, ldpc->max_schedule,
904 err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
905 ldpc->la_off, ldpc->qc_off, ldpc->code_id);
909 /* Write Shared Codes */
910 err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
911 ldpc->sc_table, ldpc->nlayers);
915 err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
916 ldpc->la_table, ldpc->nlayers);
920 err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
921 ldpc->qc_table, ldpc->nqc);
934 xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
936 struct xsdfec_ldpc_params *ldpc_params;
939 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
941 "%s: SDFEC%d is configured for TURBO, check DT",
942 __func__, xsdfec->config.fec_id);
946 ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);
950 err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
953 "%s failed to copy from user for SDFEC%d",
954 __func__, xsdfec->config.fec_id);
958 err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,
963 err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,
968 err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,
973 err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,
979 * Collect the shared table values, needs to happen after reading
982 err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
983 ldpc_params->sc_table,
984 ldpc_params->nlayers);
988 err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
989 ldpc_params->la_table,
990 ldpc_params->nlayers);
994 err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
995 ldpc_params->qc_table,
1000 err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
1002 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
1003 __func__, xsdfec->config.fec_id);
1016 xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
1018 bool order_out_of_range;
1019 enum xsdfec_order order = *((enum xsdfec_order *)arg);
1021 order_out_of_range = (order <= XSDFEC_INVALID_ORDER) ||
1022 (order >= XSDFEC_ORDER_MAX);
1023 if (order_out_of_range) {
1024 dev_err(xsdfec->dev,
1025 "%s invalid order value %d for SDFEC%d",
1026 __func__, order, xsdfec->config.fec_id);
1030 /* Verify Device has not started */
1031 if (xsdfec->state == XSDFEC_STARTED) {
1032 dev_err(xsdfec->dev,
1033 "%s attempting to set Order while started for SDFEC%d",
1034 __func__, xsdfec->config.fec_id);
1038 xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (order - 1));
1040 xsdfec->config.order = order;
1046 xsdfec_set_bypass(struct xsdfec_dev *xsdfec, void __user *arg)
1048 unsigned long bypass = *((unsigned long *)arg);
1051 dev_err(xsdfec->dev,
1052 "%s invalid bypass value %ld for SDFEC%d",
1053 __func__, bypass, xsdfec->config.fec_id);
1057 /* Verify Device has not started */
1058 if (xsdfec->state == XSDFEC_STARTED) {
1059 dev_err(xsdfec->dev,
1060 "%s attempting to set bypass while started for SDFEC%d",
1061 __func__, xsdfec->config.fec_id);
1065 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, bypass);
1071 xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *is_active)
1075 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
1076 /* using a double ! operator instead of casting */
1077 *is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
1082 static int xsdfec_start(struct xsdfec_dev *xsdfec)
1086 /* Verify Code is loaded */
1087 if (xsdfec->config.code == XSDFEC_CODE_INVALID) {
1088 dev_err(xsdfec->dev,
1089 "%s : set code before start for SDFEC%d",
1090 __func__, xsdfec->config.fec_id);
1093 regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
1095 if (regread != (xsdfec->config.code - 1)) {
1096 dev_err(xsdfec->dev,
1097 "%s SDFEC HW code does not match driver code, reg %d, code %d",
1098 __func__, regread, (xsdfec->config.code - 1));
1102 /* Verify Order has been set */
1103 if (xsdfec->config.order == XSDFEC_INVALID_ORDER) {
1104 dev_err(xsdfec->dev,
1105 "%s : set order before starting SDFEC%d",
1106 __func__, xsdfec->config.fec_id);
1110 /* Set AXIS width */
1111 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, 0);
1112 /* Set AXIS enable */
1113 xsdfec_regwrite(xsdfec,
1114 XSDFEC_AXIS_ENABLE_ADDR,
1115 XSDFEC_AXIS_ENABLE_MASK);
1116 /* Write Protect Code and Registers */
1117 xsdfec_wr_protect(xsdfec, true);
1119 xsdfec->state = XSDFEC_STARTED;
1124 xsdfec_stop(struct xsdfec_dev *xsdfec)
1128 if (xsdfec->state != XSDFEC_STARTED)
1129 dev_err(xsdfec->dev, "Device not started correctly");
1130 /* Disable Write Protect */
1131 xsdfec_wr_protect(xsdfec, false);
1132 /* Disable AXIS_ENABLE register */
1133 regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
1134 regread &= (~XSDFEC_AXIS_ENABLE_MASK);
1135 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
1137 xsdfec->state = XSDFEC_STOPPED;
1142 * Reset will happen asynchronously
1143 * since there is no in-band reset register
1144 * Prepare driver for reset
1148 xsdfec_reset_req(struct xsdfec_dev *xsdfec)
1150 xsdfec->state = XSDFEC_INIT;
1151 xsdfec->config.order = XSDFEC_INVALID_ORDER;
1155 xsdfec->wr_protect = false;
1156 atomic_set(&xsdfec->isr_err_count, 0);
1157 atomic_set(&xsdfec->uecc_count, 0);
1158 atomic_set(&xsdfec->cecc_count, 0);
1159 atomic_inc(&xsdfec->reset_count);
1164 xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
1166 struct xsdfec_dev *xsdfec = fptr->private_data;
1167 void __user *arg = NULL;
1174 /* In failed state allow only reset and get status IOCTLs */
1175 if (xsdfec->state == XSDFEC_NEEDS_RESET &&
1176 (cmd != XSDFEC_RESET_REQ && cmd != XSDFEC_GET_STATUS)) {
1177 dev_err(xsdfec->dev,
1178 "SDFEC%d in failed state. Reset Required",
1179 xsdfec->config.fec_id);
1183 if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) {
1184 dev_err(xsdfec->dev, "Not a xilinx sdfec ioctl");
1188 /* check if ioctl argument is present and valid */
1189 if (_IOC_DIR(cmd) != _IOC_NONE) {
1190 arg = (void __user *)data;
1192 dev_err(xsdfec->dev, "xilinx sdfec ioctl argument is NULL Pointer");
1197 /* Access check of the argument if present */
1198 if (_IOC_DIR(cmd) & _IOC_READ)
1199 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
1200 else if (_IOC_DIR(cmd) & _IOC_WRITE)
1201 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
1204 dev_err(xsdfec->dev, "Invalid xilinx sdfec ioctl argument");
1209 case XSDFEC_START_DEV:
1210 rval = xsdfec_start(xsdfec);
1212 case XSDFEC_STOP_DEV:
1213 rval = xsdfec_stop(xsdfec);
1215 case XSDFEC_RESET_REQ:
1216 rval = xsdfec_reset_req(xsdfec);
1218 case XSDFEC_GET_STATUS:
1219 rval = xsdfec_get_status(xsdfec, arg);
1221 case XSDFEC_GET_CONFIG:
1222 rval = xsdfec_get_config(xsdfec, arg);
1224 case XSDFEC_SET_IRQ:
1225 rval = xsdfec_set_irq(xsdfec, arg);
1227 case XSDFEC_SET_TURBO:
1228 rval = xsdfec_set_turbo(xsdfec, arg);
1230 case XSDFEC_GET_TURBO:
1231 rval = xsdfec_get_turbo(xsdfec, arg);
1233 case XSDFEC_ADD_LDPC_CODE_PARAMS:
1234 rval = xsdfec_add_ldpc(xsdfec, arg);
1236 case XSDFEC_GET_LDPC_CODE_PARAMS:
1237 rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
1239 case XSDFEC_SET_ORDER:
1240 rval = xsdfec_set_order(xsdfec, arg);
1242 case XSDFEC_SET_BYPASS:
1243 rval = xsdfec_set_bypass(xsdfec, arg);
1245 case XSDFEC_IS_ACTIVE:
1246 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
1249 /* Should not get here */
1250 dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
1257 xsdfec_poll(struct file *file, poll_table *wait)
1260 struct xsdfec_dev *xsdfec = file->private_data;
1263 return POLLNVAL | POLLHUP;
1265 poll_wait(file, &xsdfec->waitq, wait);
1267 /* XSDFEC ISR detected an error */
1268 if (xsdfec->state == XSDFEC_NEEDS_RESET)
1269 mask = POLLIN | POLLRDNORM;
1271 mask = POLLPRI | POLLERR;
1276 static const struct file_operations xsdfec_fops = {
1277 .owner = THIS_MODULE,
1278 .open = xsdfec_dev_open,
1279 .release = xsdfec_dev_release,
1280 .unlocked_ioctl = xsdfec_dev_ioctl,
1281 .poll = xsdfec_poll,
1285 xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1287 struct device *dev = xsdfec->dev;
1288 struct device_node *node = dev->of_node;
1290 const char *fec_code;
1291 const char *fec_op_mode;
1293 rval = of_property_read_string(node,
1294 "xlnx,sdfec-op-mode",
1297 dev_err(dev, "xlnx,sdfec-op-mode not in DT");
1301 if (!strcasecmp(fec_op_mode, "encode")) {
1302 xsdfec->config.mode = XSDFEC_ENCODE;
1303 } else if (!strcasecmp(fec_op_mode, "decode")) {
1304 xsdfec->config.mode = XSDFEC_DECODE;
1306 dev_err(dev, "Encode or Decode not specified in DT");
1310 rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1312 dev_err(dev, "xlnx,sdfec-code not in DT");
1316 if (!strcasecmp(fec_code, "ldpc")) {
1317 xsdfec->config.code = XSDFEC_LDPC_CODE;
1318 } else if (!strcasecmp(fec_code, "turbo")) {
1319 xsdfec->config.code = XSDFEC_TURBO_CODE;
1321 dev_err(xsdfec->dev, "Invalid Op Mode in DT");
1325 /* Write LDPC to CODE Register */
1326 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code - 1);
1332 xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)
1337 cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
1338 uecc = ecc_err & XSDFEC_ECC_ISR_MBE;
1340 uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
1341 atomic_add(hweight32(cecc), &xsdfec->cecc_count);
1343 if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1344 dev_err(xsdfec->dev,
1345 "Multi-bit error on xsdfec%d. Needs reset",
1346 xsdfec->config.fec_id);
1349 /* Clear ECC errors */
1350 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1354 xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)
1358 /* Update ISR error counts */
1359 isr_err_cnt = atomic_add_return(hweight32(isr_err),
1360 &xsdfec->isr_err_count);
1361 if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1362 dev_err(xsdfec->dev,
1363 "Tlast,or DIN_WORDS or DOUT_WORDS not correct");
1366 /* Clear ISR error status */
1367 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1371 xsdfec_reset_required(struct xsdfec_dev *xsdfec)
1373 xsdfec->state = XSDFEC_NEEDS_RESET;
/*
 * xsdfec_irq_thread() - threaded IRQ handler for the SD-FEC core.
 * @irq:    interrupt number (expected to match xsdfec->irq)
 * @dev_id: the struct xsdfec_dev registered with the IRQ
 *
 * Masks both interrupt sources, reads the two status registers,
 * dispatches to the appropriate logger (multi-bit ECC errors and ISR
 * errors also flag the device as needing a reset), wakes waiters on the
 * device wait queue, then unmasks interrupts again.
 */
1377 xsdfec_irq_thread(int irq, void *dev_id)
1379 struct xsdfec_dev *xsdfec = dev_id;
1380 irqreturn_t ret = IRQ_HANDLED;
1383 bool fatal_err = false;
/* Sanity check: we should only be invoked for our own interrupt line */
1385 WARN_ON(xsdfec->irq != irq);
1387 /* Mask Interrupts */
1388 xsdfec_isr_enable(xsdfec, false);
1389 xsdfec_ecc_isr_enable(xsdfec, false);
1391 /* Read Interrupt Status Registers */
1392 ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1393 isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
/* Uncorrectable ECC errors take priority: the device must be reset */
1395 if (ecc_err & XSDFEC_ECC_ISR_MBE) {
1396 /* Multi-Bit Errors need Reset */
1397 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1398 xsdfec_reset_required(xsdfec);
1400 } else if (isr_err & XSDFEC_ISR_MASK) {
1402 * Tlast, DIN_WORDS and DOUT_WORDS related
1405 xsdfec_log_isr_errors(xsdfec, isr_err);
1406 xsdfec_reset_required(xsdfec);
/* Single-bit ECC errors are correctable: log only, no reset */
1408 } else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
1409 /* Correctable ECC Errors */
1410 xsdfec_log_ecc_errors(xsdfec, ecc_err);
/* Wake anyone sleeping on the device wait queue (e.g. via poll) */
1416 wake_up_interruptible(&xsdfec->waitq);
1418 /* Unmask Interrupts */
1419 xsdfec_isr_enable(xsdfec, true);
1420 xsdfec_ecc_isr_enable(xsdfec, true);
/*
 * xsdfec_probe() - bind one SD-FEC hardware instance to the driver.
 * @pdev: platform device describing this instance (registers + optional IRQ)
 *
 * Allocates per-device state, maps the register window, obtains the IRQ
 * (probe continues with irq_enabled = false if platform_get_irq()
 * fails), parses the devicetree configuration, registers the char
 * device and creates the /dev/xsdfecN node. Failures unwind through the
 * goto labels at the bottom.
 */
1426 xsdfec_probe(struct platform_device *pdev)
1428 struct xsdfec_dev *xsdfec;
1430 struct device *dev_create;
1431 struct resource *res;
1433 bool irq_enabled = true;
/* devm_ allocation: freed automatically when the device is unbound */
1435 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1439 xsdfec->dev = &pdev->dev;
/*
 * NOTE(review): fec_id is read from the global device counter here but
 * the counter is only incremented at the very end of probe — two
 * concurrent probes could pick the same id; confirm probes are
 * serialized for this driver.
 */
1440 xsdfec->config.fec_id = atomic_read(&xsdfec_ndevs);
/* Map the core's register window */
1443 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1444 xsdfec->regs = devm_ioremap_resource(dev, res);
1445 if (IS_ERR(xsdfec->regs)) {
1446 dev_err(dev, "Unable to map resource");
1447 err = PTR_ERR(xsdfec->regs);
1448 goto err_xsdfec_dev;
/* IRQ is optional: record its absence and carry on without it */
1451 xsdfec->irq = platform_get_irq(pdev, 0);
1452 if (xsdfec->irq < 0) {
1453 dev_dbg(dev, "platform_get_irq failed");
1454 irq_enabled = false;
/* Read encode/decode mode and code type from the devicetree node */
1457 err = xsdfec_parse_of(xsdfec);
1459 goto err_xsdfec_dev;
1461 /* Save driver private data */
1462 platform_set_drvdata(pdev, xsdfec);
1465 init_waitqueue_head(&xsdfec->waitq);
1466 /* Register IRQ thread */
1467 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1473 dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1474 goto err_xsdfec_dev;
/* Char device: the minor number is this instance's fec_id */
1478 cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
1479 xsdfec->xsdfec_cdev.owner = THIS_MODULE;
1480 err = cdev_add(&xsdfec->xsdfec_cdev,
1481 MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id), 1);
1483 dev_err(dev, "cdev_add failed");
1485 goto err_xsdfec_dev;
/* The class is created at module init; bail out if that failed */
1488 if (!xsdfec_class) {
1490 dev_err(dev, "xsdfec class not created correctly");
1491 goto err_xsdfec_cdev;
/* Create /dev/xsdfecN so userspace can open this instance */
1494 dev_create = device_create(xsdfec_class, dev,
1495 MKDEV(MAJOR(xsdfec_devt),
1496 xsdfec->config.fec_id),
1497 xsdfec, "xsdfec%d", xsdfec->config.fec_id);
1498 if (IS_ERR(dev_create)) {
1499 dev_err(dev, "unable to create device");
1500 err = PTR_ERR(dev_create);
1501 goto err_xsdfec_cdev;
/* NOTE(review): open_count starts at 1 — confirm open()'s expectation */
1504 atomic_set(&xsdfec->open_count, 1);
1505 dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->config.fec_id);
/* Only now bump the global count, making this id "taken" */
1506 atomic_inc(&xsdfec_ndevs);
1509 /* Failure cleanup */
1511 cdev_del(&xsdfec->xsdfec_cdev);
/*
 * xsdfec_remove() - unbind a device instance.
 * @pdev: the platform device being removed
 *
 * Destroys the /dev node, deletes the char device, and decrements the
 * global device count — mirroring the registration steps performed in
 * xsdfec_probe(). The devm-managed allocations and IRQ are released
 * automatically by the driver core.
 */
1517 xsdfec_remove(struct platform_device *pdev)
1519 struct xsdfec_dev *xsdfec;
1520 struct device *dev = &pdev->dev;
1522 xsdfec = platform_get_drvdata(pdev);
/* xsdfec_class should exist whenever a device is bound; log if it is not */
1526 if (!xsdfec_class) {
1527 dev_err(dev, "xsdfec_class is NULL");
1531 device_destroy(xsdfec_class,
1532 MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id));
1533 cdev_del(&xsdfec->xsdfec_cdev);
1534 atomic_dec(&xsdfec_ndevs);
/* Devicetree compatible strings this driver binds to */
1538 static const struct of_device_id xsdfec_of_match[] = {
1539 { .compatible = "xlnx,fec-engine", },
1540 { /* end of table */ }
/* Export the match table so userspace tooling can autoload the module */
1542 MODULE_DEVICE_TABLE(of, xsdfec_of_match);
/* Platform driver glue: binds xsdfec_probe/xsdfec_remove to OF matches */
1544 static struct platform_driver xsdfec_driver = {
1546 .name = "xilinx-sdfec",
1547 .of_match_table = xsdfec_of_match,
1549 .probe = xsdfec_probe,
1550 .remove = xsdfec_remove,
1553 static int __init xsdfec_init_mod(void)
1557 xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
1558 if (IS_ERR(xsdfec_class)) {
1559 err = PTR_ERR(xsdfec_class);
1560 pr_err("%s : Unable to register xsdfec class", __func__);
1564 err = alloc_chrdev_region(&xsdfec_devt,
1565 0, DRIVER_MAX_DEV, DRIVER_NAME);
1567 pr_err("%s : Unable to get major number", __func__);
1568 goto err_xsdfec_class;
1571 err = platform_driver_register(&xsdfec_driver);
1573 pr_err("%s Unabled to register %s driver",
1574 __func__, DRIVER_NAME);
1575 goto err_xsdfec_drv;
1581 unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1583 class_destroy(xsdfec_class);
1587 static void __exit xsdfec_cleanup_mod(void)
1589 platform_driver_unregister(&xsdfec_driver);
1590 unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1591 class_destroy(xsdfec_class);
1592 xsdfec_class = NULL;
/* Module entry/exit hooks */
1595 module_init(xsdfec_init_mod);
1596 module_exit(xsdfec_cleanup_mod);
/* Module metadata */
1598 MODULE_AUTHOR("Xilinx, Inc");
1599 MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
1600 MODULE_LICENSE("GPL");
1601 MODULE_VERSION(DRIVER_VERSION);