 * Copyright (C) 2016 - 2017 Xilinx, Inc.
 *
 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a char device interface in sysfs and supports file
 * operations like open(), close() and ioctl().
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <uapi/misc/xilinx_sdfec.h>

#define DRIVER_NAME "xilinx_sdfec"
#define DRIVER_VERSION "0.3"
#define DRIVER_MAX_DEV (6)

static struct class *xsdfec_class;
static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
static dev_t xsdfec_devt;

/* Xilinx SDFEC Register Map */
#define XSDFEC_AXI_WR_PROTECT_ADDR (0x00000)
#define XSDFEC_CODE_WR_PROTECT_ADDR (0x00004)
#define XSDFEC_ACTIVE_ADDR (0x00008)
#define XSDFEC_AXIS_WIDTH_ADDR (0x0000c)
#define XSDFEC_AXIS_ENABLE_ADDR (0x00010)
#define XSDFEC_AXIS_ENABLE_MASK (0x0001F)
#define XSDFEC_FEC_CODE_ADDR (0x00014)
#define XSDFEC_ORDER_ADDR (0x00018)

/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK (0x0003F)
/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR (0x0001c)
/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR (0x00020)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR (0x00024)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR (0x00028)

/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE (0x7FF)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE (0x3FF800)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)

/* Multi Bit Error Position */
#define XSDFEC_ECC_MULTI_BIT_POS (11)
#define XSDFEC_ERROR_MAX_THRESHOLD (100)

/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR (0x0002c)
/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR (0x00030)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR (0x00034)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR (0x00038)

#define XSDFEC_BYPASS_ADDR (0x0003c)
#define XSDFEC_TEST_EMA_ADDR_BASE (0x00080)
#define XSDFEC_TEST_EMA_ADDR_HIGH (0x00089)
#define XSDFEC_TURBO_ADDR (0x00100)
#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x02000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x021fc)
#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x02004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x02200)
#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x02008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x02204)
#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x0200c)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x02208)

 * struct xsdfec_dev - Driver data for SDFEC
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @fec_id: Instance number
 * @intr_enabled: indicates IRQ enabled
 * @wr_protect: indicates Write Protect enabled
 * @code: LDPC or Turbo Codes being used
 * @order: In-Order or Out-of-Order
 * @state: State of the SDFEC device
 * @op_mode: Operating in Encode or Decode
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @reset_count: Count of Resets requested
 * @open_count: Count of char device being opened
 * @xsdfec_cdev: Character device handle
 * @sc_off: Shared Scale Table Offset
 * @qc_off: Shared Circulant Table Offset
 * @la_off: Shared Layer Table Offset
 * @waitq: Driver wait queue
 *
 * This structure contains the necessary state for the SDFEC driver to operate.
        enum xsdfec_code code;
        enum xsdfec_order order;
        enum xsdfec_state state;
        enum xsdfec_op_mode op_mode;
        atomic_t isr_err_count;
        atomic_t reset_count;
        struct cdev xsdfec_cdev;
        wait_queue_head_t waitq;
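
/*
 * Write @value to the SDFEC register at byte offset @addr; the write is
 * refused while the core is write protected.
 */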
xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
        if (xsdfec->wr_protect) {
                dev_err(xsdfec->dev, "SDFEC in write protect");
                "Writing 0x%x to offset 0x%x", value, addr);
        iowrite32(value, xsdfec->regs + addr);
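
/* Read the SDFEC register at byte offset @addr and log the access */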
xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
        rval = ioread32(xsdfec->regs + addr);
        dev_info(xsdfec->dev,
                "Read value = 0x%x from offset 0x%x",

#define XSDFEC_WRITE_PROTECT_ENABLE (1)
#define XSDFEC_WRITE_PROTECT_DISABLE (0)
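
/*
 * Enable or disable write protection of the code and AXI register spaces,
 * and cache the setting so xsdfec_regwrite() can reject writes while the
 * core is protected.
 */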
xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
                xsdfec_regwrite(xsdfec,
                        XSDFEC_CODE_WR_PROTECT_ADDR,
                        XSDFEC_WRITE_PROTECT_ENABLE);
                xsdfec_regwrite(xsdfec,
                        XSDFEC_AXI_WR_PROTECT_ADDR,
                        XSDFEC_WRITE_PROTECT_ENABLE);
                xsdfec_regwrite(xsdfec,
                        XSDFEC_AXI_WR_PROTECT_ADDR,
                        XSDFEC_WRITE_PROTECT_DISABLE);
                xsdfec_regwrite(xsdfec,
                        XSDFEC_CODE_WR_PROTECT_ADDR,
                        XSDFEC_WRITE_PROTECT_DISABLE);
        xsdfec->wr_protect = wr_pr;
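
/* Allow only a single open of the character device at a time */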
xsdfec_dev_open(struct inode *iptr, struct file *fptr)
        struct xsdfec_dev *xsdfec;

        xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);

        /* Only one open per device at a time */
        if (!atomic_dec_and_test(&xsdfec->open_count))
        fptr->private_data = xsdfec;

xsdfec_dev_release(struct inode *iptr, struct file *fptr)
        struct xsdfec_dev *xsdfec;

        xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
        atomic_inc(&xsdfec->open_count);

#define XSDFEC_IS_ACTIVITY_SET (0x1)
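
/*
 * XSDFEC_GET_STATUS: report the driver state together with the activity
 * bit read from the ACTIVE register, copied back to user space.
 */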
xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_status status;
        status.fec_id = xsdfec->fec_id;
        status.state = xsdfec->state;
        status.code = xsdfec->code;
        status.order = xsdfec->order;
        status.mode = xsdfec->op_mode;
                (xsdfec_regread(xsdfec,
                        XSDFEC_ACTIVE_ADDR) &
                        XSDFEC_IS_ACTIVITY_SET);
        status.cecc_count = atomic_read(&xsdfec->cecc_count);

        err = copy_to_user(arg, &status, sizeof(status));
                dev_err(xsdfec->dev, "%s failed for SDFEC%d",
                        __func__, xsdfec->fec_id);

xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_config config;
        config.fec_id = xsdfec->fec_id;
        config.state = xsdfec->state;
        config.code = xsdfec->code;
        config.mode = xsdfec->op_mode;
        config.order = xsdfec->order;

        err = copy_to_user(arg, &config, sizeof(config));
                dev_err(xsdfec->dev, "%s failed for SDFEC%d",
                        __func__, xsdfec->fec_id);
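
/*
 * Enable the tlast/DIN_WORDS/DOUT_WORDS interrupts through the IER, or
 * disable them through the IDR, and confirm the change by reading back
 * the IMR. The ECC variant below does the same for the ECC interrupts.
 */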
xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
                xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
                mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
                if (mask_read & XSDFEC_ISR_MASK) {
                        "SDFEC enabling irq with IER failed");
                xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
                mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
                if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
                        "SDFEC disabling irq with IDR failed");

xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
                xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
                        XSDFEC_ECC_ISR_MASK);
                mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
                if (mask_read & XSDFEC_ECC_ISR_MASK) {
                        "SDFEC enabling ECC irq with ECC IER failed");
                xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
                        XSDFEC_ECC_ISR_MASK);
                mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
                if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
                        "SDFEC disabling ECC irq with ECC IDR failed");

xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_irq irq;
        err = copy_from_user(&irq, arg, sizeof(irq));
                dev_err(xsdfec->dev, "%s failed for SDFEC%d",
                        __func__, xsdfec->fec_id);

        /* Setup tlast related IRQ */
        if (irq.enable_isr) {
                err = xsdfec_isr_enable(xsdfec, true);

        /* Setup ECC related IRQ */
        if (irq.enable_ecc_isr) {
                err = xsdfec_ecc_isr_enable(xsdfec, true);

#define XSDFEC_TURBO_SCALE_MASK (0xF)
#define XSDFEC_TURBO_SCALE_BIT_POS (8)
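
/*
 * XSDFEC_SET_TURBO: program the Turbo decode algorithm and scale factor.
 * The request is rejected when the device tree configured the core for LDPC.
 */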
xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_turbo turbo;

        err = copy_from_user(&turbo, arg, sizeof(turbo));
                dev_err(xsdfec->dev, "%s failed for SDFEC%d",
                        __func__, xsdfec->fec_id);

        /* Check to see what device tree says about the FEC codes */
        if (xsdfec->code == XSDFEC_LDPC_CODE) {
                        "%s: Unable to write Turbo to SDFEC%d check DT",
                        __func__, xsdfec->fec_id);
        } else if (xsdfec->code == XSDFEC_CODE_INVALID) {
                xsdfec->code = XSDFEC_TURBO_CODE;

        if (xsdfec->wr_protect)
                xsdfec_wr_protect(xsdfec, false);

        xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));
        turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
                XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
        xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);

xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_turbo turbo_params;

        if (xsdfec->code == XSDFEC_LDPC_CODE) {
                        "%s: SDFEC%d is configured for LDPC, check DT",
                        __func__, xsdfec->fec_id);

        reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

        turbo_params.scale = (reg_value >> XSDFEC_TURBO_SCALE_BIT_POS) &
                XSDFEC_TURBO_SCALE_MASK;
        turbo_params.alg = reg_value & 0x1;

        err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
                dev_err(xsdfec->dev, "%s failed for SDFEC%d",
                        __func__, xsdfec->fec_id);

#define XSDFEC_LDPC_REG_JUMP (0x10)
#define XSDFEC_REG0_N_MASK (0x0000FFFF)
#define XSDFEC_REG0_N_LSB (0)
#define XSDFEC_REG0_K_MASK (0x7fff0000)
#define XSDFEC_REG0_K_LSB (16)
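
/*
 * Pack the LDPC N and K values into LDPC code register 0 for the code
 * table entry at @offset, masking each field into its register position.
 */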
xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
        u32 n, u32 k, u32 offset)

        /* Use only lower 16 bits */
        if (n & ~XSDFEC_REG0_N_MASK)
                dev_err(xsdfec->dev, "N value is beyond 16 bits");
        n &= XSDFEC_REG0_N_MASK;
        n <<= XSDFEC_REG0_N_LSB;

        if (k & XSDFEC_REG0_K_MASK)
                dev_err(xsdfec->dev, "K value is beyond 16 bits");
        k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);

        if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
                > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
                        "Writing outside of LDPC reg0 space 0x%x",
                        XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
                        (offset * XSDFEC_LDPC_REG_JUMP));
        xsdfec_regwrite(xsdfec,
                XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
                (offset * XSDFEC_LDPC_REG_JUMP), wdata);

xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
        struct xsdfec_ldpc_params *ldpc_params)
        u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
                (code_id * XSDFEC_LDPC_REG_JUMP);

        if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
                        "Accessing outside of LDPC reg0 space 0x%x",

        reg_value = xsdfec_regread(xsdfec, reg_addr);

        ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;

        ldpc_params->k = (reg_value & XSDFEC_REG0_K_MASK) >> XSDFEC_REG0_K_LSB;

#define XSDFEC_REG1_PSIZE_MASK (0x000001ff)
#define XSDFEC_REG1_NO_PACKING_MASK (0x00000400)
#define XSDFEC_REG1_NO_PACKING_LSB (10)
#define XSDFEC_REG1_NM_MASK (0x000ff800)
#define XSDFEC_REG1_NM_LSB (11)
#define XSDFEC_REG1_BYPASS_MASK (0x00100000)

xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
        u32 no_packing, u32 nm, u32 offset)

        if (psize & ~XSDFEC_REG1_PSIZE_MASK)
                dev_err(xsdfec->dev, "Psize is beyond 10 bits");
        psize &= XSDFEC_REG1_PSIZE_MASK;

        if (no_packing != 0 && no_packing != 1)
                dev_err(xsdfec->dev, "No-packing bit register invalid");
        no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
                XSDFEC_REG1_NO_PACKING_MASK);

        if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
                dev_err(xsdfec->dev, "NM is beyond 10 bits");
        nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;

        wdata = nm | no_packing | psize;
        if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
                > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
                        "Writing outside of LDPC reg1 space 0x%x",
                        XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
                        (offset * XSDFEC_LDPC_REG_JUMP));
        xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
                (offset * XSDFEC_LDPC_REG_JUMP), wdata);

xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
        struct xsdfec_ldpc_params *ldpc_params)
        u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
                (code_id * XSDFEC_LDPC_REG_JUMP);

        if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
                        "Accessing outside of LDPC reg1 space 0x%x",

        reg_value = xsdfec_regread(xsdfec, reg_addr);

        ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;

        ldpc_params->no_packing = ((reg_value & XSDFEC_REG1_NO_PACKING_MASK) >>
                XSDFEC_REG1_NO_PACKING_LSB);

        ldpc_params->nm = (reg_value & XSDFEC_REG1_NM_MASK) >>

#define XSDFEC_REG2_NLAYERS_MASK (0x000001FF)
#define XSDFEC_REG2_NLAYERS_LSB (0)
#define XSDFEC_REG2_NNMQC_MASK (0x000FFE00)
#define XSDFEC_REG2_NMQC_LSB (9)
#define XSDFEC_REG2_NORM_TYPE_MASK (0x00100000)
#define XSDFEC_REG2_NORM_TYPE_LSB (20)
#define XSDFEC_REG2_SPECIAL_QC_MASK (0x00200000)
#define XSDFEC_REG2_SPEICAL_QC_LSB (21)
#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x00400000)
#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x01800000)
#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)

xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
        u32 norm_type, u32 special_qc, u32 no_final_parity,
        u32 max_schedule, u32 offset)

        if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
                XSDFEC_REG2_NLAYERS_LSB))
                dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
        nlayers &= XSDFEC_REG2_NLAYERS_MASK;

        if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
                dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
        nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;

                dev_err(xsdfec->dev, "Norm type is invalid");
        norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
                XSDFEC_REG2_NORM_TYPE_MASK);
                dev_err(xsdfec->dev, "Special QC is invalid");
        special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
                XSDFEC_REG2_SPECIAL_QC_MASK);

        if (no_final_parity > 1)
                dev_err(xsdfec->dev, "No final parity check invalid");
                ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
                XSDFEC_REG2_NO_FINAL_PARITY_MASK);
        if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
                XSDFEC_REG2_MAX_SCHEDULE_LSB))
                dev_err(xsdfec->dev, "Max Schedule exceeds 2 bits");
        max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
                XSDFEC_REG2_MAX_SCHEDULE_MASK);

        wdata = (max_schedule | no_final_parity | special_qc | norm_type |

        if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
                > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
                        "Writing outside of LDPC reg2 space 0x%x",
                        XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
                        (offset * XSDFEC_LDPC_REG_JUMP));
        xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
                (offset * XSDFEC_LDPC_REG_JUMP), wdata);

xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
        struct xsdfec_ldpc_params *ldpc_params)
        u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
                (code_id * XSDFEC_LDPC_REG_JUMP);

        if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
                        "Accessing outside of LDPC reg2 space 0x%x",

        reg_value = xsdfec_regread(xsdfec, reg_addr);

        ldpc_params->nlayers = ((reg_value >> XSDFEC_REG2_NLAYERS_LSB) &
                XSDFEC_REG2_NLAYERS_MASK);

        ldpc_params->nmqc = (reg_value & XSDFEC_REG2_NNMQC_MASK) >>
                XSDFEC_REG2_NMQC_LSB;

        ldpc_params->norm_type = ((reg_value & XSDFEC_REG2_NORM_TYPE_MASK) >>
                XSDFEC_REG2_NORM_TYPE_LSB);

        ldpc_params->special_qc = ((reg_value & XSDFEC_REG2_SPECIAL_QC_MASK) >>
                XSDFEC_REG2_SPEICAL_QC_LSB);

        ldpc_params->no_final_parity =
                ((reg_value & XSDFEC_REG2_NO_FINAL_PARITY_MASK) >>
                XSDFEC_REG2_NO_FINAL_PARITY_LSB);

        ldpc_params->max_schedule =
                ((reg_value & XSDFEC_REG2_MAX_SCHEDULE_MASK) >>
                XSDFEC_REG2_MAX_SCHEDULE_LSB);

#define XSDFEC_REG3_LA_OFF_LSB (8)
#define XSDFEC_REG3_QC_OFF_LSB (16)

xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
        u8 la_off, u16 qc_off, u32 offset)

        wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
                (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
        if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
                (offset * XSDFEC_LDPC_REG_JUMP) >
                XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
                        "Writing outside of LDPC reg3 space 0x%x",
                        XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
                        (offset * XSDFEC_LDPC_REG_JUMP));
        xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
                (offset * XSDFEC_LDPC_REG_JUMP), wdata);

xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
        struct xsdfec_ldpc_params *ldpc_params)
        u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
                (code_id * XSDFEC_LDPC_REG_JUMP);

        if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
                        "Accessing outside of LDPC reg3 space 0x%x",

        reg_value = xsdfec_regread(xsdfec, reg_addr);

        ldpc_params->qc_off = (reg_value >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
        ldpc_params->la_off = (reg_value >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
        ldpc_params->sc_off = (reg_value & 0xFF);

#define XSDFEC_SC_TABLE_DEPTH (0x3fc)
#define XSDFEC_REG_WIDTH_JUMP (4)
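
/*
 * Write @len words into the shared scale (SC) table at @offset. Writes that
 * would run past the end of the table, or that could touch entries already
 * in use (tracked by sc_off), are rejected.
 */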
xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
        u32 *sc_ptr, u32 len)

        * Writes that go beyond the length of the
        * Shared Scale (SC) table should fail
        if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
                dev_err(xsdfec->dev, "Write exceeds SC table length");

        * sc_off tracks the last written location in the
        * Shared Scale (SC) table. Those shared codes might
        * be in use. Updating them without quiescing the device
        * can put the SDFEC device in an indeterminate state.
        if ((XSDFEC_REG_WIDTH_JUMP * offset) < xsdfec->sc_off) {
                dev_err(xsdfec->dev, "Might write to in use shared SC code");

        for (reg = 0; reg < len; reg++) {
                xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
                        (offset + reg) * XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
        xsdfec->sc_off = reg + (XSDFEC_REG_WIDTH_JUMP * offset);

xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
        u32 *sc_ptr, u32 len)

        u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));

        if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
                dev_err(xsdfec->dev, "Access will exceed SC table length");

        for (reg = 0; reg < len; reg++) {
                reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
                        ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);

                sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);

#define XSDFEC_LA_TABLE_DEPTH (0xFFC)

xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
        u32 *la_ptr, u32 len)

        if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
                dev_err(xsdfec->dev, "Write exceeds LA table length");

        if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->la_off) {
                dev_err(xsdfec->dev, "Might write to in use shared LA code");

        for (reg = 0; reg < len; reg++) {
                xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
                        (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
        xsdfec->la_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);

xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
        u32 *la_ptr, u32 len)

        u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));

        if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
                dev_err(xsdfec->dev, "Access will exceed LA table length");

        for (reg = 0; reg < len; reg++) {
                reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
                        ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);

                la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);

#define XSDFEC_QC_TABLE_DEPTH (0x7FFC)

xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
        u32 offset, u32 *qc_ptr, u32 len)

        if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
                dev_err(xsdfec->dev, "Write exceeds QC table length");

        if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->qc_off) {
                dev_err(xsdfec->dev, "Might write to in use shared QC code");

        for (reg = 0; reg < len; reg++) {
                xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
                        (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);

        xsdfec->qc_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);

xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
        u32 offset, u32 *qc_ptr, u32 len)

        u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));

        if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
                dev_err(xsdfec->dev, "Access will exceed QC table length");

        for (reg = 0; reg < len; reg++) {
                reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
                        (offset + reg) * XSDFEC_REG_WIDTH_JUMP;

                qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
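
/*
 * XSDFEC_ADD_LDPC_CODE_PARAMS: copy an LDPC code definition from user space
 * and program code registers 0-3 plus the shared SC, LA and QC tables.
 */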
xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_ldpc_params *ldpc;
        ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);

        err = copy_from_user(ldpc, arg, sizeof(*ldpc));
                        "%s failed to copy from user for SDFEC%d",
                        __func__, xsdfec->fec_id);
        if (xsdfec->code == XSDFEC_TURBO_CODE) {
                        "%s: Unable to write LDPC to SDFEC%d check DT",
                        __func__, xsdfec->fec_id);
        xsdfec->code = XSDFEC_LDPC_CODE;
        /* Disable Write Protection before proceeding */
        if (xsdfec->wr_protect)
                xsdfec_wr_protect(xsdfec, false);

        /* Write LDPC to CODE Register */
        xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));

        err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);

        err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
                ldpc->nm, ldpc->code_id);

        err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
                ldpc->norm_type, ldpc->special_qc,
                ldpc->no_final_parity, ldpc->max_schedule,

        err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
                ldpc->la_off, ldpc->qc_off, ldpc->code_id);

        /* Write Shared Codes */
        err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
                ldpc->sc_table, ldpc->nlayers);

        err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
                ldpc->la_table, ldpc->nlayers);

        err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
                ldpc->qc_table, ldpc->nqc);

xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
        struct xsdfec_ldpc_params *ldpc_params;

        if (xsdfec->code == XSDFEC_TURBO_CODE) {
                        "%s: SDFEC%d is configured for TURBO, check DT",
                        __func__, xsdfec->fec_id);

        ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);

        err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
                        "%s failed to copy from user for SDFEC%d",
                        __func__, xsdfec->fec_id);

        err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,

        err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,

        err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,

        err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,

        * Collect the shared table values; this needs to happen after reading
        * the code registers above.
        err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
                ldpc_params->sc_table,
                ldpc_params->nlayers);

        err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
                ldpc_params->la_table,
                ldpc_params->nlayers);

        err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
                ldpc_params->qc_table,

        err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
                dev_err(xsdfec->dev, "%s failed for SDFEC%d",
                        __func__, xsdfec->fec_id);
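
/*
 * XSDFEC_START_DEV: check that a code has been configured and matches what
 * the hardware reports, program the order and AXIS registers, then write
 * protect the core and mark the device as started.
 */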
static int xsdfec_start(struct xsdfec_dev *xsdfec)

        /* Verify Code is loaded */
        if (xsdfec->code == XSDFEC_CODE_INVALID) {
                dev_err(xsdfec->dev,
                        "%s : set code before start for SDFEC%d",
                        __func__, xsdfec->fec_id);
        regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
        if (regread + 1 != xsdfec->code) {
                dev_err(xsdfec->dev,
                        "%s SDFEC HW code does not match driver code",

        /* Set Order to maintain order */
        xsdfec->order = XSDFEC_MAINTAIN_ORDER;
        xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (xsdfec->order - 1));
        /* Set AXIS width */
        xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, 0);
        /* Set AXIS enable */
        xsdfec_regwrite(xsdfec,
                XSDFEC_AXIS_ENABLE_ADDR,
                XSDFEC_AXIS_ENABLE_MASK);
        /* Write Protect Code and Registers */
        xsdfec_wr_protect(xsdfec, true);

        xsdfec->state = XSDFEC_STARTED;

xsdfec_stop(struct xsdfec_dev *xsdfec)

        if (xsdfec->state != XSDFEC_STARTED)
                dev_err(xsdfec->dev, "Device not started correctly");
        /* Disable Write Protect */
        xsdfec_wr_protect(xsdfec, false);
        /* Disable AXIS_ENABLE register */
        regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
        regread &= (~XSDFEC_AXIS_ENABLE_MASK);
        xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);

        xsdfec->state = XSDFEC_STOPPED;

 * Reset will happen asynchronously
 * since there is no in-band reset register.
 * Prepare the driver for reset.
xsdfec_reset_req(struct xsdfec_dev *xsdfec)
        xsdfec->state = XSDFEC_INIT;
        xsdfec->order = XSDFEC_INVALID_ORDER;
        xsdfec->wr_protect = false;
        atomic_set(&xsdfec->isr_err_count, 0);
        atomic_set(&xsdfec->uecc_count, 0);
        atomic_set(&xsdfec->cecc_count, 0);
        atomic_inc(&xsdfec->reset_count);
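
/*
 * Top-level ioctl dispatcher for the SDFEC character device. An illustrative
 * user-space sequence (not part of the driver, shown only as an example and
 * assuming udev has created the device node, typically /dev/xsdfec0):
 *
 *      fd = open("/dev/xsdfec0", O_RDWR);
 *      ioctl(fd, XSDFEC_START_DEV);
 *      ioctl(fd, XSDFEC_GET_STATUS, &status);
 *      ioctl(fd, XSDFEC_STOP_DEV);
 *      close(fd);
 */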
xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
        struct xsdfec_dev *xsdfec = fptr->private_data;
        void __user *arg = (void __user *)data;

        /* In failed state allow only reset and get status IOCTLs */
        if (xsdfec->state == XSDFEC_NEEDS_RESET &&
                (cmd != XSDFEC_RESET_REQ && cmd != XSDFEC_GET_STATUS)) {
                dev_err(xsdfec->dev,
                        "SDFEC%d in failed state. Reset Required",

        case XSDFEC_START_DEV:
                rval = xsdfec_start(xsdfec);
        case XSDFEC_STOP_DEV:
                rval = xsdfec_stop(xsdfec);
        case XSDFEC_RESET_REQ:
                rval = xsdfec_reset_req(xsdfec);
        case XSDFEC_GET_STATUS:
                rval = xsdfec_get_status(xsdfec, arg);
        case XSDFEC_GET_CONFIG:
                rval = xsdfec_get_config(xsdfec, arg);
        case XSDFEC_SET_IRQ:
                rval = xsdfec_set_irq(xsdfec, arg);
        case XSDFEC_SET_TURBO:
                rval = xsdfec_set_turbo(xsdfec, arg);
        case XSDFEC_GET_TURBO:
                rval = xsdfec_get_turbo(xsdfec, arg);
        case XSDFEC_ADD_LDPC_CODE_PARAMS:
                rval = xsdfec_add_ldpc(xsdfec, arg);
        case XSDFEC_GET_LDPC_CODE_PARAMS:
                rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
                /* Should not get here */
                dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");

xsdfec_poll(struct file *file, poll_table *wait)

        struct xsdfec_dev *xsdfec = file->private_data;

                return POLLNVAL | POLLHUP;

        poll_wait(file, &xsdfec->waitq, wait);

        /* XSDFEC ISR detected an error */
        if (xsdfec->state == XSDFEC_NEEDS_RESET)
                mask = POLLIN | POLLRDNORM;
                mask = POLLPRI | POLLERR;

static const struct file_operations xsdfec_fops = {
        .owner = THIS_MODULE,
        .open = xsdfec_dev_open,
        .release = xsdfec_dev_release,
        .unlocked_ioctl = xsdfec_dev_ioctl,
        .poll = xsdfec_poll,

xsdfec_parse_of(struct xsdfec_dev *xsdfec)
        struct device *dev = xsdfec->dev;
        struct device_node *node = dev->of_node;
        const char *fec_code;
        const char *fec_op_mode;

        rval = of_property_read_string(node,
                "xlnx,sdfec-op-mode",
                dev_err(dev, "xlnx,sdfec-op-mode not in DT");

        if (!strcasecmp(fec_op_mode, "encode")) {
                xsdfec->op_mode = XSDFEC_ENCODE;
        } else if (!strcasecmp(fec_op_mode, "decode")) {
                xsdfec->op_mode = XSDFEC_DECODE;
                dev_err(dev, "Encode or Decode not specified in DT");

        rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
                dev_err(dev, "xlnx,sdfec-code not in DT");

        if (!strcasecmp(fec_code, "ldpc")) {
                xsdfec->code = XSDFEC_LDPC_CODE;
        } else if (!strcasecmp(fec_code, "turbo")) {
                xsdfec->code = XSDFEC_TURBO_CODE;
                dev_err(xsdfec->dev, "Invalid Code in DT");

xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)

        cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
        uecc = ecc_err & XSDFEC_ECC_ISR_MBE;

        uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
        atomic_add(hweight32(cecc), &xsdfec->cecc_count);

        if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
                dev_err(xsdfec->dev,
                        "Multi-bit error on xsdfec%d. Needs reset",

        /* Clear ECC errors */
        xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);

xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)

        /* Update ISR error counts */
        isr_err_cnt = atomic_add_return(hweight32(isr_err),
                &xsdfec->isr_err_count);
        if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
                dev_err(xsdfec->dev,
                        "Tlast, DIN_WORDS or DOUT_WORDS not correct");

        /* Clear ISR error status */
        xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, 0);

xsdfec_reset_required(struct xsdfec_dev *xsdfec)
        xsdfec->state = XSDFEC_NEEDS_RESET;
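
/*
 * Threaded IRQ handler: mask the interrupts, read the ISR and ECC ISR
 * registers, log any errors, flag that a reset is required for fatal
 * conditions, and wake up pollers before unmasking the interrupts again.
 */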
xsdfec_irq_thread(int irq, void *dev_id)
        struct xsdfec_dev *xsdfec = dev_id;
        irqreturn_t ret = IRQ_HANDLED;
        bool fatal_err = false;

        WARN_ON(xsdfec->irq != irq);

        /* Mask Interrupts */
        xsdfec_isr_enable(xsdfec, false);
        xsdfec_ecc_isr_enable(xsdfec, false);

        /* Read Interrupt Status Registers */
        ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
        isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);

        if (ecc_err & XSDFEC_ECC_ISR_MBE) {
                /* Multi-Bit Errors need Reset */
                xsdfec_log_ecc_errors(xsdfec, ecc_err);
                xsdfec_reset_required(xsdfec);
        } else if (isr_err & XSDFEC_ISR_MASK) {
                 * Tlast, DIN_WORDS and DOUT_WORDS related
                xsdfec_log_isr_errors(xsdfec, isr_err);
                xsdfec_reset_required(xsdfec);
        } else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
                /* Correctable ECC Errors */
                xsdfec_log_ecc_errors(xsdfec, ecc_err);

        wake_up_interruptible(&xsdfec->waitq);

        /* Unmask Interrupts */
        xsdfec_isr_enable(xsdfec, true);
        xsdfec_ecc_isr_enable(xsdfec, true);
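
/*
 * Probe one SDFEC instance: map its registers, parse the device tree
 * properties, register the (optional) threaded IRQ handler and expose the
 * character device for this instance.
 */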
xsdfec_probe(struct platform_device *pdev)
        struct xsdfec_dev *xsdfec;
        struct device *dev_create;
        struct resource *res;
        bool irq_enabled = true;

        xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);

        xsdfec->dev = &pdev->dev;
        if (atomic_read(&xsdfec_ndevs) > DRIVER_MAX_DEV) {
                        "Cannot instantiate more than %d SDFEC instances",
                        (DRIVER_MAX_DEV + 1));
        xsdfec->fec_id = atomic_read(&xsdfec_ndevs);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        xsdfec->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(xsdfec->regs)) {
                dev_err(dev, "Unable to map resource");
                err = PTR_ERR(xsdfec->regs);
                goto err_xsdfec_dev;

        xsdfec->irq = platform_get_irq(pdev, 0);
        if (xsdfec->irq < 0) {
                dev_dbg(dev, "platform_get_irq failed");
                irq_enabled = false;

        err = xsdfec_parse_of(xsdfec);
                goto err_xsdfec_dev;

        /* Save driver private data */
        platform_set_drvdata(pdev, xsdfec);

        init_waitqueue_head(&xsdfec->waitq);
        /* Register IRQ thread */
        err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
                dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
                goto err_xsdfec_dev;

        cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
        xsdfec->xsdfec_cdev.owner = THIS_MODULE;
        err = cdev_add(&xsdfec->xsdfec_cdev,
                MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id), 1);
                dev_err(dev, "cdev_add failed");
                goto err_xsdfec_dev;

        if (!xsdfec_class) {
                dev_err(dev, "xsdfec class not created correctly");
                goto err_xsdfec_cdev;

        dev_create = device_create(xsdfec_class, dev,
                MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id),
                xsdfec, "xsdfec%d", xsdfec->fec_id);
        if (IS_ERR(dev_create)) {
                dev_err(dev, "unable to create device");
                err = PTR_ERR(dev_create);
                goto err_xsdfec_cdev;

        atomic_set(&xsdfec->open_count, 1);
        dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->fec_id);
        atomic_inc(&xsdfec_ndevs);

        /* Failure cleanup */
        cdev_del(&xsdfec->xsdfec_cdev);

xsdfec_remove(struct platform_device *pdev)
        struct xsdfec_dev *xsdfec;
        struct device *dev = &pdev->dev;

        xsdfec = platform_get_drvdata(pdev);

        if (!xsdfec_class) {
                dev_err(dev, "xsdfec_class is NULL");

        device_destroy(xsdfec_class,
                MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id));
        cdev_del(&xsdfec->xsdfec_cdev);
        atomic_dec(&xsdfec_ndevs);

static const struct of_device_id xsdfec_of_match[] = {
        { .compatible = "xlnx,fec-engine", },
        { /* end of table */ }
MODULE_DEVICE_TABLE(of, xsdfec_of_match);

static struct platform_driver xsdfec_driver = {
        .name = "xilinx-sdfec",
        .of_match_table = xsdfec_of_match,
        .probe = xsdfec_probe,
        .remove = xsdfec_remove,

static int __init xsdfec_init_mod(void)

        xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
        if (IS_ERR(xsdfec_class)) {
                err = PTR_ERR(xsdfec_class);
                pr_err("%s : Unable to register xsdfec class", __func__);

        err = alloc_chrdev_region(&xsdfec_devt,
                0, DRIVER_MAX_DEV, DRIVER_NAME);
                pr_err("%s : Unable to get major number", __func__);
                goto err_xsdfec_class;

        err = platform_driver_register(&xsdfec_driver);
                pr_err("%s Unable to register %s driver",
                        __func__, DRIVER_NAME);
                goto err_xsdfec_drv;

        unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
        class_destroy(xsdfec_class);

static void __exit xsdfec_cleanup_mod(void)
        platform_driver_unregister(&xsdfec_driver);
        unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
        class_destroy(xsdfec_class);
        xsdfec_class = NULL;

module_init(xsdfec_init_mod);
module_exit(xsdfec_cleanup_mod);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);