misc: xilinx-sdfec: Add IOCTL to get LDPC Params
1 /*
2  * Xilinx SDFEC
3  *
4  * Copyright (C) 2016 - 2017 Xilinx, Inc.
5  *
6  * Description:
7  * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a character device interface (/dev/xsdfecN) and supports
 * file operations open(), close(), ioctl() and poll().
10  *
11  * This program is free software: you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  */
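
/*
 * Illustrative user-space usage (a sketch, not part of the driver): the
 * probe routine below creates a character device named "xsdfec<N>", so a
 * node such as /dev/xsdfec0 is assumed to be created by udev (the exact
 * path may differ on a given system).  The ioctls are declared in
 * <uapi/misc/xilinx_sdfec.h>, e.g.:
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	struct xsdfec_status status;
 *
 *	if (fd >= 0 && !ioctl(fd, XSDFEC_GET_STATUS, &status))
 *		printf("SDFEC%d state %d\n", status.fec_id, status.state);
 *	close(fd);
 */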
24
25 #include <linux/cdev.h>
26 #include <linux/device.h>
27 #include <linux/fs.h>
28 #include <linux/io.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/of_platform.h>
34 #include <linux/platform_device.h>
35 #include <linux/poll.h>
36 #include <linux/slab.h>
37 #include <linux/uaccess.h>
38
39 #include <uapi/misc/xilinx_sdfec.h>
40
41 #define DRIVER_NAME     "xilinx_sdfec"
42 #define DRIVER_VERSION  "0.3"
43 #define DRIVER_MAX_DEV  (6)
44
static struct class *xsdfec_class;
46 static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
47 static dev_t xsdfec_devt;
48
49 /* Xilinx SDFEC Register Map */
50 #define XSDFEC_AXI_WR_PROTECT_ADDR              (0x00000)
51 #define XSDFEC_CODE_WR_PROTECT_ADDR             (0x00004)
52 #define XSDFEC_ACTIVE_ADDR                      (0x00008)
53 #define XSDFEC_AXIS_WIDTH_ADDR                  (0x0000c)
54 #define XSDFEC_AXIS_ENABLE_ADDR                 (0x00010)
55 #define XSDFEC_AXIS_ENABLE_MASK                 (0x0001F)
56 #define XSDFEC_FEC_CODE_ADDR                    (0x00014)
57 #define XSDFEC_ORDER_ADDR                       (0x00018)
58
59 /* Interrupt Status Register Bit Mask*/
60 #define XSDFEC_ISR_MASK                         (0x0003F)
61 /* Interrupt Status Register */
62 #define XSDFEC_ISR_ADDR                         (0x0001c)
63 /* Write Only - Interrupt Enable Register */
64 #define XSDFEC_IER_ADDR                         (0x00020)
65 /* Write Only - Interrupt Disable Register */
66 #define XSDFEC_IDR_ADDR                         (0x00024)
67 /* Read Only - Interrupt Mask Register */
68 #define XSDFEC_IMR_ADDR                         (0x00028)
69
70 /* Single Bit Errors */
71 #define XSDFEC_ECC_ISR_SBE                      (0x7FF)
72 /* Multi Bit Errors */
73 #define XSDFEC_ECC_ISR_MBE                      (0x3FF800)
74 /* ECC Interrupt Status Bit Mask */
75 #define XSDFEC_ECC_ISR_MASK     (XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)
76
/* Multi Bit Error Position */
78 #define XSDFEC_ECC_MULTI_BIT_POS                (11)
79 #define XSDFEC_ERROR_MAX_THRESHOLD              (100)
80
81 /* ECC Interrupt Status Register */
82 #define XSDFEC_ECC_ISR_ADDR                     (0x0002c)
83 /* Write Only - ECC Interrupt Enable Register */
84 #define XSDFEC_ECC_IER_ADDR                     (0x00030)
85 /* Write Only - ECC Interrupt Disable Register */
86 #define XSDFEC_ECC_IDR_ADDR                     (0x00034)
87 /* Read Only - ECC Interrupt Mask Register */
88 #define XSDFEC_ECC_IMR_ADDR                     (0x00038)
89
90 #define XSDFEC_BYPASS_ADDR                      (0x0003c)
91 #define XSDFEC_TEST_EMA_ADDR_BASE               (0x00080)
92 #define XSDFEC_TEST_EMA_ADDR_HIGH               (0x00089)
93 #define XSDFEC_TURBO_ADDR                       (0x00100)
94 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE         (0x02000)
95 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH         (0x021fc)
96 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE         (0x02004)
97 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH         (0x02200)
98 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE         (0x02008)
99 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH         (0x02204)
100 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE         (0x0200c)
101 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH         (0x02208)
102
103 /**
104  * struct xsdfec_dev - Driver data for SDFEC
 * @regs: base address of the mapped device register space
106  * @dev: pointer to device struct
107  * @fec_id: Instance number
108  * @intr_enabled: indicates IRQ enabled
109  * @wr_protect: indicates Write Protect enabled
110  * @code: LDPC or Turbo Codes being used
111  * @order: In-Order or Out-of-Order
112  * @state: State of the SDFEC device
113  * @op_mode: Operating in Encode or Decode
114  * @isr_err_count: Count of ISR errors
115  * @cecc_count: Count of Correctable ECC errors (SBE)
116  * @uecc_count: Count of Uncorrectable ECC errors (MBE)
117  * @reset_count: Count of Resets requested
118  * @open_count: Count of char device being opened
119  * @irq: IRQ number
120  * @xsdfec_cdev: Character device handle
121  * @sc_off: Shared Scale Table Offset
122  * @qc_off: Shared Circulant Table Offset
123  * @la_off: Shared Layer Table Offset
124  * @waitq: Driver wait queue
125  *
126  * This structure contains necessary state for SDFEC driver to operate
127  */
128 struct xsdfec_dev {
129         void __iomem *regs;
130         struct device *dev;
131         s32  fec_id;
132         bool intr_enabled;
133         bool wr_protect;
134         enum xsdfec_code code;
135         enum xsdfec_order order;
136         enum xsdfec_state state;
137         enum xsdfec_op_mode op_mode;
138         atomic_t isr_err_count;
139         atomic_t cecc_count;
140         atomic_t uecc_count;
141         atomic_t reset_count;
142         atomic_t open_count;
143         int  irq;
144         struct cdev xsdfec_cdev;
145         int sc_off;
146         int qc_off;
147         int la_off;
148         wait_queue_head_t waitq;
149 };
150
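/* Register write helper; the write is skipped while SDFEC write protect is set */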
151 static inline void
152 xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
153 {
154         if (xsdfec->wr_protect) {
155                 dev_err(xsdfec->dev, "SDFEC in write protect");
156                 return;
157         }
158
159         dev_dbg(xsdfec->dev,
160                 "Writing 0x%x to offset 0x%x", value, addr);
161         iowrite32(value, xsdfec->regs + addr);
162 }
163
164 static inline u32
165 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
166 {
167         u32 rval;
168
169         rval = ioread32(xsdfec->regs + addr);
170         dev_info(xsdfec->dev,
171                  "Read value = 0x%x from offset 0x%x",
172                  rval, addr);
173         return rval;
174 }
175
176 #define XSDFEC_WRITE_PROTECT_ENABLE     (1)
177 #define XSDFEC_WRITE_PROTECT_DISABLE    (0)
178 static void
179 xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
180 {
181         if (wr_pr) {
182                 xsdfec_regwrite(xsdfec,
183                                 XSDFEC_CODE_WR_PROTECT_ADDR,
184                                 XSDFEC_WRITE_PROTECT_ENABLE);
185                 xsdfec_regwrite(xsdfec,
186                                 XSDFEC_AXI_WR_PROTECT_ADDR,
187                                 XSDFEC_WRITE_PROTECT_ENABLE);
        } else {
                /*
                 * Clear the driver flag before writing, otherwise
                 * xsdfec_regwrite() rejects the unprotect writes below
                 */
                xsdfec->wr_protect = false;
189                 xsdfec_regwrite(xsdfec,
190                                 XSDFEC_AXI_WR_PROTECT_ADDR,
191                                 XSDFEC_WRITE_PROTECT_DISABLE);
192                 xsdfec_regwrite(xsdfec,
193                                 XSDFEC_CODE_WR_PROTECT_ADDR,
194                                 XSDFEC_WRITE_PROTECT_DISABLE);
195         }
196         xsdfec->wr_protect = wr_pr;
197 }
198
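/* open(): allow only a single concurrent user per SDFEC instance */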
199 static int
200 xsdfec_dev_open(struct inode *iptr, struct file *fptr)
201 {
202         struct xsdfec_dev *xsdfec;
203
204         xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
205         if (!xsdfec)
206                 return  -EAGAIN;
207
        /* Only one open per device at a time */
        if (!atomic_dec_and_test(&xsdfec->open_count)) {
                /* Undo the decrement so a later open can still succeed */
                atomic_inc(&xsdfec->open_count);
                return -EBUSY;
        }
211
212         fptr->private_data = xsdfec;
213         return 0;
214 }
215
216 static int
217 xsdfec_dev_release(struct inode *iptr, struct file *fptr)
218 {
219         struct xsdfec_dev *xsdfec;
220
221         xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
222         if (!xsdfec)
223                 return -EAGAIN;
224
225         atomic_inc(&xsdfec->open_count);
226         return 0;
227 }
228
229 #define XSDFEC_IS_ACTIVITY_SET  (0x1)
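/* XSDFEC_GET_STATUS ioctl: report driver state and the ACTIVE register to user space */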
230 static int
231 xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
232 {
233         struct xsdfec_status status;
234         int err = 0;
235
236         status.fec_id = xsdfec->fec_id;
237         status.state = xsdfec->state;
238         status.code = xsdfec->code;
239         status.order = xsdfec->order;
240         status.mode = xsdfec->op_mode;
241         status.activity  =
242                 (xsdfec_regread(xsdfec,
243                                 XSDFEC_ACTIVE_ADDR) &
244                                 XSDFEC_IS_ACTIVITY_SET);
245         status.cecc_count = atomic_read(&xsdfec->cecc_count);
246
247         err = copy_to_user(arg, &status, sizeof(status));
248         if (err) {
249                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
250                         __func__, xsdfec->fec_id);
251                 err = -EFAULT;
252         }
253         return err;
254 }
255
256 static int
257 xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
258 {
259         struct xsdfec_config config;
260         int err = 0;
261
262         config.fec_id = xsdfec->fec_id;
263         config.state = xsdfec->state;
264         config.code = xsdfec->code;
265         config.mode = xsdfec->op_mode;
266         config.order = xsdfec->order;
267
268         err = copy_to_user(arg, &config, sizeof(config));
269         if (err) {
270                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
271                         __func__, xsdfec->fec_id);
272                 err = -EFAULT;
273         }
274         return err;
275 }
276
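/* Enable or disable the tlast/DIN_WORDS/DOUT_WORDS interrupts and verify the result via the IMR */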
277 static int
278 xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
279 {
280         u32 mask_read;
281
282         if (enable) {
283                 /* Enable */
284                 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
285                                 XSDFEC_ISR_MASK);
286                 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
287                 if (mask_read & XSDFEC_ISR_MASK) {
288                         dev_err(xsdfec->dev,
289                                 "SDFEC enabling irq with IER failed");
290                         return -EIO;
291                 }
292         } else {
293                 /* Disable */
294                 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
295                                 XSDFEC_ISR_MASK);
296                 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
297                 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
298                         dev_err(xsdfec->dev,
299                                 "SDFEC disabling irq with IDR failed");
300                         return -EIO;
301                 }
302         }
303         return 0;
304 }
305
306 static int
307 xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
308 {
309         u32 mask_read;
310
311         if (enable) {
312                 /* Enable */
313                 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
314                                 XSDFEC_ECC_ISR_MASK);
315                 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
316                 if (mask_read & XSDFEC_ECC_ISR_MASK) {
317                         dev_err(xsdfec->dev,
318                                 "SDFEC enabling ECC irq with ECC IER failed");
319                         return -EIO;
320                 }
321         } else {
322                 /* Disable */
323                 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
324                                 XSDFEC_ECC_ISR_MASK);
325                 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
326                 if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
327                         dev_err(xsdfec->dev,
328                                 "SDFEC disable ECC irq with ECC IDR failed");
329                         return -EIO;
330                 }
331         }
332         return 0;
333 }
334
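/* XSDFEC_SET_IRQ ioctl: enable the ISR and/or ECC interrupt sources requested by user space */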
335 static int
336 xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
337 {
338         struct xsdfec_irq  irq;
339         int err = 0;
340
341         err = copy_from_user(&irq, arg, sizeof(irq));
342         if (err) {
343                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
344                         __func__, xsdfec->fec_id);
345                 return -EFAULT;
346         }
347
348         /* Setup tlast related IRQ */
349         if (irq.enable_isr) {
350                 err = xsdfec_isr_enable(xsdfec, true);
351                 if (err < 0)
352                         return err;
353         }
354
355         /* Setup ECC related IRQ */
356         if (irq.enable_ecc_isr) {
357                 err = xsdfec_ecc_isr_enable(xsdfec, true);
358                 if (err < 0)
359                         return err;
360         }
361
362         return 0;
363 }
364
365 #define XSDFEC_TURBO_SCALE_MASK         (0xF)
366 #define XSDFEC_TURBO_SCALE_BIT_POS      (8)
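/* XSDFEC_SET_TURBO ioctl: select the Turbo code and program its algorithm and scale factor */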
367 static int
368 xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
369 {
370         struct xsdfec_turbo turbo;
371         int err = 0;
372         u32 turbo_write = 0;
373
374         err = copy_from_user(&turbo, arg, sizeof(turbo));
375         if (err) {
376                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
377                         __func__, xsdfec->fec_id);
378                 return -EFAULT;
379         }
380
381         /* Check to see what device tree says about the FEC codes */
382         if (xsdfec->code == XSDFEC_LDPC_CODE) {
383                 dev_err(xsdfec->dev,
384                         "%s: Unable to write Turbo to SDFEC%d check DT",
385                                 __func__, xsdfec->fec_id);
386                 return -EIO;
387         } else if (xsdfec->code == XSDFEC_CODE_INVALID) {
388                 xsdfec->code = XSDFEC_TURBO_CODE;
389         }
390
391         if (xsdfec->wr_protect)
392                 xsdfec_wr_protect(xsdfec, false);
393
394         xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));
395         turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
396                         XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
397         xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
398         return err;
399 }
400
401 static int
402 xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
403 {
404         u32 reg_value;
405         struct xsdfec_turbo turbo_params;
406         int err;
407
408         if (xsdfec->code == XSDFEC_LDPC_CODE) {
409                 dev_err(xsdfec->dev,
410                         "%s: SDFEC%d is configured for LDPC, check DT",
411                         __func__, xsdfec->fec_id);
412                 return -EIO;
413         }
414
415         reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
416
        turbo_params.scale = (reg_value >> XSDFEC_TURBO_SCALE_BIT_POS) &
                              XSDFEC_TURBO_SCALE_MASK;
419         turbo_params.alg = reg_value & 0x1;
420
421         err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
422         if (err) {
423                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
424                         __func__, xsdfec->fec_id);
425                 err = -EFAULT;
426         }
427
428         return err;
429 }
430
431 #define XSDFEC_LDPC_REG_JUMP    (0x10)
432 #define XSDFEC_REG0_N_MASK      (0x0000FFFF)
433 #define XSDFEC_REG0_N_LSB       (0)
434 #define XSDFEC_REG0_K_MASK      (0x7fff0000)
435 #define XSDFEC_REG0_K_LSB       (16)
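/* Pack the N and K values into LDPC code register 0 at the given code table offset */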
436 static int
437 xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
438                   u32 n, u32 k, u32 offset)
439 {
440         u32 wdata;
441
442         /* Use only lower 16 bits */
443         if (n & ~XSDFEC_REG0_N_MASK)
444                 dev_err(xsdfec->dev, "N value is beyond 16 bits");
445         n &= XSDFEC_REG0_N_MASK;
446         n <<= XSDFEC_REG0_N_LSB;
447
        if (k & ~(XSDFEC_REG0_K_MASK >> XSDFEC_REG0_K_LSB))
                dev_err(xsdfec->dev, "K value is beyond 15 bits");
450
451         k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);
452         wdata = k | n;
453
454         if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
455                                 > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
456                 dev_err(xsdfec->dev,
457                         "Writing outside of LDPC reg0 space 0x%x",
458                         XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
459                         (offset * XSDFEC_LDPC_REG_JUMP));
460                 return -EINVAL;
461         }
462         xsdfec_regwrite(xsdfec,
463                         XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
464                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
465         return 0;
466 }
467
468 static int
469 xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
470                          u32 code_id,
471                          struct xsdfec_ldpc_params *ldpc_params)
472 {
473         u32 reg_value;
474         u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
475                 (code_id * XSDFEC_LDPC_REG_JUMP);
476
477         if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
478                 dev_err(xsdfec->dev,
479                         "Accessing outside of LDPC reg0 space 0x%x",
480                         reg_addr);
481                 return -EINVAL;
482         }
483
484         reg_value = xsdfec_regread(xsdfec, reg_addr);
485
486         ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;
487
        ldpc_params->k = (reg_value & XSDFEC_REG0_K_MASK) >> XSDFEC_REG0_K_LSB;
489
490         return 0;
491 }
492
493 #define XSDFEC_REG1_PSIZE_MASK          (0x000001ff)
494 #define XSDFEC_REG1_NO_PACKING_MASK     (0x00000400)
495 #define XSDFEC_REG1_NO_PACKING_LSB      (10)
496 #define XSDFEC_REG1_NM_MASK             (0x000ff800)
497 #define XSDFEC_REG1_NM_LSB              (11)
498 #define XSDFEC_REG1_BYPASS_MASK (0x00100000)
499 static int
500 xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
501                   u32 no_packing, u32 nm, u32 offset)
502 {
503         u32 wdata;
504
505         if (psize & ~XSDFEC_REG1_PSIZE_MASK)
                dev_err(xsdfec->dev, "Psize is beyond 9 bits");
507         psize &= XSDFEC_REG1_PSIZE_MASK;
508
509         if (no_packing != 0 && no_packing != 1)
510                 dev_err(xsdfec->dev, "No-packing bit register invalid");
511         no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
512                                         XSDFEC_REG1_NO_PACKING_MASK);
513
514         if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
                dev_err(xsdfec->dev, "NM is beyond 9 bits");
516         nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
517
518         wdata = nm | no_packing | psize;
519         if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
520                 > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
521                 dev_err(xsdfec->dev,
522                         "Writing outside of LDPC reg1 space 0x%x",
523                         XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
524                         (offset * XSDFEC_LDPC_REG_JUMP));
525                 return -EINVAL;
526         }
527         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
528                 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
529         return 0;
530 }
531
532 static int
533 xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
534                          u32 code_id,
535                          struct xsdfec_ldpc_params *ldpc_params)
536 {
537         u32 reg_value;
538         u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
539                 (code_id * XSDFEC_LDPC_REG_JUMP);
540
541         if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
542                 dev_err(xsdfec->dev,
543                         "Accessing outside of LDPC reg1 space 0x%x",
544                         reg_addr);
545                 return -EINVAL;
546         }
547
548         reg_value = xsdfec_regread(xsdfec, reg_addr);
549
550         ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;
551
        ldpc_params->no_packing = (reg_value & XSDFEC_REG1_NO_PACKING_MASK) >>
                                   XSDFEC_REG1_NO_PACKING_LSB;

        ldpc_params->nm = (reg_value & XSDFEC_REG1_NM_MASK) >>
                           XSDFEC_REG1_NM_LSB;
557         return 0;
558 }
559
560 #define XSDFEC_REG2_NLAYERS_MASK                (0x000001FF)
561 #define XSDFEC_REG2_NLAYERS_LSB                 (0)
562 #define XSDFEC_REG2_NNMQC_MASK                  (0x000FFE00)
563 #define XSDFEC_REG2_NMQC_LSB                    (9)
564 #define XSDFEC_REG2_NORM_TYPE_MASK              (0x00100000)
565 #define XSDFEC_REG2_NORM_TYPE_LSB               (20)
566 #define XSDFEC_REG2_SPECIAL_QC_MASK             (0x00200000)
#define XSDFEC_REG2_SPECIAL_QC_LSB              (21)
568 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK        (0x00400000)
569 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB         (22)
570 #define XSDFEC_REG2_MAX_SCHEDULE_MASK           (0x01800000)
571 #define XSDFEC_REG2_MAX_SCHEDULE_LSB            (23)
572
573 static int
574 xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
575                   u32 norm_type, u32 special_qc, u32 no_final_parity,
576                   u32 max_schedule, u32 offset)
577 {
578         u32 wdata;
579
580         if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
581                                 XSDFEC_REG2_NLAYERS_LSB))
582                 dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
583         nlayers &= XSDFEC_REG2_NLAYERS_MASK;
584
585         if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
586                 dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
587         nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
588
589         if (norm_type > 1)
590                 dev_err(xsdfec->dev, "Norm type is invalid");
591         norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
592                                         XSDFEC_REG2_NORM_TYPE_MASK);
        if (special_qc > 1)
                dev_err(xsdfec->dev, "Special QC is invalid");
        special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
                        XSDFEC_REG2_SPECIAL_QC_MASK);
597
598         if (no_final_parity > 1)
599                 dev_err(xsdfec->dev, "No final parity check invalid");
600         no_final_parity =
601                 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
602                                         XSDFEC_REG2_NO_FINAL_PARITY_MASK);
603         if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
604                                         XSDFEC_REG2_MAX_SCHEDULE_LSB))
                dev_err(xsdfec->dev, "Max Schedule exceeds 2 bits");
606         max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
607                                 XSDFEC_REG2_MAX_SCHEDULE_MASK);
608
609         wdata = (max_schedule | no_final_parity | special_qc | norm_type |
610                         nmqc | nlayers);
611
612         if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
613                 > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
614                 dev_err(xsdfec->dev,
615                         "Writing outside of LDPC reg2 space 0x%x",
616                         XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
617                         (offset * XSDFEC_LDPC_REG_JUMP));
618                 return -EINVAL;
619         }
620         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
621                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
622         return 0;
623 }
624
625 static int
626 xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
627                          u32 code_id,
628                          struct xsdfec_ldpc_params *ldpc_params)
629 {
630         u32 reg_value;
631         u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
632                 (code_id * XSDFEC_LDPC_REG_JUMP);
633
634         if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
635                 dev_err(xsdfec->dev,
                        "Accessing outside of LDPC reg2 space 0x%x",
637                         reg_addr);
638                 return -EINVAL;
639         }
640
641         reg_value = xsdfec_regread(xsdfec, reg_addr);
642
        ldpc_params->nlayers = (reg_value & XSDFEC_REG2_NLAYERS_MASK) >>
                                XSDFEC_REG2_NLAYERS_LSB;

        ldpc_params->nmqc = (reg_value & XSDFEC_REG2_NNMQC_MASK) >>
                             XSDFEC_REG2_NMQC_LSB;

        ldpc_params->norm_type = (reg_value & XSDFEC_REG2_NORM_TYPE_MASK) >>
                                  XSDFEC_REG2_NORM_TYPE_LSB;

        ldpc_params->special_qc = (reg_value & XSDFEC_REG2_SPECIAL_QC_MASK) >>
                                   XSDFEC_REG2_SPECIAL_QC_LSB;

        ldpc_params->no_final_parity =
                (reg_value & XSDFEC_REG2_NO_FINAL_PARITY_MASK) >>
                 XSDFEC_REG2_NO_FINAL_PARITY_LSB;

        ldpc_params->max_schedule =
                (reg_value & XSDFEC_REG2_MAX_SCHEDULE_MASK) >>
                 XSDFEC_REG2_MAX_SCHEDULE_LSB;
662
663         return 0;
664 }
665
666 #define XSDFEC_REG3_LA_OFF_LSB          (8)
667 #define XSDFEC_REG3_QC_OFF_LSB          (16)
668 static int
669 xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
670                   u8 la_off, u16 qc_off, u32 offset)
671 {
672         u32 wdata;
673
674         wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
675                 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
676         if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
677                 (offset *  XSDFEC_LDPC_REG_JUMP) >
678                         XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
679                 dev_err(xsdfec->dev,
680                         "Writing outside of LDPC reg3 space 0x%x",
681                         XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
682                         (offset * XSDFEC_LDPC_REG_JUMP));
683                 return -EINVAL;
684         }
685         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
686                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
687         return 0;
688 }
689
690 static int
691 xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
692                          u32 code_id,
693                          struct xsdfec_ldpc_params *ldpc_params)
694 {
695         u32 reg_value;
696         u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
697                 (code_id * XSDFEC_LDPC_REG_JUMP);
698
699         if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
700                 dev_err(xsdfec->dev,
701                         "Accessing outside of LDPC reg3 space 0x%x",
702                         reg_addr);
703                 return -EINVAL;
704         }
705
706         reg_value = xsdfec_regread(xsdfec, reg_addr);
707
        ldpc_params->qc_off = (reg_value >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
        ldpc_params->la_off = (reg_value >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
        ldpc_params->sc_off = (reg_value & 0xFF);
711
712         return 0;
713 }
714
715 #define XSDFEC_SC_TABLE_DEPTH           (0x3fc)
716 #define XSDFEC_REG_WIDTH_JUMP           (4)
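/* Write @len words into the Shared Scale (SC) table starting at word @offset */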
717 static int
718 xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
719                       u32 *sc_ptr, u32 len)
720 {
721         int reg;
722
723         /*
724          * Writes that go beyond the length of
725          * Shared Scale(SC) table should fail
726          */
727         if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
728                 dev_err(xsdfec->dev, "Write exceeds SC table length");
729                 return -EINVAL;
730         }
731
732         /*
         * sc_off points to the last written location
734          * in the Shared Scale(SC) table. Those shared codes might
735          * be in use. Updating them without quiescing the device
736          * can put the SDFEC device in an indeterminate state
737          */
738         if ((XSDFEC_REG_WIDTH_JUMP * offset) < xsdfec->sc_off) {
739                 dev_err(xsdfec->dev, "Might write to in use shared SC code");
740                 return -EINVAL;
741         }
742
743         for (reg = 0; reg < len; reg++) {
744                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
745                 (offset + reg) *  XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
746         }
747         xsdfec->sc_off = reg + (XSDFEC_REG_WIDTH_JUMP * offset);
748         return reg;
749 }
750
751 static int
752 xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
753                         u32 *sc_ptr, u32 len)
754 {
755         u32 reg;
756         u32 reg_addr;
757         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
758
759         if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
760                 dev_err(xsdfec->dev, "Access will exceed SC table length");
761                 return -EINVAL;
762         }
763
764         for (reg = 0; reg < len; reg++) {
765                 reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
766                         ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
767
768                 sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
769         }
770
771         return 0;
772 }
773
774 #define XSDFEC_LA_TABLE_DEPTH           (0xFFC)
775 static int
776 xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
777                       u32 *la_ptr, u32 len)
778 {
779         int reg;
780
781         if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
782                 dev_err(xsdfec->dev, "Write exceeds LA table length");
783                 return -EINVAL;
784         }
785
786         if  (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->la_off) {
787                 dev_err(xsdfec->dev, "Might write to in use shared LA code");
788                 return -EINVAL;
789         }
790
791         for (reg = 0; reg < len; reg++) {
792                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
793                                 (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
794                                 la_ptr[reg]);
795         }
796         xsdfec->la_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
797         return reg;
798 }
799
800 static int
801 xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
802                         u32 *la_ptr, u32 len)
803 {
804         u32 reg;
805         u32 reg_addr;
806         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
807
808         if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
809                 dev_err(xsdfec->dev, "Access will exceed LA table length");
810                 return -EINVAL;
811         }
812
813         for (reg = 0; reg < len; reg++) {
814                 reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
815                                 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
816
817                 la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
818         }
819
820         return 0;
821 }
822
823 #define XSDFEC_QC_TABLE_DEPTH           (0x7FFC)
824 static int
825 xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
826                       u32 offset, u32 *qc_ptr, u32 len)
827 {
828         int reg;
829
830         if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
831                 dev_err(xsdfec->dev, "Write exceeds QC table length");
832                 return -EINVAL;
833         }
834
835         if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->qc_off) {
                dev_err(xsdfec->dev, "Might write to in use shared QC code");
837                 return -EINVAL;
838         }
839
840         for (reg = 0; reg < len; reg++) {
841                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
842                  (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);
843         }
844
845         xsdfec->qc_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
846         return reg;
847 }
848
849 static int
850 xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
851                         u32 offset, u32 *qc_ptr, u32 len)
852 {
853         u32 reg;
854         u32 reg_addr;
855         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
856
857         if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
858                 dev_err(xsdfec->dev, "Access will exceed QC table length");
859                 return -EINVAL;
860         }
861
862         for (reg = 0; reg < len; reg++) {
863                 reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
864                  (offset + reg) * XSDFEC_REG_WIDTH_JUMP;
865
866                 qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
867         }
868
869         return 0;
870 }
871
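/*
 * XSDFEC_ADD_LDPC_CODE_PARAMS ioctl: program LDPC code registers 0-3 and
 * the shared SC, LA and QC tables for one code_id
 */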
872 static int
873 xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
874 {
875         struct xsdfec_ldpc_params *ldpc;
876         int err;
877
878         ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
879         if (!ldpc)
880                 return -ENOMEM;
881
        err = copy_from_user(ldpc, arg, sizeof(*ldpc));
        if (err) {
                dev_err(xsdfec->dev,
                        "%s failed to copy from user for SDFEC%d",
                        __func__, xsdfec->fec_id);
                /* Free the allocation on the error path */
                err = -EFAULT;
                goto err_out;
        }
        if (xsdfec->code == XSDFEC_TURBO_CODE) {
                dev_err(xsdfec->dev,
                        "%s: Unable to write LDPC to SDFEC%d check DT",
                        __func__, xsdfec->fec_id);
                err = -EIO;
                goto err_out;
        }
895         xsdfec->code = XSDFEC_LDPC_CODE;
896         /* Disable Write Protection before proceeding */
897         if (xsdfec->wr_protect)
898                 xsdfec_wr_protect(xsdfec, false);
899
900         /* Write LDPC to CODE Register */
901         xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));
902         /* Write Reg 0 */
903         err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);
904         if (err)
905                 goto err_out;
906
907         /* Write Reg 1 */
908         err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
909                                 ldpc->nm, ldpc->code_id);
910         if (err)
911                 goto err_out;
912
913         /* Write Reg 2 */
914         err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
915                                 ldpc->norm_type, ldpc->special_qc,
916                                 ldpc->no_final_parity, ldpc->max_schedule,
917                                 ldpc->code_id);
918         if (err)
919                 goto err_out;
920
921         /* Write Reg 3 */
922         err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
923                                 ldpc->la_off, ldpc->qc_off, ldpc->code_id);
924         if (err)
925                 goto err_out;
926
927         /* Write Shared Codes */
928         err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
929                                     ldpc->sc_table, ldpc->nlayers);
930         if (err < 0)
931                 goto err_out;
932
933         err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
934                                     ldpc->la_table, ldpc->nlayers);
935         if (err < 0)
936                 goto err_out;
937
938         err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
939                                     ldpc->qc_table, ldpc->nqc);
940         if (err < 0)
941                 goto err_out;
942
943         kfree(ldpc);
944         return 0;
945         /* Error Path */
946 err_out:
947         kfree(ldpc);
948         return err;
949 }
950
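/*
 * XSDFEC_GET_LDPC_CODE_PARAMS ioctl: read back LDPC code registers 0-3 and
 * the shared tables for the requested code_id
 */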
951 static int
952 xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
953 {
954         struct xsdfec_ldpc_params *ldpc_params;
955         int err = 0;
956
957         if (xsdfec->code == XSDFEC_TURBO_CODE) {
958                 dev_err(xsdfec->dev,
959                         "%s: SDFEC%d is configured for TURBO, check DT",
960                                 __func__, xsdfec->fec_id);
961                 return -EIO;
962         }
963
964         ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);
965         if (!ldpc_params)
966                 return -ENOMEM;
967
        err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
        if (err) {
                dev_err(xsdfec->dev,
                        "%s failed to copy from user for SDFEC%d",
                        __func__, xsdfec->fec_id);
                /* Free the allocation on the error path */
                err = -EFAULT;
                goto err_out;
        }
975
976         err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,
977                                        ldpc_params);
978         if (err)
979                 goto err_out;
980
981         err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,
982                                        ldpc_params);
983         if (err)
984                 goto err_out;
985
986         err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,
987                                        ldpc_params);
988         if (err)
989                 goto err_out;
990
991         err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,
992                                        ldpc_params);
993         if (err)
994                 goto err_out;
995
996         /*
997          * Collect the shared table values, needs to happen after reading
998          * the registers
999          */
1000         err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
1001                                       ldpc_params->sc_table,
1002                                       ldpc_params->nlayers);
1003         if (err < 0)
1004                 goto err_out;
1005
1006         err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
1007                                       ldpc_params->la_table,
1008                                       ldpc_params->nlayers);
1009         if (err < 0)
1010                 goto err_out;
1011
1012         err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
1013                                       ldpc_params->qc_table,
1014                                       ldpc_params->nqc);
1015         if (err < 0)
1016                 goto err_out;
1017
1018         err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
1019         if (err) {
1020                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
1021                         __func__, xsdfec->fec_id);
1022                 err = -EFAULT;
1023         }
1024
1025         kfree(ldpc_params);
        return err;
1027         /* Error Path */
1028 err_out:
1029         kfree(ldpc_params);
1030         return err;
1031 }
1032
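/*
 * XSDFEC_START_DEV ioctl: check that the configured code matches the
 * hardware, enable the AXIS interfaces and write protect the configuration
 */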
1033 static int xsdfec_start(struct xsdfec_dev *xsdfec)
1034 {
1035         u32 regread;
1036
1037         /* Verify Code is loaded */
1038         if (xsdfec->code == XSDFEC_CODE_INVALID) {
1039                 dev_err(xsdfec->dev,
1040                         "%s : set code before start for SDFEC%d",
1041                         __func__, xsdfec->fec_id);
1042                 return -EINVAL;
1043         }
1044         regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
1045         regread &= 0x1;
1046         if (regread + 1 != xsdfec->code) {
1047                 dev_err(xsdfec->dev,
1048                         "%s SDFEC HW code does not match driver code",
1049                         __func__);
1050                 return -EINVAL;
1051         }
1052         /* Set Order to maintain order */
1053         xsdfec->order = XSDFEC_MAINTAIN_ORDER;
1054         xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (xsdfec->order - 1));
1055         /* Set AXIS width */
1056         xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, 0);
1057         /* Set AXIS enable */
1058         xsdfec_regwrite(xsdfec,
1059                         XSDFEC_AXIS_ENABLE_ADDR,
1060                         XSDFEC_AXIS_ENABLE_MASK);
1061         /* Write Protect Code and Registers */
1062         xsdfec_wr_protect(xsdfec, true);
1063         /* Done */
1064         xsdfec->state = XSDFEC_STARTED;
1065         return 0;
1066 }
1067
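/* XSDFEC_STOP_DEV ioctl: drop write protect and disable the AXIS interfaces */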
1068 static int
1069 xsdfec_stop(struct xsdfec_dev *xsdfec)
1070 {
1071         u32 regread;
1072
1073         if (xsdfec->state != XSDFEC_STARTED)
1074                 dev_err(xsdfec->dev, "Device not started correctly");
1075         /* Disable Write Protect */
1076         xsdfec_wr_protect(xsdfec, false);
1077         /* Disable AXIS_ENABLE register */
1078         regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
1079         regread &= (~XSDFEC_AXIS_ENABLE_MASK);
1080         xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
1081         /* Stop */
1082         xsdfec->state = XSDFEC_STOPPED;
1083         return 0;
1084 }
1085
1086 /*
1087  * Reset will happen asynchronously
1088  * since there is no in-band reset register
1089  * Prepare driver for reset
1090  */
1091
1092 static int
1093 xsdfec_reset_req(struct xsdfec_dev *xsdfec)
1094 {
1095         xsdfec->state = XSDFEC_INIT;
1096         xsdfec->order = XSDFEC_INVALID_ORDER;
1097         xsdfec->sc_off = 0;
1098         xsdfec->la_off = 0;
1099         xsdfec->qc_off = 0;
1100         xsdfec->wr_protect = false;
1101         atomic_set(&xsdfec->isr_err_count, 0);
1102         atomic_set(&xsdfec->uecc_count, 0);
1103         atomic_set(&xsdfec->cecc_count, 0);
1104         atomic_inc(&xsdfec->reset_count);
1105         return 0;
1106 }
1107
1108 static long
1109 xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
1110 {
1111         struct xsdfec_dev *xsdfec = fptr->private_data;
1112         void __user *arg = (void __user *)data;
1113         int rval = -EINVAL;
1114
1115         if (!xsdfec)
1116                 return rval;
1117         if (!arg)
1118                 return rval;
1119
1120         /* In failed state allow only reset and get status IOCTLs */
1121         if (xsdfec->state == XSDFEC_NEEDS_RESET &&
1122             (cmd != XSDFEC_RESET_REQ && cmd != XSDFEC_GET_STATUS)) {
1123                 dev_err(xsdfec->dev,
1124                         "SDFEC%d in failed state. Reset Required",
1125                         xsdfec->fec_id);
1126                 return -EPERM;
1127         }
1128
1129         switch (cmd) {
1130         case XSDFEC_START_DEV:
1131                 rval = xsdfec_start(xsdfec);
1132                 break;
1133         case XSDFEC_STOP_DEV:
1134                 rval = xsdfec_stop(xsdfec);
1135                 break;
1136         case XSDFEC_RESET_REQ:
1137                 rval = xsdfec_reset_req(xsdfec);
1138                 break;
1139         case XSDFEC_GET_STATUS:
1140                 rval = xsdfec_get_status(xsdfec, arg);
1141                 break;
1142         case XSDFEC_GET_CONFIG:
1143                 rval = xsdfec_get_config(xsdfec, arg);
1144                 break;
1145         case XSDFEC_SET_IRQ:
1146                 rval = xsdfec_set_irq(xsdfec, arg);
1147                 break;
1148         case XSDFEC_SET_TURBO:
1149                 rval = xsdfec_set_turbo(xsdfec, arg);
1150                 break;
1151         case XSDFEC_GET_TURBO:
1152                 rval = xsdfec_get_turbo(xsdfec, arg);
1153                 break;
1154         case XSDFEC_ADD_LDPC_CODE_PARAMS:
1155                 rval  = xsdfec_add_ldpc(xsdfec, arg);
1156                 break;
1157         case XSDFEC_GET_LDPC_CODE_PARAMS:
1158                 rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
1159                 break;
1160         default:
1161                 /* Should not get here */
1162                 dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
1163                 break;
1164         }
1165         return rval;
1166 }
1167
1168 static unsigned int
1169 xsdfec_poll(struct file *file, poll_table *wait)
1170 {
1171         unsigned int mask;
1172         struct xsdfec_dev *xsdfec = file->private_data;
1173
1174         if (!xsdfec)
1175                 return POLLNVAL | POLLHUP;
1176
1177         poll_wait(file, &xsdfec->waitq, wait);
1178
1179         /* XSDFEC ISR detected an error */
1180         if (xsdfec->state == XSDFEC_NEEDS_RESET)
1181                 mask = POLLIN | POLLRDNORM;
1182         else
1183                 mask = POLLPRI | POLLERR;
1184
1185         return mask;
1186 }
1187
1188 static const struct file_operations xsdfec_fops = {
1189         .owner = THIS_MODULE,
1190         .open = xsdfec_dev_open,
1191         .release = xsdfec_dev_release,
1192         .unlocked_ioctl = xsdfec_dev_ioctl,
1193         .poll = xsdfec_poll,
1194 };
1195
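/* Read the xlnx,sdfec-op-mode and xlnx,sdfec-code properties from the device tree */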
1196 static int
1197 xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1198 {
1199         struct device *dev = xsdfec->dev;
1200         struct device_node *node = dev->of_node;
1201         int rval;
1202         const char *fec_code;
1203         const char *fec_op_mode;
1204
1205         rval = of_property_read_string(node,
1206                                        "xlnx,sdfec-op-mode",
1207                                        &fec_op_mode);
1208         if (rval < 0) {
1209                 dev_err(dev, "xlnx,sdfec-op-mode not in DT");
1210                 return rval;
1211         }
1212
1213         if (!strcasecmp(fec_op_mode, "encode")) {
1214                 xsdfec->op_mode = XSDFEC_ENCODE;
1215         } else if (!strcasecmp(fec_op_mode, "decode")) {
1216                 xsdfec->op_mode = XSDFEC_DECODE;
1217         } else {
1218                 dev_err(dev, "Encode or Decode not specified in DT");
1219                 return -EINVAL;
1220         }
1221
1222         rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1223         if (rval < 0) {
1224                 dev_err(dev, "xlnx,sdfec-code not in DT");
1225                 return rval;
1226         }
1227
1228         if (!strcasecmp(fec_code, "ldpc")) {
1229                 xsdfec->code = XSDFEC_LDPC_CODE;
1230         } else if (!strcasecmp(fec_code, "turbo")) {
1231                 xsdfec->code = XSDFEC_TURBO_CODE;
1232         } else {
                dev_err(xsdfec->dev, "Invalid xlnx,sdfec-code in DT");
1234                 return -EINVAL;
1235         }
1236
1237         return 0;
1238 }
1239
1240 static void
1241 xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)
1242 {
1243         u32 cecc, uecc;
1244         int uecc_cnt;
1245
1246         cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
1247         uecc = ecc_err & XSDFEC_ECC_ISR_MBE;
1248
1249         uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
1250         atomic_add(hweight32(cecc), &xsdfec->cecc_count);
1251
1252         if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1253                 dev_err(xsdfec->dev,
1254                         "Multi-bit error on xsdfec%d. Needs reset",
1255                         xsdfec->fec_id);
1256         }
1257
1258         /* Clear ECC errors */
1259         xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1260 }
1261
1262 static void
1263 xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)
1264 {
1265         int isr_err_cnt;
1266
1267         /* Update ISR error counts */
1268         isr_err_cnt = atomic_add_return(hweight32(isr_err),
1269                                         &xsdfec->isr_err_count);
1270         if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1271                 dev_err(xsdfec->dev,
                        "Tlast, DIN_WORDS or DOUT_WORDS not correct");
1273         }
1274
1275         /* Clear ISR error status */
        xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, 0);
1277 }
1278
1279 static void
1280 xsdfec_reset_required(struct xsdfec_dev *xsdfec)
1281 {
1282         xsdfec->state = XSDFEC_NEEDS_RESET;
1283 }
1284
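/*
 * Threaded IRQ handler: classify ECC and ISR errors, request a reset and
 * wake any pollers when a fatal error is seen
 */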
1285 static irqreturn_t
1286 xsdfec_irq_thread(int irq, void *dev_id)
1287 {
1288         struct xsdfec_dev *xsdfec = dev_id;
1289         irqreturn_t ret = IRQ_HANDLED;
1290         u32 ecc_err;
1291         u32 isr_err;
1292         bool fatal_err = false;
1293
1294         WARN_ON(xsdfec->irq != irq);
1295
1296         /* Mask Interrupts */
1297         xsdfec_isr_enable(xsdfec, false);
1298         xsdfec_ecc_isr_enable(xsdfec, false);
1299
1300         /* Read Interrupt Status Registers */
1301         ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1302         isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
1303
1304         if (ecc_err & XSDFEC_ECC_ISR_MBE) {
1305                 /* Multi-Bit Errors need Reset */
1306                 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1307                 xsdfec_reset_required(xsdfec);
1308                 fatal_err = true;
1309         } else if (isr_err & XSDFEC_ISR_MASK) {
1310                 /*
1311                  * Tlast, DIN_WORDS and DOUT_WORDS related
1312                  * errors need Reset
1313                  */
1314                 xsdfec_log_isr_errors(xsdfec, isr_err);
1315                 xsdfec_reset_required(xsdfec);
1316                 fatal_err = true;
1317         } else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
1318                 /* Correctable ECC Errors */
1319                 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1320         } else {
1321                 ret = IRQ_NONE;
1322         }
1323
1324         if (fatal_err)
1325                 wake_up_interruptible(&xsdfec->waitq);
1326
        /* Unmask Interrupts */
1328         xsdfec_isr_enable(xsdfec, true);
1329         xsdfec_ecc_isr_enable(xsdfec, true);
1330
1331         return ret;
1332 }
1333
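/*
 * Probe one SDFEC instance: map the registers, parse the device tree,
 * request the IRQ (if present) and create the character device
 */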
1334 static int
1335 xsdfec_probe(struct platform_device *pdev)
1336 {
1337         struct xsdfec_dev *xsdfec;
1338         struct device *dev;
1339         struct device *dev_create;
1340         struct resource *res;
1341         int err;
1342         bool irq_enabled = true;
1343
1344         xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1345         if (!xsdfec)
1346                 return -ENOMEM;
1347
1348         xsdfec->dev = &pdev->dev;
        if (atomic_read(&xsdfec_ndevs) >= DRIVER_MAX_DEV) {
                dev_err(&pdev->dev,
                        "Cannot instantiate more than %d SDFEC instances",
                        DRIVER_MAX_DEV);
1353                 return -EINVAL;
1354         }
1355
1356         xsdfec->fec_id = atomic_read(&xsdfec_ndevs);
1357
1358         dev = xsdfec->dev;
1359         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1360         xsdfec->regs = devm_ioremap_resource(dev, res);
1361         if (IS_ERR(xsdfec->regs)) {
1362                 dev_err(dev, "Unable to map resource");
1363                 err = PTR_ERR(xsdfec->regs);
1364                 goto err_xsdfec_dev;
1365         }
1366
1367         xsdfec->irq = platform_get_irq(pdev, 0);
1368         if (xsdfec->irq < 0) {
1369                 dev_dbg(dev, "platform_get_irq failed");
1370                 irq_enabled = false;
1371         }
1372
1373         err = xsdfec_parse_of(xsdfec);
1374         if (err < 0)
1375                 goto err_xsdfec_dev;
1376
1377         /* Save driver private data */
1378         platform_set_drvdata(pdev, xsdfec);
1379
1380         if (irq_enabled) {
1381                 init_waitqueue_head(&xsdfec->waitq);
1382                 /* Register IRQ thread */
1383                 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1384                                                 xsdfec_irq_thread,
1385                                                 IRQF_ONESHOT,
1386                                                 "xilinx-sdfec16",
1387                                                 xsdfec);
1388                 if (err < 0) {
1389                         dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1390                         goto err_xsdfec_dev;
1391                 }
1392         }
1393
1394         cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
1395         xsdfec->xsdfec_cdev.owner = THIS_MODULE;
1396         err = cdev_add(&xsdfec->xsdfec_cdev,
1397                        MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id), 1);
1398         if (err < 0) {
1399                 dev_err(dev, "cdev_add failed");
1400                 err = -EIO;
1401                 goto err_xsdfec_dev;
1402         }
1403
1404         if (!xsdfec_class) {
1405                 err = -EIO;
1406                 dev_err(dev, "xsdfec class not created correctly");
1407                 goto err_xsdfec_cdev;
1408         }
1409
1410         dev_create = device_create(xsdfec_class, dev,
1411                                    MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id),
1412                                    xsdfec, "xsdfec%d", xsdfec->fec_id);
1413         if (IS_ERR(dev_create)) {
1414                 dev_err(dev, "unable to create device");
1415                 err = PTR_ERR(dev_create);
1416                 goto err_xsdfec_cdev;
1417         }
1418
1419         atomic_set(&xsdfec->open_count, 1);
1420         dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->fec_id);
1421         atomic_inc(&xsdfec_ndevs);
1422         return 0;
1423
1424         /* Failure cleanup */
1425 err_xsdfec_cdev:
1426         cdev_del(&xsdfec->xsdfec_cdev);
1427 err_xsdfec_dev:
1428         return err;
1429 }
1430
1431 static int
1432 xsdfec_remove(struct platform_device *pdev)
1433 {
1434         struct xsdfec_dev *xsdfec;
1435         struct device *dev = &pdev->dev;
1436
1437         xsdfec = platform_get_drvdata(pdev);
1438         if (!xsdfec)
1439                 return -ENODEV;
1440         dev = xsdfec->dev;
1441         if (!xsdfec_class) {
1442                 dev_err(dev, "xsdfec_class is NULL");
1443                 return -EIO;
1444         }
1445
1446         device_destroy(xsdfec_class,
1447                        MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id));
1448         cdev_del(&xsdfec->xsdfec_cdev);
1449         atomic_dec(&xsdfec_ndevs);
1450         return 0;
1451 }
1452
1453 static const struct of_device_id xsdfec_of_match[] = {
1454         { .compatible = "xlnx,fec-engine", },
1455         { /* end of table */ }
1456 };
1457 MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1458
1459 static struct platform_driver xsdfec_driver = {
1460         .driver = {
1461                 .name = "xilinx-sdfec",
1462                 .of_match_table = xsdfec_of_match,
1463         },
1464         .probe = xsdfec_probe,
1465         .remove =  xsdfec_remove,
1466 };
1467
1468 static int __init xsdfec_init_mod(void)
1469 {
1470         int err;
1471
1472         xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
1473         if (IS_ERR(xsdfec_class)) {
1474                 err = PTR_ERR(xsdfec_class);
1475                 pr_err("%s : Unable to register xsdfec class", __func__);
1476                 return err;
1477         }
1478
1479         err = alloc_chrdev_region(&xsdfec_devt,
1480                                   0, DRIVER_MAX_DEV, DRIVER_NAME);
1481         if (err < 0) {
1482                 pr_err("%s : Unable to get major number", __func__);
1483                 goto err_xsdfec_class;
1484         }
1485
1486         err = platform_driver_register(&xsdfec_driver);
1487         if (err < 0) {
                pr_err("%s Unable to register %s driver",
1489                        __func__, DRIVER_NAME);
1490                 goto err_xsdfec_drv;
1491         }
1492         return 0;
1493
1494         /* Error Path */
1495 err_xsdfec_drv:
1496         unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1497 err_xsdfec_class:
1498         class_destroy(xsdfec_class);
1499         return err;
1500 }
1501
1502 static void __exit xsdfec_cleanup_mod(void)
1503 {
1504         platform_driver_unregister(&xsdfec_driver);
1505         unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1506         class_destroy(xsdfec_class);
1507         xsdfec_class = NULL;
1508 }
1509
1510 module_init(xsdfec_init_mod);
1511 module_exit(xsdfec_cleanup_mod);
1512
1513 MODULE_AUTHOR("Xilinx, Inc");
1514 MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
1515 MODULE_LICENSE("GPL");
1516 MODULE_VERSION(DRIVER_VERSION);