1 /*
2  * Xilinx SDFEC
3  *
4  * Copyright (C) 2016 - 2017 Xilinx, Inc.
5  *
6  * Description:
7  * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
8  * IP. It exposes a char device interface in sysfs and supports file
9  * operations like open(), close() and ioctl().
10  *
11  * This program is free software: you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  */
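/*
 * Illustrative usage sketch (not part of the driver): each probed SDFEC
 * instance is exposed as a character device, and userspace drives it with
 * the ioctls declared in the SDFEC UAPI header. The "/dev/xsdfec0" path and
 * the userspace include path below are assumptions based on the "xsdfec%d"
 * device name used in probe; adjust both to the target system.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <misc/xilinx_sdfec.h>
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	if (fd >= 0) {
 *		struct xsdfec_status status;
 *
 *		if (ioctl(fd, XSDFEC_GET_STATUS, &status) == 0)
 *			printf("SDFEC%d state %d activity %d\n",
 *			       status.fec_id, status.state, status.activity);
 *		close(fd);
 *	}
 */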
24
25 #include <linux/cdev.h>
26 #include <linux/device.h>
27 #include <linux/fs.h>
28 #include <linux/io.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/of_platform.h>
34 #include <linux/platform_device.h>
35 #include <linux/poll.h>
36 #include <linux/slab.h>
37 #include <linux/uaccess.h>
38
39 #include <uapi/misc/xilinx_sdfec.h>
40
41 #define DRIVER_NAME     "xilinx_sdfec"
42 #define DRIVER_VERSION  "0.3"
43 #define DRIVER_MAX_DEV  BIT(MINORBITS)
44
45 static  struct class *xsdfec_class;
46 static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
47 static dev_t xsdfec_devt;
48
49 /* Xilinx SDFEC Register Map */
50 #define XSDFEC_AXI_WR_PROTECT_ADDR              (0x00000)
51 #define XSDFEC_CODE_WR_PROTECT_ADDR             (0x00004)
52 #define XSDFEC_ACTIVE_ADDR                      (0x00008)
53 #define XSDFEC_AXIS_WIDTH_ADDR                  (0x0000c)
54 #define XSDFEC_AXIS_ENABLE_ADDR                 (0x00010)
55 #define XSDFEC_AXIS_ENABLE_MASK                 (0x0001F)
56 #define XSDFEC_FEC_CODE_ADDR                    (0x00014)
57 #define XSDFEC_ORDER_ADDR                       (0x00018)
58
59 /* Interrupt Status Register Bit Mask */
60 #define XSDFEC_ISR_MASK                         (0x0003F)
61 /* Interrupt Status Register */
62 #define XSDFEC_ISR_ADDR                         (0x0001c)
63 /* Write Only - Interrupt Enable Register */
64 #define XSDFEC_IER_ADDR                         (0x00020)
65 /* Write Only - Interrupt Disable Register */
66 #define XSDFEC_IDR_ADDR                         (0x00024)
67 /* Read Only - Interrupt Mask Register */
68 #define XSDFEC_IMR_ADDR                         (0x00028)
69
70 /* Single Bit Errors */
71 #define XSDFEC_ECC_ISR_SBE                      (0x7FF)
72 /* Multi Bit Errors */
73 #define XSDFEC_ECC_ISR_MBE                      (0x3FF800)
74 /* ECC Interrupt Status Bit Mask */
75 #define XSDFEC_ECC_ISR_MASK     (XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)
76
77 /* Multi Bit Error Position */
78 #define XSDFEC_ECC_MULTI_BIT_POS                (11)
79 #define XSDFEC_ERROR_MAX_THRESHOLD              (100)
80
81 /* ECC Interrupt Status Register */
82 #define XSDFEC_ECC_ISR_ADDR                     (0x0002c)
83 /* Write Only - ECC Interrupt Enable Register */
84 #define XSDFEC_ECC_IER_ADDR                     (0x00030)
85 /* Write Only - ECC Interrupt Disable Register */
86 #define XSDFEC_ECC_IDR_ADDR                     (0x00034)
87 /* Read Only - ECC Interrupt Mask Register */
88 #define XSDFEC_ECC_IMR_ADDR                     (0x00038)
89
90 #define XSDFEC_BYPASS_ADDR                      (0x0003c)
91 #define XSDFEC_TEST_EMA_ADDR_BASE               (0x00080)
92 #define XSDFEC_TEST_EMA_ADDR_HIGH               (0x00089)
93 #define XSDFEC_TURBO_ADDR                       (0x00100)
94 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE         (0x02000)
95 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH         (0x021fc)
96 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE         (0x02004)
97 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH         (0x02200)
98 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE         (0x02008)
99 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH         (0x02204)
100 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE         (0x0200c)
101 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH         (0x02208)
102
103 /**
104  * struct xsdfec_dev - Driver data for SDFEC
105  * @regs: device physical base address
106  * @dev: pointer to device struct
107  * @fec_id: Instance number
108  * @intr_enabled: indicates IRQ enabled
109  * @wr_protect: indicates Write Protect enabled
110  * @code: LDPC or Turbo Codes being used
111  * @order: In-Order or Out-of-Order
112  * @state: State of the SDFEC device
113  * @op_mode: Operating in Encode or Decode
114  * @isr_err_count: Count of ISR errors
115  * @cecc_count: Count of Correctable ECC errors (SBE)
116  * @uecc_count: Count of Uncorrectable ECC errors (MBE)
117  * @reset_count: Count of Resets requested
118  * @open_count: Count of char device being opened
119  * @irq: IRQ number
120  * @xsdfec_cdev: Character device handle
121  * @sc_off: Shared Scale Table Offset
122  * @qc_off: Shared Circulant Table Offset
123  * @la_off: Shared Layer Table Offset
124  * @waitq: Driver wait queue
125  *
126  * This structure contains the necessary state for the SDFEC driver to operate
127  */
128 struct xsdfec_dev {
129         void __iomem *regs;
130         struct device *dev;
131         s32  fec_id;
132         bool intr_enabled;
133         bool wr_protect;
134         enum xsdfec_code code;
135         enum xsdfec_order order;
136         enum xsdfec_state state;
137         enum xsdfec_op_mode op_mode;
138         atomic_t isr_err_count;
139         atomic_t cecc_count;
140         atomic_t uecc_count;
141         atomic_t reset_count;
142         atomic_t open_count;
143         int  irq;
144         struct cdev xsdfec_cdev;
145         int sc_off;
146         int qc_off;
147         int la_off;
148         wait_queue_head_t waitq;
149 };
150
151 static inline void
152 xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
153 {
154         if (xsdfec->wr_protect) {
155                 dev_err(xsdfec->dev, "SDFEC in write protect");
156                 return;
157         }
158
159         dev_dbg(xsdfec->dev,
160                 "Writing 0x%x to offset 0x%x", value, addr);
161         iowrite32(value, xsdfec->regs + addr);
162 }
163
164 static inline u32
165 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
166 {
167         u32 rval;
168
169         rval = ioread32(xsdfec->regs + addr);
170         dev_dbg(xsdfec->dev,
171                 "Read value = 0x%x from offset 0x%x",
172                 rval, addr);
173         return rval;
174 }
175
176 #define XSDFEC_WRITE_PROTECT_ENABLE     (1)
177 #define XSDFEC_WRITE_PROTECT_DISABLE    (0)
178 static void
179 xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
180 {
181         if (wr_pr) {
182                 xsdfec_regwrite(xsdfec,
183                                 XSDFEC_CODE_WR_PROTECT_ADDR,
184                                 XSDFEC_WRITE_PROTECT_ENABLE);
185                 xsdfec_regwrite(xsdfec,
186                                 XSDFEC_AXI_WR_PROTECT_ADDR,
187                                 XSDFEC_WRITE_PROTECT_ENABLE);
188         } else {
189                 xsdfec_regwrite(xsdfec,
190                                 XSDFEC_AXI_WR_PROTECT_ADDR,
191                                 XSDFEC_WRITE_PROTECT_DISABLE);
192                 xsdfec_regwrite(xsdfec,
193                                 XSDFEC_CODE_WR_PROTECT_ADDR,
194                                 XSDFEC_WRITE_PROTECT_DISABLE);
195         }
196         xsdfec->wr_protect = wr_pr;
197 }
198
199 static int
200 xsdfec_dev_open(struct inode *iptr, struct file *fptr)
201 {
202         struct xsdfec_dev *xsdfec;
203
204         xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
205         if (!xsdfec)
206                 return  -EAGAIN;
207
208         /* Only one open per device at a time */
209         if (!atomic_dec_and_test(&xsdfec->open_count)) {
210                 atomic_inc(&xsdfec->open_count);
211                 return -EBUSY;
212         }
213
214         fptr->private_data = xsdfec;
215         return 0;
216 }
217
218 static int
219 xsdfec_dev_release(struct inode *iptr, struct file *fptr)
220 {
221         struct xsdfec_dev *xsdfec;
222
223         xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
224         if (!xsdfec)
225                 return -EAGAIN;
226
227         atomic_inc(&xsdfec->open_count);
228         return 0;
229 }
230
231 #define XSDFEC_IS_ACTIVITY_SET  (0x1)
232 static int
233 xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
234 {
235         struct xsdfec_status status;
236         int err = 0;
237
238         status.fec_id = xsdfec->fec_id;
239         status.state = xsdfec->state;
240         status.activity  =
241                 (xsdfec_regread(xsdfec,
242                                 XSDFEC_ACTIVE_ADDR) &
243                                 XSDFEC_IS_ACTIVITY_SET);
244
245         err = copy_to_user(arg, &status, sizeof(status));
246         if (err) {
247                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
248                         __func__, xsdfec->fec_id);
249                 err = -EFAULT;
250         }
251         return err;
252 }
253
254 static int
255 xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
256 {
257         struct xsdfec_config config;
258         int err = 0;
259
260         config.fec_id = xsdfec->fec_id;
261         config.state = xsdfec->state;
262         config.code = xsdfec->code;
263         config.mode = xsdfec->op_mode;
264         config.order = xsdfec->order;
265
266         err = copy_to_user(arg, &config, sizeof(config));
267         if (err) {
268                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
269                         __func__, xsdfec->fec_id);
270                 err = -EFAULT;
271         }
272         return err;
273 }
274
275 static int
276 xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
277 {
278         u32 mask_read;
279
280         if (enable) {
281                 /* Enable */
282                 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
283                                 XSDFEC_ISR_MASK);
284                 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
285                 if (mask_read & XSDFEC_ISR_MASK) {
286                         dev_err(xsdfec->dev,
287                                 "SDFEC enabling irq with IER failed");
288                         return -EIO;
289                 }
290         } else {
291                 /* Disable */
292                 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
293                                 XSDFEC_ISR_MASK);
294                 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
295                 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
296                         dev_err(xsdfec->dev,
297                                 "SDFEC disabling irq with IDR failed");
298                         return -EIO;
299                 }
300         }
301         return 0;
302 }
303
304 static int
305 xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
306 {
307         u32 mask_read;
308
309         if (enable) {
310                 /* Enable */
311                 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
312                                 XSDFEC_ECC_ISR_MASK);
313                 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
314                 if (mask_read & XSDFEC_ECC_ISR_MASK) {
315                         dev_err(xsdfec->dev,
316                                 "SDFEC enabling ECC irq with ECC IER failed");
317                         return -EIO;
318                 }
319         } else {
320                 /* Disable */
321                 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
322                                 XSDFEC_ECC_ISR_MASK);
323                 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
324                 if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
325                         dev_err(xsdfec->dev,
326                                 "SDFEC disable ECC irq with ECC IDR failed");
327                         return -EIO;
328                 }
329         }
330         return 0;
331 }
332
333 static int
334 xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
335 {
336         struct xsdfec_irq  irq;
337         int err = 0;
338
339         err = copy_from_user(&irq, arg, sizeof(irq));
340         if (err) {
341                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
342                         __func__, xsdfec->fec_id);
343                 return -EFAULT;
344         }
345
346         /* Setup tlast related IRQ */
347         if (irq.enable_isr) {
348                 err = xsdfec_isr_enable(xsdfec, true);
349                 if (err < 0)
350                         return err;
351         }
352
353         /* Setup ECC related IRQ */
354         if (irq.enable_ecc_isr) {
355                 err = xsdfec_ecc_isr_enable(xsdfec, true);
356                 if (err < 0)
357                         return err;
358         }
359
360         return 0;
361 }
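/*
 * Caller-side sketch for XSDFEC_SET_IRQ (fd is the descriptor returned by
 * open(), as in the example near the top of the file). The field names
 * follow the struct xsdfec_irq usage in the handler above:
 *
 *	struct xsdfec_irq irq = {
 *		.enable_isr = true,
 *		.enable_ecc_isr = true,
 *	};
 *
 *	if (ioctl(fd, XSDFEC_SET_IRQ, &irq) < 0)
 *		perror("XSDFEC_SET_IRQ");
 */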
362
363 #define XSDFEC_TURBO_SCALE_MASK         (0xF)
364 #define XSDFEC_TURBO_SCALE_BIT_POS      (8)
365 static int
366 xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
367 {
368         struct xsdfec_turbo turbo;
369         int err = 0;
370         u32 turbo_write = 0;
371
372         err = copy_from_user(&turbo, arg, sizeof(turbo));
373         if (err) {
374                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
375                         __func__, xsdfec->fec_id);
376                 return -EFAULT;
377         }
378
379         /* Check to see what device tree says about the FEC codes */
380         if (xsdfec->code == XSDFEC_LDPC_CODE) {
381                 dev_err(xsdfec->dev,
382                         "%s: Unable to write Turbo to SDFEC%d check DT",
383                                 __func__, xsdfec->fec_id);
384                 return -EIO;
385         } else if (xsdfec->code == XSDFEC_CODE_INVALID) {
386                 xsdfec->code = XSDFEC_TURBO_CODE;
387         }
388
389         if (xsdfec->wr_protect)
390                 xsdfec_wr_protect(xsdfec, false);
391
392         turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
393                         XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
394         xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
395         return err;
396 }
397
398 static int
399 xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
400 {
401         u32 reg_value;
402         struct xsdfec_turbo turbo_params;
403         int err;
404
405         if (xsdfec->code == XSDFEC_LDPC_CODE) {
406                 dev_err(xsdfec->dev,
407                         "%s: SDFEC%d is configured for LDPC, check DT",
408                         __func__, xsdfec->fec_id);
409                 return -EIO;
410         }
411
412         reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
413
414         turbo_params.scale = (reg_value >> XSDFEC_TURBO_SCALE_BIT_POS) &
415                               XSDFEC_TURBO_SCALE_MASK;
416         turbo_params.alg = reg_value & 0x1;
417
418         err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
419         if (err) {
420                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
421                         __func__, xsdfec->fec_id);
422                 err = -EFAULT;
423         }
424
425         return err;
426 }
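/*
 * Caller-side sketch for the Turbo ioctls above. With the packing done in
 * xsdfec_set_turbo(), .alg occupies the low bits of XSDFEC_TURBO_ADDR and
 * .scale bits [11:8]; for example scale = 12, alg = 1 is written as
 * (12 << 8) | 1 = 0xC01. The numeric values here are placeholders only:
 *
 *	struct xsdfec_turbo turbo = { .alg = 1, .scale = 12 };
 *	struct xsdfec_turbo readback;
 *
 *	if (ioctl(fd, XSDFEC_SET_TURBO, &turbo) == 0)
 *		ioctl(fd, XSDFEC_GET_TURBO, &readback);
 */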
427
428 #define XSDFEC_LDPC_REG_JUMP    (0x10)
429 #define XSDFEC_REG0_N_MASK      (0x0000FFFF)
430 #define XSDFEC_REG0_N_LSB       (0)
431 #define XSDFEC_REG0_K_MASK      (0x7fff0000)
432 #define XSDFEC_REG0_K_LSB       (16)
433 static int
434 xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
435                   u32 n, u32 k, u32 offset)
436 {
437         u32 wdata;
438
439         /* Use only lower 16 bits */
440         if (n & ~XSDFEC_REG0_N_MASK)
441                 dev_err(xsdfec->dev, "N value is beyond 16 bits");
442         n &= XSDFEC_REG0_N_MASK;
443         n <<= XSDFEC_REG0_N_LSB;
444
445         if (k & ~(XSDFEC_REG0_K_MASK >> XSDFEC_REG0_K_LSB))
446                 dev_err(xsdfec->dev, "K value is beyond 15 bits");
447
448         k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);
449         wdata = k | n;
450
451         if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
452                                 > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
453                 dev_err(xsdfec->dev,
454                         "Writing outside of LDPC reg0 space 0x%x",
455                         XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
456                         (offset * XSDFEC_LDPC_REG_JUMP));
457                 return -EINVAL;
458         }
459         xsdfec_regwrite(xsdfec,
460                         XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
461                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
462         return 0;
463 }
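/*
 * Packing example for the write above: n = 5184 (0x1440) and k = 4320
 * (0x10e0) give wdata = (0x10e0 << 16) | 0x1440 = 0x10e01440 (the values
 * are illustrative only, not taken from a real code definition).
 */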
464
465 static int
466 xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
467                          u32 code_id,
468                          struct xsdfec_ldpc_params *ldpc_params)
469 {
470         u32 reg_value;
471         u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
472                 (code_id * XSDFEC_LDPC_REG_JUMP);
473
474         if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
475                 dev_err(xsdfec->dev,
476                         "Accessing outside of LDPC reg0 for code_id %d",
477                         code_id);
478                 return -EINVAL;
479         }
480
481         reg_value = xsdfec_regread(xsdfec, reg_addr);
482
483         ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;
484
485         ldpc_params->k = (reg_value & XSDFEC_REG0_K_MASK) >> XSDFEC_REG0_K_LSB;
486
487         return 0;
488 }
489
490 #define XSDFEC_REG1_PSIZE_MASK          (0x000001ff)
491 #define XSDFEC_REG1_NO_PACKING_MASK     (0x00000400)
492 #define XSDFEC_REG1_NO_PACKING_LSB      (10)
493 #define XSDFEC_REG1_NM_MASK             (0x000ff800)
494 #define XSDFEC_REG1_NM_LSB              (11)
495 #define XSDFEC_REG1_BYPASS_MASK (0x00100000)
496 static int
497 xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
498                   u32 no_packing, u32 nm, u32 offset)
499 {
500         u32 wdata;
501
502         if (psize & ~XSDFEC_REG1_PSIZE_MASK)
503                 dev_err(xsdfec->dev, "Psize is beyond 10 bits");
504         psize &= XSDFEC_REG1_PSIZE_MASK;
505
506         if (no_packing != 0 && no_packing != 1)
507                 dev_err(xsdfec->dev, "No-packing bit register invalid");
508         no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
509                                         XSDFEC_REG1_NO_PACKING_MASK);
510
511         if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
512                 dev_err(xsdfec->dev, "NM is beyond 10 bits");
513         nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
514
515         wdata = nm | no_packing | psize;
516         if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
517                 > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
518                 dev_err(xsdfec->dev,
519                         "Writing outside of LDPC reg1 space 0x%x",
520                         XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
521                         (offset * XSDFEC_LDPC_REG_JUMP));
522                 return -EINVAL;
523         }
524         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
525                 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
526         return 0;
527 }
528
529 static int
530 xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
531                          u32 code_id,
532                          struct xsdfec_ldpc_params *ldpc_params)
533 {
534         u32 reg_value;
535         u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
536                 (code_id * XSDFEC_LDPC_REG_JUMP);
537
538         if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
539                 dev_err(xsdfec->dev,
540                         "Accessing outside of LDPC reg1 for code_id %d",
541                         code_id);
542                 return -EINVAL;
543         }
544
545         reg_value = xsdfec_regread(xsdfec, reg_addr);
546
547         ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;
548
549         ldpc_params->no_packing = ((reg_value & XSDFEC_REG1_NO_PACKING_MASK) >>
550                                     XSDFEC_REG1_NO_PACKING_LSB);
551
552         ldpc_params->nm = (reg_value & XSDFEC_REG1_NM_MASK) >>
553                            XSDFEC_REG1_NM_LSB;
554         return 0;
555 }
556
557 #define XSDFEC_REG2_NLAYERS_MASK                (0x000001FF)
558 #define XSDFEC_REG2_NLAYERS_LSB                 (0)
559 #define XSDFEC_REG2_NNMQC_MASK                  (0x000FFE00)
560 #define XSDFEC_REG2_NMQC_LSB                    (9)
561 #define XSDFEC_REG2_NORM_TYPE_MASK              (0x00100000)
562 #define XSDFEC_REG2_NORM_TYPE_LSB               (20)
563 #define XSDFEC_REG2_SPECIAL_QC_MASK             (0x00200000)
564 #define XSDFEC_REG2_SPEICAL_QC_LSB              (21)
565 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK        (0x00400000)
566 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB         (22)
567 #define XSDFEC_REG2_MAX_SCHEDULE_MASK           (0x01800000)
568 #define XSDFEC_REG2_MAX_SCHEDULE_LSB            (23)
569
570 static int
571 xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
572                   u32 norm_type, u32 special_qc, u32 no_final_parity,
573                   u32 max_schedule, u32 offset)
574 {
575         u32 wdata;
576
577         if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
578                                 XSDFEC_REG2_NLAYERS_LSB))
579                 dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
580         nlayers &= XSDFEC_REG2_NLAYERS_MASK;
581
582         if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
583                 dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
584         nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
585
586         if (norm_type > 1)
587                 dev_err(xsdfec->dev, "Norm type is invalid");
588         norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
589                                         XSDFEC_REG2_NORM_TYPE_MASK);
590         if (special_qc > 1)
591                 dev_err(xsdfec->dev, "Special QC is invalid");
592         special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
593                         XSDFEC_REG2_SPECIAL_QC_MASK);
594
595         if (no_final_parity > 1)
596                 dev_err(xsdfec->dev, "No final parity check invalid");
597         no_final_parity =
598                 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
599                                         XSDFEC_REG2_NO_FINAL_PARITY_MASK);
600         if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
601                                         XSDFEC_REG2_MAX_SCHEDULE_LSB))
602                 dev_err(xsdfec->dev, "Max Schedule exceeds 2 bits");
603         max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
604                                 XSDFEC_REG2_MAX_SCHEDULE_MASK);
605
606         wdata = (max_schedule | no_final_parity | special_qc | norm_type |
607                         nmqc | nlayers);
608
609         if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
610                 > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
611                 dev_err(xsdfec->dev,
612                         "Writing outside of LDPC reg2 space 0x%x",
613                         XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
614                         (offset * XSDFEC_LDPC_REG_JUMP));
615                 return -EINVAL;
616         }
617         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
618                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
619         return 0;
620 }
621
622 static int
623 xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
624                          u32 code_id,
625                          struct xsdfec_ldpc_params *ldpc_params)
626 {
627         u32 reg_value;
628         u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
629                 (code_id * XSDFEC_LDPC_REG_JUMP);
630
631         if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
632                 dev_err(xsdfec->dev,
633                         "Accessing outside of LDPC reg2 for code_id %d",
634                         code_id);
635                 return -EINVAL;
636         }
637
638         reg_value = xsdfec_regread(xsdfec, reg_addr);
639
640         ldpc_params->nlayers = ((reg_value & XSDFEC_REG2_NLAYERS_MASK) >>
641                                 XSDFEC_REG2_NLAYERS_LSB);
642
643         ldpc_params->nmqc = (reg_value & XSDFEC_REG2_NNMQC_MASK) >>
644                              XSDFEC_REG2_NMQC_LSB;
645
646         ldpc_params->norm_type = ((reg_value & XSDFEC_REG2_NORM_TYPE_MASK) >>
647                                   XSDFEC_REG2_NORM_TYPE_LSB);
648
649         ldpc_params->special_qc = ((reg_value & XSDFEC_REG2_SPECIAL_QC_MASK) >>
650                                    XSDFEC_REG2_SPEICAL_QC_LSB);
651
652         ldpc_params->no_final_parity =
653                 ((reg_value & XSDFEC_REG2_NO_FINAL_PARITY_MASK) >>
654                  XSDFEC_REG2_NO_FINAL_PARITY_LSB);
655
656         ldpc_params->max_schedule =
657                 ((reg_value & XSDFEC_REG2_MAX_SCHEDULE_MASK) >>
658                  XSDFEC_REG2_MAX_SCHEDULE_LSB);
659
660         return 0;
661 }
662
663 #define XSDFEC_REG3_LA_OFF_LSB          (8)
664 #define XSDFEC_REG3_QC_OFF_LSB          (16)
665 static int
666 xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
667                   u8 la_off, u16 qc_off, u32 offset)
668 {
669         u32 wdata;
670
671         wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
672                 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
673         if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
674                 (offset *  XSDFEC_LDPC_REG_JUMP) >
675                         XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
676                 dev_err(xsdfec->dev,
677                         "Writing outside of LDPC reg3 space 0x%x",
678                         XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
679                         (offset * XSDFEC_LDPC_REG_JUMP));
680                 return -EINVAL;
681         }
682         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
683                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
684         return 0;
685 }
686
687 static int
688 xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
689                          u32 code_id,
690                          struct xsdfec_ldpc_params *ldpc_params)
691 {
692         u32 reg_value;
693         u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
694                 (code_id * XSDFEC_LDPC_REG_JUMP);
695
696         if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
697                 dev_err(xsdfec->dev,
698                         "Accessing outside of LDPC reg3 for code_id %d",
699                         code_id);
700                 return -EINVAL;
701         }
702
703         reg_value = xsdfec_regread(xsdfec, reg_addr);
704
705         ldpc_params->qc_off = (reg_value >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
706         ldpc_params->la_off = (reg_value >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
707         ldpc_params->sc_off = (reg_value & 0xFF);
708
709         return 0;
710 }
711
712 #define XSDFEC_SC_TABLE_DEPTH           (0x3fc)
713 #define XSDFEC_REG_WIDTH_JUMP           (4)
714 static int
715 xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
716                       u32 *sc_ptr, u32 len)
717 {
718         int reg;
719
720         /*
721          * Writes that go beyond the length of
722          * Shared Scale(SC) table should fail
723          */
724         if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
725                 dev_err(xsdfec->dev, "Write exceeds SC table length");
726                 return -EINVAL;
727         }
728
729         /*
730          * sc_off tracks the last written location in the
731          * Shared Scale (SC) table. Those shared codes might
732          * be in use. Updating them without quiescing the device
733          * can put the SDFEC device in an indeterminate state
734          */
735         if ((XSDFEC_REG_WIDTH_JUMP * offset) < xsdfec->sc_off) {
736                 dev_err(xsdfec->dev, "Might write to in use shared SC code");
737                 return -EINVAL;
738         }
739
740         for (reg = 0; reg < len; reg++) {
741                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
742                 (offset + reg) *  XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
743         }
744         xsdfec->sc_off = reg + (XSDFEC_REG_WIDTH_JUMP * offset);
745         return reg;
746 }
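/*
 * Bounds-check example for the write above: with XSDFEC_REG_WIDTH_JUMP = 4
 * and XSDFEC_SC_TABLE_DEPTH = 0x3fc, a write of len = 4 words at
 * offset = 0xfb reaches 4 * (0xfb + 4) = 0x3fc and is still accepted,
 * whereas offset = 0xfc would reach 0x400 and be rejected.
 */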
747
748 static int
749 xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
750                         u32 *sc_ptr, u32 len)
751 {
752         u32 reg;
753         u32 reg_addr;
754         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
755
756         if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
757                 dev_err(xsdfec->dev, "Access will exceed SC table length");
758                 return -EINVAL;
759         }
760
761         for (reg = 0; reg < len; reg++) {
762                 reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
763                         ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
764
765                 sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
766         }
767
768         return 0;
769 }
770
771 #define XSDFEC_LA_TABLE_DEPTH           (0xFFC)
772 static int
773 xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
774                       u32 *la_ptr, u32 len)
775 {
776         int reg;
777
778         if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
779                 dev_err(xsdfec->dev, "Write exceeds LA table length");
780                 return -EINVAL;
781         }
782
783         if  (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->la_off) {
784                 dev_err(xsdfec->dev, "Might write to in use shared LA code");
785                 return -EINVAL;
786         }
787
788         for (reg = 0; reg < len; reg++) {
789                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
790                                 (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
791                                 la_ptr[reg]);
792         }
793         xsdfec->la_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
794         return reg;
795 }
796
797 static int
798 xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
799                         u32 *la_ptr, u32 len)
800 {
801         u32 reg;
802         u32 reg_addr;
803         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
804
805         if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
806                 dev_err(xsdfec->dev, "Access will exceed LA table length");
807                 return -EINVAL;
808         }
809
810         for (reg = 0; reg < len; reg++) {
811                 reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
812                                 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
813
814                 la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
815         }
816
817         return 0;
818 }
819
820 #define XSDFEC_QC_TABLE_DEPTH           (0x7FFC)
821 static int
822 xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
823                       u32 offset, u32 *qc_ptr, u32 len)
824 {
825         int reg;
826
827         if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
828                 dev_err(xsdfec->dev, "Write exceeds QC table length");
829                 return -EINVAL;
830         }
831
832         if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->qc_off) {
833                 dev_err(xsdfec->dev, "Might write to in use shared QC code");
834                 return -EINVAL;
835         }
836
837         for (reg = 0; reg < len; reg++) {
838                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
839                  (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);
840         }
841
842         xsdfec->qc_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
843         return reg;
844 }
845
846 static int
847 xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
848                         u32 offset, u32 *qc_ptr, u32 len)
849 {
850         u32 reg;
851         u32 reg_addr;
852         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
853
854         if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
855                 dev_err(xsdfec->dev, "Access will exceed QC table length");
856                 return -EINVAL;
857         }
858
859         for (reg = 0; reg < len; reg++) {
860                 reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
861                  (offset + reg) * XSDFEC_REG_WIDTH_JUMP;
862
863                 qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
864         }
865
866         return 0;
867 }
868
869 static int
870 xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
871 {
872         struct xsdfec_ldpc_params *ldpc;
873         int err;
874
875         ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
876         if (!ldpc)
877                 return -ENOMEM;
878
879         err = copy_from_user(ldpc, arg, sizeof(*ldpc));
880         if (err) {
881                 dev_err(xsdfec->dev, "%s failed to copy from user for SDFEC%d",
882                         __func__, xsdfec->fec_id);
883                 err = -EFAULT;
884                 goto err_out;
885         }
886         if (xsdfec->code == XSDFEC_TURBO_CODE) {
887                 dev_err(xsdfec->dev, "%s: Unable to write LDPC to SDFEC%d check DT",
888                         __func__, xsdfec->fec_id);
889                 err = -EIO;
890                 goto err_out;
891         }
892         xsdfec->code = XSDFEC_LDPC_CODE;
893         /* Disable Write Protection before proceeding */
894         if (xsdfec->wr_protect)
895                 xsdfec_wr_protect(xsdfec, false);
896
897         /* Write Reg 0 */
898         err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);
899         if (err)
900                 goto err_out;
901
902         /* Write Reg 1 */
903         err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
904                                 ldpc->nm, ldpc->code_id);
905         if (err)
906                 goto err_out;
907
908         /* Write Reg 2 */
909         err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
910                                 ldpc->norm_type, ldpc->special_qc,
911                                 ldpc->no_final_parity, ldpc->max_schedule,
912                                 ldpc->code_id);
913         if (err)
914                 goto err_out;
915
916         /* Write Reg 3 */
917         err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
918                                 ldpc->la_off, ldpc->qc_off, ldpc->code_id);
919         if (err)
920                 goto err_out;
921
922         /* Write Shared Codes */
923         err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
924                                     ldpc->sc_table, ldpc->nlayers);
925         if (err < 0)
926                 goto err_out;
927
928         err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
929                                     ldpc->la_table, ldpc->nlayers);
930         if (err < 0)
931                 goto err_out;
932
933         err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
934                                     ldpc->qc_table, ldpc->nqc);
935         if (err < 0)
936                 goto err_out;
937
938         kfree(ldpc);
939         return 0;
940         /* Error Path */
941 err_out:
942         kfree(ldpc);
943         return err;
944 }
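/*
 * Caller-side sketch for XSDFEC_ADD_LDPC_CODE_PARAMS. Every numeric value
 * below is a placeholder; a real caller fills all fields (n, k, psize,
 * nlayers, nqc, the sc/la/qc offsets and shared tables, ...) from the LDPC
 * code definition generated for the SD-FEC core:
 *
 *	struct xsdfec_ldpc_params *ldpc = calloc(1, sizeof(*ldpc));
 *
 *	ldpc->code_id = 0;
 *	ldpc->n = 5184;
 *	ldpc->k = 4320;
 *	// ... remaining fields and tables from the code specification
 *	if (ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, ldpc) < 0)
 *		perror("XSDFEC_ADD_LDPC_CODE_PARAMS");
 *	free(ldpc);
 */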
945
946 static int
947 xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
948 {
949         struct xsdfec_ldpc_params *ldpc_params;
950         int err = 0;
951
952         if (xsdfec->code == XSDFEC_TURBO_CODE) {
953                 dev_err(xsdfec->dev,
954                         "%s: SDFEC%d is configured for TURBO, check DT",
955                                 __func__, xsdfec->fec_id);
956                 return -EIO;
957         }
958
959         ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);
960         if (!ldpc_params)
961                 return -ENOMEM;
962
963         err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
964         if (err) {
965                 dev_err(xsdfec->dev, "%s failed to copy from user for SDFEC%d",
966                         __func__, xsdfec->fec_id);
967                 err = -EFAULT;
968                 goto err_out;
969         }
970
971         err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,
972                                        ldpc_params);
973         if (err)
974                 goto err_out;
975
976         err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,
977                                        ldpc_params);
978         if (err)
979                 goto err_out;
980
981         err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,
982                                        ldpc_params);
983         if (err)
984                 goto err_out;
985
986         err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,
987                                        ldpc_params);
988         if (err)
989                 goto err_out;
990
991         /*
992          * Collect the shared table values, needs to happen after reading
993          * the registers
994          */
995         err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
996                                       ldpc_params->sc_table,
997                                       ldpc_params->nlayers);
998         if (err < 0)
999                 goto err_out;
1000
1001         err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
1002                                       ldpc_params->la_table,
1003                                       ldpc_params->nlayers);
1004         if (err < 0)
1005                 goto err_out;
1006
1007         err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
1008                                       ldpc_params->qc_table,
1009                                       ldpc_params->nqc);
1010         if (err < 0)
1011                 goto err_out;
1012
1013         err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
1014         if (err) {
1015                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
1016                         __func__, xsdfec->fec_id);
1017                 err = -EFAULT;
1018         }
1019
1020         kfree(ldpc_params);
1021         return err;
1022         /* Error Path */
1023 err_out:
1024         kfree(ldpc_params);
1025         return err;
1026 }
1027
1028 static int
1029 xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
1030 {
1031         enum xsdfec_order order;
1032
1033         if (copy_from_user(&order, arg, sizeof(order)))
1034                 return -EFAULT;
1035
1036         if (order <= XSDFEC_INVALID_ORDER || order >= XSDFEC_ORDER_MAX) {
1037                 dev_err(xsdfec->dev,
1038                         "%s invalid order value %d for SDFEC%d",
1039                         __func__, order, xsdfec->fec_id);
1040                 return -EINVAL;
1041         }
1042
1043         /* Verify Device has not started */
1044         if (xsdfec->state == XSDFEC_STARTED) {
1045                 dev_err(xsdfec->dev,
1046                         "%s attempting to set Order while started for SDFEC%d",
1047                         __func__, xsdfec->fec_id);
1048                 return -EIO;
1049         }
1050
1051         xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (order - 1));
1052
1053         xsdfec->order = order;
1054
1055         return 0;
1056 }
1057
1058 static int
1059 xsdfec_set_bypass(struct xsdfec_dev *xsdfec, void __user *arg)
1060 {
1061         unsigned long bypass;
1062
1063         if (copy_from_user(&bypass, arg, sizeof(bypass)))
1064                 return -EFAULT;
1065
1066         if (bypass > 1) {
1064                 dev_err(xsdfec->dev,
1065                         "%s invalid bypass value %ld for SDFEC%d",
1066                         __func__, bypass, xsdfec->fec_id);
1067                 return -EINVAL;
1068         }
1069
1070         /* Verify Device has not started */
1071         if (xsdfec->state == XSDFEC_STARTED) {
1072                 dev_err(xsdfec->dev,
1073                         "%s attempting to set bypass while started for SDFEC%d",
1074                         __func__, xsdfec->fec_id);
1075                 return -EIO;
1076         }
1077
1078         xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, bypass);
1079
1080         return 0;
1081 }
1082
1083 static int
1084 xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *is_active)
1085 {
1086         u32 reg_value;
1087
1088         reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
1089         /* write the activity flag to user space; !! maps it to 0/1 */
1090         if (put_user(!!(reg_value & XSDFEC_IS_ACTIVITY_SET), is_active))
1091                 return -EFAULT;
1091
1092         return 0;
1093 }
1094
1095 static int xsdfec_start(struct xsdfec_dev *xsdfec)
1096 {
1097         u32 regread;
1098
1099         /* Verify Code is loaded */
1100         if (xsdfec->code == XSDFEC_CODE_INVALID) {
1101                 dev_err(xsdfec->dev,
1102                         "%s : set code before start for SDFEC%d",
1103                         __func__, xsdfec->fec_id);
1104                 return -EINVAL;
1105         }
1106         regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
1107         regread &= 0x1;
1108         if (regread != (xsdfec->code - 1)) {
1109                 dev_err(xsdfec->dev,
1110                         "%s SDFEC HW code does not match driver code, reg %d, code %d",
1111                         __func__, regread, (xsdfec->code - 1));
1112                 return -EINVAL;
1113         }
1114
1115         /* Verify Order has been set */
1116         if (xsdfec->order == XSDFEC_INVALID_ORDER) {
1117                 dev_err(xsdfec->dev,
1118                         "%s : set order before starting SDFEC%d",
1119                         __func__, xsdfec->fec_id);
1120                 return -EINVAL;
1121         }
1122
1123         /* Set AXIS width */
1124         xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, 0);
1125         /* Set AXIS enable */
1126         xsdfec_regwrite(xsdfec,
1127                         XSDFEC_AXIS_ENABLE_ADDR,
1128                         XSDFEC_AXIS_ENABLE_MASK);
1129         /* Write Protect Code and Registers */
1130         xsdfec_wr_protect(xsdfec, true);
1131         /* Done */
1132         xsdfec->state = XSDFEC_STARTED;
1133         return 0;
1134 }
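/*
 * Caller-side bring-up sketch implied by the checks above: the FEC code
 * itself comes from the device tree, so userspace only selects the order
 * and then starts the core. XSDFEC_INVALID_ORDER + 1 stands in for a real
 * enum xsdfec_order value from the UAPI header (anything strictly between
 * XSDFEC_INVALID_ORDER and XSDFEC_ORDER_MAX):
 *
 *	enum xsdfec_order order = XSDFEC_INVALID_ORDER + 1;
 *
 *	if (ioctl(fd, XSDFEC_SET_ORDER, &order) == 0)
 *		ioctl(fd, XSDFEC_START_DEV);
 */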
1135
1136 static int
1137 xsdfec_stop(struct xsdfec_dev *xsdfec)
1138 {
1139         u32 regread;
1140
1141         if (xsdfec->state != XSDFEC_STARTED)
1142                 dev_err(xsdfec->dev, "Device not started correctly");
1143         /* Disable Write Protect */
1144         xsdfec_wr_protect(xsdfec, false);
1145         /* Disable AXIS_ENABLE register */
1146         regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
1147         regread &= (~XSDFEC_AXIS_ENABLE_MASK);
1148         xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
1149         /* Stop */
1150         xsdfec->state = XSDFEC_STOPPED;
1151         return 0;
1152 }
1153
1154 /*
1155  * Reset will happen asynchronously
1156  * since there is no in-band reset register
1157  * Prepare driver for reset
1158  */
1159
1160 static int
1161 xsdfec_reset_req(struct xsdfec_dev *xsdfec)
1162 {
1163         xsdfec->state = XSDFEC_INIT;
1164         xsdfec->order = XSDFEC_INVALID_ORDER;
1165         xsdfec->sc_off = 0;
1166         xsdfec->la_off = 0;
1167         xsdfec->qc_off = 0;
1168         xsdfec->wr_protect = false;
1169         atomic_set(&xsdfec->isr_err_count, 0);
1170         atomic_set(&xsdfec->uecc_count, 0);
1171         atomic_set(&xsdfec->cecc_count, 0);
1172         atomic_inc(&xsdfec->reset_count);
1173         return 0;
1174 }
1175
1176 static long
1177 xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
1178 {
1179         struct xsdfec_dev *xsdfec = fptr->private_data;
1180         void __user *arg = NULL;
1181         int rval = -EINVAL;
1182         int err = 0;
1183
1184         if (!xsdfec)
1185                 return rval;
1186
1187         /* In failed state allow only reset and get status IOCTLs */
1188         if (xsdfec->state == XSDFEC_NEEDS_RESET &&
1189             (cmd != XSDFEC_RESET_REQ && cmd != XSDFEC_GET_STATUS)) {
1190                 dev_err(xsdfec->dev,
1191                         "SDFEC%d in failed state. Reset Required",
1192                         xsdfec->fec_id);
1193                 return -EPERM;
1194         }
1195
1196         if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) {
1197                 dev_err(xsdfec->dev, "Not a xilinx sdfec ioctl");
1198                 return -ENOTTY;
1199         }
1200
1201         /* check if ioctl argument is present and valid */
1202         if (_IOC_DIR(cmd) != _IOC_NONE) {
1203                 arg = (void __user *)data;
1204                 if (!arg) {
1205                         dev_err(xsdfec->dev, "xilinx sdfec ioctl argument is NULL Pointer");
1206                         return rval;
1207                 }
1208         }
1209
1210         /* Access check of the argument if present */
1211         if (_IOC_DIR(cmd) & _IOC_READ)
1212                 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
1213         else if (_IOC_DIR(cmd) & _IOC_WRITE)
1214                 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
1215
1216         if (err) {
1217                 dev_err(xsdfec->dev, "Invalid xilinx sdfec ioctl argument");
1218                 return -EFAULT;
1219         }
1220
1221         switch (cmd) {
1222         case XSDFEC_START_DEV:
1223                 rval = xsdfec_start(xsdfec);
1224                 break;
1225         case XSDFEC_STOP_DEV:
1226                 rval = xsdfec_stop(xsdfec);
1227                 break;
1228         case XSDFEC_RESET_REQ:
1229                 rval = xsdfec_reset_req(xsdfec);
1230                 break;
1231         case XSDFEC_GET_STATUS:
1232                 rval = xsdfec_get_status(xsdfec, arg);
1233                 break;
1234         case XSDFEC_GET_CONFIG:
1235                 rval = xsdfec_get_config(xsdfec, arg);
1236                 break;
1237         case XSDFEC_SET_IRQ:
1238                 rval = xsdfec_set_irq(xsdfec, arg);
1239                 break;
1240         case XSDFEC_SET_TURBO:
1241                 rval = xsdfec_set_turbo(xsdfec, arg);
1242                 break;
1243         case XSDFEC_GET_TURBO:
1244                 rval = xsdfec_get_turbo(xsdfec, arg);
1245                 break;
1246         case XSDFEC_ADD_LDPC_CODE_PARAMS:
1247                 rval  = xsdfec_add_ldpc(xsdfec, arg);
1248                 break;
1249         case XSDFEC_GET_LDPC_CODE_PARAMS:
1250                 rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
1251                 break;
1252         case XSDFEC_SET_ORDER:
1253                 rval = xsdfec_set_order(xsdfec, arg);
1254                 break;
1255         case XSDFEC_SET_BYPASS:
1256                 rval = xsdfec_set_bypass(xsdfec, arg);
1257                 break;
1258         case XSDFEC_IS_ACTIVE:
1259                 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
1260                 break;
1261         default:
1262                 /* Should not get here */
1263                 dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
1264                 break;
1265         }
1266         return rval;
1267 }
1268
1269 static unsigned int
1270 xsdfec_poll(struct file *file, poll_table *wait)
1271 {
1272         unsigned int mask;
1273         struct xsdfec_dev *xsdfec = file->private_data;
1274
1275         if (!xsdfec)
1276                 return POLLNVAL | POLLHUP;
1277
1278         poll_wait(file, &xsdfec->waitq, wait);
1279
1280         /* XSDFEC ISR detected an error */
1281         if (xsdfec->state == XSDFEC_NEEDS_RESET)
1282                 mask = POLLIN | POLLRDNORM;
1283         else
1284                 mask = POLLPRI | POLLERR;
1285
1286         return mask;
1287 }
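/*
 * Caller-side sketch of the error-monitoring flow supported by the poll
 * handler above: the driver reports POLLIN once a fatal ISR/ECC error has
 * moved the core to XSDFEC_NEEDS_RESET, after which userspace requests a
 * reset and performs the out-of-band reset of the core itself:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		ioctl(fd, XSDFEC_RESET_REQ);
 *		// trigger the external/out-of-band reset of the core here
 *	}
 */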
1288
1289 static const struct file_operations xsdfec_fops = {
1290         .owner = THIS_MODULE,
1291         .open = xsdfec_dev_open,
1292         .release = xsdfec_dev_release,
1293         .unlocked_ioctl = xsdfec_dev_ioctl,
1294         .poll = xsdfec_poll,
1295 };
1296
1297 static int
1298 xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1299 {
1300         struct device *dev = xsdfec->dev;
1301         struct device_node *node = dev->of_node;
1302         int rval;
1303         const char *fec_code;
1304         const char *fec_op_mode;
1305
1306         rval = of_property_read_string(node,
1307                                        "xlnx,sdfec-op-mode",
1308                                        &fec_op_mode);
1309         if (rval < 0) {
1310                 dev_err(dev, "xlnx,sdfec-op-mode not in DT");
1311                 return rval;
1312         }
1313
1314         if (!strcasecmp(fec_op_mode, "encode")) {
1315                 xsdfec->op_mode = XSDFEC_ENCODE;
1316         } else if (!strcasecmp(fec_op_mode, "decode")) {
1317                 xsdfec->op_mode = XSDFEC_DECODE;
1318         } else {
1319                 dev_err(dev, "Encode or Decode not specified in DT");
1320                 return -EINVAL;
1321         }
1322
1323         rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1324         if (rval < 0) {
1325                 dev_err(dev, "xlnx,sdfec-code not in DT");
1326                 return rval;
1327         }
1328
1329         if (!strcasecmp(fec_code, "ldpc")) {
1330                 xsdfec->code = XSDFEC_LDPC_CODE;
1331         } else if (!strcasecmp(fec_code, "turbo")) {
1332                 xsdfec->code = XSDFEC_TURBO_CODE;
1333         } else {
1334                 dev_err(xsdfec->dev, "Invalid Code in DT");
1335                 return -EINVAL;
1336         }
1337
1338         /* Write the selected FEC Code to the CODE Register */
1339         xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));
1340
1341         return 0;
1342 }
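/*
 * Illustrative device tree fragment matching the properties parsed above;
 * the node name, unit address, reg window and interrupt specifier are
 * placeholders for whatever the actual hardware design provides:
 *
 *	sd_fec_0: sd-fec@a0040000 {
 *		compatible = "xlnx,fec-engine";
 *		reg = <0x0 0xa0040000 0x0 0x40000>;
 *		interrupt-parent = <&gic>;
 *		interrupts = <0 89 4>;
 *		xlnx,sdfec-code = "ldpc";
 *		xlnx,sdfec-op-mode = "encode";
 *	};
 */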
1343
1344 static void
1345 xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)
1346 {
1347         u32 cecc, uecc;
1348         int uecc_cnt;
1349
1350         cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
1351         uecc = ecc_err & XSDFEC_ECC_ISR_MBE;
1352
1353         uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
1354         atomic_add(hweight32(cecc), &xsdfec->cecc_count);
1355
1356         if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1357                 dev_err(xsdfec->dev,
1358                         "Multi-bit error on xsdfec%d. Needs reset",
1359                         xsdfec->fec_id);
1360         }
1361
1362         /* Clear ECC errors */
1363         xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1364 }
1365
1366 static void
1367 xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)
1368 {
1369         int isr_err_cnt;
1370
1371         /* Update ISR error counts */
1372         isr_err_cnt = atomic_add_return(hweight32(isr_err),
1373                                         &xsdfec->isr_err_count);
1374         if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1375                 dev_err(xsdfec->dev,
1376                         "Tlast, DIN_WORDS or DOUT_WORDS not correct");
1377         }
1378
1379         /* Clear ISR error status */
1380         xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, 0);
1381 }
1382
1383 static void
1384 xsdfec_reset_required(struct xsdfec_dev *xsdfec)
1385 {
1386         xsdfec->state = XSDFEC_NEEDS_RESET;
1387 }
1388
1389 static irqreturn_t
1390 xsdfec_irq_thread(int irq, void *dev_id)
1391 {
1392         struct xsdfec_dev *xsdfec = dev_id;
1393         irqreturn_t ret = IRQ_HANDLED;
1394         u32 ecc_err;
1395         u32 isr_err;
1396         bool fatal_err = false;
1397
1398         WARN_ON(xsdfec->irq != irq);
1399
1400         /* Mask Interrupts */
1401         xsdfec_isr_enable(xsdfec, false);
1402         xsdfec_ecc_isr_enable(xsdfec, false);
1403
1404         /* Read Interrupt Status Registers */
1405         ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1406         isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
1407
1408         if (ecc_err & XSDFEC_ECC_ISR_MBE) {
1409                 /* Multi-Bit Errors need Reset */
1410                 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1411                 xsdfec_reset_required(xsdfec);
1412                 fatal_err = true;
1413         } else if (isr_err & XSDFEC_ISR_MASK) {
1414                 /*
1415                  * Tlast, DIN_WORDS and DOUT_WORDS related
1416                  * errors need Reset
1417                  */
1418                 xsdfec_log_isr_errors(xsdfec, isr_err);
1419                 xsdfec_reset_required(xsdfec);
1420                 fatal_err = true;
1421         } else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
1422                 /* Correctable ECC Errors */
1423                 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1424         } else {
1425                 ret = IRQ_NONE;
1426         }
1427
1428         if (fatal_err)
1429                 wake_up_interruptible(&xsdfec->waitq);
1430
1431         /* Unmask Interrupts */
1432         xsdfec_isr_enable(xsdfec, true);
1433         xsdfec_ecc_isr_enable(xsdfec, true);
1434
1435         return ret;
1436 }
1437
1438 static int
1439 xsdfec_probe(struct platform_device *pdev)
1440 {
1441         struct xsdfec_dev *xsdfec;
1442         struct device *dev;
1443         struct device *dev_create;
1444         struct resource *res;
1445         int err;
1446         bool irq_enabled = true;
1447
1448         xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1449         if (!xsdfec)
1450                 return -ENOMEM;
1451
1452         xsdfec->dev = &pdev->dev;
1453         xsdfec->fec_id = atomic_read(&xsdfec_ndevs);
1454
1455         dev = xsdfec->dev;
1456         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1457         xsdfec->regs = devm_ioremap_resource(dev, res);
1458         if (IS_ERR(xsdfec->regs)) {
1459                 dev_err(dev, "Unable to map resource");
1460                 err = PTR_ERR(xsdfec->regs);
1461                 goto err_xsdfec_dev;
1462         }
1463
1464         xsdfec->irq = platform_get_irq(pdev, 0);
1465         if (xsdfec->irq < 0) {
1466                 dev_dbg(dev, "platform_get_irq failed");
1467                 irq_enabled = false;
1468         }
1469
1470         err = xsdfec_parse_of(xsdfec);
1471         if (err < 0)
1472                 goto err_xsdfec_dev;
1473
1474         /* Save driver private data */
1475         platform_set_drvdata(pdev, xsdfec);
1476
1477         if (irq_enabled) {
1478                 init_waitqueue_head(&xsdfec->waitq);
1479                 /* Register IRQ thread */
1480                 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1481                                                 xsdfec_irq_thread,
1482                                                 IRQF_ONESHOT,
1483                                                 "xilinx-sdfec16",
1484                                                 xsdfec);
1485                 if (err < 0) {
1486                         dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1487                         goto err_xsdfec_dev;
1488                 }
1489         }
1490
1491         cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
1492         xsdfec->xsdfec_cdev.owner = THIS_MODULE;
1493         err = cdev_add(&xsdfec->xsdfec_cdev,
1494                        MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id), 1);
1495         if (err < 0) {
1496                 dev_err(dev, "cdev_add failed");
1497                 err = -EIO;
1498                 goto err_xsdfec_dev;
1499         }
1500
1501         if (!xsdfec_class) {
1502                 err = -EIO;
1503                 dev_err(dev, "xsdfec class not created correctly");
1504                 goto err_xsdfec_cdev;
1505         }
1506
1507         dev_create = device_create(xsdfec_class, dev,
1508                                    MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id),
1509                                    xsdfec, "xsdfec%d", xsdfec->fec_id);
1510         if (IS_ERR(dev_create)) {
1511                 dev_err(dev, "unable to create device");
1512                 err = PTR_ERR(dev_create);
1513                 goto err_xsdfec_cdev;
1514         }
1515
1516         atomic_set(&xsdfec->open_count, 1);
1517         dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->fec_id);
1518         atomic_inc(&xsdfec_ndevs);
1519         return 0;
1520
1521         /* Failure cleanup */
1522 err_xsdfec_cdev:
1523         cdev_del(&xsdfec->xsdfec_cdev);
1524 err_xsdfec_dev:
1525         return err;
1526 }
1527
1528 static int
1529 xsdfec_remove(struct platform_device *pdev)
1530 {
1531         struct xsdfec_dev *xsdfec;
1532         struct device *dev = &pdev->dev;
1533
1534         xsdfec = platform_get_drvdata(pdev);
1535         if (!xsdfec)
1536                 return -ENODEV;
1537         dev = xsdfec->dev;
1538         if (!xsdfec_class) {
1539                 dev_err(dev, "xsdfec_class is NULL");
1540                 return -EIO;
1541         }
1542
1543         device_destroy(xsdfec_class,
1544                        MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id));
1545         cdev_del(&xsdfec->xsdfec_cdev);
1546         atomic_dec(&xsdfec_ndevs);
1547         return 0;
1548 }
1549
1550 static const struct of_device_id xsdfec_of_match[] = {
1551         { .compatible = "xlnx,fec-engine", },
1552         { /* end of table */ }
1553 };
1554 MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1555
1556 static struct platform_driver xsdfec_driver = {
1557         .driver = {
1558                 .name = "xilinx-sdfec",
1559                 .of_match_table = xsdfec_of_match,
1560         },
1561         .probe = xsdfec_probe,
1562         .remove =  xsdfec_remove,
1563 };
1564
1565 static int __init xsdfec_init_mod(void)
1566 {
1567         int err;
1568
1569         xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
1570         if (IS_ERR(xsdfec_class)) {
1571                 err = PTR_ERR(xsdfec_class);
1572                 pr_err("%s : Unable to register xsdfec class", __func__);
1573                 return err;
1574         }
1575
1576         err = alloc_chrdev_region(&xsdfec_devt,
1577                                   0, DRIVER_MAX_DEV, DRIVER_NAME);
1578         if (err < 0) {
1579                 pr_err("%s : Unable to get major number", __func__);
1580                 goto err_xsdfec_class;
1581         }
1582
1583         err = platform_driver_register(&xsdfec_driver);
1584         if (err < 0) {
1585                 pr_err("%s Unable to register %s driver",
1586                        __func__, DRIVER_NAME);
1587                 goto err_xsdfec_drv;
1588         }
1589         return 0;
1590
1591         /* Error Path */
1592 err_xsdfec_drv:
1593         unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1594 err_xsdfec_class:
1595         class_destroy(xsdfec_class);
1596         return err;
1597 }
1598
1599 static void __exit xsdfec_cleanup_mod(void)
1600 {
1601         platform_driver_unregister(&xsdfec_driver);
1602         unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1603         class_destroy(xsdfec_class);
1604         xsdfec_class = NULL;
1605 }
1606
1607 module_init(xsdfec_init_mod);
1608 module_exit(xsdfec_cleanup_mod);
1609
1610 MODULE_AUTHOR("Xilinx, Inc");
1611 MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
1612 MODULE_LICENSE("GPL");
1613 MODULE_VERSION(DRIVER_VERSION);