1 /*
2  * Xilinx SDFEC
3  *
4  * Copyright (C) 2016 - 2017 Xilinx, Inc.
5  *
6  * Description:
7  * This driver is developed for the SDFEC16 (Soft Decision FEC 16nm)
8  * IP. It exposes a character device interface in /dev and supports the
9  * open(), close() and ioctl() file operations.
10  *
11  * This program is free software: you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  */
24
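/*
 * Illustrative userspace sketch (not part of the driver): shows how an
 * application might exercise the char device described above. The
 * "/dev/xsdfec0" node name and the <misc/xilinx_sdfec.h> include path are
 * assumptions based on the "xsdfec%d" device name and the uapi header used
 * below; XSDFEC_GET_STATUS and struct xsdfec_status come from that uapi
 * header.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <misc/xilinx_sdfec.h>
 *
 *   int main(void)
 *   {
 *           struct xsdfec_status st;
 *           int fd = open("/dev/xsdfec0", O_RDWR);  // one open at a time
 *
 *           if (fd < 0)
 *                   return 1;
 *           if (ioctl(fd, XSDFEC_GET_STATUS, &st) == 0)
 *                   printf("state %d code %d\n", st.state, st.code);
 *           close(fd);
 *           return 0;
 *   }
 */
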
25 #include <linux/cdev.h>
26 #include <linux/device.h>
27 #include <linux/fs.h>
28 #include <linux/io.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/of_platform.h>
34 #include <linux/platform_device.h>
35 #include <linux/poll.h>
36 #include <linux/slab.h>
37 #include <linux/uaccess.h>
38
39 #include <uapi/misc/xilinx_sdfec.h>
40
41 #define DRIVER_NAME     "xilinx_sdfec"
42 #define DRIVER_VERSION  "0.3"
43 #define DRIVER_MAX_DEV  (6)
44
45 static  struct class *xsdfec_class;
46 static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
47 static dev_t xsdfec_devt;
48
49 /* Xilinx SDFEC Register Map */
50 #define XSDFEC_AXI_WR_PROTECT_ADDR              (0x00000)
51 #define XSDFEC_CODE_WR_PROTECT_ADDR             (0x00004)
52 #define XSDFEC_ACTIVE_ADDR                      (0x00008)
53 #define XSDFEC_AXIS_WIDTH_ADDR                  (0x0000c)
54 #define XSDFEC_AXIS_ENABLE_ADDR                 (0x00010)
55 #define XSDFEC_AXIS_ENABLE_MASK                 (0x0001F)
56 #define XSDFEC_FEC_CODE_ADDR                    (0x00014)
57 #define XSDFEC_ORDER_ADDR                       (0x00018)
58
59 /* Interrupt Status Register Bit Mask */
60 #define XSDFEC_ISR_MASK                         (0x0003F)
61 /* Interrupt Status Register */
62 #define XSDFEC_ISR_ADDR                         (0x0001c)
63 /* Write Only - Interrupt Enable Register */
64 #define XSDFEC_IER_ADDR                         (0x00020)
65 /* Write Only - Interrupt Disable Register */
66 #define XSDFEC_IDR_ADDR                         (0x00024)
67 /* Read Only - Interrupt Mask Register */
68 #define XSDFEC_IMR_ADDR                         (0x00028)
69
70 /* Single Bit Errors */
71 #define XSDFEC_ECC_ISR_SBE                      (0x7FF)
72 /* Multi Bit Errors */
73 #define XSDFEC_ECC_ISR_MBE                      (0x3FF800)
74 /* ECC Interrupt Status Bit Mask */
75 #define XSDFEC_ECC_ISR_MASK     (XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)
76
77 /* Multi Bit Error Position */
78 #define XSDFEC_ECC_MULTI_BIT_POS                (11)
79 #define XSDFEC_ERROR_MAX_THRESHOLD              (100)
80
81 /* ECC Interrupt Status Register */
82 #define XSDFEC_ECC_ISR_ADDR                     (0x0002c)
83 /* Write Only - ECC Interrupt Enable Register */
84 #define XSDFEC_ECC_IER_ADDR                     (0x00030)
85 /* Write Only - ECC Interrupt Disable Register */
86 #define XSDFEC_ECC_IDR_ADDR                     (0x00034)
87 /* Read Only - ECC Interrupt Mask Register */
88 #define XSDFEC_ECC_IMR_ADDR                     (0x00038)
89
90 #define XSDFEC_BYPASS_ADDR                      (0x0003c)
91 #define XSDFEC_TEST_EMA_ADDR_BASE               (0x00080)
92 #define XSDFEC_TEST_EMA_ADDR_HIGH               (0x00089)
93 #define XSDFEC_TURBO_ADDR                       (0x00100)
94 #define XSDFEC_LDPC_CODE_REG0_ADDR_BASE         (0x02000)
95 #define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH         (0x021fc)
96 #define XSDFEC_LDPC_CODE_REG1_ADDR_BASE         (0x02004)
97 #define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH         (0x02200)
98 #define XSDFEC_LDPC_CODE_REG2_ADDR_BASE         (0x02008)
99 #define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH         (0x02204)
100 #define XSDFEC_LDPC_CODE_REG3_ADDR_BASE         (0x0200c)
101 #define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH         (0x02208)
102
103 /**
104  * struct xsdfec_dev - Driver data for SDFEC
105  * @regs: device physical base address
106  * @dev: pointer to device struct
107  * @fec_id: Instance number
108  * @intr_enabled: indicates IRQ enabled
109  * @wr_protect: indicates Write Protect enabled
110  * @code: LDPC or Turbo Codes being used
111  * @order: In-Order or Out-of-Order
112  * @state: State of the SDFEC device
113  * @op_mode: Operating in Encode or Decode
114  * @isr_err_count: Count of ISR errors
115  * @cecc_count: Count of Correctable ECC errors (SBE)
116  * @uecc_count: Count of Uncorrectable ECC errors (MBE)
117  * @reset_count: Count of Resets requested
118  * @open_count: Count of char device being opened
119  * @irq: IRQ number
120  * @xsdfec_cdev: Character device handle
121  * @sc_off: Shared Scale Table Offset
122  * @qc_off: Shared Circulant Table Offset
123  * @la_off: Shared Layer Table Offset
124  * @waitq: Driver wait queue
125  *
126  * This structure contains necessary state for SDFEC driver to operate
127  */
128 struct xsdfec_dev {
129         void __iomem *regs;
130         struct device *dev;
131         s32  fec_id;
132         bool intr_enabled;
133         bool wr_protect;
134         enum xsdfec_code code;
135         enum xsdfec_order order;
136         enum xsdfec_state state;
137         enum xsdfec_op_mode op_mode;
138         atomic_t isr_err_count;
139         atomic_t cecc_count;
140         atomic_t uecc_count;
141         atomic_t reset_count;
142         atomic_t open_count;
143         int  irq;
144         struct cdev xsdfec_cdev;
145         int sc_off;
146         int qc_off;
147         int la_off;
148         wait_queue_head_t waitq;
149 };
150
151 static inline void
152 xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
153 {
154         if (xsdfec->wr_protect) {
155                 dev_err(xsdfec->dev, "SDFEC in write protect");
156                 return;
157         }
158
159         dev_dbg(xsdfec->dev,
160                 "Writing 0x%x to offset 0x%x", value, addr);
161         iowrite32(value, xsdfec->regs + addr);
162 }
163
164 static inline u32
165 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
166 {
167         u32 rval;
168
169         rval = ioread32(xsdfec->regs + addr);
170         dev_dbg(xsdfec->dev,
171                 "Read value = 0x%x from offset 0x%x",
172                 rval, addr);
173         return rval;
174 }
175
176 #define XSDFEC_WRITE_PROTECT_ENABLE     (1)
177 #define XSDFEC_WRITE_PROTECT_DISABLE    (0)
178 static void
179 xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
180 {
181         if (wr_pr) {
182                 xsdfec_regwrite(xsdfec,
183                                 XSDFEC_CODE_WR_PROTECT_ADDR,
184                                 XSDFEC_WRITE_PROTECT_ENABLE);
185                 xsdfec_regwrite(xsdfec,
186                                 XSDFEC_AXI_WR_PROTECT_ADDR,
187                                 XSDFEC_WRITE_PROTECT_ENABLE);
188         } else {
189                 xsdfec_regwrite(xsdfec,
190                                 XSDFEC_AXI_WR_PROTECT_ADDR,
191                                 XSDFEC_WRITE_PROTECT_DISABLE);
192                 xsdfec_regwrite(xsdfec,
193                                 XSDFEC_CODE_WR_PROTECT_ADDR,
194                                 XSDFEC_WRITE_PROTECT_DISABLE);
195         }
196         xsdfec->wr_protect = wr_pr;
197 }
198
199 static int
200 xsdfec_dev_open(struct inode *iptr, struct file *fptr)
201 {
202         struct xsdfec_dev *xsdfec;
203
204         xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
205         if (!xsdfec)
206                 return  -EAGAIN;
207
208         /* Only one open per device at a time */
209         if (!atomic_dec_and_test(&xsdfec->open_count)) {
210                 atomic_inc(&xsdfec->open_count);
211                 return -EBUSY;
212         }
213
214         fptr->private_data = xsdfec;
215         return 0;
216 }
217
218 static int
219 xsdfec_dev_release(struct inode *iptr, struct file *fptr)
220 {
221         struct xsdfec_dev *xsdfec;
222
223         xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
224         if (!xsdfec)
225                 return -EAGAIN;
226
227         atomic_inc(&xsdfec->open_count);
228         return 0;
229 }
230
231 #define XSDFEC_IS_ACTIVITY_SET  (0x1)
232 static int
233 xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
234 {
235         struct xsdfec_status status;
236         int err = 0;
237
238         status.fec_id = xsdfec->fec_id;
239         status.state = xsdfec->state;
240         status.code = xsdfec->code;
241         status.order = xsdfec->order;
242         status.mode = xsdfec->op_mode;
243         status.activity  =
244                 (xsdfec_regread(xsdfec,
245                                 XSDFEC_ACTIVE_ADDR) &
246                                 XSDFEC_IS_ACTIVITY_SET);
247         status.cecc_count = atomic_read(&xsdfec->cecc_count);
248
249         err = copy_to_user(arg, &status, sizeof(status));
250         if (err) {
251                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
252                         __func__, xsdfec->fec_id);
253                 err = -EFAULT;
254         }
255         return err;
256 }
257
258 static int
259 xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
260 {
261         struct xsdfec_config config;
262         int err = 0;
263
264         config.fec_id = xsdfec->fec_id;
265         config.state = xsdfec->state;
266         config.code = xsdfec->code;
267         config.mode = xsdfec->op_mode;
268         config.order = xsdfec->order;
269
270         err = copy_to_user(arg, &config, sizeof(config));
271         if (err) {
272                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
273                         __func__, xsdfec->fec_id);
274                 err = -EFAULT;
275         }
276         return err;
277 }
278
279 static int
280 xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
281 {
282         u32 mask_read;
283
284         if (enable) {
285                 /* Enable */
286                 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
287                                 XSDFEC_ISR_MASK);
288                 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
289                 if (mask_read & XSDFEC_ISR_MASK) {
290                         dev_err(xsdfec->dev,
291                                 "SDFEC enabling irq with IER failed");
292                         return -EIO;
293                 }
294         } else {
295                 /* Disable */
296                 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
297                                 XSDFEC_ISR_MASK);
298                 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
299                 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
300                         dev_err(xsdfec->dev,
301                                 "SDFEC disabling irq with IDR failed");
302                         return -EIO;
303                 }
304         }
305         return 0;
306 }
307
308 static int
309 xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
310 {
311         u32 mask_read;
312
313         if (enable) {
314                 /* Enable */
315                 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
316                                 XSDFEC_ECC_ISR_MASK);
317                 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
318                 if (mask_read & XSDFEC_ECC_ISR_MASK) {
319                         dev_err(xsdfec->dev,
320                                 "SDFEC enabling ECC irq with ECC IER failed");
321                         return -EIO;
322                 }
323         } else {
324                 /* Disable */
325                 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
326                                 XSDFEC_ECC_ISR_MASK);
327                 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
328                 if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
329                         dev_err(xsdfec->dev,
330                                 "SDFEC disable ECC irq with ECC IDR failed");
331                         return -EIO;
332                 }
333         }
334         return 0;
335 }
336
337 static int
338 xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
339 {
340         struct xsdfec_irq  irq;
341         int err = 0;
342
343         err = copy_from_user(&irq, arg, sizeof(irq));
344         if (err) {
345                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
346                         __func__, xsdfec->fec_id);
347                 return -EFAULT;
348         }
349
350         /* Setup tlast related IRQ */
351         if (irq.enable_isr) {
352                 err = xsdfec_isr_enable(xsdfec, true);
353                 if (err < 0)
354                         return err;
355         }
356
357         /* Setup ECC related IRQ */
358         if (irq.enable_ecc_isr) {
359                 err = xsdfec_ecc_isr_enable(xsdfec, true);
360                 if (err < 0)
361                         return err;
362         }
363
364         return 0;
365 }
366
367 #define XSDFEC_TURBO_SCALE_MASK         (0xF)
368 #define XSDFEC_TURBO_SCALE_BIT_POS      (8)
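/*
 * Layout of the Turbo register implied by the pack/unpack logic in
 * xsdfec_set_turbo()/xsdfec_get_turbo() below: the scale factor sits in
 * bits [11:8] and the algorithm select in the low bits (bit 0 as read
 * back), i.e.
 *
 *   turbo_write = ((turbo.scale & 0xF) << 8) | turbo.alg;
 */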
369 static int
370 xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
371 {
372         struct xsdfec_turbo turbo;
373         int err = 0;
374         u32 turbo_write = 0;
375
376         err = copy_from_user(&turbo, arg, sizeof(turbo));
377         if (err) {
378                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
379                         __func__, xsdfec->fec_id);
380                 return -EFAULT;
381         }
382
383         /* Check to see what device tree says about the FEC codes */
384         if (xsdfec->code == XSDFEC_LDPC_CODE) {
385                 dev_err(xsdfec->dev,
386                         "%s: Unable to write Turbo to SDFEC%d check DT",
387                                 __func__, xsdfec->fec_id);
388                 return -EIO;
389         } else if (xsdfec->code == XSDFEC_CODE_INVALID) {
390                 xsdfec->code = XSDFEC_TURBO_CODE;
391         }
392
393         if (xsdfec->wr_protect)
394                 xsdfec_wr_protect(xsdfec, false);
395
396         xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));
397         turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
398                         XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
399         xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
400         return err;
401 }
402
403 static int
404 xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
405 {
406         u32 reg_value;
407         struct xsdfec_turbo turbo_params;
408         int err;
409
410         if (xsdfec->code == XSDFEC_LDPC_CODE) {
411                 dev_err(xsdfec->dev,
412                         "%s: SDFEC%d is configured for LDPC, check DT",
413                         __func__, xsdfec->fec_id);
414                 return -EIO;
415         }
416
417         reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
418
419         turbo_params.scale = (reg_value >> XSDFEC_TURBO_SCALE_BIT_POS) &
420                               XSDFEC_TURBO_SCALE_MASK;
421         turbo_params.alg = reg_value & 0x1;
422
423         err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
424         if (err) {
425                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
426                         __func__, xsdfec->fec_id);
427                 err = -EFAULT;
428         }
429
430         return err;
431 }
432
433 #define XSDFEC_LDPC_REG_JUMP    (0x10)
434 #define XSDFEC_REG0_N_MASK      (0x0000FFFF)
435 #define XSDFEC_REG0_N_LSB       (0)
436 #define XSDFEC_REG0_K_MASK      (0x7fff0000)
437 #define XSDFEC_REG0_K_LSB       (16)
438 static int
439 xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
440                   u32 n, u32 k, u32 offset)
441 {
442         u32 wdata;
443
444         /* Use only lower 16 bits */
445         if (n & ~XSDFEC_REG0_N_MASK)
446                 dev_err(xsdfec->dev, "N value is beyond 16 bits");
447         n &= XSDFEC_REG0_N_MASK;
448         n <<= XSDFEC_REG0_N_LSB;
449
450         if (k & ~(XSDFEC_REG0_K_MASK >> XSDFEC_REG0_K_LSB))
451                 dev_err(xsdfec->dev, "K value is beyond 15 bits");
452
453         k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);
454         wdata = k | n;
455
456         if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
457                                 > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
458                 dev_err(xsdfec->dev,
459                         "Writing outside of LDPC reg0 space 0x%x",
460                         XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
461                         (offset * XSDFEC_LDPC_REG_JUMP));
462                 return -EINVAL;
463         }
464         xsdfec_regwrite(xsdfec,
465                         XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
466                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
467         return 0;
468 }
469
470 static int
471 xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
472                          u32 code_id,
473                          struct xsdfec_ldpc_params *ldpc_params)
474 {
475         u32 reg_value;
476         u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
477                 (code_id * XSDFEC_LDPC_REG_JUMP);
478
479         if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
480                 dev_err(xsdfec->dev,
481                         "Accessing outside of LDPC reg0 space 0x%x",
482                         reg_addr);
483                 return -EINVAL;
484         }
485
486         reg_value = xsdfec_regread(xsdfec, reg_addr);
487
488         ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;
489
490         ldpc_params->k = (reg_value & XSDFEC_REG0_K_MASK) >> XSDFEC_REG0_K_LSB;
491
492         return 0;
493 }
494
495 #define XSDFEC_REG1_PSIZE_MASK          (0x000001ff)
496 #define XSDFEC_REG1_NO_PACKING_MASK     (0x00000400)
497 #define XSDFEC_REG1_NO_PACKING_LSB      (10)
498 #define XSDFEC_REG1_NM_MASK             (0x000ff800)
499 #define XSDFEC_REG1_NM_LSB              (11)
500 #define XSDFEC_REG1_BYPASS_MASK (0x00100000)
501 static int
502 xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
503                   u32 no_packing, u32 nm, u32 offset)
504 {
505         u32 wdata;
506
507         if (psize & ~XSDFEC_REG1_PSIZE_MASK)
508                 dev_err(xsdfec->dev, "Psize is beyond 9 bits");
509         psize &= XSDFEC_REG1_PSIZE_MASK;
510
511         if (no_packing != 0 && no_packing != 1)
512                 dev_err(xsdfec->dev, "No-packing bit register invalid");
513         no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
514                                         XSDFEC_REG1_NO_PACKING_MASK);
515
516         if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
517                 dev_err(xsdfec->dev, "NM is beyond 9 bits");
518         nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
519
520         wdata = nm | no_packing | psize;
521         if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
522                 > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
523                 dev_err(xsdfec->dev,
524                         "Writing outside of LDPC reg1 space 0x%x",
525                         XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
526                         (offset * XSDFEC_LDPC_REG_JUMP));
527                 return -EINVAL;
528         }
529         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
530                 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
531         return 0;
532 }
533
534 static int
535 xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
536                          u32 code_id,
537                          struct xsdfec_ldpc_params *ldpc_params)
538 {
539         u32 reg_value;
540         u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
541                 (code_id * XSDFEC_LDPC_REG_JUMP);
542
543         if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
544                 dev_err(xsdfec->dev,
545                         "Accessing outside of LDPC reg1 space 0x%x",
546                         reg_addr);
547                 return -EINVAL;
548         }
549
550         reg_value = xsdfec_regread(xsdfec, reg_addr);
551
552         ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;
553
554         ldpc_params->no_packing = ((reg_value & XSDFEC_REG1_NO_PACKING_MASK)
555                                     >> XSDFEC_REG1_NO_PACKING_LSB);
556
557         ldpc_params->nm = (reg_value & XSDFEC_REG1_NM_MASK) >>
558                            XSDFEC_REG1_NM_LSB;
559         return 0;
560 }
561
562 #define XSDFEC_REG2_NLAYERS_MASK                (0x000001FF)
563 #define XSDFEC_REG2_NLAYERS_LSB                 (0)
564 #define XSDFEC_REG2_NNMQC_MASK                  (0x000FFE00)
565 #define XSDFEC_REG2_NMQC_LSB                    (9)
566 #define XSDFEC_REG2_NORM_TYPE_MASK              (0x00100000)
567 #define XSDFEC_REG2_NORM_TYPE_LSB               (20)
568 #define XSDFEC_REG2_SPECIAL_QC_MASK             (0x00200000)
569 #define XSDFEC_REG2_SPEICAL_QC_LSB              (21)
570 #define XSDFEC_REG2_NO_FINAL_PARITY_MASK        (0x00400000)
571 #define XSDFEC_REG2_NO_FINAL_PARITY_LSB         (22)
572 #define XSDFEC_REG2_MAX_SCHEDULE_MASK           (0x01800000)
573 #define XSDFEC_REG2_MAX_SCHEDULE_LSB            (23)
574
575 static int
576 xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
577                   u32 norm_type, u32 special_qc, u32 no_final_parity,
578                   u32 max_schedule, u32 offset)
579 {
580         u32 wdata;
581
582         if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
583                                 XSDFEC_REG2_NLAYERS_LSB))
584                 dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
585         nlayers &= XSDFEC_REG2_NLAYERS_MASK;
586
587         if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
588                 dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
589         nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
590
591         if (norm_type > 1)
592                 dev_err(xsdfec->dev, "Norm type is invalid");
593         norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
594                                         XSDFEC_REG2_NORM_TYPE_MASK);
595         if (special_qc > 1)
596                 dev_err(xsdfec->dev, "Special QC is invalid");
597         special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
598                         XSDFEC_REG2_SPECIAL_QC_MASK);
599
600         if (no_final_parity > 1)
601                 dev_err(xsdfec->dev, "No final parity check invalid");
602         no_final_parity =
603                 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
604                                         XSDFEC_REG2_NO_FINAL_PARITY_MASK);
605         if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
606                                         XSDFEC_REG2_MAX_SCHEDULE_LSB))
607                 dev_err(xsdfec->dev, "Max Schedule exceeds 2 bits");
608         max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
609                                 XSDFEC_REG2_MAX_SCHEDULE_MASK);
610
611         wdata = (max_schedule | no_final_parity | special_qc | norm_type |
612                         nmqc | nlayers);
613
614         if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
615                 > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
616                 dev_err(xsdfec->dev,
617                         "Writing outside of LDPC reg2 space 0x%x",
618                         XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
619                         (offset * XSDFEC_LDPC_REG_JUMP));
620                 return -EINVAL;
621         }
622         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
623                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
624         return 0;
625 }
626
627 static int
628 xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
629                          u32 code_id,
630                          struct xsdfec_ldpc_params *ldpc_params)
631 {
632         u32 reg_value;
633         u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
634                 (code_id * XSDFEC_LDPC_REG_JUMP);
635
636         if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
637                 dev_err(xsdfec->dev,
638                         "Accessing outside of LDPC reg2 space 0x%x",
639                         reg_addr);
640                 return -EINVAL;
641         }
642
643         reg_value = xsdfec_regread(xsdfec, reg_addr);
644
645         ldpc_params->nlayers = ((reg_value >> XSDFEC_REG2_NLAYERS_LSB) &
646                                 XSDFEC_REG2_NLAYERS_MASK);
647
648         ldpc_params->nmqc = (reg_value & XSDFEC_REG2_NNMQC_MASK) >>
649                              XSDFEC_REG2_NMQC_LSB;
650
651         ldpc_params->norm_type = (reg_value & XSDFEC_REG2_NORM_TYPE_MASK) >>
652                                   XSDFEC_REG2_NORM_TYPE_LSB;
653
654         ldpc_params->special_qc = (reg_value & XSDFEC_REG2_SPECIAL_QC_MASK) >>
655                                    XSDFEC_REG2_SPEICAL_QC_LSB;
656
657         ldpc_params->no_final_parity =
658                 (reg_value & XSDFEC_REG2_NO_FINAL_PARITY_MASK) >>
659                 XSDFEC_REG2_NO_FINAL_PARITY_LSB;
660
661         ldpc_params->max_schedule =
662                 (reg_value & XSDFEC_REG2_MAX_SCHEDULE_MASK) >>
663                 XSDFEC_REG2_MAX_SCHEDULE_LSB;
664
665         return 0;
666 }
667
668 #define XSDFEC_REG3_LA_OFF_LSB          (8)
669 #define XSDFEC_REG3_QC_OFF_LSB          (16)
670 static int
671 xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
672                   u8 la_off, u16 qc_off, u32 offset)
673 {
674         u32 wdata;
675
676         wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
677                 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
678         if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
679                 (offset *  XSDFEC_LDPC_REG_JUMP) >
680                         XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
681                 dev_err(xsdfec->dev,
682                         "Writing outside of LDPC reg3 space 0x%x",
683                         XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
684                         (offset * XSDFEC_LDPC_REG_JUMP));
685                 return -EINVAL;
686         }
687         xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
688                         (offset * XSDFEC_LDPC_REG_JUMP), wdata);
689         return 0;
690 }
691
692 static int
693 xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
694                          u32 code_id,
695                          struct xsdfec_ldpc_params *ldpc_params)
696 {
697         u32 reg_value;
698         u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
699                 (code_id * XSDFEC_LDPC_REG_JUMP);
700
701         if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
702                 dev_err(xsdfec->dev,
703                         "Accessing outside of LDPC reg3 space 0x%x",
704                         reg_addr);
705                 return -EINVAL;
706         }
707
708         reg_value = xsdfec_regread(xsdfec, reg_addr);
709
710         ldpc_params->qc_off = (reg_value >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
711         ldpc_params->la_off = (reg_value >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
712         ldpc_params->sc_off = (reg_value & 0xFF);
713
714         return 0;
715 }
716
717 #define XSDFEC_SC_TABLE_DEPTH           (0x3fc)
718 #define XSDFEC_REG_WIDTH_JUMP           (4)
719 static int
720 xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
721                       u32 *sc_ptr, u32 len)
722 {
723         int reg;
724
725         /*
726          * Writes that go beyond the length of
727          * Shared Scale(SC) table should fail
728          */
729         if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
730                 dev_err(xsdfec->dev, "Write exceeds SC table length");
731                 return -EINVAL;
732         }
733
734         /*
735          * sc_off tracks the last written location in the
736          * Shared Scale (SC) table. Those shared codes might
737          * be in use. Updating them without quiescing the device
738          * can put the SDFEC device in an indeterminate state
739          */
740         if ((XSDFEC_REG_WIDTH_JUMP * offset) < xsdfec->sc_off) {
741                 dev_err(xsdfec->dev, "Might write to in use shared SC code");
742                 return -EINVAL;
743         }
744
745         for (reg = 0; reg < len; reg++) {
746                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
747                 (offset + reg) *  XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
748         }
749         xsdfec->sc_off = reg + (XSDFEC_REG_WIDTH_JUMP * offset);
750         return reg;
751 }
752
753 static int
754 xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
755                         u32 *sc_ptr, u32 len)
756 {
757         u32 reg;
758         u32 reg_addr;
759         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
760
761         if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
762                 dev_err(xsdfec->dev, "Access will exceed SC table length");
763                 return -EINVAL;
764         }
765
766         for (reg = 0; reg < len; reg++) {
767                 reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
768                         ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
769
770                 sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
771         }
772
773         return 0;
774 }
775
776 #define XSDFEC_LA_TABLE_DEPTH           (0xFFC)
777 static int
778 xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
779                       u32 *la_ptr, u32 len)
780 {
781         int reg;
782
783         if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
784                 dev_err(xsdfec->dev, "Write exceeds LA table length");
785                 return -EINVAL;
786         }
787
788         if  (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->la_off) {
789                 dev_err(xsdfec->dev, "Might write to in use shared LA code");
790                 return -EINVAL;
791         }
792
793         for (reg = 0; reg < len; reg++) {
794                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
795                                 (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
796                                 la_ptr[reg]);
797         }
798         xsdfec->la_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
799         return reg;
800 }
801
802 static int
803 xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
804                         u32 *la_ptr, u32 len)
805 {
806         u32 reg;
807         u32 reg_addr;
808         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
809
810         if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
811                 dev_err(xsdfec->dev, "Access will exceed LA table length");
812                 return -EINVAL;
813         }
814
815         for (reg = 0; reg < len; reg++) {
816                 reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
817                                 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
818
819                 la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
820         }
821
822         return 0;
823 }
824
825 #define XSDFEC_QC_TABLE_DEPTH           (0x7FFC)
826 static int
827 xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
828                       u32 offset, u32 *qc_ptr, u32 len)
829 {
830         int reg;
831
832         if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
833                 dev_err(xsdfec->dev, "Write exceeds QC table length");
834                 return -EINVAL;
835         }
836
837         if (XSDFEC_REG_WIDTH_JUMP * offset < xsdfec->qc_off) {
838                 dev_err(xsdfec->dev, "Might write to in use shared QC code");
839                 return -EINVAL;
840         }
841
842         for (reg = 0; reg < len; reg++) {
843                 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
844                  (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);
845         }
846
847         xsdfec->qc_off = reg + (offset * XSDFEC_REG_WIDTH_JUMP);
848         return reg;
849 }
850
851 static int
852 xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
853                         u32 offset, u32 *qc_ptr, u32 len)
854 {
855         u32 reg;
856         u32 reg_addr;
857         u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
858
859         if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
860                 dev_err(xsdfec->dev, "Access will exceed QC table length");
861                 return -EINVAL;
862         }
863
864         for (reg = 0; reg < len; reg++) {
865                 reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
866                  (offset + reg) * XSDFEC_REG_WIDTH_JUMP;
867
868                 qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
869         }
870
871         return 0;
872 }
873
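/*
 * Illustrative flow for XSDFEC_ADD_LDPC_CODE_PARAMS (a sketch, assuming the
 * uapi struct layout): userspace fills a struct xsdfec_ldpc_params with the
 * code dimensions (n, k, psize, nm, nlayers, nmqc, ...), the shared table
 * offsets (sc_off, la_off, qc_off) and the table contents, then issues
 *
 *   err = ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, &ldpc_params);
 *
 * xsdfec_add_ldpc() below writes code registers 0-3 for ldpc_params.code_id
 * and then loads the shared SC/LA/QC tables.
 */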
874 static int
875 xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
876 {
877         struct xsdfec_ldpc_params *ldpc;
878         int err;
879
880         ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
881         if (!ldpc)
882                 return -ENOMEM;
883
884         err = copy_from_user(ldpc, arg, sizeof(*ldpc));
885         if (err) {
886                 dev_err(xsdfec->dev, "%s failed to copy from user for SDFEC%d",
887                         __func__, xsdfec->fec_id);
888                 err = -EFAULT;
889                 goto err_out;
890         }
891         if (xsdfec->code == XSDFEC_TURBO_CODE) {
892                 dev_err(xsdfec->dev, "%s: Unable to write LDPC to SDFEC%d, check DT",
893                         __func__, xsdfec->fec_id);
894                 err = -EIO;
895                 goto err_out;
896         }
897         xsdfec->code = XSDFEC_LDPC_CODE;
898         /* Disable Write Protection before proceeding */
899         if (xsdfec->wr_protect)
900                 xsdfec_wr_protect(xsdfec, false);
901
902         /* Write LDPC to CODE Register */
903         xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, (xsdfec->code - 1));
904         /* Write Reg 0 */
905         err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);
906         if (err)
907                 goto err_out;
908
909         /* Write Reg 1 */
910         err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
911                                 ldpc->nm, ldpc->code_id);
912         if (err)
913                 goto err_out;
914
915         /* Write Reg 2 */
916         err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
917                                 ldpc->norm_type, ldpc->special_qc,
918                                 ldpc->no_final_parity, ldpc->max_schedule,
919                                 ldpc->code_id);
920         if (err)
921                 goto err_out;
922
923         /* Write Reg 3 */
924         err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
925                                 ldpc->la_off, ldpc->qc_off, ldpc->code_id);
926         if (err)
927                 goto err_out;
928
929         /* Write Shared Codes */
930         err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
931                                     ldpc->sc_table, ldpc->nlayers);
932         if (err < 0)
933                 goto err_out;
934
935         err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
936                                     ldpc->la_table, ldpc->nlayers);
937         if (err < 0)
938                 goto err_out;
939
940         err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
941                                     ldpc->qc_table, ldpc->nqc);
942         if (err < 0)
943                 goto err_out;
944
945         kfree(ldpc);
946         return 0;
947         /* Error Path */
948 err_out:
949         kfree(ldpc);
950         return err;
951 }
952
953 static int
954 xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
955 {
956         struct xsdfec_ldpc_params *ldpc_params;
957         int err = 0;
958
959         if (xsdfec->code == XSDFEC_TURBO_CODE) {
960                 dev_err(xsdfec->dev,
961                         "%s: SDFEC%d is configured for TURBO, check DT",
962                                 __func__, xsdfec->fec_id);
963                 return -EIO;
964         }
965
966         ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);
967         if (!ldpc_params)
968                 return -ENOMEM;
969
970         err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
971         if (err) {
972                 dev_err(xsdfec->dev, "%s failed to copy from user for SDFEC%d",
973                         __func__, xsdfec->fec_id);
974                 err = -EFAULT;
975                 goto err_out;
976         }
977
978         err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,
979                                        ldpc_params);
980         if (err)
981                 goto err_out;
982
983         err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,
984                                        ldpc_params);
985         if (err)
986                 goto err_out;
987
988         err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,
989                                        ldpc_params);
990         if (err)
991                 goto err_out;
992
993         err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,
994                                        ldpc_params);
995         if (err)
996                 goto err_out;
997
998         /*
999          * Collect the shared table values, needs to happen after reading
1000          * the registers
1001          */
1002         err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
1003                                       ldpc_params->sc_table,
1004                                       ldpc_params->nlayers);
1005         if (err < 0)
1006                 goto err_out;
1007
1008         err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
1009                                       ldpc_params->la_table,
1010                                       ldpc_params->nlayers);
1011         if (err < 0)
1012                 goto err_out;
1013
1014         err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
1015                                       ldpc_params->qc_table,
1016                                       ldpc_params->nqc);
1017         if (err < 0)
1018                 goto err_out;
1019
1020         err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
1021         if (err) {
1022                 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
1023                         __func__, xsdfec->fec_id);
1024                 err = -EFAULT;
1025         }
1026
1027         kfree(ldpc_params);
1028         return err;
1029         /* Error Path */
1030 err_out:
1031         kfree(ldpc_params);
1032         return err;
1033 }
1034
1035 static int
1036 xsdfec_set_order(struct xsdfec_dev *xsdfec, enum xsdfec_order order)
1037 {
1038         bool order_out_of_range;
1039
1040         order_out_of_range = (order <= XSDFEC_INVALID_ORDER) ||
1041                              (order >= XSDFEC_ORDER_MAX);
1042         if (order_out_of_range) {
1043                 dev_err(xsdfec->dev,
1044                         "%s invalid order value %d for SDFEC%d",
1045                         __func__, order, xsdfec->fec_id);
1046                 return -EINVAL;
1047         }
1048
1049         /* Verify Device has not started */
1050         if (xsdfec->state == XSDFEC_STARTED) {
1051                 dev_err(xsdfec->dev,
1052                         "%s attempting to set Order while started for SDFEC%d",
1053                         __func__, xsdfec->fec_id);
1054                 return -EIO;
1055         }
1056
1057         xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (order - 1));
1058
1059         xsdfec->order = order;
1060
1061         return 0;
1062 }
1063
1064 static int
1065 xsdfec_set_bypass(struct xsdfec_dev *xsdfec, unsigned long bypass)
1066 {
1067         if (bypass > 1) {
1068                 dev_err(xsdfec->dev,
1069                         "%s invalid bypass value %ld for SDFEC%d",
1070                         __func__, bypass, xsdfec->fec_id);
1071                 return -EINVAL;
1072         }
1073
1074         /* Verify Device has not started */
1075         if (xsdfec->state == XSDFEC_STARTED) {
1076                 dev_err(xsdfec->dev,
1077                         "%s attempting to set bypass while started for SDFEC%d",
1078                         __func__, xsdfec->fec_id);
1079                 return -EIO;
1080         }
1081
1082         xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, bypass);
1083
1084         return 0;
1085 }
1086
1087 static int
1088 xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *is_active)
1089 {
1090         u32 reg_value;
1091
1092         reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
1093         /* using a double ! operator instead of casting */
1094         *is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
1095
1096         return 0;
1097 }
1098
1099 static int xsdfec_start(struct xsdfec_dev *xsdfec)
1100 {
1101         u32 regread;
1102
1103         /* Verify Code is loaded */
1104         if (xsdfec->code == XSDFEC_CODE_INVALID) {
1105                 dev_err(xsdfec->dev,
1106                         "%s : set code before start for SDFEC%d",
1107                         __func__, xsdfec->fec_id);
1108                 return -EINVAL;
1109         }
1110         regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
1111         regread &= 0x1;
1112         if (regread + 1 != xsdfec->code) {
1113                 dev_err(xsdfec->dev,
1114                         "%s SDFEC HW code does not match driver code",
1115                         __func__);
1116                 return -EINVAL;
1117         }
1118
1119         /* Verify Order has been set */
1120         if (xsdfec->order == XSDFEC_INVALID_ORDER) {
1121                 dev_err(xsdfec->dev,
1122                         "%s : set order before starting SDFEC%d",
1123                         __func__, xsdfec->fec_id);
1124                 return -EINVAL;
1125         }
1126
1127         /* Set AXIS width */
1128         xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, 0);
1129         /* Set AXIS enable */
1130         xsdfec_regwrite(xsdfec,
1131                         XSDFEC_AXIS_ENABLE_ADDR,
1132                         XSDFEC_AXIS_ENABLE_MASK);
1133         /* Write Protect Code and Registers */
1134         xsdfec_wr_protect(xsdfec, true);
1135         /* Done */
1136         xsdfec->state = XSDFEC_STARTED;
1137         return 0;
1138 }
1139
1140 static int
1141 xsdfec_stop(struct xsdfec_dev *xsdfec)
1142 {
1143         u32 regread;
1144
1145         if (xsdfec->state != XSDFEC_STARTED)
1146                 dev_err(xsdfec->dev, "Device not started correctly");
1147         /* Disable Write Protect */
1148         xsdfec_wr_protect(xsdfec, false);
1149         /* Disable AXIS_ENABLE register */
1150         regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
1151         regread &= (~XSDFEC_AXIS_ENABLE_MASK);
1152         xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
1153         /* Stop */
1154         xsdfec->state = XSDFEC_STOPPED;
1155         return 0;
1156 }
1157
1158 /*
1159  * Reset will happen asynchronously
1160  * since there is no in-band reset register
1161  * Prepare driver for reset
1162  */
1163
1164 static int
1165 xsdfec_reset_req(struct xsdfec_dev *xsdfec)
1166 {
1167         xsdfec->state = XSDFEC_INIT;
1168         xsdfec->order = XSDFEC_INVALID_ORDER;
1169         xsdfec->sc_off = 0;
1170         xsdfec->la_off = 0;
1171         xsdfec->qc_off = 0;
1172         xsdfec->wr_protect = false;
1173         atomic_set(&xsdfec->isr_err_count, 0);
1174         atomic_set(&xsdfec->uecc_count, 0);
1175         atomic_set(&xsdfec->cecc_count, 0);
1176         atomic_inc(&xsdfec->reset_count);
1177         return 0;
1178 }
1179
1180 static long
1181 xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
1182 {
1183         struct xsdfec_dev *xsdfec = fptr->private_data;
1184         void __user *arg = (void __user *)data;
1185         int rval = -EINVAL;
1186
1187         if (!xsdfec)
1188                 return rval;
1189
1190         /* In failed state allow only reset and get status IOCTLs */
1191         if (xsdfec->state == XSDFEC_NEEDS_RESET &&
1192             (cmd != XSDFEC_RESET_REQ && cmd != XSDFEC_GET_STATUS)) {
1193                 dev_err(xsdfec->dev,
1194                         "SDFEC%d in failed state. Reset Required",
1195                         xsdfec->fec_id);
1196                 return -EPERM;
1197         }
1198
1199         switch (cmd) {
1200         case XSDFEC_START_DEV:
1201                 rval = xsdfec_start(xsdfec);
1202                 break;
1203         case XSDFEC_STOP_DEV:
1204                 rval = xsdfec_stop(xsdfec);
1205                 break;
1206         case XSDFEC_RESET_REQ:
1207                 rval = xsdfec_reset_req(xsdfec);
1208                 break;
1209         case XSDFEC_GET_STATUS:
1210                 arg = (void __user *)data;
1211                 if (!arg)
1212                         return rval;
1213                 rval = xsdfec_get_status(xsdfec, arg);
1214                 break;
1215         case XSDFEC_GET_CONFIG:
1216                 arg = (void __user *)data;
1217                 if (!arg)
1218                         return rval;
1219                 rval = xsdfec_get_config(xsdfec, arg);
1220                 break;
1221         case XSDFEC_SET_IRQ:
1222                 arg = (void __user *)data;
1223                 if (!arg)
1224                         return rval;
1225                 rval = xsdfec_set_irq(xsdfec, arg);
1226                 break;
1227         case XSDFEC_SET_TURBO:
1228                 arg = (void __user *)data;
1229                 if (!arg)
1230                         return rval;
1231                 rval = xsdfec_set_turbo(xsdfec, arg);
1232                 break;
1233         case XSDFEC_GET_TURBO:
1234                 arg = (void __user *)data;
1235                 if (!arg)
1236                         return rval;
1237                 rval = xsdfec_get_turbo(xsdfec, arg);
1238                 break;
1239         case XSDFEC_ADD_LDPC_CODE_PARAMS:
1240                 arg = (void __user *)data;
1241                 if (!arg)
1242                         return rval;
1243                 rval  = xsdfec_add_ldpc(xsdfec, arg);
1244                 break;
1245         case XSDFEC_GET_LDPC_CODE_PARAMS:
1246                 arg = (void __user *)data;
1247                 if (!arg)
1248                         return rval;
1249                 rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
1250                 break;
1251         case XSDFEC_SET_ORDER:
1252                 rval = xsdfec_set_order(xsdfec, (enum xsdfec_order)data);
1253                 break;
1254         case XSDFEC_SET_BYPASS:
1255                 rval = xsdfec_set_bypass(xsdfec, data);
1256                 break;
1257         case XSDFEC_IS_ACTIVE:
1258                 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
1259                 break;
1260         default:
1261                 /* Should not get here */
1262                 dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
1263                 break;
1264         }
1265         return rval;
1266 }
1267
1268 static unsigned int
1269 xsdfec_poll(struct file *file, poll_table *wait)
1270 {
1271         unsigned int mask;
1272         struct xsdfec_dev *xsdfec = file->private_data;
1273
1274         if (!xsdfec)
1275                 return POLLNVAL | POLLHUP;
1276
1277         poll_wait(file, &xsdfec->waitq, wait);
1278
1279         /* XSDFEC ISR detected an error */
1280         if (xsdfec->state == XSDFEC_NEEDS_RESET)
1281                 mask = POLLIN | POLLRDNORM;
1282         else
1283                 mask = POLLPRI | POLLERR;
1284
1285         return mask;
1286 }
1287
1288 static const struct file_operations xsdfec_fops = {
1289         .owner = THIS_MODULE,
1290         .open = xsdfec_dev_open,
1291         .release = xsdfec_dev_release,
1292         .unlocked_ioctl = xsdfec_dev_ioctl,
1293         .poll = xsdfec_poll,
1294 };
1295
1296 static int
1297 xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1298 {
1299         struct device *dev = xsdfec->dev;
1300         struct device_node *node = dev->of_node;
1301         int rval;
1302         const char *fec_code;
1303         const char *fec_op_mode;
1304
1305         rval = of_property_read_string(node,
1306                                        "xlnx,sdfec-op-mode",
1307                                        &fec_op_mode);
1308         if (rval < 0) {
1309                 dev_err(dev, "xlnx,sdfec-op-mode not in DT");
1310                 return rval;
1311         }
1312
1313         if (!strcasecmp(fec_op_mode, "encode")) {
1314                 xsdfec->op_mode = XSDFEC_ENCODE;
1315         } else if (!strcasecmp(fec_op_mode, "decode")) {
1316                 xsdfec->op_mode = XSDFEC_DECODE;
1317         } else {
1318                 dev_err(dev, "Encode or Decode not specified in DT");
1319                 return -EINVAL;
1320         }
1321
1322         rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1323         if (rval < 0) {
1324                 dev_err(dev, "xlnx,sdfec-code not in DT");
1325                 return rval;
1326         }
1327
1328         if (!strcasecmp(fec_code, "ldpc")) {
1329                 xsdfec->code = XSDFEC_LDPC_CODE;
1330         } else if (!strcasecmp(fec_code, "turbo")) {
1331                 xsdfec->code = XSDFEC_TURBO_CODE;
1332         } else {
1333                 dev_err(xsdfec->dev, "Invalid Code in DT");
1334                 return -EINVAL;
1335         }
1336
1337         return 0;
1338 }
1339
1340 static void
1341 xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)
1342 {
1343         u32 cecc, uecc;
1344         int uecc_cnt;
1345
1346         cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
1347         uecc = ecc_err & XSDFEC_ECC_ISR_MBE;
1348
1349         uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
1350         atomic_add(hweight32(cecc), &xsdfec->cecc_count);
1351
1352         if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1353                 dev_err(xsdfec->dev,
1354                         "Multi-bit error on xsdfec%d. Needs reset",
1355                         xsdfec->fec_id);
1356         }
1357
1358         /* Clear ECC errors */
1359         xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1360 }
1361
1362 static void
1363 xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)
1364 {
1365         int isr_err_cnt;
1366
1367         /* Update ISR error counts */
1368         isr_err_cnt = atomic_add_return(hweight32(isr_err),
1369                                         &xsdfec->isr_err_count);
1370         if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1371                 dev_err(xsdfec->dev,
1372                         "Tlast, DIN_WORDS or DOUT_WORDS not correct");
1373         }
1374
1375         /* Clear ISR error status */
1376         xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, 0);
1377 }
1378
1379 static void
1380 xsdfec_reset_required(struct xsdfec_dev *xsdfec)
1381 {
1382         xsdfec->state = XSDFEC_NEEDS_RESET;
1383 }
1384
1385 static irqreturn_t
1386 xsdfec_irq_thread(int irq, void *dev_id)
1387 {
1388         struct xsdfec_dev *xsdfec = dev_id;
1389         irqreturn_t ret = IRQ_HANDLED;
1390         u32 ecc_err;
1391         u32 isr_err;
1392         bool fatal_err = false;
1393
1394         WARN_ON(xsdfec->irq != irq);
1395
1396         /* Mask Interrupts */
1397         xsdfec_isr_enable(xsdfec, false);
1398         xsdfec_ecc_isr_enable(xsdfec, false);
1399
1400         /* Read Interrupt Status Registers */
1401         ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1402         isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
1403
1404         if (ecc_err & XSDFEC_ECC_ISR_MBE) {
1405                 /* Multi-Bit Errors need Reset */
1406                 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1407                 xsdfec_reset_required(xsdfec);
1408                 fatal_err = true;
1409         } else if (isr_err & XSDFEC_ISR_MASK) {
1410                 /*
1411                  * Tlast, DIN_WORDS and DOUT_WORDS related
1412                  * errors need Reset
1413                  */
1414                 xsdfec_log_isr_errors(xsdfec, isr_err);
1415                 xsdfec_reset_required(xsdfec);
1416                 fatal_err = true;
1417         } else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
1418                 /* Correctable ECC Errors */
1419                 xsdfec_log_ecc_errors(xsdfec, ecc_err);
1420         } else {
1421                 ret = IRQ_NONE;
1422         }
1423
1424         if (fatal_err)
1425                 wake_up_interruptible(&xsdfec->waitq);
1426
1427         /* Unmask Interrupts */
1428         xsdfec_isr_enable(xsdfec, true);
1429         xsdfec_ecc_isr_enable(xsdfec, true);
1430
1431         return ret;
1432 }
1433
1434 static int
1435 xsdfec_probe(struct platform_device *pdev)
1436 {
1437         struct xsdfec_dev *xsdfec;
1438         struct device *dev;
1439         struct device *dev_create;
1440         struct resource *res;
1441         int err;
1442         bool irq_enabled = true;
1443
1444         xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1445         if (!xsdfec)
1446                 return -ENOMEM;
1447
1448         xsdfec->dev = &pdev->dev;
1449         if (atomic_read(&xsdfec_ndevs) >= DRIVER_MAX_DEV) {
1450                 dev_err(&pdev->dev,
1451                         "Cannot instantiate more than %d SDFEC instances",
1452                         DRIVER_MAX_DEV);
1453                 return -EINVAL;
1454         }
1455
1456         xsdfec->fec_id = atomic_read(&xsdfec_ndevs);
1457
1458         dev = xsdfec->dev;
1459         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1460         xsdfec->regs = devm_ioremap_resource(dev, res);
1461         if (IS_ERR(xsdfec->regs)) {
1462                 dev_err(dev, "Unable to map resource");
1463                 err = PTR_ERR(xsdfec->regs);
1464                 goto err_xsdfec_dev;
1465         }
1466
1467         xsdfec->irq = platform_get_irq(pdev, 0);
1468         if (xsdfec->irq < 0) {
1469                 dev_dbg(dev, "platform_get_irq failed");
1470                 irq_enabled = false;
1471         }
1472
1473         err = xsdfec_parse_of(xsdfec);
1474         if (err < 0)
1475                 goto err_xsdfec_dev;
1476
1477         /* Save driver private data */
1478         platform_set_drvdata(pdev, xsdfec);
1479
1480         init_waitqueue_head(&xsdfec->waitq);
1481         if (irq_enabled) {
1482                 /* Register IRQ thread */
1483                 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1484                                                 xsdfec_irq_thread,
1485                                                 IRQF_ONESHOT,
1486                                                 "xilinx-sdfec16",
1487                                                 xsdfec);
1488                 if (err < 0) {
1489                         dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1490                         goto err_xsdfec_dev;
1491                 }
1492         }
1493
1494         cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
1495         xsdfec->xsdfec_cdev.owner = THIS_MODULE;
1496         err = cdev_add(&xsdfec->xsdfec_cdev,
1497                        MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id), 1);
1498         if (err < 0) {
1499                 dev_err(dev, "cdev_add failed");
1500                 err = -EIO;
1501                 goto err_xsdfec_dev;
1502         }
1503
1504         if (!xsdfec_class) {
1505                 err = -EIO;
1506                 dev_err(dev, "xsdfec class not created correctly");
1507                 goto err_xsdfec_cdev;
1508         }
1509
1510         dev_create = device_create(xsdfec_class, dev,
1511                                    MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id),
1512                                    xsdfec, "xsdfec%d", xsdfec->fec_id);
1513         if (IS_ERR(dev_create)) {
1514                 dev_err(dev, "unable to create device");
1515                 err = PTR_ERR(dev_create);
1516                 goto err_xsdfec_cdev;
1517         }
1518
1519         atomic_set(&xsdfec->open_count, 1);
1520         dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->fec_id);
1521         atomic_inc(&xsdfec_ndevs);
1522         return 0;
1523
1524         /* Failure cleanup */
1525 err_xsdfec_cdev:
1526         cdev_del(&xsdfec->xsdfec_cdev);
1527 err_xsdfec_dev:
1528         return err;
1529 }
1530
1531 static int
1532 xsdfec_remove(struct platform_device *pdev)
1533 {
1534         struct xsdfec_dev *xsdfec;
1535         struct device *dev = &pdev->dev;
1536
1537         xsdfec = platform_get_drvdata(pdev);
1538         if (!xsdfec)
1539                 return -ENODEV;
1540         dev = xsdfec->dev;
1541         if (!xsdfec_class) {
1542                 dev_err(dev, "xsdfec_class is NULL");
1543                 return -EIO;
1544         }
1545
1546         device_destroy(xsdfec_class,
1547                        MKDEV(MAJOR(xsdfec_devt), xsdfec->fec_id));
1548         cdev_del(&xsdfec->xsdfec_cdev);
1549         atomic_dec(&xsdfec_ndevs);
1550         return 0;
1551 }
1552
1553 static const struct of_device_id xsdfec_of_match[] = {
1554         { .compatible = "xlnx,fec-engine", },
1555         { /* end of table */ }
1556 };
1557 MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1558
1559 static struct platform_driver xsdfec_driver = {
1560         .driver = {
1561                 .name = "xilinx-sdfec",
1562                 .of_match_table = xsdfec_of_match,
1563         },
1564         .probe = xsdfec_probe,
1565         .remove =  xsdfec_remove,
1566 };
1567
1568 static int __init xsdfec_init_mod(void)
1569 {
1570         int err;
1571
1572         xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
1573         if (IS_ERR(xsdfec_class)) {
1574                 err = PTR_ERR(xsdfec_class);
1575                 pr_err("%s : Unable to register xsdfec class", __func__);
1576                 return err;
1577         }
1578
1579         err = alloc_chrdev_region(&xsdfec_devt,
1580                                   0, DRIVER_MAX_DEV, DRIVER_NAME);
1581         if (err < 0) {
1582                 pr_err("%s : Unable to get major number", __func__);
1583                 goto err_xsdfec_class;
1584         }
1585
1586         err = platform_driver_register(&xsdfec_driver);
1587         if (err < 0) {
1588                 pr_err("%s : Unable to register %s driver",
1589                        __func__, DRIVER_NAME);
1590                 goto err_xsdfec_drv;
1591         }
1592         return 0;
1593
1594         /* Error Path */
1595 err_xsdfec_drv:
1596         unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1597 err_xsdfec_class:
1598         class_destroy(xsdfec_class);
1599         return err;
1600 }
1601
1602 static void __exit xsdfec_cleanup_mod(void)
1603 {
1604         platform_driver_unregister(&xsdfec_driver);
1605         unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1606         class_destroy(xsdfec_class);
1607         xsdfec_class = NULL;
1608 }
1609
1610 module_init(xsdfec_init_mod);
1611 module_exit(xsdfec_cleanup_mod);
1612
1613 MODULE_AUTHOR("Xilinx, Inc");
1614 MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
1615 MODULE_LICENSE("GPL");
1616 MODULE_VERSION(DRIVER_VERSION);