1 // SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Memory-to-Memory Video Multi-Scaler IP
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Author: Suresh Gupta <suresh.gupta@xilinx.com>
 *
 * Based on the virtual v4l2-mem2mem example device
 *
 * This driver adds support to control the Xilinx Video Multi
 * Scaler Controller
 */
15 #include <linux/delay.h>
17 #include <linux/gpio/consumer.h>
18 #include <linux/interrupt.h>
20 #include <linux/module.h>
22 #include <linux/of_irq.h>
23 #include <linux/platform_device.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
27 #include <media/v4l2-device.h>
28 #include <media/v4l2-ioctl.h>
29 #include <media/v4l2-mem2mem.h>
30 #include <media/videobuf2-dma-contig.h>
32 #include "xilinx-multi-scaler-coeff.h"
/* 0x0000 : Control signals */
#define XM2MSC_AP_CTRL 0x0000
#define XM2MSC_AP_CTRL_START BIT(0)
#define XM2MSC_AP_CTRL_DONE BIT(1)
#define XM2MSC_AP_CTRL_IDEL BIT(2) /* NOTE(review): likely a typo for IDLE; name kept for compatibility */
#define XM2MSC_AP_CTRL_READY BIT(3)
#define XM2MSC_AP_CTRL_AUTO_RESTART BIT(7)

/* 0x0004 : Global Interrupt Enable Register */
#define XM2MSC_GIE 0x0004
#define XM2MSC_GIE_EN BIT(0)

/* 0x0008 : IP Interrupt Enable Register (Read/Write) */
#define XM2MSC_IER 0x0008
#define XM2MSC_ISR 0x000c
#define XM2MSC_ISR_DONE BIT(0)
#define XM2MSC_ISR_READY BIT(1)

/* Number of active output channels programmed into the IP */
#define XM2MSC_NUM_OUTS 0x0010

/* Per-channel register offsets, relative to the channel register base */
#define XM2MSC_WIDTHIN 0x000
#define XM2MSC_WIDTHOUT 0x008
#define XM2MSC_HEIGHTIN 0x010
#define XM2MSC_HEIGHTOUT 0x018
#define XM2MSC_LINERATE 0x020
#define XM2MSC_PIXELRATE 0x028
#define XM2MSC_INPIXELFMT 0x030
#define XM2MSC_OUTPIXELFMT 0x038
#define XM2MSC_INSTRIDE 0x050
#define XM2MSC_OUTSTRIDE 0x058
#define XM2MSC_SRCIMGBUF0 0x060
#define XM2MSC_SRCIMGBUF1 0x070
#define XM2MSC_DSTIMGBUF0 0x090
#define XM2MSC_DSTIMGBUF1 0x0100

/* Vertical/horizontal filter coefficient banks, one bank per channel */
#define XM2MVSC_VFLTCOEFF_L 0x2000
#define XM2MVSC_VFLTCOEFF(x) (XM2MVSC_VFLTCOEFF_L + 0x2000 * (x))
#define XM2MVSC_HFLTCOEFF_L 0x2800
#define XM2MVSC_HFLTCOEFF(x) (XM2MVSC_HFLTCOEFF_L + 0x2000 * (x))
/*
 * Base offset of the per-channel register window.
 * The macro argument is parenthesized so that expression arguments
 * (e.g. "a + b") expand correctly.
 */
#define XM2MSC_CHAN_REGS_START(x) (0x100 + 0x200 * (x))
/*
 * IP has reserved area between XM2MSC_DSTIMGBUF0 and
 * XM2MSC_DSTIMGBUF1 registers of channel 4
 */
#define XM2MSC_RESERVED_AREA 0x600

/* GPIO RESET MACROS */
#define XM2MSC_RESET_ASSERT (0x1)
#define XM2MSC_RESET_DEASSERT (0x0)

#define XM2MSC_MIN_CHAN 1
#define XM2MSC_MAX_CHAN 8

#define XM2MSC_MAX_WIDTH (8192)
#define XM2MSC_MAX_HEIGHT (4320)
#define XM2MSC_MIN_WIDTH (64)
#define XM2MSC_MIN_HEIGHT (64)
/* Fixed-point scaling step: 1.0 is represented as 65536 */
#define XM2MSC_STEP_PRECISION (65536)
/* Mask definitions for Low 16 bits in a 32 bit number */
#define XM2MSC_MASK_LOW_16BITS GENMASK(15, 0)
#define XM2MSC_BITSHIFT_16 (16)

#define XM2MSC_DRIVER_NAME "xm2msc"

/* Channel status flags (struct xm2msc_chan_ctx.status) */
#define CHAN_ATTACHED BIT(0)
#define CHAN_OPENED BIT(1)

/* Indices into struct xm2msc_chan_ctx.q_data[] */
#define XM2MSC_CHAN_OUT 0
#define XM2MSC_CHAN_CAP 1

/*
 * Number of contiguously streamed channels: first cleared bit in the
 * OUT and CAP streamed-channel bitmaps, whichever comes first.
 */
#define NUM_STREAM(_x) \
({ typeof(_x) (x) = (_x); \
min(ffz(x->out_streamed_chan), \
ffz(x->cap_streamed_chan)); })
111 /* Xilinx Video Specific Color/Pixel Formats */
/* Xilinx Video Specific Color/Pixel Formats (values match the IP's encoding) */
enum xm2msc_pix_fmt {
	XILINX_M2MSC_FMT_RGBX8 = 10,
	XILINX_M2MSC_FMT_YUVX8 = 11,
	XILINX_M2MSC_FMT_YUYV8 = 12,
	XILINX_M2MSC_FMT_RGBX10 = 15,
	XILINX_M2MSC_FMT_YUVX10 = 16,
	XILINX_M2MSC_FMT_Y_UV8 = 18,
	XILINX_M2MSC_FMT_Y_UV8_420 = 19,
	XILINX_M2MSC_FMT_RGB8 = 20,
	XILINX_M2MSC_FMT_YUV8 = 21,
	XILINX_M2MSC_FMT_Y_UV10 = 22,
	XILINX_M2MSC_FMT_Y_UV10_420 = 23,
	XILINX_M2MSC_FMT_Y8 = 24,
	XILINX_M2MSC_FMT_Y10 = 25,
	XILINX_M2MSC_FMT_BGRX8 = 27,
	XILINX_M2MSC_FMT_UYVY8 = 28,
	XILINX_M2MSC_FMT_BGR8 = 29,
};
132 * struct xm2msc_fmt - driver info for each of the supported video formats
133 * @name: human-readable device tree name for this entry
134 * @fourcc: standard format identifier
135 * @xm2msc_fmt: Xilinx Video Specific Color/Pixel Formats
136 * @num_planes: number of planes supported by format
141 enum xm2msc_pix_fmt xm2msc_fmt;
/*
 * Table mapping V4L2 fourcc codes to the IP's pixel-format encoding.
 *
 * NOTE(review): the per-entry braces and most .name/.num_planes
 * initializers appear to have been lost in extraction; restore them
 * from the original source before building.
 */
static const struct xm2msc_fmt formats[] = {
		.fourcc = V4L2_PIX_FMT_BGRX32,
		.xm2msc_fmt = XILINX_M2MSC_FMT_RGBX8,
		.fourcc = V4L2_PIX_FMT_XVUY32,
		.xm2msc_fmt = XILINX_M2MSC_FMT_YUVX8,
		.fourcc = V4L2_PIX_FMT_YUYV,
		.xm2msc_fmt = XILINX_M2MSC_FMT_YUYV8,
		.name = "xbgr2101010",
		.fourcc = V4L2_PIX_FMT_XBGR30,
		.xm2msc_fmt = XILINX_M2MSC_FMT_RGBX10,
		.name = "yuvx2101010",
		.fourcc = V4L2_PIX_FMT_XVUY10,
		.xm2msc_fmt = XILINX_M2MSC_FMT_YUVX10,
		.fourcc = V4L2_PIX_FMT_NV16,
		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
		.fourcc = V4L2_PIX_FMT_NV12,
		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
		.fourcc = V4L2_PIX_FMT_RGB24,
		.xm2msc_fmt = XILINX_M2MSC_FMT_RGB8,
		.fourcc = V4L2_PIX_FMT_VUY24,
		.xm2msc_fmt = XILINX_M2MSC_FMT_YUV8,
		.fourcc = V4L2_PIX_FMT_XV20,
		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
		.fourcc = V4L2_PIX_FMT_XV15,
		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
		.fourcc = V4L2_PIX_FMT_GREY,
		.xm2msc_fmt = XILINX_M2MSC_FMT_Y8,
		.fourcc = V4L2_PIX_FMT_Y10,
		.xm2msc_fmt = XILINX_M2MSC_FMT_Y10,
		.fourcc = V4L2_PIX_FMT_XBGR32,
		.xm2msc_fmt = XILINX_M2MSC_FMT_BGRX8,
		.fourcc = V4L2_PIX_FMT_UYVY,
		.xm2msc_fmt = XILINX_M2MSC_FMT_UYVY8,
		.fourcc = V4L2_PIX_FMT_BGR24,
		.xm2msc_fmt = XILINX_M2MSC_FMT_BGR8,
245 * struct xm2msc_q_data - Per-queue, driver-specific private data
246 * There is one source queue and one destination queue for each m2m context.
247 * @width: frame width
248 * @height: frame height
249 * @stride: bytes per lines
250 * @nplanes: Current number of planes
251 * @bytesperline: bytes per line per plane
252 * @sizeimage: image size per plane
253 * @colorspace: supported colorspace
254 * @field: supported field value
257 struct xm2msc_q_data {
261 unsigned int nplanes;
262 unsigned int bytesperline[2];
263 unsigned int sizeimage[2];
264 enum v4l2_colorspace colorspace;
265 enum v4l2_field field;
266 const struct xm2msc_fmt *fmt;
270 * struct xm2msc_chan_ctx - Scaler Channel Info, Per-Channel context
271 * @regs: IO mapped base address of the Channel
272 * @xm2msc_dev: Pointer to struct xm2m_msc_dev
273 * @num: HW Scaling Channel number
274 * @minor: Minor number of the video device
275 * @status: channel status, CHAN_ATTACHED or CHAN_OPENED
276 * @frames: number of frames processed
278 * @fh: v4l2 file handle
279 * @m2m_dev: m2m device
280 * @m2m_ctx: memory to memory context structure
281 * @q_data: src & dst queue data
283 struct xm2msc_chan_ctx {
285 struct xm2m_msc_dev *xm2msc_dev;
289 unsigned long frames;
291 struct video_device vfd;
293 struct v4l2_m2m_dev *m2m_dev;
294 struct v4l2_m2m_ctx *m2m_ctx;
296 struct xm2msc_q_data q_data[2];
300 * struct xm2m_msc_dev - Xilinx M2M Multi-scaler Device
301 * @dev: pointer to struct device instance used by the driver
302 * @regs: IO mapped base address of the HW/IP
303 * @irq: interrupt number
304 * @max_chan: maximum number of Scaling Channels
305 * @max_ht: maximum number of rows in a plane
306 * @max_wd: maximum number of column in a plane
307 * @taps: number of taps set in HW
308 * @supported_fmt: bitmap for all supported fmts by HW
309 * @dma_addr_size: Size of dma address pointer in IP (either 32 or 64)
310 * @rst_gpio: reset gpio handler
311 * @opened_chan: bitmap for all open channel
312 * @out_streamed_chan: bitmap for all out streamed channel
313 * @cap_streamed_chan: bitmap for all capture streamed channel
314 * @running_chan: currently running channels
315 * @device_busy: HW device is busy or not
316 * @isr_wait: flag to follow the ISR complete or not
317 * @isr_finished: Wait queue used to wait for IP to complete processing
318 * @v4l2_dev: main struct to for V4L2 device drivers
319 * @dev_mutex: lock for V4L2 device
320 * @mutex: lock for channel ctx
321 * @lock: lock used in IRQ
322 * @xm2msc_chan: arrey of channel context
323 * @hscaler_coeff: Array of filter coefficients for the Horizontal Scaler
324 * @vscaler_coeff: Array of filter coefficients for the Vertical Scaler
326 struct xm2m_msc_dev {
336 struct gpio_desc *rst_gpio;
339 u32 out_streamed_chan;
340 u32 cap_streamed_chan;
344 wait_queue_head_t isr_finished;
346 struct v4l2_device v4l2_dev;
348 struct mutex dev_mutex; /*the mutex for v4l2*/
349 struct mutex mutex; /*lock for bitmap reg*/
350 spinlock_t lock; /*IRQ lock*/
352 struct xm2msc_chan_ctx xm2msc_chan[XM2MSC_MAX_CHAN];
353 short hscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
354 short vscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
/* Recover the channel context from its embedded v4l2 file handle */
#define fh_to_chanctx(__fh) container_of(__fh, struct xm2msc_chan_ctx, fh)
359 static inline u32 xm2msc_readreg(const void __iomem *addr)
361 return ioread32(addr);
364 static inline void xm2msc_write64reg(void __iomem *addr, u64 value)
366 iowrite32(lower_32_bits(value), addr);
367 iowrite32(upper_32_bits(value), (void __iomem *)(addr + 4));
370 static inline void xm2msc_writereg(void __iomem *addr, u32 value)
372 iowrite32(value, addr);
375 static struct xm2msc_q_data *get_q_data(struct xm2msc_chan_ctx *chan_ctx,
376 enum v4l2_buf_type type)
379 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
380 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
381 return &chan_ctx->q_data[XM2MSC_CHAN_OUT];
382 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
383 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
384 return &chan_ctx->q_data[XM2MSC_CHAN_CAP];
386 v4l2_err(&chan_ctx->xm2msc_dev->v4l2_dev,
387 "Not supported Q type %d\n", type);
392 static u32 find_format_index(struct v4l2_format *f)
394 const struct xm2msc_fmt *fmt;
397 for (i = 0; i < ARRAY_SIZE(formats); i++) {
399 if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
406 static const struct xm2msc_fmt *find_format(struct v4l2_format *f)
408 const struct xm2msc_fmt *fmt;
411 for (i = 0; i < ARRAY_SIZE(formats); i++) {
413 if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
417 if (i == ARRAY_SIZE(formats))
424 xm2msc_hscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
425 const short *coeff, u32 ntaps)
427 unsigned int i, j, pad, offset;
428 const u32 nphases = XSCALER_MAX_PHASES;
430 /* Determine if coefficient needs padding (effective vs. max taps) */
431 pad = XSCALER_MAX_TAPS - ntaps;
434 memset(xm2msc->hscaler_coeff, 0, sizeof(xm2msc->hscaler_coeff));
436 /* Load coefficients into scaler coefficient table */
437 for (i = 0; i < nphases; i++) {
438 for (j = 0; j < ntaps; ++j)
439 xm2msc->hscaler_coeff[i][j + offset] =
440 coeff[i * ntaps + j];
444 static void xm2msc_hscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
447 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
448 int val, offset, rd_indx;
450 u32 ntaps = chan_ctx->xm2msc_dev->taps;
451 const u32 nphases = XSCALER_MAX_PHASES;
453 offset = (XSCALER_MAX_TAPS - ntaps) / 2;
454 for (i = 0; i < nphases; i++) {
455 for (j = 0; j < ntaps / 2; j++) {
456 rd_indx = j * 2 + offset;
457 val = (xm2msc->hscaler_coeff[i][rd_indx + 1] <<
458 XM2MSC_BITSHIFT_16) |
459 (xm2msc->hscaler_coeff[i][rd_indx] &
460 XM2MSC_MASK_LOW_16BITS);
461 xm2msc_writereg((xm2msc->regs + base_addr) +
462 ((i * ntaps / 2 + j) * 4), val);
468 xm2msc_vscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
469 const short *coeff, const u32 ntaps)
473 const u32 nphases = XSCALER_MAX_PHASES;
475 /* Determine if coefficient needs padding (effective vs. max taps) */
476 pad = XSCALER_MAX_TAPS - ntaps;
477 offset = pad ? (pad >> 1) : 0;
479 /* Zero Entire Array */
480 memset(xm2msc->vscaler_coeff, 0, sizeof(xm2msc->vscaler_coeff));
482 /* Load User defined coefficients into scaler coefficient table */
483 for (i = 0; i < nphases; i++) {
484 for (j = 0; j < ntaps; ++j)
485 xm2msc->vscaler_coeff[i][j + offset] =
486 coeff[i * ntaps + j];
491 xm2msc_vscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
494 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
495 u32 val, i, j, offset, rd_indx;
496 u32 ntaps = chan_ctx->xm2msc_dev->taps;
497 const u32 nphases = XSCALER_MAX_PHASES;
499 offset = (XSCALER_MAX_TAPS - ntaps) / 2;
501 for (i = 0; i < nphases; i++) {
502 for (j = 0; j < ntaps / 2; j++) {
503 rd_indx = j * 2 + offset;
504 val = (xm2msc->vscaler_coeff[i][rd_indx + 1] <<
505 XM2MSC_BITSHIFT_16) |
506 (xm2msc->vscaler_coeff[i][rd_indx] &
507 XM2MSC_MASK_LOW_16BITS);
508 xm2msc_writereg((xm2msc->regs +
509 base_addr) + ((i * ntaps / 2 + j) * 4), val);
515 xm2msc_select_hcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
518 u32 width_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].width;
519 u32 width_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].width;
520 u32 ntaps = chan_ctx->xm2msc_dev->taps;
522 if (width_out < width_in) {
523 hscale_ratio = (width_in * 10) / width_out;
525 switch (chan_ctx->xm2msc_dev->taps) {
526 case XSCALER_TAPS_12:
527 if (hscale_ratio > 35) {
528 *coeff = &xhsc_coeff_taps12[0][0];
529 ntaps = XSCALER_TAPS_12;
530 } else if (hscale_ratio > 25) {
531 *coeff = &xhsc_coeff_taps10[0][0];
532 ntaps = XSCALER_TAPS_10;
533 } else if (hscale_ratio > 15) {
534 *coeff = &xhsc_coeff_taps8[0][0];
535 ntaps = XSCALER_TAPS_8;
537 *coeff = &xhsc_coeff_taps6[0][0];
538 ntaps = XSCALER_TAPS_6;
541 case XSCALER_TAPS_10:
542 if (hscale_ratio > 25) {
543 *coeff = &xhsc_coeff_taps10[0][0];
544 ntaps = XSCALER_TAPS_10;
545 } else if (hscale_ratio > 15) {
546 *coeff = &xhsc_coeff_taps8[0][0];
547 ntaps = XSCALER_TAPS_8;
549 *coeff = &xhsc_coeff_taps6[0][0];
550 ntaps = XSCALER_TAPS_6;
554 if (hscale_ratio > 15) {
555 *coeff = &xhsc_coeff_taps8[0][0];
556 ntaps = XSCALER_TAPS_8;
558 *coeff = &xhsc_coeff_taps6[0][0];
559 ntaps = XSCALER_TAPS_6;
562 default: /* or XSCALER_TAPS_6 */
563 *coeff = &xhsc_coeff_taps6[0][0];
564 ntaps = XSCALER_TAPS_6;
568 * Scale Up Mode will always use 6 tap filter
569 * This also includes 1:1
571 *coeff = &xhsc_coeff_taps6[0][0];
572 ntaps = XSCALER_TAPS_6;
579 xm2msc_select_vcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
582 u32 height_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].height;
583 u32 height_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].height;
584 u32 ntaps = chan_ctx->xm2msc_dev->taps;
586 if (height_out < height_in) {
587 vscale_ratio = (height_in * 10) / height_out;
589 switch (chan_ctx->xm2msc_dev->taps) {
590 case XSCALER_TAPS_12:
591 if (vscale_ratio > 35) {
592 *coeff = &xvsc_coeff_taps12[0][0];
593 ntaps = XSCALER_TAPS_12;
594 } else if (vscale_ratio > 25) {
595 *coeff = &xvsc_coeff_taps10[0][0];
596 ntaps = XSCALER_TAPS_10;
597 } else if (vscale_ratio > 15) {
598 *coeff = &xvsc_coeff_taps8[0][0];
599 ntaps = XSCALER_TAPS_8;
601 *coeff = &xvsc_coeff_taps6[0][0];
602 ntaps = XSCALER_TAPS_6;
605 case XSCALER_TAPS_10:
606 if (vscale_ratio > 25) {
607 *coeff = &xvsc_coeff_taps10[0][0];
608 ntaps = XSCALER_TAPS_10;
609 } else if (vscale_ratio > 15) {
610 *coeff = &xvsc_coeff_taps8[0][0];
611 ntaps = XSCALER_TAPS_8;
613 *coeff = &xvsc_coeff_taps6[0][0];
614 ntaps = XSCALER_TAPS_6;
618 if (vscale_ratio > 15) {
619 *coeff = &xvsc_coeff_taps8[0][0];
620 ntaps = XSCALER_TAPS_8;
622 *coeff = &xvsc_coeff_taps6[0][0];
623 ntaps = XSCALER_TAPS_6;
626 default: /* or XSCALER_TAPS_6 */
627 *coeff = &xvsc_coeff_taps6[0][0];
628 ntaps = XSCALER_TAPS_6;
632 * Scale Up Mode will always use 6 tap filter
633 * This also includes 1:1
635 *coeff = &xvsc_coeff_taps6[0][0];
636 ntaps = XSCALER_TAPS_6;
642 static void xm2mvsc_initialize_coeff_banks(struct xm2msc_chan_ctx *chan_ctx)
644 const short *coeff = NULL;
646 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
648 ntaps = xm2msc_select_hcoeff(chan_ctx, &coeff);
649 xm2msc_hscaler_load_ext_coeff(xm2msc, coeff, ntaps);
650 xm2msc_hscaler_set_coeff(chan_ctx, XM2MVSC_HFLTCOEFF(chan_ctx->num));
652 dev_dbg(xm2msc->dev, "htaps %d selected for chan %d\n",
653 ntaps, chan_ctx->num);
655 ntaps = xm2msc_select_vcoeff(chan_ctx, &coeff);
656 xm2msc_vscaler_load_ext_coeff(xm2msc, coeff, ntaps);
657 xm2msc_vscaler_set_coeff(chan_ctx, XM2MVSC_VFLTCOEFF(chan_ctx->num));
659 dev_dbg(xm2msc->dev, "vtaps %d selected for chan %d\n",
660 ntaps, chan_ctx->num);
663 static void xm2msc_set_chan_params(struct xm2msc_chan_ctx *chan_ctx,
664 enum v4l2_buf_type type)
666 struct xm2msc_q_data *q_data = get_q_data(chan_ctx, type);
667 const struct xm2msc_fmt *fmt = q_data->fmt;
668 void __iomem *base = chan_ctx->regs;
670 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
671 xm2msc_writereg(base + XM2MSC_WIDTHIN, q_data->width);
672 xm2msc_writereg(base + XM2MSC_HEIGHTIN, q_data->height);
673 xm2msc_writereg(base + XM2MSC_INPIXELFMT, fmt->xm2msc_fmt);
674 xm2msc_writereg(base + XM2MSC_INSTRIDE, q_data->stride);
676 xm2msc_writereg(base + XM2MSC_WIDTHOUT, q_data->width);
677 xm2msc_writereg(base + XM2MSC_HEIGHTOUT, q_data->height);
678 xm2msc_writereg(base + XM2MSC_OUTPIXELFMT, fmt->xm2msc_fmt);
679 xm2msc_writereg(base + XM2MSC_OUTSTRIDE, q_data->stride);
683 static void xm2msc_set_chan_com_params(struct xm2msc_chan_ctx *chan_ctx)
685 void __iomem *base = chan_ctx->regs;
686 struct xm2msc_q_data *out_q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
687 struct xm2msc_q_data *cap_q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
691 xm2mvsc_initialize_coeff_banks(chan_ctx);
693 pixel_rate = (out_q_data->width * XM2MSC_STEP_PRECISION) /
695 line_rate = (out_q_data->height * XM2MSC_STEP_PRECISION) /
698 xm2msc_writereg(base + XM2MSC_PIXELRATE, pixel_rate);
699 xm2msc_writereg(base + XM2MSC_LINERATE, line_rate);
702 static void xm2msc_program_allchan(struct xm2m_msc_dev *xm2msc)
706 for (chan = 0; chan < xm2msc->running_chan; chan++) {
707 struct xm2msc_chan_ctx *chan_ctx;
709 chan_ctx = &xm2msc->xm2msc_chan[chan];
711 xm2msc_set_chan_params(chan_ctx,
712 V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
713 xm2msc_set_chan_params(chan_ctx,
714 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
715 xm2msc_set_chan_com_params(chan_ctx);
720 xm2msc_pr_q(struct device *dev, struct xm2msc_q_data *q, int chan,
721 int type, const char *fun_name)
724 const struct xm2msc_fmt *fmt = q->fmt;
726 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
727 dev_dbg(dev, "\n\nOUTPUT Q (%d) Context from [[ %s ]]",
730 dev_dbg(dev, "\n\nCAPTURE Q (%d) Context from [[ %s ]]",
733 dev_dbg(dev, "width height stride clrspace field planes\n");
734 dev_dbg(dev, " %d %d %d %d %d %d\n",
735 q->width, q->height, q->stride,
736 q->colorspace, q->field, q->nplanes);
738 for (i = 0; i < q->nplanes; i++) {
739 dev_dbg(dev, "[plane %d ] bytesperline sizeimage\n", i);
740 dev_dbg(dev, " %d %d\n",
741 q->bytesperline[i], q->sizeimage[i]);
744 dev_dbg(dev, "fmt_name 4cc xlnx-fmt\n");
745 dev_dbg(dev, "%s %d %d\n",
746 fmt->name, fmt->fourcc, fmt->xm2msc_fmt);
747 dev_dbg(dev, "\n\n");
751 xm2msc_pr_status(struct xm2m_msc_dev *xm2msc,
752 const char *fun_name)
754 struct device *dev = xm2msc->dev;
756 dev_dbg(dev, "Status in %s\n", fun_name);
757 dev_dbg(dev, "opened_chan out_streamed_chan cap_streamed_chan\n");
758 dev_dbg(dev, "0x%x 0x%x 0x%x\n",
759 xm2msc->opened_chan, xm2msc->out_streamed_chan,
760 xm2msc->cap_streamed_chan);
761 dev_dbg(dev, "\n\n");
765 xm2msc_pr_chanctx(struct xm2msc_chan_ctx *ctx, const char *fun_name)
767 struct device *dev = ctx->xm2msc_dev->dev;
769 dev_dbg(dev, "\n\n----- [[ %s ]]: Channel %d (0x%p) context -----\n",
770 fun_name, ctx->num, ctx);
771 dev_dbg(dev, "minor = %d\n", ctx->minor);
772 dev_dbg(dev, "reg mapped at %p\n", ctx->regs);
773 dev_dbg(dev, "xm2msc \tm2m_dev \tm2m_ctx\n");
774 dev_dbg(dev, "%p \t%p \t%p\n", ctx->xm2msc_dev,
775 ctx->m2m_dev, ctx->m2m_ctx);
777 if (ctx->status & CHAN_OPENED)
778 dev_dbg(dev, "Opened ");
779 if (ctx->status & CHAN_ATTACHED)
780 dev_dbg(dev, "and attached");
782 dev_dbg(dev, "-----------------------------------\n");
783 dev_dbg(dev, "\n\n");
787 xm2msc_pr_screg(struct device *dev, const void __iomem *base)
789 dev_dbg(dev, "Ctr, GIE, IE, IS OUT\n");
790 dev_dbg(dev, "0x%x 0x%x 0x%x 0x%x 0x%x\n",
791 xm2msc_readreg(base + XM2MSC_AP_CTRL),
792 xm2msc_readreg(base + XM2MSC_GIE),
793 xm2msc_readreg(base + XM2MSC_IER),
794 xm2msc_readreg(base + XM2MSC_ISR),
795 xm2msc_readreg(base + XM2MSC_NUM_OUTS));
799 xm2msc_pr_chanreg(struct device *dev, struct xm2msc_chan_ctx *chan)
801 const void __iomem *base = chan->regs;
803 dev_dbg(dev, "WIN HIN INPIXELFMT INSTRIDE SRCB0L/H SRCB1L/H\n");
804 dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
805 xm2msc_readreg(base + XM2MSC_WIDTHIN),
806 xm2msc_readreg(base + XM2MSC_HEIGHTIN),
807 xm2msc_readreg(base + XM2MSC_INPIXELFMT),
808 xm2msc_readreg(base + XM2MSC_INSTRIDE),
809 xm2msc_readreg(base + XM2MSC_SRCIMGBUF0),
810 xm2msc_readreg(base + XM2MSC_SRCIMGBUF0 + 4),
811 xm2msc_readreg(base + XM2MSC_SRCIMGBUF1),
812 xm2msc_readreg(base + XM2MSC_SRCIMGBUF1 + 4));
813 dev_dbg(dev, "WOUT HOUT OUTPIXELFMT OUTSTRIDE DBUF0L/H DBUF1L/H\n");
814 dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
815 xm2msc_readreg(base + XM2MSC_WIDTHOUT),
816 xm2msc_readreg(base + XM2MSC_HEIGHTOUT),
817 xm2msc_readreg(base + XM2MSC_OUTPIXELFMT),
818 xm2msc_readreg(base + XM2MSC_OUTSTRIDE),
819 xm2msc_readreg(base + XM2MSC_DSTIMGBUF0),
820 xm2msc_readreg(base + XM2MSC_DSTIMGBUF0 + 4),
822 xm2msc_readreg(base +
823 XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA) :
824 xm2msc_readreg(base + XM2MSC_DSTIMGBUF1),
826 xm2msc_readreg(base +
827 XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA + 4) :
828 xm2msc_readreg(base + XM2MSC_DSTIMGBUF1 + 4));
830 dev_dbg(dev, "LINERATE PIXELRATE\n");
831 dev_dbg(dev, "0x%x 0x%x\n",
832 xm2msc_readreg(base + XM2MSC_LINERATE),
833 xm2msc_readreg(base + XM2MSC_PIXELRATE));
837 xm2msc_pr_allchanreg(struct xm2m_msc_dev *xm2msc)
840 struct xm2msc_chan_ctx *chan_ctx;
841 struct device *dev = xm2msc->dev;
843 xm2msc_pr_screg(xm2msc->dev, xm2msc->regs);
845 for (i = 0; i < xm2msc->running_chan; i++) {
846 chan_ctx = &xm2msc->xm2msc_chan[i];
847 dev_dbg(dev, "Regs val for channel %d\n", i);
848 dev_dbg(dev, "______________________________________________\n");
849 xm2msc_pr_chanreg(dev, chan_ctx);
850 dev_dbg(dev, "processed frames = %lu\n", chan_ctx->frames);
851 dev_dbg(dev, "______________________________________________\n");
855 static inline bool xm2msc_testbit(int num, u32 *addr)
857 return (*addr & BIT(num));
860 static inline void xm2msc_setbit(int num, u32 *addr)
865 static inline void xm2msc_clrbit(int num, u32 *addr)
870 static void xm2msc_stop(struct xm2m_msc_dev *xm2msc)
872 void __iomem *base = xm2msc->regs;
873 u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
875 data &= ~XM2MSC_AP_CTRL_START;
876 xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
879 static void xm2msc_start(struct xm2m_msc_dev *xm2msc)
881 void __iomem *base = xm2msc->regs;
882 u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
884 data |= XM2MSC_AP_CTRL_START;
885 xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
888 static void xm2msc_set_chan(struct xm2msc_chan_ctx *ctx, bool state)
890 mutex_lock(&ctx->xm2msc_dev->mutex);
892 xm2msc_setbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
894 xm2msc_clrbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
895 mutex_unlock(&ctx->xm2msc_dev->mutex);
899 xm2msc_set_chan_stream(struct xm2msc_chan_ctx *ctx, bool state, int type)
903 if (type == XM2MSC_CHAN_OUT)
904 ptr = &ctx->xm2msc_dev->out_streamed_chan;
906 ptr = &ctx->xm2msc_dev->cap_streamed_chan;
908 spin_lock(&ctx->xm2msc_dev->lock);
910 xm2msc_setbit(ctx->num, ptr);
912 xm2msc_clrbit(ctx->num, ptr);
914 spin_unlock(&ctx->xm2msc_dev->lock);
918 xm2msc_chk_chan_stream(struct xm2msc_chan_ctx *ctx, int type)
923 if (type == XM2MSC_CHAN_OUT)
924 ptr = &ctx->xm2msc_dev->out_streamed_chan;
926 ptr = &ctx->xm2msc_dev->cap_streamed_chan;
928 mutex_lock(&ctx->xm2msc_dev->mutex);
929 ret = xm2msc_testbit(ctx->num, ptr);
930 mutex_unlock(&ctx->xm2msc_dev->mutex);
935 static void xm2msc_set_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
937 xm2msc_setbit(index, &xm2msc->supported_fmt);
940 static int xm2msc_chk_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
942 return xm2msc_testbit(index, &xm2msc->supported_fmt);
945 static void xm2msc_reset(struct xm2m_msc_dev *xm2msc)
947 gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_ASSERT);
948 gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_DEASSERT);
954 static int xm2msc_job_ready(void *priv)
956 struct xm2msc_chan_ctx *chan_ctx = priv;
958 if ((v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) &&
959 (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0))
964 static bool xm2msc_alljob_ready(struct xm2m_msc_dev *xm2msc)
966 struct xm2msc_chan_ctx *chan_ctx;
969 for (chan = 0; chan < xm2msc->running_chan; chan++) {
970 chan_ctx = &xm2msc->xm2msc_chan[chan];
972 if (!xm2msc_job_ready((void *)chan_ctx)) {
973 dev_info(xm2msc->dev, "chan %d not ready\n",
982 static void xm2msc_chan_abort_bufs(struct xm2msc_chan_ctx *chan_ctx)
984 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
985 struct vb2_v4l2_buffer *dst_vb, *src_vb;
987 spin_lock(&xm2msc->lock);
988 dev_dbg(xm2msc->dev, "aborting all buffers\n");
990 while (v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) {
991 src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
992 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
995 while (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0) {
996 dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
997 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
1000 v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
1001 spin_unlock(&xm2msc->lock);
1004 static void xm2msc_job_abort(void *priv)
1006 struct xm2msc_chan_ctx *chan_ctx = priv;
1008 xm2msc_chan_abort_bufs(chan_ctx);
1011 * Stream off the channel as job_abort may not always
1012 * be called after streamoff
1014 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
1015 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
1018 static int xm2msc_set_bufaddr(struct xm2m_msc_dev *xm2msc)
1021 struct xm2msc_chan_ctx *chan_ctx;
1022 struct vb2_v4l2_buffer *src_vb, *dst_vb;
1024 dma_addr_t src_luma, dst_luma;
1025 dma_addr_t src_croma, dst_croma;
1027 if (!xm2msc_alljob_ready(xm2msc))
1030 for (chan = 0; chan < xm2msc->running_chan; chan++) {
1031 chan_ctx = &xm2msc->xm2msc_chan[chan];
1032 base = chan_ctx->regs;
1034 src_vb = v4l2_m2m_next_src_buf(chan_ctx->m2m_ctx);
1035 dst_vb = v4l2_m2m_next_dst_buf(chan_ctx->m2m_ctx);
1037 if (!src_vb || !dst_vb) {
1038 v4l2_err(&xm2msc->v4l2_dev, "buffer not found chan = %d\n",
1043 src_luma = vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 0);
1044 dst_luma = vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 0);
1046 if (chan_ctx->q_data[XM2MSC_CHAN_OUT].nplanes == 2)
1048 vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 1);
1052 if (chan_ctx->q_data[XM2MSC_CHAN_CAP].nplanes == 2)
1054 vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 1);
1058 if (xm2msc->dma_addr_size == 64 &&
1059 sizeof(dma_addr_t) == sizeof(u64)) {
1060 xm2msc_write64reg(base + XM2MSC_SRCIMGBUF0, src_luma);
1061 xm2msc_write64reg(base + XM2MSC_SRCIMGBUF1, src_croma);
1062 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF0, dst_luma);
1063 if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
1064 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1 +
1065 XM2MSC_RESERVED_AREA,
1068 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1,
1071 xm2msc_writereg(base + XM2MSC_SRCIMGBUF0, src_luma);
1072 xm2msc_writereg(base + XM2MSC_SRCIMGBUF1, src_croma);
1073 xm2msc_writereg(base + XM2MSC_DSTIMGBUF0, dst_luma);
1074 if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
1075 xm2msc_writereg(base + XM2MSC_DSTIMGBUF1 +
1076 XM2MSC_RESERVED_AREA,
1079 xm2msc_writereg(base + XM2MSC_DSTIMGBUF1,
1086 static void xm2msc_job_finish(struct xm2m_msc_dev *xm2msc)
1090 for (chan = 0; chan < xm2msc->running_chan; chan++) {
1091 struct xm2msc_chan_ctx *chan_ctx;
1093 chan_ctx = &xm2msc->xm2msc_chan[chan];
1094 v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
1098 static void xm2msc_job_done(struct xm2m_msc_dev *xm2msc)
1102 for (chan = 0; chan < xm2msc->running_chan; chan++) {
1103 struct xm2msc_chan_ctx *chan_ctx;
1104 struct vb2_v4l2_buffer *src_vb, *dst_vb;
1105 unsigned long flags;
1107 chan_ctx = &xm2msc->xm2msc_chan[chan];
1109 src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
1110 dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
1112 if (src_vb && dst_vb) {
1113 dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
1114 dst_vb->timecode = src_vb->timecode;
1115 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1117 src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1119 spin_lock_irqsave(&xm2msc->lock, flags);
1120 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
1121 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
1122 spin_unlock_irqrestore(&xm2msc->lock, flags);
1128 static void xm2msc_device_run(void *priv)
1130 struct xm2msc_chan_ctx *chan_ctx = priv;
1131 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1132 void __iomem *base = xm2msc->regs;
1133 unsigned long flags;
1136 spin_lock_irqsave(&xm2msc->lock, flags);
1137 if (xm2msc->device_busy) {
1138 spin_unlock_irqrestore(&xm2msc->lock, flags);
1141 xm2msc->device_busy = true;
1143 if (xm2msc->running_chan != NUM_STREAM(xm2msc)) {
1144 dev_dbg(xm2msc->dev, "Running chan was %d\n",
1145 xm2msc->running_chan);
1146 xm2msc->running_chan = NUM_STREAM(xm2msc);
1148 /* IP need reset for updating of XM2MSC_NUM_OUT */
1149 xm2msc_reset(xm2msc);
1150 xm2msc_writereg(base + XM2MSC_NUM_OUTS, xm2msc->running_chan);
1151 xm2msc_program_allchan(xm2msc);
1153 spin_unlock_irqrestore(&xm2msc->lock, flags);
1155 dev_dbg(xm2msc->dev, "Running chan = %d\n", xm2msc->running_chan);
1156 if (!xm2msc->running_chan) {
1157 xm2msc->device_busy = false;
1161 ret = xm2msc_set_bufaddr(xm2msc);
1164 * All channel does not have buffer
1165 * Currently we do not handle the removal of any Intermediate
1166 * channel while streaming is going on
1168 if (xm2msc->out_streamed_chan || xm2msc->cap_streamed_chan)
1169 dev_err(xm2msc->dev,
1170 "Buffer not available, streaming chan 0x%x\n",
1171 xm2msc->cap_streamed_chan);
1173 xm2msc->device_busy = false;
1177 xm2msc_writereg(base + XM2MSC_GIE, XM2MSC_GIE_EN);
1178 xm2msc_writereg(base + XM2MSC_IER, XM2MSC_ISR_DONE);
1180 xm2msc_pr_status(xm2msc, __func__);
1181 xm2msc_pr_screg(xm2msc->dev, base);
1182 xm2msc_pr_allchanreg(xm2msc);
1184 xm2msc_start(xm2msc);
1186 xm2msc->isr_wait = true;
1187 wait_event(xm2msc->isr_finished, !xm2msc->isr_wait);
1189 xm2msc_job_done(xm2msc);
1191 xm2msc->device_busy = false;
1193 if (xm2msc_alljob_ready(xm2msc))
1194 xm2msc_device_run(xm2msc->xm2msc_chan);
1196 xm2msc_job_finish(xm2msc);
1199 static irqreturn_t xm2msc_isr(int irq, void *data)
1201 struct xm2m_msc_dev *xm2msc = (struct xm2m_msc_dev *)data;
1202 void __iomem *base = xm2msc->regs;
1205 status = xm2msc_readreg(base + XM2MSC_ISR);
1206 if (!(status & XM2MSC_ISR_DONE))
1209 xm2msc_writereg(base + XM2MSC_ISR, status & XM2MSC_ISR_DONE);
1211 xm2msc_stop(xm2msc);
1213 xm2msc->isr_wait = false;
1214 wake_up(&xm2msc->isr_finished);
1219 static int xm2msc_streamon(struct file *file, void *fh,
1220 enum v4l2_buf_type type)
1222 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1224 return v4l2_m2m_streamon(file, chan_ctx->m2m_ctx, type);
1227 static int xm2msc_streamoff(struct file *file, void *fh,
1228 enum v4l2_buf_type type)
1230 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1233 ret = v4l2_m2m_streamoff(file, chan_ctx->m2m_ctx, type);
1235 /* Check if any channel is still running */
1236 xm2msc_device_run(chan_ctx);
1240 static int xm2msc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1242 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1244 return v4l2_m2m_qbuf(file, chan_ctx->m2m_ctx, buf);
1247 static int xm2msc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1249 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1251 return v4l2_m2m_dqbuf(file, chan_ctx->m2m_ctx, buf);
1254 static int xm2msc_expbuf(struct file *file, void *fh,
1255 struct v4l2_exportbuffer *eb)
1257 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1259 return v4l2_m2m_expbuf(file, chan_ctx->m2m_ctx, eb);
1262 static int xm2msc_createbufs(struct file *file, void *fh,
1263 struct v4l2_create_buffers *cb)
1265 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1267 return v4l2_m2m_create_bufs(file, chan_ctx->m2m_ctx, cb);
/* VIDIOC_REQBUFS: delegate to the mem2mem helper. */
static int xm2msc_reqbufs(struct file *file, void *fh,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	return v4l2_m2m_reqbufs(file, chan_ctx->m2m_ctx, reqbufs);
}
/* VIDIOC_QUERYBUF: delegate to the mem2mem helper. */
static int xm2msc_querybuf(struct file *file, void *fh,
			   struct v4l2_buffer *buf)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	return v4l2_m2m_querybuf(file, chan_ctx->m2m_ctx, buf);
}
/*
 * Compute the line stride in bytes for @width pixels of format @xfmt.
 * NOTE(review): the per-case byte counts below (4/2/1/3) were elided in
 * the extracted source and are reconstructed from the format groupings
 * and the surrounding comments — confirm against the original file.
 */
static unsigned int
xm2msc_cal_stride(unsigned int width, enum xm2msc_pix_fmt xfmt)
{
	unsigned int stride;

	/* Stride in Bytes = (Width x Bytes per Pixel); */
	/* TODO: The Width value must be a multiple of Pixels per Clock */
	switch (xfmt) {
	case XILINX_M2MSC_FMT_RGBX8:
	case XILINX_M2MSC_FMT_YUVX8:
	case XILINX_M2MSC_FMT_RGBX10:
	case XILINX_M2MSC_FMT_YUVX10:
	case XILINX_M2MSC_FMT_BGRX8:
		/* 4 bytes per pixel (packed 32-bit) */
		stride = width * 4;
		break;
	case XILINX_M2MSC_FMT_YUYV8:
	case XILINX_M2MSC_FMT_UYVY8:
		/* 2 bytes per pixel (packed 4:2:2) */
		stride = width * 2;
		break;
	case XILINX_M2MSC_FMT_Y_UV8:
	case XILINX_M2MSC_FMT_Y_UV8_420:
	case XILINX_M2MSC_FMT_Y8:
		/* 1 byte per pixel on the luma plane */
		stride = width * 1;
		break;
	case XILINX_M2MSC_FMT_RGB8:
	case XILINX_M2MSC_FMT_YUV8:
	case XILINX_M2MSC_FMT_BGR8:
		/* 3 bytes per pixel (packed 24-bit) */
		stride = width * 3;
		break;
	case XILINX_M2MSC_FMT_Y_UV10:
	case XILINX_M2MSC_FMT_Y_UV10_420:
	case XILINX_M2MSC_FMT_Y10:
		/* 4 bytes per 3 pixels */
		stride = DIV_ROUND_UP(width * 4, 3);
		break;
	default:
		stride = 0;
	}

	return stride;
}
/*
 * Validate and correct a requested format, then cache the matched format
 * descriptor in the queue's q_data. Clamps width/height to the device
 * limits rather than failing, per the V4L2 spec. Rejects changes while
 * the queue has buffers allocated (-EBUSY) and unknown/disabled formats
 * (-EINVAL).
 */
static int
vidioc_try_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
{
	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct xm2msc_q_data *q_data;
	struct vb2_queue *vq;
	int index;

	if (pix->width < XM2MSC_MIN_WIDTH || pix->width > xm2msc->max_wd ||
	    pix->height < XM2MSC_MIN_HEIGHT || pix->height > xm2msc->max_ht)
		dev_dbg(xm2msc->dev,
			"Wrong input parameters %d, wxh: %dx%d.\n",
			f->type, f->fmt.pix.width, f->fmt.pix.height);

	/*
	 * V4L2 specification suggests the driver corrects the
	 * format struct if any of the dimensions is unsupported
	 */
	if (pix->height < XM2MSC_MIN_HEIGHT)
		pix->height = XM2MSC_MIN_HEIGHT;
	else if (pix->height > xm2msc->max_ht)
		pix->height = xm2msc->max_ht;

	if (pix->width < XM2MSC_MIN_WIDTH)
		pix->width = XM2MSC_MIN_WIDTH;
	else if (pix->width > xm2msc->max_wd)
		pix->width = xm2msc->max_wd;

	vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	q_data = get_q_data(chan_ctx, f->type);
	if (!q_data)
		return -EINVAL;

	/* Format cannot change while buffers are allocated on the queue */
	if (vb2_is_busy(vq)) {
		v4l2_err(&xm2msc->v4l2_dev,
			 "%s queue busy\n", __func__);
		return -EBUSY;
	}

	q_data->fmt = find_format(f);
	index = find_format_index(f);
	/* Reject formats not in the table or not enabled in this HW build */
	if (!q_data->fmt || index == ARRAY_SIZE(formats) ||
	    !xm2msc_chk_fmt(xm2msc, index)) {
		v4l2_err(&xm2msc->v4l2_dev,
			 "Couldn't set format type %d, wxh: %dx%d. ",
			 f->type, f->fmt.pix.width, f->fmt.pix.height);
		v4l2_err(&xm2msc->v4l2_dev,
			 "fmt: %d, field: %d\n",
			 f->fmt.pix.pixelformat, f->fmt.pix.field);
		return -EINVAL;
	}

	return 0;
}
1387 vidioc_s_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1389 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1390 struct xm2msc_q_data *q_data = get_q_data(chan_ctx, f->type);
1393 q_data = get_q_data(chan_ctx, f->type);
1395 q_data->width = pix->width;
1396 q_data->height = pix->height;
1397 q_data->stride = xm2msc_cal_stride(pix->width,
1398 q_data->fmt->xm2msc_fmt);
1399 q_data->colorspace = pix->colorspace;
1400 q_data->field = pix->field;
1401 q_data->nplanes = q_data->fmt->num_planes;
1403 for (i = 0; i < q_data->nplanes; i++) {
1404 q_data->bytesperline[i] = q_data->stride;
1405 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1406 q_data->sizeimage[i] = q_data->stride * q_data->height;
1407 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
1410 xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data,
1411 chan_ctx->num, f->type, __func__);
/* VIDIOC_TRY_FMT (OUTPUT queue): shared validation in vidioc_try_fmt(). */
static int xm2msc_try_fmt_vid_out(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	return vidioc_try_fmt(chan_ctx, f);
}
/* VIDIOC_TRY_FMT (CAPTURE queue): shared validation in vidioc_try_fmt(). */
static int xm2msc_try_fmt_vid_cap(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	return vidioc_try_fmt(chan_ctx, f);
}
/* VIDIOC_S_FMT (CAPTURE queue): validate first, then commit. */
static int xm2msc_s_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	int ret;
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	ret = xm2msc_try_fmt_vid_cap(file, fh, f);
	if (ret)
		return ret;

	return vidioc_s_fmt(chan_ctx, f);
}
/* VIDIOC_S_FMT (OUTPUT queue): validate first, then commit. */
static int xm2msc_s_fmt_vid_out(struct file *file, void *fh,
				struct v4l2_format *f)
{
	int ret;
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	ret = xm2msc_try_fmt_vid_out(file, fh, f);
	if (ret)
		return ret;

	return vidioc_s_fmt(chan_ctx, f);
}
/*
 * Fill @f from the cached q_data of the queue matching f->type.
 * Returns -EINVAL when the queue or its q_data cannot be resolved.
 */
static int vidioc_g_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
{
	struct vb2_queue *vq;
	struct xm2msc_q_data *q_data;
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	unsigned int i;

	vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	q_data = get_q_data(chan_ctx, f->type);
	if (!q_data)
		return -EINVAL;

	pix->width = q_data->width;
	pix->height = q_data->height;
	/* Driver only supports progressive frames */
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = q_data->fmt->fourcc;
	pix->colorspace = q_data->colorspace;
	pix->num_planes = q_data->nplanes;

	for (i = 0; i < pix->num_planes; i++) {
		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
		pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
	}

	return 0;
}
/* VIDIOC_G_FMT (OUTPUT queue): shared logic in vidioc_g_fmt(). */
static int xm2msc_g_fmt_vid_out(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	return vidioc_g_fmt(chan_ctx, f);
}
/* VIDIOC_G_FMT (CAPTURE queue): shared logic in vidioc_g_fmt(). */
static int xm2msc_g_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	return vidioc_g_fmt(chan_ctx, f);
}
/*
 * Enumerate the f->index'th format that is actually enabled in this
 * hardware build (formats[] is the full table; xm2msc_chk_fmt() filters
 * by what the IP was synthesized with). Returns -EINVAL past the end.
 */
static int enum_fmt(struct xm2m_msc_dev *xm2msc, struct v4l2_fmtdesc *f)
{
	const struct xm2msc_fmt *fmt;
	unsigned int i, enabled = 0;

	/* Count only enabled formats so indices are dense for userspace */
	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (xm2msc_chk_fmt(xm2msc, i) && enabled++ == f->index)
			break;
	}

	if (i == ARRAY_SIZE(formats))
		/* Format not found */
		return -EINVAL;

	fmt = &formats[i];

	strlcpy(f->description, fmt->name,
		sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}
/* VIDIOC_ENUM_FMT (CAPTURE): only the MPLANE capture type is valid. */
static int xm2msc_enum_fmt_vid_cap(struct file *file, void *fh,
				   struct v4l2_fmtdesc *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	return enum_fmt(chan_ctx->xm2msc_dev, f);
}
/* VIDIOC_ENUM_FMT (OUTPUT): only the MPLANE output type is valid. */
static int xm2msc_enum_fmt_vid_out(struct file *file, void *fh,
				   struct v4l2_fmtdesc *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);

	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	return enum_fmt(chan_ctx->xm2msc_dev, f);
}
1548 static int xm2msc_querycap(struct file *file, void *fh,
1549 struct v4l2_capability *cap)
1551 strncpy(cap->driver, XM2MSC_DRIVER_NAME, sizeof(cap->driver) - 1);
1552 strncpy(cap->card, XM2MSC_DRIVER_NAME, sizeof(cap->card) - 1);
1553 snprintf(cap->bus_info, sizeof(cap->bus_info),
1554 "platform:%s", XM2MSC_DRIVER_NAME);
1556 * This is only a mem-to-mem video device. The STREAMING
1557 * device capability flags are left only for compatibility
1558 * and are scheduled for removal.
1560 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
1561 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
/*
 * vb2 .queue_setup: report plane count and per-plane sizes from the
 * format cached in q_data at S_FMT time.
 */
static int xm2msc_queue_setup(struct vb2_queue *vq,
			      unsigned int *nbuffers, unsigned int *nplanes,
			      unsigned int sizes[], struct device *alloc_devs[])
{
	unsigned int i;
	struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vq);
	struct xm2msc_q_data *q_data;

	q_data = get_q_data(chan_ctx, vq->type);
	if (!q_data)
		return -EINVAL;

	*nplanes = q_data->nplanes;

	for (i = 0; i < *nplanes; i++)
		sizes[i] = q_data->sizeimage[i];

	dev_dbg(chan_ctx->xm2msc_dev->dev, "get %d buffer(s) of size %d",
		*nbuffers, sizes[0]);
	if (q_data->nplanes == 2)
		dev_dbg(chan_ctx->xm2msc_dev->dev, " and %d\n", sizes[1]);

	return 0;
}
/*
 * vb2 .buf_prepare: verify each plane is large enough for the current
 * format, then set the payload size the hardware will produce/consume.
 */
static int xm2msc_buf_prepare(struct vb2_buffer *vb)
{
	struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
	struct xm2msc_q_data *q_data;
	unsigned int i, num_planes;

	q_data = get_q_data(chan_ctx, vb->vb2_queue->type);
	if (!q_data)
		return -EINVAL;

	num_planes = q_data->nplanes;

	for (i = 0; i < num_planes; i++) {
		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
			v4l2_err(&xm2msc->v4l2_dev, "data will not fit into plane ");
			v4l2_err(&xm2msc->v4l2_dev, "(%lu < %lu)\n",
				 vb2_plane_size(vb, i),
				 (long)q_data->sizeimage[i]);
			return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++)
		vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);

	return 0;
}
/* vb2 .buf_queue: hand the buffer to the mem2mem framework's ready list. */
static void xm2msc_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(chan_ctx->m2m_ctx, vbuf);
}
/*
 * Drain every queued buffer from the src or dst side of the m2m context
 * (chosen by @q's direction) and complete each with @state. Used on
 * stop_streaming to hand buffers back as VB2_BUF_STATE_ERROR.
 */
static void xm2msc_return_all_buffers(struct xm2msc_chan_ctx *chan_ctx,
				      struct vb2_queue *q,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *vb;
	unsigned long flags;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
		else
			vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
		if (!vb)
			break;
		/* buf_done must be serialized against the IRQ completion path */
		spin_lock_irqsave(&chan_ctx->xm2msc_dev->lock, flags);
		v4l2_m2m_buf_done(vb, state);
		spin_unlock_irqrestore(&chan_ctx->xm2msc_dev->lock, flags);
	}
}
1646 static int xm2msc_start_streaming(struct vb2_queue *q, unsigned int count)
1648 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
1649 static struct xm2msc_q_data *q_data;
1652 if (V4L2_TYPE_IS_OUTPUT(q->type))
1653 xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_OUT);
1655 xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_CAP);
1657 xm2msc_set_chan_params(chan_ctx, q->type);
1659 if (xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_CAP) &&
1660 xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_OUT))
1661 xm2msc_set_chan_com_params(chan_ctx);
1663 type = V4L2_TYPE_IS_OUTPUT(q->type) ?
1664 V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
1665 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1666 q_data = get_q_data(chan_ctx, type);
1667 xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data, chan_ctx->num,
1669 xm2msc_pr_status(chan_ctx->xm2msc_dev, __func__);
/*
 * vb2 .stop_streaming: return every queued buffer as ERROR and clear
 * the streaming flag for this side of the channel.
 */
static void xm2msc_stop_streaming(struct vb2_queue *q)
{
	struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);

	xm2msc_return_all_buffers(chan_ctx, q, VB2_BUF_STATE_ERROR);

	if (V4L2_TYPE_IS_OUTPUT(q->type))
		xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
	else
		xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
}
/* vb2 queue operations shared by both the OUTPUT and CAPTURE queues. */
static const struct vb2_ops xm2msc_qops = {
	.queue_setup = xm2msc_queue_setup,
	.buf_prepare = xm2msc_buf_prepare,
	.buf_queue = xm2msc_buf_queue,
	.start_streaming = xm2msc_start_streaming,
	.stop_streaming = xm2msc_stop_streaming,
};
/*
 * m2m queue-init callback: configure the OUTPUT (src) and CAPTURE (dst)
 * vb2 queues identically except for type. Both use dma-contig memory
 * and share the device mutex as the vb2 lock.
 */
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct xm2msc_chan_ctx *chan_ctx = priv;
	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
	src_vq->drv_priv = chan_ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &xm2msc_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &xm2msc->dev_mutex;
	src_vq->dev = xm2msc->v4l2_dev.dev;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = chan_ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &xm2msc_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &xm2msc->dev_mutex;
	dst_vq->dev = xm2msc->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}
/* V4L2 ioctl dispatch table (multiplanar capture + output only). */
static const struct v4l2_ioctl_ops xm2msc_ioctl_ops = {
	.vidioc_querycap = xm2msc_querycap,

	.vidioc_enum_fmt_vid_cap_mplane = xm2msc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap_mplane = xm2msc_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap_mplane = xm2msc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap_mplane = xm2msc_s_fmt_vid_cap,

	.vidioc_enum_fmt_vid_out_mplane = xm2msc_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out_mplane = xm2msc_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out_mplane = xm2msc_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out_mplane = xm2msc_s_fmt_vid_out,

	.vidioc_reqbufs = xm2msc_reqbufs,
	.vidioc_querybuf = xm2msc_querybuf,
	.vidioc_expbuf = xm2msc_expbuf,
	.vidioc_create_bufs = xm2msc_createbufs,

	.vidioc_qbuf = xm2msc_qbuf,
	.vidioc_dqbuf = xm2msc_dqbuf,

	.vidioc_streamon = xm2msc_streamon,
	.vidioc_streamoff = xm2msc_streamoff,
};
/*
 * File open: map the device-node minor back to its channel context,
 * refuse a second open of the same channel, then create the v4l2_fh and
 * mem2mem context for it.
 * NOTE(review): the error labels and goto targets were elided in the
 * extracted source; the unwind order below (m2m failure -> fh teardown,
 * lookup failure -> unlock only) is reconstructed — confirm against the
 * original file.
 */
static int xm2msc_open(struct file *file)
{
	struct xm2m_msc_dev *xm2msc = video_drvdata(file);
	struct xm2msc_chan_ctx *chan_ctx = NULL;
	u32 minor, chan;
	int ret;

	if (mutex_lock_interruptible(&xm2msc->dev_mutex))
		return -ERESTARTSYS;

	minor = iminor(file_inode(file));

	/* Channels were registered sequentially; find the one we opened */
	for (chan = 0; chan < xm2msc->max_chan; chan++) {
		chan_ctx = &xm2msc->xm2msc_chan[chan];

		if ((chan_ctx->status & CHAN_ATTACHED) &&
		    chan_ctx->minor == minor)
			break;
	}

	if (chan == xm2msc->max_chan) {
		v4l2_err(&xm2msc->v4l2_dev,
			 "%s Chan not found with minor = %d\n",
			 __func__, minor);
		ret = -EBADF;
		goto unlock;
	}

	/* Already opened, do not allow same channel
	 * to be open more then once
	 */
	if (chan_ctx->status & CHAN_OPENED) {
		v4l2_warn(&xm2msc->v4l2_dev,
			  "%s Chan already opened for minor = %d\n",
			  __func__, minor);
		ret = -EBUSY;
		goto unlock;
	}

	v4l2_fh_init(&chan_ctx->fh, &chan_ctx->vfd);
	file->private_data = &chan_ctx->fh;
	v4l2_fh_add(&chan_ctx->fh);

	chan_ctx->m2m_ctx = v4l2_m2m_ctx_init(chan_ctx->m2m_dev,
					      chan_ctx, &queue_init);
	if (IS_ERR(chan_ctx->m2m_ctx)) {
		ret = PTR_ERR(chan_ctx->m2m_ctx);
		v4l2_err(&xm2msc->v4l2_dev,
			 "%s Chan M2M CTX not creted for minor %d\n",
			 __func__, minor);
		goto error_fh;
	}

	chan_ctx->fh.m2m_ctx = chan_ctx->m2m_ctx;
	chan_ctx->status |= CHAN_OPENED;
	chan_ctx->xm2msc_dev = xm2msc;
	chan_ctx->frames = 0;
	xm2msc_set_chan(chan_ctx, true);

	v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance created\n", chan);

	mutex_unlock(&xm2msc->dev_mutex);
	xm2msc_pr_chanctx(chan_ctx, __func__);
	xm2msc_pr_status(xm2msc, __func__);
	return 0;

error_fh:
	v4l2_fh_del(&chan_ctx->fh);
	v4l2_fh_exit(&chan_ctx->fh);
unlock:
	mutex_unlock(&xm2msc->dev_mutex);
	xm2msc_pr_chanctx(chan_ctx, __func__);
	xm2msc_pr_status(xm2msc, __func__);
	return ret;
}
/*
 * File release: tear down the m2m context and file handle, mark the
 * channel closed and disable it in the hardware channel mask.
 */
static int xm2msc_release(struct file *file)
{
	struct xm2m_msc_dev *xm2msc = video_drvdata(file);
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);

	if (mutex_lock_interruptible(&xm2msc->dev_mutex))
		return -ERESTARTSYS;

	v4l2_m2m_ctx_release(chan_ctx->m2m_ctx);
	v4l2_fh_del(&chan_ctx->fh);
	v4l2_fh_exit(&chan_ctx->fh);
	chan_ctx->status &= ~CHAN_OPENED;
	xm2msc_set_chan(chan_ctx, false);

	v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance released\n",
		  chan_ctx->num);

	mutex_unlock(&xm2msc->dev_mutex);
	return 0;
}
/* File poll: serialize against other fops and delegate to v4l2_m2m_poll(). */
static unsigned int xm2msc_poll(struct file *file,
				struct poll_table_struct *wait)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
	unsigned int ret;

	mutex_lock(&xm2msc->dev_mutex);
	ret = v4l2_m2m_poll(file, chan_ctx->m2m_ctx, wait);
	mutex_unlock(&xm2msc->dev_mutex);

	return ret;
}
1866 static int xm2msc_mmap(struct file *file, struct vm_area_struct *vma)
1868 struct xm2msc_chan_ctx *chan_ctx = file->private_data;
1869 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1872 mutex_lock(&xm2msc->dev_mutex);
1873 ret = v4l2_m2m_mmap(file, chan_ctx->m2m_ctx, vma);
1875 mutex_unlock(&xm2msc->dev_mutex);
/* Per-channel video-device file operations. */
static const struct v4l2_file_operations xm2msc_fops = {
	.owner = THIS_MODULE,
	.open = xm2msc_open,
	.release = xm2msc_release,
	.poll = xm2msc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = xm2msc_mmap,
};
/* Template video_device, copied into each channel's vfd in probe(). */
static const struct video_device xm2msc_videodev = {
	.name = XM2MSC_DRIVER_NAME,
	.fops = &xm2msc_fops,
	.ioctl_ops = &xm2msc_ioctl_ops,
	/* vfd is embedded in the channel context, nothing to free */
	.release = video_device_release_empty,
	.vfl_dir = VFL_DIR_M2M,
};
/* mem2mem framework callbacks (scheduling of scaler jobs). */
static const struct v4l2_m2m_ops xm2msc_m2m_ops = {
	.device_run = xm2msc_device_run,
	.job_ready = xm2msc_job_ready,
	.job_abort = xm2msc_job_abort,
};
/*
 * Parse the device-tree configuration: register space, channel count,
 * max width/height, DMA address width, filter taps, enabled video
 * formats and the reset GPIO.
 * NOTE(review): several error-return lines were elided in the extracted
 * source and are reconstructed as plain -EINVAL returns — confirm
 * against the original file.
 */
static int xm2msc_parse_of(struct platform_device *pdev,
			   struct xm2m_msc_dev *xm2msc)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	int hw_vid_fmt_cnt;
	const char *vid_fmts[ARRAY_SIZE(formats)];
	int ret;
	u32 i, j;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xm2msc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR((__force void *)xm2msc->regs))
		return PTR_ERR((__force const void *)xm2msc->regs);

	dev_dbg(dev, "IO Mem 0x%llx mapped at %p\n", res->start, xm2msc->regs);

	ret = of_property_read_u32(node, "xlnx,max-chan",
				   &xm2msc->max_chan);
	if (ret < 0)
		return ret;

	if (xm2msc->max_chan < XM2MSC_MIN_CHAN ||
	    xm2msc->max_chan > XM2MSC_MAX_CHAN) {
		dev_err(dev,
			"Invalid maximum scaler channels : %d",
			xm2msc->max_chan);
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "xlnx,max-width",
				   &xm2msc->max_wd);
	if (ret < 0) {
		dev_err(dev,
			"missing xlnx,max-width prop\n");
		return ret;
	}

	if (xm2msc->max_wd < XM2MSC_MIN_WIDTH ||
	    xm2msc->max_wd > XM2MSC_MAX_WIDTH) {
		dev_err(dev, "Invalid width : %d",
			xm2msc->max_wd);
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "xlnx,max-height",
				   &xm2msc->max_ht);
	if (ret < 0) {
		dev_err(dev, "missing xlnx,max-height prop\n");
		return ret;
	}

	if (xm2msc->max_ht < XM2MSC_MIN_HEIGHT ||
	    xm2msc->max_ht > XM2MSC_MAX_HEIGHT) {
		dev_err(dev, "Invalid height : %d",
			xm2msc->max_ht);
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "xlnx,dma-addr-width",
				   &xm2msc->dma_addr_size);
	if (ret || (xm2msc->dma_addr_size != 32 &&
		    xm2msc->dma_addr_size != 64)) {
		dev_err(dev, "missing/invalid addr width dts prop\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(node, "xlnx,num-taps",
				   &xm2msc->taps);
	if (ret || (xm2msc->taps != XSCALER_TAPS_6 &&
		    xm2msc->taps != XSCALER_TAPS_8 &&
		    xm2msc->taps != XSCALER_TAPS_10 &&
		    xm2msc->taps != XSCALER_TAPS_12)) {
		dev_err(dev, "missing/invalid taps in dts prop\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): irq_of_parse_and_map() returns 0 (not a negative
	 * number) on failure, so the '< 0' test below can never catch a
	 * mapping failure — it should be '!xm2msc->irq'. Preserved as-is
	 * in this documentation pass.
	 */
	xm2msc->irq = irq_of_parse_and_map(node, 0);
	if (xm2msc->irq < 0) {
		dev_err(dev, "Unable to get IRQ");
		return -EINVAL;
	}

	dev_dbg(dev, "Max Channel Supported = %d\n", xm2msc->max_chan);
	dev_dbg(dev, "DMA Addr width Supported = %d\n", xm2msc->dma_addr_size);
	dev_dbg(dev, "Max col/row Supported = (%d) / (%d)\n",
		xm2msc->max_wd, xm2msc->max_ht);
	dev_dbg(dev, "taps Supported = %d\n", xm2msc->taps);
	/* read supported video formats and update internal table */
	hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");

	ret = of_property_read_string_array(node, "xlnx,vid-formats",
					    vid_fmts, hw_vid_fmt_cnt);
	if (ret < 0) {
		dev_err(dev,
			"Missing or invalid xlnx,vid-formats dts prop\n");
		return ret;
	}

	dev_dbg(dev, "Supported format = ");
	for (i = 0; i < hw_vid_fmt_cnt; i++) {
		const char *vid_fmt_name = vid_fmts[i];

		for (j = 0; j < ARRAY_SIZE(formats); j++) {
			const char *dts_name = formats[j].name;

			/* Enable only formats the IP was built with */
			if (strcmp(vid_fmt_name, dts_name))
				continue;
			dev_dbg(dev, "%s ", dts_name);

			xm2msc_set_fmt(xm2msc, j);
		}
	}

	xm2msc->rst_gpio = devm_gpiod_get(dev, "reset",
					  GPIOD_OUT_HIGH);
	if (IS_ERR(xm2msc->rst_gpio)) {
		ret = PTR_ERR(xm2msc->rst_gpio);
		if (ret == -EPROBE_DEFER)
			dev_info(dev,
				 "Probe deferred due to GPIO reset defer\n");
		else
			dev_err(dev,
				"Unable to locate reset property in dt\n");
		return ret;
	}

	return 0;
}
/*
 * Unregister every attached channel's video device and release its m2m
 * device. Channels are registered sequentially in probe(), so the first
 * unattached one marks the end.
 */
static void xm2msc_unreg_video_n_m2m(struct xm2m_msc_dev *xm2msc)
{
	struct xm2msc_chan_ctx *chan_ctx;
	unsigned int chan;

	for (chan = 0; chan < xm2msc->max_chan; chan++) {
		chan_ctx = &xm2msc->xm2msc_chan[chan];
		if (!(chan_ctx->status & CHAN_ATTACHED))
			break; /*We register video sequentially */
		video_unregister_device(&chan_ctx->vfd);
		chan_ctx->status &= ~CHAN_ATTACHED;

		/* m2m_dev may hold an ERR_PTR if its init failed mid-probe */
		if (!IS_ERR(chan_ctx->m2m_dev))
			v4l2_m2m_release(chan_ctx->m2m_dev);
	}
}
/*
 * Platform probe: parse DT, reset the IP, register the v4l2 device,
 * then per channel register a video device and a mem2mem device, and
 * finally hook the shared interrupt.
 * NOTE(review): dev_mutex is assigned to vfd->lock in the registration
 * loop but mutex_init(&xm2msc->dev_mutex) only runs after the loop —
 * a device node opened between registration and mutex_init would use an
 * uninitialized mutex. Flagged, not changed, in this documentation pass.
 * NOTE(review): error-path goto labels were elided in the extracted
 * source and are reconstructed — confirm against the original file.
 */
static int xm2m_msc_probe(struct platform_device *pdev)
{
	int ret;
	struct xm2m_msc_dev *xm2msc;
	struct xm2msc_chan_ctx *chan_ctx;
	struct video_device *vfd;
	u32 chan;

	xm2msc = devm_kzalloc(&pdev->dev, sizeof(*xm2msc), GFP_KERNEL);
	if (!xm2msc)
		return -ENOMEM;

	ret = xm2msc_parse_of(pdev, xm2msc);
	if (ret < 0)
		return ret;

	xm2msc->dev = &pdev->dev;

	xm2msc_reset(xm2msc);

	spin_lock_init(&xm2msc->lock);

	ret = v4l2_device_register(&pdev->dev, &xm2msc->v4l2_dev);
	if (ret)
		return ret;

	for (chan = 0; chan < xm2msc->max_chan; chan++) {
		chan_ctx = &xm2msc->xm2msc_chan[chan];

		vfd = &chan_ctx->vfd;
		*vfd = xm2msc_videodev;
		vfd->lock = &xm2msc->dev_mutex;
		vfd->v4l2_dev = &xm2msc->v4l2_dev;

		ret = video_register_device(vfd, VFL_TYPE_GRABBER, chan);
		if (ret) {
			v4l2_err(&xm2msc->v4l2_dev,
				 "Failed to register video dev for chan %d\n",
				 chan);
			goto unreg_dev;
		}

		chan_ctx->status = CHAN_ATTACHED;

		video_set_drvdata(vfd, xm2msc);
		snprintf(vfd->name, sizeof(vfd->name),
			 "%s", xm2msc_videodev.name);
		v4l2_info(&xm2msc->v4l2_dev,
			  " Device registered as /dev/video%d\n", vfd->num);

		dev_dbg(xm2msc->dev, "%s Device registered as /dev/video%d\n",
			__func__, vfd->num);

		chan_ctx->m2m_dev = v4l2_m2m_init(&xm2msc_m2m_ops);
		if (IS_ERR(chan_ctx->m2m_dev)) {
			v4l2_err(&xm2msc->v4l2_dev,
				 "Failed to init mem2mem device for chan %d\n",
				 chan);
			ret = PTR_ERR(chan_ctx->m2m_dev);
			goto unreg_dev;
		}

		chan_ctx->xm2msc_dev = xm2msc;
		chan_ctx->regs = xm2msc->regs + XM2MSC_CHAN_REGS_START(chan);
		if (chan > 4) /* TODO: To be fixed in HW */
			chan_ctx->regs += XM2MSC_RESERVED_AREA;
		chan_ctx->num = chan;
		chan_ctx->minor = vfd->minor;
		xm2msc_pr_chanctx(chan_ctx, __func__);
	}

	mutex_init(&xm2msc->dev_mutex);
	mutex_init(&xm2msc->mutex);
	init_waitqueue_head(&xm2msc->isr_finished);

	ret = devm_request_irq(&pdev->dev, xm2msc->irq,
			       xm2msc_isr, IRQF_SHARED,
			       XM2MSC_DRIVER_NAME, xm2msc);
	if (ret) {
		dev_err(&pdev->dev, "Unable to register IRQ\n");
		goto unreg_dev;
	}

	platform_set_drvdata(pdev, xm2msc);

	return 0;

unreg_dev:
	xm2msc_unreg_video_n_m2m(xm2msc);
	v4l2_device_unregister(&xm2msc->v4l2_dev);
	return ret;
}
/* Platform remove: mirror of probe's registration steps. */
static int xm2m_msc_remove(struct platform_device *pdev)
{
	struct xm2m_msc_dev *xm2msc = platform_get_drvdata(pdev);

	xm2msc_unreg_video_n_m2m(xm2msc);
	v4l2_device_unregister(&xm2msc->v4l2_dev);
	return 0;
}
/* Device-tree match table. */
static const struct of_device_id xm2m_msc_of_id_table[] = {
	{.compatible = "xlnx,v-multi-scaler-v1.0"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xm2m_msc_of_id_table);
/* Platform driver glue and module metadata. */
static struct platform_driver xm2m_msc_driver = {
	.driver = {
		.name = "xilinx-multiscaler",
		.of_match_table = xm2m_msc_of_id_table,
	},
	.probe = xm2m_msc_probe,
	.remove = xm2m_msc_remove,
};

module_platform_driver(xm2m_msc_driver);

MODULE_DESCRIPTION("Xilinx M2M Multi-Scaler Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("xlnx_m2m_multiscaler_dev");