// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Memory-to-Memory Video Multi-Scaler IP
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Author: Suresh Gupta <suresh.gupta@xilinx.com>
 *
 * Based on the virtual v4l2-mem2mem example device
 *
 * This driver adds support to control the Xilinx Video Multi
 * Scaler Controller
 */
15 #include <linux/delay.h>
17 #include <linux/gpio/consumer.h>
18 #include <linux/interrupt.h>
20 #include <linux/module.h>
22 #include <linux/of_irq.h>
23 #include <linux/platform_device.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
27 #include <media/v4l2-device.h>
28 #include <media/v4l2-ioctl.h>
29 #include <media/v4l2-mem2mem.h>
30 #include <media/videobuf2-dma-contig.h>
32 #include "xilinx-multi-scaler-coeff.h"
34 /* 0x0000 : Control signals */
35 #define XM2MSC_AP_CTRL 0x0000
36 #define XM2MSC_AP_CTRL_START BIT(0)
37 #define XM2MSC_AP_CTRL_DONE BIT(1)
38 #define XM2MSC_AP_CTRL_IDEL BIT(2)
39 #define XM2MSC_AP_CTRL_READY BIT(3)
40 #define XM2MSC_AP_CTRL_AUTO_RESTART BIT(7)
42 /* 0x0004 : Global Interrupt Enable Register */
43 #define XM2MSC_GIE 0x0004
44 #define XM2MSC_GIE_EN BIT(0)
46 /* 0x0008 : IP Interrupt Enable Register (Read/Write) */
47 #define XM2MSC_IER 0x0008
48 #define XM2MSC_ISR 0x000c
49 #define XM2MSC_ISR_DONE BIT(0)
50 #define XM2MSC_ISR_READY BIT(1)
52 #define XM2MSC_NUM_OUTS 0x0010
54 #define XM2MSC_WIDTHIN 0x000
55 #define XM2MSC_WIDTHOUT 0x008
56 #define XM2MSC_HEIGHTIN 0x010
57 #define XM2MSC_HEIGHTOUT 0x018
58 #define XM2MSC_LINERATE 0x020
59 #define XM2MSC_PIXELRATE 0x028
60 #define XM2MSC_INPIXELFMT 0x030
61 #define XM2MSC_OUTPIXELFMT 0x038
62 #define XM2MSC_INSTRIDE 0x050
63 #define XM2MSC_OUTSTRIDE 0x058
64 #define XM2MSC_SRCIMGBUF0 0x060
65 #define XM2MSC_SRCIMGBUF1 0x070
66 #define XM2MSC_DSTIMGBUF0 0x090
67 #define XM2MSC_DSTIMGBUF1 0x0100
69 #define XM2MVSC_VFLTCOEFF_L 0x2000
70 #define XM2MVSC_VFLTCOEFF(x) (XM2MVSC_VFLTCOEFF_L + 0x2000 * (x))
71 #define XM2MVSC_HFLTCOEFF_L 0x2800
72 #define XM2MVSC_HFLTCOEFF(x) (XM2MVSC_HFLTCOEFF_L + 0x2000 * (x))
74 #define XM2MSC_CHAN_REGS_START(x) (0x100 + 0x200 * x)
77 * IP has reserved area between XM2MSC_DSTIMGBUF0 and
78 * XM2MSC_DSTIMGBUF1 registers of channel 4
80 #define XM2MSC_RESERVED_AREA 0x600
82 /* GPIO RESET MACROS */
83 #define XM2MSC_RESET_ASSERT (0x1)
84 #define XM2MSC_RESET_DEASSERT (0x0)
86 #define XM2MSC_MIN_CHAN 1
87 #define XM2MSC_MAX_CHAN 8
89 #define XM2MSC_MAX_WIDTH (8192)
90 #define XM2MSC_MAX_HEIGHT (4320)
91 #define XM2MSC_MIN_WIDTH (64)
92 #define XM2MSC_MIN_HEIGHT (64)
93 #define XM2MSC_STEP_PRECISION (65536)
94 /* Mask definitions for Low 16 bits in a 32 bit number */
95 #define XM2MSC_MASK_LOW_16BITS GENMASK(15, 0)
96 #define XM2MSC_BITSHIFT_16 (16)
98 #define XM2MSC_DRIVER_NAME "xm2msc"
100 #define CHAN_ATTACHED BIT(0)
101 #define CHAN_OPENED BIT(1)
103 #define XM2MSC_CHAN_OUT 0
104 #define XM2MSC_CHAN_CAP 1
106 #define NUM_STREAM(_x) \
107 ({ typeof(_x) (x) = (_x); \
108 min(ffz(x->out_streamed_chan), \
109 ffz(x->cap_streamed_chan)); })
111 /* Xilinx Video Specific Color/Pixel Formats */
/*
 * Xilinx Video Specific Color/Pixel Formats.
 * Values are the format codes the multi-scaler IP expects in its
 * IN/OUTPIXELFMT registers (see xm2msc_set_chan_params()).
 */
enum xm2msc_pix_fmt {
	XILINX_M2MSC_FMT_RGBX8		= 10,
	XILINX_M2MSC_FMT_YUVX8		= 11,
	XILINX_M2MSC_FMT_YUYV8		= 12,
	XILINX_M2MSC_FMT_RGBX10		= 15,
	XILINX_M2MSC_FMT_YUVX10		= 16,
	XILINX_M2MSC_FMT_Y_UV8		= 18,
	XILINX_M2MSC_FMT_Y_UV8_420	= 19,
	XILINX_M2MSC_FMT_RGB8		= 20,
	XILINX_M2MSC_FMT_YUV8		= 21,
	XILINX_M2MSC_FMT_Y_UV10		= 22,
	XILINX_M2MSC_FMT_Y_UV10_420	= 23,
	XILINX_M2MSC_FMT_Y8		= 24,
	XILINX_M2MSC_FMT_Y10		= 25,
	XILINX_M2MSC_FMT_BGRX8		= 27,
	XILINX_M2MSC_FMT_UYVY8		= 28,
	XILINX_M2MSC_FMT_BGR8		= 29,
};
132 * struct xm2msc_fmt - driver info for each of the supported video formats
133 * @name: human-readable device tree name for this entry
134 * @fourcc: standard format identifier
135 * @xm2msc_fmt: Xilinx Video Specific Color/Pixel Formats
136 * @num_planes: number of planes supported by format
141 enum xm2msc_pix_fmt xm2msc_fmt;
/*
 * NOTE(review): this table was damaged by a line-numbered extraction —
 * stray leading line numbers remain and the per-entry braces plus the
 * .name and .num_planes initializers were dropped. Restore it from the
 * original driver source before building; only the .fourcc ->
 * .xm2msc_fmt pairings below are trustworthy as-is.
 */
145 static const struct xm2msc_fmt formats[] = {
148 .fourcc = V4L2_PIX_FMT_BGRX32,
149 .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX8,
154 .fourcc = V4L2_PIX_FMT_XVUY32,
155 .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX8,
160 .fourcc = V4L2_PIX_FMT_YUYV,
161 .xm2msc_fmt = XILINX_M2MSC_FMT_YUYV8,
165 .name = "xbgr2101010",
166 .fourcc = V4L2_PIX_FMT_XBGR30,
167 .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX10,
171 .name = "yuvx2101010",
172 .fourcc = V4L2_PIX_FMT_XVUY10,
173 .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX10,
178 .fourcc = V4L2_PIX_FMT_NV16,
179 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
184 .fourcc = V4L2_PIX_FMT_NV12,
185 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
190 .fourcc = V4L2_PIX_FMT_RGB24,
191 .xm2msc_fmt = XILINX_M2MSC_FMT_RGB8,
196 .fourcc = V4L2_PIX_FMT_VUY24,
197 .xm2msc_fmt = XILINX_M2MSC_FMT_YUV8,
202 .fourcc = V4L2_PIX_FMT_XV20,
203 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
208 .fourcc = V4L2_PIX_FMT_XV15,
209 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
214 .fourcc = V4L2_PIX_FMT_GREY,
215 .xm2msc_fmt = XILINX_M2MSC_FMT_Y8,
220 .fourcc = V4L2_PIX_FMT_Y10,
221 .xm2msc_fmt = XILINX_M2MSC_FMT_Y10,
226 .fourcc = V4L2_PIX_FMT_XBGR32,
227 .xm2msc_fmt = XILINX_M2MSC_FMT_BGRX8,
232 .fourcc = V4L2_PIX_FMT_UYVY,
233 .xm2msc_fmt = XILINX_M2MSC_FMT_UYVY8,
238 .fourcc = V4L2_PIX_FMT_BGR24,
239 .xm2msc_fmt = XILINX_M2MSC_FMT_BGR8,
245 * struct xm2msc_q_data - Per-queue, driver-specific private data
246 * There is one source queue and one destination queue for each m2m context.
247 * @width: frame width
248 * @height: frame height
249 * @stride: bytes per lines
250 * @nplanes: Current number of planes
251 * @bytesperline: bytes per line per plane
252 * @sizeimage: image size per plane
253 * @colorspace: supported colorspace
254 * @field: supported field value
257 struct xm2msc_q_data {
261 unsigned int nplanes;
262 unsigned int bytesperline[2];
263 unsigned int sizeimage[2];
264 enum v4l2_colorspace colorspace;
265 enum v4l2_field field;
266 const struct xm2msc_fmt *fmt;
270 * struct xm2msc_chan_ctx - Scaler Channel Info, Per-Channel context
271 * @regs: IO mapped base address of the Channel
272 * @xm2msc_dev: Pointer to struct xm2m_msc_dev
273 * @num: HW Scaling Channel number
274 * @minor: Minor number of the video device
275 * @status: channel status, CHAN_ATTACHED or CHAN_OPENED
276 * @frames: number of frames processed
278 * @fh: v4l2 file handle
279 * @m2m_dev: m2m device
280 * @m2m_ctx: memory to memory context structure
281 * @q_data: src & dst queue data
283 struct xm2msc_chan_ctx {
285 struct xm2m_msc_dev *xm2msc_dev;
289 unsigned long frames;
291 struct video_device vfd;
293 struct v4l2_m2m_dev *m2m_dev;
294 struct v4l2_m2m_ctx *m2m_ctx;
296 struct xm2msc_q_data q_data[2];
300 * struct xm2m_msc_dev - Xilinx M2M Multi-scaler Device
301 * @dev: pointer to struct device instance used by the driver
302 * @regs: IO mapped base address of the HW/IP
303 * @irq: interrupt number
304 * @max_chan: maximum number of Scaling Channels
305 * @max_ht: maximum number of rows in a plane
306 * @max_wd: maximum number of column in a plane
307 * @taps: number of taps set in HW
308 * @supported_fmt: bitmap for all supported fmts by HW
309 * @dma_addr_size: Size of dma address pointer in IP (either 32 or 64)
310 * @rst_gpio: reset gpio handler
311 * @opened_chan: bitmap for all open channel
312 * @out_streamed_chan: bitmap for all out streamed channel
313 * @cap_streamed_chan: bitmap for all capture streamed channel
314 * @running_chan: currently running channels
315 * @device_busy: HW device is busy or not
316 * @isr_wait: flag to follow the ISR complete or not
317 * @isr_finished: Wait queue used to wait for IP to complete processing
318 * @v4l2_dev: main struct to for V4L2 device drivers
319 * @dev_mutex: lock for V4L2 device
320 * @mutex: lock for channel ctx
321 * @lock: lock used in IRQ
322 * @xm2msc_chan: arrey of channel context
323 * @hscaler_coeff: Array of filter coefficients for the Horizontal Scaler
324 * @vscaler_coeff: Array of filter coefficients for the Vertical Scaler
326 struct xm2m_msc_dev {
336 struct gpio_desc *rst_gpio;
339 u32 out_streamed_chan;
340 u32 cap_streamed_chan;
344 wait_queue_head_t isr_finished;
346 struct v4l2_device v4l2_dev;
348 struct mutex dev_mutex; /*the mutex for v4l2*/
349 struct mutex mutex; /*lock for bitmap reg*/
350 spinlock_t lock; /*IRQ lock*/
352 struct xm2msc_chan_ctx xm2msc_chan[XM2MSC_MAX_CHAN];
353 short hscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
354 short vscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
357 #define fh_to_chanctx(__fh) container_of(__fh, struct xm2msc_chan_ctx, fh)
359 static inline u32 xm2msc_readreg(const void __iomem *addr)
361 return ioread32(addr);
364 static inline void xm2msc_write64reg(void __iomem *addr, u64 value)
366 iowrite32(lower_32_bits(value), addr);
367 iowrite32(upper_32_bits(value), (void __iomem *)(addr + 4));
370 static inline void xm2msc_writereg(void __iomem *addr, u32 value)
372 iowrite32(value, addr);
375 static struct xm2msc_q_data *get_q_data(struct xm2msc_chan_ctx *chan_ctx,
376 enum v4l2_buf_type type)
379 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
380 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
381 return &chan_ctx->q_data[XM2MSC_CHAN_OUT];
382 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
383 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
384 return &chan_ctx->q_data[XM2MSC_CHAN_CAP];
386 v4l2_err(&chan_ctx->xm2msc_dev->v4l2_dev,
387 "Not supported Q type %d\n", type);
392 static u32 find_format_index(struct v4l2_format *f)
394 const struct xm2msc_fmt *fmt;
397 for (i = 0; i < ARRAY_SIZE(formats); i++) {
399 if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
406 static const struct xm2msc_fmt *find_format(struct v4l2_format *f)
408 const struct xm2msc_fmt *fmt;
411 for (i = 0; i < ARRAY_SIZE(formats); i++) {
413 if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
417 if (i == ARRAY_SIZE(formats))
424 xv_hscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
425 const short *coeff, u32 ntaps)
427 unsigned int i, j, pad, offset;
428 const u32 nphases = XSCALER_MAX_PHASES;
430 /* Determine if coefficient needs padding (effective vs. max taps) */
431 pad = XSCALER_MAX_TAPS - ntaps;
434 memset(xm2msc->hscaler_coeff, 0, sizeof(xm2msc->hscaler_coeff));
436 /* Load coefficients into scaler coefficient table */
437 for (i = 0; i < nphases; i++) {
438 for (j = 0; j < ntaps; ++j)
439 xm2msc->hscaler_coeff[i][j + offset] =
440 coeff[i * ntaps + j];
444 static void xv_hscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
447 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
448 int val, offset, rd_indx;
450 u32 ntaps = chan_ctx->xm2msc_dev->taps;
451 const u32 nphases = XSCALER_MAX_PHASES;
453 offset = (XSCALER_MAX_TAPS - ntaps) / 2;
454 for (i = 0; i < nphases; i++) {
455 for (j = 0; j < ntaps / 2; j++) {
456 rd_indx = j * 2 + offset;
457 val = (xm2msc->hscaler_coeff[i][rd_indx + 1] <<
458 XM2MSC_BITSHIFT_16) |
459 (xm2msc->hscaler_coeff[i][rd_indx] &
460 XM2MSC_MASK_LOW_16BITS);
461 xm2msc_writereg((xm2msc->regs + base_addr) +
462 ((i * ntaps / 2 + j) * 4), val);
468 xv_vscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
469 const short *coeff, const u32 ntaps)
473 const u32 nphases = XSCALER_MAX_PHASES;
475 /* Determine if coefficient needs padding (effective vs. max taps) */
476 pad = XSCALER_MAX_TAPS - ntaps;
477 offset = pad ? (pad >> 1) : 0;
479 /* Zero Entire Array */
480 memset(xm2msc->vscaler_coeff, 0, sizeof(xm2msc->vscaler_coeff));
482 /* Load User defined coefficients into scaler coefficient table */
483 for (i = 0; i < nphases; i++) {
484 for (j = 0; j < ntaps; ++j)
485 xm2msc->vscaler_coeff[i][j + offset] =
486 coeff[i * ntaps + j];
490 static void xv_vscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
493 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
494 u32 val, i, j, offset, rd_indx;
495 u32 ntaps = chan_ctx->xm2msc_dev->taps;
496 const u32 nphases = XSCALER_MAX_PHASES;
498 offset = (XSCALER_MAX_TAPS - ntaps) / 2;
500 for (i = 0; i < nphases; i++) {
501 for (j = 0; j < ntaps / 2; j++) {
502 rd_indx = j * 2 + offset;
503 val = (xm2msc->vscaler_coeff[i][rd_indx + 1] <<
504 XM2MSC_BITSHIFT_16) |
505 (xm2msc->vscaler_coeff[i][rd_indx] &
506 XM2MSC_MASK_LOW_16BITS);
507 xm2msc_writereg((xm2msc->regs +
508 base_addr) + ((i * ntaps / 2 + j) * 4), val);
513 static void xm2mvsc_initialize_coeff_banks(struct xm2msc_chan_ctx *chan_ctx)
515 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
516 /* Bank 0 is init as 6 tap filter for 6, 8, 10 & 12 tap filters */
517 xv_hscaler_load_ext_coeff(xm2msc, &xhsc_coeff_taps6[0][0],
519 xv_hscaler_set_coeff(chan_ctx, XM2MVSC_HFLTCOEFF(chan_ctx->num));
520 xv_vscaler_load_ext_coeff(xm2msc, &xvsc_coeff_taps6[0][0],
522 xv_vscaler_set_coeff(chan_ctx, XM2MVSC_VFLTCOEFF(chan_ctx->num));
525 static void xm2msc_set_chan_params(struct xm2msc_chan_ctx *chan_ctx,
526 enum v4l2_buf_type type)
528 struct xm2msc_q_data *q_data = get_q_data(chan_ctx, type);
529 const struct xm2msc_fmt *fmt = q_data->fmt;
530 void __iomem *base = chan_ctx->regs;
532 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
533 xm2msc_writereg(base + XM2MSC_WIDTHIN, q_data->width);
534 xm2msc_writereg(base + XM2MSC_HEIGHTIN, q_data->height);
535 xm2msc_writereg(base + XM2MSC_INPIXELFMT, fmt->xm2msc_fmt);
536 xm2msc_writereg(base + XM2MSC_INSTRIDE, q_data->stride);
538 xm2msc_writereg(base + XM2MSC_WIDTHOUT, q_data->width);
539 xm2msc_writereg(base + XM2MSC_HEIGHTOUT, q_data->height);
540 xm2msc_writereg(base + XM2MSC_OUTPIXELFMT, fmt->xm2msc_fmt);
541 xm2msc_writereg(base + XM2MSC_OUTSTRIDE, q_data->stride);
545 static void xm2msc_set_chan_com_params(struct xm2msc_chan_ctx *chan_ctx)
547 void __iomem *base = chan_ctx->regs;
548 struct xm2msc_q_data *out_q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
549 struct xm2msc_q_data *cap_q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
553 /* Currently only 6 tabs supported */
554 chan_ctx->xm2msc_dev->taps = XSCALER_TAPS_6;
555 xm2mvsc_initialize_coeff_banks(chan_ctx);
557 pixel_rate = (out_q_data->width * XM2MSC_STEP_PRECISION) /
559 line_rate = (out_q_data->height * XM2MSC_STEP_PRECISION) /
562 xm2msc_writereg(base + XM2MSC_PIXELRATE, pixel_rate);
563 xm2msc_writereg(base + XM2MSC_LINERATE, line_rate);
566 static void xm2msc_program_allchan(struct xm2m_msc_dev *xm2msc)
570 for (chan = 0; chan < xm2msc->running_chan; chan++) {
571 struct xm2msc_chan_ctx *chan_ctx;
573 chan_ctx = &xm2msc->xm2msc_chan[chan];
575 xm2msc_set_chan_params(chan_ctx,
576 V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
577 xm2msc_set_chan_params(chan_ctx,
578 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
579 xm2msc_set_chan_com_params(chan_ctx);
584 xm2msc_pr_q(struct device *dev, struct xm2msc_q_data *q, int chan,
585 int type, const char *fun_name)
588 const struct xm2msc_fmt *fmt = q->fmt;
590 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
591 dev_dbg(dev, "\n\nOUTPUT Q (%d) Context from [[ %s ]]",
594 dev_dbg(dev, "\n\nCAPTURE Q (%d) Context from [[ %s ]]",
597 dev_dbg(dev, "width height stride clrspace field planes\n");
598 dev_dbg(dev, " %d %d %d %d %d %d\n",
599 q->width, q->height, q->stride,
600 q->colorspace, q->field, q->nplanes);
602 for (i = 0; i < q->nplanes; i++) {
603 dev_dbg(dev, "[plane %d ] bytesperline sizeimage\n", i);
604 dev_dbg(dev, " %d %d\n",
605 q->bytesperline[i], q->sizeimage[i]);
608 dev_dbg(dev, "fmt_name 4cc xlnx-fmt\n");
609 dev_dbg(dev, "%s %d %d\n",
610 fmt->name, fmt->fourcc, fmt->xm2msc_fmt);
611 dev_dbg(dev, "\n\n");
615 xm2msc_pr_status(struct xm2m_msc_dev *xm2msc,
616 const char *fun_name)
618 struct device *dev = xm2msc->dev;
620 dev_dbg(dev, "Status in %s\n", fun_name);
621 dev_dbg(dev, "opened_chan out_streamed_chan cap_streamed_chan\n");
622 dev_dbg(dev, "0x%x 0x%x 0x%x\n",
623 xm2msc->opened_chan, xm2msc->out_streamed_chan,
624 xm2msc->cap_streamed_chan);
625 dev_dbg(dev, "\n\n");
629 xm2msc_pr_chanctx(struct xm2msc_chan_ctx *ctx, const char *fun_name)
631 struct device *dev = ctx->xm2msc_dev->dev;
633 dev_dbg(dev, "\n\n----- [[ %s ]]: Channel %d (0x%p) context -----\n",
634 fun_name, ctx->num, ctx);
635 dev_dbg(dev, "minor = %d\n", ctx->minor);
636 dev_dbg(dev, "reg mapped at %p\n", ctx->regs);
637 dev_dbg(dev, "xm2msc \tm2m_dev \tm2m_ctx\n");
638 dev_dbg(dev, "%p \t%p \t%p\n", ctx->xm2msc_dev,
639 ctx->m2m_dev, ctx->m2m_ctx);
641 if (ctx->status & CHAN_OPENED)
642 dev_dbg(dev, "Opened ");
643 if (ctx->status & CHAN_ATTACHED)
644 dev_dbg(dev, "and attached");
646 dev_dbg(dev, "-----------------------------------\n");
647 dev_dbg(dev, "\n\n");
651 xm2msc_pr_screg(struct device *dev, const void __iomem *base)
653 dev_dbg(dev, "Ctr, GIE, IE, IS OUT\n");
654 dev_dbg(dev, "0x%x 0x%x 0x%x 0x%x 0x%x\n",
655 xm2msc_readreg(base + XM2MSC_AP_CTRL),
656 xm2msc_readreg(base + XM2MSC_GIE),
657 xm2msc_readreg(base + XM2MSC_IER),
658 xm2msc_readreg(base + XM2MSC_ISR),
659 xm2msc_readreg(base + XM2MSC_NUM_OUTS));
663 xm2msc_pr_chanreg(struct device *dev, struct xm2msc_chan_ctx *chan)
665 const void __iomem *base = chan->regs;
667 dev_dbg(dev, "WIN HIN INPIXELFMT INSTRIDE SRCB0L/H SRCB1L/H\n");
668 dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
669 xm2msc_readreg(base + XM2MSC_WIDTHIN),
670 xm2msc_readreg(base + XM2MSC_HEIGHTIN),
671 xm2msc_readreg(base + XM2MSC_INPIXELFMT),
672 xm2msc_readreg(base + XM2MSC_INSTRIDE),
673 xm2msc_readreg(base + XM2MSC_SRCIMGBUF0),
674 xm2msc_readreg(base + XM2MSC_SRCIMGBUF0 + 4),
675 xm2msc_readreg(base + XM2MSC_SRCIMGBUF1),
676 xm2msc_readreg(base + XM2MSC_SRCIMGBUF1 + 4));
677 dev_dbg(dev, "WOUT HOUT OUTPIXELFMT OUTSTRIDE DBUF0L/H DBUF1L/H\n");
678 dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
679 xm2msc_readreg(base + XM2MSC_WIDTHOUT),
680 xm2msc_readreg(base + XM2MSC_HEIGHTOUT),
681 xm2msc_readreg(base + XM2MSC_OUTPIXELFMT),
682 xm2msc_readreg(base + XM2MSC_OUTSTRIDE),
683 xm2msc_readreg(base + XM2MSC_DSTIMGBUF0),
684 xm2msc_readreg(base + XM2MSC_DSTIMGBUF0 + 4),
686 xm2msc_readreg(base +
687 XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA) :
688 xm2msc_readreg(base + XM2MSC_DSTIMGBUF1),
690 xm2msc_readreg(base +
691 XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA + 4) :
692 xm2msc_readreg(base + XM2MSC_DSTIMGBUF1 + 4));
694 dev_dbg(dev, "LINERATE PIXELRATE\n");
695 dev_dbg(dev, "0x%x 0x%x\n",
696 xm2msc_readreg(base + XM2MSC_LINERATE),
697 xm2msc_readreg(base + XM2MSC_PIXELRATE));
701 xm2msc_pr_allchanreg(struct xm2m_msc_dev *xm2msc)
704 struct xm2msc_chan_ctx *chan_ctx;
705 struct device *dev = xm2msc->dev;
707 xm2msc_pr_screg(xm2msc->dev, xm2msc->regs);
709 for (i = 0; i < xm2msc->running_chan; i++) {
710 chan_ctx = &xm2msc->xm2msc_chan[i];
711 dev_dbg(dev, "Regs val for channel %d\n", i);
712 dev_dbg(dev, "______________________________________________\n");
713 xm2msc_pr_chanreg(dev, chan_ctx);
714 dev_dbg(dev, "processed frames = %lu\n", chan_ctx->frames);
715 dev_dbg(dev, "______________________________________________\n");
719 static inline bool xm2msc_testbit(int num, u32 *addr)
721 return (*addr & BIT(num));
724 static inline void xm2msc_setbit(int num, u32 *addr)
729 static inline void xm2msc_clrbit(int num, u32 *addr)
734 static void xm2msc_stop(struct xm2m_msc_dev *xm2msc)
736 void __iomem *base = xm2msc->regs;
737 u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
739 data &= ~XM2MSC_AP_CTRL_START;
740 xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
743 static void xm2msc_start(struct xm2m_msc_dev *xm2msc)
745 void __iomem *base = xm2msc->regs;
746 u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
748 data |= XM2MSC_AP_CTRL_START;
749 xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
752 static void xm2msc_set_chan(struct xm2msc_chan_ctx *ctx, bool state)
754 mutex_lock(&ctx->xm2msc_dev->mutex);
756 xm2msc_setbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
758 xm2msc_clrbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
759 mutex_unlock(&ctx->xm2msc_dev->mutex);
763 xm2msc_set_chan_stream(struct xm2msc_chan_ctx *ctx, bool state, int type)
767 if (type == XM2MSC_CHAN_OUT)
768 ptr = &ctx->xm2msc_dev->out_streamed_chan;
770 ptr = &ctx->xm2msc_dev->cap_streamed_chan;
772 spin_lock(&ctx->xm2msc_dev->lock);
774 xm2msc_setbit(ctx->num, ptr);
776 xm2msc_clrbit(ctx->num, ptr);
778 spin_unlock(&ctx->xm2msc_dev->lock);
782 xm2msc_chk_chan_stream(struct xm2msc_chan_ctx *ctx, int type)
787 if (type == XM2MSC_CHAN_OUT)
788 ptr = &ctx->xm2msc_dev->out_streamed_chan;
790 ptr = &ctx->xm2msc_dev->cap_streamed_chan;
792 mutex_lock(&ctx->xm2msc_dev->mutex);
793 ret = xm2msc_testbit(ctx->num, ptr);
794 mutex_unlock(&ctx->xm2msc_dev->mutex);
799 static void xm2msc_set_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
801 xm2msc_setbit(index, &xm2msc->supported_fmt);
804 static int xm2msc_chk_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
806 return xm2msc_testbit(index, &xm2msc->supported_fmt);
809 static void xm2msc_reset(struct xm2m_msc_dev *xm2msc)
811 gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_ASSERT);
812 gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_DEASSERT);
818 static int xm2msc_job_ready(void *priv)
820 struct xm2msc_chan_ctx *chan_ctx = priv;
822 if ((v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) &&
823 (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0))
828 static bool xm2msc_alljob_ready(struct xm2m_msc_dev *xm2msc)
830 struct xm2msc_chan_ctx *chan_ctx;
833 for (chan = 0; chan < xm2msc->running_chan; chan++) {
834 chan_ctx = &xm2msc->xm2msc_chan[chan];
836 if (!xm2msc_job_ready((void *)chan_ctx)) {
837 dev_info(xm2msc->dev, "chan %d not ready\n",
846 static void xm2msc_chan_abort_bufs(struct xm2msc_chan_ctx *chan_ctx)
848 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
849 struct vb2_v4l2_buffer *dst_vb, *src_vb;
851 spin_lock(&xm2msc->lock);
852 dev_dbg(xm2msc->dev, "aborting all buffers\n");
854 while (v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) {
855 src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
856 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
859 while (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0) {
860 dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
861 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
864 v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
865 spin_unlock(&xm2msc->lock);
868 static void xm2msc_job_abort(void *priv)
870 struct xm2msc_chan_ctx *chan_ctx = priv;
872 xm2msc_chan_abort_bufs(chan_ctx);
875 * Stream off the channel as job_abort may not always
876 * be called after streamoff
878 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
879 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
882 static int xm2msc_set_bufaddr(struct xm2m_msc_dev *xm2msc)
885 struct xm2msc_chan_ctx *chan_ctx;
886 struct vb2_v4l2_buffer *src_vb, *dst_vb;
888 dma_addr_t src_luma, dst_luma;
889 dma_addr_t src_croma, dst_croma;
891 if (!xm2msc_alljob_ready(xm2msc))
894 for (chan = 0; chan < xm2msc->running_chan; chan++) {
895 chan_ctx = &xm2msc->xm2msc_chan[chan];
896 base = chan_ctx->regs;
898 src_vb = v4l2_m2m_next_src_buf(chan_ctx->m2m_ctx);
899 dst_vb = v4l2_m2m_next_dst_buf(chan_ctx->m2m_ctx);
901 if (!src_vb || !dst_vb) {
902 v4l2_err(&xm2msc->v4l2_dev, "buffer not found chan = %d\n",
907 src_luma = vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 0);
908 dst_luma = vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 0);
910 if (chan_ctx->q_data[XM2MSC_CHAN_OUT].nplanes == 2)
912 vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 1);
916 if (chan_ctx->q_data[XM2MSC_CHAN_CAP].nplanes == 2)
918 vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 1);
922 if (xm2msc->dma_addr_size == 64 &&
923 sizeof(dma_addr_t) == sizeof(u64)) {
924 xm2msc_write64reg(base + XM2MSC_SRCIMGBUF0, src_luma);
925 xm2msc_write64reg(base + XM2MSC_SRCIMGBUF1, src_croma);
926 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF0, dst_luma);
927 if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
928 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1 +
929 XM2MSC_RESERVED_AREA,
932 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1,
935 xm2msc_writereg(base + XM2MSC_SRCIMGBUF0, src_luma);
936 xm2msc_writereg(base + XM2MSC_SRCIMGBUF1, src_croma);
937 xm2msc_writereg(base + XM2MSC_DSTIMGBUF0, dst_luma);
938 if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
939 xm2msc_writereg(base + XM2MSC_DSTIMGBUF1 +
940 XM2MSC_RESERVED_AREA,
943 xm2msc_writereg(base + XM2MSC_DSTIMGBUF1,
950 static void xm2msc_job_finish(struct xm2m_msc_dev *xm2msc)
954 for (chan = 0; chan < xm2msc->running_chan; chan++) {
955 struct xm2msc_chan_ctx *chan_ctx;
957 chan_ctx = &xm2msc->xm2msc_chan[chan];
958 v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
962 static void xm2msc_job_done(struct xm2m_msc_dev *xm2msc)
966 for (chan = 0; chan < xm2msc->running_chan; chan++) {
967 struct xm2msc_chan_ctx *chan_ctx;
968 struct vb2_v4l2_buffer *src_vb, *dst_vb;
971 chan_ctx = &xm2msc->xm2msc_chan[chan];
973 src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
974 dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
976 if (src_vb && dst_vb) {
977 dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
978 dst_vb->timecode = src_vb->timecode;
979 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
981 src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
983 spin_lock_irqsave(&xm2msc->lock, flags);
984 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
985 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
986 spin_unlock_irqrestore(&xm2msc->lock, flags);
992 static void xm2msc_device_run(void *priv)
994 struct xm2msc_chan_ctx *chan_ctx = priv;
995 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
996 void __iomem *base = xm2msc->regs;
1000 spin_lock_irqsave(&xm2msc->lock, flags);
1001 if (xm2msc->device_busy) {
1002 spin_unlock_irqrestore(&xm2msc->lock, flags);
1005 xm2msc->device_busy = true;
1007 if (xm2msc->running_chan != NUM_STREAM(xm2msc)) {
1008 dev_dbg(xm2msc->dev, "Running chan was %d\n",
1009 xm2msc->running_chan);
1010 xm2msc->running_chan = NUM_STREAM(xm2msc);
1012 /* IP need reset for updating of XM2MSC_NUM_OUT */
1013 xm2msc_reset(xm2msc);
1014 xm2msc_writereg(base + XM2MSC_NUM_OUTS, xm2msc->running_chan);
1015 xm2msc_program_allchan(xm2msc);
1017 spin_unlock_irqrestore(&xm2msc->lock, flags);
1019 dev_dbg(xm2msc->dev, "Running chan = %d\n", xm2msc->running_chan);
1020 if (!xm2msc->running_chan) {
1021 xm2msc->device_busy = false;
1025 ret = xm2msc_set_bufaddr(xm2msc);
1028 * All channel does not have buffer
1029 * Currently we do not handle the removal of any Intermediate
1030 * channel while streaming is going on
1032 if (xm2msc->out_streamed_chan || xm2msc->cap_streamed_chan)
1033 dev_err(xm2msc->dev,
1034 "Buffer not available, streaming chan 0x%x\n",
1035 xm2msc->cap_streamed_chan);
1037 xm2msc->device_busy = false;
1041 xm2msc_writereg(base + XM2MSC_GIE, XM2MSC_GIE_EN);
1042 xm2msc_writereg(base + XM2MSC_IER, XM2MSC_ISR_DONE);
1044 xm2msc_pr_status(xm2msc, __func__);
1045 xm2msc_pr_screg(xm2msc->dev, base);
1046 xm2msc_pr_allchanreg(xm2msc);
1048 xm2msc_start(xm2msc);
1050 xm2msc->isr_wait = true;
1051 wait_event(xm2msc->isr_finished, !xm2msc->isr_wait);
1053 xm2msc_job_done(xm2msc);
1055 xm2msc->device_busy = false;
1057 if (xm2msc_alljob_ready(xm2msc))
1058 xm2msc_device_run(xm2msc->xm2msc_chan);
1060 xm2msc_job_finish(xm2msc);
1063 static irqreturn_t xm2msc_isr(int irq, void *data)
1065 struct xm2m_msc_dev *xm2msc = (struct xm2m_msc_dev *)data;
1066 void __iomem *base = xm2msc->regs;
1069 status = xm2msc_readreg(base + XM2MSC_ISR);
1070 if (!(status & XM2MSC_ISR_DONE))
1073 xm2msc_writereg(base + XM2MSC_ISR, status & XM2MSC_ISR_DONE);
1075 xm2msc_stop(xm2msc);
1077 xm2msc->isr_wait = false;
1078 wake_up(&xm2msc->isr_finished);
1083 static int xm2msc_streamon(struct file *file, void *fh,
1084 enum v4l2_buf_type type)
1086 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1088 return v4l2_m2m_streamon(file, chan_ctx->m2m_ctx, type);
1091 static int xm2msc_streamoff(struct file *file, void *fh,
1092 enum v4l2_buf_type type)
1094 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1097 ret = v4l2_m2m_streamoff(file, chan_ctx->m2m_ctx, type);
1099 /* Check if any channel is still running */
1100 xm2msc_device_run(chan_ctx);
1104 static int xm2msc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1106 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1108 return v4l2_m2m_qbuf(file, chan_ctx->m2m_ctx, buf);
1111 static int xm2msc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1113 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1115 return v4l2_m2m_dqbuf(file, chan_ctx->m2m_ctx, buf);
1118 static int xm2msc_expbuf(struct file *file, void *fh,
1119 struct v4l2_exportbuffer *eb)
1121 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1123 return v4l2_m2m_expbuf(file, chan_ctx->m2m_ctx, eb);
1126 static int xm2msc_createbufs(struct file *file, void *fh,
1127 struct v4l2_create_buffers *cb)
1129 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1131 return v4l2_m2m_create_bufs(file, chan_ctx->m2m_ctx, cb);
1134 static int xm2msc_reqbufs(struct file *file, void *fh,
1135 struct v4l2_requestbuffers *reqbufs)
1137 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1139 return v4l2_m2m_reqbufs(file, chan_ctx->m2m_ctx, reqbufs);
1142 static int xm2msc_querybuf(struct file *file, void *fh,
1143 struct v4l2_buffer *buf)
1145 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1147 return v4l2_m2m_querybuf(file, chan_ctx->m2m_ctx, buf);
1151 xm2msc_cal_stride(unsigned int width, enum xm2msc_pix_fmt xfmt)
1153 unsigned int stride;
1155 /* Stride in Bytes = (Width × Bytes per Pixel); */
1156 /* TODO: The Width value must be a multiple of Pixels per Clock */
1158 case XILINX_M2MSC_FMT_RGBX8:
1159 case XILINX_M2MSC_FMT_YUVX8:
1160 case XILINX_M2MSC_FMT_RGBX10:
1161 case XILINX_M2MSC_FMT_YUVX10:
1162 case XILINX_M2MSC_FMT_BGRX8:
1165 case XILINX_M2MSC_FMT_YUYV8:
1166 case XILINX_M2MSC_FMT_UYVY8:
1169 case XILINX_M2MSC_FMT_Y_UV8:
1170 case XILINX_M2MSC_FMT_Y_UV8_420:
1171 case XILINX_M2MSC_FMT_Y8:
1174 case XILINX_M2MSC_FMT_RGB8:
1175 case XILINX_M2MSC_FMT_YUV8:
1176 case XILINX_M2MSC_FMT_BGR8:
1179 case XILINX_M2MSC_FMT_Y_UV10:
1180 case XILINX_M2MSC_FMT_Y_UV10_420:
1181 case XILINX_M2MSC_FMT_Y10:
1182 /* 4 bytes per 3 pixels */
1183 stride = DIV_ROUND_UP(width * 4, 3);
1193 vidioc_try_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1195 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1196 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1197 struct xm2msc_q_data *q_data;
1198 struct vb2_queue *vq;
1201 if (pix->width < XM2MSC_MIN_WIDTH || pix->width > xm2msc->max_wd ||
1202 pix->height < XM2MSC_MIN_HEIGHT || pix->height > xm2msc->max_ht)
1203 dev_dbg(xm2msc->dev,
1204 "Wrong input parameters %d, wxh: %dx%d.\n",
1205 f->type, f->fmt.pix.width, f->fmt.pix.height);
1207 * V4L2 specification suggests the driver corrects the
1208 * format struct if any of the dimensions is unsupported
1210 if (pix->height < XM2MSC_MIN_HEIGHT)
1211 pix->height = XM2MSC_MIN_HEIGHT;
1212 else if (pix->height > xm2msc->max_ht)
1213 pix->height = xm2msc->max_ht;
1215 if (pix->width < XM2MSC_MIN_WIDTH)
1216 pix->width = XM2MSC_MIN_WIDTH;
1217 else if (pix->width > xm2msc->max_wd)
1218 pix->width = xm2msc->max_wd;
1220 vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
1224 q_data = get_q_data(chan_ctx, f->type);
1228 if (vb2_is_busy(vq)) {
1229 v4l2_err(&xm2msc->v4l2_dev,
1230 "%s queue busy\n", __func__);
1234 q_data->fmt = find_format(f);
1235 index = find_format_index(f);
1236 if (!q_data->fmt || index == ARRAY_SIZE(formats) ||
1237 !xm2msc_chk_fmt(xm2msc, index)) {
1238 v4l2_err(&xm2msc->v4l2_dev,
1239 "Couldn't set format type %d, wxh: %dx%d. ",
1240 f->type, f->fmt.pix.width, f->fmt.pix.height);
1241 v4l2_err(&xm2msc->v4l2_dev,
1242 "fmt: %d, field: %d\n",
1243 f->fmt.pix.pixelformat, f->fmt.pix.field);
1251 vidioc_s_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1253 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1254 struct xm2msc_q_data *q_data = get_q_data(chan_ctx, f->type);
1257 q_data = get_q_data(chan_ctx, f->type);
1259 q_data->width = pix->width;
1260 q_data->height = pix->height;
1261 q_data->stride = xm2msc_cal_stride(pix->width,
1262 q_data->fmt->xm2msc_fmt);
1263 q_data->colorspace = pix->colorspace;
1264 q_data->field = pix->field;
1265 q_data->nplanes = q_data->fmt->num_planes;
1267 for (i = 0; i < q_data->nplanes; i++) {
1268 q_data->bytesperline[i] = q_data->stride;
1269 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1270 q_data->sizeimage[i] = q_data->stride * q_data->height;
1271 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
1274 xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data,
1275 chan_ctx->num, f->type, __func__);
/* VIDIOC_TRY_FMT on the OUTPUT (source) queue: common try handler. */
static int xm2msc_try_fmt_vid_out(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	return vidioc_try_fmt(fh_to_chanctx(fh), f);
}
/* VIDIOC_TRY_FMT on the CAPTURE (destination) queue: common try handler. */
static int xm2msc_try_fmt_vid_cap(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	return vidioc_try_fmt(fh_to_chanctx(fh), f);
}
/* VIDIOC_S_FMT (CAPTURE): validate via TRY_FMT first, then commit. */
static int xm2msc_s_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	int ret = xm2msc_try_fmt_vid_cap(file, fh, f);

	if (ret)
		return ret;

	return vidioc_s_fmt(fh_to_chanctx(fh), f);
}
/* VIDIOC_S_FMT (OUTPUT): validate via TRY_FMT first, then commit. */
static int xm2msc_s_fmt_vid_out(struct file *file, void *fh,
				struct v4l2_format *f)
{
	int ret = xm2msc_try_fmt_vid_out(file, fh, f);

	if (ret)
		return ret;

	return vidioc_s_fmt(fh_to_chanctx(fh), f);
}
1321 static int vidioc_g_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1323 struct vb2_queue *vq;
1324 struct xm2msc_q_data *q_data;
1325 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1328 vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
1332 q_data = get_q_data(chan_ctx, f->type);
1336 pix->width = q_data->width;
1337 pix->height = q_data->height;
1338 pix->field = V4L2_FIELD_NONE;
1339 pix->pixelformat = q_data->fmt->fourcc;
1340 pix->colorspace = q_data->colorspace;
1341 pix->num_planes = q_data->nplanes;
1343 for (i = 0; i < pix->num_planes; i++) {
1344 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1345 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
/* VIDIOC_G_FMT on the OUTPUT (source) queue. */
static int xm2msc_g_fmt_vid_out(struct file *file, void *fh,
				struct v4l2_format *f)
{
	return vidioc_g_fmt(fh_to_chanctx(fh), f);
}
/* VIDIOC_G_FMT on the CAPTURE (destination) queue. */
static int xm2msc_g_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	return vidioc_g_fmt(fh_to_chanctx(fh), f);
}
1367 static int enum_fmt(struct xm2m_msc_dev *xm2msc, struct v4l2_fmtdesc *f)
1369 const struct xm2msc_fmt *fmt;
1370 unsigned int i, enabled = 0;
1372 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1373 if (xm2msc_chk_fmt(xm2msc, i) && enabled++ == f->index)
1377 if (i == ARRAY_SIZE(formats))
1378 /* Format not found */
1383 strlcpy(f->description, fmt->name,
1384 sizeof(f->description));
1385 f->pixelformat = fmt->fourcc;
1390 static int xm2msc_enum_fmt_vid_cap(struct file *file, void *fh,
1391 struct v4l2_fmtdesc *f)
1393 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1395 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1398 return enum_fmt(chan_ctx->xm2msc_dev, f);
1401 static int xm2msc_enum_fmt_vid_out(struct file *file, void *fh,
1402 struct v4l2_fmtdesc *f)
1404 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1406 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1409 return enum_fmt(chan_ctx->xm2msc_dev, f);
1412 static int xm2msc_querycap(struct file *file, void *fh,
1413 struct v4l2_capability *cap)
1415 strncpy(cap->driver, XM2MSC_DRIVER_NAME, sizeof(cap->driver) - 1);
1416 strncpy(cap->card, XM2MSC_DRIVER_NAME, sizeof(cap->card) - 1);
1417 snprintf(cap->bus_info, sizeof(cap->bus_info),
1418 "platform:%s", XM2MSC_DRIVER_NAME);
1420 * This is only a mem-to-mem video device. The STREAMING
1421 * device capability flags are left only for compatibility
1422 * and are scheduled for removal.
1424 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
1425 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1429 static int xm2msc_queue_setup(struct vb2_queue *vq,
1430 unsigned int *nbuffers, unsigned int *nplanes,
1431 unsigned int sizes[], struct device *alloc_devs[])
1434 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vq);
1435 struct xm2msc_q_data *q_data;
1437 q_data = get_q_data(chan_ctx, vq->type);
1441 *nplanes = q_data->nplanes;
1443 for (i = 0; i < *nplanes; i++)
1444 sizes[i] = q_data->sizeimage[i];
1446 dev_dbg(chan_ctx->xm2msc_dev->dev, "get %d buffer(s) of size %d",
1447 *nbuffers, sizes[0]);
1448 if (q_data->nplanes == 2)
1449 dev_dbg(chan_ctx->xm2msc_dev->dev, " and %d\n", sizes[1]);
1454 static int xm2msc_buf_prepare(struct vb2_buffer *vb)
1456 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
1457 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1458 struct xm2msc_q_data *q_data;
1459 unsigned int i, num_planes;
1461 q_data = get_q_data(chan_ctx, vb->vb2_queue->type);
1464 num_planes = q_data->nplanes;
1466 for (i = 0; i < num_planes; i++) {
1467 if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
1468 v4l2_err(&xm2msc->v4l2_dev, "data will not fit into plane ");
1469 v4l2_err(&xm2msc->v4l2_dev, "(%lu < %lu)\n",
1470 vb2_plane_size(vb, i),
1471 (long)q_data->sizeimage[i]);
1476 for (i = 0; i < num_planes; i++)
1477 vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
1482 static void xm2msc_buf_queue(struct vb2_buffer *vb)
1484 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1485 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
1487 v4l2_m2m_buf_queue(chan_ctx->m2m_ctx, vbuf);
1490 static void xm2msc_return_all_buffers(struct xm2msc_chan_ctx *chan_ctx,
1491 struct vb2_queue *q,
1492 enum vb2_buffer_state state)
1494 struct vb2_v4l2_buffer *vb;
1495 unsigned long flags;
1498 if (V4L2_TYPE_IS_OUTPUT(q->type))
1499 vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
1501 vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
1504 spin_lock_irqsave(&chan_ctx->xm2msc_dev->lock, flags);
1505 v4l2_m2m_buf_done(vb, state);
1506 spin_unlock_irqrestore(&chan_ctx->xm2msc_dev->lock, flags);
1510 static int xm2msc_start_streaming(struct vb2_queue *q, unsigned int count)
1512 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
1513 static struct xm2msc_q_data *q_data;
1516 if (V4L2_TYPE_IS_OUTPUT(q->type))
1517 xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_OUT);
1519 xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_CAP);
1521 xm2msc_set_chan_params(chan_ctx, q->type);
1523 if (xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_CAP) &&
1524 xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_OUT))
1525 xm2msc_set_chan_com_params(chan_ctx);
1527 type = V4L2_TYPE_IS_OUTPUT(q->type) ?
1528 V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
1529 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1530 q_data = get_q_data(chan_ctx, type);
1531 xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data, chan_ctx->num,
1533 xm2msc_pr_status(chan_ctx->xm2msc_dev, __func__);
1538 static void xm2msc_stop_streaming(struct vb2_queue *q)
1540 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
1542 xm2msc_return_all_buffers(chan_ctx, q, VB2_BUF_STATE_ERROR);
1544 if (V4L2_TYPE_IS_OUTPUT(q->type))
1545 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
1547 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
1550 static const struct vb2_ops xm2msc_qops = {
1551 .queue_setup = xm2msc_queue_setup,
1552 .buf_prepare = xm2msc_buf_prepare,
1553 .buf_queue = xm2msc_buf_queue,
1554 .start_streaming = xm2msc_start_streaming,
1555 .stop_streaming = xm2msc_stop_streaming,
1558 static int queue_init(void *priv, struct vb2_queue *src_vq,
1559 struct vb2_queue *dst_vq)
1561 struct xm2msc_chan_ctx *chan_ctx = priv;
1562 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1565 memset(src_vq, 0, sizeof(*src_vq));
1566 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1567 src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
1568 src_vq->drv_priv = chan_ctx;
1569 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1570 src_vq->ops = &xm2msc_qops;
1571 src_vq->mem_ops = &vb2_dma_contig_memops;
1572 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1573 src_vq->lock = &xm2msc->dev_mutex;
1574 src_vq->dev = xm2msc->v4l2_dev.dev;
1576 ret = vb2_queue_init(src_vq);
1580 memset(dst_vq, 0, sizeof(*dst_vq));
1581 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1582 dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1583 dst_vq->drv_priv = chan_ctx;
1584 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1585 dst_vq->ops = &xm2msc_qops;
1586 dst_vq->mem_ops = &vb2_dma_contig_memops;
1587 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1588 dst_vq->lock = &xm2msc->dev_mutex;
1589 dst_vq->dev = xm2msc->v4l2_dev.dev;
1591 return vb2_queue_init(dst_vq);
1594 static const struct v4l2_ioctl_ops xm2msc_ioctl_ops = {
1595 .vidioc_querycap = xm2msc_querycap,
1597 .vidioc_enum_fmt_vid_cap_mplane = xm2msc_enum_fmt_vid_cap,
1598 .vidioc_g_fmt_vid_cap_mplane = xm2msc_g_fmt_vid_cap,
1599 .vidioc_try_fmt_vid_cap_mplane = xm2msc_try_fmt_vid_cap,
1600 .vidioc_s_fmt_vid_cap_mplane = xm2msc_s_fmt_vid_cap,
1602 .vidioc_enum_fmt_vid_out_mplane = xm2msc_enum_fmt_vid_out,
1603 .vidioc_g_fmt_vid_out_mplane = xm2msc_g_fmt_vid_out,
1604 .vidioc_try_fmt_vid_out_mplane = xm2msc_try_fmt_vid_out,
1605 .vidioc_s_fmt_vid_out_mplane = xm2msc_s_fmt_vid_out,
1607 .vidioc_reqbufs = xm2msc_reqbufs,
1608 .vidioc_querybuf = xm2msc_querybuf,
1609 .vidioc_expbuf = xm2msc_expbuf,
1610 .vidioc_create_bufs = xm2msc_createbufs,
1612 .vidioc_qbuf = xm2msc_qbuf,
1613 .vidioc_dqbuf = xm2msc_dqbuf,
1615 .vidioc_streamon = xm2msc_streamon,
1616 .vidioc_streamoff = xm2msc_streamoff,
/*
 * xm2msc_open() - open() handler for a channel video node.
 *
 * Maps the device minor back to its channel context, refuses a second
 * open of the same channel, creates the m2m context and marks the HW
 * channel enabled. Serialized against other file ops by dev_mutex.
 * (Error-path labels/returns fall between the lines visible here.)
 */
static int xm2msc_open(struct file *file)
struct xm2m_msc_dev *xm2msc = video_drvdata(file);
struct xm2msc_chan_ctx *chan_ctx = NULL;
if (mutex_lock_interruptible(&xm2msc->dev_mutex))
return -ERESTARTSYS;
/* Find the ATTACHED channel that owns this device minor. */
minor = iminor(file_inode(file));
for (chan = 0; chan < xm2msc->max_chan; chan++) {
chan_ctx = &xm2msc->xm2msc_chan[chan];
if ((chan_ctx->status & CHAN_ATTACHED) &&
chan_ctx->minor == minor)
if (chan == xm2msc->max_chan) {
v4l2_err(&xm2msc->v4l2_dev,
"%s Chan not found with minor = %d\n",
/* Already opened, do not allow same channel
 * to be open more then once
 */
if (chan_ctx->status & CHAN_OPENED) {
v4l2_warn(&xm2msc->v4l2_dev,
"%s Chan already opened for minor = %d\n",
/* private_data holds &chan_ctx->fh; other fops must use fh_to_chanctx(). */
v4l2_fh_init(&chan_ctx->fh, &chan_ctx->vfd);
file->private_data = &chan_ctx->fh;
v4l2_fh_add(&chan_ctx->fh);
chan_ctx->m2m_ctx = v4l2_m2m_ctx_init(chan_ctx->m2m_dev,
chan_ctx, &queue_init);
if (IS_ERR(chan_ctx->m2m_ctx)) {
ret = PTR_ERR(chan_ctx->m2m_ctx);
v4l2_err(&xm2msc->v4l2_dev,
"%s Chan M2M CTX not creted for minor %d\n",
chan_ctx->fh.m2m_ctx = chan_ctx->m2m_ctx;
chan_ctx->status |= CHAN_OPENED;
chan_ctx->xm2msc_dev = xm2msc;
chan_ctx->frames = 0;
/* Enable the channel in HW now that SW state is ready. */
xm2msc_set_chan(chan_ctx, true);
v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance created\n", chan);
mutex_unlock(&xm2msc->dev_mutex);
xm2msc_pr_chanctx(chan_ctx, __func__);
xm2msc_pr_status(xm2msc, __func__);
/* Error unwind: tear the file handle back down. */
v4l2_fh_del(&chan_ctx->fh);
v4l2_fh_exit(&chan_ctx->fh);
mutex_unlock(&xm2msc->dev_mutex);
xm2msc_pr_chanctx(chan_ctx, __func__);
xm2msc_pr_status(xm2msc, __func__);
1695 static int xm2msc_release(struct file *file)
1697 struct xm2m_msc_dev *xm2msc = video_drvdata(file);
1698 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
1700 if (mutex_lock_interruptible(&xm2msc->dev_mutex))
1701 return -ERESTARTSYS;
1703 v4l2_m2m_ctx_release(chan_ctx->m2m_ctx);
1704 v4l2_fh_del(&chan_ctx->fh);
1705 v4l2_fh_exit(&chan_ctx->fh);
1706 chan_ctx->status &= ~CHAN_OPENED;
1707 xm2msc_set_chan(chan_ctx, false);
1709 v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance released\n",
1712 mutex_unlock(&xm2msc->dev_mutex);
1716 static unsigned int xm2msc_poll(struct file *file,
1717 struct poll_table_struct *wait)
1719 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
1720 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1723 mutex_lock(&xm2msc->dev_mutex);
1724 ret = v4l2_m2m_poll(file, chan_ctx->m2m_ctx, wait);
1725 mutex_unlock(&xm2msc->dev_mutex);
1730 static int xm2msc_mmap(struct file *file, struct vm_area_struct *vma)
1732 struct xm2msc_chan_ctx *chan_ctx = file->private_data;
1733 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1736 mutex_lock(&xm2msc->dev_mutex);
1737 ret = v4l2_m2m_mmap(file, chan_ctx->m2m_ctx, vma);
1739 mutex_unlock(&xm2msc->dev_mutex);
1743 static const struct v4l2_file_operations xm2msc_fops = {
1744 .owner = THIS_MODULE,
1745 .open = xm2msc_open,
1746 .release = xm2msc_release,
1747 .poll = xm2msc_poll,
1748 .unlocked_ioctl = video_ioctl2,
1749 .mmap = xm2msc_mmap,
/* Template video_device: probe() copies it for each channel and then
 * fills in lock and v4l2_dev.
 * NOTE(review): one initializer between .ioctl_ops and .release (line
 * not visible in this chunk, likely ".minor = -1,") — confirm against
 * the full file.
 */
static const struct video_device xm2msc_videodev = {
.name = XM2MSC_DRIVER_NAME,
.fops = &xm2msc_fops,
.ioctl_ops = &xm2msc_ioctl_ops,
.release = video_device_release_empty,
.vfl_dir = VFL_DIR_M2M,
1761 static const struct v4l2_m2m_ops xm2msc_m2m_ops = {
1762 .device_run = xm2msc_device_run,
1763 .job_ready = xm2msc_job_ready,
1764 .job_abort = xm2msc_job_abort,
/*
 * xm2msc_parse_of() - map the register space and read the HW
 * configuration from the device tree: xlnx,max-chan, xlnx,max-width,
 * xlnx,max-height, xlnx,dma-addr-width, the interrupt, the supported
 * video formats (xlnx,vid-formats) and the reset GPIO.
 * (Several error returns fall between the lines visible here.)
 */
static int xm2msc_parse_of(struct platform_device *pdev,
struct xm2m_msc_dev *xm2msc)
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
const char *vid_fmts[ARRAY_SIZE(formats)];
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xm2msc->regs = devm_ioremap_resource(dev, res);
if (IS_ERR((__force void *)xm2msc->regs))
return PTR_ERR((__force const void *)xm2msc->regs);
/* NOTE(review): res->start is resource_size_t; printing it with %llx
 * needs a (u64) cast on 32-bit builds — confirm. */
dev_dbg(dev, "IO Mem 0x%llx mapped at %p\n", res->start, xm2msc->regs);
ret = of_property_read_u32(node, "xlnx,max-chan",
/* Channel count must stay within the IP's supported range. */
if (xm2msc->max_chan < XM2MSC_MIN_CHAN ||
xm2msc->max_chan > XM2MSC_MAX_CHAN) {
"Invalid maximum scaler channels : %d",
ret = of_property_read_u32(node, "xlnx,max-width",
"missing xlnx,max-width prop\n");
if (xm2msc->max_wd < XM2MSC_MIN_WIDTH ||
xm2msc->max_wd > XM2MSC_MAX_WIDTH) {
dev_err(dev, "Invalid width : %d",
ret = of_property_read_u32(node, "xlnx,max-height",
dev_err(dev, "missing xlnx,max-height prop\n");
if (xm2msc->max_ht < XM2MSC_MIN_HEIGHT ||
xm2msc->max_ht > XM2MSC_MAX_HEIGHT) {
dev_err(dev, "Invalid height : %d",
ret = of_property_read_u32(node, "xlnx,dma-addr-width",
&xm2msc->dma_addr_size);
if (ret || (xm2msc->dma_addr_size != 32 &&
xm2msc->dma_addr_size != 64)) {
dev_err(dev, "missing/invalid addr width dts prop\n");
xm2msc->irq = irq_of_parse_and_map(node, 0);
/* NOTE(review): irq_of_parse_and_map() returns 0 — not a negative
 * errno — on failure, so this "< 0" test can never detect a mapping
 * failure. The check should be "if (!xm2msc->irq)" (or switch to
 * platform_get_irq()). Flagging only; fix needs the full function. */
if (xm2msc->irq < 0) {
dev_err(dev, "Unable to get IRQ");
dev_dbg(dev, "Max Channel Supported = %d\n", xm2msc->max_chan);
dev_dbg(dev, "DMA Addr width Supported = %d\n", xm2msc->dma_addr_size);
dev_dbg(dev, "Max col/row Supported = (%d) / (%d)\n",
xm2msc->max_wd, xm2msc->max_ht);
/* read supported video formats and update internal table */
hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
ret = of_property_read_string_array(node, "xlnx,vid-formats",
vid_fmts, hw_vid_fmt_cnt);
"Missing or invalid xlnx,vid-formats dts prop\n");
dev_dbg(dev, "Supported format = ");
for (i = 0; i < hw_vid_fmt_cnt; i++) {
const char *vid_fmt_name = vid_fmts[i];
/* Enable each table format whose name matches a DT string. */
for (j = 0; j < ARRAY_SIZE(formats); j++) {
const char *dts_name = formats[j].name;
if (strcmp(vid_fmt_name, dts_name))
dev_dbg(dev, "%s ", dts_name);
xm2msc_set_fmt(xm2msc, j);
/* Reset GPIO is mandatory; -EPROBE_DEFER is propagated to the core. */
xm2msc->rst_gpio = devm_gpiod_get(dev, "reset",
if (IS_ERR(xm2msc->rst_gpio)) {
ret = PTR_ERR(xm2msc->rst_gpio);
if (ret == -EPROBE_DEFER)
"Probe deferred due to GPIO reset defer\n");
"Unable to locate reset property in dt\n");
1887 static void xm2msc_unreg_video_n_m2m(struct xm2m_msc_dev *xm2msc)
1889 struct xm2msc_chan_ctx *chan_ctx;
1892 for (chan = 0; chan < xm2msc->max_chan; chan++) {
1893 chan_ctx = &xm2msc->xm2msc_chan[chan];
1894 if (!(chan_ctx->status & CHAN_ATTACHED))
1895 break; /*We register video sequentially */
1896 video_unregister_device(&chan_ctx->vfd);
1897 chan_ctx->status &= ~CHAN_ATTACHED;
1899 if (!IS_ERR(chan_ctx->m2m_dev))
1900 v4l2_m2m_release(chan_ctx->m2m_dev);
/*
 * xm2m_msc_probe() - map the HW, register the v4l2 device and, for each
 * channel, a video node plus a mem2mem device; finally install the
 * shared IRQ handler. (Error-unwind labels fall between the lines
 * visible here; the unwind path is at the bottom.)
 */
static int xm2m_msc_probe(struct platform_device *pdev)
struct xm2m_msc_dev *xm2msc;
struct xm2msc_chan_ctx *chan_ctx;
struct video_device *vfd;
xm2msc = devm_kzalloc(&pdev->dev, sizeof(*xm2msc), GFP_KERNEL);
ret = xm2msc_parse_of(pdev, xm2msc);
xm2msc->dev = &pdev->dev;
xm2msc_reset(xm2msc);
spin_lock_init(&xm2msc->lock);
ret = v4l2_device_register(&pdev->dev, &xm2msc->v4l2_dev);
for (chan = 0; chan < xm2msc->max_chan; chan++) {
chan_ctx = &xm2msc->xm2msc_chan[chan];
/* Each channel gets its own video node, cloned from the template. */
vfd = &chan_ctx->vfd;
*vfd = xm2msc_videodev;
vfd->lock = &xm2msc->dev_mutex;
vfd->v4l2_dev = &xm2msc->v4l2_dev;
/* NOTE(review): dev_mutex is handed to the video node here but
 * mutex_init(&xm2msc->dev_mutex) only runs after this loop — an
 * open() racing with probe would use an uninitialized mutex.
 * Confirm and consider initializing the mutexes before the loop. */
ret = video_register_device(vfd, VFL_TYPE_GRABBER, chan);
v4l2_err(&xm2msc->v4l2_dev,
"Failed to register video dev for chan %d\n",
chan_ctx->status = CHAN_ATTACHED;
video_set_drvdata(vfd, xm2msc);
snprintf(vfd->name, sizeof(vfd->name),
"%s", xm2msc_videodev.name);
v4l2_info(&xm2msc->v4l2_dev,
" Device registered as /dev/video%d\n", vfd->num);
dev_dbg(xm2msc->dev, "%s Device registered as /dev/video%d\n",
__func__, vfd->num);
chan_ctx->m2m_dev = v4l2_m2m_init(&xm2msc_m2m_ops);
if (IS_ERR(chan_ctx->m2m_dev)) {
v4l2_err(&xm2msc->v4l2_dev,
"Failed to init mem2mem device for chan %d\n",
ret = PTR_ERR(chan_ctx->m2m_dev);
chan_ctx->xm2msc_dev = xm2msc;
/* Per-channel register window; channels past the 5th sit after a
 * reserved hole in the register map (HW TODO noted below). */
chan_ctx->regs = xm2msc->regs + XM2MSC_CHAN_REGS_START(chan);
if (chan > 4) /* TODO: To be fixed in HW */
chan_ctx->regs += XM2MSC_RESERVED_AREA;
chan_ctx->num = chan;
chan_ctx->minor = vfd->minor;	/* open() matches on this minor */
xm2msc_pr_chanctx(chan_ctx, __func__);
mutex_init(&xm2msc->dev_mutex);
mutex_init(&xm2msc->mutex);
init_waitqueue_head(&xm2msc->isr_finished);
ret = devm_request_irq(&pdev->dev, xm2msc->irq,
xm2msc_isr, IRQF_SHARED,
XM2MSC_DRIVER_NAME, xm2msc);
dev_err(&pdev->dev, "Unable to register IRQ\n");
platform_set_drvdata(pdev, xm2msc);
/* Error unwind: unregister everything registered so far. */
xm2msc_unreg_video_n_m2m(xm2msc);
v4l2_device_unregister(&xm2msc->v4l2_dev);
1996 static int xm2m_msc_remove(struct platform_device *pdev)
1998 struct xm2m_msc_dev *xm2msc = platform_get_drvdata(pdev);
2000 xm2msc_unreg_video_n_m2m(xm2msc);
2001 v4l2_device_unregister(&xm2msc->v4l2_dev);
2005 static const struct of_device_id xm2m_msc_of_id_table[] = {
2006 {.compatible = "xlnx,v-multi-scaler-v1.0"},
2010 MODULE_DEVICE_TABLE(of, xm2m_msc_of_id_table);
2012 static struct platform_driver xm2m_msc_driver = {
2014 .name = "xilinx-multiscaler",
2015 .of_match_table = xm2m_msc_of_id_table,
2017 .probe = xm2m_msc_probe,
2018 .remove = xm2m_msc_remove,
2021 module_platform_driver(xm2m_msc_driver);
2023 MODULE_DESCRIPTION("Xilinx M2M Multi-Scaler Driver");
2024 MODULE_LICENSE("GPL v2");
2025 MODULE_ALIAS("xlnx_m2m_multiscaler_dev");