1 // SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Memory-to-Memory Video Multi-Scaler IP
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Author: Suresh Gupta <suresh.gupta@xilinx.com>
 *
 * Based on the virtual v4l2-mem2mem example device
 *
 * This driver adds support to control the Xilinx Video Multi-Scaler IP.
 */
15 #include <linux/delay.h>
17 #include <linux/gpio/consumer.h>
18 #include <linux/interrupt.h>
20 #include <linux/module.h>
22 #include <linux/of_irq.h>
23 #include <linux/platform_device.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
27 #include <media/v4l2-device.h>
28 #include <media/v4l2-ioctl.h>
29 #include <media/v4l2-mem2mem.h>
30 #include <media/videobuf2-dma-contig.h>
32 #include "xilinx-multi-scaler-coeff.h"
34 /* 0x0000 : Control signals */
35 #define XM2MSC_AP_CTRL 0x0000
36 #define XM2MSC_AP_CTRL_START BIT(0)
37 #define XM2MSC_AP_CTRL_DONE BIT(1)
38 #define XM2MSC_AP_CTRL_IDEL BIT(2)
39 #define XM2MSC_AP_CTRL_READY BIT(3)
40 #define XM2MSC_AP_CTRL_AUTO_RESTART BIT(7)
42 /* 0x0004 : Global Interrupt Enable Register */
43 #define XM2MSC_GIE 0x0004
44 #define XM2MSC_GIE_EN BIT(0)
46 /* 0x0008 : IP Interrupt Enable Register (Read/Write) */
47 #define XM2MSC_IER 0x0008
48 #define XM2MSC_ISR 0x000c
49 #define XM2MSC_ISR_DONE BIT(0)
50 #define XM2MSC_ISR_READY BIT(1)
52 #define XM2MSC_NUM_OUTS 0x0010
54 #define XM2MSC_WIDTHIN 0x000
55 #define XM2MSC_WIDTHOUT 0x008
56 #define XM2MSC_HEIGHTIN 0x010
57 #define XM2MSC_HEIGHTOUT 0x018
58 #define XM2MSC_LINERATE 0x020
59 #define XM2MSC_PIXELRATE 0x028
60 #define XM2MSC_INPIXELFMT 0x030
61 #define XM2MSC_OUTPIXELFMT 0x038
62 #define XM2MSC_INSTRIDE 0x050
63 #define XM2MSC_OUTSTRIDE 0x058
64 #define XM2MSC_SRCIMGBUF0 0x060
65 #define XM2MSC_SRCIMGBUF1 0x070
66 #define XM2MSC_DSTIMGBUF0 0x090
67 #define XM2MSC_DSTIMGBUF1 0x0100
69 #define XM2MVSC_VFLTCOEFF_L 0x2000
70 #define XM2MVSC_VFLTCOEFF(x) (XM2MVSC_VFLTCOEFF_L + 0x2000 * (x))
71 #define XM2MVSC_HFLTCOEFF_L 0x2800
72 #define XM2MVSC_HFLTCOEFF(x) (XM2MVSC_HFLTCOEFF_L + 0x2000 * (x))
74 #define XM2MSC_CHAN_REGS_START(x) (0x100 + 0x200 * x)
76 /* GPIO RESET MACROS */
77 #define XM2MSC_RESET_ASSERT (0x1)
78 #define XM2MSC_RESET_DEASSERT (0x0)
80 #define XM2MSC_MIN_CHAN 1
81 #define XM2MSC_MAX_CHAN 8
83 #define XM2MSC_MAX_WIDTH (3840)
84 #define XM2MSC_MAX_HEIGHT (2160)
85 #define XM2MSC_MIN_WIDTH (64)
86 #define XM2MSC_MIN_HEIGHT (64)
87 #define XM2MSC_STEP_PRECISION (65536)
88 /* Mask definitions for Low 16 bits in a 32 bit number */
89 #define XM2MSC_MASK_LOW_16BITS GENMASK(15, 0)
90 #define XM2MSC_BITSHIFT_16 (16)
92 #define XM2MSC_DRIVER_NAME "xm2msc"
94 #define CHAN_ATTACHED BIT(0)
95 #define CHAN_OPENED BIT(1)
97 #define XM2MSC_CHAN_OUT 0
98 #define XM2MSC_CHAN_CAP 1
100 #define NUM_STREAM(_x) \
101 ({ typeof(_x) (x) = (_x); \
102 min(ffz(x->out_streamed_chan), \
103 ffz(x->cap_streamed_chan)); })
105 /* Xilinx Video Specific Color/Pixel Formats */
/*
 * enum xm2msc_pix_fmt - Xilinx video-specific color/pixel format codes.
 * The numeric values match the IP's IN/OUTPIXELFMT register encoding and
 * must not be changed.
 */
enum xm2msc_pix_fmt {
	XILINX_M2MSC_FMT_RGBX8		= 10,
	XILINX_M2MSC_FMT_YUVX8		= 11,
	XILINX_M2MSC_FMT_YUYV8		= 12,
	XILINX_M2MSC_FMT_RGBX10		= 15,
	XILINX_M2MSC_FMT_YUVX10		= 16,
	XILINX_M2MSC_FMT_Y_UV8		= 18,
	XILINX_M2MSC_FMT_Y_UV8_420	= 19,
	XILINX_M2MSC_FMT_RGB8		= 20,
	XILINX_M2MSC_FMT_YUV8		= 21,
	XILINX_M2MSC_FMT_Y_UV10		= 22,
	XILINX_M2MSC_FMT_Y_UV10_420	= 23,
	XILINX_M2MSC_FMT_Y8		= 24,
	XILINX_M2MSC_FMT_Y10		= 25,
	XILINX_M2MSC_FMT_BGRX8		= 27,
	XILINX_M2MSC_FMT_UYVY8		= 28,
	XILINX_M2MSC_FMT_BGR8		= 29,
};
126 * struct xm2msc_fmt - driver info for each of the supported video formats
127 * @name: human-readable device tree name for this entry
128 * @fourcc: standard format identifier
129 * @xm2msc_fmt: Xilinx Video Specific Color/Pixel Formats
130 * @num_planes: number of planes supported by format
135 enum xm2msc_pix_fmt xm2msc_fmt;
139 static const struct xm2msc_fmt formats[] = {
142 .fourcc = V4L2_PIX_FMT_BGRX32,
143 .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX8,
148 .fourcc = V4L2_PIX_FMT_XVUY32,
149 .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX8,
154 .fourcc = V4L2_PIX_FMT_YUYV,
155 .xm2msc_fmt = XILINX_M2MSC_FMT_YUYV8,
159 .name = "xbgr2101010",
160 .fourcc = V4L2_PIX_FMT_XBGR30,
161 .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX10,
165 .name = "yuvx2101010",
166 .fourcc = V4L2_PIX_FMT_XVUY10,
167 .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX10,
172 .fourcc = V4L2_PIX_FMT_NV16,
173 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
178 .fourcc = V4L2_PIX_FMT_NV12,
179 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
184 .fourcc = V4L2_PIX_FMT_RGB24,
185 .xm2msc_fmt = XILINX_M2MSC_FMT_RGB8,
190 .fourcc = V4L2_PIX_FMT_VUY24,
191 .xm2msc_fmt = XILINX_M2MSC_FMT_YUV8,
196 .fourcc = V4L2_PIX_FMT_XV20,
197 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
202 .fourcc = V4L2_PIX_FMT_XV15,
203 .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
208 .fourcc = V4L2_PIX_FMT_GREY,
209 .xm2msc_fmt = XILINX_M2MSC_FMT_Y8,
214 .fourcc = V4L2_PIX_FMT_Y10,
215 .xm2msc_fmt = XILINX_M2MSC_FMT_Y10,
220 .fourcc = V4L2_PIX_FMT_XBGR32,
221 .xm2msc_fmt = XILINX_M2MSC_FMT_BGRX8,
226 .fourcc = V4L2_PIX_FMT_UYVY,
227 .xm2msc_fmt = XILINX_M2MSC_FMT_UYVY8,
232 .fourcc = V4L2_PIX_FMT_BGR24,
233 .xm2msc_fmt = XILINX_M2MSC_FMT_BGR8,
239 * struct xm2msc_q_data - Per-queue, driver-specific private data
240 * There is one source queue and one destination queue for each m2m context.
241 * @width: frame width
242 * @height: frame height
243 * @stride: bytes per lines
244 * @nplanes: Current number of planes
245 * @bytesperline: bytes per line per plane
246 * @sizeimage: image size per plane
247 * @colorspace: supported colorspace
248 * @field: supported field value
251 struct xm2msc_q_data {
255 unsigned int nplanes;
256 unsigned int bytesperline[2];
257 unsigned int sizeimage[2];
258 enum v4l2_colorspace colorspace;
259 enum v4l2_field field;
260 const struct xm2msc_fmt *fmt;
264 * struct xm2msc_chan_ctx - Scaler Channel Info, Per-Channel context
265 * @regs: IO mapped base address of the Channel
266 * @xm2msc_dev: Pointer to struct xm2m_msc_dev
267 * @num: HW Scaling Channel number
268 * @minor: Minor number of the video device
269 * @status: channel status, CHAN_ATTACHED or CHAN_OPENED
270 * @taps: number of hwtaps required for channel
272 * @fh: v4l2 file handle
273 * @m2m_dev: m2m device
274 * @m2m_ctx: memory to memory context structure
275 * @q_data: src & dst queue data
277 struct xm2msc_chan_ctx {
279 struct xm2m_msc_dev *xm2msc_dev;
285 struct video_device vfd;
287 struct v4l2_m2m_dev *m2m_dev;
288 struct v4l2_m2m_ctx *m2m_ctx;
290 struct xm2msc_q_data q_data[2];
294 * struct xm2m_msc_dev - Xilinx M2M Multi-scaler Device
295 * @dev: pointer to struct device instance used by the driver
296 * @regs: IO mapped base address of the HW/IP
297 * @irq: interrupt number
298 * @max_chan: maximum number of Scaling Channels
299 * @max_ht: maximum number of rows in a plane
300 * @max_wd: maximum number of column in a plane
301 * @supported_fmt: bitmap for all supported fmts by HW
302 * @dma_addr_size: Size of dma address pointer in IP (either 32 or 64)
303 * @rst_gpio: reset gpio handler
304 * @opened_chan: bitmap for all open channel
305 * @out_streamed_chan: bitmap for all out streamed channel
306 * @cap_streamed_chan: bitmap for all capture streamed channel
307 * @running_chan: currently running channels
308 * @device_busy: HW device is busy or not
309 * @isr_wait: flag to follow the ISR complete or not
310 * @isr_finished: Wait queue used to wait for IP to complete processing
311 * @v4l2_dev: main struct to for V4L2 device drivers
312 * @dev_mutex: lock for V4L2 device
313 * @mutex: lock for channel ctx
314 * @lock: lock used in IRQ
315 * @xm2msc_chan: arrey of channel context
316 * @hscaler_coeff: Array of filter coefficients for the Horizontal Scaler
317 * @vscaler_coeff: Array of filter coefficients for the Vertical Scaler
319 struct xm2m_msc_dev {
328 struct gpio_desc *rst_gpio;
331 u32 out_streamed_chan;
332 u32 cap_streamed_chan;
336 wait_queue_head_t isr_finished;
338 struct v4l2_device v4l2_dev;
340 struct mutex dev_mutex; /*the mutex for v4l2*/
341 struct mutex mutex; /*lock for bitmap reg*/
342 spinlock_t lock; /*IRQ lock*/
344 struct xm2msc_chan_ctx xm2msc_chan[XM2MSC_MAX_CHAN];
345 short hscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
346 short vscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
349 #define fh_to_chanctx(__fh) container_of(__fh, struct xm2msc_chan_ctx, fh)
351 static inline u32 xm2msc_readreg(const volatile void __iomem *addr)
353 return ioread32(addr);
356 static inline void xm2msc_write64reg(volatile void __iomem *addr, u64 value)
358 iowrite32(lower_32_bits(value), addr);
359 iowrite32(upper_32_bits(value), (void __iomem *)(addr + 4));
362 static inline void xm2msc_writereg(volatile void __iomem *addr, u32 value)
364 iowrite32(value, addr);
367 static struct xm2msc_q_data *get_q_data(struct xm2msc_chan_ctx *chan_ctx,
368 enum v4l2_buf_type type)
371 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
372 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
373 return &chan_ctx->q_data[XM2MSC_CHAN_OUT];
374 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
375 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
376 return &chan_ctx->q_data[XM2MSC_CHAN_CAP];
378 v4l2_err(&chan_ctx->xm2msc_dev->v4l2_dev,
379 "Not supported Q type %d\n", type);
384 static u32 find_format_index(struct v4l2_format *f)
386 const struct xm2msc_fmt *fmt;
389 for (i = 0; i < ARRAY_SIZE(formats); i++) {
391 if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
398 static const struct xm2msc_fmt *find_format(struct v4l2_format *f)
400 const struct xm2msc_fmt *fmt;
403 for (i = 0; i < ARRAY_SIZE(formats); i++) {
405 if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
409 if (i == ARRAY_SIZE(formats))
416 xv_hscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
417 const short *coeff, u32 ntaps)
419 unsigned int i, j, pad, offset;
420 const u32 nphases = XSCALER_MAX_PHASES;
422 /* Determine if coefficient needs padding (effective vs. max taps) */
423 pad = XSCALER_MAX_TAPS - ntaps;
426 memset(xm2msc->hscaler_coeff, 0, sizeof(xm2msc->hscaler_coeff));
428 /* Load coefficients into scaler coefficient table */
429 for (i = 0; i < nphases; i++) {
430 for (j = 0; j < ntaps; ++j)
431 xm2msc->hscaler_coeff[i][j + offset] =
432 coeff[i * ntaps + j];
436 static void xv_hscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
439 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
440 int val, offset, rd_indx;
442 u32 ntaps = chan_ctx->taps;
443 const u32 nphases = XSCALER_MAX_PHASES;
445 offset = (XSCALER_MAX_TAPS - ntaps) / 2;
446 for (i = 0; i < nphases; i++) {
447 for (j = 0; j < ntaps / 2; j++) {
448 rd_indx = j * 2 + offset;
449 val = (xm2msc->hscaler_coeff[i][rd_indx + 1] <<
450 XM2MSC_BITSHIFT_16) |
451 (xm2msc->hscaler_coeff[i][rd_indx] &
452 XM2MSC_MASK_LOW_16BITS);
453 xm2msc_writereg((xm2msc->regs + base_addr) +
454 ((i * ntaps / 2 + j) * 4), val);
460 xv_vscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
461 const short *coeff, const u32 ntaps)
465 const u32 nphases = XSCALER_MAX_PHASES;
467 /* Determine if coefficient needs padding (effective vs. max taps) */
468 pad = XSCALER_MAX_TAPS - ntaps;
469 offset = pad ? (pad >> 1) : 0;
471 /* Zero Entire Array */
472 memset(xm2msc->vscaler_coeff, 0, sizeof(xm2msc->vscaler_coeff));
474 /* Load User defined coefficients into scaler coefficient table */
475 for (i = 0; i < nphases; i++) {
476 for (j = 0; j < ntaps; ++j)
477 xm2msc->vscaler_coeff[i][j + offset] =
478 coeff[i * ntaps + j];
482 static void xv_vscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
485 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
486 u32 val, i, j, offset, rd_indx;
487 u32 ntaps = chan_ctx->taps;
488 const u32 nphases = XSCALER_MAX_PHASES;
490 offset = (XSCALER_MAX_TAPS - ntaps) / 2;
492 for (i = 0; i < nphases; i++) {
493 for (j = 0; j < ntaps / 2; j++) {
494 rd_indx = j * 2 + offset;
495 val = (xm2msc->vscaler_coeff[i][rd_indx + 1] <<
496 XM2MSC_BITSHIFT_16) |
497 (xm2msc->vscaler_coeff[i][rd_indx] &
498 XM2MSC_MASK_LOW_16BITS);
499 xm2msc_writereg((xm2msc->regs +
500 base_addr) + ((i * ntaps / 2 + j) * 4), val);
505 static void xm2mvsc_initialize_coeff_banks(struct xm2msc_chan_ctx *chan_ctx)
507 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
508 /* Bank 0 is init as 6 tap filter for 6, 8, 10 & 12 tap filters */
509 xv_hscaler_load_ext_coeff(xm2msc, &xhsc_coeff_taps6[0][0],
511 xv_hscaler_set_coeff(chan_ctx, XM2MVSC_HFLTCOEFF(chan_ctx->num));
512 xv_vscaler_load_ext_coeff(xm2msc, &xvsc_coeff_taps6[0][0],
514 xv_vscaler_set_coeff(chan_ctx, XM2MVSC_VFLTCOEFF(chan_ctx->num));
517 static void xm2msc_set_chan_params(struct xm2msc_chan_ctx *chan_ctx,
518 enum v4l2_buf_type type)
520 struct xm2msc_q_data *q_data = get_q_data(chan_ctx, type);
521 const struct xm2msc_fmt *fmt = q_data->fmt;
522 void __iomem *base = chan_ctx->regs;
524 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
525 xm2msc_writereg(base + XM2MSC_WIDTHIN, q_data->width);
526 xm2msc_writereg(base + XM2MSC_HEIGHTIN, q_data->height);
527 xm2msc_writereg(base + XM2MSC_INPIXELFMT, fmt->xm2msc_fmt);
528 xm2msc_writereg(base + XM2MSC_INSTRIDE, q_data->stride);
530 xm2msc_writereg(base + XM2MSC_WIDTHOUT, q_data->width);
531 xm2msc_writereg(base + XM2MSC_HEIGHTOUT, q_data->height);
532 xm2msc_writereg(base + XM2MSC_OUTPIXELFMT, fmt->xm2msc_fmt);
533 xm2msc_writereg(base + XM2MSC_OUTSTRIDE, q_data->stride);
537 static void xm2msc_set_chan_com_params(struct xm2msc_chan_ctx *chan_ctx)
539 void __iomem *base = chan_ctx->regs;
540 struct xm2msc_q_data *out_q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
541 struct xm2msc_q_data *cap_q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
545 chan_ctx->taps = XSCALER_TAPS_6; /* Currently only 6 tabs supported */
546 xm2mvsc_initialize_coeff_banks(chan_ctx);
548 pixel_rate = (out_q_data->width * XM2MSC_STEP_PRECISION) /
550 line_rate = (out_q_data->height * XM2MSC_STEP_PRECISION) /
553 xm2msc_writereg(base + XM2MSC_PIXELRATE, pixel_rate);
554 xm2msc_writereg(base + XM2MSC_LINERATE, line_rate);
557 static void xm2msc_program_allchan(struct xm2m_msc_dev *xm2msc)
561 for (chan = 0; chan < xm2msc->running_chan; chan++) {
562 struct xm2msc_chan_ctx *chan_ctx;
564 chan_ctx = &xm2msc->xm2msc_chan[chan];
566 xm2msc_set_chan_params(chan_ctx,
567 V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
568 xm2msc_set_chan_params(chan_ctx,
569 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
570 xm2msc_set_chan_com_params(chan_ctx);
575 xm2msc_pr_q(struct device *dev, struct xm2msc_q_data *q, int chan,
576 int type, const char *fun_name)
579 const struct xm2msc_fmt *fmt = q->fmt;
581 if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
582 dev_dbg(dev, "\n\nOUTPUT Q (%d) Context from [[ %s ]]",
585 dev_dbg(dev, "\n\nCAPTURE Q (%d) Context from [[ %s ]]",
588 dev_dbg(dev, "width height stride clrspace field planes\n");
589 dev_dbg(dev, " %d %d %d %d %d %d\n",
590 q->width, q->height, q->stride,
591 q->colorspace, q->field, q->nplanes);
593 for (i = 0; i < q->nplanes; i++) {
594 dev_dbg(dev, "[plane %d ] bytesperline sizeimage\n", i);
595 dev_dbg(dev, " %d %d\n",
596 q->bytesperline[i], q->sizeimage[i]);
599 dev_dbg(dev, "fmt_name 4cc xlnx-fmt\n");
600 dev_dbg(dev, "%s %d %d\n",
601 fmt->name, fmt->fourcc, fmt->xm2msc_fmt);
602 dev_dbg(dev, "\n\n");
606 xm2msc_pr_status(struct xm2m_msc_dev *xm2msc,
607 const char *fun_name)
609 struct device *dev = xm2msc->dev;
611 dev_dbg(dev, "Status in %s\n", fun_name);
612 dev_dbg(dev, "opened_chan out_streamed_chan cap_streamed_chan\n");
613 dev_dbg(dev, "0x%x 0x%x 0x%x\n",
614 xm2msc->opened_chan, xm2msc->out_streamed_chan,
615 xm2msc->cap_streamed_chan);
616 dev_dbg(dev, "\n\n");
620 xm2msc_pr_chanctx(struct xm2msc_chan_ctx *ctx, const char *fun_name)
622 struct device *dev = ctx->xm2msc_dev->dev;
624 dev_dbg(dev, "\n\n----- [[ %s ]]: Channel %d (0x%p) context -----\n",
625 fun_name, ctx->num, ctx);
626 dev_dbg(dev, "minor = %d, taps = %d\n", ctx->minor, ctx->taps);
627 dev_dbg(dev, "reg mapped at %p\n", ctx->regs);
628 dev_dbg(dev, "xm2msc \tm2m_dev \tm2m_ctx\n");
629 dev_dbg(dev, "%p \t%p \t%p\n", ctx->xm2msc_dev,
630 ctx->m2m_dev, ctx->m2m_ctx);
632 if (ctx->status & CHAN_OPENED)
633 dev_dbg(dev, "Opened ");
634 if (ctx->status & CHAN_ATTACHED)
635 dev_dbg(dev, "and attached");
637 dev_dbg(dev, "-----------------------------------\n");
638 dev_dbg(dev, "\n\n");
642 xm2msc_pr_screg(struct device *dev, const volatile void __iomem *base)
644 dev_dbg(dev, "Ctr, GIE, IE, IS OUT\n");
645 dev_dbg(dev, "0x%x 0x%x 0x%x 0x%x 0x%x\n",
646 xm2msc_readreg(base + XM2MSC_AP_CTRL),
647 xm2msc_readreg(base + XM2MSC_GIE),
648 xm2msc_readreg(base + XM2MSC_IER),
649 xm2msc_readreg(base + XM2MSC_ISR),
650 xm2msc_readreg(base + XM2MSC_NUM_OUTS));
654 xm2msc_pr_chanreg(struct device *dev, const volatile void __iomem *base)
656 dev_dbg(dev, "WIN HIN INPIXELFMT INSTRIDE SRCB0L/H SRCB1L/H\n");
657 dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
658 xm2msc_readreg(base + XM2MSC_WIDTHIN),
659 xm2msc_readreg(base + XM2MSC_HEIGHTIN),
660 xm2msc_readreg(base + XM2MSC_INPIXELFMT),
661 xm2msc_readreg(base + XM2MSC_INSTRIDE),
662 xm2msc_readreg(base + XM2MSC_SRCIMGBUF0),
663 xm2msc_readreg(base + XM2MSC_SRCIMGBUF0 + 4),
664 xm2msc_readreg(base + XM2MSC_SRCIMGBUF1),
665 xm2msc_readreg(base + XM2MSC_SRCIMGBUF1 + 4));
666 dev_dbg(dev, "WOUT HOUT OUTPIXELFMT OUTSTRIDE DBUF0L/H DBUF1L/H\n");
667 dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
668 xm2msc_readreg(base + XM2MSC_WIDTHOUT),
669 xm2msc_readreg(base + XM2MSC_HEIGHTOUT),
670 xm2msc_readreg(base + XM2MSC_OUTPIXELFMT),
671 xm2msc_readreg(base + XM2MSC_OUTSTRIDE),
672 xm2msc_readreg(base + XM2MSC_DSTIMGBUF0),
673 xm2msc_readreg(base + XM2MSC_DSTIMGBUF0 + 4),
674 xm2msc_readreg(base + XM2MSC_DSTIMGBUF1),
675 xm2msc_readreg(base + XM2MSC_DSTIMGBUF1 + 4));
677 dev_dbg(dev, "LINERATE PIXELRATE\n");
678 dev_dbg(dev, "0x%x 0x%x\n",
679 xm2msc_readreg(base + XM2MSC_LINERATE),
680 xm2msc_readreg(base + XM2MSC_PIXELRATE));
684 xm2msc_pr_allchanreg(struct xm2m_msc_dev *xm2msc)
687 struct xm2msc_chan_ctx *chan_ctx;
688 struct device *dev = xm2msc->dev;
690 xm2msc_pr_screg(xm2msc->dev, xm2msc->regs);
692 for (i = 0; i < xm2msc->running_chan; i++) {
693 chan_ctx = &xm2msc->xm2msc_chan[i];
694 dev_dbg(dev, "Regs val for channel %d\n", i);
695 dev_dbg(dev, "______________________________________________\n");
696 xm2msc_pr_chanreg(dev, chan_ctx->regs);
697 dev_dbg(dev, "______________________________________________\n");
701 static inline bool xm2msc_testbit(int num, u32 *addr)
703 return (*addr & BIT(num));
706 static inline void xm2msc_setbit(int num, u32 *addr)
711 static inline void xm2msc_clrbit(int num, u32 *addr)
716 static void xm2msc_stop(struct xm2m_msc_dev *xm2msc)
718 void __iomem *base = xm2msc->regs;
719 u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
721 data &= ~XM2MSC_AP_CTRL_START;
722 xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
725 static void xm2msc_start(struct xm2m_msc_dev *xm2msc)
727 void __iomem *base = xm2msc->regs;
728 u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
730 data |= XM2MSC_AP_CTRL_START;
731 xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
734 static void xm2msc_set_chan(struct xm2msc_chan_ctx *ctx, bool state)
736 mutex_lock(&ctx->xm2msc_dev->mutex);
738 xm2msc_setbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
740 xm2msc_clrbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
741 mutex_unlock(&ctx->xm2msc_dev->mutex);
745 xm2msc_set_chan_stream(struct xm2msc_chan_ctx *ctx, bool state, int type)
749 if (type == XM2MSC_CHAN_OUT)
750 ptr = &ctx->xm2msc_dev->cap_streamed_chan;
752 ptr = &ctx->xm2msc_dev->out_streamed_chan;
754 spin_lock(&ctx->xm2msc_dev->lock);
756 xm2msc_setbit(ctx->num, ptr);
758 xm2msc_clrbit(ctx->num, ptr);
760 spin_unlock(&ctx->xm2msc_dev->lock);
764 xm2msc_chk_chan_stream(struct xm2msc_chan_ctx *ctx, int type)
769 if (type == XM2MSC_CHAN_OUT)
770 ptr = &ctx->xm2msc_dev->cap_streamed_chan;
772 ptr = &ctx->xm2msc_dev->out_streamed_chan;
774 mutex_lock(&ctx->xm2msc_dev->mutex);
775 ret = xm2msc_testbit(ctx->num, ptr);
776 mutex_unlock(&ctx->xm2msc_dev->mutex);
781 static void xm2msc_set_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
783 xm2msc_setbit(index, &xm2msc->supported_fmt);
786 static int xm2msc_chk_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
788 return xm2msc_testbit(index, &xm2msc->supported_fmt);
791 static void xm2msc_reset(struct xm2m_msc_dev *xm2msc)
793 gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_ASSERT);
794 gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_DEASSERT);
800 static int xm2msc_job_ready(void *priv)
802 struct xm2msc_chan_ctx *chan_ctx = priv;
804 if ((v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) &&
805 (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0))
810 static bool xm2msc_alljob_ready(struct xm2m_msc_dev *xm2msc)
812 struct xm2msc_chan_ctx *chan_ctx;
815 for (chan = 0; chan < xm2msc->running_chan; chan++) {
816 chan_ctx = &xm2msc->xm2msc_chan[chan];
818 if (!xm2msc_job_ready((void *)chan_ctx)) {
819 dev_info(xm2msc->dev, "chan %d not ready\n",
828 static void xm2msc_chan_abort_bufs(struct xm2msc_chan_ctx *chan_ctx)
830 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
831 struct vb2_v4l2_buffer *dst_vb, *src_vb;
833 spin_lock(&xm2msc->lock);
834 dev_dbg(xm2msc->dev, "aborting all buffers\n");
836 while (v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) {
837 src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
838 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
841 while (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0) {
842 dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
843 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
846 v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
847 spin_unlock(&xm2msc->lock);
850 static void xm2msc_job_abort(void *priv)
852 struct xm2msc_chan_ctx *chan_ctx = priv;
854 xm2msc_chan_abort_bufs(chan_ctx);
857 * Stream off the channel as job_abort may not always
858 * be called after streamoff
860 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
861 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
864 static int xm2msc_set_bufaddr(struct xm2m_msc_dev *xm2msc)
867 struct xm2msc_chan_ctx *chan_ctx;
868 struct vb2_v4l2_buffer *src_vb, *dst_vb;
870 dma_addr_t src_luma, dst_luma;
871 dma_addr_t src_croma, dst_croma;
873 if (!xm2msc_alljob_ready(xm2msc))
876 for (chan = 0; chan < xm2msc->running_chan; chan++) {
877 chan_ctx = &xm2msc->xm2msc_chan[chan];
878 base = chan_ctx->regs;
880 src_vb = v4l2_m2m_next_src_buf(chan_ctx->m2m_ctx);
881 dst_vb = v4l2_m2m_next_dst_buf(chan_ctx->m2m_ctx);
883 if (!src_vb || !dst_vb) {
884 v4l2_err(&xm2msc->v4l2_dev, "buffer not found chan = %d\n",
889 src_luma = vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 0);
890 dst_luma = vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 0);
892 if (chan_ctx->q_data[XM2MSC_CHAN_OUT].nplanes == 2)
894 vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 1);
898 if (chan_ctx->q_data[XM2MSC_CHAN_CAP].nplanes == 2)
900 vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 1);
904 if (xm2msc->dma_addr_size == 64 &&
905 sizeof(dma_addr_t) == sizeof(u64)) {
906 xm2msc_write64reg(base + XM2MSC_SRCIMGBUF0, src_luma);
907 xm2msc_write64reg(base + XM2MSC_SRCIMGBUF1, src_croma);
908 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF0, dst_luma);
909 xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1, dst_croma);
911 xm2msc_writereg(base + XM2MSC_SRCIMGBUF0, src_luma);
912 xm2msc_writereg(base + XM2MSC_SRCIMGBUF1, src_croma);
913 xm2msc_writereg(base + XM2MSC_DSTIMGBUF0, dst_luma);
914 xm2msc_writereg(base + XM2MSC_DSTIMGBUF1, dst_croma);
920 static void xm2msc_job_finish(struct xm2m_msc_dev *xm2msc)
924 for (chan = 0; chan < xm2msc->running_chan; chan++) {
925 struct xm2msc_chan_ctx *chan_ctx;
927 chan_ctx = &xm2msc->xm2msc_chan[chan];
928 v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
932 static void xm2msc_job_done(struct xm2m_msc_dev *xm2msc)
936 for (chan = 0; chan < xm2msc->running_chan; chan++) {
937 struct xm2msc_chan_ctx *chan_ctx;
938 struct vb2_v4l2_buffer *src_vb, *dst_vb;
941 chan_ctx = &xm2msc->xm2msc_chan[chan];
943 src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
944 dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
946 if (src_vb && dst_vb) {
947 dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
948 dst_vb->timecode = src_vb->timecode;
949 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
951 src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
953 spin_lock_irqsave(&xm2msc->lock, flags);
954 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
955 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
956 spin_unlock_irqrestore(&xm2msc->lock, flags);
961 static void xm2msc_device_run(void *priv)
963 struct xm2msc_chan_ctx *chan_ctx = priv;
964 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
965 void __iomem *base = xm2msc->regs;
969 spin_lock_irqsave(&xm2msc->lock, flags);
970 if (xm2msc->device_busy) {
971 spin_unlock_irqrestore(&xm2msc->lock, flags);
974 xm2msc->device_busy = true;
976 if (xm2msc->running_chan != NUM_STREAM(xm2msc)) {
977 dev_dbg(xm2msc->dev, "Running chan was %d\n",
978 xm2msc->running_chan);
979 xm2msc->running_chan = NUM_STREAM(xm2msc);
981 /* IP need reset for updating of XM2MSC_NUM_OUT */
982 xm2msc_reset(xm2msc);
983 xm2msc_writereg(base + XM2MSC_NUM_OUTS, xm2msc->running_chan);
984 xm2msc_program_allchan(xm2msc);
986 spin_unlock_irqrestore(&xm2msc->lock, flags);
988 dev_dbg(xm2msc->dev, "Running chan = %d\n", xm2msc->running_chan);
989 if (!xm2msc->running_chan) {
990 xm2msc->device_busy = false;
994 ret = xm2msc_set_bufaddr(xm2msc);
997 * All channel does not have buffer
998 * Currently we do not handle the removal of any Intermediate
999 * channel while streaming is going on
1001 if (xm2msc->out_streamed_chan || xm2msc->cap_streamed_chan)
1002 dev_err(xm2msc->dev,
1003 "Buffer not available, streaming chan 0x%x\n",
1004 xm2msc->cap_streamed_chan);
1006 xm2msc->device_busy = false;
1010 xm2msc_writereg(base + XM2MSC_GIE, XM2MSC_GIE_EN);
1011 xm2msc_writereg(base + XM2MSC_IER, XM2MSC_ISR_DONE);
1013 xm2msc_pr_status(xm2msc, __func__);
1014 xm2msc_pr_screg(xm2msc->dev, base);
1015 xm2msc_pr_allchanreg(xm2msc);
1017 xm2msc_start(xm2msc);
1019 xm2msc->isr_wait = true;
1020 wait_event(xm2msc->isr_finished, !xm2msc->isr_wait);
1022 xm2msc_job_done(xm2msc);
1024 xm2msc->device_busy = false;
1026 if (xm2msc_alljob_ready(xm2msc))
1027 xm2msc_device_run(xm2msc->xm2msc_chan);
1029 xm2msc_job_finish(xm2msc);
1032 static irqreturn_t xm2msc_isr(int irq, void *data)
1034 struct xm2m_msc_dev *xm2msc = (struct xm2m_msc_dev *)data;
1035 void __iomem *base = xm2msc->regs;
1038 status = xm2msc_readreg(base + XM2MSC_ISR);
1039 if (!(status & XM2MSC_ISR_DONE))
1042 xm2msc_writereg(base + XM2MSC_ISR, status & XM2MSC_ISR_DONE);
1044 xm2msc_stop(xm2msc);
1046 xm2msc->isr_wait = false;
1047 wake_up(&xm2msc->isr_finished);
1052 static int xm2msc_streamon(struct file *file, void *fh,
1053 enum v4l2_buf_type type)
1055 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1057 return v4l2_m2m_streamon(file, chan_ctx->m2m_ctx, type);
1060 static int xm2msc_streamoff(struct file *file, void *fh,
1061 enum v4l2_buf_type type)
1063 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1065 return v4l2_m2m_streamoff(file, chan_ctx->m2m_ctx, type);
1068 static int xm2msc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1070 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1072 return v4l2_m2m_qbuf(file, chan_ctx->m2m_ctx, buf);
1075 static int xm2msc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1077 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1079 return v4l2_m2m_dqbuf(file, chan_ctx->m2m_ctx, buf);
1082 static int xm2msc_expbuf(struct file *file, void *fh,
1083 struct v4l2_exportbuffer *eb)
1085 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1087 return v4l2_m2m_expbuf(file, chan_ctx->m2m_ctx, eb);
1090 static int xm2msc_createbufs(struct file *file, void *fh,
1091 struct v4l2_create_buffers *cb)
1093 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1095 return v4l2_m2m_create_bufs(file, chan_ctx->m2m_ctx, cb);
1098 static int xm2msc_reqbufs(struct file *file, void *fh,
1099 struct v4l2_requestbuffers *reqbufs)
1101 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1103 return v4l2_m2m_reqbufs(file, chan_ctx->m2m_ctx, reqbufs);
1106 static int xm2msc_querybuf(struct file *file, void *fh,
1107 struct v4l2_buffer *buf)
1109 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1111 return v4l2_m2m_querybuf(file, chan_ctx->m2m_ctx, buf);
1115 xm2msc_cal_stride(unsigned int width, enum xm2msc_pix_fmt xfmt)
1117 unsigned int stride;
1119 /* Stride in Bytes = (Width × Bytes per Pixel); */
1120 /* TODO: The Width value must be a multiple of Pixels per Clock */
1122 case XILINX_M2MSC_FMT_RGBX8:
1123 case XILINX_M2MSC_FMT_YUVX8:
1124 case XILINX_M2MSC_FMT_RGBX10:
1125 case XILINX_M2MSC_FMT_YUVX10:
1126 case XILINX_M2MSC_FMT_BGRX8:
1129 case XILINX_M2MSC_FMT_YUYV8:
1130 case XILINX_M2MSC_FMT_UYVY8:
1133 case XILINX_M2MSC_FMT_Y_UV8:
1134 case XILINX_M2MSC_FMT_Y_UV8_420:
1135 case XILINX_M2MSC_FMT_Y8:
1138 case XILINX_M2MSC_FMT_RGB8:
1139 case XILINX_M2MSC_FMT_YUV8:
1140 case XILINX_M2MSC_FMT_BGR8:
1143 case XILINX_M2MSC_FMT_Y_UV10:
1144 case XILINX_M2MSC_FMT_Y_UV10_420:
1145 case XILINX_M2MSC_FMT_Y10:
1146 /* 4 bytes per 3 pixels */
1147 stride = DIV_ROUND_UP(width * 4, 3);
1157 vidioc_try_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1159 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1160 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1161 struct xm2msc_q_data *q_data;
1162 struct vb2_queue *vq;
1165 if (pix->width < XM2MSC_MIN_WIDTH || pix->width > xm2msc->max_wd ||
1166 pix->height < XM2MSC_MIN_HEIGHT || pix->height > xm2msc->max_ht)
1167 dev_dbg(xm2msc->dev,
1168 "Wrong input parameters %d, wxh: %dx%d.\n",
1169 f->type, f->fmt.pix.width, f->fmt.pix.height);
1171 * V4L2 specification suggests the driver corrects the
1172 * format struct if any of the dimensions is unsupported
1174 if (pix->height < XM2MSC_MIN_HEIGHT)
1175 pix->height = XM2MSC_MIN_HEIGHT;
1176 else if (pix->height > xm2msc->max_ht)
1177 pix->height = xm2msc->max_ht;
1179 if (pix->width < XM2MSC_MIN_WIDTH)
1180 pix->width = XM2MSC_MIN_WIDTH;
1181 else if (pix->width > xm2msc->max_wd)
1182 pix->width = xm2msc->max_wd;
1184 vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
1188 q_data = get_q_data(chan_ctx, f->type);
1192 if (vb2_is_busy(vq)) {
1193 v4l2_err(&xm2msc->v4l2_dev,
1194 "%s queue busy\n", __func__);
1198 q_data->fmt = find_format(f);
1199 index = find_format_index(f);
1200 if (!q_data->fmt || index == ARRAY_SIZE(formats) ||
1201 !xm2msc_chk_fmt(xm2msc, index)) {
1202 v4l2_err(&xm2msc->v4l2_dev,
1203 "Couldn't set format type %d, wxh: %dx%d. ",
1204 f->type, f->fmt.pix.width, f->fmt.pix.height);
1205 v4l2_err(&xm2msc->v4l2_dev,
1206 "fmt: %d, field: %d\n",
1207 f->fmt.pix.pixelformat, f->fmt.pix.field);
1215 vidioc_s_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1217 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1218 struct xm2msc_q_data *q_data = get_q_data(chan_ctx, f->type);
1221 q_data = get_q_data(chan_ctx, f->type);
1223 q_data->width = pix->width;
1224 q_data->height = pix->height;
1225 q_data->stride = xm2msc_cal_stride(pix->width,
1226 q_data->fmt->xm2msc_fmt);
1227 q_data->colorspace = pix->colorspace;
1228 q_data->field = pix->field;
1229 q_data->nplanes = q_data->fmt->num_planes;
1231 for (i = 0; i < q_data->nplanes; i++) {
1232 q_data->bytesperline[i] = q_data->stride;
1233 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1234 q_data->sizeimage[i] = q_data->stride * q_data->height;
1235 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
1238 xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data,
1239 chan_ctx->num, f->type, __func__);
1244 static int xm2msc_try_fmt_vid_out(struct file *file, void *fh,
1245 struct v4l2_format *f)
1247 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1249 return vidioc_try_fmt(chan_ctx, f);
/* CAPTURE-queue TRY_FMT: delegate to the shared try-format helper. */
static int xm2msc_try_fmt_vid_cap(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	return vidioc_try_fmt(fh_to_chanctx(fh), f);
}
/*
 * CAPTURE-queue S_FMT: validate via try_fmt first, then commit.
 * Propagates the try_fmt error instead of silently applying a bad format.
 */
static int xm2msc_s_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
	int ret;

	ret = xm2msc_try_fmt_vid_cap(file, fh, f);
	if (ret)
		return ret;

	return vidioc_s_fmt(chan_ctx, f);
}
/*
 * OUTPUT-queue S_FMT: validate via try_fmt first, then commit.
 * Propagates the try_fmt error instead of silently applying a bad format.
 */
static int xm2msc_s_fmt_vid_out(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
	int ret;

	ret = xm2msc_try_fmt_vid_out(file, fh, f);
	if (ret)
		return ret;

	return vidioc_s_fmt(chan_ctx, f);
}
1285 static int vidioc_g_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
1287 struct vb2_queue *vq;
1288 struct xm2msc_q_data *q_data;
1289 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1292 vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
1296 q_data = get_q_data(chan_ctx, f->type);
1300 pix->width = q_data->width;
1301 pix->height = q_data->height;
1302 pix->field = V4L2_FIELD_NONE;
1303 pix->pixelformat = q_data->fmt->fourcc;
1304 pix->colorspace = q_data->colorspace;
1305 pix->num_planes = q_data->nplanes;
1307 for (i = 0; i < pix->num_planes; i++) {
1308 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1309 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
/* OUTPUT-queue G_FMT: delegate to the shared get-format helper. */
static int xm2msc_g_fmt_vid_out(struct file *file, void *fh,
				struct v4l2_format *f)
{
	return vidioc_g_fmt(fh_to_chanctx(fh), f);
}
/* CAPTURE-queue G_FMT: delegate to the shared get-format helper. */
static int xm2msc_g_fmt_vid_cap(struct file *file, void *fh,
				struct v4l2_format *f)
{
	return vidioc_g_fmt(fh_to_chanctx(fh), f);
}
1331 static int enum_fmt(struct xm2m_msc_dev *xm2msc, struct v4l2_fmtdesc *f)
1333 const struct xm2msc_fmt *fmt;
1334 unsigned int i, enabled = 0;
1336 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1337 if (xm2msc_chk_fmt(xm2msc, i) && enabled++ == f->index)
1341 if (i == ARRAY_SIZE(formats))
1342 /* Format not found */
1347 strlcpy(f->description, fmt->name,
1348 sizeof(f->description));
1349 f->pixelformat = fmt->fourcc;
1354 static int xm2msc_enum_fmt_vid_cap(struct file *file, void *fh,
1355 struct v4l2_fmtdesc *f)
1357 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1359 if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1362 return enum_fmt(chan_ctx->xm2msc_dev, f);
1365 static int xm2msc_enum_fmt_vid_out(struct file *file, void *fh,
1366 struct v4l2_fmtdesc *f)
1368 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
1370 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1373 return enum_fmt(chan_ctx->xm2msc_dev, f);
1376 static int xm2msc_querycap(struct file *file, void *fh,
1377 struct v4l2_capability *cap)
1379 strncpy(cap->driver, XM2MSC_DRIVER_NAME, sizeof(cap->driver) - 1);
1380 strncpy(cap->card, XM2MSC_DRIVER_NAME, sizeof(cap->card) - 1);
1381 snprintf(cap->bus_info, sizeof(cap->bus_info),
1382 "platform:%s", XM2MSC_DRIVER_NAME);
1384 * This is only a mem-to-mem video device. The STREAMING
1385 * device capability flags are left only for compatibility
1386 * and are scheduled for removal.
1388 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
1389 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1393 static int xm2msc_queue_setup(struct vb2_queue *vq,
1394 unsigned int *nbuffers, unsigned int *nplanes,
1395 unsigned int sizes[], struct device *alloc_devs[])
1398 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vq);
1399 struct xm2msc_q_data *q_data;
1401 q_data = get_q_data(chan_ctx, vq->type);
1405 *nplanes = q_data->nplanes;
1407 for (i = 0; i < *nplanes; i++)
1408 sizes[i] = q_data->sizeimage[i];
1410 dev_dbg(chan_ctx->xm2msc_dev->dev, "get %d buffer(s) of size %d",
1411 *nbuffers, sizes[0]);
1412 if (q_data->nplanes == 2)
1413 dev_dbg(chan_ctx->xm2msc_dev->dev, " and %d\n", sizes[1]);
1418 static int xm2msc_buf_prepare(struct vb2_buffer *vb)
1420 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
1421 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1422 struct xm2msc_q_data *q_data;
1423 unsigned int i, num_planes;
1425 q_data = get_q_data(chan_ctx, vb->vb2_queue->type);
1428 num_planes = q_data->nplanes;
1430 for (i = 0; i < num_planes; i++) {
1431 if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
1432 v4l2_err(&xm2msc->v4l2_dev, "data will not fit into plane ");
1433 v4l2_err(&xm2msc->v4l2_dev, "(%lu < %lu)\n",
1434 vb2_plane_size(vb, i),
1435 (long)q_data->sizeimage[i]);
1440 for (i = 0; i < num_planes; i++)
1441 vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
1446 static void xm2msc_buf_queue(struct vb2_buffer *vb)
1448 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1449 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
1451 v4l2_m2m_buf_queue(chan_ctx->m2m_ctx, vbuf);
1454 static void xm2msc_return_all_buffers(struct xm2msc_chan_ctx *chan_ctx,
1455 struct vb2_queue *q,
1456 enum vb2_buffer_state state)
1458 struct vb2_v4l2_buffer *vb;
1459 unsigned long flags;
1462 if (V4L2_TYPE_IS_OUTPUT(q->type))
1463 vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
1465 vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
1468 spin_lock_irqsave(&chan_ctx->xm2msc_dev->lock, flags);
1469 v4l2_m2m_buf_done(vb, state);
1470 spin_unlock_irqrestore(&chan_ctx->xm2msc_dev->lock, flags);
1474 static int xm2msc_start_streaming(struct vb2_queue *q, unsigned int count)
1476 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
1477 static struct xm2msc_q_data *q_data;
1480 if (V4L2_TYPE_IS_OUTPUT(q->type))
1481 xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_OUT);
1483 xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_CAP);
1485 xm2msc_set_chan_params(chan_ctx, q->type);
1487 if (xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_CAP) &&
1488 xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_OUT))
1489 xm2msc_set_chan_com_params(chan_ctx);
1491 type = V4L2_TYPE_IS_OUTPUT(q->type) ?
1492 V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
1493 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1494 q_data = get_q_data(chan_ctx, type);
1495 xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data, chan_ctx->num,
1497 xm2msc_pr_status(chan_ctx->xm2msc_dev, __func__);
1502 static void xm2msc_stop_streaming(struct vb2_queue *q)
1504 struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
1506 xm2msc_return_all_buffers(chan_ctx, q, VB2_BUF_STATE_ERROR);
1508 if (V4L2_TYPE_IS_OUTPUT(q->type))
1509 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
1511 xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
1514 static const struct vb2_ops xm2msc_qops = {
1515 .queue_setup = xm2msc_queue_setup,
1516 .buf_prepare = xm2msc_buf_prepare,
1517 .buf_queue = xm2msc_buf_queue,
1518 .start_streaming = xm2msc_start_streaming,
1519 .stop_streaming = xm2msc_stop_streaming,
1522 static int queue_init(void *priv, struct vb2_queue *src_vq,
1523 struct vb2_queue *dst_vq)
1525 struct xm2msc_chan_ctx *chan_ctx = priv;
1526 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1529 memset(src_vq, 0, sizeof(*src_vq));
1530 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1531 src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
1532 src_vq->drv_priv = chan_ctx;
1533 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1534 src_vq->ops = &xm2msc_qops;
1535 src_vq->mem_ops = &vb2_dma_contig_memops;
1536 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1537 src_vq->lock = &xm2msc->dev_mutex;
1538 src_vq->dev = xm2msc->v4l2_dev.dev;
1540 ret = vb2_queue_init(src_vq);
1544 memset(dst_vq, 0, sizeof(*dst_vq));
1545 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1546 dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1547 dst_vq->drv_priv = chan_ctx;
1548 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1549 dst_vq->ops = &xm2msc_qops;
1550 dst_vq->mem_ops = &vb2_dma_contig_memops;
1551 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1552 dst_vq->lock = &xm2msc->dev_mutex;
1553 dst_vq->dev = xm2msc->v4l2_dev.dev;
1555 return vb2_queue_init(dst_vq);
1558 static const struct v4l2_ioctl_ops xm2msc_ioctl_ops = {
1559 .vidioc_querycap = xm2msc_querycap,
1561 .vidioc_enum_fmt_vid_cap_mplane = xm2msc_enum_fmt_vid_cap,
1562 .vidioc_g_fmt_vid_cap_mplane = xm2msc_g_fmt_vid_cap,
1563 .vidioc_try_fmt_vid_cap_mplane = xm2msc_try_fmt_vid_cap,
1564 .vidioc_s_fmt_vid_cap_mplane = xm2msc_s_fmt_vid_cap,
1566 .vidioc_enum_fmt_vid_out_mplane = xm2msc_enum_fmt_vid_out,
1567 .vidioc_g_fmt_vid_out_mplane = xm2msc_g_fmt_vid_out,
1568 .vidioc_try_fmt_vid_out_mplane = xm2msc_try_fmt_vid_out,
1569 .vidioc_s_fmt_vid_out_mplane = xm2msc_s_fmt_vid_out,
1571 .vidioc_reqbufs = xm2msc_reqbufs,
1572 .vidioc_querybuf = xm2msc_querybuf,
1573 .vidioc_expbuf = xm2msc_expbuf,
1574 .vidioc_create_bufs = xm2msc_createbufs,
1576 .vidioc_qbuf = xm2msc_qbuf,
1577 .vidioc_dqbuf = xm2msc_dqbuf,
1579 .vidioc_streamon = xm2msc_streamon,
1580 .vidioc_streamoff = xm2msc_streamoff,
1583 static int xm2msc_open(struct file *file)
1585 struct xm2m_msc_dev *xm2msc = video_drvdata(file);
1586 struct xm2msc_chan_ctx *chan_ctx = NULL;
1590 if (mutex_lock_interruptible(&xm2msc->dev_mutex))
1591 return -ERESTARTSYS;
1593 minor = iminor(file_inode(file));
1595 for (chan = 0; chan < xm2msc->max_chan; chan++) {
1596 chan_ctx = &xm2msc->xm2msc_chan[chan];
1598 if ((chan_ctx->status & CHAN_ATTACHED) &&
1599 chan_ctx->minor == minor)
1603 if (chan == xm2msc->max_chan) {
1604 v4l2_err(&xm2msc->v4l2_dev,
1605 "%s Chan not found with minor = %d\n",
1611 /* Already opened, do not allow same channel
1612 * to be open more then once
1614 if (chan_ctx->status & CHAN_OPENED) {
1615 v4l2_warn(&xm2msc->v4l2_dev,
1616 "%s Chan already opened for minor = %d\n",
1622 v4l2_fh_init(&chan_ctx->fh, &chan_ctx->vfd);
1623 file->private_data = &chan_ctx->fh;
1624 v4l2_fh_add(&chan_ctx->fh);
1626 chan_ctx->m2m_ctx = v4l2_m2m_ctx_init(chan_ctx->m2m_dev,
1627 chan_ctx, &queue_init);
1628 if (IS_ERR(chan_ctx->m2m_ctx)) {
1629 ret = PTR_ERR(chan_ctx->m2m_ctx);
1630 v4l2_err(&xm2msc->v4l2_dev,
1631 "%s Chan M2M CTX not creted for minor %d\n",
1636 chan_ctx->fh.m2m_ctx = chan_ctx->m2m_ctx;
1637 chan_ctx->status |= CHAN_OPENED;
1638 chan_ctx->xm2msc_dev = xm2msc;
1639 xm2msc_set_chan(chan_ctx, true);
1641 v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance created\n", chan);
1643 mutex_unlock(&xm2msc->dev_mutex);
1644 xm2msc_pr_chanctx(chan_ctx, __func__);
1645 xm2msc_pr_status(xm2msc, __func__);
1649 v4l2_fh_del(&chan_ctx->fh);
1650 v4l2_fh_exit(&chan_ctx->fh);
1652 mutex_unlock(&xm2msc->dev_mutex);
1653 xm2msc_pr_chanctx(chan_ctx, __func__);
1654 xm2msc_pr_status(xm2msc, __func__);
1658 static int xm2msc_release(struct file *file)
1660 struct xm2m_msc_dev *xm2msc = video_drvdata(file);
1661 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
1663 if (mutex_lock_interruptible(&xm2msc->dev_mutex))
1664 return -ERESTARTSYS;
1666 v4l2_m2m_ctx_release(chan_ctx->m2m_ctx);
1667 v4l2_fh_del(&chan_ctx->fh);
1668 v4l2_fh_exit(&chan_ctx->fh);
1669 chan_ctx->status &= ~CHAN_OPENED;
1670 xm2msc_set_chan(chan_ctx, false);
1672 v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance released\n",
1675 mutex_unlock(&xm2msc->dev_mutex);
1679 static unsigned int xm2msc_poll(struct file *file,
1680 struct poll_table_struct *wait)
1682 struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
1683 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1686 mutex_lock(&xm2msc->dev_mutex);
1687 ret = v4l2_m2m_poll(file, chan_ctx->m2m_ctx, wait);
1688 mutex_unlock(&xm2msc->dev_mutex);
1693 static int xm2msc_mmap(struct file *file, struct vm_area_struct *vma)
1695 struct xm2msc_chan_ctx *chan_ctx = file->private_data;
1696 struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
1699 mutex_lock(&xm2msc->dev_mutex);
1700 ret = v4l2_m2m_mmap(file, chan_ctx->m2m_ctx, vma);
1702 mutex_unlock(&xm2msc->dev_mutex);
1706 static const struct v4l2_file_operations xm2msc_fops = {
1707 .owner = THIS_MODULE,
1708 .open = xm2msc_open,
1709 .release = xm2msc_release,
1710 .poll = xm2msc_poll,
1711 .unlocked_ioctl = video_ioctl2,
1712 .mmap = xm2msc_mmap,
1715 static const struct video_device xm2msc_videodev = {
1716 .name = XM2MSC_DRIVER_NAME,
1717 .fops = &xm2msc_fops,
1718 .ioctl_ops = &xm2msc_ioctl_ops,
1720 .release = video_device_release_empty,
1721 .vfl_dir = VFL_DIR_M2M,
1724 static const struct v4l2_m2m_ops xm2msc_m2m_ops = {
1725 .device_run = xm2msc_device_run,
1726 .job_ready = xm2msc_job_ready,
1727 .job_abort = xm2msc_job_abort,
1730 static int xm2msc_parse_of(struct platform_device *pdev,
1731 struct xm2m_msc_dev *xm2msc)
1733 struct resource *res;
1734 struct device *dev = &pdev->dev;
1735 struct device_node *node = dev->of_node;
1737 const char *vid_fmts[ARRAY_SIZE(formats)];
1741 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1742 xm2msc->regs = devm_ioremap_resource(dev, res);
1743 if (IS_ERR((__force void *)xm2msc->regs))
1744 return PTR_ERR((__force const void *)xm2msc->regs);
1746 dev_dbg(dev, "IO Mem 0x%llx mapped at %p\n", res->start, xm2msc->regs);
1748 ret = of_property_read_u32(node, "xlnx,max-chan",
1753 if (xm2msc->max_chan < XM2MSC_MIN_CHAN ||
1754 xm2msc->max_chan > XM2MSC_MAX_CHAN) {
1756 "Invalid maximum scaler channels : %d",
1761 ret = of_property_read_u32(node, "xlnx,max-width",
1765 "missing xlnx,max-width prop\n");
1769 if (xm2msc->max_wd < XM2MSC_MIN_WIDTH ||
1770 xm2msc->max_wd > XM2MSC_MAX_WIDTH) {
1771 dev_err(dev, "Invalid width : %d",
1776 ret = of_property_read_u32(node, "xlnx,max-height",
1779 dev_err(dev, "missing xlnx,max-height prop\n");
1783 if (xm2msc->max_ht < XM2MSC_MIN_HEIGHT ||
1784 xm2msc->max_ht > XM2MSC_MAX_HEIGHT) {
1785 dev_err(dev, "Invalid height : %d",
1790 ret = of_property_read_u32(node, "xlnx,dma-addr-width",
1791 &xm2msc->dma_addr_size);
1792 if (ret || (xm2msc->dma_addr_size != 32 &&
1793 xm2msc->dma_addr_size != 64)) {
1794 dev_err(dev, "missing/invalid addr width dts prop\n");
1798 xm2msc->irq = irq_of_parse_and_map(node, 0);
1799 if (xm2msc->irq < 0) {
1800 dev_err(dev, "Unable to get IRQ");
1804 dev_dbg(dev, "Max Channel Supported = %d\n", xm2msc->max_chan);
1805 dev_dbg(dev, "DMA Addr width Supported = %d\n", xm2msc->dma_addr_size);
1806 dev_dbg(dev, "Max col/row Supported = (%d) / (%d)\n",
1807 xm2msc->max_wd, xm2msc->max_ht);
1808 /* read supported video formats and update internal table */
1809 hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
1811 ret = of_property_read_string_array(node, "xlnx,vid-formats",
1812 vid_fmts, hw_vid_fmt_cnt);
1815 "Missing or invalid xlnx,vid-formats dts prop\n");
1819 dev_dbg(dev, "Supported format = ");
1820 for (i = 0; i < hw_vid_fmt_cnt; i++) {
1821 const char *vid_fmt_name = vid_fmts[i];
1823 for (j = 0; j < ARRAY_SIZE(formats); j++) {
1824 const char *dts_name = formats[j].name;
1826 if (strcmp(vid_fmt_name, dts_name))
1828 dev_dbg(dev, "%s ", dts_name);
1830 xm2msc_set_fmt(xm2msc, j);
1834 xm2msc->rst_gpio = devm_gpiod_get(dev, "reset",
1836 if (IS_ERR(xm2msc->rst_gpio)) {
1837 ret = PTR_ERR(xm2msc->rst_gpio);
1838 if (ret == -EPROBE_DEFER)
1840 "Probe deferred due to GPIO reset defer\n");
1843 "Unable to locate reset property in dt\n");
1850 static void xm2msc_unreg_video_n_m2m(struct xm2m_msc_dev *xm2msc)
1852 struct xm2msc_chan_ctx *chan_ctx;
1855 for (chan = 0; chan < xm2msc->max_chan; chan++) {
1856 chan_ctx = &xm2msc->xm2msc_chan[chan];
1857 if (!(chan_ctx->status & CHAN_ATTACHED))
1858 break; /*We register video sequentially */
1859 video_unregister_device(&chan_ctx->vfd);
1860 chan_ctx->status &= ~CHAN_ATTACHED;
1862 if (!IS_ERR(chan_ctx->m2m_dev))
1863 v4l2_m2m_release(chan_ctx->m2m_dev);
1867 static int xm2m_msc_probe(struct platform_device *pdev)
1870 struct xm2m_msc_dev *xm2msc;
1871 struct xm2msc_chan_ctx *chan_ctx;
1872 struct video_device *vfd;
1875 xm2msc = devm_kzalloc(&pdev->dev, sizeof(*xm2msc), GFP_KERNEL);
1879 ret = xm2msc_parse_of(pdev, xm2msc);
1883 xm2msc->dev = &pdev->dev;
1885 xm2msc_reset(xm2msc);
1887 spin_lock_init(&xm2msc->lock);
1889 ret = v4l2_device_register(&pdev->dev, &xm2msc->v4l2_dev);
1893 for (chan = 0; chan < xm2msc->max_chan; chan++) {
1894 chan_ctx = &xm2msc->xm2msc_chan[chan];
1896 vfd = &chan_ctx->vfd;
1897 *vfd = xm2msc_videodev;
1898 vfd->lock = &xm2msc->dev_mutex;
1899 vfd->v4l2_dev = &xm2msc->v4l2_dev;
1901 ret = video_register_device(vfd, VFL_TYPE_GRABBER, chan);
1903 v4l2_err(&xm2msc->v4l2_dev,
1904 "Failed to register video dev for chan %d\n",
1909 chan_ctx->status = CHAN_ATTACHED;
1911 video_set_drvdata(vfd, xm2msc);
1912 snprintf(vfd->name, sizeof(vfd->name),
1913 "%s", xm2msc_videodev.name);
1914 v4l2_info(&xm2msc->v4l2_dev,
1915 " Device registered as /dev/video%d\n", vfd->num);
1917 dev_dbg(xm2msc->dev, "%s Device registered as /dev/video%d\n",
1918 __func__, vfd->num);
1920 chan_ctx->m2m_dev = v4l2_m2m_init(&xm2msc_m2m_ops);
1921 if (IS_ERR(chan_ctx->m2m_dev)) {
1922 v4l2_err(&xm2msc->v4l2_dev,
1923 "Failed to init mem2mem device for chan %d\n",
1925 ret = PTR_ERR(chan_ctx->m2m_dev);
1928 chan_ctx->xm2msc_dev = xm2msc;
1929 chan_ctx->regs = xm2msc->regs + XM2MSC_CHAN_REGS_START(chan);
1930 chan_ctx->num = chan;
1931 chan_ctx->minor = vfd->minor;
1932 xm2msc_pr_chanctx(chan_ctx, __func__);
1935 mutex_init(&xm2msc->dev_mutex);
1936 mutex_init(&xm2msc->mutex);
1937 init_waitqueue_head(&xm2msc->isr_finished);
1939 ret = devm_request_irq(&pdev->dev, xm2msc->irq,
1940 xm2msc_isr, IRQF_SHARED,
1941 XM2MSC_DRIVER_NAME, xm2msc);
1943 dev_err(&pdev->dev, "Unable to register IRQ\n");
1947 platform_set_drvdata(pdev, xm2msc);
1952 xm2msc_unreg_video_n_m2m(xm2msc);
1953 v4l2_device_unregister(&xm2msc->v4l2_dev);
1957 static int xm2m_msc_remove(struct platform_device *pdev)
1959 struct xm2m_msc_dev *xm2msc = platform_get_drvdata(pdev);
1961 xm2msc_unreg_video_n_m2m(xm2msc);
1962 v4l2_device_unregister(&xm2msc->v4l2_dev);
1966 static const struct of_device_id xm2m_msc_of_id_table[] = {
1967 {.compatible = "xlnx,v-multi-scaler-v1.0"},
1971 MODULE_DEVICE_TABLE(of, xm2m_msc_of_id_table);
1973 static struct platform_driver xm2m_msc_driver = {
1975 .name = "xilinx-multiscaler",
1976 .of_match_table = xm2m_msc_of_id_table,
1978 .probe = xm2m_msc_probe,
1979 .remove = xm2m_msc_remove,
1982 module_platform_driver(xm2m_msc_driver);
1984 MODULE_DESCRIPTION("Xilinx M2M Multi-Scaler Driver");
1985 MODULE_LICENSE("GPL v2");
1986 MODULE_ALIAS("xlnx_m2m_multiscaler_dev");