aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_fourcc.c42
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c3
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c31
-rw-r--r--drivers/gpu/drm/xilinx/Kconfig59
-rw-r--r--drivers/gpu/drm/xilinx/Makefile14
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_cresample.c154
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_cresample.h40
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_connector.c204
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_connector.h29
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_crtc.c595
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_crtc.h39
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dp.c2186
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c2265
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h69
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_drv.c614
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_drv.h65
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_dsi.c808
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_encoder.c240
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_encoder.h28
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_fb.c516
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_fb.h38
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_gem.c45
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_gem.h25
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_plane.c1098
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_plane.h61
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_sdi.c1452
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_drm_sdi.h29
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_osd.c382
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_osd.h62
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c119
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h35
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_vtc.c645
-rw-r--r--drivers/gpu/drm/xilinx/xilinx_vtc.h44
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig104
-rw-r--r--drivers/gpu/drm/xlnx/Makefile21
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_bridge.c561
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_bridge.h178
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_crtc.c206
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_crtc.h76
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_csc.c571
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_drv.c540
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_drv.h33
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_dsi.c907
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_fb.c306
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_fb.h33
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_gem.c47
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_gem.h26
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_mixer.c2821
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_pl_disp.c618
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_scaler.c1748
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi.c1227
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_modes.h356
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_timing.c425
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_timing.h20
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_vtc.c447
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c3333
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.h36
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c1917
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h38
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c194
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h28
-rw-r--r--drivers/gpu/drm/zocl/Kconfig8
-rw-r--r--drivers/gpu/drm/zocl/Makefile4
-rw-r--r--drivers/gpu/drm/zocl/zocl_bo.c271
-rw-r--r--drivers/gpu/drm/zocl/zocl_drv.c217
-rw-r--r--drivers/gpu/drm/zocl/zocl_drv.h59
68 files changed, 29420 insertions, 1 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 36f900d63979..7b764cce6028 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -307,6 +307,10 @@ source "drivers/gpu/drm/bridge/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
+source "drivers/gpu/drm/xilinx/Kconfig"
+
+source "drivers/gpu/drm/zocl/Kconfig"
+
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/v3d/Kconfig"
@@ -331,6 +335,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
+source "drivers/gpu/drm/xlnx/Kconfig"
+
source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 72f5036d9bfa..269c153f1f8a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -93,6 +93,8 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_XILINX) += xilinx/
+obj-$(CONFIG_DRM_ZOCL) += zocl/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
@@ -107,6 +109,7 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_XLNX) += xlnx/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6ea55fb4526d..c6ed69ac7d0d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -224,6 +224,8 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XV15, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_XV20, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
@@ -274,6 +276,11 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
.is_yuv = true },
+ { .format = DRM_FORMAT_AVUY, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XVUY8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XVUY2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y8, .depth = 0, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y10, .depth = 0, .num_planes = 1, .pixels_per_macropixel = { 3, 0, 0 }, .bytes_per_macropixel = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
};
unsigned int i;
@@ -511,3 +518,38 @@ uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
drm_format_info_block_height(info, plane));
}
EXPORT_SYMBOL(drm_format_info_min_pitch);
+
+/**
+ * drm_format_plane_width_bytes - bytes of the given width of the plane
+ * @info: DRM format information
+ * @plane: plane index
+ * @width: width to get the number of bytes
+ *
+ * This returns the number of bytes for given @width and @plane.
+ * The @cpp or macro pixel information should be valid.
+ *
+ * Returns:
+ * The bytes of @width of @plane. 0 for invalid format info.
+ */
+int drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, int width)
+{
+ if (!info || plane >= info->num_planes)
+ return 0;
+
+ if (info->cpp[plane])
+ return info->cpp[plane] * width;
+
+ if (WARN_ON(!info->bytes_per_macropixel[plane] ||
+ !info->pixels_per_macropixel[plane])) {
+ struct drm_format_name_buf buf;
+
+ DRM_WARN("Either cpp or macro-pixel info should be valid: %s\n",
+ drm_get_format_name(info->format, &buf));
+ return 0;
+ }
+
+ return DIV_ROUND_UP(width * info->bytes_per_macropixel[plane],
+ info->pixels_per_macropixel[plane]);
+}
+EXPORT_SYMBOL(drm_format_plane_width_bytes);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 45f6f11a88a7..3eebc5eca7b2 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -290,7 +290,8 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS |
+ DRM_MODE_FB_ALTERNATE_TOP | DRM_MODE_FB_ALTERNATE_BOTTOM)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3a7410057c92..c3998bc6f968 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2976,6 +2976,34 @@ static const struct panel_desc_dsi auo_b080uan01 = {
.lanes = 4,
};
+static const struct drm_display_mode auo_b101uan01_mode = {
+ .clock = 154500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 185,
+ .hsync_end = 1920 + 185,
+ .htotal = 1920 + 185 + 925,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 3,
+ .vsync_end = 1200 + 3 + 5,
+ .vtotal = 1200 + 3 + 5 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b101uan01 = {
+ .desc = {
+ .modes = &auo_b101uan01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 272,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode boe_tv080wum_nl0_mode = {
.clock = 160000,
.hdisplay = 1200,
@@ -3124,6 +3152,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "auo,b080uan01",
.data = &auo_b080uan01
}, {
+ .compatible = "auo,b101uan01",
+ .data = &auo_b101uan01
+ }, {
.compatible = "boe,tv080wum-nl0",
.data = &boe_tv080wum_nl0
}, {
diff --git a/drivers/gpu/drm/xilinx/Kconfig b/drivers/gpu/drm/xilinx/Kconfig
new file mode 100644
index 000000000000..57e18a9d774d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/Kconfig
@@ -0,0 +1,59 @@
+config DRM_XILINX
+ tristate "Xilinx DRM"
+ depends on DRM && HAVE_CLK
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DMA_ENGINE
+ select DRM_XILINX_DP_SUB
+ select XILINX_FRMBUF
+ help
+ DRM display driver for Xilinx IP based pipelines.
+
+config DRM_XILINX_DP
+ tristate "Xilinx DRM Display Port Driver"
+ depends on DRM_XILINX
+ help
+ DRM driver for Xilinx Display Port IP.
+
+config DRM_XILINX_DP_DEBUG_FS
+ bool "Xilinx DRM DP debugfs"
+ depends on DEBUG_FS && DRM_XILINX_DP
+ help
+ Enable the debugfs code for DPDMA driver. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config DRM_XILINX_DP_SUB
+ tristate "Xilinx DRM Display Port Subsystem Driver"
+ depends on DRM_XILINX
+ select DRM_XILINX_DP
+ help
+ DRM driver for Xilinx Display Port Subsystem.
+
+config DRM_XILINX_DP_SUB_DEBUG_FS
+ bool "Xilinx DRM DPSUB debugfs"
+ depends on DEBUG_FS && DRM_XILINX_DP_SUB
+ select DRM_XILINX_DP_DEBUG_FS
+ help
+ Enable the debugfs code for DP Sub driver. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config DRM_XILINX_MIPI_DSI
+ tristate "Xilinx DRM MIPI DSI Driver"
+ depends on DRM_XILINX
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ DRM driver for Xilinx MIPI DSI IP.
+
+config DRM_XILINX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XILINX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
diff --git a/drivers/gpu/drm/xilinx/Makefile b/drivers/gpu/drm/xilinx/Makefile
new file mode 100644
index 000000000000..19bc1541ca17
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+xilinx_drm-y := xilinx_drm_crtc.o xilinx_drm_connector.o xilinx_drm_drv.o \
+ xilinx_drm_encoder.o xilinx_drm_fb.o xilinx_drm_gem.o \
+ xilinx_drm_plane.o
+xilinx_drm-y += xilinx_cresample.o xilinx_osd.o xilinx_rgb2yuv.o xilinx_vtc.o
+
+obj-$(CONFIG_DRM_XILINX) += xilinx_drm.o
+obj-$(CONFIG_DRM_XILINX_DP) += xilinx_drm_dp.o
+obj-$(CONFIG_DRM_XILINX_DP_SUB) += xilinx_drm_dp_sub.o
+obj-$(CONFIG_DRM_XILINX_MIPI_DSI) += xilinx_drm_dsi.o
+obj-$(CONFIG_DRM_XILINX_SDI) += xilinx_drm_sdi.o
diff --git a/drivers/gpu/drm/xilinx/xilinx_cresample.c b/drivers/gpu/drm/xilinx/xilinx_cresample.c
new file mode 100644
index 000000000000..6ddad66913ae
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_cresample.c
@@ -0,0 +1,154 @@
+/*
+ * Xilinx Chroma Resampler support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_cresample.h"
+
+/* registers */
+/* general control registers */
+#define CRESAMPLE_CONTROL 0x0000
+
+/* horizontal and vertical active frame size */
+#define CRESAMPLE_ACTIVE_SIZE 0x0020
+
+/* control register bit definition */
+#define CRESAMPLE_CTL_EN (1 << 0) /* enable */
+#define CRESAMPLE_CTL_RU (1 << 1) /* reg update */
+#define CRESAMPLE_CTL_RESET (1 << 31) /* instant reset */
+
+struct xilinx_cresample {
+ void __iomem *base;
+ const char *input_format_name;
+ const char *output_format_name;
+};
+
+/* enable cresample */
+void xilinx_cresample_enable(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg | CRESAMPLE_CTL_EN);
+}
+
+/* disable cresample */
+void xilinx_cresample_disable(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg & ~CRESAMPLE_CTL_EN);
+}
+
+/* configure cresample */
+void xilinx_cresample_configure(struct xilinx_cresample *cresample,
+ int hactive, int vactive)
+{
+ /* configure hsize and vsize */
+ xilinx_drm_writel(cresample->base, CRESAMPLE_ACTIVE_SIZE,
+ (vactive << 16) | hactive);
+}
+
+/* reset cresample */
+void xilinx_cresample_reset(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ CRESAMPLE_CTL_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg | CRESAMPLE_CTL_RU);
+}
+
+/* get an input format */
+const char *
+xilinx_cresample_get_input_format_name(struct xilinx_cresample *cresample)
+{
+ return cresample->input_format_name;
+}
+
+/* get an output format */
+const char *
+xilinx_cresample_get_output_format_name(struct xilinx_cresample *cresample)
+{
+ return cresample->output_format_name;
+}
+
+static const struct of_device_id xilinx_cresample_of_match[] = {
+ { .compatible = "xlnx,v-cresample-3.01.a" },
+ { /* end of table */ },
+};
+
+struct xilinx_cresample *xilinx_cresample_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_cresample *cresample;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_cresample_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ cresample = devm_kzalloc(dev, sizeof(*cresample), GFP_KERNEL);
+ if (!cresample)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ cresample->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(cresample->base))
+ return ERR_CAST(cresample->base);
+
+ ret = of_property_read_string(node, "xlnx,input-format",
+ &cresample->input_format_name);
+ if (ret) {
+ dev_warn(dev, "failed to get an input format prop\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_string(node, "xlnx,output-format",
+ &cresample->output_format_name);
+ if (ret) {
+ dev_warn(dev, "failed to get an output format prop\n");
+ return ERR_PTR(ret);
+ }
+
+ xilinx_cresample_reset(cresample);
+
+ return cresample;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_cresample.h b/drivers/gpu/drm/xilinx/xilinx_cresample.h
new file mode 100644
index 000000000000..34323c722881
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_cresample.h
@@ -0,0 +1,40 @@
+/*
+ * Xilinx Chroma Resampler Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_CRESAMPLE_H_
+#define _XILINX_CRESAMPLE_H_
+
+struct xilinx_cresample;
+
+void xilinx_cresample_configure(struct xilinx_cresample *cresample,
+ int hactive, int vactive);
+void xilinx_cresample_reset(struct xilinx_cresample *cresample);
+void xilinx_cresample_enable(struct xilinx_cresample *cresample);
+void xilinx_cresample_disable(struct xilinx_cresample *cresample);
+
+const char *
+xilinx_cresample_get_input_format_name(struct xilinx_cresample *cresample);
+const char *
+xilinx_cresample_get_output_format_name(struct xilinx_cresample *cresample);
+
+struct device;
+struct device_node;
+
+struct xilinx_cresample *xilinx_cresample_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_CRESAMPLE_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_connector.c b/drivers/gpu/drm/xilinx/xilinx_drm_connector.c
new file mode 100644
index 000000000000..b37bb50108da
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_connector.c
@@ -0,0 +1,204 @@
+/*
+ * Xilinx DRM connector driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/device.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_connector.h"
+
+struct xilinx_drm_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+};
+
+struct xilinx_drm_connector_type {
+ const char *name;
+ const int type;
+};
+
+#define to_xilinx_connector(x) \
+ container_of(x, struct xilinx_drm_connector, base)
+
+/* get mode list */
+static int xilinx_drm_connector_get_modes(struct drm_connector *base_connector)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+ int count = 0;
+
+ if (encoder_sfuncs->get_modes)
+ count = encoder_sfuncs->get_modes(encoder, base_connector);
+
+ return count;
+}
+
+/* check if mode is valid */
+static int xilinx_drm_connector_mode_valid(struct drm_connector *base_connector,
+ struct drm_display_mode *mode)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+ int ret = MODE_OK;
+
+ if (encoder_sfuncs->mode_valid)
+ ret = encoder_sfuncs->mode_valid(encoder, mode);
+
+ return ret;
+}
+
+/* find best encoder: return stored encoder */
+static struct drm_encoder *
+xilinx_drm_connector_best_encoder(struct drm_connector *base_connector)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+
+ return connector->encoder;
+}
+
+static struct drm_connector_helper_funcs xilinx_drm_connector_helper_funcs = {
+ .get_modes = xilinx_drm_connector_get_modes,
+ .mode_valid = xilinx_drm_connector_mode_valid,
+ .best_encoder = xilinx_drm_connector_best_encoder,
+};
+
+static enum drm_connector_status
+xilinx_drm_connector_detect(struct drm_connector *base_connector, bool force)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ enum drm_connector_status status = connector_status_unknown;
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+
+ if (encoder_sfuncs->detect)
+ status = encoder_sfuncs->detect(encoder, base_connector);
+
+ /* some connector ignores the first hpd, so try again if forced */
+ if (force && encoder_sfuncs->detect && (status != connector_status_connected))
+ status = encoder_sfuncs->detect(encoder, base_connector);
+
+ DRM_DEBUG_KMS("status: %d\n", status);
+
+ return status;
+}
+
+/* destroy connector */
+void xilinx_drm_connector_destroy(struct drm_connector *base_connector)
+{
+ drm_connector_unregister(base_connector);
+ drm_connector_cleanup(base_connector);
+}
+
+static const struct drm_connector_funcs xilinx_drm_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = xilinx_drm_connector_detect,
+ .destroy = xilinx_drm_connector_destroy,
+};
+
+static const struct xilinx_drm_connector_type connector_types[] = {
+ { "HDMIA", DRM_MODE_CONNECTOR_HDMIA },
+ { "DisplayPort", DRM_MODE_CONNECTOR_DisplayPort },
+};
+
+/* create connector */
+struct drm_connector *
+xilinx_drm_connector_create(struct drm_device *drm,
+ struct drm_encoder *base_encoder, int id)
+{
+ struct xilinx_drm_connector *connector;
+ const char *string;
+ int type = DRM_MODE_CONNECTOR_Unknown;
+ int i, ret;
+
+ connector = devm_kzalloc(drm->dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector->base.polled = DRM_CONNECTOR_POLL_HPD |
+ DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ ret = of_property_read_string_index(drm->dev->of_node,
+ "xlnx,connector-type", id, &string);
+ if (ret < 0) {
+ dev_err(drm->dev, "No connector type in DT\n");
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(connector_types); i++)
+ if (strcmp(connector_types[i].name, string) == 0) {
+ type = connector_types[i].type;
+ break;
+ }
+
+ if (type == DRM_MODE_CONNECTOR_Unknown) {
+ dev_err(drm->dev, "Unknown connector type in DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = drm_connector_init(drm, &connector->base,
+ &xilinx_drm_connector_funcs, type);
+ if (ret) {
+ DRM_ERROR("failed to initialize connector\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_connector_helper_add(&connector->base,
+ &xilinx_drm_connector_helper_funcs);
+
+ /* add entry for connector */
+ ret = drm_connector_register(&connector->base);
+ if (ret) {
+ DRM_ERROR("failed to register a connector\n");
+ goto err_register;
+ }
+
+ /* connect connector and encoder */
+ ret = drm_connector_attach_encoder(&connector->base, base_encoder);
+ if (ret) {
+ DRM_ERROR("failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+ connector->encoder = base_encoder;
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+
+ return &connector->base;
+
+err_attach:
+ drm_connector_unregister(&connector->base);
+err_register:
+ drm_connector_cleanup(&connector->base);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_connector.h b/drivers/gpu/drm/xilinx/xilinx_drm_connector.h
new file mode 100644
index 000000000000..750bfd8d1e86
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_connector.h
@@ -0,0 +1,29 @@
+/*
+ * Xilinx DRM connector header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_CONNECTOR_H_
+#define _XILINX_DRM_CONNECTOR_H_
+
+struct drm_device;
+struct drm_connector;
+
+struct drm_connector *
+xilinx_drm_connector_create(struct drm_device *drm,
+ struct drm_encoder *base_encoder, int id);
+void xilinx_drm_connector_destroy(struct drm_connector *base_connector);
+
+#endif /* _XILINX_DRM_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c
new file mode 100644
index 000000000000..66513b13b045
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c
@@ -0,0 +1,595 @@
+/*
+ * Xilinx DRM crtc driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+
+#include <video/videomode.h>
+
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_plane.h"
+
+#include "xilinx_cresample.h"
+#include "xilinx_rgb2yuv.h"
+#include "xilinx_vtc.h"
+#include "xilinx_drm_sdi.h"
+
+/**
+ * struct xilinx_drm_crtc - Xilinx DRM crtc object
+ * @base: base drm crtc object
+ * @cresample: chroma resampler in the pipeline, or NULL if absent
+ * @rgb2yuv: color space converter in the pipeline, or NULL if absent
+ * @pixel_clock: pixel clock source
+ * @pixel_clock_enabled: tracks whether @pixel_clock is currently prepared
+ * @vtc: video timing controller, or NULL if absent
+ * @plane_manager: plane manager serving this crtc
+ * @dpms: current dpms state
+ * @alpha: alpha value (not referenced in this file - presumably consumed
+ * elsewhere; TODO confirm)
+ * @event: pending vblank event, protected by the drm device event_lock
+ * @dp_sub: DisplayPort subsystem handle, or NULL if absent
+ * @sdi: SDI subsystem handle (only used when CONFIG_DRM_XILINX_SDI is set)
+ */
+struct xilinx_drm_crtc {
+ struct drm_crtc base;
+ struct xilinx_cresample *cresample;
+ struct xilinx_rgb2yuv *rgb2yuv;
+ struct clk *pixel_clock;
+ bool pixel_clock_enabled;
+ struct xilinx_vtc *vtc;
+ struct xilinx_drm_plane_manager *plane_manager;
+ int dpms;
+ unsigned int alpha;
+ struct drm_pending_vblank_event *event;
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct xilinx_sdi *sdi;
+};
+
+/* cast a base drm_crtc embedded in xilinx_drm_crtc back to the wrapper */
+#define to_xilinx_crtc(x) container_of(x, struct xilinx_drm_crtc, base)
+
+/**
+ * xilinx_drm_crtc_dpms - Set the dpms state of the crtc pipeline
+ * @base_crtc: base crtc object
+ * @dpms: requested DRM_MODE_DPMS_* state
+ *
+ * DPMS_ON enables the pixel clock and brings up the pipeline front to
+ * back (planes, rgb2yuv, cresample, vtc). Any other state tears the
+ * pipeline down in the reverse order, resets the optional stages, and
+ * gates the pixel clock. A repeated request for the current state is a
+ * no-op.
+ */
+static void xilinx_drm_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ int ret;
+
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", crtc->dpms, dpms);
+
+ if (crtc->dpms == dpms)
+ return;
+
+ crtc->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (!crtc->pixel_clock_enabled) {
+ ret = clk_prepare_enable(crtc->pixel_clock);
+ if (ret)
+ DRM_ERROR("failed to enable a pixel clock\n");
+ else
+ crtc->pixel_clock_enabled = true;
+ }
+
+ xilinx_drm_plane_manager_dpms(crtc->plane_manager, dpms);
+ xilinx_drm_plane_dpms(base_crtc->primary, dpms);
+ if (crtc->rgb2yuv)
+ xilinx_rgb2yuv_enable(crtc->rgb2yuv);
+ if (crtc->cresample)
+ xilinx_cresample_enable(crtc->cresample);
+ if (crtc->vtc)
+ xilinx_vtc_enable(crtc->vtc);
+ break;
+ default:
+ /* disable and reset each optional stage in reverse order */
+ if (crtc->vtc) {
+ xilinx_vtc_disable(crtc->vtc);
+ xilinx_vtc_reset(crtc->vtc);
+ }
+ if (crtc->cresample) {
+ xilinx_cresample_disable(crtc->cresample);
+ xilinx_cresample_reset(crtc->cresample);
+ }
+ if (crtc->rgb2yuv) {
+ xilinx_rgb2yuv_disable(crtc->rgb2yuv);
+ xilinx_rgb2yuv_reset(crtc->rgb2yuv);
+ }
+ xilinx_drm_plane_dpms(base_crtc->primary, dpms);
+ xilinx_drm_plane_manager_dpms(crtc->plane_manager, dpms);
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+/* prepare crtc: force the whole pipeline off before a mode set */
+static void xilinx_drm_crtc_prepare(struct drm_crtc *base_crtc)
+{
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+}
+
+/* apply mode to crtc pipe: power the pipeline on, then latch the plane */
+static void xilinx_drm_crtc_commit(struct drm_crtc *base_crtc)
+{
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_ON);
+ xilinx_drm_plane_commit(base_crtc->primary);
+}
+
+/* fix mode: accept every mode unchanged (required helper callback) */
+static bool xilinx_drm_crtc_mode_fixup(struct drm_crtc *base_crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* no op */
+ return true;
+}
+
+/**
+ * xilinx_drm_crtc_mode_set - Program a new mode into the crtc pipeline
+ * @base_crtc: base crtc object
+ * @mode: requested mode (unused; @adjusted_mode is programmed)
+ * @adjusted_mode: mode after fixup, used for all hardware programming
+ * @x: x position of the crtc within the framebuffer
+ * @y: y position of the crtc within the framebuffer
+ * @old_fb: previous framebuffer (unused)
+ *
+ * Gates the pixel clock, sets its rate from the mode clock, programs the
+ * VTC timing (when present), configures the optional cresample/rgb2yuv
+ * stages, and finally mode-sets the plane manager and primary plane.
+ *
+ * Return: 0 on success, or a negative error code from the clock or plane
+ * setup.
+ */
+static int xilinx_drm_crtc_mode_set(struct drm_crtc *base_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct videomode vm;
+ long diff;
+ int ret;
+
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+
+ /* set pixel clock */
+ ret = clk_set_rate(crtc->pixel_clock, adjusted_mode->clock * 1000);
+ if (ret) {
+ DRM_ERROR("failed to set a pixel clock\n");
+ return ret;
+ }
+
+ /* warn (debug only) when the achieved rate is off by more than 5% */
+ diff = clk_get_rate(crtc->pixel_clock) - adjusted_mode->clock * 1000;
+ if (abs(diff) > (adjusted_mode->clock * 1000) / 20)
+ DRM_DEBUG_KMS("actual pixel clock rate(%d) is off by %ld\n",
+ adjusted_mode->clock, diff);
+
+ if (crtc->vtc) {
+ /* set video timing */
+ vm.hactive = adjusted_mode->hdisplay;
+ vm.hfront_porch = adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay;
+ vm.hback_porch = adjusted_mode->htotal -
+ adjusted_mode->hsync_end;
+ vm.hsync_len = adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ /*
+ * NOTE(review): the positive-sync DRM flags (PHSYNC/PVSYNC)
+ * are mapped to the *_LOW display flags here, which looks
+ * inverted - confirm this matches the VTC driver's polarity
+ * convention before changing it.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ xilinx_vtc_config_sig(crtc->vtc, &vm);
+ }
+
+ /* configure cresample and rgb2yuv */
+ if (crtc->cresample)
+ xilinx_cresample_configure(crtc->cresample,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ if (crtc->rgb2yuv)
+ xilinx_rgb2yuv_configure(crtc->rgb2yuv,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+
+ /* configure a plane: vdma and osd layer */
+ xilinx_drm_plane_manager_mode_set(crtc->plane_manager,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ ret = xilinx_drm_plane_mode_set(base_crtc->primary,
+ base_crtc->primary->fb, 0, 0,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay,
+ x, y,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * _xilinx_drm_crtc_mode_set_base - Re-program the plane from a framebuffer
+ * @base_crtc: base crtc object
+ * @fb: framebuffer to scan out
+ * @x: x offset into @fb
+ * @y: y offset into @fb
+ *
+ * Mode-sets the plane manager and primary plane using the current hwmode
+ * dimensions and the given framebuffer/offset, then commits so the new
+ * scanout address takes effect.
+ *
+ * Return: 0 on success, or a negative error code from the plane mode set.
+ */
+static int _xilinx_drm_crtc_mode_set_base(struct drm_crtc *base_crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ int ret;
+
+ /* configure a plane */
+ xilinx_drm_plane_manager_mode_set(crtc->plane_manager,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay);
+ ret = xilinx_drm_plane_mode_set(base_crtc->primary,
+ fb, 0, 0,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay,
+ x, y,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ /* apply the new fb addr */
+ xilinx_drm_crtc_commit(base_crtc);
+
+ return 0;
+}
+
+/*
+ * update address and information from fb: helper callback that re-bases
+ * the scanout on the crtc's currently attached primary framebuffer
+ */
+static int xilinx_drm_crtc_mode_set_base(struct drm_crtc *base_crtc,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ /* configure a plane */
+ return _xilinx_drm_crtc_mode_set_base(base_crtc, base_crtc->primary->fb,
+ x, y);
+}
+
+static struct drm_crtc_helper_funcs xilinx_drm_crtc_helper_funcs = {
+ .dpms = xilinx_drm_crtc_dpms,
+ .prepare = xilinx_drm_crtc_prepare,
+ .commit = xilinx_drm_crtc_commit,
+ .mode_fixup = xilinx_drm_crtc_mode_fixup,
+ .mode_set = xilinx_drm_crtc_mode_set,
+ .mode_set_base = xilinx_drm_crtc_mode_set_base,
+};
+
+/**
+ * xilinx_drm_crtc_destroy - Destroy the crtc
+ * @base_crtc: base crtc object
+ *
+ * Forces the pipeline off, cleans up the drm crtc, drops the dp_sub
+ * reference, gates the pixel clock, and removes the plane manager.
+ */
+void xilinx_drm_crtc_destroy(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ /* make sure crtc is off */
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+ drm_crtc_cleanup(base_crtc);
+
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_put(crtc->dp_sub);
+
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+
+ xilinx_drm_plane_remove_manager(crtc->plane_manager);
+}
+
+/**
+ * xilinx_drm_crtc_cancel_page_flip - Drop a pending flip owned by a file
+ * @base_crtc: base crtc object
+ * @file: drm file whose pending vblank event should be cancelled
+ *
+ * Called on file close so a queued flip event is not delivered to a dead
+ * client. Frees the event and releases the vblank reference taken when
+ * the flip was queued.
+ */
+void xilinx_drm_crtc_cancel_page_flip(struct drm_crtc *base_crtc,
+ struct drm_file *file)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = crtc->event;
+ if (event && (event->base.file_priv == file)) {
+ crtc->event = NULL;
+ /*
+ * NOTE(review): kfree(&event->base) relies on 'base' being
+ * the first member of drm_pending_vblank_event and bypasses
+ * the file's event-space accounting; drm_event_cancel_free()
+ * looks like the safer helper - confirm against the DRM core
+ * version in use.
+ */
+ kfree(&event->base);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/**
+ * xilinx_drm_crtc_finish_page_flip - Complete a pending page flip
+ * @base_crtc: base crtc object
+ *
+ * Called from the vblank handler: sends the queued vblank event to
+ * userspace (if any) and drops the vblank reference taken at flip time.
+ */
+static void xilinx_drm_crtc_finish_page_flip(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = crtc->event;
+ crtc->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(base_crtc, event);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/**
+ * xilinx_drm_crtc_page_flip - Queue a page flip to a new framebuffer
+ * @base_crtc: base crtc object
+ * @fb: framebuffer to flip to
+ * @event: optional vblank event to deliver once the flip completes
+ * @page_flip_flags: flip flags (unused)
+ * @ctx: modeset acquire context (unused)
+ *
+ * Rejects the request with -EBUSY while a previous flip is still
+ * pending, re-bases the plane on @fb, and queues @event for delivery at
+ * the next vblank.
+ *
+ * Return: 0 on success, -EBUSY when a flip is pending, or a negative
+ * error code from the plane mode set.
+ */
+static int xilinx_drm_crtc_page_flip(struct drm_crtc *base_crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ if (crtc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+
+ /*
+ * NOTE(review): the busy check above and the crtc->event store below
+ * are under separate lock sections, so two concurrent flips could
+ * both pass the check; presumably serialized by higher-level modeset
+ * locking - confirm.
+ */
+ /* configure a plane */
+ ret = _xilinx_drm_crtc_mode_set_base(base_crtc, fb,
+ base_crtc->x, base_crtc->y);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ base_crtc->primary->fb = fb;
+
+ if (event) {
+ event->pipe = 0;
+ drm_crtc_vblank_get(base_crtc);
+ spin_lock_irqsave(&drm->event_lock, flags);
+ crtc->event = event;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ return 0;
+}
+
+/*
+ * vblank interrupt handler: registered as the callback for VTC/dp_sub/SDI
+ * vblank sources; forwards the event to the DRM core (crtc index 0) and
+ * completes any pending page flip
+ */
+static void xilinx_drm_crtc_vblank_handler(void *data)
+{
+ struct drm_crtc *base_crtc = data;
+ struct drm_device *drm;
+
+ if (!base_crtc)
+ return;
+
+ drm = base_crtc->dev;
+
+ drm_handle_vblank(drm, 0);
+ xilinx_drm_crtc_finish_page_flip(base_crtc);
+}
+
+/*
+ * enable vblank interrupt: hook the vblank handler into whichever vblank
+ * source is present (VTC, DP subsystem, and/or SDI)
+ */
+void xilinx_drm_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ if (crtc->vtc)
+ xilinx_vtc_enable_vblank_intr(crtc->vtc,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_enable_vblank(crtc->dp_sub,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+#ifdef CONFIG_DRM_XILINX_SDI
+ if (crtc->sdi)
+ xilinx_drm_sdi_enable_vblank(crtc->sdi,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+#endif
+}
+
+/* disable vblank interrupt on every present vblank source */
+void xilinx_drm_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_disable_vblank(crtc->dp_sub);
+ if (crtc->vtc)
+ xilinx_vtc_disable_vblank_intr(crtc->vtc);
+#ifdef CONFIG_DRM_XILINX_SDI
+ if (crtc->sdi)
+ xilinx_drm_sdi_disable_vblank(crtc->sdi);
+#endif
+}
+
+/**
+ * xilinx_drm_crtc_restore - Restore the crtc states
+ * @base_crtc: base crtc object
+ *
+ * Restore the crtc states to the default ones. The request is propagated
+ * to the plane driver.
+ */
+void xilinx_drm_crtc_restore(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ xilinx_drm_plane_restore(crtc->plane_manager);
+}
+
+/**
+ * xilinx_drm_crtc_get_max_width - Get the maximum supported frame width
+ * @base_crtc: base crtc object
+ *
+ * Return: the maximum width supported by the primary plane.
+ */
+unsigned int xilinx_drm_crtc_get_max_width(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_max_width(base_crtc->primary);
+}
+
+/**
+ * xilinx_drm_crtc_check_format - Check if a pixel format is supported
+ * @base_crtc: base crtc object
+ * @fourcc: DRM fourcc format code to check
+ *
+ * Return: true if the plane manager supports @fourcc, false otherwise.
+ */
+bool xilinx_drm_crtc_check_format(struct drm_crtc *base_crtc, uint32_t fourcc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ return xilinx_drm_plane_check_format(crtc->plane_manager, fourcc);
+}
+
+/**
+ * xilinx_drm_crtc_get_format - Get the current pixel format
+ * @base_crtc: base crtc object
+ *
+ * Return: the DRM fourcc format code of the primary plane.
+ */
+uint32_t xilinx_drm_crtc_get_format(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_format(base_crtc->primary);
+}
+
+/**
+ * xilinx_drm_crtc_get_align - Get the alignment value for pitch
+ * @base_crtc: Base crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_crtc_get_align(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_align(base_crtc->primary);
+}
+
+static struct drm_crtc_funcs xilinx_drm_crtc_funcs = {
+ .destroy = xilinx_drm_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = xilinx_drm_crtc_page_flip,
+};
+
+/**
+ * xilinx_drm_crtc_create - Create and initialize the crtc
+ * @drm: drm device
+ *
+ * Probes the optional pipeline components referenced from the device
+ * tree (cresample, rgb2yuv, vtc), creates the plane manager with a
+ * primary plane and any extra planes, resolves the pixel clock and the
+ * optional dp_sub/SDI subsystems, and registers the crtc with the DRM
+ * core.
+ *
+ * Return: the new base crtc object, or an ERR_PTR() on failure
+ * (-EPROBE_DEFER is propagated when a dependency is not ready yet).
+ */
+struct drm_crtc *xilinx_drm_crtc_create(struct drm_device *drm)
+{
+ struct xilinx_drm_crtc *crtc;
+ struct drm_plane *primary_plane;
+ struct device_node *sub_node;
+ int possible_crtcs = 1;
+ int ret;
+
+ crtc = devm_kzalloc(drm->dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ /* probe chroma resampler and enable */
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,cresample", 0);
+ if (sub_node) {
+ crtc->cresample = xilinx_cresample_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->cresample)) {
+ DRM_ERROR("failed to probe a cresample\n");
+ return ERR_CAST(crtc->cresample);
+ }
+ }
+
+ /* probe color space converter and enable */
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,rgb2yuv", 0);
+ if (sub_node) {
+ crtc->rgb2yuv = xilinx_rgb2yuv_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->rgb2yuv)) {
+ DRM_ERROR("failed to probe a rgb2yuv\n");
+ return ERR_CAST(crtc->rgb2yuv);
+ }
+ }
+
+ /* probe a plane manager */
+ crtc->plane_manager = xilinx_drm_plane_probe_manager(drm);
+ if (IS_ERR(crtc->plane_manager)) {
+ if (PTR_ERR(crtc->plane_manager) != -EPROBE_DEFER)
+ DRM_ERROR("failed to probe a plane manager\n");
+ return ERR_CAST(crtc->plane_manager);
+ }
+
+ /* create a primary plane. there's only one crtc now */
+ primary_plane = xilinx_drm_plane_create_primary(crtc->plane_manager,
+ possible_crtcs);
+ if (IS_ERR(primary_plane)) {
+ DRM_ERROR("failed to create a primary plane for crtc\n");
+ ret = PTR_ERR(primary_plane);
+ goto err_plane;
+ }
+
+ /* create extra planes */
+ xilinx_drm_plane_create_planes(crtc->plane_manager, possible_crtcs);
+
+ /* a missing clock is tolerated unless the driver is deferring */
+ crtc->pixel_clock = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(crtc->pixel_clock)) {
+ if (PTR_ERR(crtc->pixel_clock) == -EPROBE_DEFER) {
+ ret = PTR_ERR(crtc->pixel_clock);
+ goto err_plane;
+ } else {
+ DRM_DEBUG_KMS("failed to get pixel clock\n");
+ crtc->pixel_clock = NULL;
+ }
+ }
+
+ /* sanity-check that the clock can be enabled, then gate it again */
+ ret = clk_prepare_enable(crtc->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ crtc->pixel_clock_enabled = false;
+ goto err_plane;
+ }
+ clk_disable_unprepare(crtc->pixel_clock);
+
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,vtc", 0);
+ if (sub_node) {
+ crtc->vtc = xilinx_vtc_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->vtc)) {
+ DRM_ERROR("failed to probe video timing controller\n");
+ ret = PTR_ERR(crtc->vtc);
+ goto err_pixel_clk;
+ }
+ }
+
+ crtc->dp_sub = xilinx_drm_dp_sub_of_get(drm->dev->of_node);
+ if (IS_ERR(crtc->dp_sub)) {
+ ret = PTR_ERR(crtc->dp_sub);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get a dp_sub\n");
+ goto err_pixel_clk;
+ }
+
+#ifdef CONFIG_DRM_XILINX_SDI
+ crtc->sdi = xilinx_drm_sdi_of_get(drm->dev->of_node);
+ if (IS_ERR(crtc->sdi)) {
+ ret = PTR_ERR(crtc->sdi);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get a sdi\n");
+ goto err_pixel_clk;
+ }
+#endif
+ crtc->dpms = DRM_MODE_DPMS_OFF;
+
+ /* initialize drm crtc */
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, primary_plane,
+ NULL, &xilinx_drm_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize crtc\n");
+ goto err_pixel_clk;
+ }
+ drm_crtc_helper_add(&crtc->base, &xilinx_drm_crtc_helper_funcs);
+
+ return &crtc->base;
+
+err_pixel_clk:
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+err_plane:
+ xilinx_drm_plane_remove_manager(crtc->plane_manager);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h
new file mode 100644
index 000000000000..3566e0eba036
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx DRM crtc header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_CRTC_H_
+#define _XILINX_DRM_CRTC_H_
+
+struct drm_device;
+struct drm_crtc;
+
+void xilinx_drm_crtc_enable_vblank(struct drm_crtc *base_crtc);
+void xilinx_drm_crtc_disable_vblank(struct drm_crtc *base_crtc);
+void xilinx_drm_crtc_cancel_page_flip(struct drm_crtc *base_crtc,
+ struct drm_file *file);
+
+void xilinx_drm_crtc_restore(struct drm_crtc *base_crtc);
+
+unsigned int xilinx_drm_crtc_get_max_width(struct drm_crtc *base_crtc);
+bool xilinx_drm_crtc_check_format(struct drm_crtc *base_crtc, uint32_t fourcc);
+uint32_t xilinx_drm_crtc_get_format(struct drm_crtc *base_crtc);
+unsigned int xilinx_drm_crtc_get_align(struct drm_crtc *base_crtc);
+
+struct drm_crtc *xilinx_drm_crtc_create(struct drm_device *drm);
+void xilinx_drm_crtc_destroy(struct drm_crtc *base_crtc);
+
+#endif /* _XILINX_DRM_CRTC_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp.c b/drivers/gpu/drm/xilinx/xilinx_drm_dp.c
new file mode 100644
index 000000000000..fdb5e74cc96c
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp.c
@@ -0,0 +1,2186 @@
+/*
+ * Xilinx DRM DisplayPort encoder driver for Xilinx
+ *
+ * Copyright (C) 2014 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+
+/* AUX transaction timeout; read-only after load (mode 0444) */
+static uint xilinx_drm_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, xilinx_drm_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms,
+ "DP aux timeout value in msec (default: 50)");
+
+/* Delay after the DPCD power-on request; runtime tunable (mode 0644) */
+static uint xilinx_drm_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, xilinx_drm_dp_power_on_delay_ms, uint,
+ 0644);
+/* description must use the user-visible name registered above */
+MODULE_PARM_DESC(power_on_delay_ms,
+ "Delay after power on request in msec (default: 4)");
+
+/* Link configuration registers */
+#define XILINX_DP_TX_LINK_BW_SET 0x0
+#define XILINX_DP_TX_LANE_CNT_SET 0x4
+#define XILINX_DP_TX_ENHANCED_FRAME_EN 0x8
+#define XILINX_DP_TX_TRAINING_PATTERN_SET 0xc
+#define XILINX_DP_TX_SCRAMBLING_DISABLE 0x14
+#define XILINX_DP_TX_DOWNSPREAD_CTL 0x18
+#define XILINX_DP_TX_SW_RESET 0x1c
+#define XILINX_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define XILINX_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define XILINX_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define XILINX_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define XILINX_DP_TX_SW_RESET_AUX BIT(7)
+#define XILINX_DP_TX_SW_RESET_ALL (XILINX_DP_TX_SW_RESET_STREAM1 | \
+ XILINX_DP_TX_SW_RESET_STREAM2 | \
+ XILINX_DP_TX_SW_RESET_STREAM3 | \
+ XILINX_DP_TX_SW_RESET_STREAM4 | \
+ XILINX_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define XILINX_DP_TX_ENABLE 0x80
+#define XILINX_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define XILINX_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define XILINX_DP_TX_VERSION 0xf8
+#define XILINX_DP_TX_VERSION_MAJOR_MASK (0xff << 24)
+#define XILINX_DP_TX_VERSION_MAJOR_SHIFT 24
+#define XILINX_DP_TX_VERSION_MINOR_MASK (0xff << 16)
+#define XILINX_DP_TX_VERSION_MINOR_SHIFT 16
+#define XILINX_DP_TX_VERSION_REVISION_MASK (0xf << 12)
+#define XILINX_DP_TX_VERSION_REVISION_SHIFT 12
+#define XILINX_DP_TX_VERSION_PATCH_MASK (0xf << 8)
+#define XILINX_DP_TX_VERSION_PATCH_SHIFT 8
+#define XILINX_DP_TX_VERSION_INTERNAL_MASK (0xff << 0)
+#define XILINX_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define XILINX_DP_TX_CORE_ID 0xfc
+#define XILINX_DP_TX_CORE_ID_MAJOR_MASK (0xff << 24)
+#define XILINX_DP_TX_CORE_ID_MAJOR_SHIFT 24
+#define XILINX_DP_TX_CORE_ID_MINOR_MASK (0xff << 16)
+#define XILINX_DP_TX_CORE_ID_MINOR_SHIFT 16
+#define XILINX_DP_TX_CORE_ID_REVISION_MASK (0xff << 8)
+#define XILINX_DP_TX_CORE_ID_REVISION_SHIFT 8
+#define XILINX_DP_TX_CORE_ID_DIRECTION BIT(0)
+
+/* AUX channel interface registers */
+#define XILINX_DP_TX_AUX_COMMAND 0x100
+#define XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define XILINX_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define XILINX_DP_TX_AUX_WRITE_FIFO 0x104
+#define XILINX_DP_TX_AUX_ADDRESS 0x108
+#define XILINX_DP_TX_CLK_DIVIDER 0x10c
+#define XILINX_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define XILINX_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define XILINX_DP_TX_INTR_SIGNAL_STATE 0x130
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define XILINX_DP_TX_AUX_REPLY_DATA 0x134
+#define XILINX_DP_TX_AUX_REPLY_CODE 0x138
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define XILINX_DP_TX_AUX_REPLY_CNT 0x13c
+#define XILINX_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define XILINX_DP_TX_INTR_STATUS 0x140
+#define XILINX_DP_TX_INTR_MASK 0x144
+#define XILINX_DP_TX_INTR_HPD_IRQ BIT(0)
+#define XILINX_DP_TX_INTR_HPD_EVENT BIT(1)
+#define XILINX_DP_TX_INTR_REPLY_RECV BIT(2)
+#define XILINX_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define XILINX_DP_TX_INTR_HPD_PULSE BIT(4)
+#define XILINX_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define XILINX_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define XILINX_DP_TX_INTR_VBLANK_START BIT(13)
+#define XILINX_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define XILINX_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define XILINX_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define XILINX_DP_TX_INTR_CUST_TS BIT(29)
+#define XILINX_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define XILINX_DP_TX_INTR_VSYNC_TS BIT(31)
+#define XILINX_DP_TX_INTR_ALL (XILINX_DP_TX_INTR_HPD_IRQ | \
+ XILINX_DP_TX_INTR_HPD_EVENT | \
+ XILINX_DP_TX_INTR_REPLY_RECV | \
+ XILINX_DP_TX_INTR_REPLY_TIMEOUT | \
+ XILINX_DP_TX_INTR_HPD_PULSE | \
+ XILINX_DP_TX_INTR_EXT_PKT_TXD | \
+ XILINX_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ XILINX_DP_TX_INTR_VBLANK_START | \
+ XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define XILINX_DP_TX_REPLY_DATA_CNT 0x148
+#define XILINX_DP_SUB_TX_INTR_STATUS 0x3a0
+#define XILINX_DP_SUB_TX_INTR_MASK 0x3a4
+#define XILINX_DP_SUB_TX_INTR_EN 0x3a8
+#define XILINX_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define XILINX_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define XILINX_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define XILINX_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define XILINX_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define XILINX_DP_TX_MAIN_STREAM_HRES 0x194
+#define XILINX_DP_TX_MAIN_STREAM_VRES 0x198
+#define XILINX_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define XILINX_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define XILINX_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define XILINX_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define XILINX_DP_TX_M_VID 0x1ac
+#define XILINX_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define XILINX_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define XILINX_DP_TX_N_VID 0x1b4
+#define XILINX_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define XILINX_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define XILINX_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define XILINX_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define XILINX_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define XILINX_DP_TX_PHY_CONFIG 0x200
+#define XILINX_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define XILINX_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define XILINX_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define XILINX_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define XILINX_DP_TX_PHY_CONFIG_ALL_RESET (XILINX_DP_TX_PHY_CONFIG_PHY_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
+#define XILINX_DP_TX_PHY_POWER_DOWN 0x238
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define XILINX_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define XILINX_DP_TX_PHY_STATUS 0x280
+#define XILINX_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define XILINX_DP_TX_AUDIO_CONTROL 0x300
+#define XILINX_DP_TX_AUDIO_CHANNELS 0x304
+#define XILINX_DP_TX_AUDIO_INFO_DATA 0x308
+#define XILINX_DP_TX_AUDIO_M_AUD 0x328
+#define XILINX_DP_TX_AUDIO_N_AUD 0x32c
+#define XILINX_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define XILINX_DP_MISC0_RGB (0)
+#define XILINX_DP_MISC0_YCRCB_422 (5 << 1)
+#define XILINX_DP_MISC0_YCRCB_444 (6 << 1)
+#define XILINX_DP_MISC0_FORMAT_MASK 0xe
+#define XILINX_DP_MISC0_BPC_6 (0 << 5)
+#define XILINX_DP_MISC0_BPC_8 (1 << 5)
+#define XILINX_DP_MISC0_BPC_10 (2 << 5)
+#define XILINX_DP_MISC0_BPC_12 (3 << 5)
+#define XILINX_DP_MISC0_BPC_16 (4 << 5)
+#define XILINX_DP_MISC0_BPC_MASK 0xe0
+#define XILINX_DP_MISC1_Y_ONLY (1 << 7)
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_MAX_LANES 4
+
+enum dp_version {
+ DP_V1_1A = 0x11,
+ DP_V1_2 = 0x12
+};
+
+/**
+ * struct xilinx_drm_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct xilinx_drm_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct xilinx_drm_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth(link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ */
+struct xilinx_drm_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+};
+
+/**
+ * struct xilinx_drm_dp_config - Configuration of DisplayPort from DTS
+ * @dp_version: DisplayPort protocol version
+ * @max_lanes: max number of lanes
+ * @max_link_rate: max link rate
+ * @max_bpc: maximum bits-per-color
+ * @max_pclock: maximum pixel clock rate
+ * @enable_yonly: enable yonly color space logic
+ * @enable_ycrcb: enable ycrcb color space logic
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ */
+struct xilinx_drm_dp_config {
+ enum dp_version dp_version;
+ u32 max_lanes;
+ u32 max_link_rate;
+ u32 max_bpc;
+ u32 max_pclock;
+ bool enable_yonly;
+ bool enable_ycrcb;
+
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+};
+
+/**
+ * struct xilinx_drm_dp - Xilinx DisplayPort core
+ * @encoder: pointer to the drm encoder structure
+ * @dev: device structure
+ * @iomem: device I/O memory for register access
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @dp_sub: DisplayPort subsystem
+ * @phy: PHY handles for DP lanes
+ * @aclk: clock source device for internal axi4-lite clock
+ * @aud_clk: clock source device for audio clock
+ * @aud_clk_enabled: if audio clock is enabled
+ * @dpms: current dpms state
+ * @status: the connection status
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct xilinx_drm_dp {
+ struct drm_encoder *encoder;
+ struct device *dev;
+ void __iomem *iomem;
+
+ struct xilinx_drm_dp_config config;
+ struct drm_dp_aux aux;
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct phy *phy[DP_MAX_LANES];
+ struct clk *aclk;
+ struct clk *aud_clk;
+ bool aud_clk_enabled;
+
+ int dpms;
+ enum drm_connector_status status;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct xilinx_drm_dp_link_config link_config;
+ struct xilinx_drm_dp_mode mode;
+ u8 train_set[DP_MAX_LANES];
+};
+
+/* retrieve the driver data stashed in the slave-encoder wrapper */
+static inline struct xilinx_drm_dp *to_dp(struct drm_encoder *encoder)
+{
+ return to_encoder_slave(encoder)->slave_priv;
+}
+
+#define AUX_READ_BIT	0x1
+
+#ifdef CONFIG_DRM_XILINX_DP_DEBUG_FS
+/* Upper bound of the buffer handed back by a debugfs read */
+#define XILINX_DP_DEBUGFS_READ_MAX_SIZE	32
+#define XILINX_DP_DEBUGFS_UINT8_MAX_STR	"255"
+/* Inclusive range check; evaluates @x only once */
+#define IN_RANGE(x, min, max) ({	\
+		typeof(x) _x = (x);	\
+		_x >= (min) && _x <= (max); })
+
+static inline int xilinx_drm_dp_max_rate(int link_rate, u8 lane_num, u8 bpp);
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+enum xilinx_dp_testcases {
+	DP_TC_LINK_RATE,
+	DP_TC_LANE_COUNT,
+	DP_TC_OUTPUT_FMT,
+	DP_TC_NONE
+};
+
+struct xilinx_dp_debugfs {
+	enum xilinx_dp_testcases testcase;	/* testcase armed by a write */
+	u8 link_rate;		/* bw code forced by LINK_RATE testcase */
+	u8 lane_cnt;		/* lane count forced by LANE_COUNT testcase */
+	u8 old_output_fmt;	/* format to restore after OUTPUT_DISPLAY_FORMAT */
+	struct xilinx_drm_dp *dp;
+};
+
+/* static: single file-local instance, keep it out of the global namespace */
+static struct xilinx_dp_debugfs dp_debugfs;
+
+struct xilinx_dp_debugfs_request {
+	const char *req;	/* testcase name accepted from userspace */
+	enum xilinx_dp_testcases tc;
+	ssize_t (*read_handler)(char **kern_buff);
+	ssize_t (*write_handler)(char **cmd);
+};
+
+/* Parse @arg in any base kstrtos64() accepts; -1 flags a parse failure. */
+static s64 xilinx_dp_debugfs_argument_value(char *arg)
+{
+	s64 val;
+
+	if (arg && !kstrtos64(arg, 0, &val))
+		return val;
+
+	return -1;
+}
+
+/*
+ * Program the main stream output format (misc0/misc1) and recompute bpp.
+ * @output_fmt is a misc0 format field, or XILINX_DP_MISC1_Y_ONLY;
+ * @num_colors is the number of color components of that format.
+ * Returns 0 on success, -EINVAL if misc0 holds an unknown bpc encoding.
+ */
+static int xilinx_dp_update_output_format(u8 output_fmt, u32 num_colors)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ struct xilinx_drm_dp_config *config = &dp->config;
+ u32 bpc;
+ u8 bpc_bits = (config->misc0 & XILINX_DP_MISC0_BPC_MASK);
+ bool misc1 = output_fmt & XILINX_DP_MISC1_Y_ONLY ? true : false;
+
+ /* Decode bits-per-component from the current misc0 programming */
+ switch (bpc_bits) {
+ case XILINX_DP_MISC0_BPC_6:
+ bpc = 6;
+ break;
+ case XILINX_DP_MISC0_BPC_8:
+ bpc = 8;
+ break;
+ case XILINX_DP_MISC0_BPC_10:
+ bpc = 10;
+ break;
+ case XILINX_DP_MISC0_BPC_12:
+ bpc = 12;
+ break;
+ case XILINX_DP_MISC0_BPC_16:
+ bpc = 16;
+ break;
+ default:
+ dev_err(dp->dev, "Invalid bpc count for misc0\n");
+ return -EINVAL;
+ }
+
+ /* clear old format */
+ config->misc0 &= ~XILINX_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~XILINX_DP_MISC1_Y_ONLY;
+
+ /* Y-only lives in misc1; every other format is a misc0 field */
+ if (misc1) {
+ config->misc1 |= output_fmt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC1,
+ config->misc1);
+ } else {
+ config->misc0 |= output_fmt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC0,
+ config->misc0);
+ }
+ config->bpp = num_colors * bpc;
+
+ return 0;
+}
+
+/* Arm the LINK_RATE testcase; the argument is a rate in 10 kHz units. */
+static ssize_t xilinx_dp_debugfs_max_linkrate_write(char **dp_test_arg)
+{
+	s64 rate;
+
+	rate = xilinx_dp_debugfs_argument_value(strsep(dp_test_arg, " "));
+	/* Only the three DP 1.2 link rates are legal. */
+	if (rate < 0 ||
+	    (rate != DP_HIGH_BIT_RATE2 && rate != DP_HIGH_BIT_RATE &&
+	     rate != DP_REDUCED_BIT_RATE))
+		return -EINVAL;
+
+	dp_debugfs.link_rate = drm_dp_link_rate_to_bw_code(rate);
+	dp_debugfs.testcase = DP_TC_LINK_RATE;
+
+	return 0;
+}
+
+/* Arm the LANE_COUNT testcase; valid counts are 1..max_lanes of the core. */
+static ssize_t xilinx_dp_debugfs_max_lanecnt_write(char **dp_test_arg)
+{
+	s64 lanes;
+
+	lanes = xilinx_dp_debugfs_argument_value(strsep(dp_test_arg, " "));
+	if (lanes < 1 || lanes > dp_debugfs.dp->config.max_lanes)
+		return -EINVAL;
+
+	dp_debugfs.lane_cnt = lanes;
+	dp_debugfs.testcase = DP_TC_LANE_COUNT;
+
+	return 0;
+}
+
+/*
+ * Arm the OUTPUT_DISPLAY_FORMAT testcase: remember the current format so
+ * the matching read can restore it, then program the requested one.
+ * NOTE(review): strncmp() prefix matching accepts trailing garbage after
+ * the keyword (e.g. "rgbfoo" matches "rgb") - confirm this is intended.
+ */
+static ssize_t xilinx_dp_debugfs_output_display_format_write(char **dp_test_arg)
+{
+ int ret;
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ char *output_format;
+ u8 output_fmt;
+ u32 num_colors;
+
+ /* Read the value from an user value */
+ output_format = strsep(dp_test_arg, " ");
+
+ if (strncmp(output_format, "rgb", 3) == 0) {
+ output_fmt = XILINX_DP_MISC0_RGB;
+ num_colors = 3;
+ } else if (strncmp(output_format, "ycbcr422", 8) == 0) {
+ output_fmt = XILINX_DP_MISC0_YCRCB_422;
+ num_colors = 2;
+ } else if (strncmp(output_format, "ycbcr444", 8) == 0) {
+ output_fmt = XILINX_DP_MISC0_YCRCB_444;
+ num_colors = 3;
+ } else if (strncmp(output_format, "yonly", 5) == 0) {
+ output_fmt = XILINX_DP_MISC1_Y_ONLY;
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid output format\n");
+ return -EINVAL;
+ }
+
+ /* Remember the active format (Y-only flag or misc0 field) for restore */
+ if (dp->config.misc1 & XILINX_DP_MISC1_Y_ONLY)
+ dp_debugfs.old_output_fmt = XILINX_DP_MISC1_Y_ONLY;
+ else
+ dp_debugfs.old_output_fmt = dp->config.misc0 &
+ XILINX_DP_MISC0_FORMAT_MASK;
+
+ ret = xilinx_dp_update_output_format(output_fmt, num_colors);
+ if (!ret)
+ dp_debugfs.testcase = DP_TC_OUTPUT_FMT;
+ return ret;
+}
+
+/*
+ * LINK_RATE read handler: report the sink-side DP_LINK_BW_SET value into
+ * @kern_buff (allocated and freed by the caller) and disarm the testcase.
+ * Returns 0 on success or the AUX error code.
+ */
+static ssize_t xilinx_dp_debugfs_max_linkrate_read(char **kern_buff)
+{
+	struct xilinx_drm_dp *dp = dp_debugfs.dp;
+	size_t output_str_len;
+	u8 dpcd_link_bw;
+	int ret;
+
+	dp_debugfs.testcase = DP_TC_NONE;
+	dp_debugfs.link_rate = 0;
+
+	/* Getting Sink Side Link Rate */
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_LINK_BW_SET, &dpcd_link_bw);
+	if (ret < 0) {
+		dev_err(dp->dev, "Failed to read link rate via AUX.\n");
+		/*
+		 * Do not kfree(*kern_buff) here: xilinx_dp_debugfs_read()
+		 * frees the buffer on any error return, and freeing it in
+		 * both places was a double free.
+		 */
+		return ret;
+	}
+
+	/*
+	 * +1 leaves room for the NUL terminator; sizing the bound with
+	 * strlen("255") alone made snprintf() truncate 3-digit values.
+	 */
+	output_str_len = strlen(XILINX_DP_DEBUGFS_UINT8_MAX_STR) + 1;
+	output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+			       output_str_len);
+	snprintf(*kern_buff, output_str_len, "%u", dpcd_link_bw);
+
+	return 0;
+}
+
+/*
+ * LANE_COUNT read handler: report the sink-side DP_LANE_COUNT_SET value
+ * into @kern_buff (allocated and freed by the caller) and disarm the
+ * testcase. Returns 0 on success or the AUX error code.
+ */
+static ssize_t xilinx_dp_debugfs_max_lanecnt_read(char **kern_buff)
+{
+	struct xilinx_drm_dp *dp = dp_debugfs.dp;
+	size_t output_str_len;
+	u8 dpcd_lane_cnt;
+	int ret;
+
+	dp_debugfs.testcase = DP_TC_NONE;
+	dp_debugfs.lane_cnt = 0;
+
+	/* Getting Sink Side Lane Count */
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &dpcd_lane_cnt);
+	if (ret < 0) {
+		/* was a copy-paste of the link-rate message */
+		dev_err(dp->dev, "Failed to read lane count via AUX.\n");
+		/*
+		 * Do not kfree(*kern_buff) here: xilinx_dp_debugfs_read()
+		 * frees the buffer on any error return, and freeing it in
+		 * both places was a double free.
+		 */
+		return ret;
+	}
+
+	dpcd_lane_cnt &= DP_LANE_COUNT_MASK;
+	/* +1 keeps the NUL so 3-digit values are not truncated */
+	output_str_len = strlen(XILINX_DP_DEBUGFS_UINT8_MAX_STR) + 1;
+	output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+			       output_str_len);
+	snprintf(*kern_buff, output_str_len, "%u", dpcd_lane_cnt);
+
+	return 0;
+}
+
+/*
+ * OUTPUT_DISPLAY_FORMAT read handler: restore the format saved by the
+ * matching write, report "Success" into @kern_buff (owned by the caller)
+ * and disarm the testcase.
+ */
+static ssize_t
+xilinx_dp_debugfs_output_display_format_read(char **kern_buff)
+{
+	int ret;
+	struct xilinx_drm_dp *dp = dp_debugfs.dp;
+	u8 old_output_fmt = dp_debugfs.old_output_fmt;
+	size_t output_str_len;
+	u32 num_colors;
+
+	dp_debugfs.testcase = DP_TC_NONE;
+
+	if (old_output_fmt == XILINX_DP_MISC0_RGB) {
+		num_colors = 3;
+	} else if (old_output_fmt == XILINX_DP_MISC0_YCRCB_422) {
+		num_colors = 2;
+	} else if (old_output_fmt == XILINX_DP_MISC0_YCRCB_444) {
+		num_colors = 3;
+	} else if (old_output_fmt == XILINX_DP_MISC1_Y_ONLY) {
+		num_colors = 1;
+	} else {
+		dev_err(dp->dev, "Invalid output format in misc0\n");
+		return -EINVAL;
+	}
+
+	ret = xilinx_dp_update_output_format(old_output_fmt, num_colors);
+	if (ret)
+		return ret;
+
+	/*
+	 * +1 for the NUL terminator; bounding with strlen("Success")
+	 * made snprintf() emit the truncated string "Succes".
+	 */
+	output_str_len = strlen("Success") + 1;
+	output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+			       output_str_len);
+	snprintf(*kern_buff, output_str_len, "%s", "Success");
+
+	return 0;
+}
+
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+/* static: the table is only referenced from this file */
+static struct xilinx_dp_debugfs_request dp_debugfs_reqs[] = {
+	{"LINK_RATE", DP_TC_LINK_RATE,
+	 xilinx_dp_debugfs_max_linkrate_read,
+	 xilinx_dp_debugfs_max_linkrate_write},
+	{"LANE_COUNT", DP_TC_LANE_COUNT,
+	 xilinx_dp_debugfs_max_lanecnt_read,
+	 xilinx_dp_debugfs_max_lanecnt_write},
+	{"OUTPUT_DISPLAY_FORMAT", DP_TC_OUTPUT_FMT,
+	 xilinx_dp_debugfs_output_display_format_read,
+	 xilinx_dp_debugfs_output_display_format_write},
+};
+
+/*
+ * debugfs "testcase" read: single-shot report of the armed testcase's
+ * result (or a "nothing ran" message), then the testcase is disarmed.
+ */
+static ssize_t xilinx_dp_debugfs_read(struct file *f, char __user *buf,
+				      size_t size, loff_t *pos)
+{
+	char *kern_buff = NULL;
+	size_t kern_buff_len, out_str_len;
+	int ret;
+
+	/* size_t can't be negative; only zero-length reads are invalid */
+	if (size == 0)
+		return -EINVAL;
+
+	/* Everything is returned at offset 0; later reads are EOF */
+	if (*pos != 0)
+		return 0;
+
+	kern_buff = kzalloc(XILINX_DP_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+	if (!kern_buff) {
+		dp_debugfs.testcase = DP_TC_NONE;
+		return -ENOMEM;
+	}
+
+	if (dp_debugfs.testcase == DP_TC_NONE) {
+		/* +1 keeps the NUL so the message is not truncated */
+		out_str_len = strlen("No testcase executed") + 1;
+		out_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+				    out_str_len);
+		snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+	} else {
+		ret = dp_debugfs_reqs[dp_debugfs.testcase].read_handler(
+				&kern_buff);
+		if (ret) {
+			kfree(kern_buff);
+			return ret;
+		}
+	}
+
+	kern_buff_len = strlen(kern_buff);
+	size = min(size, kern_buff_len);
+
+	ret = copy_to_user(buf, kern_buff, size);
+
+	kfree(kern_buff);
+	/*
+	 * copy_to_user() returns the number of bytes NOT copied; passing
+	 * that through as a positive return would be misread as a byte
+	 * count by userspace, so report -EFAULT instead.
+	 */
+	if (ret)
+		return -EFAULT;
+
+	/* Push *pos past the data so the next read hits the EOF check */
+	*pos = size + 1;
+	return size;
+}
+
+/*
+ * debugfs "testcase" write: parse "<TESTCASE> [arg]" from userspace and
+ * dispatch to the matching write handler, which arms the testcase.
+ */
+static ssize_t
+xilinx_dp_debugfs_write(struct file *f, const char __user *buf,
+			size_t size, loff_t *pos)
+{
+	char *kern_buff, *kern_buff_start;
+	char *dp_test_req;
+	int ret;
+	int i;
+
+	if (*pos != 0 || size == 0)
+		return -EINVAL;
+
+	/* A previously armed testcase must be consumed by a read first */
+	if (dp_debugfs.testcase != DP_TC_NONE)
+		return -EBUSY;
+
+	/*
+	 * Allocate one extra byte: strncpy_from_user() does not NUL-
+	 * terminate when the user data fills the whole buffer, and the
+	 * strsep()/strcasecmp() below require a terminated C string.
+	 * kzalloc() guarantees the extra byte stays zero.
+	 */
+	kern_buff = kzalloc(size + 1, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+	kern_buff_start = kern_buff;
+
+	ret = strncpy_from_user(kern_buff, buf, size);
+	if (ret < 0) {
+		kfree(kern_buff_start);
+		return ret;
+	}
+
+	/* Read the testcase name and argument from an user request */
+	dp_test_req = strsep(&kern_buff, " ");
+
+	for (i = 0; i < ARRAY_SIZE(dp_debugfs_reqs); i++) {
+		if (!strcasecmp(dp_test_req, dp_debugfs_reqs[i].req))
+			if (!dp_debugfs_reqs[i].write_handler(&kern_buff)) {
+				kfree(kern_buff_start);
+				return size;
+			}
+	}
+
+	kfree(kern_buff_start);
+	return -EINVAL;
+}
+
+/* debugfs "testcase" file: write arms a testcase, read reports and clears it */
+static const struct file_operations fops_xilinx_dp_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dp_debugfs_read,
+ .write = xilinx_dp_debugfs_write,
+};
+
+/*
+ * Create the debugfs tree: /sys/kernel/debug/dp/testcase.
+ * NOTE(review): newer kernels make debugfs_create_* return ERR_PTR()
+ * rather than NULL on failure, so the !ptr checks may never fire -
+ * confirm against the target kernel version.
+ * NOTE(review): mode 0444 makes the file read-only even though a write
+ * handler is wired up; verify whether 0644 was intended.
+ */
+static int xilinx_dp_debugfs_init(struct xilinx_drm_dp *dp)
+{
+ int err;
+ struct dentry *xilinx_dp_debugfs_dir, *xilinx_dp_debugfs_file;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.dp = dp;
+
+ xilinx_dp_debugfs_dir = debugfs_create_dir("dp", NULL);
+ if (!xilinx_dp_debugfs_dir) {
+ dev_err(dp->dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dp_debugfs_file =
+ debugfs_create_file("testcase", 0444, xilinx_dp_debugfs_dir,
+ NULL, &fops_xilinx_dp_dbgfs);
+ if (!xilinx_dp_debugfs_file) {
+ dev_err(dp->dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dp_debugfs_dir);
+ xilinx_dp_debugfs_dir = NULL;
+ return err;
+}
+
+/**
+ * xilinx_dp_debugfs_mode_config - Apply debugfs-forced link overrides
+ * @dp: DisplayPort IP core structure
+ * @lanes: in/out lane count to be used for link training
+ * @rate_code: in/out link bandwidth code to be used for link training
+ * @pclock: pixel clock of the mode being configured (KHz)
+ *
+ * Clamp the link parameters to the values forced through debugfs, but only
+ * when the resulting bandwidth can still carry @pclock; otherwise leave
+ * @lanes and @rate_code untouched.
+ */
+static void xilinx_dp_debugfs_mode_config(struct xilinx_drm_dp *dp, u8 *lanes,
+					  u8 *rate_code, int pclock)
+{
+	int debugfs_rate = 0;
+	u8 bpp = dp->config.bpp;
+
+	/* Nothing forced through debugfs: keep the computed values */
+	if (!dp_debugfs.link_rate && !dp_debugfs.lane_cnt)
+		return;
+
+	if (dp_debugfs.link_rate) {
+		debugfs_rate = min(dp_debugfs.link_rate, *rate_code);
+		debugfs_rate =
+			drm_dp_bw_code_to_link_rate(debugfs_rate);
+		debugfs_rate =
+			xilinx_drm_dp_max_rate(debugfs_rate, *lanes, bpp);
+	}
+
+	if (dp_debugfs.lane_cnt) {
+		u8 lane;
+
+		lane = min(dp_debugfs.lane_cnt, *lanes);
+		debugfs_rate =
+			xilinx_drm_dp_max_rate(debugfs_rate, lane, bpp);
+	}
+
+	if (pclock > debugfs_rate) {
+		/* fixed typo: "could't" */
+		dev_dbg(dp->dev, "debugfs couldn't configure link values\n");
+		return;
+	}
+
+	if (dp_debugfs.link_rate)
+		*rate_code = dp_debugfs.link_rate;
+	if (dp_debugfs.lane_cnt)
+		*lanes = dp_debugfs.lane_cnt;
+}
+#else
+/* Debugfs support compiled out: stubs keep the call sites unconditional */
+static int xilinx_dp_debugfs_init(struct xilinx_drm_dp *dp)
+{
+ return 0;
+}
+
+static void xilinx_dp_debugfs_mode_config(struct xilinx_drm_dp *dp, u8 *lanes,
+ u8 *rate_code, int pclock)
+{
+}
+#endif /* DRM_XILINX_DP_DEBUG_FS */
+
+/**
+ * xilinx_drm_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux related commands, native or i2c aux
+ * read/write, are submitted through this function. The function is mapped to
+ * the transfer function of struct drm_dp_aux. This function involves in
+ * multiple register reads/writes, thus synchronization is needed, and it is
+ * done by drm_dp_helper using @hw_mutex. The calling thread goes into sleep
+ * if there's no immediate reply to the command submission. The reply code is
+ * returned at @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
+static int xilinx_drm_dp_aux_cmd_submit(struct xilinx_drm_dp *dp, u32 cmd,
+ u16 addr, u8 *buf, u8 bytes, u8 *reply)
+{
+ bool is_read = (cmd & AUX_READ_BIT) ? true : false;
+ void __iomem *iomem = dp->iomem;
+ u32 reg, i;
+
+ /* A request still in flight means the core can't take another */
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST)
+ return -EBUSY;
+
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_ADDRESS, addr);
+
+ /* Writes stage their payload in the FIFO before issuing the command */
+ if (!is_read)
+ for (i = 0; i < bytes; i++)
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_WRITE_FIFO,
+ buf[i]);
+
+ reg = cmd << XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT;
+ /* No payload makes this an address-only transaction */
+ if (!buf || !bytes)
+ reg |= XILINX_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
+ else
+ reg |= (bytes - 1) << XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT;
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_COMMAND, reg);
+
+ /* Wait for reply to be delivered upto 2ms */
+ for (i = 0; ; i++) {
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY)
+ break;
+
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
+ i == 2)
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1100);
+ }
+
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_AUX_REPLY_CODE);
+ if (reply)
+ *reply = reg;
+
+ /* On an ACKed read, drain the reply FIFO; short reads are an error */
+ if (is_read &&
+ (reg == XILINX_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
+ reg == XILINX_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_REPLY_DATA_CNT);
+ if ((reg & XILINX_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
+ return -EIO;
+
+ for (i = 0; i < bytes; i++)
+ buf[i] = xilinx_drm_readl(iomem,
+ XILINX_DP_TX_AUX_REPLY_DATA);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if PHY is ready. If PHY is not ready, wait 1ms to check for 100 times.
+ * This amount of delay was suggested by IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int xilinx_drm_dp_phy_ready(struct xilinx_drm_dp *dp)
+{
+ u32 i, reg, ready, lane;
+
+ /* Ready mask: one status bit per configured lane ... */
+ lane = dp->config.max_lanes;
+ ready = (1 << lane) - 1;
+ /* ... plus the FPGA PLL lock bit on non-subsystem (soft IP) cores */
+ if (!dp->dp_sub)
+ ready |= XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED;
+
+ /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ DRM_ERROR("PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ /* Not reached: the loop above always returns */
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (Kilo-bytes / sec)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
+ */
+static inline int xilinx_drm_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
+	int link_bytes = link_rate * lane_num;
+
+	/* Each link byte carries 8 bits; bpp bits make up one pixel. */
+	return link_bytes * 8 / bpp;
+}
+
+/**
+ * xilinx_drm_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values, rate and lane count for requested pixel
+ * clock @pclock. The @pclock is stored in the mode to be used in other
+ * functions later. The returned rate is downshifted from the current rate
+ * @current_bw.
+ *
+ * Return: Current link rate code, or -EINVAL.
+ */
+static int xilinx_drm_dp_mode_configure(struct xilinx_drm_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ /* Training at the lowest rate already failed: nothing left to try */
+ if (current_bw == DP_LINK_BW_1_62)
+ return -EINVAL;
+
+ xilinx_dp_debugfs_mode_config(dp, &max_lanes, &max_link_rate_code,
+ pclock);
+
+ /*
+ * Pick the highest supported code strictly below @current_bw. The
+ * scan cannot fall off the low end: current_bw == 1.62G was
+ * rejected above, and max_link_rate_code is at least 1.62G.
+ */
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
+
+ /* Smallest lane count (1, 2, 4) whose bandwidth covers @pclock */
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = xilinx_drm_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_dbg(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
+
+/**
+ * xilinx_drm_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void xilinx_drm_dp_adjust_train(struct xilinx_drm_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 max_preemphasis;
+ u8 i;
+
+ /* Use the strongest swing/pre-emphasis requested across all lanes */
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ /* The ZynqMP DP subsystem PHY tops out at pre-emphasis level 2 */
+ max_preemphasis = (dp->dp_sub) ? DP_TRAIN_PRE_EMPH_LEVEL_2 :
+ DP_TRAIN_PRE_EMPH_LEVEL_3;
+
+ if (preemphasis >= max_preemphasis)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ /* All lanes are driven with the same (max) settings */
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * xilinx_drm_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from sink. The mapped values
+ * are predefined, and values(vs, pe, pc) are from the device manual.
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int xilinx_drm_dp_update_vs_emph(struct xilinx_drm_dp *dp)
+{
+	u8 *train_set = dp->train_set;
+	u8 i, v_level, p_level;
+	int ret;
+	/* 0xff marks swing/pre-emphasis combinations the core can't drive */
+	static u8 vs[4][4] = { { 0x2a, 0x27, 0x24, 0x20 },
+			       { 0x27, 0x23, 0x20, 0xff },
+			       { 0x24, 0x20, 0xff, 0xff },
+			       { 0xff, 0xff, 0xff, 0xff } };
+	static u8 pe[4][4] = { { 0x2, 0x2, 0x2, 0x2 },
+			       { 0x1, 0x1, 0x1, 0xff },
+			       { 0x0, 0x0, 0xff, 0xff },
+			       { 0xff, 0xff, 0xff, 0xff } };
+
+	/* Tell the sink the levels we are about to drive */
+	ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+				dp->mode.lane_cnt);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < dp->mode.lane_cnt; i++) {
+		v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+			  DP_TRAIN_VOLTAGE_SWING_SHIFT;
+		p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+		if (dp->phy[i]) {
+			/* ZynqMP: the levels are applied in the GT PHY */
+			u32 reg = XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+			xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+			xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+			xilinx_drm_writel(dp->iomem, reg, 0x2);
+		} else {
+			u32 reg;
+
+			/*
+			 * Per-lane registers are spaced 4 bytes apart, so
+			 * index with 'i * 4' (as the dp_sub branch above
+			 * does); the previous 'i + 4' addressed wrong,
+			 * unaligned offsets for every lane.
+			 */
+			reg = XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 + i * 4;
+			xilinx_drm_writel(dp->iomem, reg, vs[p_level][v_level]);
+			reg = XILINX_DP_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+			xilinx_drm_writel(dp->iomem, reg, pe[p_level][v_level]);
+			reg = XILINX_DP_TX_PHY_POSTCURSOR_LANE_0 + i * 4;
+			xilinx_drm_writel(dp->iomem, reg, 0);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int xilinx_drm_dp_link_train_cr(struct xilinx_drm_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ /* TPS1 with scrambling disabled, on both the core and the sink */
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Iterate until clock recovery locks, max swing is reached on all
+ * lanes, or the same voltage has been retried DP_MAX_TRAINING_TRIES
+ * times; 512 is a hard cap well above the worst case for 4 lanes
+ * and 4 swing/pre-emphasis levels.
+ */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = xilinx_drm_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ /* Give up once every lane is already at maximum swing */
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+
+ if (i == lane_cnt)
+ break;
+
+ /* Count retries at an unchanged voltage level */
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ xilinx_drm_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization train is done successfully, or
+ * corresponding error code.
+ */
+static int xilinx_drm_dp_link_train_ce(struct xilinx_drm_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ /* TPS3 requires DP 1.2 support on both the IP core and the sink */
+ if (dp->config.dp_version == DP_V1_2 &&
+ dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = xilinx_drm_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ xilinx_drm_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int xilinx_drm_dp_train(struct xilinx_drm_dp *dp)
+{
+	u32 reg;
+	u8 bw_code = dp->mode.bw_code;
+	u8 lane_cnt = dp->mode.lane_cnt;
+	u8 aux_lane_cnt = lane_cnt;
+	bool enhanced;
+	int ret;
+
+	xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LANE_CNT_SET, lane_cnt);
+
+	enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+	if (enhanced) {
+		xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENHANCED_FRAME_EN, 1);
+		aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	}
+
+	/*
+	 * Bit 0 of DP_MAX_DOWNSPREAD (DPCD offset 3) advertises 0.5%
+	 * downspread support; use the named offset, not a magic '3'.
+	 */
+	if (dp->dpcd[DP_MAX_DOWNSPREAD] & 0x1) {
+		xilinx_drm_writel(dp->iomem, XILINX_DP_TX_DOWNSPREAD_CTL, 1);
+		drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+				   DP_SPREAD_AMP_0_5);
+	} else {
+		xilinx_drm_writel(dp->iomem, XILINX_DP_TX_DOWNSPREAD_CTL, 0);
+		drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+	}
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+	if (ret < 0) {
+		DRM_ERROR("failed to set lane count\n");
+		return ret;
+	}
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+				 DP_SET_ANSI_8B10B);
+	if (ret < 0) {
+		DRM_ERROR("failed to set ANSI 8B/10B encoding\n");
+		return ret;
+	}
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+	if (ret < 0) {
+		DRM_ERROR("failed to set DP bandwidth\n");
+		return ret;
+	}
+
+	xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LINK_BW_SET, bw_code);
+
+	/* PHY clock feedback must match the selected link rate */
+	switch (bw_code) {
+	case DP_LINK_BW_1_62:
+		reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+		break;
+	case DP_LINK_BW_2_7:
+		reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+		break;
+	case DP_LINK_BW_5_4:
+	default:
+		reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+		break;
+	}
+
+	xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+			  reg);
+	ret = xilinx_drm_dp_phy_ready(dp);
+	if (ret < 0)
+		return ret;
+
+	xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 1);
+
+	/* sizeof instead of a hard-coded 4: tracks DP_MAX_LANES */
+	memset(dp->train_set, 0, sizeof(dp->train_set));
+
+	ret = xilinx_drm_dp_link_train_cr(dp);
+	if (ret)
+		return ret;
+
+	ret = xilinx_drm_dp_link_train_ce(dp);
+	if (ret)
+		return ret;
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+				 DP_TRAINING_PATTERN_DISABLE);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable training pattern\n");
+		return ret;
+	}
+	xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET,
+			  DP_TRAINING_PATTERN_DISABLE);
+
+	xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 0);
+
+	return 0;
+}
+
+/**
+ * xilinx_drm_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void xilinx_drm_dp_train_loop(struct xilinx_drm_dp *dp)
+{
+ struct xilinx_drm_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
+ do {
+ /* Sink unplugged while training: nothing left to do */
+ if (dp->status == connector_status_disconnected)
+ return;
+
+ ret = xilinx_drm_dp_train(dp);
+ if (!ret)
+ return;
+
+ /* Downshift to the next lower rate and retry */
+ ret = xilinx_drm_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ return;
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+ DRM_ERROR("failed to train the DP link\n");
+}
+
+/**
+ * xilinx_drm_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the axi clock, so
+ * this function gets the axi clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
+ *
+ * Return: 0 on success, error value otherwise
+ */
+static int xilinx_drm_dp_init_aux(struct xilinx_drm_dp *dp)
+{
+ int clock_rate;
+ u32 reg, w;
+
+ clock_rate = clk_get_rate(dp->aclk);
+ if (clock_rate < XILINX_DP_TX_CLK_DIVIDER_MHZ) {
+ DRM_ERROR("aclk should be higher than 1MHz\n");
+ return -EINVAL;
+ }
+
+ /* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
+ for (w = 8; w <= 48; w += 8) {
+ /* AUX pulse width should be between 0.4 to 0.6 usec */
+ /* clock_rate is in Hz, so these bounds are 0.4us and 0.6us */
+ if (w >= (4 * clock_rate / 10000000) &&
+ w <= (6 * clock_rate / 10000000))
+ break;
+ }
+
+ /* Loop fell through (w == 56): no legal filter width fits */
+ if (w > 48) {
+ DRM_ERROR("aclk frequency too high\n");
+ return -EINVAL;
+ }
+ reg = w << XILINX_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
+
+ /* Divider that derives the 1MHz AUX clock from aclk */
+ reg |= clock_rate / XILINX_DP_TX_CLK_DIVIDER_MHZ;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_CLK_DIVIDER, reg);
+
+ if (dp->dp_sub)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_SUB_TX_INTR_EN,
+ XILINX_DP_TX_INTR_ALL);
+ else
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INTR_MASK,
+ (u32)~XILINX_DP_TX_INTR_ALL);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 1);
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned from the callee functions.
+ */
+static int xilinx_drm_dp_init_phy(struct xilinx_drm_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ /* phy_init() on a NULL phy is a no-op per the kernel PHY API */
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+
+ /* Mask interrupts until the PHY is out of reset */
+ if (dp->dp_sub)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_SUB_TX_INTR_DS,
+ XILINX_DP_TX_INTR_ALL);
+ else
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INTR_MASK,
+ XILINX_DP_TX_INTR_ALL);
+
+ xilinx_drm_clr(dp->iomem, XILINX_DP_TX_PHY_CONFIG,
+ XILINX_DP_TX_PHY_CONFIG_ALL_RESET);
+
+ /* Wait for PLL to be locked for the primary (1st) */
+ if (dp->phy[0]) {
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Tear down every lane PHY. Failures are only logged: there is no
+ * meaningful recovery on the exit path.
+ */
+static void xilinx_drm_dp_exit_phy(struct xilinx_drm_dp *dp)
+{
+	unsigned int lane;
+
+	for (lane = 0; lane < dp->config.max_lanes; lane++) {
+		int err = phy_exit(dp->phy[lane]);
+
+		if (err)
+			dev_err(dp->dev,
+				"failed to exit phy (%d) %d\n", lane, err);
+	}
+}
+
+/*
+ * drm encoder dpms hook. ON: resume, enable audio clock, power up the
+ * PHY, wake the sink and retrain the link. Any other state reverses
+ * the sequence and suspends.
+ */
+static void xilinx_drm_dp_dpms(struct drm_encoder *encoder, int dpms)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+ unsigned int i;
+ int ret;
+
+ if (dp->dpms == dpms)
+ return;
+
+ dp->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ pm_runtime_get_sync(dp->dev);
+
+ /* Audio is best-effort: a clock failure doesn't abort power-up */
+ if (dp->aud_clk && !dp->aud_clk_enabled) {
+ ret = clk_prepare_enable(dp->aud_clk);
+ if (ret) {
+ dev_err(dp->dev, "failed to enable aud_clk\n");
+ } else {
+ xilinx_drm_writel(iomem,
+ XILINX_DP_TX_AUDIO_CONTROL,
+ 1);
+ dp->aud_clk_enabled = true;
+ }
+ }
+ xilinx_drm_writel(iomem, XILINX_DP_TX_PHY_POWER_DOWN, 0);
+
+ if (dp->status == connector_status_connected) {
+ /* Up to 3 attempts to wake the sink over AUX */
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ usleep_range(300, 500);
+ }
+ /* Some monitors take time to wake up properly */
+ msleep(xilinx_drm_dp_power_on_delay_ms);
+ if (ret != 1)
+ dev_dbg(dp->dev, "DP aux failed\n");
+ else
+ xilinx_drm_dp_train_loop(dp);
+ }
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SW_RESET,
+ XILINX_DP_TX_SW_RESET_ALL);
+ xilinx_drm_writel(iomem, XILINX_DP_TX_ENABLE_MAIN_STREAM, 1);
+
+ return;
+ default:
+ /* Stop the stream, put the sink to D3, power the PHY down */
+ xilinx_drm_writel(iomem, XILINX_DP_TX_ENABLE_MAIN_STREAM, 0);
+ if (dp->status == connector_status_connected) {
+ drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ }
+ xilinx_drm_writel(iomem, XILINX_DP_TX_PHY_POWER_DOWN,
+ XILINX_DP_TX_PHY_POWER_DOWN_ALL);
+ if (dp->aud_clk && dp->aud_clk_enabled) {
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUDIO_CONTROL, 0);
+ clk_disable_unprepare(dp->aud_clk);
+ dp->aud_clk_enabled = false;
+ }
+ pm_runtime_put_sync(dp->dev);
+
+ return;
+ }
+}
+
+/* Required encoder-slave hook; this encoder keeps no state to save */
+static void xilinx_drm_dp_save(struct drm_encoder *encoder)
+{
+ /* no op */
+}
+
+/* Required encoder-slave hook; this encoder keeps no state to restore */
+static void xilinx_drm_dp_restore(struct drm_encoder *encoder)
+{
+ /* no op */
+}
+
+#define XILINX_DP_SUB_TX_MIN_H_BACKPORCH 20
+
+/*
+ * Widen the horizontal backporch to the ZynqMP minimum while keeping the
+ * refresh rate: htotal grows and the pixel clock is scaled to match.
+ */
+static bool xilinx_drm_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ int diff = mode->htotal - mode->hsync_end;
+
+ /*
+ * ZynqMP DP requires horizontal backporch to be greater than 12.
+ * This limitation may conflict with the sink device.
+ */
+ if (dp->dp_sub && diff < XILINX_DP_SUB_TX_MIN_H_BACKPORCH) {
+ /* Integer vrefresh (Hz); some precision is lost here */
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
+ diff = XILINX_DP_SUB_TX_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return true;
+}
+
+/*
+ * Reject modes whose pixel clock exceeds either the IP core's configured
+ * maximum or the bandwidth of the negotiated link (rate x lanes / bpp).
+ */
+static int xilinx_drm_dp_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ u32 max_pclock = dp->config.max_pclock;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ /* max_pclock == 0 means the core imposes no fixed limit */
+ if (max_pclock && mode->clock > max_pclock)
+ return MODE_CLOCK_HIGH;
+
+ rate = xilinx_drm_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/**
+ * xilinx_drm_dp_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit, and caculate all transfer unit size related values.
+ * Calculation is based on DP and IP core specification.
+ */
+static void xilinx_drm_dp_mode_set_transfer_unit(struct xilinx_drm_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u32 tu = XILINX_DP_TX_DEF_TRANSFER_UNIT_SIZE;
+ u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
+
+ /* Use the max transfer unit size (default) */
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRANSFER_UNIT_SIZE, tu);
+
+ /* avg_bytes_per_tu is in units of 1/1000: integer + fractional part */
+ vid_kbytes = mode->clock * (dp->config.bpp / 8);
+ bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MIN_BYTES_PER_TU,
+ avg_bytes_per_tu / 1000);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_FRAC_BYTES_PER_TU,
+ avg_bytes_per_tu % 1000);
+
+ /* Configure the initial wait cycle based on transfer unit size */
+ if (tu < (avg_bytes_per_tu / 1000))
+ init_wait = 0;
+ else if ((avg_bytes_per_tu / 1000) <= 4)
+ init_wait = tu;
+ else
+ init_wait = tu - avg_bytes_per_tu / 1000;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INIT_WAIT, init_wait);
+}
+
+/**
+ * xilinx_drm_dp_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
+static void xilinx_drm_dp_mode_set_stream(struct xilinx_drm_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 reg, wpl;
+
+ /* Program the full timing set: totals, sync widths, active area,
+ * and active-start offsets, all taken straight from @mode. */
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HTOTAL,
+ mode->htotal);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VTOTAL,
+ mode->vtotal);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_POLARITY,
+ (!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
+ XILINX_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
+ (!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
+ XILINX_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HSWIDTH,
+ mode->hsync_end - mode->hsync_start);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VSWIDTH,
+ mode->vsync_end - mode->vsync_start);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HRES,
+ mode->hdisplay);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VRES,
+ mode->vdisplay);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HSTART,
+ mode->htotal - mode->hsync_start);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VSTART,
+ mode->vtotal - mode->vsync_start);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+
+ /* In synchronous mode, set the dividers: M/N pair for video is
+ * pixel clock / link rate, both in kHz. */
+ if (dp->config.misc0 & XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC) {
+ reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_N_VID, reg);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_M_VID, mode->clock);
+ if (dp->aud_clk) {
+ int aud_rate = clk_get_rate(dp->aud_clk);
+
+ dev_dbg(dp->dev, "Audio rate: %d\n", aud_rate / 512);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_N_AUD,
+ reg);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_M_AUD,
+ aud_rate / 1000);
+ }
+ }
+
+ /* Only 2 channel is supported now */
+ /* NOTE(review): 1 is written for 2 channels — presumably the register
+ * holds (channel count - 1); confirm against the register spec. */
+ if (dp->aud_clk)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_CHANNELS, 1);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_USER_PIXEL_WIDTH, 1);
+
+ /* Translate to the native 16 bit datapath based on IP core spec */
+ wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
+ reg = wpl + wpl % lane_cnt - lane_cnt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_USER_DATA_CNT_PER_LANE, reg);
+}
+
+/*
+ * Encoder .mode_set callback: negotiate a link configuration for the
+ * adjusted mode's pixel clock, then program the main stream timing and
+ * transfer unit registers.
+ */
+static void xilinx_drm_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ int ret;
+
+ /* NOTE(review): a configure failure silently skips stream setup —
+ * the callback returns void, so there is no way to report it up. */
+ ret = xilinx_drm_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0)
+ return;
+
+ xilinx_drm_dp_mode_set_stream(dp, adjusted_mode);
+ xilinx_drm_dp_mode_set_transfer_unit(dp, adjusted_mode);
+}
+
+/*
+ * Connector detection: sample the HPD signal state. When a sink is present,
+ * cache its DPCD receiver capabilities and clamp the usable link rate and
+ * lane count to the lesser of the sink's and the IP core's maxima. A failed
+ * DPCD read is reported as disconnected.
+ */
+static enum drm_connector_status
+xilinx_drm_dp_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ struct xilinx_drm_dp_link_config *link_config = &dp->link_config;
+ u32 state;
+ int ret;
+
+ state = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+ if (state & XILINX_DP_TX_INTR_SIGNAL_STATE_HPD) {
+ dp->status = connector_status_connected;
+ /* Read the full receiver-capability block from DPCD 0x0 */
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0)
+ goto disconnected;
+
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ dp->config.max_link_rate);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->config.max_lanes);
+ return dp->status;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return dp->status;
+}
+
+/*
+ * Fetch the sink's EDID over the AUX DDC channel, publish it on the
+ * connector, and add the advertised modes. Returns the number of modes
+ * added, or 0 when no EDID could be read.
+ */
+static int xilinx_drm_dp_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ /* drm_get_edid allocates; we own the buffer once modes are added */
+ kfree(edid);
+
+ return ret;
+}
+
+/* Slave-encoder callbacks wired up by xilinx_drm_dp_encoder_init() */
+static struct drm_encoder_slave_funcs xilinx_drm_dp_encoder_funcs = {
+ .dpms = xilinx_drm_dp_dpms,
+ .save = xilinx_drm_dp_save,
+ .restore = xilinx_drm_dp_restore,
+ .mode_fixup = xilinx_drm_dp_mode_fixup,
+ .mode_valid = xilinx_drm_dp_mode_valid,
+ .mode_set = xilinx_drm_dp_mode_set,
+ .detect = xilinx_drm_dp_detect,
+ .get_modes = xilinx_drm_dp_get_modes,
+};
+
+/*
+ * Bind this driver instance to a slave encoder: stash the private data and
+ * callback table on @encoder, remember the base encoder for HPD/IRQ use,
+ * and bring up the AUX channel. Returns 0 or a negative error code.
+ */
+static int xilinx_drm_dp_encoder_init(struct platform_device *pdev,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct xilinx_drm_dp *dp = platform_get_drvdata(pdev);
+
+ encoder->slave_priv = dp;
+ encoder->slave_funcs = &xilinx_drm_dp_encoder_funcs;
+
+ dp->encoder = &encoder->base;
+
+ return xilinx_drm_dp_init_aux(dp);
+}
+
+/*
+ * Threaded IRQ handler. Reads the interrupt status (register offset differs
+ * for the standalone core vs. the DP subsystem), acks it, then dispatches:
+ * vblank to the subsystem helper, HPD events to the DRM HPD machinery, and
+ * HPD IRQs to a link-status check that retrains the link when needed.
+ */
+static irqreturn_t xilinx_drm_dp_irq_handler(int irq, void *data)
+{
+ struct xilinx_drm_dp *dp = (struct xilinx_drm_dp *)data;
+ u32 reg, status;
+
+ reg = dp->dp_sub ?
+ XILINX_DP_SUB_TX_INTR_STATUS : XILINX_DP_TX_INTR_STATUS;
+ status = xilinx_drm_readl(dp->iomem, reg);
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg(dp->dev, "underflow interrupt\n");
+ if (status & XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg(dp->dev, "overflow interrupt\n");
+
+ /* Write the status back — presumably W1C, acking the handled bits */
+ xilinx_drm_writel(dp->iomem, reg, status);
+
+ if (status & XILINX_DP_TX_INTR_VBLANK_START)
+ xilinx_drm_dp_sub_handle_vblank(dp->dp_sub);
+
+ if (status & XILINX_DP_TX_INTR_HPD_EVENT)
+ drm_helper_hpd_irq_event(dp->encoder->dev);
+
+ if (status & XILINX_DP_TX_INTR_HPD_IRQ) {
+ /* NOTE(review): this array shadows the outer u32 'status';
+ * consider renaming to avoid confusion. Reading from
+ * DP_SINK_COUNT (0x200) means [2] is DP_LANE0_1_STATUS and
+ * [4] is DP_LANE_ALIGN_STATUS_UPDATED per the DPCD layout. */
+ u8 status[DP_LINK_STATUS_SIZE + 2];
+ int ret;
+
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (ret < 0)
+ goto handled;
+
+ /* Retrain when the sink reports a link-status change or a
+ * degraded link on the active lanes */
+ if (status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt))
+ xilinx_drm_dp_train_loop(dp);
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
+/*
+ * drm_dp_aux .transfer callback: submit one AUX command, retrying every
+ * ~400 usec until it succeeds or the configured timeout (in msec) elapses.
+ * Returns the transferred size on success, -ENODEV when the sink is gone,
+ * or the last submission error.
+ */
+static ssize_t
+xilinx_drm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct xilinx_drm_dp *dp = container_of(aux, struct xilinx_drm_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = xilinx_drm_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
+
+ for (i = 0; i < iter; i++) {
+ ret = xilinx_drm_dp_aux_cmd_submit(dp, msg->request,
+ msg->address, msg->buffer,
+ msg->size, &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ /* Don't keep hammering a disconnected sink */
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no aux dev\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
+
+/*
+ * Parse the device-tree configuration into dp->config: DP version, lane
+ * count, link rate, colorimetry / color depth (folded into the MISC0/MISC1
+ * main-stream attribute values), and the optional pixel-clock cap. All
+ * mandatory properties are validated; returns 0 or a negative error code.
+ */
+static int xilinx_drm_dp_parse_of(struct xilinx_drm_dp *dp)
+{
+ struct device_node *node = dp->dev->of_node;
+ struct xilinx_drm_dp_config *config = &dp->config;
+ const char *string;
+ u32 num_colors, bpc;
+ bool sync;
+ int ret;
+
+ ret = of_property_read_string(node, "xlnx,dp-version", &string);
+ if (ret < 0) {
+ dev_err(dp->dev, "No DP version in DT\n");
+ return ret;
+ }
+
+ if (strcmp(string, "v1.1a") == 0) {
+ config->dp_version = DP_V1_1A;
+ } else if (strcmp(string, "v1.2") == 0) {
+ config->dp_version = DP_V1_2;
+ } else {
+ dev_err(dp->dev, "Invalid DP version in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-lanes", &config->max_lanes);
+ if (ret < 0) {
+ dev_err(dp->dev, "No lane count in DT\n");
+ return ret;
+ }
+
+ /* DP supports only 1, 2, or 4 lane configurations */
+ if (config->max_lanes != 1 && config->max_lanes != 2 &&
+ config->max_lanes != 4) {
+ dev_err(dp->dev, "Invalid max lanes in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-link-rate",
+ &config->max_link_rate);
+ if (ret < 0) {
+ dev_err(dp->dev, "No link rate in DT\n");
+ return ret;
+ }
+
+ /* Must be one of RBR / HBR / HBR2 */
+ if (config->max_link_rate != DP_REDUCED_BIT_RATE &&
+ config->max_link_rate != DP_HIGH_BIT_RATE &&
+ config->max_link_rate != DP_HIGH_BIT_RATE2) {
+ dev_err(dp->dev, "Invalid link rate in DT\n");
+ return -EINVAL;
+ }
+
+ config->enable_yonly = of_property_read_bool(node, "xlnx,enable-yonly");
+ config->enable_ycrcb = of_property_read_bool(node, "xlnx,enable-ycrcb");
+
+ sync = of_property_read_bool(node, "xlnx,sync");
+ if (sync)
+ config->misc0 |= XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC;
+
+ /* "colormetry" (sic) is the spelling used by the DT binding */
+ ret = of_property_read_string(node, "xlnx,colormetry", &string);
+ if (ret < 0) {
+ dev_err(dp->dev, "No colormetry in DT\n");
+ return ret;
+ }
+
+ /* YCrCb / Y-only formats are valid only when the IP was built with
+ * the corresponding enable flag */
+ if (strcmp(string, "rgb") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_RGB;
+ num_colors = 3;
+ } else if (config->enable_ycrcb && strcmp(string, "ycrcb422") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_YCRCB_422;
+ num_colors = 2;
+ } else if (config->enable_ycrcb && strcmp(string, "ycrcb444") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_YCRCB_444;
+ num_colors = 3;
+ } else if (config->enable_yonly && strcmp(string, "yonly") == 0) {
+ config->misc1 |= XILINX_DP_MISC1_Y_ONLY;
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colormetry in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-bpc", &config->max_bpc);
+ if (ret < 0) {
+ dev_err(dp->dev, "No max bpc in DT\n");
+ return ret;
+ }
+
+ if (config->max_bpc != 8 && config->max_bpc != 10 &&
+ config->max_bpc != 12 && config->max_bpc != 16) {
+ dev_err(dp->dev, "Invalid max bpc in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,bpc", &bpc);
+ if (ret < 0) {
+ dev_err(dp->dev, "No color depth(bpc) in DT\n");
+ return ret;
+ }
+
+ if (bpc > config->max_bpc) {
+ dev_err(dp->dev, "Invalid color depth(bpc) in DT\n");
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ config->misc0 |= XILINX_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= XILINX_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= XILINX_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= XILINX_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= XILINX_DP_MISC0_BPC_16;
+ break;
+ default:
+ dev_err(dp->dev, "Not supported color depth in DT\n");
+ return -EINVAL;
+ }
+
+ /* Total bits per pixel = components per pixel * bits per component */
+ config->bpp = num_colors * bpc;
+
+ /* Optional: absence leaves max_pclock at 0 (no cap) */
+ of_property_read_u32(node, "xlnx,max-pclock-frequency",
+ &config->max_pclock);
+
+ return 0;
+}
+
+/* System-sleep suspend: power down the PHY lanes */
+static int __maybe_unused xilinx_drm_dp_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_dp *dp = dev_get_drvdata(dev);
+
+ xilinx_drm_dp_exit_phy(dp);
+
+ return 0;
+}
+
+/*
+ * System-sleep resume: re-init the PHY and AUX channel, then kick an HPD
+ * event so DRM re-probes connector state after the sleep.
+ */
+static int __maybe_unused xilinx_drm_dp_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_dp *dp = dev_get_drvdata(dev);
+
+ xilinx_drm_dp_init_phy(dp);
+ xilinx_drm_dp_init_aux(dp);
+ drm_helper_hpd_irq_event(dp->encoder->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xilinx_drm_dp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_dp_pm_suspend,
+ xilinx_drm_dp_pm_resume)
+};
+
+/*
+ * Platform probe: parse DT config, grab clocks (aud_clk is optional), the
+ * optional DP subsystem handle, and the register space; reset the TX core;
+ * acquire per-lane PHYs (DP subsystem case only); register the AUX channel
+ * and the threaded IRQ; and finally sanity-check the core version/direction.
+ * Resources are unwound through the cascading error labels at the bottom.
+ */
+static int xilinx_drm_dp_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp *dp;
+ struct resource *res;
+ u32 version, i;
+ int irq, ret;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dpms = DRM_MODE_DPMS_OFF;
+ dp->status = connector_status_disconnected;
+ dp->dev = &pdev->dev;
+
+ ret = xilinx_drm_dp_parse_of(dp);
+ if (ret < 0)
+ return ret;
+
+ dp->aclk = devm_clk_get(dp->dev, "aclk");
+ if (IS_ERR(dp->aclk))
+ return PTR_ERR(dp->aclk);
+
+ ret = clk_prepare_enable(dp->aclk);
+ if (ret) {
+ dev_err(dp->dev, "failed to enable the aclk\n");
+ return ret;
+ }
+
+ /* aud_clk is optional: a missing clock just disables audio, but a
+ * deferral must still be propagated */
+ dp->aud_clk = devm_clk_get(dp->dev, "aud_clk");
+ if (IS_ERR(dp->aud_clk)) {
+ ret = PTR_ERR(dp->aud_clk);
+ if (ret == -EPROBE_DEFER)
+ goto error_aclk;
+ dp->aud_clk = NULL;
+ dev_dbg(dp->dev, "failed to get the aud_clk:\n");
+ }
+
+ dp->dp_sub = xilinx_drm_dp_sub_of_get(pdev->dev.of_node);
+ if (IS_ERR(dp->dp_sub)) {
+ ret = PTR_ERR(dp->dp_sub);
+ goto error_aclk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dp->iomem = devm_ioremap_resource(dp->dev, res);
+ if (IS_ERR(dp->iomem)) {
+ ret = PTR_ERR(dp->iomem);
+ goto error_dp_sub;
+ }
+
+ platform_set_drvdata(pdev, dp);
+
+ /* Put the TX core into a known state: PHY down, all blocks in reset,
+ * scrambler reset forced, main link disabled */
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_POWER_DOWN,
+ XILINX_DP_TX_PHY_POWER_DOWN_ALL);
+ xilinx_drm_set(dp->iomem, XILINX_DP_TX_PHY_CONFIG,
+ XILINX_DP_TX_PHY_CONFIG_ALL_RESET);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_FORCE_SCRAMBLER_RESET, 1);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 0);
+
+ if (dp->dp_sub) {
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ char phy_name[16];
+
+ snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
+ dp->phy[i] = devm_phy_get(dp->dev, phy_name);
+ if (IS_ERR(dp->phy[i])) {
+ dev_err(dp->dev, "failed to get phy lane\n");
+ ret = PTR_ERR(dp->phy[i]);
+ dp->phy[i] = NULL;
+ goto error_dp_sub;
+ }
+ }
+ }
+
+ ret = xilinx_drm_dp_init_phy(dp);
+ if (ret)
+ goto error_phy;
+
+ dp->aux.name = "Xilinx DP AUX";
+ dp->aux.dev = dp->dev;
+ dp->aux.transfer = xilinx_drm_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ goto error;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error;
+ }
+
+ ret = devm_request_threaded_irq(dp->dev, irq, NULL,
+ xilinx_drm_dp_irq_handler, IRQF_ONESHOT,
+ dev_name(dp->dev), dp);
+ if (ret < 0)
+ goto error;
+
+ version = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_VERSION);
+
+ dev_info(dp->dev, "device found, version %u.%02x%x\n",
+ ((version & XILINX_DP_TX_VERSION_MAJOR_MASK) >>
+ XILINX_DP_TX_VERSION_MAJOR_SHIFT),
+ ((version & XILINX_DP_TX_VERSION_MINOR_MASK) >>
+ XILINX_DP_TX_VERSION_MINOR_SHIFT),
+ ((version & XILINX_DP_TX_VERSION_REVISION_MASK) >>
+ XILINX_DP_TX_VERSION_REVISION_SHIFT));
+
+ version = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_CORE_ID);
+ if (version & XILINX_DP_TX_CORE_ID_DIRECTION) {
+ dev_err(dp->dev, "Receiver is not supported\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dp->dev, "Display Port, version %u.%02x%02x (tx)\n",
+ ((version & XILINX_DP_TX_CORE_ID_MAJOR_MASK) >>
+ XILINX_DP_TX_CORE_ID_MAJOR_SHIFT),
+ ((version & XILINX_DP_TX_CORE_ID_MINOR_MASK) >>
+ XILINX_DP_TX_CORE_ID_MINOR_SHIFT),
+ ((version & XILINX_DP_TX_CORE_ID_REVISION_MASK) >>
+ XILINX_DP_TX_CORE_ID_REVISION_SHIFT));
+
+ pm_runtime_enable(dp->dev);
+
+ xilinx_dp_debugfs_init(dp);
+
+ return 0;
+
+/*
+ * NOTE(review): jumps to 'error' from a failed drm_dp_aux_register() still
+ * run drm_dp_aux_unregister() on an unregistered aux, and jumps to
+ * 'error_dp_sub' fall through xilinx_drm_dp_exit_phy() before the PHYs were
+ * initialized — confirm both helpers tolerate that.
+ */
+error:
+ drm_dp_aux_unregister(&dp->aux);
+error_dp_sub:
+ xilinx_drm_dp_sub_put(dp->dp_sub);
+error_phy:
+ xilinx_drm_dp_exit_phy(dp);
+error_aclk:
+ clk_disable_unprepare(dp->aclk);
+ return ret;
+}
+
+/*
+ * Platform remove: disable runtime PM and the main link, then release the
+ * AUX channel, PHYs, DP subsystem reference, and clocks — the reverse of
+ * what probe acquired (devm handles the rest).
+ */
+static int xilinx_drm_dp_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp *dp = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(dp->dev);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 0);
+
+ drm_dp_aux_unregister(&dp->aux);
+ xilinx_drm_dp_exit_phy(dp);
+ xilinx_drm_dp_sub_put(dp->dp_sub);
+
+ /* aud_clk is only enabled on demand; check the tracking flag */
+ if (dp->aud_clk && dp->aud_clk_enabled)
+ clk_disable_unprepare(dp->aud_clk);
+ clk_disable_unprepare(dp->aclk);
+
+ return 0;
+}
+
+/* Device-tree match table for the standalone DP TX core */
+static const struct of_device_id xilinx_drm_dp_of_match[] = {
+ { .compatible = "xlnx,v-dp", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_dp_of_match);
+
+/* Platform driver plus the Xilinx DRM encoder-init hook in one bundle */
+static struct drm_platform_encoder_driver xilinx_drm_dp_driver = {
+ .platform_driver = {
+ .probe = xilinx_drm_dp_probe,
+ .remove = xilinx_drm_dp_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xilinx-drm-dp",
+ .of_match_table = xilinx_drm_dp_of_match,
+ .pm = &xilinx_drm_dp_pm_ops,
+ },
+ },
+
+ .encoder_init = xilinx_drm_dp_encoder_init,
+};
+
+/* Module entry/exit: register/unregister the embedded platform driver */
+static int __init xilinx_drm_dp_init(void)
+{
+ return platform_driver_register(&xilinx_drm_dp_driver.platform_driver);
+}
+
+static void __exit xilinx_drm_dp_exit(void)
+{
+ platform_driver_unregister(&xilinx_drm_dp_driver.platform_driver);
+}
+
+module_init(xilinx_drm_dp_init);
+module_exit(xilinx_drm_dp_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+/* Fixed typo: "DiplayPort" -> "DisplayPort" in the module description */
+MODULE_DESCRIPTION("Xilinx DRM KMS DisplayPort Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c
new file mode 100644
index 000000000000..e3a68b36fafb
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c
@@ -0,0 +1,2265 @@
+/*
+ * DisplayPort subsystem support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+
+/* Blender registers */
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_0 0x0
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_1 0x4
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_2 0x8
+#define XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT 0x14
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL 0x18
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define XILINX_DP_SUB_V_BLEND_NUM_COEFF 9
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF0 0x44
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF1 0x48
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF2 0x4c
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF3 0x50
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF4 0x54
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF5 0x58
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF6 0x5c
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF7 0x60
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF8 0x64
+#define XILINX_DP_SUB_V_BLEND_NUM_OFFSET 3
+#define XILINX_DP_SUB_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define XILINX_DP_SUB_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define XILINX_DP_SUB_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define XILINX_DP_SUB_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define XILINX_DP_SUB_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF0 0x80
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF1 0x84
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF2 0x88
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF3 0x8c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF4 0x90
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF5 0x94
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF6 0x98
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF7 0x9c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF8 0xa0
+#define XILINX_DP_SUB_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define XILINX_DP_SUB_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define XILINX_DP_SUB_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define XILINX_DP_SUB_AV_BUF_FMT 0x0
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_SHIFT 0
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define XILINX_DP_SUB_AV_BUF_NON_LIVE_LATENCY 0x8
+#define XILINX_DP_SUB_AV_BUF_CHBUF 0x10
+#define XILINX_DP_SUB_AV_BUF_CHBUF_EN BIT(0)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_FLUSH BIT(1)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define XILINX_DP_SUB_AV_BUF_STATUS 0x28
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL 0x2c
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EN BIT(0)
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define XILINX_DP_SUB_AV_BUF_STC_INIT_VALUE0 0x30
+#define XILINX_DP_SUB_AV_BUF_STC_INIT_VALUE1 0x34
+#define XILINX_DP_SUB_AV_BUF_STC_ADJ 0x38
+#define XILINX_DP_SUB_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define XILINX_DP_SUB_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define XILINX_DP_SUB_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define XILINX_DP_SUB_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define XILINX_DP_SUB_AV_BUF_STC_SNAPSHOT0 0x60
+#define XILINX_DP_SUB_AV_BUF_STC_SNAPSHOT1 0x64
+#define XILINX_DP_SUB_AV_BUF_OUTPUT 0x70
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_PL (0 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define XILINX_DP_SUB_AV_BUF_HCOUNT_VCOUNT_INT0 0x74
+#define XILINX_DP_SUB_AV_BUF_HCOUNT_VCOUNT_INT1 0x78
+#define XILINX_DP_SUB_AV_BUF_PATTERN_GEN_SELECT 0x100
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC 0x120
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS BIT(0)
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS BIT(1)
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING BIT(2)
+#define XILINX_DP_SUB_AV_BUF_SRST_REG 0x124
+#define XILINX_DP_SUB_AV_BUF_SRST_REG_VID_RST BIT(1)
+#define XILINX_DP_SUB_AV_BUF_AUDIO_CH_CONFIG 0x12c
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP0_SF 0x200
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP1_SF 0x204
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP2_SF 0x208
+#define XILINX_DP_SUB_AV_BUF_VID_COMP0_SF 0x20c
+#define XILINX_DP_SUB_AV_BUF_VID_COMP1_SF 0x210
+#define XILINX_DP_SUB_AV_BUF_VID_COMP2_SF 0x214
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP0_SF 0x218
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP1_SF 0x21c
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP2_SF 0x220
+#define XILINX_DP_SUB_AV_BUF_4BIT_SF 0x11111
+#define XILINX_DP_SUB_AV_BUF_5BIT_SF 0x10842
+#define XILINX_DP_SUB_AV_BUF_6BIT_SF 0x10410
+#define XILINX_DP_SUB_AV_BUF_8BIT_SF 0x10101
+#define XILINX_DP_SUB_AV_BUF_10BIT_SF 0x10040
+#define XILINX_DP_SUB_AV_BUF_NULL_SF 0
+#define XILINX_DP_SUB_AV_BUF_NUM_SF 3
+#define XILINX_DP_SUB_AV_BUF_LIVE_CB_CR_SWAP 0x224
+#define XILINX_DP_SUB_AV_BUF_PALETTE_MEMORY 0x400
+
+/* Audio registers */
+#define XILINX_DP_SUB_AUD_MIXER_VOLUME 0x0
+#define XILINX_DP_SUB_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
+#define XILINX_DP_SUB_AUD_MIXER_META_DATA 0x4
+#define XILINX_DP_SUB_AUD_CH_STATUS0 0x8
+#define XILINX_DP_SUB_AUD_CH_STATUS1 0xc
+#define XILINX_DP_SUB_AUD_CH_STATUS2 0x10
+#define XILINX_DP_SUB_AUD_CH_STATUS3 0x14
+#define XILINX_DP_SUB_AUD_CH_STATUS4 0x18
+#define XILINX_DP_SUB_AUD_CH_STATUS5 0x1c
+#define XILINX_DP_SUB_AUD_CH_A_DATA0 0x20
+#define XILINX_DP_SUB_AUD_CH_A_DATA1 0x24
+#define XILINX_DP_SUB_AUD_CH_A_DATA2 0x28
+#define XILINX_DP_SUB_AUD_CH_A_DATA3 0x2c
+#define XILINX_DP_SUB_AUD_CH_A_DATA4 0x30
+#define XILINX_DP_SUB_AUD_CH_A_DATA5 0x34
+#define XILINX_DP_SUB_AUD_CH_B_DATA0 0x38
+#define XILINX_DP_SUB_AUD_CH_B_DATA1 0x3c
+#define XILINX_DP_SUB_AUD_CH_B_DATA2 0x40
+#define XILINX_DP_SUB_AUD_CH_B_DATA3 0x44
+#define XILINX_DP_SUB_AUD_CH_B_DATA4 0x48
+#define XILINX_DP_SUB_AUD_CH_B_DATA5 0x4c
+#define XILINX_DP_SUB_AUD_SOFT_RESET 0xc00
+#define XILINX_DP_SUB_AUD_SOFT_RESET_AUD_SRST BIT(0)
+
+#define XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS 4
+#define XILINX_DP_SUB_AV_BUF_NUM_BUFFERS 6
+
+/**
+ * enum xilinx_drm_dp_sub_layer_type - Layer type
+ * @XILINX_DRM_DP_SUB_LAYER_VID: video layer
+ * @XILINX_DRM_DP_SUB_LAYER_GFX: graphics layer
+ *
+ * The enum values presumably double as indices into
+ * xilinx_drm_dp_sub.layers[] — confirm against the header's
+ * XILINX_DRM_DP_SUB_NUM_LAYERS definition.
+ */
+enum xilinx_drm_dp_sub_layer_type {
+ XILINX_DRM_DP_SUB_LAYER_VID,
+ XILINX_DRM_DP_SUB_LAYER_GFX
+};
+
+/**
+ * struct xilinx_drm_dp_sub_layer - DP subsystem layer
+ * @id: layer ID
+ * @offset: layer offset in the register space
+ * @avail: flag if layer is available
+ * @primary: flag for primary plane
+ * @enabled: flag if the layer is enabled
+ * @fmt: format descriptor
+ * @drm_fmts: array of supported DRM formats
+ * @num_fmts: number of supported DRM formats
+ * @w: width
+ * @h: height
+ * @other: other layer (the VID layer points to GFX and vice versa)
+ */
+struct xilinx_drm_dp_sub_layer {
+ enum xilinx_drm_dp_sub_layer_type id;
+ u32 offset;
+ bool avail;
+ bool primary;
+ bool enabled;
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+ u32 *drm_fmts;
+ unsigned int num_fmts;
+ u32 w;
+ u32 h;
+ struct xilinx_drm_dp_sub_layer *other;
+};
+
+/*
+ * The three hardware sub-blocks of the DP subsystem each get a thin wrapper
+ * holding the pre-calculated MMIO base of their register window.
+ */
+
+/**
+ * struct xilinx_drm_dp_sub_blend - DP subsystem blender
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_blend {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_av_buf - DP subsystem av buffer manager
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_av_buf {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_aud - DP subsystem audio
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_aud {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub - DP subsystem
+ * @dev: device structure
+ * @blend: blender device
+ * @av_buf: av buffer manager device
+ * @aud: audio device
+ * @layers: layers
+ * @list: entry in the global DP subsystem list (xilinx_drm_dp_sub_list,
+ *        guarded by xilinx_drm_dp_sub_lock)
+ * @vblank_fn: vblank handler
+ * @vblank_data: vblank data to be used in vblank_fn
+ * @vid_clk_pl: flag if the clock is from PL
+ * @alpha: stored global alpha value
+ * @alpha_en: flag if the global alpha is enabled
+ */
+struct xilinx_drm_dp_sub {
+ struct device *dev;
+ struct xilinx_drm_dp_sub_blend blend;
+ struct xilinx_drm_dp_sub_av_buf av_buf;
+ struct xilinx_drm_dp_sub_aud aud;
+ struct xilinx_drm_dp_sub_layer layers[XILINX_DRM_DP_SUB_NUM_LAYERS];
+ struct list_head list;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+ bool vid_clk_pl;
+ u32 alpha;
+ bool alpha_en;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_fmt - DP subsystem format mapping
+ * @drm_fmt: drm format
+ * @dp_sub_fmt: DP subsystem format
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for up to 3 color components
+ * @name: format name
+ */
+struct xilinx_drm_dp_sub_fmt {
+ u32 drm_fmt;
+ u32 dp_sub_fmt;
+ bool rgb;
+ bool swap;
+ bool chroma_sub;
+ u32 sf[3];
+ const char *name;
+};
+
+static LIST_HEAD(xilinx_drm_dp_sub_list);
+static DEFINE_MUTEX(xilinx_drm_dp_sub_lock);
+
+#ifdef CONFIG_DRM_XILINX_DP_SUB_DEBUG_FS
+#define XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL 0xFFF
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+enum xilinx_dp_sub_testcases {
+ DP_SUB_TC_BG_COLOR,
+ DP_SUB_TC_OUTPUT_FMT,
+ DP_SUB_TC_NONE
+};
+
+/* Debugfs test state: selected testcase plus its parameters */
+struct xilinx_dp_sub_debugfs {
+ enum xilinx_dp_sub_testcases testcase;
+ u16 r_value;
+ u16 g_value;
+ u16 b_value;
+ u32 output_fmt;
+ struct xilinx_drm_dp_sub *xilinx_dp_sub;
+};
+
+/* Single static instance: only one DP subsystem can be debugged at a time */
+static struct xilinx_dp_sub_debugfs dp_sub_debugfs;
+
+/* Maps a debugfs command string to its testcase and read/write handlers */
+struct xilinx_dp_sub_debugfs_request {
+ const char *req;
+ enum xilinx_dp_sub_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+/*
+ * Parse a debugfs argument string into a number. Returns -1 on a NULL or
+ * unparsable argument; callers treat the result as invalid unless it falls
+ * in a non-negative range, so the -1 sentinel cannot collide with valid
+ * input there.
+ */
+static s64 xilinx_dp_sub_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
+/*
+ * Program the blender's RGB-to-YCbCr conversion: write the 9 CSC matrix
+ * coefficients and the 3 output offsets into consecutive registers. Both
+ * arrays must hold at least XILINX_DP_SUB_V_BLEND_NUM_COEFF /
+ * XILINX_DP_SUB_V_BLEND_NUM_OFFSET entries respectively.
+ */
+static void
+xilinx_dp_sub_debugfs_update_v_blend(u16 *sdtv_coeffs, u32 *full_range_offsets)
+{
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+ u32 offset, i;
+
+ /* Hardcode SDTV coefficients. Can be runtime configurable */
+ offset = XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ sdtv_coeffs[i]);
+
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ full_range_offsets[i]);
+}
+
+/* Set the blender output video format to @fmt and load the matching CSC:
+ * SDTV RGB->YCbCr coefficients for YCbCr outputs, identity for RGB.
+ */
+static void xilinx_dp_sub_debugfs_output_format(u32 fmt)
+{
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT, fmt);
+
+ if (fmt != XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+
+ xilinx_dp_sub_debugfs_update_v_blend(sdtv_coeffs,
+ full_range_offsets);
+ } else {
+ /* In case of RGB set the reset values*/
+ u16 sdtv_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u32 full_range_offsets[] = { 0x0, 0x0, 0x0 };
+
+ xilinx_dp_sub_debugfs_update_v_blend(sdtv_coeffs,
+ full_range_offsets);
+ }
+}
+
+/* Parse "<r> <g> <b>" from the remaining command string and stash the
+ * background color; values are applied later from the vblank path by
+ * xilinx_drm_dp_sub_debugfs_bg_color(). Each component must fit in 12 bits.
+ */
+static ssize_t
+xilinx_dp_sub_debugfs_background_color_write(char **dp_sub_test_arg)
+{
+ char *r_color, *g_color, *b_color;
+ s64 r_val, g_val, b_val;
+
+ r_color = strsep(dp_sub_test_arg, " ");
+ g_color = strsep(dp_sub_test_arg, " ");
+ b_color = strsep(dp_sub_test_arg, " ");
+
+ /* char * to int conversion */
+ r_val = xilinx_dp_sub_debugfs_argument_value(r_color);
+ g_val = xilinx_dp_sub_debugfs_argument_value(g_color);
+ b_val = xilinx_dp_sub_debugfs_argument_value(b_color);
+
+ if (!(IN_RANGE(r_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL) &&
+ IN_RANGE(g_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL) &&
+ IN_RANGE(b_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL)))
+ return -EINVAL;
+
+ dp_sub_debugfs.r_value = r_val;
+ dp_sub_debugfs.g_value = g_val;
+ dp_sub_debugfs.b_value = b_val;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_BG_COLOR;
+
+ return 0;
+}
+
+/* Parse the requested output format keyword, save the current format
+ * register so the read handler can restore it, and switch the blender
+ * output. ycbcr422 additionally enables chroma downsampling.
+ */
+static ssize_t
+xilinx_dp_sub_debugfs_output_display_format_write(char **dp_sub_test_arg)
+{
+ char *output_format;
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+ u32 fmt;
+
+ /* Read the value from an user value */
+ output_format = strsep(dp_sub_test_arg, " ");
+ if (strncmp(output_format, "rgb", 3) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB;
+ } else if (strncmp(output_format, "ycbcr444", 8) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444;
+ } else if (strncmp(output_format, "ycbcr422", 8) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422;
+ fmt |= XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE;
+ } else if (strncmp(output_format, "yonly", 5) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY;
+ } else {
+ dev_err(dp_sub->dev, "Invalid output format\n");
+ return -EINVAL;
+ }
+
+ /* Remember the current format so the read handler can restore it */
+ dp_sub_debugfs.output_fmt =
+ xilinx_drm_readl(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT);
+
+ xilinx_dp_sub_debugfs_output_format(fmt);
+ dp_sub_debugfs.testcase = DP_SUB_TC_OUTPUT_FMT;
+
+ return 0;
+}
+
+/* End the OUTPUT_DISPLAY_FORMAT testcase: restore the saved output format
+ * and report "Success" to the reader.
+ *
+ * Return: 0 always.
+ */
+static ssize_t
+xilinx_dp_sub_debugfs_output_display_format_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ xilinx_dp_sub_debugfs_output_format(dp_sub_debugfs.output_fmt);
+
+ /* +1 leaves room for the '\0'; snprintf() with size strlen("Success")
+ * would truncate the output to "Succes".
+ */
+ out_str_len = strlen("Success") + 1;
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len, "%s", "Success");
+
+ return 0;
+}
+
+/* End the BACKGROUND_COLOR testcase: clear the stored color components and
+ * report "Success" to the reader.
+ *
+ * Return: 0 always.
+ */
+static ssize_t
+xilinx_dp_sub_debugfs_background_color_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ dp_sub_debugfs.r_value = 0;
+ dp_sub_debugfs.g_value = 0;
+ dp_sub_debugfs.b_value = 0;
+
+ /* +1 leaves room for the '\0'; snprintf() with size strlen("Success")
+ * would truncate the output to "Succes".
+ */
+ out_str_len = strlen("Success") + 1;
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len, "%s", "Success");
+
+ return 0;
+}
+
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry; the read path
+ * indexes this array directly by the enum value, so order must match.
+ */
+static struct xilinx_dp_sub_debugfs_request dp_sub_debugfs_reqs[] = {
+ {"BACKGROUND_COLOR", DP_SUB_TC_BG_COLOR,
+ xilinx_dp_sub_debugfs_background_color_read,
+ xilinx_dp_sub_debugfs_background_color_write},
+ {"OUTPUT_DISPLAY_FORMAT", DP_SUB_TC_OUTPUT_FMT,
+ xilinx_dp_sub_debugfs_output_display_format_read,
+ xilinx_dp_sub_debugfs_output_display_format_write},
+};
+
+/* debugfs write handler: copy the user command, split off the testcase
+ * keyword, and dispatch to the matching write handler. Only one testcase
+ * may be active at a time.
+ *
+ * Return: @size on success, -EBUSY while a testcase is active, -EINVAL on
+ * an unknown request, or a negative errno from allocation/copy.
+ */
+static ssize_t
+xilinx_dp_sub_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff, *dp_sub_test_req, *kern_buff_start;
+ int ret;
+ unsigned int i;
+
+ if (*pos != 0 || size == 0)
+ return -EINVAL;
+
+ if (dp_sub_debugfs.testcase != DP_SUB_TC_NONE)
+ return -EBUSY;
+
+ /* Allocate one extra byte: strncpy_from_user() does not terminate
+ * the destination when the source is @size bytes or longer, and the
+ * strsep()/strcasecmp() calls below require a NUL-terminated string.
+ */
+ kern_buff = kzalloc(size + 1, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+ /* Read the testcase name and argument from an user request */
+ dp_sub_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dp_sub_debugfs_reqs); i++) {
+ if (!strcasecmp(dp_sub_test_req, dp_sub_debugfs_reqs[i].req))
+ if (!dp_sub_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ }
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
+
+/* debugfs read handler: report the status of the active testcase via its
+ * read handler, or "No testcase executed" when idle. Returns data only for
+ * the first read (*pos == 0); subsequent reads see EOF.
+ *
+ * Return: number of bytes copied, 0 at EOF, or a negative errno.
+ */
+static ssize_t xilinx_dp_sub_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ int ret;
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ return -ENOMEM;
+ }
+
+ if (dp_sub_debugfs.testcase == DP_SUB_TC_NONE) {
+ /* +1 leaves room for the '\0' so the message is not truncated */
+ out_str_len = strlen("No testcase executed") + 1;
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+ } else {
+ ret = dp_sub_debugfs_reqs[dp_sub_debugfs.testcase].read_handler(
+ &kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+ ret = copy_to_user(buf, kern_buff, size);
+
+ kfree(kern_buff);
+ /* copy_to_user() returns the number of bytes NOT copied, which is
+ * not an errno; report any partial copy as -EFAULT.
+ */
+ if (ret)
+ return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
+/* File operations for the dp_sub/testcase debugfs node */
+static const struct file_operations fops_xilinx_dp_sub_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dp_sub_debugfs_read,
+ .write = xilinx_dp_sub_debugfs_write,
+};
+
+/* Create the "dp_sub/testcase" debugfs hierarchy and bind it to @dp_sub.
+ *
+ * Return: 0 on success, -ENODEV if debugfs creation fails.
+ */
+static int xilinx_dp_sub_debugfs_init(struct xilinx_drm_dp_sub *dp_sub)
+{
+ int err;
+ struct dentry *xilinx_dp_sub_debugfs_dir, *xilinx_dp_sub_debugfs_file;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ dp_sub_debugfs.xilinx_dp_sub = dp_sub;
+
+ xilinx_dp_sub_debugfs_dir = debugfs_create_dir("dp_sub", NULL);
+ if (!xilinx_dp_sub_debugfs_dir) {
+ dev_err(dp_sub->dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dp_sub_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dp_sub_debugfs_dir, NULL,
+ &fops_xilinx_dp_sub_dbgfs);
+ if (!xilinx_dp_sub_debugfs_file) {
+ dev_err(dp_sub->dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dp_sub_debugfs_dir);
+ xilinx_dp_sub_debugfs_dir = NULL;
+ return err;
+}
+
+/* Apply the stored debugfs background color to the blender registers;
+ * no-op unless the BACKGROUND_COLOR testcase is active. Called from the
+ * regular display path so the write lands while the testcase runs.
+ */
+static void xilinx_drm_dp_sub_debugfs_bg_color(struct xilinx_drm_dp_sub *dp_sub)
+{
+ if (dp_sub_debugfs.testcase == DP_SUB_TC_BG_COLOR) {
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_0,
+ dp_sub_debugfs.r_value);
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_1,
+ dp_sub_debugfs.g_value);
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_2,
+ dp_sub_debugfs.b_value);
+ }
+}
+#else
+/* Stubs when debugfs testcase support is compiled out */
+static int xilinx_dp_sub_debugfs_init(struct xilinx_drm_dp_sub *dp_sub)
+{
+ return 0;
+}
+
+static void xilinx_drm_dp_sub_debugfs_bg_color(struct xilinx_drm_dp_sub *dp_sub)
+{
+}
+#endif /* CONFIG_DRM_XILINX_DP_SUB_DEBUG_FS */
+
+/* Blender functions */
+
+/**
+ * xilinx_drm_dp_sub_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Enable a layer @layer: set the layer control (RGB flag and chroma
+ * upsampling), then program the per-layer input CSC coefficients and
+ * offsets for the layer's format.
+ */
+static void
+xilinx_drm_dp_sub_blend_layer_enable(struct xilinx_drm_dp_sub_blend *blend,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg, offset, i, s0, s1;
+ /* YCbCr -> RGB conversion coefficients (fixed point) */
+ u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+ 0x1000, 0x7483, 0x7a7f,
+ 0x1000, 0x0, 0x1c5a };
+ /* Identity matrix for RGB inputs (may still get columns swapped) */
+ u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u16 *coeffs;
+ u32 offsets[] = { 0x0, 0x1800, 0x1800 };
+
+ reg = layer->fmt->rgb ? XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_RGB : 0;
+ reg |= layer->fmt->chroma_sub ?
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_EN_US : 0;
+
+ xilinx_drm_writel(blend->base,
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL + layer->offset,
+ reg);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ offset = XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF0;
+ else
+ offset = XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF0;
+
+ /* s0/s1 select which coefficient columns to exchange for swapped
+ * formats: u/v columns for YUV, r/b columns for RGB.
+ */
+ if (!layer->fmt->rgb) {
+ coeffs = sdtv_coeffs;
+ s0 = 1;
+ s1 = 2;
+ } else {
+ coeffs = swap_coeffs;
+ s0 = 0;
+ s1 = 2;
+
+ /* No offset for RGB formats */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ offsets[i] = 0;
+ }
+
+ if (layer->fmt->swap) {
+ /* XOR swap of columns s0 and s1 in each row (s0 != s1) */
+ for (i = 0; i < 3; i++) {
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ coeffs[i * 3 + s1] ^= coeffs[i * 3 + s0];
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ }
+ }
+
+ /* Program coefficients. Can be runtime configurable */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(blend->base, offset + i * 4, coeffs[i]);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_IN1CSC_OFFSET;
+ else
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+ /* Program offsets. Can be runtime configurable */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Disable a layer @layer by clearing its layer control register
+ * (drops the RGB and upsampling enable bits).
+ */
+static void
+xilinx_drm_dp_sub_blend_layer_disable(struct xilinx_drm_dp_sub_blend *blend,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_writel(blend->base,
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL + layer->offset,
+ 0);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color, one register write per component.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_bg_color(struct xilinx_drm_dp_sub_blend *blend,
+ u32 c0, u32 c1, u32 c2)
+{
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_0, c0);
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_1, c1);
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_2, c2);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Set the alpha for blending. Read-modify-write keeps the enable bit
+ * (bit 0) intact; the alpha value occupies the bits above it, hence
+ * the shift by 1.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_alpha(struct xilinx_drm_dp_sub_blend *blend,
+ u32 alpha)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA);
+ reg &= ~XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA_MASK;
+ reg |= alpha << 1;
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA,
+ reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable/disable the global alpha blending based on @enable by toggling
+ * bit 0 of the global alpha register; the alpha value bits are untouched.
+ */
+static void
+xilinx_drm_dp_sub_blend_enable_alpha(struct xilinx_drm_dp_sub_blend *blend,
+ bool enable)
+{
+ if (enable)
+ xilinx_drm_set(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+ else
+ xilinx_drm_clr(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+/* Output formats supported by the blender; terminated by a zero-filled
+ * sentinel entry.
+ */
+static const struct xilinx_drm_dp_sub_fmt blend_output_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv444",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "yuv422",
+ }, {
+ }
+};
+
+/**
+ * xilinx_drm_dp_sub_blend_set_output_fmt - Set the output format
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt (single register write; @fmt may include
+ * the downsample enable bit for 4:2:2 outputs).
+ */
+static void
+xilinx_drm_dp_sub_blend_set_output_fmt(struct xilinx_drm_dp_sub_blend *blend,
+ u32 fmt)
+{
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT,
+ fmt);
+}
+
+/* AV buffer manager functions */
+
+/* Formats supported by the video (non-live) layer of the AV buffer
+ * manager; entries with .swap set reuse the same hardware format with
+ * swapped color components.
+ */
+static const struct xilinx_drm_dp_sub_fmt av_buf_vid_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "vyuy",
+ }, {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "uyvy",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuyv",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvyu",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv422",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu422",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv444",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu444",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv16",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv61",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgr888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "xbgr8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "xrgb8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "xbgr2101010",
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB2101010,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "xrgb2101010",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv420",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu420",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv12",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv21",
+ }, {
+ .drm_fmt = DRM_FORMAT_XV15,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "yuv42010b",
+ }, {
+ .drm_fmt = DRM_FORMAT_XV20,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "yuv42210b",
+ }
+};
+
+/* Formats supported by the graphics layer of the AV buffer manager */
+static const struct xilinx_drm_dp_sub_fmt av_buf_gfx_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "abgr8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "argb8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgba8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgra8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgr888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_BGR888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "rgba5551",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "bgra5551",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "rgba4444",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "bgra4444",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_6BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "rgb565",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_6BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "bgr565",
+ }
+};
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: formats
+ *
+ * Set the av buffer manager format to @fmt. @fmt should have valid values
+ * for both video and graphics layer (the two layers share one register).
+ */
+static void
+xilinx_drm_dp_sub_av_buf_set_fmt(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ u32 fmt)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT, fmt);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (which include video and graphics) of
+ * av buffer manager.
+ *
+ * Return: value of XILINX_DP_SUB_AV_BUF_FMT register.
+ */
+static u32
+xilinx_drm_dp_sub_av_buf_get_fmt(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ return xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock is from ps
+ *
+ * Set the video clock source based on @from_ps. It can come from either PS or
+ * PL. Read-modify-write preserves the other clock source bits.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_vid_clock_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool from_ps)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (from_ps)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Set the video timing source based on @internal. It can come externally or
+ * be generated internally. Read-modify-write preserves the clock source bits.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_vid_timing_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool internal)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (internal)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock is from ps
+ *
+ * Set the audio clock source based on @from_ps. It can come from either PS or
+ * PL. Read-modify-write preserves the video clock/timing bits.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_aud_clock_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool from_ps)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (from_ps)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable all (video and audio) buffers: the first
+ * XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS channel buffers get the video
+ * burst length, the remaining channels the audio burst length.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_buf(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ reg |= XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MAX <<
+ XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ reg |= XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+ XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ /* i continues from the video/graphics channels to the audio ones */
+ for (; i < XILINX_DP_SUB_AV_BUF_NUM_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Disable all (video and audio) buffers: request a flush with the channel
+ * enable bit cleared on every channel buffer.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_buf(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_FLUSH & ~XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Enable all audio buffers: route audio stream 1 from memory and enable
+ * audio stream 2.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_aud(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MEM;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset (write 0 to the soft reset register).
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_SRST_REG,
+ XILINX_DP_SUB_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable all audio buffers: mark audio stream 1 disabled and clear the
+ * audio stream 2 enable bit.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_aud(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_DISABLE;
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ *
+ * Enable the video/graphics buffer for @layer: select memory as the source
+ * for the corresponding output channel (VID1 = video, VID2 = graphics).
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_vid(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MEM;
+ } else {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MEM;
+ }
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer for @layer by selecting the none/
+ * disable source for the corresponding output channel.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_vid(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_NONE;
+ } else {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_DISABLE;
+ }
+
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_init_fmts - Initialize the layer formats
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize formats of both video and graphics layers. The two format
+ * fields occupy disjoint bit ranges of the same register, so they are
+ * simply OR-ed together.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_init_fmts(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt,
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt)
+{
+ u32 reg;
+
+ reg = vid_fmt->dp_sub_fmt;
+ reg |= gfx_fmt->dp_sub_fmt;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor (may be NULL to skip the video layer)
+ * @gfx_fmt: graphics format descriptor (may be NULL to skip graphics)
+ *
+ * Initialize scaling factors for both video and graphics layers, one
+ * register per color component.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_init_sf(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt,
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt)
+{
+ unsigned int i;
+ int offset;
+
+ if (gfx_fmt) {
+ offset = XILINX_DP_SUB_AV_BUF_GFX_COMP0_SF;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_SF; i++)
+ xilinx_drm_writel(av_buf->base, offset + i * 4,
+ gfx_fmt->sf[i]);
+ }
+
+ if (vid_fmt) {
+ offset = XILINX_DP_SUB_AV_BUF_VID_COMP0_SF;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_SF; i++)
+ xilinx_drm_writel(av_buf->base, offset + i * 4,
+ vid_fmt->sf[i]);
+ }
+}
+
+/* Audio functions */
+
+/**
+ * xilinx_drm_dp_sub_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with default mixer volume. The reset de-assertion will
+ * initialize the audio states.
+ */
+static void xilinx_drm_dp_sub_aud_init(struct xilinx_drm_dp_sub_aud *aud)
+{
+	/* Clear the audio soft reset register as it's a non-reset flop */
+	xilinx_drm_writel(aud->base, XILINX_DP_SUB_AUD_SOFT_RESET, 0);
+	xilinx_drm_writel(aud->base, XILINX_DP_SUB_AUD_MIXER_VOLUME,
+			  XILINX_DP_SUB_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * xilinx_drm_dp_sub_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio block in soft reset.
+ */
+static void xilinx_drm_dp_sub_aud_deinit(struct xilinx_drm_dp_sub_aud *aud)
+{
+	xilinx_drm_set(aud->base, XILINX_DP_SUB_AUD_SOFT_RESET,
+		       XILINX_DP_SUB_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/* DP subsystem layer functions */
+
+/**
+ * xilinx_drm_dp_sub_layer_check_size - Verify width and height for the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The DP subsystem has the limitation that both layers should have
+ * identical size. This function stores width and height of @layer, and verifies
+ * if the size (width and height) is valid.
+ *
+ * Return: 0 on success, or -EINVAL if width or/and height is invalid.
+ */
+int xilinx_drm_dp_sub_layer_check_size(struct xilinx_drm_dp_sub *dp_sub,
+				       struct xilinx_drm_dp_sub_layer *layer,
+				       u32 width, u32 height)
+{
+	struct xilinx_drm_dp_sub_layer *other = layer->other;
+
+	/* An enabled sibling layer fixes the only acceptable size */
+	if (other->enabled && (other->w != width || other->h != height)) {
+		dev_err(dp_sub->dev, "Layer width:height must be %d:%d\n",
+			other->w, other->h);
+		return -EINVAL;
+	}
+
+	layer->w = width;
+	layer->h = height;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_check_size);
+
+/**
+ * xilinx_drm_dp_sub_map_fmt - Find the DP subsystem format for given drm format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Search a DP subsystem format corresponding to the given DRM format @drm_fmt,
+ * and return the format descriptor which contains the DP subsystem format
+ * value.
+ *
+ * Return: a DP subsystem format descriptor on success, or NULL.
+ */
+static const struct xilinx_drm_dp_sub_fmt *
+xilinx_drm_dp_sub_map_fmt(const struct xilinx_drm_dp_sub_fmt fmts[],
+			  unsigned int size, u32 drm_fmt)
+{
+	const struct xilinx_drm_dp_sub_fmt *fmt = fmts;
+	const struct xilinx_drm_dp_sub_fmt *end = fmts + size;
+
+	for (; fmt < end; fmt++)
+		if (fmt->drm_fmt == drm_fmt)
+			return fmt;
+
+	return NULL;
+}
+
+/**
+ * xilinx_drm_dp_sub_layer_set_fmt - Set the format of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Set the format of the given layer to @drm_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+int xilinx_drm_dp_sub_layer_set_fmt(struct xilinx_drm_dp_sub *dp_sub,
+				    struct xilinx_drm_dp_sub_layer *layer,
+				    u32 drm_fmt)
+{
+	const struct xilinx_drm_dp_sub_fmt *fmt;
+	const struct xilinx_drm_dp_sub_fmt *vid_fmt = NULL, *gfx_fmt = NULL;
+	u32 size, fmts, mask;
+
+	/* Pick the table and register field for this layer */
+	if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+		size = ARRAY_SIZE(av_buf_vid_fmts);
+		mask = ~XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MASK;
+		fmt = xilinx_drm_dp_sub_map_fmt(av_buf_vid_fmts, size, drm_fmt);
+		vid_fmt = fmt;
+	} else {
+		size = ARRAY_SIZE(av_buf_gfx_fmts);
+		mask = ~XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_MASK;
+		fmt = xilinx_drm_dp_sub_map_fmt(av_buf_gfx_fmts, size, drm_fmt);
+		gfx_fmt = fmt;
+	}
+
+	if (!fmt)
+		return -EINVAL;
+
+	/* Update only this layer's field of the shared format register */
+	fmts = xilinx_drm_dp_sub_av_buf_get_fmt(&dp_sub->av_buf);
+	fmts &= mask;
+	fmts |= fmt->dp_sub_fmt;
+	xilinx_drm_dp_sub_av_buf_set_fmt(&dp_sub->av_buf, fmts);
+	xilinx_drm_dp_sub_av_buf_init_sf(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+
+	layer->fmt = fmt;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_set_fmt);
+
+/**
+ * xilinx_drm_dp_sub_layer_get_fmt - Get the format of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to get the format from
+ *
+ * Get the current format of the given layer.
+ *
+ * Return: DRM format of the layer
+ */
+u32 xilinx_drm_dp_sub_layer_get_fmt(struct xilinx_drm_dp_sub *dp_sub,
+				    struct xilinx_drm_dp_sub_layer *layer)
+{
+	return layer->fmt->drm_fmt;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get_fmt);
+
+/**
+ * xilinx_drm_dp_sub_layer_get_fmts - Get the supported DRM formats of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to get the formats
+ * @drm_fmts: pointer to array of DRM format codes
+ * @num_fmts: pointer to number of returned DRM formats
+ *
+ * Get the supported DRM formats of the given layer. The returned array is
+ * owned by @layer and must not be freed by the caller.
+ */
+void xilinx_drm_dp_sub_layer_get_fmts(struct xilinx_drm_dp_sub *dp_sub,
+				      struct xilinx_drm_dp_sub_layer *layer,
+				      u32 **drm_fmts,
+				      unsigned int *num_fmts)
+{
+	*drm_fmts = layer->drm_fmts;
+	*num_fmts = layer->num_fmts;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get_fmts);
+
+/**
+ * xilinx_drm_dp_sub_layer_enable - Enable the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to enable
+ *
+ * Enable the layer @layer. When both layers are enabled, the user-configured
+ * alpha settings are applied; otherwise the blender alpha is forced so that
+ * only the enabled layer is visible.
+ */
+void xilinx_drm_dp_sub_layer_enable(struct xilinx_drm_dp_sub *dp_sub,
+				    struct xilinx_drm_dp_sub_layer *layer)
+{
+	xilinx_drm_dp_sub_av_buf_enable_vid(&dp_sub->av_buf, layer);
+	xilinx_drm_dp_sub_blend_layer_enable(&dp_sub->blend, layer);
+	layer->enabled = true;
+	if (layer->other->enabled) {
+		xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend,
+						  dp_sub->alpha);
+		xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend,
+						     dp_sub->alpha_en);
+	} else {
+		u32 alpha;
+
+		/* Only one layer active: make it fully opaque */
+		if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+			alpha = 0;
+		else
+			alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+		xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+		xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, true);
+	}
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_enable);
+
+/**
+ * xilinx_drm_dp_sub_layer_disable - Disable the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to disable
+ *
+ * Disable the layer @layer. If the other layer remains enabled, the blender
+ * alpha is forced so that the remaining layer is fully visible.
+ */
+void xilinx_drm_dp_sub_layer_disable(struct xilinx_drm_dp_sub *dp_sub,
+				     struct xilinx_drm_dp_sub_layer *layer)
+{
+	xilinx_drm_dp_sub_av_buf_disable_vid(&dp_sub->av_buf, layer);
+	xilinx_drm_dp_sub_blend_layer_disable(&dp_sub->blend, layer);
+	layer->enabled = false;
+	if (layer->other->enabled) {
+		u32 alpha;
+
+		/* Make the surviving layer fully opaque */
+		if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+			alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+		else
+			alpha = 0;
+		xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+		xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, true);
+	}
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_disable);
+
+/**
+ * xilinx_drm_dp_sub_layer_get - Get the DP subsystem layer
+ * @dp_sub: DP subsystem
+ * @primary: flag to indicate the primary plane
+ *
+ * Check if there's any available layer based on the flag @primary, and return
+ * the found layer.
+ *
+ * Return: a DP subsystem layer on success, or -ENODEV error pointer.
+ */
+struct xilinx_drm_dp_sub_layer *
+xilinx_drm_dp_sub_layer_get(struct xilinx_drm_dp_sub *dp_sub, bool primary)
+{
+	struct xilinx_drm_dp_sub_layer *layer = NULL;
+	unsigned int i;
+
+	/* Find the layer whose primary flag matches the request */
+	for (i = 0; i < XILINX_DRM_DP_SUB_NUM_LAYERS; i++) {
+		if (dp_sub->layers[i].primary == primary) {
+			layer = &dp_sub->layers[i];
+			break;
+		}
+	}
+
+	/* NOTE(review): the layer is returned without clearing ->avail here;
+	 * presumably the caller marks it unavailable -- confirm.
+	 */
+	if (!layer || !layer->avail)
+		return ERR_PTR(-ENODEV);
+
+	return layer;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get);
+
+/**
+ * xilinx_drm_dp_sub_layer_put - Put the DP subsystem layer
+ * @dp_sub: DP subsystem
+ * @layer: DP subsystem layer
+ *
+ * Return the DP subsystem layer @layer when it's no longer used, marking it
+ * available again.
+ */
+void xilinx_drm_dp_sub_layer_put(struct xilinx_drm_dp_sub *dp_sub,
+				 struct xilinx_drm_dp_sub_layer *layer)
+{
+	layer->avail = true;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_put);
+
+/* DP subsystem functions */
+
+/**
+ * xilinx_drm_dp_sub_set_output_fmt - Set the output format
+ * @dp_sub: DP subsystem
+ * @drm_fmt: DRM format to set
+ *
+ * Set the output format of the DP subsystem blender to @drm_fmt.
+ *
+ * Return: 0 on success, or -EINVAL if @drm_fmt is not supported for output.
+ */
+int xilinx_drm_dp_sub_set_output_fmt(struct xilinx_drm_dp_sub *dp_sub,
+				     u32 drm_fmt)
+{
+	const struct xilinx_drm_dp_sub_fmt *fmt;
+
+	fmt = xilinx_drm_dp_sub_map_fmt(blend_output_fmts,
+					ARRAY_SIZE(blend_output_fmts), drm_fmt);
+	if (!fmt)
+		return -EINVAL;
+
+	xilinx_drm_dp_sub_blend_set_output_fmt(&dp_sub->blend, fmt->dp_sub_fmt);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_output_fmt);
+
+/**
+ * xilinx_drm_dp_sub_set_bg_color - Set the background color
+ * @dp_sub: DP subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the blender background color with given color components (@c0, @c1,
+ * @c2), and mirror the value into debugfs.
+ */
+void xilinx_drm_dp_sub_set_bg_color(struct xilinx_drm_dp_sub *dp_sub,
+				    u32 c0, u32 c1, u32 c2)
+{
+	xilinx_drm_dp_sub_blend_set_bg_color(&dp_sub->blend, c0, c1, c2);
+	xilinx_drm_dp_sub_debugfs_bg_color(dp_sub);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_bg_color);
+
+/**
+ * xilinx_drm_dp_sub_set_alpha - Set the alpha value
+ * @dp_sub: DP subsystem
+ * @alpha: alpha value to set
+ *
+ * Set the alpha value for blending. The hardware is only updated when both
+ * layers are enabled; otherwise the value is stored for later application.
+ */
+void xilinx_drm_dp_sub_set_alpha(struct xilinx_drm_dp_sub *dp_sub, u32 alpha)
+{
+	dp_sub->alpha = alpha;
+	if (dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].enabled &&
+	    dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].enabled)
+		xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_alpha);
+
+/**
+ * xilinx_drm_dp_sub_enable_alpha - Enable/disable the global alpha blending
+ * @dp_sub: DP subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable or disable global alpha blending. The hardware is only updated when
+ * both layers are enabled; otherwise the flag is stored for later application.
+ */
+void
+xilinx_drm_dp_sub_enable_alpha(struct xilinx_drm_dp_sub *dp_sub, bool enable)
+{
+	dp_sub->alpha_en = enable;
+	if (dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].enabled &&
+	    dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].enabled)
+		xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, enable);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable_alpha);
+
+/**
+ * xilinx_drm_dp_sub_handle_vblank - Vblank handling wrapper
+ * @dp_sub: DP subsystem
+ *
+ * Trigger the registered vblank handler, if any. This function is supposed to
+ * be called in the actual vblank handler.
+ */
+void xilinx_drm_dp_sub_handle_vblank(struct xilinx_drm_dp_sub *dp_sub)
+{
+	if (dp_sub->vblank_fn)
+		dp_sub->vblank_fn(dp_sub->vblank_data);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_handle_vblank);
+
+/**
+ * xilinx_drm_dp_sub_enable_vblank - Enable the vblank handling
+ * @dp_sub: DP subsystem
+ * @vblank_fn: callback to be called on vblank event
+ * @vblank_data: data to be used in @vblank_fn
+ *
+ * This function registers the vblank handler, which will be triggered
+ * on each subsequent vblank event.
+ */
+void xilinx_drm_dp_sub_enable_vblank(struct xilinx_drm_dp_sub *dp_sub,
+				     void (*vblank_fn)(void *),
+				     void *vblank_data)
+{
+	dp_sub->vblank_fn = vblank_fn;
+	dp_sub->vblank_data = vblank_data;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable_vblank);
+
+/**
+ * xilinx_drm_dp_sub_disable_vblank - Disable the vblank handling
+ * @dp_sub: DP subsystem
+ *
+ * Disable the vblank handler. The vblank handler and data are unregistered.
+ */
+void xilinx_drm_dp_sub_disable_vblank(struct xilinx_drm_dp_sub *dp_sub)
+{
+	dp_sub->vblank_fn = NULL;
+	dp_sub->vblank_data = NULL;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_disable_vblank);
+
+/**
+ * xilinx_drm_dp_sub_enable - Enable the DP subsystem
+ * @dp_sub: DP subsystem
+ *
+ * Enable the DP subsystem: bring up the AV buffer manager with the layers'
+ * current formats and clock/timing sources, then initialize the audio block.
+ */
+void xilinx_drm_dp_sub_enable(struct xilinx_drm_dp_sub *dp_sub)
+{
+	const struct xilinx_drm_dp_sub_fmt *vid_fmt;
+	const struct xilinx_drm_dp_sub_fmt *gfx_fmt;
+
+	vid_fmt = dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].fmt;
+	gfx_fmt = dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].fmt;
+	xilinx_drm_dp_sub_av_buf_enable(&dp_sub->av_buf);
+	xilinx_drm_dp_sub_av_buf_init_fmts(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+	xilinx_drm_dp_sub_av_buf_init_sf(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+	/* Video clock source: PS unless DT selected the PL clock */
+	xilinx_drm_dp_sub_av_buf_set_vid_clock_src(&dp_sub->av_buf,
+						   !dp_sub->vid_clk_pl);
+	xilinx_drm_dp_sub_av_buf_set_vid_timing_src(&dp_sub->av_buf, true);
+	xilinx_drm_dp_sub_av_buf_set_aud_clock_src(&dp_sub->av_buf, true);
+	xilinx_drm_dp_sub_av_buf_enable_buf(&dp_sub->av_buf);
+	xilinx_drm_dp_sub_av_buf_enable_aud(&dp_sub->av_buf);
+	xilinx_drm_dp_sub_aud_init(&dp_sub->aud);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable);
+
+/**
+ * xilinx_drm_dp_sub_disable - Disable the DP subsystem
+ * @dp_sub: DP subsystem
+ *
+ * Disable the DP subsystem, tearing down audio and buffers in the reverse
+ * order of xilinx_drm_dp_sub_enable().
+ */
+void xilinx_drm_dp_sub_disable(struct xilinx_drm_dp_sub *dp_sub)
+{
+	xilinx_drm_dp_sub_aud_deinit(&dp_sub->aud);
+	xilinx_drm_dp_sub_av_buf_disable_aud(&dp_sub->av_buf);
+	xilinx_drm_dp_sub_av_buf_disable_buf(&dp_sub->av_buf);
+	xilinx_drm_dp_sub_av_buf_disable(&dp_sub->av_buf);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_disable);
+
+/* DP subsystem initialization functions */
+
+/**
+ * xilinx_drm_dp_sub_of_get - Get the DP subsystem instance
+ * @np: parent device node
+ *
+ * This function searches and returns a DP subsystem structure for
+ * the parent device node, @np. The DP subsystem node should be a child node of
+ * @np, with 'xlnx,dp-sub' property pointing to the DP device node. An instance
+ * can be shared by multiple users.
+ *
+ * Return: corresponding DP subsystem structure if found. NULL if
+ * the device node doesn't have 'xlnx,dp-sub' property, or -EPROBE_DEFER error
+ * pointer if the DP subsystem isn't found.
+ */
+struct xilinx_drm_dp_sub *xilinx_drm_dp_sub_of_get(struct device_node *np)
+{
+	struct device_node *xilinx_drm_dp_sub_node;
+	struct xilinx_drm_dp_sub *found = NULL;
+	struct xilinx_drm_dp_sub *dp_sub;
+
+	if (!of_find_property(np, "xlnx,dp-sub", NULL))
+		return NULL;
+
+	xilinx_drm_dp_sub_node = of_parse_phandle(np, "xlnx,dp-sub", 0);
+	if (!xilinx_drm_dp_sub_node)
+		return ERR_PTR(-EINVAL);
+
+	/* Match the phandle against already-probed instances */
+	mutex_lock(&xilinx_drm_dp_sub_lock);
+	list_for_each_entry(dp_sub, &xilinx_drm_dp_sub_list, list) {
+		if (dp_sub->dev->of_node == xilinx_drm_dp_sub_node) {
+			found = dp_sub;
+			break;
+		}
+	}
+	mutex_unlock(&xilinx_drm_dp_sub_lock);
+
+	of_node_put(xilinx_drm_dp_sub_node);
+
+	/* Not in the list yet: defer until the dp-sub device probes */
+	if (!found)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return found;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_of_get);
+
+/**
+ * xilinx_drm_dp_sub_put - Put the DP subsystem instance
+ * @dp_sub: DP subsystem
+ *
+ * Put the DP subsystem instance @dp_sub. Currently a no-op since instances
+ * are not reference counted; kept for API symmetry with _of_get().
+ */
+void xilinx_drm_dp_sub_put(struct xilinx_drm_dp_sub *dp_sub)
+{
+	/* no-op */
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_put);
+
+/**
+ * xilinx_drm_dp_sub_register_device - Register the DP subsystem to the global list
+ * @dp_sub: DP subsystem
+ *
+ * Register the DP subsystem instance to the global list
+ */
+static void xilinx_drm_dp_sub_register_device(struct xilinx_drm_dp_sub *dp_sub)
+{
+	mutex_lock(&xilinx_drm_dp_sub_lock);
+	list_add_tail(&dp_sub->list, &xilinx_drm_dp_sub_list);
+	mutex_unlock(&xilinx_drm_dp_sub_lock);
+}
+
+/**
+ * xilinx_drm_dp_sub_unregister_device - Unregister the DP subsystem instance
+ * @dp_sub: DP subsystem
+ *
+ * Unregister the DP subsystem instance from the global list
+ */
+static void
+xilinx_drm_dp_sub_unregister_device(struct xilinx_drm_dp_sub *dp_sub)
+{
+	mutex_lock(&xilinx_drm_dp_sub_lock);
+	list_del(&dp_sub->list);
+	mutex_unlock(&xilinx_drm_dp_sub_lock);
+}
+
+/**
+ * xilinx_drm_dp_sub_parse_of - Parse the DP subsystem device tree node
+ * @dp_sub: DP subsystem
+ *
+ * Parse the DP subsystem device tree node: the blender output format, which
+ * layer is the primary plane, and the initial video / graphics layer formats.
+ *
+ * Return: 0 on success, or the corresponding error code.
+ */
+static int xilinx_drm_dp_sub_parse_of(struct xilinx_drm_dp_sub *dp_sub)
+{
+	struct device_node *node = dp_sub->dev->of_node;
+	struct xilinx_drm_dp_sub_layer *layer;
+	const char *string;
+	u32 fmt, i, size;
+	int ret;
+
+	ret = of_property_read_string(node, "xlnx,output-fmt", &string);
+	if (ret < 0) {
+		/* was "colormetry": spell the diagnostic correctly */
+		dev_err(dp_sub->dev, "No colorimetry in DT\n");
+		return ret;
+	}
+
+	if (strcmp(string, "rgb") == 0) {
+		fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB;
+	} else if (strcmp(string, "ycrcb444") == 0) {
+		fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444;
+	} else if (strcmp(string, "ycrcb422") == 0) {
+		fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422;
+		fmt |= XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE;
+	} else if (strcmp(string, "yonly") == 0) {
+		fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY;
+	} else {
+		dev_err(dp_sub->dev, "Invalid output format in DT\n");
+		return -EINVAL;
+	}
+
+	xilinx_drm_writel(dp_sub->blend.base,
+			  XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT, fmt);
+
+	if (fmt != XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB) {
+		/* Hardcoded SDTV coefficients. Can be runtime configurable */
+		static const u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+						   0x7d4d, 0x7ab3, 0x800,
+						   0x800, 0x794d, 0x7eb3 };
+		static const u32 full_range_offsets[] = { 0x0, 0x8000000,
+							  0x8000000 };
+		u32 offset;
+
+		offset = XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0;
+		for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+			xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+					  sdtv_coeffs[i]);
+
+		offset = XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET;
+		for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+			xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+					  full_range_offsets[i]);
+	}
+
+	/* Exactly one of the two layers becomes the primary plane */
+	if (of_property_read_bool(node, "xlnx,vid-primary"))
+		dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].primary = true;
+	else
+		dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].primary = true;
+
+	ret = of_property_read_string(node, "xlnx,vid-fmt", &string);
+	if (!ret) {
+		layer = &dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID];
+		size = ARRAY_SIZE(av_buf_vid_fmts);
+		layer->num_fmts = size;
+		/* devm_kcalloc() checks the size multiplication for overflow */
+		layer->drm_fmts = devm_kcalloc(dp_sub->dev, size,
+					       sizeof(*layer->drm_fmts),
+					       GFP_KERNEL);
+		if (!layer->drm_fmts)
+			return -ENOMEM;
+
+		for (i = 0; i < layer->num_fmts; i++) {
+			const struct xilinx_drm_dp_sub_fmt *vid_fmt =
+				&av_buf_vid_fmts[i];
+
+			if (strcmp(string, vid_fmt->name) == 0)
+				layer->fmt = vid_fmt;
+
+			layer->drm_fmts[i] = vid_fmt->drm_fmt;
+		}
+
+		if (!layer->fmt) {
+			dev_info(dp_sub->dev, "Invalid vid-fmt in DT\n");
+			layer->fmt = &av_buf_vid_fmts[0];
+		}
+	}
+
+	ret = of_property_read_string(node, "xlnx,gfx-fmt", &string);
+	if (!ret) {
+		layer = &dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX];
+		size = ARRAY_SIZE(av_buf_gfx_fmts);
+		layer->num_fmts = size;
+		layer->drm_fmts = devm_kcalloc(dp_sub->dev, size,
+					       sizeof(*layer->drm_fmts),
+					       GFP_KERNEL);
+		if (!layer->drm_fmts)
+			return -ENOMEM;
+
+		for (i = 0; i < layer->num_fmts; i++) {
+			const struct xilinx_drm_dp_sub_fmt *gfx_fmt =
+				&av_buf_gfx_fmts[i];
+
+			if (strcmp(string, gfx_fmt->name) == 0)
+				layer->fmt = gfx_fmt;
+
+			layer->drm_fmts[i] = gfx_fmt->drm_fmt;
+		}
+
+		if (!layer->fmt) {
+			/* was "Invalid vid-fmt": this branch validates gfx-fmt */
+			dev_info(dp_sub->dev, "Invalid gfx-fmt in DT\n");
+			layer->fmt = &av_buf_gfx_fmts[0];
+		}
+	}
+
+	dp_sub->vid_clk_pl = of_property_read_bool(node, "xlnx,vid-clk-pl");
+
+	return 0;
+}
+
+/*
+ * Probe: map the three register regions (blender, AV buffer manager, audio),
+ * set up the two layers as each other's sibling, parse DT, and publish the
+ * instance on the global list for xilinx_drm_dp_sub_of_get().
+ */
+static int xilinx_drm_dp_sub_probe(struct platform_device *pdev)
+{
+	struct xilinx_drm_dp_sub *dp_sub;
+	struct resource *res;
+	int ret;
+
+	dp_sub = devm_kzalloc(&pdev->dev, sizeof(*dp_sub), GFP_KERNEL);
+	if (!dp_sub)
+		return -ENOMEM;
+
+	dp_sub->dev = &pdev->dev;
+
+	/* devm_ioremap_resource() reports an error for a NULL resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+	dp_sub->blend.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dp_sub->blend.base))
+		return PTR_ERR(dp_sub->blend.base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+	dp_sub->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dp_sub->av_buf.base))
+		return PTR_ERR(dp_sub->av_buf.base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+	dp_sub->aud.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dp_sub->aud.base))
+		return PTR_ERR(dp_sub->aud.base);
+
+	/* Each layer keeps a pointer to its sibling for size/alpha coupling */
+	dp_sub->layers[0].id = XILINX_DRM_DP_SUB_LAYER_VID;
+	dp_sub->layers[0].offset = 0;
+	dp_sub->layers[0].avail = true;
+	dp_sub->layers[0].other = &dp_sub->layers[1];
+
+	dp_sub->layers[1].id = XILINX_DRM_DP_SUB_LAYER_GFX;
+	dp_sub->layers[1].offset = 4;
+	dp_sub->layers[1].avail = true;
+	dp_sub->layers[1].other = &dp_sub->layers[0];
+
+	ret = xilinx_drm_dp_sub_parse_of(dp_sub);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, dp_sub);
+
+	xilinx_drm_dp_sub_register_device(dp_sub);
+
+	xilinx_dp_sub_debugfs_init(dp_sub);
+
+	dev_info(dp_sub->dev, "Xilinx DisplayPort Subsystem is probed\n");
+
+	return 0;
+}
+
+/* Remove: drop the instance from the global list; devm frees the rest */
+static int xilinx_drm_dp_sub_remove(struct platform_device *pdev)
+{
+	struct xilinx_drm_dp_sub *dp_sub = platform_get_drvdata(pdev);
+
+	xilinx_drm_dp_sub_unregister_device(dp_sub);
+
+	return 0;
+}
+
+/* Bind this driver to device tree nodes with compatible = "xlnx,dp-sub" */
+static const struct of_device_id xilinx_drm_dp_sub_of_id_table[] = {
+	{ .compatible = "xlnx,dp-sub" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_dp_sub_of_id_table);
+
+static struct platform_driver xilinx_drm_dp_sub_driver = {
+	.driver = {
+		.name = "xilinx-drm-dp-sub",
+		.of_match_table = xilinx_drm_dp_sub_of_id_table,
+	},
+	.probe = xilinx_drm_dp_sub_probe,
+	.remove = xilinx_drm_dp_sub_remove,
+};
+
+module_platform_driver(xilinx_drm_dp_sub_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h
new file mode 100644
index 000000000000..b86e74622541
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h
@@ -0,0 +1,69 @@
+/*
+ * DisplayPort subsystem header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2014 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_DP_SUB_H_
+#define _XILINX_DRM_DP_SUB_H_
+
+#define XILINX_DRM_DP_SUB_NUM_LAYERS 2
+#define XILINX_DRM_DP_SUB_MAX_WIDTH 4096
+#define XILINX_DRM_DP_SUB_MAX_ALPHA 255
+
+struct drm_device;
+struct xilinx_drm_dp_sub;
+struct xilinx_drm_dp_sub_layer;
+
+int xilinx_drm_dp_sub_layer_check_size(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 width, u32 height);
+int xilinx_drm_dp_sub_layer_set_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 drm_fmt);
+u32 xilinx_drm_dp_sub_layer_get_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+void xilinx_drm_dp_sub_layer_get_fmts(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 **drm_fmts,
+ unsigned int *num_fmts);
+void xilinx_drm_dp_sub_layer_enable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+void xilinx_drm_dp_sub_layer_disable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+struct xilinx_drm_dp_sub_layer *
+xilinx_drm_dp_sub_layer_get(struct xilinx_drm_dp_sub *dp_sub, bool priv);
+void xilinx_drm_dp_sub_layer_put(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+
+int xilinx_drm_dp_sub_set_output_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ u32 drm_fmt);
+void xilinx_drm_dp_sub_set_bg_color(struct xilinx_drm_dp_sub *dp_sub,
+ u32 c0, u32 c1, u32 c2);
+void xilinx_drm_dp_sub_set_alpha(struct xilinx_drm_dp_sub *dp_sub, u32 alpha);
+void
+xilinx_drm_dp_sub_enable_alpha(struct xilinx_drm_dp_sub *dp_sub, bool enable);
+
+void xilinx_drm_dp_sub_enable_vblank(struct xilinx_drm_dp_sub *dp_sub,
+ void (*vblank_fn)(void *),
+ void *vblank_data);
+void xilinx_drm_dp_sub_disable_vblank(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_handle_vblank(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_enable(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_disable(struct xilinx_drm_dp_sub *dp_sub);
+
+struct xilinx_drm_dp_sub *xilinx_drm_dp_sub_of_get(struct device_node *np);
+void xilinx_drm_dp_sub_put(struct xilinx_drm_dp_sub *dp_sub);
+
+#endif /* _XILINX_DRM_DP_SUB_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_drv.c b/drivers/gpu/drm/xilinx/xilinx_drm_drv.c
new file mode 100644
index 000000000000..52706757ffe0
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_drv.c
@@ -0,0 +1,614 @@
+/*
+ * Xilinx DRM KMS support for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_connector.h"
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_gem.h"
+
+#define DRIVER_NAME "xilinx_drm"
+#define DRIVER_DESC "Xilinx DRM KMS support for Xilinx"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xilinx_drm_fbdev_vres = 2;
+module_param_named(fbdev_vres, xilinx_drm_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/*
+ * TODO: The possible pipeline configurations are numerous with Xilinx soft IPs.
+ * It's not too bad for now, but a more proper approach (Common Display
+ * Framework, or some internal abstraction) should be considered when it
+ * reaches a point where such a thing is required.
+ */
+
+/**
+ * struct xilinx_drm_private - Xilinx DRM private data
+ * @drm: DRM core device
+ * @crtc: crtc of the display pipeline
+ * @fb: fb helper for fbdev emulation
+ * @pdev: platform device backing this DRM device
+ * @is_master: set when a file was force-promoted to master in open()
+ */
+struct xilinx_drm_private {
+	struct drm_device *drm;
+	struct drm_crtc *crtc;
+	struct drm_fb_helper *fb;
+	struct platform_device *pdev;
+	bool is_master;
+};
+
+/**
+ * struct xilinx_video_format_desc - Xilinx Video IP video format description
+ * @name: Xilinx video format name
+ * @depth: color depth
+ * @bpp: bits per pixel
+ * @xilinx_format: xilinx format code
+ * @drm_format: drm format code
+ */
+struct xilinx_video_format_desc {
+	const char *name;
+	unsigned int depth;
+	unsigned int bpp;
+	unsigned int xilinx_format;
+	u32 drm_format;
+};
+
+/*
+ * Format lookup table used by the by-code / by-name / bpp / depth helpers
+ * below. XILINX_VIDEO_FORMAT_NONE entries have no soft-IP format code and
+ * are only reachable via the name or DRM-format lookups.
+ */
+static const struct xilinx_video_format_desc xilinx_video_formats[] = {
+	{ "yuv420", 16, 16, XILINX_VIDEO_FORMAT_YUV420, DRM_FORMAT_YUV420 },
+	{ "uvy422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_UYVY },
+	{ "vuy422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_VYUY },
+	{ "yuv422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_YUYV },
+	{ "yvu422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_YVYU },
+	{ "yuv444", 24, 24, XILINX_VIDEO_FORMAT_YUV444, DRM_FORMAT_YUV444 },
+	{ "nv12", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV12 },
+	{ "nv21", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV21 },
+	{ "nv16", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV16 },
+	{ "nv61", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV61 },
+	{ "abgr1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR1555 },
+	{ "argb1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB1555 },
+	{ "rgba4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA4444 },
+	{ "bgra4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA4444 },
+	{ "bgr565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGR565 },
+	{ "rgb565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGB565 },
+	{ "bgr888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_BGR888 },
+	{ "rgb888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_RGB888 },
+	{ "xbgr8888", 24, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_XBGR8888 },
+	{ "xrgb8888", 24, 32, XILINX_VIDEO_FORMAT_XRGB, DRM_FORMAT_XRGB8888 },
+	{ "abgr8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR8888 },
+	{ "argb8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB8888 },
+	{ "bgra8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA8888 },
+	{ "rgba8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA8888 },
+};
+
+/**
+ * xilinx_drm_check_format - Check if the given format is supported
+ * @drm: DRM device
+ * @fourcc: format fourcc
+ *
+ * Check if the given format @fourcc is supported by the current pipeline
+ * by delegating to the crtc.
+ *
+ * Return: true if the format is supported, or false
+ */
+bool xilinx_drm_check_format(struct drm_device *drm, u32 fourcc)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	return xilinx_drm_crtc_check_format(private->crtc, fourcc);
+}
+
+/**
+ * xilinx_drm_get_format - Get the current device format
+ * @drm: DRM device
+ *
+ * Get the current format of the pipeline from the crtc.
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+u32 xilinx_drm_get_format(struct drm_device *drm)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	return xilinx_drm_crtc_get_format(private->crtc);
+}
+
+/**
+ * xilinx_drm_get_align - Get the alignment value for pitch
+ * @drm: DRM object
+ *
+ * Get the alignment value for pitch from the plane, via the crtc.
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_get_align(struct drm_device *drm)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	return xilinx_drm_crtc_get_align(private->crtc);
+}
+
+/* poll changed handler: forward hotplug events to the fbdev helper */
+static void xilinx_drm_output_poll_changed(struct drm_device *drm)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	xilinx_drm_fb_hotplug_event(private->fb);
+}
+
+static const struct drm_mode_config_funcs xilinx_drm_mode_config_funcs = {
+	.fb_create = xilinx_drm_fb_create,
+	.output_poll_changed = xilinx_drm_output_poll_changed,
+};
+
+/*
+ * enable vblank; the @crtc index is unused since there is only one crtc
+ * in this pipeline
+ */
+static int xilinx_drm_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	xilinx_drm_crtc_enable_vblank(private->crtc);
+
+	return 0;
+}
+
+/* disable vblank; the @crtc index is unused (single crtc) */
+static void xilinx_drm_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	xilinx_drm_crtc_disable_vblank(private->crtc);
+}
+
+/* initialize mode config */
+static void xilinx_drm_mode_config_init(struct drm_device *drm)
+{
+	struct xilinx_drm_private *private = drm->dev_private;
+
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+
+	/* max width comes from the crtc; height is a fixed cap.
+	 * NOTE(review): 4096 is hardcoded while width is queried -- confirm
+	 * no pipeline supports taller modes.
+	 */
+	drm->mode_config.max_width =
+		xilinx_drm_crtc_get_max_width(private->crtc);
+	drm->mode_config.max_height = 4096;
+
+	drm->mode_config.funcs = &xilinx_drm_mode_config_funcs;
+}
+
+/* convert xilinx format to drm format by code */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, u32 *drm_format)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(xilinx_video_formats); idx++) {
+		if (xilinx_video_formats[idx].xilinx_format == xilinx_format) {
+			*drm_format = xilinx_video_formats[idx].drm_format;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("Unknown Xilinx video format: %d\n", xilinx_format);
+
+	return -EINVAL;
+}
+
+/* convert xilinx format to drm format by name */
+int xilinx_drm_format_by_name(const char *name, u32 *drm_format)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(xilinx_video_formats); idx++) {
+		if (strcmp(xilinx_video_formats[idx].name, name) == 0) {
+			*drm_format = xilinx_video_formats[idx].drm_format;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("Unknown Xilinx video format: %s\n", name);
+
+	return -EINVAL;
+}
+
+/* get bpp of given format */
+unsigned int xilinx_drm_format_bpp(u32 drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->bpp;
+ }
+
+ return 0;
+}
+
+/* get color depth of given format */
+unsigned int xilinx_drm_format_depth(u32 drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->depth;
+ }
+
+ return 0;
+}
+
+static int xilinx_drm_bind(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+
+ return component_bind_all(dev, drm);
+}
+
+static void xilinx_drm_unbind(struct device *dev)
+{
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops xilinx_drm_ops = {
+ .bind = xilinx_drm_bind,
+ .unbind = xilinx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static int xilinx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xilinx_drm_private *private = dev->dev_private;
+
+ /* This is a hack way to allow the root user to run as a master */
+ if (!(drm_is_primary_client(file) && !dev->master) &&
+ !file->is_master && capable(CAP_SYS_ADMIN)) {
+ file->is_master = 1;
+ private->is_master = true;
+ }
+
+ return 0;
+}
+
+static int xilinx_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file = filp->private_data;
+ struct drm_minor *minor = file->minor;
+ struct drm_device *drm = minor->dev;
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ if (private->is_master) {
+ private->is_master = false;
+ file->is_master = 0;
+ }
+
+ return drm_release(inode, filp);
+}
+
+/* restore the default mode when xilinx drm is released */
+static void xilinx_drm_lastclose(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_restore(private->crtc);
+
+ xilinx_drm_fb_restore_mode(private->fb);
+}
+
+static const struct file_operations xilinx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = xilinx_drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xilinx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_PRIME | DRIVER_LEGACY,
+ .open = xilinx_drm_open,
+ .lastclose = xilinx_drm_lastclose,
+
+ .enable_vblank = xilinx_drm_enable_vblank,
+ .disable_vblank = xilinx_drm_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xilinx_drm_gem_cma_dumb_create,
+
+ .fops = &xilinx_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#if defined(CONFIG_PM_SLEEP)
+/* suspend xilinx drm */
+static int xilinx_drm_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_kms_helper_poll_disable(drm);
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector,
+ DRM_MODE_DPMS_SUSPEND);
+
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+/* resume xilinx drm */
+static int xilinx_drm_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ if (connector->funcs->dpms) {
+ int dpms = connector->dpms;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->funcs->dpms(connector, dpms);
+ }
+ }
+ drm_modeset_unlock_all(drm);
+
+ drm_helper_resume_force_mode(drm);
+
+ drm_modeset_lock_all(drm);
+ drm_kms_helper_poll_enable(drm);
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops xilinx_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_pm_suspend, xilinx_drm_pm_resume)
+};
+
+/* init xilinx drm platform */
+static int xilinx_drm_platform_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private;
+ struct drm_device *drm;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ const struct drm_format_info *info;
+ struct device_node *encoder_node, *ep = NULL, *remote;
+ struct component_match *match = NULL;
+ unsigned int align, i = 0;
+ int ret;
+ u32 format;
+
+ drm = drm_dev_alloc(&xilinx_drm_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ private = devm_kzalloc(drm->dev, sizeof(*private), GFP_KERNEL);
+ if (!private) {
+ ret = -ENOMEM;
+ goto err_drm;
+ }
+
+ drm_mode_config_init(drm);
+
+ /* create a xilinx crtc */
+ private->crtc = xilinx_drm_crtc_create(drm);
+ if (IS_ERR(private->crtc)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx crtc\n");
+ ret = PTR_ERR(private->crtc);
+ goto err_config;
+ }
+
+ while ((encoder_node = of_parse_phandle(drm->dev->of_node,
+ "xlnx,encoder-slave", i))) {
+ encoder = xilinx_drm_encoder_create(drm, encoder_node);
+ of_node_put(encoder_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx encoder\n");
+ ret = PTR_ERR(encoder);
+ goto err_config;
+ }
+
+ connector = xilinx_drm_connector_create(drm, encoder, i);
+ if (IS_ERR(connector)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx connector\n");
+ ret = PTR_ERR(connector);
+ goto err_config;
+ }
+
+ i++;
+ }
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(drm->dev->of_node, ep);
+ if (!ep)
+ break;
+
+ of_node_put(ep);
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(drm->dev, &match, compare_of, remote);
+ of_node_put(remote);
+ i++;
+ }
+
+ if (i == 0) {
+ DRM_ERROR("failed to get an encoder slave node\n");
+ return -ENODEV;
+ }
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto err_master;
+ }
+
+ /* enable irq to enable vblank feature */
+ drm->irq_enabled = 1;
+
+ drm->dev_private = private;
+ private->drm = drm;
+ xilinx_drm_mode_config_init(drm);
+
+ format = xilinx_drm_crtc_get_format(private->crtc);
+ info = drm_format_info(format);
+ if (info && info->depth && info->cpp[0]) {
+ align = xilinx_drm_crtc_get_align(private->crtc);
+ private->fb = xilinx_drm_fb_init(drm, info->cpp[0] * 8, 1,
+ align, xilinx_drm_fbdev_vres);
+ if (IS_ERR(private->fb)) {
+ DRM_ERROR("failed to initialize drm fb\n");
+ private->fb = NULL;
+ }
+ } else {
+ dev_info(&pdev->dev, "fbdev is not initialized\n");
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ drm_helper_disable_unused_functions(drm);
+
+ platform_set_drvdata(pdev, private);
+
+ if (match) {
+ ret = component_master_add_with_match(drm->dev,
+ &xilinx_drm_ops, match);
+ if (ret)
+ goto err_master;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(sizeof(dma_addr_t) * 8));
+ if (ret) {
+ dev_info(&pdev->dev, "failed to set coherent mask (%zu)\n",
+ sizeof(dma_addr_t));
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_master;
+
+ return 0;
+
+err_master:
+ component_master_del(drm->dev, &xilinx_drm_ops);
+err_config:
+ drm_mode_config_cleanup(drm);
+ if (ret == -EPROBE_DEFER)
+ DRM_INFO("load() is defered & will be called again\n");
+err_drm:
+ drm_dev_put(drm);
+ return ret;
+}
+
+/* exit xilinx drm platform */
+static int xilinx_drm_platform_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+ struct drm_device *drm = private->drm;
+
+ component_master_del(drm->dev, &xilinx_drm_ops);
+ drm_kms_helper_poll_fini(drm);
+ xilinx_drm_fb_fini(private->fb);
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+ drm_dev_put(private->drm);
+
+ return 0;
+}
+
+static void xilinx_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_put_dev(private->drm);
+}
+
+static const struct of_device_id xilinx_drm_of_match[] = {
+ { .compatible = "xlnx,drm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_of_match);
+
+static struct platform_driver xilinx_drm_private_driver = {
+ .probe = xilinx_drm_platform_probe,
+ .remove = xilinx_drm_platform_remove,
+ .shutdown = xilinx_drm_platform_shutdown,
+ .driver = {
+ .name = "xilinx-drm",
+ .pm = &xilinx_drm_pm_ops,
+ .of_match_table = xilinx_drm_of_match,
+ },
+};
+
+module_platform_driver(xilinx_drm_private_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_drv.h b/drivers/gpu/drm/xilinx/xilinx_drm_drv.h
new file mode 100644
index 000000000000..b871d421df84
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_drv.h
@@ -0,0 +1,65 @@
+/*
+ * Xilinx DRM KMS Header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_H_
+#define _XILINX_DRM_H_
+
+enum xilinx_video_format {
+ XILINX_VIDEO_FORMAT_YUV422 = 0,
+ XILINX_VIDEO_FORMAT_YUV444 = 1,
+ XILINX_VIDEO_FORMAT_RGB = 2,
+ XILINX_VIDEO_FORMAT_YUV420 = 3,
+ XILINX_VIDEO_FORMAT_XRGB = 16,
+ XILINX_VIDEO_FORMAT_NONE = 32,
+};
+
+/* convert the xilinx format to the drm format */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, u32 *drm_format);
+int xilinx_drm_format_by_name(const char *name, u32 *drm_format);
+
+unsigned int xilinx_drm_format_bpp(u32 drm_format);
+unsigned int xilinx_drm_format_depth(u32 drm_format);
+
+/* io write operations */
+static inline void xilinx_drm_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+/* io read operations */
+static inline u32 xilinx_drm_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static inline void xilinx_drm_clr(void __iomem *base, int offset, u32 clr)
+{
+ xilinx_drm_writel(base, offset, xilinx_drm_readl(base, offset) & ~clr);
+}
+
+static inline void xilinx_drm_set(void __iomem *base, int offset, u32 set)
+{
+ xilinx_drm_writel(base, offset, xilinx_drm_readl(base, offset) | set);
+}
+
+struct drm_device;
+
+bool xilinx_drm_check_format(struct drm_device *drm, uint32_t fourcc);
+uint32_t xilinx_drm_get_format(struct drm_device *drm);
+unsigned int xilinx_drm_get_align(struct drm_device *drm);
+
+#endif /* _XILINX_DRM_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c b/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c
new file mode 100644
index 000000000000..b168ee26a6fe
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c
@@ -0,0 +1,808 @@
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* DSI Tx IP registers */
+#define XDSI_CCR 0x00
+#define XDSI_CCR_COREENB BIT(0)
+#define XDSI_CCR_CRREADY BIT(2)
+#define XDSI_PCR 0x04
+#define XDSI_PCR_VIDEOMODE(x) (((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK (0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT 3
+#define XDSI_PCR_BLLPTYPE(x) ((x) << 5)
+#define XDSI_PCR_BLLPMODE(x) ((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x) ((x) << 13)
+#define XDSI_GIER 0x20
+#define XDSI_ISR 0x24
+#define XDSI_IER 0x28
+#define XDSI_CMD 0x30
+#define XDSI_CMD_QUEUE_PACKET(x) (((x) & 0xffffff) << 0)
+#define XDSI_TIME1 0x50
+#define XDSI_TIME1_BLLP_BURST(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME1_HSA(x) (((x) & 0xffff) << 16)
+#define XDSI_TIME2 0x54
+#define XDSI_TIME2_VACT(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME2_HACT(x) (((x) & 0xffff) << 16)
+#define XDSI_HACT_MULTIPLIER GENMASK(1, 0)
+#define XDSI_TIME3 0x58
+#define XDSI_TIME3_HFP(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME3_HBP(x) (((x) & 0xffff) << 16)
+#define XDSI_TIME4 0x5c
+#define XDSI_TIME4_VFP(x) (((x) & 0xff) << 0)
+#define XDSI_TIME4_VBP(x) (((x) & 0xff) << 8)
+#define XDSI_TIME4_VSA(x) (((x) & 0xff) << 16)
+#define XDSI_LTIME 0x60
+#define XDSI_BLLP_TIME 0x64
+#define XDSI_NUM_DATA_TYPES 4
+#define XDSI_VIDEO_MODE_SYNC_PULSE 0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT 0x1
+#define XDSI_VIDEO_MODE_BURST 0x2
+
+/*
+ * Used as a multiplication factor for HACT based on used
+ * DSI data type.
+ *
+ * e.g. for RGB666_L datatype and 1920x1080 resolution,
+ * the Hact (WC) would be as follows -
+ * 1920 pixels * 18 bits per pixel / 8 bits per byte
+ * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+ *
+ * Data Type - Multiplication factor
+ * RGB888 - 3
+ * RGB666_L - 2.25
+ * RGB666_P - 2.25
+ * RGB565 - 2
+ *
+ * Since the multiplication factor may be a floating point number,
+ * a 100x multiplication factor is used.
+ *
+ * XDSI_NUM_DATA_TYPES represents number of data types in the
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of DRM framework.
+ */
+static const int xdsi_mul_factor[XDSI_NUM_DATA_TYPES] = {300, 225, 225, 200};
+
+/*
+ * struct xilinx_dsi - Core configuration DSI Tx subsystem device structure
+ * @drm_encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ */
+struct xilinx_dsi {
+ struct drm_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+ void __iomem *iomem;
+ u32 lanes;
+ u32 mode_flags;
+ enum mipi_dsi_pixel_format format;
+ struct videomode vm;
+ u32 mul_factor;
+ struct drm_property *eotp_prop;
+ struct drm_property *bllp_mode_prop;
+ struct drm_property *bllp_type_prop;
+ struct drm_property *video_mode_prop;
+ struct drm_property *bllp_burst_time_prop;
+ struct drm_property *cmd_queue_prop;
+ bool eotp_prop_val;
+ bool bllp_mode_prop_val;
+ bool bllp_type_prop_val;
+ u32 video_mode_prop_val;
+ u32 bllp_burst_time_prop_val;
+ u32 cmd_queue_prop_val;
+};
+
+#define host_to_dsi(host) container_of(host, struct xilinx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xilinx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xilinx_dsi, encoder)
+
+static inline void xilinx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_dsi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xilinx_dsi_set_default_drm_properties - Configure DSI DRM
+ * properties with their default values
+ * @dsi: DSI structure having the updated user parameters
+ */
+static void
+xilinx_dsi_set_default_drm_properties(struct xilinx_dsi *dsi)
+{
+ drm_object_property_set_value(&dsi->connector.base, dsi->eotp_prop, 1);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_mode_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_type_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->video_mode_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_burst_time_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->cmd_queue_prop, 0);
+}
+
+/**
+ * xilinx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * given from user application.
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure having drm_property parameters
+ * configured from user application and writes them into DSI IP registers.
+ */
+static void xilinx_dsi_set_config_parameters(struct xilinx_dsi *dsi)
+{
+ u32 reg = 0;
+
+ reg |= XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+ reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+ reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+ reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+
+ /* Configure the burst time if video mode is burst.
+ * HSA of TIME1 register is ignored in this mode.
+ */
+ if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+ reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+ xilinx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+ dev_dbg(dsi->dev, "PCR register value is = %x\n",
+ xilinx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xilinx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xilinx_dsi_set_display_mode(struct xilinx_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg, video_mode;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_PCR);
+ video_mode = ((reg & XDSI_PCR_VIDEOMODE_MASK) >>
+ XDSI_PCR_VIDEOMODE_SHIFT);
+
+ /* configure the HSA value only in non-burst sync pulse video mode */
+ if ((!video_mode) &&
+ (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+ reg = XDSI_TIME1_HSA(vm->hsync_len);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+ XDSI_TIME4_VBP(vm->vback_porch) |
+ XDSI_TIME4_VSA(vm->vsync_len);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+ reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+ XDSI_TIME3_HBP(vm->hback_porch);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+ dev_dbg(dsi->dev, "mul factor for parsed datatype is = %d\n",
+ (dsi->mul_factor) / 100);
+
+ /* The HACT parameter received from panel timing values should be
+ * divisible by 4. The reason for this is, the word count given as
+ * input to DSI controller is HACT * mul_factor. The mul_factor is
+ * 3, 2.25, 2.25, 2 respectively for RGB888, RGB666_L, RGB666_P and
+ * RGB565.
+ * e.g. for RGB666_L color format and 1080p, the word count is
+ * 1920*2.25 = 4320 which is divisible by 4 and it is a valid input
+ * to DSI controller. Based on this 2.25 mul factor, we come up with
+ * the division factor of (XDSI_HACT_MULTIPLIER) as 4 for checking
+ */
+ if (((vm->hactive) & XDSI_HACT_MULTIPLIER) != 0)
+ dev_alert(dsi->dev, "Incorrect HACT will be programmed\n");
+
+ reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+ XDSI_TIME2_VACT(vm->vactive);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xilinx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xilinx_dsi_set_display_enable(struct xilinx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg |= XDSI_CCR_COREENB;
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xilinx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and disables the core enable bit
+ * of core configuration register.
+ */
+static void xilinx_dsi_set_display_disable(struct xilinx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg &= ~XDSI_CCR_COREENB;
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+static void xilinx_dsi_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "encoder dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ xilinx_dsi_set_display_enable(dsi);
+ break;
+ default:
+ xilinx_dsi_set_display_disable(dsi);
+ xilinx_dsi_set_default_drm_properties(dsi);
+ break;
+ }
+}
+
+/**
+ * xilinx_dsi_connector_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @base_connector: pointer Xilinx DSI connector
+ * @property: pointer to the drm_property structure
+ * @value: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from user application
+ * and updates the DSI structure property variables with the values.
+ * These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xilinx_dsi_connector_set_property(struct drm_connector *base_connector,
+ struct drm_property *property,
+ u64 value)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+
+ dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+ property->name, value);
+
+ if (property == dsi->eotp_prop)
+ dsi->eotp_prop_val = !!value;
+ else if (property == dsi->bllp_mode_prop)
+ dsi->bllp_mode_prop_val = !!value;
+ else if (property == dsi->bllp_type_prop)
+ dsi->bllp_type_prop_val = !!value;
+ else if (property == dsi->video_mode_prop)
+ dsi->video_mode_prop_val = (unsigned int)value;
+ else if (property == dsi->bllp_burst_time_prop)
+ dsi->bllp_burst_time_prop_val = (unsigned int)value;
+ else if (property == dsi->cmd_queue_prop)
+ dsi->cmd_queue_prop_val = (unsigned int)value;
+ else
+ return -EINVAL;
+
+ xilinx_dsi_set_config_parameters(dsi);
+
+ return 0;
+}
+
+static int xilinx_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ u32 panel_lanes;
+ struct xilinx_dsi *dsi = host_to_dsi(host);
+
+ panel_lanes = device->lanes;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (panel_lanes != dsi->lanes) {
+ dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+ panel_lanes, dsi->lanes);
+ return -EINVAL;
+ }
+
+ if ((dsi->lanes > 4) || (dsi->lanes < 1)) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (device->format != dsi->format) {
+ dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+ device->format, dsi->format);
+ return -EINVAL;
+ }
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int xilinx_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct xilinx_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel_node = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops xilinx_dsi_ops = {
+ .attach = xilinx_dsi_host_attach,
+ .detach = xilinx_dsi_host_detach,
+};
+
+static int xilinx_dsi_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+ int ret;
+
+ dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ drm_panel_unprepare(dsi->panel);
+ dev_err(dsi->dev, "DRM panel not enabled. power off DSI\n");
+ return ret;
+ }
+ break;
+ default:
+ drm_panel_disable(dsi->panel);
+ drm_panel_unprepare(dsi->panel);
+ break;
+ }
+
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xilinx_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel)
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ } else if (!dsi->panel_node) {
+ xilinx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void xilinx_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xilinx_dsi_connector_funcs = {
+ .dpms = xilinx_dsi_connector_dpms,
+ .detect = xilinx_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xilinx_dsi_connector_destroy,
+ .set_property = xilinx_dsi_connector_set_property,
+};
+
+static int xilinx_dsi_get_modes(struct drm_connector *connector)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+xilinx_dsi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xilinx_dsi_connector_helper_funcs = {
+ .get_modes = xilinx_dsi_get_modes,
+ .best_encoder = xilinx_dsi_best_encoder,
+};
+
+/**
+ * xilinx_drm_dsi_connector_create_property - create DSI connector properties
+ *
+ * @base_connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the xilinx DSI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void
+xilinx_drm_dsi_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+
+ dsi->eotp_prop = drm_property_create_bool(dev, 1, "eotp");
+ dsi->video_mode_prop = drm_property_create_range(dev, 0,
+ "video_mode", 0, 2);
+ dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+ dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+ dsi->bllp_burst_time_prop = drm_property_create_range(dev, 0,
+ "bllp_burst_time", 0, 0xFFFF);
+ dsi->cmd_queue_prop = drm_property_create_range(dev, 0,
+ "cmd_queue", 0, 0xFFFFFF);
+}
+
+/**
+ * xilinx_drm_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx DSI connector
+ */
+static void
+xilinx_drm_dsi_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (dsi->eotp_prop)
+ drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+ if (dsi->video_mode_prop)
+ drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+ if (dsi->bllp_burst_time_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->bllp_burst_time_prop, 0);
+
+ if (dsi->bllp_mode_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->bllp_mode_prop, 0);
+
+ if (dsi->bllp_type_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->bllp_type_prop, 0);
+
+ if (dsi->cmd_queue_prop)
+ drm_object_attach_property(&base_connector->base,
+ dsi->cmd_queue_prop, 0);
+}
+
+static int xilinx_dsi_create_connector(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xilinx_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xilinx_dsi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xilinx_drm_dsi_connector_create_property(connector);
+ xilinx_drm_dsi_connector_attach_property(connector);
+
+ return 0;
+}
+
+static bool xilinx_dsi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * xilinx_dsi_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @mode: DRM kernel-internal display mode structure
+ * @adjusted_mode: DSI panel timing parameters
+ *
+ * This function derives the DSI IP timing parameters from the timing
+ * values given in the attached panel driver.
+ */
+static void xilinx_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = adjusted_mode;
+
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
+ xilinx_dsi_set_display_mode(dsi);
+}
+
+/* .prepare encoder helper: blank the output before a mode switch */
+static void xilinx_dsi_prepare(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "%s %d\n", __func__, __LINE__);
+ xilinx_dsi_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/* .commit encoder helper: turn the output back on with the new mode */
+static void xilinx_dsi_commit(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "config and enable the DSI: %s %d\n",
+ __func__, __LINE__);
+
+ xilinx_dsi_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+/* legacy (non-atomic) encoder helper callbacks */
+static const struct drm_encoder_helper_funcs xilinx_dsi_encoder_helper_funcs = {
+ .dpms = xilinx_dsi_encoder_dpms,
+ .mode_fixup = xilinx_dsi_mode_fixup,
+ .mode_set = xilinx_dsi_mode_set,
+ .prepare = xilinx_dsi_prepare,
+ .commit = xilinx_dsi_commit,
+};
+
+/* no driver-specific destroy work: the core cleanup is sufficient */
+static const struct drm_encoder_funcs xilinx_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * xilinx_dsi_parse_dt - read and validate device tree properties
+ * @dsi: DSI state to fill in
+ *
+ * Reads "xlnx,dsi-num-lanes" (valid range 1..4) and "xlnx,dsi-data-type"
+ * (a MIPI_DSI_FMT_* value) and derives the multiplication factor for the
+ * selected data type.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range value, or the
+ * of_property_read_u32() error for a missing property.
+ */
+static int xilinx_dsi_parse_dt(struct xilinx_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 datatype;
+
+ ret = of_property_read_u32(node, "xlnx,dsi-num-lanes",
+ &dsi->lanes);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+ return ret;
+ }
+
+ if ((dsi->lanes > 4) || (dsi->lanes < 1)) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+ return ret;
+ }
+
+ /* validate before storing it or using it as an array index */
+ if (datatype > MIPI_DSI_FMT_RGB565) {
+ dev_err(dsi->dev, "Invalid xlnx,dsi-data-type string\n");
+ return -EINVAL;
+ }
+
+ dsi->format = datatype;
+ dsi->mul_factor = xdsi_mul_factor[datatype];
+
+ dev_dbg(dsi->dev, "DSI controller num lanes = %d\n", dsi->lanes);
+ dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+
+ return 0;
+}
+
+/* component bind: register the encoder, connector and DSI host with DRM */
+static int xilinx_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_dsi *dsi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: The possible CRTCs are 1 now as per current implementation of
+ * DSI tx drivers. DRM framework can support more than one CRTCs and
+ * DSI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ /* drm_encoder_init() can fail; its result was previously ignored */
+ ret = drm_encoder_init(drm_dev, encoder, &xilinx_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ dev_err(dsi->dev, "failed to init encoder, ret = %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &xilinx_dsi_encoder_helper_funcs);
+
+ ret = xilinx_dsi_create_connector(encoder);
+ if (ret) {
+ dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ xilinx_dsi_connector_destroy(&dsi->connector);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* component unbind: shut the encoder down and unregister the DSI host */
+static void xilinx_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_dsi *dsi = dev_get_drvdata(dev);
+
+ xilinx_dsi_encoder_dpms(&dsi->encoder, DRM_MODE_DPMS_OFF);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+}
+
+static const struct component_ops xilinx_dsi_component_ops = {
+ .bind = xilinx_dsi_bind,
+ .unbind = xilinx_dsi_unbind,
+};
+
+/*
+ * xilinx_dsi_probe - platform probe
+ *
+ * Allocates the driver state, parses the device tree, maps the register
+ * space and joins the component framework.  The DRM-side setup happens
+ * later in xilinx_dsi_bind().
+ */
+static int xilinx_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dsi_host.ops = &xilinx_dsi_ops;
+ dsi->dsi_host.dev = dev;
+ dsi->dev = dev;
+
+ ret = xilinx_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->iomem = devm_ioremap_resource(dev, res);
+ /* validate the mapping before logging the (possibly ERR_PTR) value */
+ if (IS_ERR(dsi->iomem)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(dsi->iomem);
+ }
+
+ dev_dbg(dsi->dev, "dsi virtual address = %p %s %d\n",
+ dsi->iomem, __func__, __LINE__);
+
+ platform_set_drvdata(pdev, dsi);
+
+ return component_add(dev, &xilinx_dsi_component_ops);
+}
+
+/* platform remove: detach from the component framework */
+static int xilinx_dsi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &xilinx_dsi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dsi_of_match[] = {
+ { .compatible = "xlnx,mipi-dsi-tx-subsystem"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = xilinx_dsi_probe,
+ .remove = xilinx_dsi_remove,
+ .driver = {
+ /* .owner is set by the platform driver core; no need to set it */
+ .name = "xilinx-mipi-dsi",
+ .of_match_table = xilinx_dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c
new file mode 100644
index 000000000000..ca3f9f112162
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c
@@ -0,0 +1,240 @@
+/*
+ * Xilinx DRM encoder driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+
+/**
+ * struct xilinx_drm_encoder - wrapper around a DRM slave encoder
+ * @slave: base DRM slave encoder
+ * @dev: the slave device (i2c or platform) backing this encoder
+ * @dpms: last DPMS state applied to the slave
+ */
+struct xilinx_drm_encoder {
+ struct drm_encoder_slave slave;
+ struct device *dev;
+ int dpms;
+};
+
+/* up-cast from the embedded drm_encoder_slave */
+#define to_xilinx_encoder(x) \
+ container_of(x, struct xilinx_drm_encoder, slave)
+
+/* set encoder dpms */
+/* set encoder dpms: record the state and forward it to the slave encoder */
+static void xilinx_drm_encoder_dpms(struct drm_encoder *base_encoder, int dpms)
+{
+ struct drm_encoder_slave *slave = to_encoder_slave(base_encoder);
+ const struct drm_encoder_slave_funcs *sfuncs = slave->slave_funcs;
+ struct xilinx_drm_encoder *encoder = to_xilinx_encoder(slave);
+
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", encoder->dpms, dpms);
+
+ /* nothing to do when the state is unchanged */
+ if (encoder->dpms == dpms)
+ return;
+
+ encoder->dpms = dpms;
+
+ if (sfuncs->dpms)
+ sfuncs->dpms(base_encoder, dpms);
+}
+
+/* adjust a mode if needed */
+static bool
+xilinx_drm_encoder_mode_fixup(struct drm_encoder *base_encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs = NULL;
+ bool ret = true;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ if (encoder_sfuncs->mode_fixup)
+ ret = encoder_sfuncs->mode_fixup(base_encoder, mode,
+ adjusted_mode);
+
+ return ret;
+}
+
+/* set mode to xilinx encoder */
+static void xilinx_drm_encoder_mode_set(struct drm_encoder *base_encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs;
+
+ DRM_DEBUG_KMS("h: %d, v: %d\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+ DRM_DEBUG_KMS("refresh: %d, pclock: %d khz\n",
+ adjusted_mode->vrefresh, adjusted_mode->clock);
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ if (encoder_sfuncs->mode_set)
+ encoder_sfuncs->mode_set(base_encoder, mode, adjusted_mode);
+}
+
+/* apply mode to encoder pipe */
+static void xilinx_drm_encoder_commit(struct drm_encoder *base_encoder)
+{
+ /* start encoder with new mode */
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_ON);
+}
+
+/* prepare encoder: switch it off before the mode is changed */
+static void xilinx_drm_encoder_prepare(struct drm_encoder *base_encoder)
+{
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_OFF);
+}
+
+/* get the crtc currently driven by this encoder */
+static struct drm_crtc *
+xilinx_drm_encoder_get_crtc(struct drm_encoder *base_encoder)
+{
+ return base_encoder->crtc;
+}
+
+static const struct drm_encoder_helper_funcs xilinx_drm_encoder_helper_funcs = {
+ .dpms = xilinx_drm_encoder_dpms,
+ .mode_fixup = xilinx_drm_encoder_mode_fixup,
+ .mode_set = xilinx_drm_encoder_mode_set,
+ .prepare = xilinx_drm_encoder_prepare,
+ .commit = xilinx_drm_encoder_commit,
+ .get_crtc = xilinx_drm_encoder_get_crtc,
+};
+
+/* destroy encoder: switch it off, unregister it and drop the slave ref */
+void xilinx_drm_encoder_destroy(struct drm_encoder *base_encoder)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct drm_encoder_slave *encoder_slave;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder = to_xilinx_encoder(encoder_slave);
+
+ /* make sure encoder is off */
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_OFF);
+
+ drm_encoder_cleanup(base_encoder);
+ /* balances the of_find_*_by_node() reference taken in encoder_create */
+ put_device(encoder->dev);
+}
+
+static const struct drm_encoder_funcs xilinx_drm_encoder_funcs = {
+ .destroy = xilinx_drm_encoder_destroy,
+};
+
+/**
+ * xilinx_drm_encoder_create - create and initialize a slave-backed encoder
+ * @drm: DRM device
+ * @node: device tree node of the slave encoder (i2c or platform device)
+ *
+ * Looks up the slave device for @node, initializes a DRM slave encoder
+ * and calls the slave driver's encoder_init hook.
+ *
+ * Return: the new encoder, or an ERR_PTR. -EPROBE_DEFER is returned
+ * when the slave device or its driver is not ready yet.
+ */
+struct drm_encoder *xilinx_drm_encoder_create(struct drm_device *drm,
+ struct device_node *node)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct i2c_client *i2c_slv;
+ struct i2c_driver *i2c_driver;
+ struct drm_i2c_encoder_driver *drm_i2c_driver;
+ struct device_driver *device_driver;
+ struct platform_device *platform_slv;
+ struct platform_driver *platform_driver;
+ struct drm_platform_encoder_driver *drm_platform_driver;
+ int ret = 0;
+
+ encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return ERR_PTR(-ENOMEM);
+
+ encoder->dpms = DRM_MODE_DPMS_OFF;
+
+ /* FIXME: Use DT to figure out crtcs / clones */
+ encoder->slave.base.possible_crtcs = 1;
+ encoder->slave.base.possible_clones = ~0;
+ ret = drm_encoder_init(drm, &encoder->slave.base,
+ &xilinx_drm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm encoder\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(&encoder->slave.base,
+ &xilinx_drm_encoder_helper_funcs);
+
+ /* initialize slave encoder */
+ i2c_slv = of_find_i2c_device_by_node(node);
+ if (i2c_slv && i2c_slv->dev.driver) {
+ i2c_driver = to_i2c_driver(i2c_slv->dev.driver);
+ drm_i2c_driver = to_drm_i2c_encoder_driver(i2c_driver);
+ if (!drm_i2c_driver || !drm_i2c_driver->encoder_init) {
+ DRM_DEBUG_KMS("failed to initialize i2c slave\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ encoder->dev = &i2c_slv->dev;
+ ret = drm_i2c_driver->encoder_init(i2c_slv, drm,
+ &encoder->slave);
+ } else {
+ platform_slv = of_find_device_by_node(node);
+ if (!platform_slv) {
+ DRM_DEBUG_KMS("failed to get an encoder slv\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ device_driver = platform_slv->dev.driver;
+ if (!device_driver) {
+ DRM_DEBUG_KMS("failed to get device driver\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ platform_driver = to_platform_driver(device_driver);
+ drm_platform_driver =
+ to_drm_platform_encoder_driver(platform_driver);
+ if (!drm_platform_driver ||
+ !drm_platform_driver->encoder_init) {
+ DRM_DEBUG_KMS("failed to initialize platform slave\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ encoder->dev = &platform_slv->dev;
+ ret = drm_platform_driver->encoder_init(platform_slv, drm,
+ &encoder->slave);
+ }
+
+ if (ret) {
+ DRM_ERROR("failed to initialize encoder slave\n");
+ goto err_out;
+ }
+
+ if (!encoder->slave.slave_funcs) {
+ DRM_ERROR("there's no encoder slave function\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ return &encoder->slave.base;
+
+err_out:
+ /* undo drm_encoder_init(); this was previously leaked on error paths */
+ drm_encoder_cleanup(&encoder->slave.base);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h
new file mode 100644
index 000000000000..7707f14db499
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h
@@ -0,0 +1,28 @@
+/*
+ * Xilinx DRM encoder header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_ENCODER_H_
+#define _XILINX_DRM_ENCODER_H_
+
+struct drm_device;
+struct drm_encoder;
+
+/* create a slave-backed encoder for the given device tree node */
+struct drm_encoder *xilinx_drm_encoder_create(struct drm_device *drm,
+ struct device_node *node);
+/* disable and clean up an encoder created by xilinx_drm_encoder_create() */
+void xilinx_drm_encoder_destroy(struct drm_encoder *base_encoder);
+
+#endif /* _XILINX_DRM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_fb.c b/drivers/gpu/drm/xilinx/xilinx_drm_fb.c
new file mode 100644
index 000000000000..e9fe1daaaec2
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_fb.c
@@ -0,0 +1,516 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+
+/**
+ * struct xilinx_drm_fb - Xilinx DRM framebuffer
+ * @base: base DRM framebuffer
+ * @obj: backing CMA GEM objects, one per plane (up to 4)
+ */
+struct xilinx_drm_fb {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj[4];
+};
+
+/**
+ * struct xilinx_drm_fbdev - Xilinx fbdev emulation state
+ * @fb_helper: base DRM fb helper
+ * @fb: framebuffer backing the fbdev
+ * @align: pitch alignment in bytes
+ * @vres_mult: multiplier applied to the virtual vertical resolution
+ */
+struct xilinx_drm_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct xilinx_drm_fb *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+};
+
+/* up-cast from the embedded drm_fb_helper */
+static inline struct xilinx_drm_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xilinx_drm_fbdev, fb_helper);
+}
+
+/* up-cast from the embedded drm_framebuffer */
+static inline struct xilinx_drm_fb *to_fb(struct drm_framebuffer *base_fb)
+{
+ return container_of(base_fb, struct xilinx_drm_fb, base);
+}
+
+/* .destroy: drop the reference taken on each plane's GEM object */
+static void xilinx_drm_fb_destroy(struct drm_framebuffer *base_fb)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+ int i;
+
+ /* bound by the obj[] array size rather than a magic number */
+ for (i = 0; i < ARRAY_SIZE(fb->obj); i++)
+ if (fb->obj[i])
+ drm_gem_object_put_unlocked(&fb->obj[i]->base);
+
+ drm_framebuffer_cleanup(base_fb);
+ kfree(fb);
+}
+
+/* .create_handle: expose the first plane's GEM object to userspace */
+static int xilinx_drm_fb_create_handle(struct drm_framebuffer *base_fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ return drm_gem_handle_create(file_priv, &fb->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs xilinx_drm_fb_funcs = {
+ .destroy = xilinx_drm_fb_destroy,
+ .create_handle = xilinx_drm_fb_create_handle,
+};
+
+/**
+ * xilinx_drm_fb_alloc - Allocate a xilinx_drm_fb
+ * @drm: DRM object
+ * @mode_cmd: drm_mode_fb_cmd2 struct
+ * @obj: pointers for returned drm_gem_cma_objects
+ * @num_planes: number of planes to be allocated
+ *
+ * This function is based on drm_fb_cma_alloc().
+ *
+ * On success the framebuffer takes over the GEM object references in
+ * @obj; on failure the caller keeps ownership of them.
+ *
+ * Return: a xilinx_drm_fb object, or ERR_PTR.
+ */
+static struct xilinx_drm_fb *
+xilinx_drm_fb_alloc(struct drm_device *drm,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_cma_object **obj, unsigned int num_planes)
+{
+ struct xilinx_drm_fb *fb;
+ int ret;
+ int i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
+
+ /* adopt the per-plane GEM object pointers */
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(drm, &fb->base, &xilinx_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("Failed to initialize framebuffer: %d\n", ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * xilinx_drm_fb_get_gem_obj - Get CMA GEM object for framebuffer
+ * @base_fb: the framebuffer
+ * @plane: which plane
+ *
+ * This function is based on drm_fb_cma_get_gem_obj().
+ *
+ * Return: a CMA GEM object for given framebuffer, or NULL if not available.
+ */
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ /* bound by the obj[] array size rather than a magic number */
+ if (plane >= ARRAY_SIZE(fb->obj))
+ return NULL;
+
+ return fb->obj[plane];
+}
+
+/* fb_pan_display: apply the new x/y offset to every fbdev-managed CRTC */
+static int xilinx_drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_set *modeset;
+ int ret = 0;
+ int i;
+
+ /* do not touch modeset state while an oops is being printed */
+ if (oops_in_progress)
+ return -EBUSY;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ ret = drm_mode_set_config_internal(modeset);
+ /* mirror the accepted offsets back into fb_info */
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/* fb_ioctl: implement FBIO_WAITFORVSYNC by waiting one vblank per CRTC */
+static int
+xilinx_drm_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ unsigned int i;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set;
+ struct drm_crtc *crtc;
+
+ mode_set = &fb_helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ return ret;
+ default:
+ /* all other ioctls are unsupported; dead trailing return removed */
+ return -ENOTTY;
+ }
+}
+
+/* fbdev ops: sys_* drawing, drm_fb_helper defaults, custom pan and ioctl */
+static struct fb_ops xilinx_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = xilinx_drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xilinx_drm_fb_ioctl,
+};
+
+/**
+ * xilinx_drm_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @sizes: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xilinx_drm_fbdev_create(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *base_fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ /* honor the pitch alignment required by the display pipeline */
+ mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel,
+ fbdev->align);
+ mode_cmd.pixel_format = xilinx_drm_get_format(drm);
+
+ /* virtual height is vres_mult times the visible height */
+ mode_cmd.height *= fbdev->vres_mult;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ DRM_ERROR("Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ /* the fbdev framebuffer uses a single plane */
+ fbdev->fb = xilinx_drm_fb_alloc(drm, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev->fb)) {
+ DRM_ERROR("Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ base_fb = &fbdev->fb->base;
+ fb_helper->fb = base_fb;
+ fb_helper->fbdev = fbi;
+
+ fbi->par = fb_helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xilinx_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("Failed to allocate color map.\n");
+ goto err_xilinx_drm_fb_destroy;
+ }
+
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
+ /* report only the visible height to userspace */
+ fbi->var.yres = base_fb->height / fbdev->vres_mult;
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * base_fb->pitches[0];
+
+ /* point the fbdev at the CMA backing memory */
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_xilinx_drm_fb_destroy:
+ drm_framebuffer_unregister_private(base_fb);
+ xilinx_drm_fb_destroy(base_fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
+static struct drm_fb_helper_funcs xilinx_drm_fb_helper_funcs = {
+ .fb_probe = xilinx_drm_fbdev_create,
+};
+
+/**
+ * xilinx_drm_fb_init - Allocate and initializes the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or a ERR_PTR.
+ */
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult)
+{
+ struct xilinx_drm_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ DRM_ERROR("Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev->vres_mult = vres_mult;
+
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xilinx_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_ERROR("Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_ERROR("Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ drm_helper_disable_unused_functions(drm);
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_ERROR("Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * xilinx_drm_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xilinx_drm_fbdev *fbdev;
+
+ if (!fb_helper)
+ return;
+
+ fbdev = to_fbdev(fb_helper);
+ if (fbdev->fb_helper.fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fbdev->fb_helper.fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(&fbdev->fb->base);
+ xilinx_drm_fb_destroy(&fbdev->fb->base);
+ }
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xilinx_drm_fb_restore_mode - Restores initial framebuffer mode
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_restore_mode() and usually called
+ * from the Xilinx DRM driver's lastclose callback.
+ */
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper)
+{
+ if (!fb_helper)
+ return;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+}
+
+/**
+ * xilinx_drm_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This functions creates a drm_framebuffer for given mode @mode_cmd. This
+ * functions is intended to be used for the fb_create callback function of
+ * drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or ERR_PTR.
+ */
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct xilinx_drm_fb *fb;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ const struct drm_format_info *info;
+ struct drm_format_name_buf format_name;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info) {
+ DRM_ERROR("Unsupported framebuffer format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* chroma subsampling factors for the secondary planes */
+ hsub = info->hsub;
+ vsub = info->vsub;
+
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ /* reject buffers too small to hold the described plane */
+ min_size = (height - 1) * mode_cmd->pitches[i] + width *
+ info->cpp[i] + mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_put_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb = xilinx_drm_fb_alloc(drm, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_unreference;
+ }
+
+ /*
+ * NOTE(review): drm_helper_mode_fill_fb_struct() in fb_alloc already
+ * derives format from mode_cmd; this reassignment looks redundant —
+ * confirm before removing.
+ */
+ fb->base.format = info;
+
+ return &fb->base;
+
+err_gem_object_unreference:
+ /* objs[i] was not stored for the failing index; unwind the rest */
+ for (i--; i >= 0; i--)
+ drm_gem_object_put_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
+
+/**
+ * xilinx_drm_fb_hotplug_event - Poll for hotplug events
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_hotplug_event() and usually called
+ * from the Xilinx DRM driver's output_poll_changed callback.
+ */
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ if (!fb_helper)
+ return;
+
+ drm_fb_helper_hotplug_event(fb_helper);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_fb.h b/drivers/gpu/drm/xilinx/xilinx_drm_fb.h
new file mode 100644
index 000000000000..c8b436edd08d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_fb.h
@@ -0,0 +1,38 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_FB_H_
+#define _XILINX_DRM_FB_H_
+
+struct drm_fb_helper;
+
+/* look up the CMA GEM object backing one plane of a framebuffer */
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane);
+
+/* fbdev emulation setup and teardown */
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult);
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper);
+
+/* helpers for lastclose / fb_create / output_poll_changed callbacks */
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper);
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper);
+
+#endif /* _XILINX_DRM_FB_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_gem.c b/drivers/gpu/drm/xilinx/xilinx_drm_gem.c
new file mode 100644
index 000000000000..b554c200ca09
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_gem.c
@@ -0,0 +1,45 @@
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_gem.h"
+
+/**
+ * xilinx_drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function is for dumb_create callback of drm_driver struct. Simply
+ * it wraps around drm_gem_cma_dumb_create() and sets the pitch value
+ * by retrieving the value from the device.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create()
+ */
+int xilinx_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ /* round the pitch up to the alignment required by the device */
+ args->pitch = ALIGN(pitch, xilinx_drm_get_align(drm));
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_gem.h b/drivers/gpu/drm/xilinx/xilinx_drm_gem.h
new file mode 100644
index 000000000000..9e05e78cb766
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_gem.h
@@ -0,0 +1,25 @@
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_GEM_H_
+#define _XILINX_DRM_GEM_H_
+
+/* dumb_create callback that applies the device's pitch alignment */
+int xilinx_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _XILINX_DRM_GEM_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_plane.c b/drivers/gpu/drm/xilinx/xilinx_drm_plane.c
new file mode 100644
index 000000000000..8467f22f86af
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_plane.c
@@ -0,0 +1,1098 @@
+/*
+ * Xilinx DRM KMS plane driver
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_plane.h"
+
+#include "xilinx_cresample.h"
+#include "xilinx_osd.h"
+#include "xilinx_rgb2yuv.h"
+
+/* max. number of memory sub-planes (e.g. Y/U/V planes of planar formats) */
+#define MAX_NUM_SUB_PLANES	4
+
+/**
+ * struct xilinx_drm_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xilinx_drm_plane_dma {
+	struct dma_chan *chan;
+	struct dma_interleaved_template xt;
+	struct data_chunk sgl[1];
+	bool is_active;
+};
+
+/**
+ * struct xilinx_drm_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @zpos: user requested z-position value
+ * @prio: actual layer priority (z-position programmed into hardware)
+ * @alpha: alpha value
+ * @alpha_enable: alpha enable value
+ * @primary: flag for primary plane
+ * @format: pixel format
+ * @dma: dma object, one per memory sub-plane
+ * @rgb2yuv: rgb2yuv instance
+ * @cresample: cresample instance
+ * @osd_layer: osd layer
+ * @dp_layer: DisplayPort subsystem layer
+ * @manager: plane manager this plane belongs to
+ */
+struct xilinx_drm_plane {
+	struct drm_plane base;
+	int id;
+	int dpms;
+	unsigned int zpos;
+	unsigned int prio;
+	unsigned int alpha;
+	unsigned int alpha_enable;
+	bool primary;
+	u32 format;
+	struct xilinx_drm_plane_dma dma[MAX_NUM_SUB_PLANES];
+	struct xilinx_rgb2yuv *rgb2yuv;
+	struct xilinx_cresample *cresample;
+	struct xilinx_osd_layer *osd_layer;
+	struct xilinx_drm_dp_sub_layer *dp_layer;
+	struct xilinx_drm_plane_manager *manager;
+};
+
+/* upper bound on planes managed by one plane manager */
+#define MAX_PLANES 8
+
+/**
+ * struct xilinx_drm_plane_manager - Xilinx drm plane manager object
+ *
+ * @drm: drm device
+ * @node: plane device node ("planes" child of the drm device node)
+ * @osd: osd instance
+ * @dp_sub: DisplayPort subsystem instance
+ * @num_planes: number of available planes
+ * @format: video format
+ * @max_width: maximum width
+ * @zpos_prop: z-position(priority) property
+ * @alpha_prop: alpha value property
+ * @alpha_enable_prop: alpha enable property
+ * @default_alpha: default alpha value
+ * @planes: xilinx drm planes
+ */
+struct xilinx_drm_plane_manager {
+	struct drm_device *drm;
+	struct device_node *node;
+	struct xilinx_osd *osd;
+	struct xilinx_drm_dp_sub *dp_sub;
+	int num_planes;
+	u32 format;
+	int max_width;
+	struct drm_property *zpos_prop;
+	struct drm_property *alpha_prop;
+	struct drm_property *alpha_enable_prop;
+	unsigned int default_alpha;
+	struct xilinx_drm_plane *planes[MAX_PLANES];
+};
+
+#define to_xilinx_plane(x)	container_of(x, struct xilinx_drm_plane, base)
+
+/**
+ * xilinx_drm_plane_commit - Submit and start DMA for all active sub-planes
+ * @base_plane: base drm plane object
+ *
+ * Prepare one interleaved DMA descriptor per active sub-plane, submit it,
+ * and kick the engine. If descriptor preparation fails, the function logs
+ * an error and returns early; sub-planes already submitted in this call
+ * keep running.
+ */
+void xilinx_drm_plane_commit(struct drm_plane *base_plane)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	struct dma_async_tx_descriptor *desc;
+	enum dma_ctrl_flags flags;
+	unsigned int i;
+
+	/* for xilinx video framebuffer dma, if used */
+	xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+
+	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+	for (i = 0; i < MAX_NUM_SUB_PLANES; i++) {
+		struct xilinx_drm_plane_dma *dma = &plane->dma[i];
+
+		if (dma->chan && dma->is_active) {
+			flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+			desc = dmaengine_prep_interleaved_dma(dma->chan,
+							      &dma->xt,
+							      flags);
+			if (!desc) {
+				DRM_ERROR("failed to prepare DMA descriptor\n");
+				return;
+			}
+
+			dmaengine_submit(desc);
+
+			dma_async_issue_pending(dma->chan);
+		}
+	}
+}
+
+/**
+ * xilinx_drm_plane_dpms - Set the DPMS state of a plane
+ * @base_plane: base drm plane object
+ * @dpms: requested DPMS state (DRM_MODE_DPMS_*)
+ *
+ * For DRM_MODE_DPMS_ON, enable whichever pipeline elements this plane has
+ * (DP subsystem layer, rgb2yuv, cresample, OSD layer) and commit the DMA.
+ * For any other state, disable/reset them and terminate the DMA channels.
+ * No-op when the requested state equals the current one.
+ */
+void xilinx_drm_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	struct xilinx_drm_plane_manager *manager = plane->manager;
+	unsigned int i;
+
+	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+	DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+	if (plane->dpms == dpms)
+		return;
+
+	plane->dpms = dpms;
+	switch (dpms) {
+	case DRM_MODE_DPMS_ON:
+		if (manager->dp_sub) {
+			/* global alpha is only driven by the primary plane */
+			if (plane->primary) {
+				xilinx_drm_dp_sub_enable_alpha(manager->dp_sub,
+							       plane->alpha_enable);
+				xilinx_drm_dp_sub_set_alpha(manager->dp_sub,
+							    plane->alpha);
+			}
+			xilinx_drm_dp_sub_layer_enable(manager->dp_sub,
+						       plane->dp_layer);
+		}
+
+		if (plane->rgb2yuv)
+			xilinx_rgb2yuv_enable(plane->rgb2yuv);
+
+		if (plane->cresample)
+			xilinx_cresample_enable(plane->cresample);
+
+		/* enable osd */
+		if (manager->osd) {
+			xilinx_osd_disable_rue(manager->osd);
+
+			xilinx_osd_layer_set_priority(plane->osd_layer,
+						      plane->prio);
+			xilinx_osd_layer_enable_alpha(plane->osd_layer,
+						      plane->alpha_enable);
+			xilinx_osd_layer_set_alpha(plane->osd_layer,
+						   plane->alpha);
+			xilinx_osd_layer_enable(plane->osd_layer);
+
+			xilinx_osd_enable_rue(manager->osd);
+		}
+
+		xilinx_drm_plane_commit(base_plane);
+		break;
+	default:
+		/* disable/reset osd */
+		if (manager->osd) {
+			xilinx_osd_disable_rue(manager->osd);
+
+			xilinx_osd_layer_set_dimension(plane->osd_layer,
+						       0, 0, 0, 0);
+			xilinx_osd_layer_disable(plane->osd_layer);
+
+			xilinx_osd_enable_rue(manager->osd);
+		}
+
+		if (plane->cresample) {
+			xilinx_cresample_disable(plane->cresample);
+			xilinx_cresample_reset(plane->cresample);
+		}
+
+		if (plane->rgb2yuv) {
+			xilinx_rgb2yuv_disable(plane->rgb2yuv);
+			xilinx_rgb2yuv_reset(plane->rgb2yuv);
+		}
+
+		/* stop dma engine and release descriptors */
+		for (i = 0; i < MAX_NUM_SUB_PLANES; i++) {
+			if (plane->dma[i].chan && plane->dma[i].is_active)
+				dmaengine_terminate_all(plane->dma[i].chan);
+		}
+
+		if (manager->dp_sub)
+			xilinx_drm_dp_sub_layer_disable(manager->dp_sub,
+							plane->dp_layer);
+
+		break;
+	}
+}
+
+/**
+ * xilinx_drm_plane_mode_set - Configure a plane to scan out @fb
+ * @base_plane: base drm plane object
+ * @fb: framebuffer to scan out
+ * @crtc_x: destination x position on the CRTC
+ * @crtc_y: destination y position on the CRTC
+ * @crtc_w: destination width
+ * @crtc_h: destination height
+ * @src_x: source x offset within @fb (pixels)
+ * @src_y: source y offset within @fb (pixels)
+ * @src_w: source width
+ * @src_h: source height
+ *
+ * Program the per-plane pipeline elements and fill in the interleaved DMA
+ * templates for each sub-plane of @fb. The DMA is not started here; that
+ * happens in xilinx_drm_plane_commit().
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
+			      struct drm_framebuffer *fb,
+			      int crtc_x, int crtc_y,
+			      unsigned int crtc_w, unsigned int crtc_h,
+			      u32 src_x, u32 src_y,
+			      u32 src_w, u32 src_h)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	struct drm_gem_cma_object *obj;
+	const struct drm_format_info *info;
+	size_t offset;
+	unsigned int hsub, vsub, i;
+
+	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+	/*
+	 * Validate the format info before any use. The previous code
+	 * dereferenced fb->format in a debug print before this check and
+	 * even dereferenced 'info' inside the !info error branch itself.
+	 */
+	info = fb->format;
+	if (!info) {
+		DRM_ERROR("framebuffer has no format info\n");
+		return -EINVAL;
+	}
+
+	/* configure cresample */
+	if (plane->cresample)
+		xilinx_cresample_configure(plane->cresample, crtc_w, crtc_h);
+
+	/* configure rgb2yuv */
+	if (plane->rgb2yuv)
+		xilinx_rgb2yuv_configure(plane->rgb2yuv, crtc_w, crtc_h);
+
+	DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n",
+		      src_w, crtc_x, src_h, crtc_y);
+	DRM_DEBUG_KMS("bpp: %d\n", info->cpp[0] * 8);
+
+	hsub = info->hsub;
+	vsub = info->vsub;
+
+	for (i = 0; i < info->num_planes; i++) {
+		/* chroma sub-planes are subsampled; plane 0 never is */
+		unsigned int width = src_w / (i ? hsub : 1);
+		unsigned int height = src_h / (i ? vsub : 1);
+		unsigned int cpp = info->cpp[i];
+
+		if (!cpp)
+			cpp = xilinx_drm_format_bpp(fb->format->format) >> 3;
+
+		obj = xilinx_drm_fb_get_gem_obj(fb, i);
+		if (!obj) {
+			DRM_ERROR("failed to get a gem obj for fb\n");
+			return -EINVAL;
+		}
+
+		plane->dma[i].xt.numf = height;
+		plane->dma[i].sgl[0].size = drm_format_plane_width_bytes(info,
+									 i,
+									 width);
+		/* inter-chunk gap: pitch minus the bytes actually fetched */
+		plane->dma[i].sgl[0].icg = fb->pitches[i] -
+					   plane->dma[i].sgl[0].size;
+		offset = drm_format_plane_width_bytes(info, i, src_x);
+		offset += src_y * fb->pitches[i];
+		offset += fb->offsets[i];
+		plane->dma[i].xt.src_start = obj->paddr + offset;
+		plane->dma[i].xt.frame_size = 1;
+		plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
+		plane->dma[i].xt.src_sgl = true;
+		plane->dma[i].xt.dst_sgl = false;
+		plane->dma[i].is_active = true;
+	}
+
+	for (; i < MAX_NUM_SUB_PLANES; i++)
+		plane->dma[i].is_active = false;
+
+	/* set OSD dimensions */
+	if (plane->manager->osd) {
+		xilinx_osd_disable_rue(plane->manager->osd);
+
+		xilinx_osd_layer_set_dimension(plane->osd_layer, crtc_x, crtc_y,
+					       src_w, src_h);
+
+		xilinx_osd_enable_rue(plane->manager->osd);
+	}
+
+	if (plane->manager->dp_sub) {
+		int ret;
+
+		ret = xilinx_drm_dp_sub_layer_check_size(plane->manager->dp_sub,
+							 plane->dp_layer,
+							 src_w, src_h);
+		if (ret)
+			return ret;
+
+		ret = xilinx_drm_dp_sub_layer_set_fmt(plane->manager->dp_sub,
+						      plane->dp_layer,
+						      fb->format->format);
+		if (ret) {
+			DRM_ERROR("failed to set dp_sub layer fmt\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * drm_plane_funcs.update_plane implementation: src_* arrive in 16.16
+ * fixed point, so shift them down before handing off to mode_set().
+ */
+static int xilinx_drm_plane_update(struct drm_plane *base_plane,
+				   struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   int crtc_x, int crtc_y,
+				   unsigned int crtc_w, unsigned int crtc_h,
+				   u32 src_x, u32 src_y,
+				   u32 src_w, u32 src_h,
+				   struct drm_modeset_acquire_ctx *ctx)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	int err;
+
+	err = xilinx_drm_plane_mode_set(base_plane, fb, crtc_x, crtc_y,
+					crtc_w, crtc_h, src_x >> 16,
+					src_y >> 16, src_w >> 16,
+					src_h >> 16);
+	if (err) {
+		DRM_ERROR("failed to mode-set a plane\n");
+		return err;
+	}
+
+	/* Already on? Just push the new configuration. Otherwise power up. */
+	if (plane->dpms == DRM_MODE_DPMS_ON)
+		xilinx_drm_plane_commit(base_plane);
+	else
+		xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_ON);
+
+	return 0;
+}
+
+/*
+ * drm_plane_funcs.disable_plane implementation: a plain DPMS-OFF.
+ * Always succeeds.
+ */
+static int xilinx_drm_plane_disable(struct drm_plane *base_plane,
+				    struct drm_modeset_acquire_ctx *ctx)
+{
+	xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_OFF);
+
+	return 0;
+}
+
+/*
+ * drm_plane_funcs.destroy implementation. Turns the plane off first,
+ * unregisters it from the manager, then releases DMA channels and the
+ * OSD/DP-subsystem layers it holds.
+ */
+static void xilinx_drm_plane_destroy(struct drm_plane *base_plane)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	unsigned int i;
+
+	xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_OFF);
+
+	/* free the manager slot so it can be reused */
+	plane->manager->planes[plane->id] = NULL;
+
+	drm_plane_cleanup(base_plane);
+
+	for (i = 0; i < MAX_NUM_SUB_PLANES; i++)
+		if (plane->dma[i].chan)
+			dma_release_channel(plane->dma[i].chan);
+
+	if (plane->manager->osd) {
+		xilinx_osd_layer_disable(plane->osd_layer);
+		xilinx_osd_layer_put(plane->osd_layer);
+	}
+
+	if (plane->manager->dp_sub) {
+		xilinx_drm_dp_sub_layer_disable(plane->manager->dp_sub,
+						plane->dp_layer);
+		xilinx_drm_dp_sub_layer_put(plane->manager->dp_sub,
+					    plane->dp_layer);
+	}
+}
+
+/**
+ * xilinx_drm_plane_update_prio - Configure plane priorities based on zpos
+ * @manager: the plane manager
+ *
+ * Z-position values are user requested position of planes. The priority is
+ * the actual position of planes in hardware. Some hardware doesn't allow
+ * any duplicate priority, so this function needs to be called when a duplicate
+ * priority is found. Then planes are sorted by zpos value, and the priorities
+ * are reconfigured. A plane with lower plane ID gets assigned to the lower
+ * priority when planes have the same zpos value.
+ *
+ * Only reachable through the zpos property, which is created solely when
+ * an OSD is present, so manager->osd and the per-plane osd_layer are valid
+ * here.
+ */
+static void
+xilinx_drm_plane_update_prio(struct xilinx_drm_plane_manager *manager)
+{
+	struct xilinx_drm_plane *planes[MAX_PLANES];
+	struct xilinx_drm_plane *plane;
+	unsigned int i, j;
+
+	/* sort planes by zpos (stable insertion sort, ascending) */
+	for (i = 0; i < manager->num_planes; i++) {
+		plane = manager->planes[i];
+
+		for (j = i; j > 0; --j) {
+			if (planes[j - 1]->zpos <= plane->zpos)
+				break;
+			planes[j] = planes[j - 1];
+		}
+
+		planes[j] = plane;
+	}
+
+	xilinx_osd_disable_rue(manager->osd);
+
+	/* remove duplicates by reassigning priority */
+	for (i = 0; i < manager->num_planes; i++) {
+		planes[i]->prio = i;
+		xilinx_osd_layer_set_priority(planes[i]->osd_layer,
+					      planes[i]->prio);
+	}
+
+	xilinx_osd_enable_rue(manager->osd);
+}
+
+/*
+ * Record the user-requested zpos. If another plane already holds that
+ * hardware priority, re-sort every plane; otherwise program it directly.
+ */
+static void xilinx_drm_plane_set_zpos(struct drm_plane *base_plane,
+				      unsigned int zpos)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	struct xilinx_drm_plane_manager *manager = plane->manager;
+	bool conflict = false;
+	int i;
+
+	for (i = 0; i < manager->num_planes; i++) {
+		struct xilinx_drm_plane *other = manager->planes[i];
+
+		if (other != plane && other->prio == zpos) {
+			conflict = true;
+			break;
+		}
+	}
+
+	plane->zpos = zpos;
+
+	if (conflict) {
+		xilinx_drm_plane_update_prio(manager);
+		return;
+	}
+
+	plane->prio = zpos;
+	xilinx_osd_layer_set_priority(plane->osd_layer, plane->prio);
+}
+
+/* Store the plane alpha and program it into the OSD layer or DP subsystem. */
+static void xilinx_drm_plane_set_alpha(struct drm_plane *base_plane,
+				       unsigned int alpha)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+	plane->alpha = alpha;
+
+	if (plane->osd_layer) {
+		xilinx_osd_layer_set_alpha(plane->osd_layer, alpha);
+		return;
+	}
+
+	if (plane->manager->dp_sub)
+		xilinx_drm_dp_sub_set_alpha(plane->manager->dp_sub, alpha);
+}
+
+/* Store the alpha-enable flag and push it to the OSD layer or DP subsystem. */
+static void xilinx_drm_plane_enable_alpha(struct drm_plane *base_plane,
+					  bool enable)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+	plane->alpha_enable = enable;
+
+	if (plane->osd_layer) {
+		xilinx_osd_layer_enable_alpha(plane->osd_layer, enable);
+		return;
+	}
+
+	if (plane->manager->dp_sub)
+		xilinx_drm_dp_sub_enable_alpha(plane->manager->dp_sub, enable);
+}
+
+/*
+ * drm_plane_funcs.set_property implementation: dispatch the value to the
+ * matching handler, then mirror it into the DRM property state. Returns
+ * -EINVAL for properties this plane does not own.
+ */
+static int xilinx_drm_plane_set_property(struct drm_plane *base_plane,
+					 struct drm_property *property,
+					 uint64_t val)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	struct xilinx_drm_plane_manager *manager = plane->manager;
+	bool handled = true;
+
+	if (property == manager->zpos_prop)
+		xilinx_drm_plane_set_zpos(base_plane, val);
+	else if (property == manager->alpha_prop)
+		xilinx_drm_plane_set_alpha(base_plane, val);
+	else if (property == manager->alpha_enable_prop)
+		xilinx_drm_plane_enable_alpha(base_plane, val);
+	else
+		handled = false;
+
+	if (!handled)
+		return -EINVAL;
+
+	drm_object_property_set_value(&base_plane->base, property, val);
+
+	return 0;
+}
+
+/* legacy (non-atomic) plane callbacks */
+static struct drm_plane_funcs xilinx_drm_plane_funcs = {
+	.update_plane	= xilinx_drm_plane_update,
+	.disable_plane	= xilinx_drm_plane_disable,
+	.destroy	= xilinx_drm_plane_destroy,
+	.set_property	= xilinx_drm_plane_set_property,
+};
+
+/* Report the widest scanline this plane's manager supports. */
+int xilinx_drm_plane_get_max_width(struct drm_plane *base_plane)
+{
+	return to_xilinx_plane(base_plane)->manager->max_width;
+}
+
+/* Return true if any registered plane scans out @format. */
+bool xilinx_drm_plane_check_format(struct xilinx_drm_plane_manager *manager,
+				   u32 format)
+{
+	unsigned int i;
+
+	for (i = 0; i < MAX_PLANES; i++) {
+		const struct xilinx_drm_plane *p = manager->planes[i];
+
+		if (p && p->format == format)
+			return true;
+	}
+
+	return false;
+}
+
+/* get the number of planes managed by @manager */
+int xilinx_drm_plane_get_num_planes(struct xilinx_drm_plane_manager *manager)
+{
+	return manager->num_planes;
+}
+
+/**
+ * xilinx_drm_plane_restore - Restore the plane states
+ * @manager: the plane manager
+ *
+ * Restore the plane states to the default ones. Any state that needs to be
+ * restored should be here. This improves consistency as applications see
+ * the same default values, and removes mismatch between software and hardware
+ * values as software values are updated as hardware values are reset.
+ */
+void xilinx_drm_plane_restore(struct xilinx_drm_plane_manager *manager)
+{
+	struct xilinx_drm_plane *plane;
+	unsigned int i;
+
+	/*
+	 * Reinitialize property default values as they get reset by DPMS OFF
+	 * operation. User will read the correct default values later, and
+	 * planes will be initialized with default values.
+	 */
+	for (i = 0; i < manager->num_planes; i++) {
+		plane = manager->planes[i];
+
+		/* default priority/zpos is the plane id */
+		plane->prio = plane->id;
+		plane->zpos = plane->id;
+		if (manager->zpos_prop)
+			drm_object_property_set_value(&plane->base.base,
+						      manager->zpos_prop,
+						      plane->prio);
+
+		plane->alpha = manager->default_alpha;
+		if (manager->alpha_prop)
+			drm_object_property_set_value(&plane->base.base,
+						      manager->alpha_prop,
+						      plane->alpha);
+
+		plane->alpha_enable = true;
+		if (manager->alpha_enable_prop)
+			drm_object_property_set_value(&plane->base.base,
+						      manager->alpha_enable_prop, true);
+	}
+}
+
+/* Return the DRM fourcc this plane is configured to scan out. */
+u32 xilinx_drm_plane_get_format(struct drm_plane *base_plane)
+{
+	return to_xilinx_plane(base_plane)->format;
+}
+
+/**
+ * xilinx_drm_plane_get_align - Get the alignment value for pitch
+ * @base_plane: Base drm plane object
+ *
+ * Get the alignment value for pitch from the dma device. The first DMA
+ * channel is used; its copy_align is a power-of-two exponent, hence the
+ * shift.
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_plane_get_align(struct drm_plane *base_plane)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+	return 1 << plane->dma[0].chan->device->copy_align;
+}
+
+/*
+ * Create the manager-level plane properties. The zpos property only
+ * exists with an OSD; alpha properties exist with either an OSD or the
+ * DP subsystem.
+ */
+static void
+xilinx_drm_plane_create_property(struct xilinx_drm_plane_manager *manager)
+{
+	if (manager->osd)
+		manager->zpos_prop = drm_property_create_range(manager->drm, 0,
+				"zpos", 0, manager->num_planes - 1);
+
+	if (manager->osd || manager->dp_sub) {
+		manager->alpha_prop = drm_property_create_range(manager->drm, 0,
+				"alpha", 0, manager->default_alpha);
+		manager->alpha_enable_prop =
+			drm_property_create_bool(manager->drm, 0,
+						 "global alpha enable");
+	}
+}
+
+/* attach plane properties */
+static void xilinx_drm_plane_attach_property(struct drm_plane *base_plane)
+{
+	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+	struct xilinx_drm_plane_manager *manager = plane->manager;
+
+	if (manager->zpos_prop)
+		drm_object_attach_property(&base_plane->base,
+					   manager->zpos_prop,
+					   plane->id);
+
+	if (manager->alpha_prop) {
+		/*
+		 * DP subsystem alpha is global: only the primary plane
+		 * exposes it.
+		 *
+		 * NOTE(review): this early return also skips the
+		 * 'alpha_enable = true' assignment at the bottom for
+		 * non-primary dp_sub planes — confirm that is intended.
+		 */
+		if (manager->dp_sub && !plane->primary)
+			return;
+
+		drm_object_attach_property(&base_plane->base,
+					   manager->alpha_prop,
+					   manager->default_alpha);
+		drm_object_attach_property(&base_plane->base,
+					   manager->alpha_enable_prop, false);
+	}
+
+	plane->alpha_enable = true;
+}
+
+/**
+ * xilinx_drm_plane_manager_dpms - Set DPMS for the Xilinx plane manager
+ * @manager: Xilinx plane manager object
+ * @dpms: requested DPMS
+ *
+ * Set the Xilinx plane manager to the given DPMS state. This function is
+ * usually called from the CRTC driver with calling xilinx_drm_plane_dpms().
+ * ON enables the shared OSD / DP subsystem blocks; any other state resets
+ * or disables them.
+ */
+void xilinx_drm_plane_manager_dpms(struct xilinx_drm_plane_manager *manager,
+				   int dpms)
+{
+	switch (dpms) {
+	case DRM_MODE_DPMS_ON:
+		if (manager->dp_sub) {
+			/* black background color */
+			xilinx_drm_dp_sub_set_bg_color(manager->dp_sub,
+						       0, 0, 0);
+			xilinx_drm_dp_sub_enable(manager->dp_sub);
+		}
+
+		if (manager->osd) {
+			xilinx_osd_disable_rue(manager->osd);
+			xilinx_osd_enable(manager->osd);
+			xilinx_osd_enable_rue(manager->osd);
+		}
+
+		break;
+	default:
+		if (manager->osd)
+			xilinx_osd_reset(manager->osd);
+
+		if (manager->dp_sub)
+			xilinx_drm_dp_sub_disable(manager->dp_sub);
+
+		break;
+	}
+}
+
+/**
+ * xilinx_drm_plane_manager_mode_set - Set the mode to the Xilinx plane manager
+ * @manager: Xilinx plane manager object
+ * @crtc_w: CRTC width
+ * @crtc_h: CRTC height
+ *
+ * Set the width and height of the Xilinx plane manager. This function is
+ * usually called from the CRTC driver before calling the
+ * xilinx_drm_plane_mode_set(). Only the OSD carries manager-level
+ * dimensions; without one this is a no-op.
+ */
+void xilinx_drm_plane_manager_mode_set(struct xilinx_drm_plane_manager *manager,
+				       unsigned int crtc_w, unsigned int crtc_h)
+{
+	if (manager->osd)
+		xilinx_osd_set_dimension(manager->osd, crtc_w, crtc_h);
+}
+
+/**
+ * xilinx_drm_plane_create - Create a Xilinx drm plane
+ * @manager: plane manager
+ * @possible_crtcs: possible CRTC bitmask for the new plane
+ * @primary: true to create the primary plane
+ *
+ * Find a free plane slot, parse the corresponding "planeN" device-tree
+ * node (DMA channels and optional rgb2yuv / cresample sub-cores), attach
+ * an OSD or DP subsystem layer when available, and register the plane
+ * with the DRM core.
+ *
+ * Return: the new plane, or an ERR_PTR() on failure.
+ */
+static struct xilinx_drm_plane *
+xilinx_drm_plane_create(struct xilinx_drm_plane_manager *manager,
+			unsigned int possible_crtcs, bool primary)
+{
+	struct xilinx_drm_plane *plane;
+	struct device *dev = manager->drm->dev;
+	char plane_name[16];
+	struct device_node *plane_node;
+	struct device_node *sub_node;
+	struct property *prop;
+	const char *dma_name;
+	enum drm_plane_type type;
+	u32 fmt_in = 0;
+	u32 fmt_out = 0;
+	const char *fmt;
+	int i;
+	int ret;
+	u32 *fmts = NULL;
+	unsigned int num_fmts = 0;
+
+	/* find the first unused plane slot */
+	for (i = 0; i < manager->num_planes; i++)
+		if (!manager->planes[i])
+			break;
+
+	if (i >= manager->num_planes) {
+		DRM_ERROR("failed to allocate plane\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	snprintf(plane_name, sizeof(plane_name), "plane%d", i);
+	plane_node = of_get_child_by_name(manager->node, plane_name);
+	if (!plane_node) {
+		DRM_ERROR("failed to find a plane node\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
+	if (!plane) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	/*
+	 * Set the back-pointer before any failure path: the err_init path
+	 * previously dereferenced plane->manager while it was still NULL
+	 * (it used to be assigned only after drm_universal_plane_init()
+	 * succeeded).
+	 */
+	plane->manager = manager;
+	plane->primary = primary;
+	plane->id = i;
+	plane->prio = i;
+	plane->zpos = i;
+	plane->alpha = manager->default_alpha;
+	plane->dpms = DRM_MODE_DPMS_OFF;
+	plane->format = 0;
+	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+	i = 0;
+	of_property_for_each_string(plane_node, "dma-names", prop, dma_name) {
+		if (i >= MAX_NUM_SUB_PLANES) {
+			DRM_WARN("%s contains too many sub-planes (dma-names), indexes %d and above ignored\n",
+				 of_node_full_name(plane_node),
+				 MAX_NUM_SUB_PLANES);
+			break;
+		}
+		plane->dma[i].chan = of_dma_request_slave_channel(plane_node,
+								  dma_name);
+		if (IS_ERR(plane->dma[i].chan)) {
+			ret = PTR_ERR(plane->dma[i].chan);
+			DRM_ERROR("failed to request dma channel \"%s\" for plane %s (err:%d)\n",
+				  dma_name, of_node_full_name(plane_node), ret);
+			plane->dma[i].chan = NULL;
+			goto err_dma;
+		}
+		++i;
+	}
+
+	if (i == 0) {
+		DRM_ERROR("plane \"%s\" doesn't have any dma channels (dma-names)\n",
+			  of_node_full_name(plane_node));
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	/*
+	 * probe color space converter
+	 *
+	 * NOTE(review): 'i' is the dma channel count left over from the
+	 * loop above; using it as the phandle index here (and for
+	 * "xlnx,cresample" below) looks unintended — index 0 would be
+	 * expected. Confirm against the DT binding before changing.
+	 */
+	sub_node = of_parse_phandle(plane_node, "xlnx,rgb2yuv", i);
+	if (sub_node) {
+		plane->rgb2yuv = xilinx_rgb2yuv_probe(dev, sub_node);
+		of_node_put(sub_node);
+		if (IS_ERR(plane->rgb2yuv)) {
+			DRM_ERROR("failed to probe a rgb2yuv\n");
+			ret = PTR_ERR(plane->rgb2yuv);
+			goto err_dma;
+		}
+
+		/* rgb2yuv input format */
+		plane->format = DRM_FORMAT_XRGB8888;
+
+		/* rgb2yuv output format */
+		fmt_out = DRM_FORMAT_YUV444;
+	}
+
+	/* probe chroma resampler */
+	sub_node = of_parse_phandle(plane_node, "xlnx,cresample", i);
+	if (sub_node) {
+		plane->cresample = xilinx_cresample_probe(dev, sub_node);
+		of_node_put(sub_node);
+		if (IS_ERR(plane->cresample)) {
+			DRM_ERROR("failed to probe a cresample\n");
+			ret = PTR_ERR(plane->cresample);
+			goto err_dma;
+		}
+
+		/* cresample input format */
+		fmt = xilinx_cresample_get_input_format_name(plane->cresample);
+		ret = xilinx_drm_format_by_name(fmt, &fmt_in);
+		if (ret)
+			goto err_dma;
+
+		/* format sanity check */
+		if ((fmt_out != 0) && (fmt_out != fmt_in)) {
+			DRM_ERROR("input/output format mismatch\n");
+			ret = -EINVAL;
+			goto err_dma;
+		}
+
+		if (plane->format == 0)
+			plane->format = fmt_in;
+
+		/* cresample output format */
+		fmt = xilinx_cresample_get_output_format_name(plane->cresample);
+		ret = xilinx_drm_format_by_name(fmt, &fmt_out);
+		if (ret)
+			goto err_dma;
+	}
+
+	/* create an OSD layer when OSD is available */
+	if (manager->osd) {
+		/* format sanity check */
+		if ((fmt_out != 0) && (fmt_out != manager->format)) {
+			DRM_ERROR("input/output format mismatch\n");
+			ret = -EINVAL;
+			goto err_dma;
+		}
+
+		/* create an osd layer */
+		plane->osd_layer = xilinx_osd_layer_get(manager->osd);
+		if (IS_ERR(plane->osd_layer)) {
+			DRM_ERROR("failed to create a osd layer\n");
+			ret = PTR_ERR(plane->osd_layer);
+			plane->osd_layer = NULL;
+			goto err_dma;
+		}
+
+		if (plane->format == 0)
+			plane->format = manager->format;
+	}
+
+	if (manager->dp_sub) {
+		plane->dp_layer = xilinx_drm_dp_sub_layer_get(manager->dp_sub,
+							      primary);
+		if (IS_ERR(plane->dp_layer)) {
+			DRM_ERROR("failed to create a dp_sub layer\n");
+			ret = PTR_ERR(plane->dp_layer);
+			plane->dp_layer = NULL;
+			goto err_dma;
+		}
+
+		if (primary) {
+			ret = xilinx_drm_dp_sub_layer_set_fmt(manager->dp_sub,
+							      plane->dp_layer,
+							      manager->format);
+			if (ret) {
+				DRM_ERROR("failed to set dp_sub layer fmt\n");
+				goto err_dma;
+			}
+		}
+
+		plane->format =
+			xilinx_drm_dp_sub_layer_get_fmt(manager->dp_sub,
+							plane->dp_layer);
+		xilinx_drm_dp_sub_layer_get_fmts(manager->dp_sub,
+						 plane->dp_layer, &fmts,
+						 &num_fmts);
+	}
+
+	/* If there's no IP other than VDMA, pick the manager's format */
+	if (plane->format == 0)
+		plane->format = manager->format;
+
+	/* initialize drm plane */
+	type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(manager->drm, &plane->base,
+				       possible_crtcs, &xilinx_drm_plane_funcs,
+				       fmts ? fmts : &plane->format,
+				       num_fmts ? num_fmts : 1, NULL, type,
+				       NULL);
+	if (ret) {
+		DRM_ERROR("failed to initialize plane\n");
+		goto err_init;
+	}
+	manager->planes[plane->id] = plane;
+
+	xilinx_drm_plane_attach_property(&plane->base);
+
+	of_node_put(plane_node);
+
+	return plane;
+
+err_init:
+	if (manager->dp_sub) {
+		xilinx_drm_dp_sub_layer_disable(manager->dp_sub,
+						plane->dp_layer);
+		xilinx_drm_dp_sub_layer_put(manager->dp_sub,
+					    plane->dp_layer);
+	}
+	if (manager->osd) {
+		xilinx_osd_layer_disable(plane->osd_layer);
+		xilinx_osd_layer_put(plane->osd_layer);
+	}
+err_dma:
+	for (i = 0; i < MAX_NUM_SUB_PLANES; i++)
+		if (plane->dma[i].chan)
+			dma_release_channel(plane->dma[i].chan);
+err_out:
+	of_node_put(plane_node);
+	return ERR_PTR(ret);
+}
+
+/* Allocate the primary plane for the CRTC. */
+struct drm_plane *
+xilinx_drm_plane_create_primary(struct xilinx_drm_plane_manager *manager,
+				unsigned int possible_crtcs)
+{
+	struct xilinx_drm_plane *plane =
+		xilinx_drm_plane_create(manager, possible_crtcs, true);
+
+	if (IS_ERR(plane)) {
+		DRM_ERROR("failed to allocate a primary plane\n");
+		return ERR_CAST(plane);
+	}
+
+	return &plane->base;
+}
+
+/* Populate every remaining manager slot with an overlay plane. */
+int xilinx_drm_plane_create_planes(struct xilinx_drm_plane_manager *manager,
+				   unsigned int possible_crtcs)
+{
+	int i;
+
+	for (i = 0; i < manager->num_planes; i++) {
+		struct xilinx_drm_plane *plane;
+
+		/* slot already occupied (e.g. by the primary plane) */
+		if (manager->planes[i])
+			continue;
+
+		plane = xilinx_drm_plane_create(manager, possible_crtcs, false);
+		if (IS_ERR(plane)) {
+			DRM_ERROR("failed to allocate a plane\n");
+			return PTR_ERR(plane);
+		}
+
+		manager->planes[i] = plane;
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize a plane manager: num_planes, format, max_width. With an OSD
+ * these come from the OSD core and its format must match the manager's;
+ * with the DP subsystem they are fixed constants; otherwise a single
+ * 4096-wide plane is assumed.
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+static int
+xilinx_drm_plane_init_manager(struct xilinx_drm_plane_manager *manager)
+{
+	unsigned int format;
+	u32 drm_format = 0;
+	int ret = 0;
+
+	if (manager->osd) {
+		manager->num_planes = xilinx_osd_get_num_layers(manager->osd);
+		manager->max_width = xilinx_osd_get_max_width(manager->osd);
+
+		format = xilinx_osd_get_format(manager->osd);
+		ret = xilinx_drm_format_by_code(format, &drm_format);
+		/*
+		 * Only compare on successful lookup: drm_format was
+		 * previously read even when xilinx_drm_format_by_code()
+		 * failed and left it uninitialized.
+		 */
+		if (!ret && drm_format != manager->format)
+			ret = -EINVAL;
+	} else if (manager->dp_sub) {
+		manager->num_planes = XILINX_DRM_DP_SUB_NUM_LAYERS;
+		manager->max_width = XILINX_DRM_DP_SUB_MAX_WIDTH;
+	} else {
+		/* without osd, only one plane is supported */
+		manager->num_planes = 1;
+		manager->max_width = 4096;
+	}
+
+	return ret;
+}
+
+/**
+ * xilinx_drm_plane_probe_manager - Probe the plane manager for @drm
+ * @drm: drm device
+ *
+ * Look up the "planes" child node and base pixel format, probe the
+ * optional OSD and DP subsystem, and initialize the manager and its
+ * properties.
+ *
+ * All error paths drop the references taken on the "planes" node and the
+ * DP subsystem (the original code leaked both).
+ *
+ * Return: the new manager, or an ERR_PTR() on failure.
+ */
+struct xilinx_drm_plane_manager *
+xilinx_drm_plane_probe_manager(struct drm_device *drm)
+{
+	struct xilinx_drm_plane_manager *manager;
+	struct device *dev = drm->dev;
+	struct device_node *sub_node;
+	const char *format;
+	int ret;
+
+	manager = devm_kzalloc(dev, sizeof(*manager), GFP_KERNEL);
+	if (!manager)
+		return ERR_PTR(-ENOMEM);
+
+	/* this node is used to create a plane */
+	manager->node = of_get_child_by_name(dev->of_node, "planes");
+	if (!manager->node) {
+		DRM_ERROR("failed to get a planes node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* check the base pixel format of plane manager */
+	ret = of_property_read_string(manager->node, "xlnx,pixel-format",
+				      &format);
+	if (ret < 0) {
+		DRM_ERROR("failed to get a plane manager format\n");
+		goto err_node;
+	}
+
+	ret = xilinx_drm_format_by_name(format, &manager->format);
+	if (ret < 0) {
+		DRM_ERROR("invalid plane manager format\n");
+		goto err_node;
+	}
+
+	manager->drm = drm;
+
+	/* probe an OSD. proceed even if there's no OSD */
+	sub_node = of_parse_phandle(dev->of_node, "xlnx,osd", 0);
+	if (sub_node) {
+		manager->osd = xilinx_osd_probe(dev, sub_node);
+		of_node_put(sub_node);
+		if (IS_ERR(manager->osd)) {
+			DRM_ERROR("failed to probe an osd\n");
+			ret = PTR_ERR(manager->osd);
+			goto err_node;
+		}
+		manager->default_alpha = OSD_MAX_ALPHA;
+	}
+
+	manager->dp_sub = xilinx_drm_dp_sub_of_get(drm->dev->of_node);
+	if (IS_ERR(manager->dp_sub)) {
+		DRM_DEBUG_KMS("failed to get a dp_sub\n");
+		ret = PTR_ERR(manager->dp_sub);
+		goto err_node;
+	} else if (manager->dp_sub) {
+		manager->default_alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+	}
+
+	ret = xilinx_drm_plane_init_manager(manager);
+	if (ret) {
+		DRM_ERROR("failed to init a plane manager\n");
+		goto err_dp_sub;
+	}
+
+	xilinx_drm_plane_create_property(manager);
+
+	return manager;
+
+err_dp_sub:
+	xilinx_drm_dp_sub_put(manager->dp_sub);
+err_node:
+	of_node_put(manager->node);
+	return ERR_PTR(ret);
+}
+
+/* Release the DP subsystem and device-node references held by @manager. */
+void xilinx_drm_plane_remove_manager(struct xilinx_drm_plane_manager *manager)
+{
+	xilinx_drm_dp_sub_put(manager->dp_sub);
+	of_node_put(manager->node);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_plane.h b/drivers/gpu/drm/xilinx/xilinx_drm_plane.h
new file mode 100644
index 000000000000..3d3616b5a9d1
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_plane.h
@@ -0,0 +1,61 @@
+/*
+ * Xilinx DRM plane header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_PLANE_H_
+#define _XILINX_DRM_PLANE_H_
+
+struct drm_crtc;
+struct drm_plane;
+
+/* plane operations */
+void xilinx_drm_plane_dpms(struct drm_plane *base_plane, int dpms);
+void xilinx_drm_plane_commit(struct drm_plane *base_plane);
+/* src_* are in whole pixels here (callers shift 16.16 values down) */
+int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
+			      struct drm_framebuffer *fb,
+			      int crtc_x, int crtc_y,
+			      unsigned int crtc_w, unsigned int crtc_h,
+			      u32 src_x, u32 src_y,
+			      u32 src_w, u32 src_h);
+int xilinx_drm_plane_get_max_width(struct drm_plane *base_plane);
+u32 xilinx_drm_plane_get_format(struct drm_plane *base_plane);
+unsigned int xilinx_drm_plane_get_align(struct drm_plane *base_plane);
+
+/* plane manager operations */
+struct xilinx_drm_plane_manager;
+
+void
+xilinx_drm_plane_manager_mode_set(struct xilinx_drm_plane_manager *manager,
+				  unsigned int crtc_w, unsigned int crtc_h);
+void xilinx_drm_plane_manager_dpms(struct xilinx_drm_plane_manager *manager,
+				   int dpms);
+struct drm_plane *
+xilinx_drm_plane_create_primary(struct xilinx_drm_plane_manager *manager,
+				unsigned int possible_crtcs);
+int xilinx_drm_plane_create_planes(struct xilinx_drm_plane_manager *manager,
+				   unsigned int possible_crtcs);
+
+bool xilinx_drm_plane_check_format(struct xilinx_drm_plane_manager *manager,
+				   u32 format);
+int xilinx_drm_plane_get_num_planes(struct xilinx_drm_plane_manager *manager);
+
+void xilinx_drm_plane_restore(struct xilinx_drm_plane_manager *manager);
+
+struct xilinx_drm_plane_manager *
+xilinx_drm_plane_probe_manager(struct drm_device *drm);
+void xilinx_drm_plane_remove_manager(struct xilinx_drm_plane_manager *manager);
+
+#endif /* _XILINX_DRM_PLANE_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c
new file mode 100644
index 000000000000..c33b3dfb6809
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c
@@ -0,0 +1,1452 @@
+/*
+ * Xilinx FPGA SDI Tx Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xilinx_drm_sdi.h"
+#include "xilinx_vtc.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_MDL_EN_MASK BIT(0)
+#define XSDI_TX_CTRL_OUT_EN_MASK BIT(1)
+#define XSDI_TX_CTRL_M_MASK BIT(7)
+#define XSDI_TX_CTRL_INS_CRC_MASK BIT(12)
+#define XSDI_TX_CTRL_INS_ST352_MASK BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352_MASK BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT_MASK BIT(16)
+#define XSDI_TX_CTRL_SD_BITREP_BYPASS_MASK BIT(17)
+#define XSDI_TX_CTRL_USE_ANC_IN_MASK BIT(18)
+#define XSDI_TX_CTRL_INS_LN_MASK BIT(19)
+#define XSDI_TX_CTRL_INS_EDH_MASK BIT(20)
+#define XSDI_TX_CTRL_MODE_MASK 0x7
+#define XSDI_TX_CTRL_MUX_MASK 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_INS_CRC_SHIFT 12
+#define XSDI_TX_CTRL_INS_ST352_SHIFT 13
+#define XSDI_TX_CTRL_OVR_ST352_SHIFT 14
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_INS_SYNC_BIT_SHIFT 16
+#define XSDI_TX_CTRL_SD_BITREP_BYPASS_SHIFT 17
+#define XSDI_TX_CTRL_USE_ANC_IN_SHIFT 18
+#define XSDI_TX_CTRL_INS_LN_SHIFT 19
+#define XSDI_TX_CTRL_INS_EDH_SHIFT 20
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR_MASK BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR_MASK BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR_MASK BIT(8)
+#define XSDI_OVERFLOW_INTR_MASK BIT(9)
+#define XSDI_UNDERFLOW_INTR_MASK BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR_MASK | \
+ XSDI_TX_CE_ALIGN_ERR_INTR_MASK | \
+ XSDI_OVERFLOW_INTR_MASK | \
+ XSDI_UNDERFLOW_INTR_MASK)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_BRIDGE_CTRL_EN_MASK BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN_MASK BIT(9)
+#define XSDI_TX_CTRL_EN_MASK BIT(0)
+
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_DONE_MASK BIT(0)
+#define XSDI_TX_TDATA_FAIL_MASK BIT(1)
+#define XSDI_TX_TDATA_GT_RESETDONE_MASK BIT(2)
+#define XSDI_TX_TDATA_SLEW_RATE_MASK BIT(3)
+#define XSDI_TX_TDATA_TXPLLCLKSEL_MASK GENMASK(5, 4)
+#define XSDI_TX_TDATA_GT_SYSCLKSEL_MASK GENMASK(7, 6)
+#define XSDI_TX_TDATA_FABRIC_RST_MASK BIT(8)
+#define XSDI_TX_TDATA_DRP_FAIL_MASK BIT(9)
+#define XSDI_TX_TDATA_FAIL_CODE_MASK GENMASK(14, 12)
+#define XSDI_TX_TDATA_DRP_FAIL_CNT_MASK 0xFF0000
+#define XSDI_TX_TDATA_GT_QPLL0LOCK_MASK BIT(24)
+#define XSDI_TX_TDATA_GT_QPLL1LOCK_MASK BIT(25)
+
+/* Maximum number of ST352 data streams handled by the Tx core */
+#define SDI_MAX_DATASTREAM	8
+
+/* Data stream interleaving (mux) pattern selectors for XSDI_TX_MDL_CTRL */
+#define XSDI_TX_MUX_SD_HD_3GA	0
+#define XSDI_TX_MUX_3GB	1
+#define XSDI_TX_MUX_8STREAM_6G_12G	2
+#define XSDI_TX_MUX_4STREAM_6G	3
+#define XSDI_TX_MUX_16STREAM_12G	4
+
+#define PIXELS_PER_CLK	2
+#define XSDI_CH_SHIFT	29
+#define XST352_PROG_PIC_MASK	BIT(6)
+#define XST352_PROG_TRANS_MASK	BIT(7)
+#define XST352_2048_SHIFT	BIT(6)
+#define ST352_BYTE3	0x00
+#define ST352_BYTE4	0x01
+/* sentinel meaning "no valid mux pattern found" */
+#define INVALID_VALUE	-1
+/* GT reset-done wait timeout - NOTE(review): units look like ms, confirm */
+#define GT_TIMEOUT	500
+
+/* all probed SDI Tx instances; presumably serialized by xilinx_sdi_lock */
+static LIST_HEAD(xilinx_sdi_list);
+static DEFINE_MUTEX(xilinx_sdi_lock);
+/**
+ * enum payload_line_1 - Payload Ids Line 1 number
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD,3G,6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ *
+ * Field-1 line numbers at which the ST352 payload ID packet is inserted.
+ */
+enum payload_line_1 {
+	PAYLD_LN1_HD_3_6_12G = 10,
+	PAYLD_LN1_SDPAL = 9,
+	PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - Payload Ids Line 2 number
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD,3G,6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ *
+ * Field-2 line numbers at which the ST352 payload ID packet is inserted.
+ */
+enum payload_line_2 {
+	PAYLD_LN2_HD_3_6_12G = 572,
+	PAYLD_LN2_SDPAL = 322,
+	PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * enum sdi_modes - SDI modes
+ * @XSDI_MODE_HD: HD mode
+ * @XSDI_MODE_SD: SD mode
+ * @XSDI_MODE_3GA: 3GA mode
+ * @XSDI_MODE_3GB: 3GB mode
+ * @XSDI_MODE_6G: 6G mode
+ * @XSDI_MODE_12G: 12G mode
+ *
+ * Values match the "sdi_mode" DRM property range (0..5) exposed to
+ * userspace and are written into the mode field of XSDI_TX_MDL_CTRL.
+ */
+enum sdi_modes {
+	XSDI_MODE_HD = 0,
+	XSDI_MODE_SD,
+	XSDI_MODE_3GA,
+	XSDI_MODE_3GB,
+	XSDI_MODE_6G,
+	XSDI_MODE_12G
+};
+
+/**
+ * struct xilinx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @vtc: Pointer to VTC structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: queue used to wait for the GT reset-done interrupt
+ * @event_received: set by the IRQ handler once GT reset-done is observed
+ * @list: entry in the global SDI subsystem list (xilinx_sdi_list)
+ * @vblank_fn: vblank handler
+ * @vblank_data: vblank data to be used in vblank_fn
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ *		0 - HD
+ *		1 - SD
+ *		2 - 3GA
+ *		3 - 3GB
+ *		4 - 6G
+ *		5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams
+ *			    value currently supported are 2, 4 and 8
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ */
+struct xilinx_sdi {
+	struct drm_encoder encoder;
+	struct drm_connector connector;
+	struct xilinx_vtc *vtc;
+	struct device *dev;
+	void __iomem *base;
+	u32 mode_flags;
+	wait_queue_head_t wait_event;
+	bool event_received;
+	struct list_head list;
+	void (*vblank_fn)(void *);
+	void *vblank_data;
+	struct drm_property *sdi_mode;
+	u32 sdi_mod_prop_val;
+	struct drm_property *sdi_data_strm;
+	u32 sdi_data_strm_prop_val;
+	struct drm_property *is_frac_prop;
+	bool is_frac_prop_val;
+};
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ *		index 0 : value for integral fps
+ *		index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ *		index 0 : value for HD mode
+ *		index 1 : value for SD mode
+ *		index 2 : value for 3GA
+ *		index 3 : value for 3GB
+ *		index 4 : value for 6G
+ *		index 5 : value for 12G
+ */
+struct xlnx_sdi_display_config {
+	struct drm_display_mode mode;
+	u8 st352_byt2[2];
+	u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - table of SDI-supported DRM display modes.
+ * Entry 0 is a dummy placeholder so that table indices line up with VICs,
+ * which start at 1.  For each mode, st352_byt2 holds the ST352 byte 2
+ * value ([0] integer fps, [1] fractional fps) and st352_byt1 the byte 1
+ * value per SDI mode (HD, SD, 3GA, 3GB, 6G, 12G).
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x480i@60Hz */
+ {{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+/* Map an embedded connector/encoder back to its enclosing struct xilinx_sdi */
+#define connector_to_sdi(c) container_of(c, struct xilinx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xilinx_sdi, encoder)
+
+/**
+ * xilinx_sdi_writel - Memory mapped SDI Tx register write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset (byte offset from @base)
+ * @val: value to be written
+ *
+ * This function writes the value to SDI TX registers
+ */
+static inline void xilinx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/**
+ * xilinx_sdi_readl - Memory mapped SDI Tx register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset (byte offset from @base)
+ *
+ * Return: The contents of the SDI Tx register
+ *
+ * This function returns the contents of the corresponding SDI Tx register.
+ */
+static inline u32 xilinx_sdi_readl(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/**
+ * xilinx_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * Sets the AXI4S bridge enable bit in the reset control register while
+ * leaving every other control bit untouched (read-modify-write).
+ */
+static void xilinx_en_axi4s(struct xilinx_sdi *sdi)
+{
+	u32 ctrl = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+
+	xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL,
+			  ctrl | XSDI_TX_AXI4S_CTRL_EN_MASK);
+}
+
+/**
+ * xilinx_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * Sets the bridge enable bit in the reset control register while leaving
+ * every other control bit untouched (read-modify-write).
+ */
+static void xilinx_en_bridge(struct xilinx_sdi *sdi)
+{
+	u32 ctrl = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+
+	xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL,
+			  ctrl | XSDI_TX_BRIDGE_CTRL_EN_MASK);
+}
+
+/**
+ * xilinx_sdi_set_default_drm_properties - Configure SDI DRM
+ * properties with their default values
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * Resets the sdi_mode, sdi_data_stream and is_frac connector properties
+ * back to 0.
+ */
+static void
+xilinx_sdi_set_default_drm_properties(struct xilinx_sdi *sdi)
+{
+	struct drm_property *props[] = {
+		sdi->sdi_mode, sdi->sdi_data_strm, sdi->is_frac_prop
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(props); i++)
+		drm_object_property_set_value(&sdi->connector.base,
+					      props[i], 0);
+}
+
+/**
+ * xilinx_sdi_irq_handler - SDI Tx interrupt handler
+ * @irq: irq number
+ * @data: irq data (points to the driver's struct xilinx_sdi)
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ * Logs CE-align/overflow/underflow error conditions, acknowledges the
+ * raised interrupts, and wakes any waiter once the GT (gigabit
+ * transceiver) side-band status reports reset-done.
+ */
+static irqreturn_t xilinx_sdi_irq_handler(int irq, void *data)
+{
+	struct xilinx_sdi *sdi = (struct xilinx_sdi *)data;
+	u32 reg;
+
+	reg = xilinx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+	if (reg & XSDI_GTTX_RSTDONE_INTR_MASK)
+		dev_dbg(sdi->dev, "GT reset interrupt received\n");
+	if (reg & XSDI_TX_CE_ALIGN_ERR_INTR_MASK)
+		dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+	if (reg & XSDI_OVERFLOW_INTR_MASK)
+		dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+	if (reg & XSDI_UNDERFLOW_INTR_MASK)
+		dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+	/*
+	 * Ack all raised interrupts except the vid-lock bit, which is
+	 * deliberately left pending - NOTE(review): assumes W1C semantics
+	 * for XSDI_TX_ISR_STAT, confirm against the IP data sheet.
+	 */
+	xilinx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+			  reg & ~(XSDI_AXI4S_VID_LOCK_INTR_MASK));
+
+	reg = xilinx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+	if (reg & XSDI_TX_TDATA_GT_RESETDONE_MASK) {
+		sdi->event_received = true;
+		wake_up_interruptible(&sdi->wait_event);
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * This function programs the ST352 packet insertion line numbers for both
+ * fields and enables field-2 ST352 insertion in the module control
+ * register.
+ */
+static void xilinx_sdi_set_payload_line(struct xilinx_sdi *sdi,
+					u32 line_1, u32 line_2)
+{
+	u32 data;
+
+	data = ((line_1 & XSDI_TX_ST352_LINE_MASK) |
+		((line_2 & XSDI_TX_ST352_LINE_MASK) <<
+		XSDI_TX_ST352_LINE_F2_SHIFT));
+
+	xilinx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, data);
+
+	/* enable ST352 insertion for field 2 */
+	data = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+	data |= (1 << XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+
+	xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xilinx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * Writes the ST352 payload word into the per-stream payload register
+ * (consecutive 32-bit registers starting at XSDI_TX_ST352_DATA_CH0).
+ */
+static void xilinx_sdi_set_payload_data(struct xilinx_sdi *sdi,
+					u32 data_strm, u32 payload)
+{
+	u32 offset = XSDI_TX_ST352_DATA_CH0 + data_strm * 4;
+
+	xilinx_sdi_writel(sdi->base, offset, payload);
+}
+
+/**
+ * xilinx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * Clears the ST352 payload of every data stream, masks all interrupts
+ * and clears the reset control register, turning the core off.
+ */
+static void xilinx_sdi_set_display_disable(struct xilinx_sdi *sdi)
+{
+	u32 strm;
+
+	for (strm = 0; strm < SDI_MAX_DATASTREAM; strm++)
+		xilinx_sdi_set_payload_data(sdi, strm, 0);
+
+	xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+	xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xilinx_sdi_payload_config - config the SDI payload parameters
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * Chooses the ST352 insertion line numbers for the given SDI mode (SD
+ * uses the PAL line pair, every other known mode the HD/3G/6G/12G pair,
+ * anything else 0) and programs them into the core.
+ */
+static void xilinx_sdi_payload_config(struct xilinx_sdi *sdi, u32 mode)
+{
+	u32 ln1 = 0, ln2 = 0;
+
+	if (mode == XSDI_MODE_SD) {
+		ln1 = PAYLD_LN1_SDPAL;
+		ln2 = PAYLD_LN2_SDPAL;
+	} else if (mode == XSDI_MODE_HD || mode == XSDI_MODE_3GA ||
+		   mode == XSDI_MODE_3GB || mode == XSDI_MODE_6G ||
+		   mode == XSDI_MODE_12G) {
+		ln1 = PAYLD_LN1_HD_3_6_12G;
+		ln2 = PAYLD_LN2_HD_3_6_12G;
+	}
+
+	xilinx_sdi_set_payload_line(sdi, ln1, ln2);
+}
+
+/**
+ * xilinx_set_sdi_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer 1 - fractional
+ * @mux_ptrn: specify the data stream interleaving pattern to be used
+ *
+ * This function configures the SDI mode, fractional-rate flag and data
+ * stream interleaving (mux) pattern fields of the module control
+ * register, and programs the matching ST352 payload line numbers.
+ */
+static void xilinx_set_sdi_mode(struct xilinx_sdi *sdi, u32 mode,
+				bool is_frac, u32 mux_ptrn)
+{
+	u32 data;
+
+	xilinx_sdi_payload_config(sdi, mode);
+
+	/* clear the mode, M (fractional) and mux fields before setting them */
+	data = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+	data &= ~((XSDI_TX_CTRL_MODE_MASK << XSDI_TX_CTRL_MODE_SHIFT) |
+		(XSDI_TX_CTRL_M_MASK) | (XSDI_TX_CTRL_MUX_MASK
+		<< XSDI_TX_CTRL_MUX_SHIFT));
+
+	data |= (((mode & XSDI_TX_CTRL_MODE_MASK)
+		<< XSDI_TX_CTRL_MODE_SHIFT) |
+		(is_frac << XSDI_TX_CTRL_M_SHIFT) |
+		((mux_ptrn & XSDI_TX_CTRL_MUX_MASK) << XSDI_TX_CTRL_MUX_SHIFT));
+
+	xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xilinx_sdi_set_config_parameters - Configure SDI Tx registers with parameters
+ * given from user application.
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure having drm_property parameters
+ * configured from user application and writes them into SDI IP registers.
+ * The data stream interleaving (mux) pattern is derived from the selected
+ * SDI mode and the configured number of data streams; unsupported
+ * mode/stream combinations are rejected with an error message.
+ */
+static void xilinx_sdi_set_config_parameters(struct xilinx_sdi *sdi)
+{
+	u32 mode;
+	int mux_ptrn = INVALID_VALUE;
+	bool is_frac;
+
+	mode = sdi->sdi_mod_prop_val;
+	is_frac = sdi->is_frac_prop_val;
+
+	switch (mode) {
+	case XSDI_MODE_3GA:
+		mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+		break;
+	case XSDI_MODE_3GB:
+		mux_ptrn = XSDI_TX_MUX_3GB;
+		break;
+	case XSDI_MODE_6G:
+		if (sdi->sdi_data_strm_prop_val == 4)
+			mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+		else if (sdi->sdi_data_strm_prop_val == 8)
+			mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+		break;
+	case XSDI_MODE_12G:
+		if (sdi->sdi_data_strm_prop_val == 8)
+			mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+		break;
+	default:
+		/* SD and HD modes use the default mux pattern */
+		mux_ptrn = 0;
+		break;
+	}
+	if (mux_ptrn == INVALID_VALUE) {
+		dev_err(sdi->dev, "%d data stream not supported for %d mode\n",
+			sdi->sdi_data_strm_prop_val, mode);
+		return;
+	}
+	xilinx_set_sdi_mode(sdi, mode, is_frac, mux_ptrn);
+}
+
+/**
+ * xilinx_sdi_connector_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @base_connector: pointer Xilinx SDI connector
+ * @property: pointer to the drm_property structure
+ * @value: SDI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from user
+ * application and updates the SDI structure property variables with the
+ * values.  These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xilinx_sdi_connector_set_property(struct drm_connector *base_connector,
+				  struct drm_property *property,
+				  u64 value)
+{
+	struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+
+	if (property == sdi->sdi_mode)
+		sdi->sdi_mod_prop_val = (unsigned int)value;
+	else if (property == sdi->sdi_data_strm)
+		sdi->sdi_data_strm_prop_val = (unsigned int)value;
+	else if (property == sdi->is_frac_prop)
+		sdi->is_frac_prop_val = !!value;
+	else
+		return -EINVAL;
+	return 0;
+}
+
+/**
+ * xilinx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: index of the matching entry in xlnx_sdi_modes, or -EINVAL if
+ * the mode is not supported.
+ */
+static int xilinx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+	unsigned int i;
+
+	/* entry 0 is a dummy placeholder (VICs start at 1) - skip it */
+	for (i = 1; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+		if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+			return i;
+	return -EINVAL;
+}
+
+/**
+ * xilinx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer Xilinx SDI connector
+ *
+ * Return: Count of modes added
+ *
+ * This function adds the SDI modes supported and returns its count.
+ * Entry 0 of the mode table is an all-zero dummy placeholder (VICs start
+ * at 1) and must not be exposed to userspace, so iteration starts at 1.
+ */
+static int xilinx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+	int i, num_modes = 0;
+	struct drm_display_mode *mode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 1; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+		const struct drm_display_mode *ptr = &xlnx_sdi_modes[i].mode;
+
+		mode = drm_mode_duplicate(dev, ptr);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	}
+	return num_modes;
+}
+
+/* DPMS: delegate entirely to the generic DRM helper */
+static int xilinx_sdi_connector_dpms(struct drm_connector *connector,
+				     int mode)
+{
+	return drm_helper_connector_dpms(connector, mode);
+}
+
+/* SDI has no hot-plug detection; always report the link as connected */
+static enum drm_connector_status
+xilinx_sdi_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+/* Unregister and clean up the connector, dropping its device reference */
+static void xilinx_sdi_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xilinx_sdi_connector_funcs = {
+	.dpms = xilinx_sdi_connector_dpms,
+	.detect = xilinx_sdi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = xilinx_sdi_connector_destroy,
+	.set_property = xilinx_sdi_connector_set_property,
+};
+
+/* There is exactly one encoder per connector; return the embedded one */
+static struct drm_encoder *
+xilinx_sdi_best_encoder(struct drm_connector *connector)
+{
+	return &(connector_to_sdi(connector)->encoder);
+}
+
+/* Populate the connector's probed mode list from the static SDI table */
+static int xilinx_sdi_get_modes(struct drm_connector *connector)
+{
+	return xilinx_sdi_drm_add_modes(connector);
+}
+
+/* const for consistency with xilinx_sdi_connector_funcs;
+ * drm_connector_helper_add() takes a const pointer.
+ */
+static const struct drm_connector_helper_funcs
+xilinx_sdi_connector_helper_funcs = {
+	.get_modes = xilinx_sdi_get_modes,
+	.best_encoder = xilinx_sdi_best_encoder,
+};
+
+/**
+ * xilinx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the xilinx SDI connector component and defines
+ * the drm_property variables with their default values.
+ *
+ * Any of the drm_property_create_* calls may return NULL on allocation
+ * failure; the attach helper checks each pointer before use.
+ */
+static void
+xilinx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 1, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+}
+
+/**
+ * xilinx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * NOTE(review): each property is attached with initial value 0, but
+ * "sdi_data_stream" was created with range 2..8 — confirm the initial
+ * value is intended to fall outside the declared range.
+ */
+static void
+xilinx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (sdi->sdi_mode)
+ drm_object_attach_property(obj, sdi->sdi_mode, 0);
+
+ if (sdi->sdi_data_strm)
+ drm_object_attach_property(obj, sdi->sdi_data_strm, 0);
+
+ if (sdi->is_frac_prop)
+ drm_object_attach_property(obj, sdi->is_frac_prop, 0);
+}
+
+/*
+ * Initialize and register the DRM connector for the SDI encoder, then
+ * create and attach the driver-specific properties.
+ * Returns 0 on success or the drm_connector_init() error code.
+ */
+static int xilinx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xilinx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xilinx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xilinx_sdi_drm_connector_create_property(connector);
+ xilinx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+/* No encoder-specific mode adjustment is needed; accept the mode as-is. */
+static bool xilinx_sdi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * xilinx_sdi_set_display_enable - Enables the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI strucure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xilinx_sdi_set_display_enable(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ /* read-modify-write so other control bits are preserved */
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN_MASK;
+ /* start sdi stream */
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/*
+ * Encoder DPMS hook: DPMS_ON starts the SDI stream; every other state
+ * stops it and restores the driver's default property values.
+ */
+static void xilinx_sdi_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+
+ dev_dbg(sdi->dev, "encoder dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ xilinx_sdi_set_display_enable(sdi);
+ return;
+ default:
+ xilinx_sdi_set_display_disable(sdi);
+ xilinx_sdi_set_default_drm_properties(sdi);
+ }
+}
+
+/**
+ * xilinx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the st352 payload to be configured.
+ * Please refer to SMPTE ST352 documents for it.
+ * Return: return st352 payload
+ */
+static u32 xilinx_sdi_calc_st352_payld(struct xilinx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+ u32 id, sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ /*
+ * NOTE(review): xilinx_sdi_get_mode_id() returns -EINVAL for a mode
+ * not present in xlnx_sdi_modes[]; that value lands in the u32 'id'
+ * and is used below as an array index without any check, which would
+ * read out of bounds. Confirm callers only pass validated modes.
+ */
+ id = xilinx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ /* 2048/4096-pixel-wide modes are flagged in byte 3 */
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if ((sdi_mode == XSDI_MODE_3GB) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC_MASK;
+ if (is_p && (mode->vtotal >= 1125))
+ byt2 |= XST352_PROG_TRANS_MASK;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
+
+/**
+ * xilinx_sdi_mode_set - drive the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @mode: DRM kernel-internal display mode structure
+ * @adjusted_mode: SDI panel timing parameters
+ *
+ * This function derives the SDI IP timing parameters from the timing
+ * values given by VTC driver, programs the ST352 payload for each active
+ * data-stream pair, and configures the VTC signal generation.
+ */
+static void xilinx_sdi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ struct videomode vm;
+ u32 payload, i;
+
+ xilinx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xilinx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ u32 strm_payload = payload;
+
+ /*
+ * In 3GB mode each stream pair carries its own channel
+ * number. Use a per-iteration copy so channel bits from
+ * earlier streams do not accumulate into later payloads
+ * (the previous |= into 'payload' corrupted streams >= 2).
+ */
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ strm_payload |= (i << 1) << XSDI_CH_SHIFT;
+ xilinx_sdi_set_payload_data(sdi, i, strm_payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ /*
+ * NOTE(review): positive-sync DRM flags are mapped to the *_LOW
+ * display flags below; confirm this inversion matches the VTC's
+ * expected polarity encoding.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ xilinx_vtc_config_sig(sdi->vtc, &vm);
+}
+
+/*
+ * Encoder .prepare hook: enable CRC/ST352/EDH insertion and sync-bit
+ * insertion in the module control register, unmask the Tx interrupts,
+ * and reset the VTC before the mode is programmed.
+ */
+static void xilinx_sdi_prepare(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC_MASK | XSDI_TX_CTRL_INS_ST352_MASK |
+ XSDI_TX_CTRL_OVR_ST352_MASK | XSDI_TX_CTRL_INS_SYNC_BIT_MASK |
+ XSDI_TX_CTRL_INS_EDH_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xilinx_vtc_reset(sdi->vtc);
+}
+
+/*
+ * Encoder .commit hook: start the SDI stream, wait for the GT (gigabit
+ * transceiver) ready interrupt, then enable the SDI bridge, the VTC and
+ * the AXI4-Stream video output.
+ */
+static void xilinx_sdi_commit(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ long ret;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+ xilinx_sdi_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ /*
+ * wait_event_interruptible_timeout() returns 0 on timeout and a
+ * negative value when interrupted. Keep the result signed and
+ * check <= 0: the previous u32 variable turned -ERESTARTSYS into
+ * a large positive value that was treated as success.
+ */
+ ret = wait_event_interruptible_timeout(sdi->wait_event,
+ sdi->event_received,
+ usecs_to_jiffies(GT_TIMEOUT));
+ if (ret <= 0) {
+ dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+ return;
+ }
+ sdi->event_received = false;
+ /* enable sdi bridge, vtc and Axi4s_vid_out_ctrl */
+ xilinx_en_bridge(sdi);
+ xilinx_vtc_enable(sdi->vtc);
+ xilinx_en_axi4s(sdi);
+}
+
+static const struct drm_encoder_helper_funcs xilinx_sdi_encoder_helper_funcs = {
+ .dpms = xilinx_sdi_encoder_dpms,
+ .mode_fixup = xilinx_sdi_mode_fixup,
+ .mode_set = xilinx_sdi_mode_set,
+ .prepare = xilinx_sdi_prepare,
+ .commit = xilinx_sdi_commit,
+};
+
+static const struct drm_encoder_funcs xilinx_sdi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Component .bind hook: register the SDI encoder with the DRM device
+ * and create its connector.
+ * Return: 0 on success, or a negative error code.
+ */
+static int xilinx_sdi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_sdi *sdi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &sdi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: The possible CRTCs are 1 now as per current implementation of
+ * SDI tx drivers. DRM framework can support more than one CRTCs and
+ * SDI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ /* drm_encoder_init() can fail; its result was previously ignored */
+ ret = drm_encoder_init(drm_dev, encoder, &xilinx_sdi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ dev_err(sdi->dev, "failed to init encoder, ret = %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &xilinx_sdi_encoder_helper_funcs);
+
+ ret = xilinx_sdi_create_connector(encoder);
+ if (ret) {
+ dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ }
+ return ret;
+}
+
+/*
+ * Component .unbind hook: stop the stream and tear down encoder and
+ * connector.
+ *
+ * NOTE(review): drm_connector_cleanup() is called here directly while the
+ * connector's .destroy callback also unregisters and cleans up — confirm
+ * this cannot run both paths on the same connector.
+ */
+static void xilinx_sdi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_sdi *sdi = dev_get_drvdata(dev);
+
+ xilinx_sdi_encoder_dpms(&sdi->encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_cleanup(&sdi->encoder);
+ drm_connector_cleanup(&sdi->connector);
+}
+
+static const struct component_ops xilinx_sdi_component_ops = {
+ .bind = xilinx_sdi_bind,
+ .unbind = xilinx_sdi_unbind,
+};
+
+/*
+ * Threaded IRQ handler for the VTC vblank interrupt: forwards the event
+ * to the registered callback (if any) and clears the interrupt source.
+ */
+static irqreturn_t xilinx_sdi_vblank_handler(int irq, void *data)
+{
+ struct xilinx_sdi *sdi = (struct xilinx_sdi *)data;
+ u32 intr = xilinx_vtc_intr_get(sdi->vtc);
+
+ /* not our interrupt */
+ if (!intr)
+ return IRQ_NONE;
+
+ if (sdi->vblank_fn)
+ sdi->vblank_fn(sdi->vblank_data);
+
+ xilinx_vtc_intr_clear(sdi->vtc, intr);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_drm_sdi_enable_vblank - Enable the vblank handling
+ * @sdi: SDI subsystem
+ * @vblank_fn: callback to be called on vblank event
+ * @vblank_data: data to be used in @vblank_fn
+ *
+ * This function register the vblank handler, and the handler will be triggered
+ * on vblank event after. The callback is invoked from
+ * xilinx_sdi_vblank_handler() in threaded-IRQ context.
+ */
+void xilinx_drm_sdi_enable_vblank(struct xilinx_sdi *sdi,
+ void (*vblank_fn)(void *),
+ void *vblank_data)
+{
+ sdi->vblank_fn = vblank_fn;
+ sdi->vblank_data = vblank_data;
+ xilinx_vtc_vblank_enable(sdi->vtc);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_sdi_enable_vblank);
+
+/**
+ * xilinx_drm_sdi_disable_vblank - Disable the vblank handling
+ * @sdi: SDI subsystem
+ *
+ * Disable the vblank handler. The vblank handler and data are unregistered.
+ *
+ * NOTE(review): unlike xilinx_drm_sdi_enable_vblank(), this function is not
+ * exported with EXPORT_SYMBOL_GPL although it is declared in the public
+ * header — confirm whether the missing export is intentional.
+ */
+void xilinx_drm_sdi_disable_vblank(struct xilinx_sdi *sdi)
+{
+ sdi->vblank_fn = NULL;
+ sdi->vblank_data = NULL;
+ xilinx_vtc_vblank_disable(sdi->vtc);
+}
+
+/**
+ * xilinx_sdi_register_device - Register the SDI subsystem to the global list
+ * @sdi: SDI subsystem
+ *
+ * Register the SDI subsystem instance to the global list, under the
+ * list mutex, so xilinx_drm_sdi_of_get() can look it up later.
+ */
+static void xilinx_sdi_register_device(struct xilinx_sdi *sdi)
+{
+ mutex_lock(&xilinx_sdi_lock);
+ list_add_tail(&sdi->list, &xilinx_sdi_list);
+ mutex_unlock(&xilinx_sdi_lock);
+}
+
+/**
+ * xilinx_drm_sdi_of_get - Get the SDI subsystem instance
+ * @np: parent device node
+ *
+ * This function searches and returns a SDI subsystem structure for
+ * the parent device node, @np. The SDI subsystem node should be a child node
+ * of @np, with 'xlnx,sdi' property pointing to the SDI device node.
+ * An instance can be shared by multiple users.
+ *
+ * Return: corresponding SDI subsystem structure if found. NULL if
+ * the device node doesn't have 'xlnx,sdi' property, or -EPROBE_DEFER error
+ * pointer if the SDI subsystem isn't found.
+ */
+struct xilinx_sdi *xilinx_drm_sdi_of_get(struct device_node *np)
+{
+ struct xilinx_sdi *found = NULL;
+ struct xilinx_sdi *sdi;
+ struct device_node *sdi_node;
+
+ if (!of_find_property(np, "xlnx,sdi", NULL))
+ return NULL;
+
+ sdi_node = of_parse_phandle(np, "xlnx,sdi", 0);
+ if (!sdi_node)
+ return ERR_PTR(-EINVAL);
+
+ /* match the phandle target against the registered instances */
+ mutex_lock(&xilinx_sdi_lock);
+ list_for_each_entry(sdi, &xilinx_sdi_list, list) {
+ if (sdi->dev->of_node == sdi_node) {
+ found = sdi;
+ break;
+ }
+ }
+ mutex_unlock(&xilinx_sdi_lock);
+
+ of_node_put(sdi_node);
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+ return found;
+}
+
+/**
+ * xilinx_sdi_unregister_device - Unregister the SDI subsystem instance
+ * @sdi: SDI subsystem
+ *
+ * Unregister the SDI subsystem instance from the global list, under the
+ * same mutex used for registration and lookup.
+ */
+static void xilinx_sdi_unregister_device(struct xilinx_sdi *sdi)
+{
+ mutex_lock(&xilinx_sdi_lock);
+ list_del(&sdi->list);
+ mutex_unlock(&xilinx_sdi_lock);
+}
+
+/*
+ * Platform probe: map registers, attach the VTC, request the Tx and
+ * vblank interrupts, and register the component.
+ * Return: 0 on success, or a negative error code.
+ */
+static int xilinx_sdi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_sdi *sdi;
+ struct device_node *vtc_node;
+ int ret, irq;
+
+ sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
+ sdi->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sdi->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(sdi->base);
+ }
+ platform_set_drvdata(pdev, sdi);
+
+ vtc_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vtc", 0);
+ if (!vtc_node) {
+ /*
+ * PTR_ERR(NULL) is 0 and would make probe report success
+ * with no VTC; return a real error code instead.
+ */
+ dev_err(dev, "vtc node not present\n");
+ return -ENODEV;
+ }
+ sdi->vtc = xilinx_vtc_probe(sdi->dev, vtc_node);
+ of_node_put(vtc_node);
+ if (IS_ERR(sdi->vtc)) {
+ dev_err(dev, "failed to probe VTC\n");
+ return PTR_ERR(sdi->vtc);
+ }
+
+ /* disable interrupt */
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xilinx_sdi_irq_handler, IRQF_ONESHOT,
+ dev_name(sdi->dev), sdi);
+ if (ret < 0)
+ return ret;
+
+ irq = platform_get_irq(pdev, 1); /* vblank interrupt */
+ if (irq < 0)
+ return irq;
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xilinx_sdi_vblank_handler, IRQF_ONESHOT,
+ "sdiTx-vblank", sdi);
+ if (ret < 0)
+ return ret;
+
+ init_waitqueue_head(&sdi->wait_event);
+ sdi->event_received = false;
+
+ xilinx_sdi_register_device(sdi);
+ return component_add(dev, &xilinx_sdi_component_ops);
+}
+
+/*
+ * Platform remove: drop the instance from the global list and delete
+ * the component.
+ *
+ * NOTE(review): the instance is unregistered before component_del(), so
+ * a concurrent xilinx_drm_sdi_of_get() caller would defer — confirm this
+ * ordering is intended.
+ */
+static int xilinx_sdi_remove(struct platform_device *pdev)
+{
+ struct xilinx_sdi *sdi = platform_get_drvdata(pdev);
+
+ xilinx_sdi_unregister_device(sdi);
+ component_del(&pdev->dev, &xilinx_sdi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_sdi_of_match[] = {
+ { .compatible = "xlnx,v-smpte-uhdsdi-tx-ss"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_sdi_of_match);
+
+static struct platform_driver sdi_tx_driver = {
+ .probe = xilinx_sdi_probe,
+ .remove = xilinx_sdi_remove,
+ .driver = {
+ .name = "xlnx,uhdsdi-tx",
+ .of_match_table = xilinx_sdi_of_match,
+ },
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h
new file mode 100644
index 000000000000..b9a773eef094
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h
@@ -0,0 +1,29 @@
+/*
+ * SDI subsystem header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_SDI_H_
+#define _XILINX_DRM_SDI_H_
+
+struct xilinx_sdi;
+struct device_node;
+
+struct xilinx_sdi *xilinx_drm_sdi_of_get(struct device_node *np);
+void xilinx_drm_sdi_enable_vblank(struct xilinx_sdi *sdi,
+ void (*vblank_fn)(void *),
+ void *vblank_data);
+void xilinx_drm_sdi_disable_vblank(struct xilinx_sdi *sdi);
+#endif /* _XILINX_DRM_SDI_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_osd.c b/drivers/gpu/drm/xilinx/xilinx_osd.c
new file mode 100644
index 000000000000..b777fbbed5b8
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_osd.c
@@ -0,0 +1,382 @@
+/*
+ * Xilinx OSD support
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_osd.h"
+
+/* registers */
+#define OSD_CTL 0x000 /* control */
+#define OSD_SS 0x020 /* screen size */
+#define OSD_ENC 0x028 /* encoding register */
+#define OSD_BC0 0x100 /* background color channel 0 */
+#define OSD_BC1 0x104 /* background color channel 1 */
+#define OSD_BC2 0x108 /* background color channel 2 */
+
+#define OSD_L0C 0x110 /* layer 0 control */
+
+/* register offset of layers */
+#define OSD_LAYER_SIZE 0x10
+#define OSD_LXC 0x00 /* layer control */
+#define OSD_LXP 0x04 /* layer position */
+#define OSD_LXS 0x08 /* layer size */
+
+/*osd control register bit definition */
+#define OSD_CTL_RUE (1 << 1) /* osd reg update enable */
+#define OSD_CTL_EN (1 << 0) /* osd enable */
+
+/* osd screen size register bit definition */
+#define OSD_SS_YSIZE_MASK 0x0fff0000 /* vertical height of OSD output */
+#define OSD_SS_YSIZE_SHIFT 16 /* bit shift of OSD_SS_YSIZE_MASK */
+#define OSD_SS_XSIZE_MASK 0x00000fff /* horizontal width of OSD output */
+
+/* osd vidoe format mask */
+#define OSD_VIDEO_FORMAT_MASK 0x0000000f /* video format */
+
+/* osd background color channel 0 */
+#define OSD_BC0_YG_MASK 0x000000ff /* Y (luma) or Green */
+
+/* osd background color channel 1 */
+#define OSD_BC1_UCBB_MASK 0x000000ff /* U (Cb) or Blue */
+
+/* osd background color channel 2 */
+#define OSD_BC2_VCRR_MASK 0x000000ff /* V(Cr) or Red */
+
+/* maximum number of the layers */
+#define OSD_MAX_NUM_OF_LAYERS 8
+
+/* osd layer control (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXC_ALPHA_MASK 0x0fff0000 /* global alpha value */
+#define OSD_LXC_ALPHA_SHIFT 16 /* bit shift of alpha value */
+#define OSD_LXC_PRIORITY_MASK 0x00000700 /* layer priority */
+#define OSD_LXC_PRIORITY_SHIFT 8 /* bit shift of priority */
+#define OSD_LXC_GALPHAEN (1 << 1) /* global alpha enable */
+#define OSD_LXC_EN (1 << 0) /* layer enable */
+
+/* osd layer position (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXP_YSTART_MASK 0x0fff0000 /* vert start line */
+#define OSD_LXP_YSTART_SHIFT 16 /* vert start line bit shift */
+#define OSD_LXP_XSTART_MASK 0x00000fff /* horizontal start pixel */
+
+/* osd layer size (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXS_YSIZE_MASK 0x0fff0000 /* vert size */
+#define OSD_LXS_YSIZE_SHIFT 16 /* vertical size bit shift */
+#define OSD_LXS_XSIZE_MASK 0x00000fff /* horizontal size of layer */
+
+/* osd software reset */
+#define OSD_RST_RESET (1 << 31)
+
+/**
+ * struct xilinx_osd_layer - Xilinx OSD layer object
+ *
+ * @base: base address
+ * @id: id
+ * @avail: available flag
+ * @osd: osd
+ */
+struct xilinx_osd_layer {
+ void __iomem *base;
+ int id;
+ bool avail;
+ struct xilinx_osd *osd;
+};
+
+/**
+ * struct xilinx_osd - Xilinx OSD object
+ *
+ * @base: base address
+ * @layers: layers
+ * @num_layers: number of layers
+ * @max_width: maximum width
+ * @format: video format
+ */
+struct xilinx_osd {
+ void __iomem *base;
+ struct xilinx_osd_layer *layers[OSD_MAX_NUM_OF_LAYERS];
+ unsigned int num_layers;
+ unsigned int max_width;
+ unsigned int format;
+};
+
+/* osd layer operation */
+/* set layer alpha: write the global alpha field of the layer control reg */
+void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 alpha)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("alpha: 0x%08x\n", alpha);
+
+ /* read-modify-write to preserve the other control bits */
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_ALPHA_MASK;
+ value |= (alpha << OSD_LXC_ALPHA_SHIFT) & OSD_LXC_ALPHA_MASK;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* Enable or disable global alpha blending for a layer. */
+void xilinx_osd_layer_enable_alpha(struct xilinx_osd_layer *layer, bool enable)
+{
+ u32 ctrl;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("enable: %d\n", enable);
+
+ ctrl = xilinx_drm_readl(layer->base, OSD_LXC);
+ if (enable)
+ ctrl |= OSD_LXC_GALPHAEN;
+ else
+ ctrl &= ~OSD_LXC_GALPHAEN;
+ xilinx_drm_writel(layer->base, OSD_LXC, ctrl);
+}
+
+/* set layer priority: write the 3-bit priority field of the control reg */
+void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("prio: %d\n", prio);
+
+ /* read-modify-write to preserve the other control bits */
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_PRIORITY_MASK;
+ value |= (prio << OSD_LXC_PRIORITY_SHIFT) & OSD_LXC_PRIORITY_MASK;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/*
+ * Set layer dimension: program the layer position register (start pixel
+ * and start line) and the layer size register (width and height).
+ */
+void xilinx_osd_layer_set_dimension(struct xilinx_osd_layer *layer,
+ u16 xstart, u16 ystart,
+ u16 xsize, u16 ysize)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("w: %d(%d), h: %d(%d)\n",
+ xsize, xstart, ysize, ystart);
+
+ value = xstart & OSD_LXP_XSTART_MASK;
+ value |= (ystart << OSD_LXP_YSTART_SHIFT) & OSD_LXP_YSTART_MASK;
+
+ xilinx_drm_writel(layer->base, OSD_LXP, value);
+
+ value = xsize & OSD_LXS_XSIZE_MASK;
+ value |= (ysize << OSD_LXS_YSIZE_SHIFT) & OSD_LXS_YSIZE_MASK;
+
+ xilinx_drm_writel(layer->base, OSD_LXS, value);
+}
+
+/* Enable a layer by setting the enable bit in its control register. */
+void xilinx_osd_layer_enable(struct xilinx_osd_layer *layer)
+{
+ u32 ctrl;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+
+ ctrl = xilinx_drm_readl(layer->base, OSD_LXC) | OSD_LXC_EN;
+ xilinx_drm_writel(layer->base, OSD_LXC, ctrl);
+}
+
+/* Disable a layer by clearing the enable bit in its control register. */
+void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer)
+{
+ u32 ctrl;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+
+ ctrl = xilinx_drm_readl(layer->base, OSD_LXC) & ~OSD_LXC_EN;
+ xilinx_drm_writel(layer->base, OSD_LXC, ctrl);
+}
+
+/*
+ * Get an available layer: claim the first free layer, marking it as in
+ * use. Returns ERR_PTR(-ENODEV) when every layer is taken. Release the
+ * layer again with xilinx_osd_layer_put().
+ */
+struct xilinx_osd_layer *xilinx_osd_layer_get(struct xilinx_osd *osd)
+{
+ struct xilinx_osd_layer *layer = NULL;
+ int i;
+
+ for (i = 0; i < osd->num_layers; i++) {
+ if (osd->layers[i]->avail) {
+ layer = osd->layers[i];
+ layer->avail = false;
+ break;
+ }
+ }
+
+ if (!layer)
+ return ERR_PTR(-ENODEV);
+
+ DRM_DEBUG_DRIVER("layer id: %d\n", i);
+
+ return layer;
+}
+
+/* put a layer: return it to the pool used by xilinx_osd_layer_get() */
+void xilinx_osd_layer_put(struct xilinx_osd_layer *layer)
+{
+ layer->avail = true;
+}
+
+/* osd operations */
+/*
+ * Set osd background color. Per the register definitions above,
+ * channel 0 carries Y/green, channel 1 U(Cb)/blue and channel 2
+ * V(Cr)/red — hence the g, b, r write order.
+ */
+void xilinx_osd_set_color(struct xilinx_osd *osd, u8 r, u8 g, u8 b)
+{
+ u32 value;
+
+ value = g;
+ xilinx_drm_writel(osd->base, OSD_BC0, value);
+ value = b;
+ xilinx_drm_writel(osd->base, OSD_BC1, value);
+ value = r;
+ xilinx_drm_writel(osd->base, OSD_BC2, value);
+}
+
+/* set osd output dimension: pack width/height into the screen size reg */
+void xilinx_osd_set_dimension(struct xilinx_osd *osd, u32 width, u32 height)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("w: %d, h: %d\n", width, height);
+
+ value = width | ((height << OSD_SS_YSIZE_SHIFT) & OSD_SS_YSIZE_MASK);
+ xilinx_drm_writel(osd->base, OSD_SS, value);
+}
+
+/* get osd number of layers (read from the "xlnx,num-layers" DT property) */
+unsigned int xilinx_osd_get_num_layers(struct xilinx_osd *osd)
+{
+ return osd->num_layers;
+}
+
+/* get osd max width (read from the "xlnx,screen-width" DT property) */
+unsigned int xilinx_osd_get_max_width(struct xilinx_osd *osd)
+{
+ return osd->max_width;
+}
+
+/* get osd color format (sampled from the encoding register at probe) */
+unsigned int xilinx_osd_get_format(struct xilinx_osd *osd)
+{
+ return osd->format;
+}
+
+/* reset osd by writing the reset bit to the control register */
+void xilinx_osd_reset(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL, OSD_RST_RESET);
+}
+
+/* enable osd */
+void xilinx_osd_enable(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_EN);
+}
+
+/* disable osd */
+void xilinx_osd_disable(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_EN);
+}
+
+/* register-update-enable osd */
+void xilinx_osd_enable_rue(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_RUE);
+}
+
+/* register-update-disable osd */
+void xilinx_osd_disable_rue(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_RUE);
+}
+
+static const struct of_device_id xilinx_osd_of_match[] = {
+ { .compatible = "xlnx,v-osd-5.01.a" },
+ { /* end of table */ },
+};
+
+/*
+ * Probe the OSD core described by @node: map its registers, read the
+ * layer count and screen width from DT, and set up the layer objects.
+ * Return: OSD instance on success, ERR_PTR() on failure.
+ */
+struct xilinx_osd *xilinx_osd_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_osd *osd;
+ struct xilinx_osd_layer *layer;
+ const struct of_device_id *match;
+ struct resource res;
+ int i;
+ int ret;
+
+ match = of_match_node(xilinx_osd_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ osd = devm_kzalloc(dev, sizeof(*osd), GFP_KERNEL);
+ if (!osd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ osd->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(osd->base))
+ return ERR_CAST(osd->base);
+
+ ret = of_property_read_u32(node, "xlnx,num-layers", &osd->num_layers);
+ if (ret) {
+ dev_warn(dev, "failed to get num of layers prop\n");
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * The layer array is fixed-size; an unchecked DT value would
+ * overflow osd->layers[] below.
+ */
+ if (osd->num_layers > OSD_MAX_NUM_OF_LAYERS) {
+ dev_err(dev, "invalid num of layers: %u\n", osd->num_layers);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,screen-width", &osd->max_width);
+ if (ret) {
+ dev_warn(dev, "failed to get screen width prop\n");
+ return ERR_PTR(ret);
+ }
+
+ /* read the video format set by a user */
+ osd->format = xilinx_drm_readl(osd->base, OSD_ENC) &
+ OSD_VIDEO_FORMAT_MASK;
+
+ for (i = 0; i < osd->num_layers; i++) {
+ layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
+ layer->base = osd->base + OSD_L0C + OSD_LAYER_SIZE * i;
+ layer->id = i;
+ layer->osd = osd;
+ layer->avail = true;
+ osd->layers[i] = layer;
+ }
+
+ return osd;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_osd.h b/drivers/gpu/drm/xilinx/xilinx_osd.h
new file mode 100644
index 000000000000..d84ee9117419
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_osd.h
@@ -0,0 +1,62 @@
+/*
+ * Xilinx OSD Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_OSD_H_
+#define _XILINX_OSD_H_
+
+/* TODO: use the fixed max alpha value for 8 bit component width for now. */
+#define OSD_MAX_ALPHA 0x100
+
+struct xilinx_osd;
+struct xilinx_osd_layer;
+
+/* osd layer configuration */
+void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 alpha);
+void xilinx_osd_layer_enable_alpha(struct xilinx_osd_layer *layer, bool enable);
+void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio);
+void xilinx_osd_layer_set_dimension(struct xilinx_osd_layer *layer,
+ u16 xstart, u16 ystart,
+ u16 xsize, u16 ysize);
+
+/* osd layer operation */
+void xilinx_osd_layer_enable(struct xilinx_osd_layer *layer);
+void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer);
+struct xilinx_osd_layer *xilinx_osd_layer_get(struct xilinx_osd *osd);
+void xilinx_osd_layer_put(struct xilinx_osd_layer *layer);
+
+/* osd configuration */
+void xilinx_osd_set_color(struct xilinx_osd *osd, u8 r, u8 g, u8 b);
+void xilinx_osd_set_dimension(struct xilinx_osd *osd, u32 width, u32 height);
+
+unsigned int xilinx_osd_get_num_layers(struct xilinx_osd *osd);
+unsigned int xilinx_osd_get_max_width(struct xilinx_osd *osd);
+unsigned int xilinx_osd_get_format(struct xilinx_osd *osd);
+
+/* osd operation */
+void xilinx_osd_reset(struct xilinx_osd *osd);
+void xilinx_osd_enable(struct xilinx_osd *osd);
+void xilinx_osd_disable(struct xilinx_osd *osd);
+void xilinx_osd_enable_rue(struct xilinx_osd *osd);
+void xilinx_osd_disable_rue(struct xilinx_osd *osd);
+
+struct device;
+struct device_node;
+
+struct xilinx_osd *xilinx_osd_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_OSD_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c
new file mode 100644
index 000000000000..2d3400456cb0
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c
@@ -0,0 +1,119 @@
+/*
+ * Xilinx rgb to yuv converter support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_rgb2yuv.h"
+
+/* registers */
+/* control register */
+#define RGB_CONTROL 0x000
+/* active size v,h */
+#define RGB_ACTIVE_SIZE 0x020
+
+/* control register bit definition */
+#define RGB_CTL_EN (1 << 0) /* enable */
+#define RGB_CTL_RUE (1 << 1) /* register update enable */
+#define RGB_RST_RESET (1 << 31) /* instant reset */
+
+struct xilinx_rgb2yuv {
+ void __iomem *base;
+};
+
+/* Enable the rgb2yuv core by setting the enable bit in its control reg. */
+void xilinx_rgb2yuv_enable(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 ctrl = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+
+ ctrl |= RGB_CTL_EN;
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, ctrl);
+}
+
+/* Disable the rgb2yuv core by clearing the enable bit in its control reg. */
+void xilinx_rgb2yuv_disable(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 ctrl = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+
+ ctrl &= ~RGB_CTL_EN;
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, ctrl);
+}
+
+/* configure rgb2yuv: program the active size register (v in high half) */
+void xilinx_rgb2yuv_configure(struct xilinx_rgb2yuv *rgb2yuv,
+ int hactive, int vactive)
+{
+ xilinx_drm_writel(rgb2yuv->base, RGB_ACTIVE_SIZE,
+ (vactive << 16) | hactive);
+}
+
+/* reset rgb2yuv, then re-enable register updates (reset clears them) */
+void xilinx_rgb2yuv_reset(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, RGB_RST_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg | RGB_CTL_RUE);
+}
+
+static const struct of_device_id xilinx_rgb2yuv_of_match[] = {
+ { .compatible = "xlnx,v-rgb2ycrcb-6.01.a" },
+ { /* end of table */ },
+};
+
+/*
+ * Probe the rgb2yuv core described by @node: match against the supported
+ * compatible, map the registers, and reset the core.
+ * Return: instance on success, ERR_PTR() on failure.
+ */
+struct xilinx_rgb2yuv *xilinx_rgb2yuv_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_rgb2yuv *rgb2yuv;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_rgb2yuv_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ rgb2yuv = devm_kzalloc(dev, sizeof(*rgb2yuv), GFP_KERNEL);
+ if (!rgb2yuv)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ rgb2yuv->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(rgb2yuv->base))
+ return ERR_CAST(rgb2yuv->base);
+
+ /* leave the core in a known state with register updates enabled */
+ xilinx_rgb2yuv_reset(rgb2yuv);
+
+ return rgb2yuv;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h
new file mode 100644
index 000000000000..d1e544ac336b
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h
@@ -0,0 +1,35 @@
+/*
+ * Color Space Converter Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_RGB2YUV_H_
+#define _XILINX_RGB2YUV_H_
+
+struct xilinx_rgb2yuv;
+
+void xilinx_rgb2yuv_configure(struct xilinx_rgb2yuv *rgb2yuv,
+ int hactive, int vactive);
+void xilinx_rgb2yuv_reset(struct xilinx_rgb2yuv *rgb2yuv);
+void xilinx_rgb2yuv_enable(struct xilinx_rgb2yuv *rgb2yuv);
+void xilinx_rgb2yuv_disable(struct xilinx_rgb2yuv *rgb2yuv);
+
+struct device;
+struct device_node;
+
+struct xilinx_rgb2yuv *xilinx_rgb2yuv_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_RGB2YUV_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_vtc.c b/drivers/gpu/drm/xilinx/xilinx_vtc.c
new file mode 100644
index 000000000000..67a7e4fa14bf
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_vtc.c
@@ -0,0 +1,645 @@
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <video/videomode.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_vtc.h"
+
+/* register offsets */
+#define VTC_CTL 0x000 /* control */
+#define VTC_STATS 0x004 /* status */
+#define VTC_ERROR 0x008 /* error */
+
+#define VTC_GASIZE 0x060 /* generator active size */
+#define VTC_GENC 0x068 /* generator encoding */
+#define VTC_GPOL 0x06c /* generator polarity */
+#define VTC_GHSIZE 0x070 /* generator frame horizontal size */
+#define VTC_GVSIZE 0x074 /* generator frame vertical size */
+#define VTC_GHSYNC 0x078 /* generator horizontal sync */
+#define VTC_GVBHOFF_F0 0x07c /* generator Field 0 vblank horizontal offset */
+#define VTC_GVSYNC_F0 0x080 /* generator Field 0 vertical sync */
+#define VTC_GVSHOFF_F0 0x084 /* generator Field 0 vsync horizontal offset */
+#define VTC_GVBHOFF_F1 0x088 /* generator Field 1 vblank horizontal offset */
+#define VTC_GVSYNC_F1 0x08C /* generator Field 1 vertical sync */
+#define VTC_GVSHOFF_F1 0x090 /* generator Field 1 vsync horizontal offset */
+
+#define VTC_RESET 0x000 /* reset register */
+#define VTC_ISR 0x004 /* interrupt status register */
+#define VTC_IER 0x00c /* interrupt enable register */
+
+/* control register bit */
+#define VTC_CTL_FIP (1 << 6) /* field id output polarity */
+#define VTC_CTL_ACP (1 << 5) /* active chroma output polarity */
+#define VTC_CTL_AVP (1 << 4) /* active video output polarity */
+#define VTC_CTL_HSP (1 << 3) /* hori sync output polarity */
+#define VTC_CTL_VSP (1 << 2) /* vert sync output polarity */
+#define VTC_CTL_HBP (1 << 1) /* hori blank output polarity */
+#define VTC_CTL_VBP (1 << 0) /* vert blank output polarity */
+
+#define VTC_CTL_FIPSS (1 << 26) /* field id output polarity source */
+#define VTC_CTL_ACPSS (1 << 25) /* active chroma out polarity source */
+#define VTC_CTL_AVPSS (1 << 24) /* active video out polarity source */
+#define VTC_CTL_HSPSS (1 << 23) /* hori sync out polarity source */
+#define VTC_CTL_VSPSS (1 << 22) /* vert sync out polarity source */
+#define VTC_CTL_HBPSS (1 << 21) /* hori blank out polarity source */
+#define VTC_CTL_VBPSS (1 << 20) /* vert blank out polarity source */
+
+#define VTC_CTL_VCSS (1 << 18) /* chroma source select */
+#define VTC_CTL_VASS (1 << 17) /* vertical offset source select */
+#define VTC_CTL_VBSS (1 << 16) /* vertical sync end source select */
+#define VTC_CTL_VSSS (1 << 15) /* vertical sync start source select */
+#define VTC_CTL_VFSS (1 << 14) /* vertical active size source select */
+#define VTC_CTL_VTSS (1 << 13) /* vertical frame size source select */
+
+#define VTC_CTL_HBSS (1 << 11) /* horiz sync end source select */
+#define VTC_CTL_HSSS (1 << 10) /* horiz sync start source select */
+#define VTC_CTL_HFSS (1 << 9) /* horiz active size source select */
+#define VTC_CTL_HTSS (1 << 8) /* horiz frame size source select */
+
+#define VTC_CTL_GE (1 << 2) /* vtc generator enable */
+#define VTC_CTL_RU (1 << 1) /* vtc register update */
+
+/* vtc generator horizontal 1 */
+#define VTC_GH1_BPSTART_MASK 0x1fff0000 /* horiz back porch start */
+#define VTC_GH1_BPSTART_SHIFT 16
+#define VTC_GH1_SYNCSTART_MASK 0x00001fff
+
+/* vtc generator vertical 1 (field 0) */
+#define VTC_GV1_BPSTART_MASK 0x1fff0000 /* vertical back porch start */
+#define VTC_GV1_BPSTART_SHIFT 16
+#define VTC_GV1_SYNCSTART_MASK 0x00001fff
+
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define VTC_XVXHOX_HEND_MASK 0x1fff0000 /* horiz offset end */
+#define VTC_XVXHOX_HEND_SHIFT 16 /* horiz offset end shift */
+#define VTC_XVXHOX_HSTART_MASK 0x00001fff /* horiz offset start */
+
+/* reset register bit definition */
+#define VTC_RESET_RESET (1 << 31) /* Software Reset */
+
+/* interrupt status/enable register bit definition */
+#define VTC_IXR_FSYNC15 (1 << 31) /* frame sync interrupt 15 */
+#define VTC_IXR_FSYNC14 (1 << 30) /* frame sync interrupt 14 */
+#define VTC_IXR_FSYNC13 (1 << 29) /* frame sync interrupt 13 */
+#define VTC_IXR_FSYNC12 (1 << 28) /* frame sync interrupt 12 */
+#define VTC_IXR_FSYNC11 (1 << 27) /* frame sync interrupt 11 */
+#define VTC_IXR_FSYNC10 (1 << 26) /* frame sync interrupt 10 */
+#define VTC_IXR_FSYNC09 (1 << 25) /* frame sync interrupt 09 */
+#define VTC_IXR_FSYNC08 (1 << 24) /* frame sync interrupt 08 */
+#define VTC_IXR_FSYNC07 (1 << 23) /* frame sync interrupt 07 */
+#define VTC_IXR_FSYNC06 (1 << 22) /* frame sync interrupt 06 */
+#define VTC_IXR_FSYNC05 (1 << 21) /* frame sync interrupt 05 */
+#define VTC_IXR_FSYNC04 (1 << 20) /* frame sync interrupt 04 */
+#define VTC_IXR_FSYNC03 (1 << 19) /* frame sync interrupt 03 */
+#define VTC_IXR_FSYNC02 (1 << 18) /* frame sync interrupt 02 */
+#define VTC_IXR_FSYNC01 (1 << 17) /* frame sync interrupt 01 */
+#define VTC_IXR_FSYNC00 (1 << 16) /* frame sync interrupt 00 */
+#define VTC_IXR_FSYNCALL_MASK (VTC_IXR_FSYNC00 | \
+ VTC_IXR_FSYNC01 | \
+ VTC_IXR_FSYNC02 | \
+ VTC_IXR_FSYNC03 | \
+ VTC_IXR_FSYNC04 | \
+ VTC_IXR_FSYNC05 | \
+ VTC_IXR_FSYNC06 | \
+ VTC_IXR_FSYNC07 | \
+ VTC_IXR_FSYNC08 | \
+ VTC_IXR_FSYNC09 | \
+ VTC_IXR_FSYNC10 | \
+ VTC_IXR_FSYNC11 | \
+ VTC_IXR_FSYNC12 | \
+ VTC_IXR_FSYNC13 | \
+ VTC_IXR_FSYNC14 | \
+ VTC_IXR_FSYNC15)
+
+#define VTC_IXR_G_AV (1 << 13) /* generator actv video intr */
+#define VTC_IXR_G_VBLANK (1 << 12) /* generator vblank interrupt */
+#define VTC_IXR_G_ALL_MASK (VTC_IXR_G_AV | \
+ VTC_IXR_G_VBLANK) /* all generator intr */
+
+#define VTC_IXR_D_AV (1 << 11) /* detector active video intr */
+#define VTC_IXR_D_VBLANK (1 << 10) /* detector vblank interrupt */
+#define VTC_IXR_D_ALL_MASK (VTC_IXR_D_AV | \
+ VTC_IXR_D_VBLANK) /* all detector intr */
+
+#define VTC_IXR_LOL (1 << 9) /* lock loss */
+#define VTC_IXR_LO (1 << 8) /* lock */
+#define VTC_IXR_LOCKALL_MASK (VTC_IXR_LOL | \
+ VTC_IXR_LO) /* all signal lock intr */
+
+#define VTC_IXR_ACL (1 << 21) /* active chroma signal lock */
+#define VTC_IXR_AVL (1 << 20) /* active video signal lock */
+#define VTC_IXR_HSL (1 << 19) /* horizontal sync signal lock */
+#define VTC_IXR_VSL (1 << 18) /* vertical sync signal lock */
+#define VTC_IXR_HBL (1 << 17) /* horizontal blank signal lock */
+#define VTC_IXR_VBL (1 << 16) /* vertical blank signal lock */
+
+#define VTC_GENC_INTERL BIT(6) /* Interlaced bit in VTC_GENC */
+/* mask for all interrupts */
+#define VTC_IXR_ALLINTR_MASK (VTC_IXR_FSYNCALL_MASK | \
+ VTC_IXR_G_ALL_MASK | \
+ VTC_IXR_D_ALL_MASK | \
+ VTC_IXR_LOCKALL_MASK)
+/**
+ * struct xilinx_vtc - Xilinx VTC object
+ *
+ * @base: base addr
+ * @irq: irq
+ * @vblank_fn: vblank handler func
+ * @vblank_data: vblank handler private data
+ */
+struct xilinx_vtc {
+ void __iomem *base;
+ int irq;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+};
+
+/**
+ * struct xilinx_vtc_polarity - vtc polarity config
+ *
+ * @active_chroma: active chroma polarity
+ * @active_video: active video polarity
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ */
+struct xilinx_vtc_polarity {
+ u8 active_chroma;
+ u8 active_video;
+ u8 field_id;
+ u8 vblank;
+ u8 vsync;
+ u8 hblank;
+ u8 hsync;
+};
+
+/**
+ * struct xilinx_vtc_hori_offset - vtc horizontal offset config
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ */
+struct xilinx_vtc_hori_offset {
+ u16 v0blank_hori_start;
+ u16 v0blank_hori_end;
+ u16 v0sync_hori_start;
+ u16 v0sync_hori_end;
+ u16 v1blank_hori_start;
+ u16 v1blank_hori_end;
+ u16 v1sync_hori_start;
+ u16 v1sync_hori_end;
+};
+
+/**
+ * struct xilinx_vtc_src_config - vtc source config
+ *
+ * @field_id_pol: field id polarity source
+ * @active_chroma_pol: active chroma polarity source
+ * @active_video_pol: active video polarity source
+ * @hsync_pol: hsync polarity source
+ * @vsync_pol: vsync polarity source
+ * @hblank_pol: hblank polarity source
+ * @vblank_pol: vblank polarity source
+ * @vchroma: vchroma polarity start source
+ * @vactive: vactive size source
+ * @vbackporch: vbackporch start source
+ * @vsync: vsync start source
+ * @vfrontporch: vfrontporch start source
+ * @vtotal: vtotal size source
+ * @hactive: hactive start source
+ * @hbackporch: hbackporch start source
+ * @hsync: hsync start source
+ * @hfrontporch: hfrontporch start source
+ * @htotal: htotal size source
+ */
+struct xilinx_vtc_src_config {
+ u8 field_id_pol;
+ u8 active_chroma_pol;
+ u8 active_video_pol;
+ u8 hsync_pol;
+ u8 vsync_pol;
+ u8 hblank_pol;
+ u8 vblank_pol;
+
+ u8 vchroma;
+ u8 vactive;
+ u8 vbackporch;
+ u8 vsync;
+ u8 vfrontporch;
+ u8 vtotal;
+
+ u8 hactive;
+ u8 hbackporch;
+ u8 hsync;
+ u8 hfrontporch;
+ u8 htotal;
+};
+
+/* configure polarity of signals */
+static void xilinx_vtc_config_polarity(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_polarity *polarity)
+{
+ u32 reg = 0;
+
+ if (polarity->active_chroma)
+ reg |= VTC_CTL_ACP;
+ if (polarity->active_video)
+ reg |= VTC_CTL_AVP;
+ if (polarity->field_id)
+ reg |= VTC_CTL_FIP;
+ if (polarity->vblank)
+ reg |= VTC_CTL_VBP;
+ if (polarity->vsync)
+ reg |= VTC_CTL_VSP;
+ if (polarity->hblank)
+ reg |= VTC_CTL_HBP;
+ if (polarity->hsync)
+ reg |= VTC_CTL_HSP;
+
+ xilinx_drm_writel(vtc->base, VTC_GPOL, reg);
+}
+
+/* configure horizontal offset */
+static void
+xilinx_vtc_config_hori_offset(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_hori_offset *hori_offset)
+{
+ u32 reg;
+
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hori_offset->v0blank_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v0blank_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVBHOFF_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hori_offset->v0sync_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v0sync_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSHOFF_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ reg = hori_offset->v1blank_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v1blank_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVBHOFF_F1, reg);
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ reg = hori_offset->v1sync_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v1sync_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSHOFF_F1, reg);
+
+}
+
+/* configure source */
+static void xilinx_vtc_config_src(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_src_config *src_config)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+
+ if (src_config->field_id_pol)
+ reg |= VTC_CTL_FIPSS;
+ if (src_config->active_chroma_pol)
+ reg |= VTC_CTL_ACPSS;
+ if (src_config->active_video_pol)
+ reg |= VTC_CTL_AVPSS;
+ if (src_config->hsync_pol)
+ reg |= VTC_CTL_HSPSS;
+ if (src_config->vsync_pol)
+ reg |= VTC_CTL_VSPSS;
+ if (src_config->hblank_pol)
+ reg |= VTC_CTL_HBPSS;
+ if (src_config->vblank_pol)
+ reg |= VTC_CTL_VBPSS;
+
+ if (src_config->vchroma)
+ reg |= VTC_CTL_VCSS;
+ if (src_config->vactive)
+ reg |= VTC_CTL_VASS;
+ if (src_config->vbackporch)
+ reg |= VTC_CTL_VBSS;
+ if (src_config->vsync)
+ reg |= VTC_CTL_VSSS;
+ if (src_config->vfrontporch)
+ reg |= VTC_CTL_VFSS;
+ if (src_config->vtotal)
+ reg |= VTC_CTL_VTSS;
+
+ if (src_config->hbackporch)
+ reg |= VTC_CTL_HBSS;
+ if (src_config->hsync)
+ reg |= VTC_CTL_HSSS;
+ if (src_config->hfrontporch)
+ reg |= VTC_CTL_HFSS;
+ if (src_config->htotal)
+ reg |= VTC_CTL_HTSS;
+
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg);
+}
+
+/* enable vtc */
+void xilinx_vtc_enable(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ /* enable a generator only for now */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_GE);
+}
+
+/* disable vtc */
+void xilinx_vtc_disable(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ /* disable a generator only for now */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg & ~VTC_CTL_GE);
+}
+
+/* configure vtc signals */
+void xilinx_vtc_config_sig(struct xilinx_vtc *vtc,
+ struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xilinx_vtc_hori_offset hori_offset;
+ struct xilinx_vtc_polarity polarity;
+ struct xilinx_vtc_src_config src;
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg & ~VTC_CTL_RU);
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ reg = htotal & 0x1fff;
+ xilinx_drm_writel(vtc->base, VTC_GHSIZE, reg);
+
+ reg = vtotal & 0x1fff;
+ reg |= reg << VTC_GV1_BPSTART_SHIFT;
+ xilinx_drm_writel(vtc->base, VTC_GVSIZE, reg);
+
+ DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+ reg = hactive & 0x1fff;
+ reg |= (vactive & 0x1fff) << 16;
+ xilinx_drm_writel(vtc->base, VTC_GASIZE, reg);
+
+ DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+
+ reg = hsync_start & VTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << VTC_GH1_BPSTART_SHIFT) &
+ VTC_GH1_BPSTART_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GHSYNC, reg);
+
+ DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+
+ reg = vsync_start & VTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << VTC_GV1_BPSTART_SHIFT) &
+ VTC_GV1_BPSTART_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSYNC_F0, reg);
+ DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+ hori_offset.v0blank_hori_start = hactive;
+ hori_offset.v0blank_hori_end = hactive;
+ hori_offset.v0sync_hori_start = hsync_start;
+ hori_offset.v0sync_hori_end = hsync_start;
+
+ hori_offset.v1blank_hori_start = hactive;
+ hori_offset.v1blank_hori_end = hactive;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ hori_offset.v1sync_hori_start = hsync_start - (htotal / 2);
+ hori_offset.v1sync_hori_end = hsync_start - (htotal / 2);
+ xilinx_drm_writel(vtc->base, VTC_GVSYNC_F1, reg);
+ reg = xilinx_drm_readl(vtc->base, VTC_GENC) | VTC_GENC_INTERL;
+ xilinx_drm_writel(vtc->base, VTC_GENC, reg);
+ } else {
+ hori_offset.v1sync_hori_start = hsync_start;
+ hori_offset.v1sync_hori_end = hsync_start;
+ reg = xilinx_drm_readl(vtc->base, VTC_GENC) & ~VTC_GENC_INTERL;
+ xilinx_drm_writel(vtc->base, VTC_GENC, reg);
+ }
+
+ xilinx_vtc_config_hori_offset(vtc, &hori_offset);
+ /* set up polarity */
+ memset(&polarity, 0x0, sizeof(polarity));
+ polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.active_video = 1;
+ polarity.active_chroma = 1;
+ polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+ xilinx_vtc_config_polarity(vtc, &polarity);
+
+ /* set up src config */
+ memset(&src, 0x0, sizeof(src));
+ src.vchroma = 1;
+ src.vactive = 1;
+ src.vbackporch = 1;
+ src.vsync = 1;
+ src.vfrontporch = 1;
+ src.vtotal = 1;
+ src.hactive = 1;
+ src.hbackporch = 1;
+ src.hsync = 1;
+ src.hfrontporch = 1;
+ src.htotal = 1;
+ xilinx_vtc_config_src(vtc, &src);
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_RU);
+}
+
+/* reset vtc */
+void xilinx_vtc_reset(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ xilinx_drm_writel(vtc->base, VTC_RESET, VTC_RESET_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_RU);
+}
+
+/* enable vblank interrupt */
+void xilinx_vtc_vblank_enable(struct xilinx_vtc *vtc)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, VTC_IXR_G_VBLANK |
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_vblank_enable);
+
+/* enable interrupt */
+static inline void xilinx_vtc_intr_enable(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, (intr & VTC_IXR_ALLINTR_MASK) |
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+
+/* disable interrupt */
+static inline void xilinx_vtc_intr_disable(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, ~(intr & VTC_IXR_ALLINTR_MASK) &
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+
+/* disable vblank interrupt */
+void xilinx_vtc_vblank_disable(struct xilinx_vtc *vtc)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, ~(VTC_IXR_G_VBLANK) &
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_vblank_disable);
+
+/* get interrupt */
+u32 xilinx_vtc_intr_get(struct xilinx_vtc *vtc)
+{
+ return xilinx_drm_readl(vtc->base, VTC_IER) &
+ xilinx_drm_readl(vtc->base, VTC_ISR) & VTC_IXR_ALLINTR_MASK;
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_intr_get);
+
+/* clear interrupt */
+void xilinx_vtc_intr_clear(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_ISR, intr & VTC_IXR_ALLINTR_MASK);
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_intr_clear);
+
+/* interrupt handler */
+static irqreturn_t xilinx_vtc_intr_handler(int irq, void *data)
+{
+ struct xilinx_vtc *vtc = data;
+
+ u32 intr = xilinx_vtc_intr_get(vtc);
+
+ if (!intr)
+ return IRQ_NONE;
+
+ if ((intr & VTC_IXR_G_VBLANK) && (vtc->vblank_fn))
+ vtc->vblank_fn(vtc->vblank_data);
+
+ xilinx_vtc_intr_clear(vtc, intr);
+
+ return IRQ_HANDLED;
+}
+
+/* enable vblank interrupt */
+void xilinx_vtc_enable_vblank_intr(struct xilinx_vtc *vtc,
+ void (*vblank_fn)(void *),
+ void *vblank_priv)
+{
+ vtc->vblank_fn = vblank_fn;
+ vtc->vblank_data = vblank_priv;
+ xilinx_vtc_intr_enable(vtc, VTC_IXR_G_VBLANK);
+}
+
+/* disable vblank interrupt */
+void xilinx_vtc_disable_vblank_intr(struct xilinx_vtc *vtc)
+{
+ xilinx_vtc_intr_disable(vtc, VTC_IXR_G_VBLANK);
+ vtc->vblank_data = NULL;
+ vtc->vblank_fn = NULL;
+}
+
+static const struct of_device_id xilinx_vtc_of_match[] = {
+ { .compatible = "xlnx,v-tc-5.01.a" },
+ { /* end of table */ },
+};
+
+/* probe vtc */
+struct xilinx_vtc *xilinx_vtc_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_vtc *vtc;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_vtc_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+ if (!vtc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ vtc->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(vtc->base))
+ return ERR_CAST(vtc->base);
+
+ xilinx_vtc_intr_disable(vtc, VTC_IXR_ALLINTR_MASK);
+ vtc->irq = irq_of_parse_and_map(node, 0);
+ if (vtc->irq > 0) {
+ ret = devm_request_irq(dev, vtc->irq, xilinx_vtc_intr_handler,
+ IRQF_SHARED, "xilinx_vtc", vtc);
+ if (ret) {
+ dev_warn(dev, "failed to requet_irq() for vtc\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ xilinx_vtc_reset(vtc);
+
+ return vtc;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_vtc.h b/drivers/gpu/drm/xilinx/xilinx_vtc.h
new file mode 100644
index 000000000000..33b4eb43513d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_vtc.h
@@ -0,0 +1,44 @@
+/*
+ * Video Timing Controller Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_VTC_H_
+#define _XILINX_VTC_H_
+
+struct xilinx_vtc;
+
+struct videomode;
+
+void xilinx_vtc_config_sig(struct xilinx_vtc *vtc,
+ struct videomode *vm);
+void xilinx_vtc_enable_vblank_intr(struct xilinx_vtc *vtc,
+ void (*fn)(void *), void *data);
+void xilinx_vtc_disable_vblank_intr(struct xilinx_vtc *vtc);
+void xilinx_vtc_reset(struct xilinx_vtc *vtc);
+void xilinx_vtc_enable(struct xilinx_vtc *vtc);
+void xilinx_vtc_disable(struct xilinx_vtc *vtc);
+
+struct device;
+struct device_node;
+
+struct xilinx_vtc *xilinx_vtc_probe(struct device *dev,
+ struct device_node *node);
+void xilinx_vtc_vblank_enable(struct xilinx_vtc *vtc);
+void xilinx_vtc_vblank_disable(struct xilinx_vtc *vtc);
+u32 xilinx_vtc_intr_get(struct xilinx_vtc *vtc);
+void xilinx_vtc_intr_clear(struct xilinx_vtc *vtc, u32 intr);
+
+#endif /* _XILINX_VTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
new file mode 100644
index 000000000000..c7b695e83f2f
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -0,0 +1,104 @@
+config DRM_XLNX
+ tristate "Xilinx DRM KMS Driver"
+ depends on DRM && OF
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx DRM KMS driver. Choose this option if you have
+ a Xilinx SoC with a hardened display pipeline or a soft
+ display pipeline using Xilinx IPs in FPGA. This module
+ provides the kernel mode setting functionalities
+ for Xilinx display drivers.
+
+config DRM_XLNX_BRIDGE
+ tristate "Xilinx DRM KMS bridge"
+ depends on DRM_XLNX
+ help
+ Xilinx DRM KMS bridge. This module provides some interfaces
+ to enable inter-module communication. Choose this option
+ from the provider driver when the Xilinx bridge interface is
+ needed.
+
+config DRM_XLNX_BRIDGE_DEBUG_FS
+ bool "Xilinx DRM KMS bridge debugfs"
+ depends on DEBUG_FS && DRM_XLNX_BRIDGE
+ help
+ Enable the debugfs code for Xilinx bridge. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config DRM_ZYNQMP_DPSUB
+ tristate "ZynqMP DP Subsystem Driver"
+ depends on ARCH_ZYNQMP && OF && DRM_XLNX && COMMON_CLK
+ select XILINX_DPDMA
+ select PHY_XILINX_ZYNQMP
+ help
+ DRM KMS driver for ZynqMP DP Subsystem controller. Choose
+ this option if you have a Xilinx ZynqMP SoC with DisplayPort
+ subsystem. The driver provides the kernel mode setting
+ functionalities for ZynqMP DP subsystem.
+
+config DRM_XLNX_DSI
+ tristate "Xilinx DRM DSI Subsystem Driver"
+ depends on DRM_XLNX
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_SIMPLE
+ help
+ DRM driver for Xilinx MIPI-DSI.
+
+config DRM_XLNX_MIXER
+ tristate "Xilinx DRM Mixer Driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for Xilinx Mixer driver
+
+config DRM_XLNX_PL_DISP
+ tristate "Xilinx DRM PL display driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for Xilinx PL display driver, provides drm
+ crtc and plane object to display pipeline. You need to
+ choose this option if your display pipeline needs one
+ crtc and plane object with single DMA connected.
+
+config DRM_XLNX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XLNX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
+
+config DRM_XLNX_BRIDGE_CSC
+ tristate "Xilinx DRM CSC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for color space converter of VPSS. Choose
+ this option if color space converter is connected to an encoder.
+ The driver provides set/get resolution and color format
+ functionality through bridge layer.
+
+config DRM_XLNX_BRIDGE_SCALER
+ tristate "Xilinx DRM Scaler Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for scaler of VPSS. Choose this option
+ if scaler is connected to an encoder. The driver provides
+ upscaling, down scaling and no scaling functionality through
+ bridge layer.
+
+config DRM_XLNX_BRIDGE_VTC
+ tristate "Xilinx DRM VTC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for Xilinx Video Timing Controller. Choose
+ this option to make VTC a part of the CRTC in display pipeline.
+ Currently the support is added to the Xilinx Video Mixer and
+ Xilinx PL display CRTC drivers. This driver provides ability
+ to generate timings through the bridge layer.
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
new file mode 100644
index 000000000000..1d80be7d3e70
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -0,0 +1,21 @@
+xlnx_drm-objs += xlnx_crtc.o xlnx_drv.o xlnx_fb.o xlnx_gem.o
+xlnx_drm-$(CONFIG_DRM_XLNX_BRIDGE) += xlnx_bridge.o
+obj-$(CONFIG_DRM_XLNX) += xlnx_drm.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_CSC) += xlnx_csc.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_SCALER) += xlnx_scaler.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_VTC) += xlnx_vtc.o
+
+obj-$(CONFIG_DRM_XLNX_DSI) += xlnx_dsi.o
+
+obj-$(CONFIG_DRM_XLNX_MIXER) += xlnx_mixer.o
+
+obj-$(CONFIG_DRM_XLNX_PL_DISP) += xlnx_pl_disp.o
+
+xlnx-sdi-objs += xlnx_sdi.o xlnx_sdi_timing.o
+obj-$(CONFIG_DRM_XLNX_SDI) += xlnx-sdi.o
+
+zynqmp-dpsub-objs += zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o
+obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.c b/drivers/gpu/drm/xlnx/xlnx_bridge.c
new file mode 100644
index 000000000000..6ee462ada676
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM bridge driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/list.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * Similar to drm bridge, but this can be used by any DRM driver. There
+ * is no limitation to be used by non DRM drivers as well. No complex topology
+ * is modeled, thus it's assumed that the Xilinx bridge device is directly
+ * attached to client. The client should call Xilinx bridge functions explicitly
+ * where it's needed, as opposed to drm bridge functions which are called
+ * implicitly by DRM core.
+ * One Xlnx bridge can be owned by one driver at a time.
+ */
+
+/**
+ * struct xlnx_bridge_helper - Xilinx bridge helper
+ * @xlnx_bridges: list of Xilinx bridges
+ * @lock: lock to protect @xlnx_bridges
+ * @refcnt: reference count
+ * @error: flag if in error state
+ */
+struct xlnx_bridge_helper {
+ struct list_head xlnx_bridges;
+ struct mutex lock; /* lock for @xlnx_bridges */
+ unsigned int refcnt;
+ bool error;
+};
+
+static struct xlnx_bridge_helper helper;
+
+struct videomode;
+/*
+ * Client functions
+ */
+
+/**
+ * xlnx_bridge_enable - Enable the bridge
+ * @bridge: bridge to enable
+ *
+ * Enable bridge.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->enable)
+ return bridge->enable(bridge);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_enable);
+
+/**
+ * xlnx_bridge_disable - Disable the bridge
+ * @bridge: bridge to disable
+ *
+ * Disable bridge.
+ */
+void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return;
+
+ if (helper.error)
+ return;
+
+ if (bridge->disable)
+ bridge->disable(bridge);
+}
+EXPORT_SYMBOL(xlnx_bridge_disable);
+
+/**
+ * xlnx_bridge_set_input - Set the input of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (ex, MEDIA_BUS_FMT_*);
+ *
+ * Set the bridge input with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_input)
+ return bridge->set_input(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_input);
+
+/**
+ * xlnx_bridge_get_input_fmts - Get the supported input formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported input bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_input_fmts)
+ return bridge->get_input_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_input_fmts);
+
+/**
+ * xlnx_bridge_set_output - Set the output of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (ex, MEDIA_BUS_FMT_*);
+ *
+ * Set the bridge output with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_output)
+ return bridge->set_output(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_output);
+
+/**
+ * xlnx_bridge_get_output_fmts - Get the supported output formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported output bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_output_fmts)
+ return bridge->get_output_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_output_fmts);
+
+/**
+ * xlnx_bridge_set_timing - Set the video timing
+ * @bridge: bridge to set
+ * @vm: Videomode
+ *
+ * Set the video mode so that timing can be generated using this
+ * by the video timing controller.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_timing) {
+ bridge->set_timing(bridge, vm);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_timing);
+
+/**
+ * of_xlnx_bridge_get - Get the corresponding Xlnx bridge instance
+ * @bridge_np: The device node of the bridge device
+ *
+ * The function walks through the Xlnx bridge list of @drm, and return
+ * if any registered bridge matches the device node. The returned
+ * bridge will not be accessible by others.
+ *
+ * Return: the matching Xlnx bridge instance, or NULL
+ */
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ struct xlnx_bridge *found = NULL;
+ struct xlnx_bridge *bridge;
+
+ if (helper.error)
+ return NULL;
+
+ mutex_lock(&helper.lock);
+ list_for_each_entry(bridge, &helper.xlnx_bridges, list) {
+ if (bridge->of_node == bridge_np && !bridge->owned) {
+ found = bridge;
+ bridge->owned = true;
+ break;
+ }
+ }
+ mutex_unlock(&helper.lock);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_get);
+
+/**
+ * of_xlnx_bridge_put - Put the Xlnx bridge instance
+ * @bridge: Xlnx bridge instance to release
+ *
+ * Return the @bridge. After this, the bridge will be available for
+ * other drivers to use.
+ */
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+ if (WARN_ON(helper.error))
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->owned);
+ bridge->owned = false;
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_put);
+
+#ifdef CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS
+
+#include <linux/debugfs.h>
+
+struct xlnx_bridge_debugfs_dir {
+ struct dentry *dir;
+ int ref_cnt;
+};
+
+static struct xlnx_bridge_debugfs_dir *dir;
+
+struct xlnx_bridge_debugfs_file {
+ struct dentry *file;
+ const char *status;
+};
+
+#define XLNX_BRIDGE_DEBUGFS_MAX_BYTES 16
+
+/*
+ * Return the bridge status string to user space. Only a single read at
+ * offset 0 is supported; any subsequent read reports EOF.
+ */
+static ssize_t xlnx_bridge_debugfs_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct xlnx_bridge *bridge = f->f_inode->i_private;
+	size_t len;
+
+	/* size is unsigned; only zero is invalid */
+	if (!size)
+		return -EINVAL;
+
+	if (*pos != 0)
+		return 0;
+
+	len = min(size, strlen(bridge->debugfs_file->status));
+	/*
+	 * copy_to_user() returns the number of bytes NOT copied, which is
+	 * not an errno and must not be returned as the read count.
+	 */
+	if (copy_to_user(buf, bridge->debugfs_file->status, len))
+		return -EFAULT;
+
+	*pos = len + 1;
+	return len;
+}
+
+/*
+ * Parse a debugfs command. Supported commands:
+ *   "enable", "disable",
+ *   "set_input <width> <height> <bus_fmt>"
+ */
+static ssize_t xlnx_bridge_debugfs_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct xlnx_bridge *bridge = f->f_inode->i_private;
+	char *kbuf, *cur;
+
+	if (*pos != 0 || !size)
+		return -EINVAL;
+
+	/*
+	 * Copy the command into kernel space before parsing it: the
+	 * __user buffer must never be dereferenced directly (the old
+	 * code ran strncmp()/strsep() straight on @buf).
+	 */
+	kbuf = memdup_user_nul(buf, size);
+	if (IS_ERR(kbuf))
+		return PTR_ERR(kbuf);
+
+	cur = kbuf;
+	/* compare full command names, including the last character */
+	if (!strncmp(cur, "enable", 6)) {
+		xlnx_bridge_enable(bridge);
+	} else if (!strncmp(cur, "disable", 7)) {
+		xlnx_bridge_disable(bridge);
+	} else if (!strncmp(cur, "set_input", 9)) {
+		char *w, *h, *fm;
+		u32 width, height, fmt;
+		int ret = -EINVAL;
+
+		/*
+		 * strsep() advances @cur, so @kbuf keeps pointing at the
+		 * start of the allocation for the final kfree().
+		 */
+		strsep(&cur, " ");
+		w = strsep(&cur, " ");
+		h = strsep(&cur, " ");
+		fm = strsep(&cur, " ");
+		if (w && h && fm) {
+			ret = kstrtouint(w, 0, &width);
+			ret |= kstrtouint(h, 0, &height);
+			ret |= kstrtouint(fm, 0, &fmt);
+		}
+
+		if (ret) {
+			pr_err("%s %d invalid set_input command\n",
+			       __func__, __LINE__);
+			kfree(kbuf);
+			return -EINVAL;
+		}
+		xlnx_bridge_set_input(bridge, width, height, fmt);
+	}
+
+	kfree(kbuf);
+	return size;
+}
+
+static const struct file_operations xlnx_bridge_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = xlnx_bridge_debugfs_read,
+ .write = xlnx_bridge_debugfs_write,
+};
+
+/*
+ * Create the per-bridge debugfs file, named after the bridge's OF node.
+ */
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+	struct xlnx_bridge_debugfs_file *file;
+	char file_name[32];
+
+	file = kzalloc(sizeof(*file), GFP_KERNEL);
+	if (!file)
+		return -ENOMEM;
+
+	snprintf(file_name, sizeof(file_name), "xlnx_bridge-%s",
+		 bridge->of_node->name);
+	/* 0644: the fops has a write handler, so the file must be writable */
+	file->file = debugfs_create_file(file_name, 0644, dir->dir, bridge,
+					 &xlnx_bridge_debugfs_fops);
+	bridge->debugfs_file = file;
+
+	return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+ debugfs_remove(bridge->debugfs_file->file);
+ kfree(bridge->debugfs_file);
+}
+
+/*
+ * Create (or take another reference on) the shared "xlnx-bridge"
+ * debugfs directory.
+ */
+static int xlnx_bridge_debugfs_init(void)
+{
+	if (dir) {
+		dir->ref_cnt++;
+		return 0;
+	}
+
+	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+	if (!dir)
+		return -ENOMEM;
+
+	dir->dir = debugfs_create_dir("xlnx-bridge", NULL);
+	if (!dir->dir) {
+		/*
+		 * Don't leak the bookkeeping struct; a stale non-NULL @dir
+		 * with ref_cnt 0 would also fool the next init call.
+		 */
+		kfree(dir);
+		dir = NULL;
+		return -ENODEV;
+	}
+	dir->ref_cnt++;
+
+	return 0;
+}
+
+/*
+ * Drop a reference on the shared debugfs directory; tear it down on the
+ * last reference.
+ */
+static void xlnx_bridge_debugfs_fini(void)
+{
+	if (--dir->ref_cnt)
+		return;
+
+	debugfs_remove_recursive(dir->dir);
+	kfree(dir);	/* previously leaked: only the dentry was released */
+	dir = NULL;
+}
+
+#else
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+}
+
+#endif
+
+/*
+ * Provider functions
+ */
+
+/**
+ * xlnx_bridge_register - Register the bridge instance
+ * @bridge: Xlnx bridge instance to register
+ *
+ * Register @bridge to be available for clients.
+ *
+ * Return: 0 on success. -EPROBE_DEFER if helper is not initialized, or
+ * -EFAULT if in error state.
+ */
+int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ if (!helper.refcnt)
+ return -EPROBE_DEFER;
+
+ if (helper.error)
+ return -EFAULT;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->of_node);
+ bridge->owned = false;
+ xlnx_bridge_debugfs_register(bridge);
+ list_add_tail(&bridge->list, &helper.xlnx_bridges);
+ mutex_unlock(&helper.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_register);
+
+/**
+ * xlnx_bridge_unregister - Unregister the bridge instance
+ * @bridge: Xlnx bridge instance to unregister
+ *
+ * Unregister @bridge. The bridge shouldn't be owned by any client
+ * at this point.
+ */
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+ if (helper.error)
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(bridge->owned);
+ xlnx_bridge_debugfs_unregister(bridge);
+ list_del(&bridge->list);
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_unregister);
+
+/*
+ * Internal functions: used by Xlnx DRM
+ */
+
+/**
+ * xlnx_bridge_helper_init - Initialize the bridge helper
+ * @void: No arg
+ *
+ * Initialize the bridge helper or increment the reference count
+ * if already initialized.
+ *
+ * Return: 0 on success, or -EFAULT if in error state.
+ */
+int xlnx_bridge_helper_init(void)
+{
+ if (helper.refcnt++ > 0) {
+ if (helper.error)
+ return -EFAULT;
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&helper.xlnx_bridges);
+ mutex_init(&helper.lock);
+ helper.error = false;
+
+ if (xlnx_bridge_debugfs_init())
+ pr_err("failed to init xlnx bridge debugfs\n");
+
+ return 0;
+}
+
+/**
+ * xlnx_bridge_helper_fini - Release the bridge helper
+ *
+ * Clean up or decrement the reference of the bridge helper.
+ */
+void xlnx_bridge_helper_fini(void)
+{
+ if (--helper.refcnt > 0)
+ return;
+
+ xlnx_bridge_debugfs_fini();
+
+ if (WARN_ON(!list_empty(&helper.xlnx_bridges))) {
+ helper.error = true;
+ pr_err("any further xlnx bridge call will fail\n");
+ }
+
+ mutex_destroy(&helper.lock);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.h b/drivers/gpu/drm/xlnx/xlnx_bridge.h
new file mode 100644
index 000000000000..64330169bd22
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM bridge header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_BRIDGE_H_
+#define _XLNX_BRIDGE_H_
+
+struct videomode;
+
+struct xlnx_bridge_debugfs_file;
+
+/**
+ * struct xlnx_bridge - Xilinx bridge device
+ * @list: list node for Xilinx bridge device list
+ * @of_node: OF node for the bridge
+ * @owned: flag if the bridge is owned
+ * @enable: callback to enable the bridge
+ * @disable: callback to disable the bridge
+ * @set_input: callback to set the input
+ * @get_input_fmts: callback to get supported input formats.
+ * @set_output: callback to set the output
+ * @get_output_fmts: callback to get supported output formats.
+ * @set_timing: callback to set timing in connected video timing controller.
+ * @debugfs_file: for debugfs support
+ */
+struct xlnx_bridge {
+ struct list_head list;
+ struct device_node *of_node;
+ bool owned;
+ int (*enable)(struct xlnx_bridge *bridge);
+ void (*disable)(struct xlnx_bridge *bridge);
+ int (*set_input)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_input_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_output)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_output_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_timing)(struct xlnx_bridge *bridge, struct videomode *vm);
+ struct xlnx_bridge_debugfs_file *debugfs_file;
+};
+
+#if IS_ENABLED(CONFIG_DRM_XLNX_BRIDGE)
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_bridge_helper;
+
+int xlnx_bridge_helper_init(void);
+void xlnx_bridge_helper_fini(void);
+
+/*
+ * Helper functions: used by client driver
+ */
+
+int xlnx_bridge_enable(struct xlnx_bridge *bridge);
+void xlnx_bridge_disable(struct xlnx_bridge *bridge);
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm);
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np);
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge);
+
+/*
+ * Bridge registration: used by bridge driver
+ */
+
+int xlnx_bridge_register(struct xlnx_bridge *bridge);
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge);
+
+#else /* CONFIG_DRM_XLNX_BRIDGE */
+
+struct xlnx_bridge_helper;
+
+/* Stub when CONFIG_DRM_XLNX_BRIDGE is disabled. (Fixed: "inline" was
+ * duplicated in the function specifier.)
+ */
+static inline int xlnx_bridge_helper_init(void)
+{
+	return 0;
+}
+
+static inline void xlnx_bridge_helper_fini(void)
+{
+}
+
+static inline int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+/* Stub when CONFIG_DRM_XLNX_BRIDGE is disabled. (Fixed: was plain
+ * "static" in a header — every includer that doesn't call it would get
+ * a "defined but not used" warning; all sibling stubs are static inline.)
+ */
+static inline int xlnx_bridge_set_timing(struct xlnx_bridge *bridge,
+					 struct videomode *vm)
+{
+	if (bridge)
+		return -ENODEV;
+	return 0;
+}
+
+static inline struct xlnx_bridge *
+of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ return NULL;
+}
+
+static inline void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+#endif /* CONFIG_DRM_XLNX_BRIDGE */
+
+#endif /* _XLNX_BRIDGE_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.c b/drivers/gpu/drm/xlnx/xlnx_crtc.c
new file mode 100644
index 000000000000..d5805c923675
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM crtc driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/list.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The Xilinx CRTC layer is to enable the custom interface to CRTC drivers.
+ * The interface is used by Xilinx DRM driver where it needs CRTC
+ * functionality. CRTC drivers should attach the desired callbacks
+ * to struct xlnx_crtc and register the xlnx_crtc with the corresponding
+ * drm_device. It's highly recommended CRTC drivers register all callbacks
+ * even though many of them are optional.
+ * The CRTC helper simply walks through the registered CRTC device,
+ * and call the callbacks.
+ */
+
+/**
+ * struct xlnx_crtc_helper - Xilinx CRTC helper
+ * @xlnx_crtcs: list of Xilinx CRTC devices
+ * @lock: lock to protect @xlnx_crtcs
+ * @drm: back pointer to DRM core
+ */
+struct xlnx_crtc_helper {
+ struct list_head xlnx_crtcs;
+ struct mutex lock; /* lock for @xlnx_crtcs */
+ struct drm_device *drm;
+};
+
+#define XLNX_CRTC_MAX_HEIGHT_WIDTH INT_MAX
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ unsigned int align = 1, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_align) {
+ tmp = crtc->get_align(crtc);
+ align = ALIGN(align, tmp);
+ }
+ }
+
+ return align;
+}
+
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u64 mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8), tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_dma_mask) {
+ tmp = crtc->get_dma_mask(crtc);
+ mask = min(mask, tmp);
+ }
+ }
+
+ return mask;
+}
+
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_width) {
+ tmp = crtc->get_max_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_height) {
+ tmp = crtc->get_max_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 format = 0, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_format) {
+ tmp = crtc->get_format(crtc);
+ if (format && format != tmp)
+ return 0;
+ format = tmp;
+ }
+ }
+
+ return format;
+}
+
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_width) {
+ tmp = crtc->get_cursor_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_height) {
+ tmp = crtc->get_cursor_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm)
+{
+ struct xlnx_crtc_helper *helper;
+
+ helper = devm_kzalloc(drm->dev, sizeof(*helper), GFP_KERNEL);
+ if (!helper)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&helper->xlnx_crtcs);
+ mutex_init(&helper->lock);
+ helper->drm = drm;
+
+ return helper;
+}
+
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper)
+{
+ if (WARN_ON(helper->drm != drm))
+ return;
+
+ if (WARN_ON(!list_empty(&helper->xlnx_crtcs)))
+ return;
+
+ mutex_destroy(&helper->lock);
+ devm_kfree(drm->dev, helper);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_add_tail(&crtc->list, &helper->xlnx_crtcs);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_register);
+
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_del(&crtc->list);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_unregister);
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.h b/drivers/gpu/drm/xlnx/xlnx_crtc.h
new file mode 100644
index 000000000000..9ab57594aba8
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM crtc header
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_CRTC_H_
+#define _XLNX_CRTC_H_
+
+/**
+ * struct xlnx_crtc - Xilinx CRTC device
+ * @crtc: DRM CRTC device
+ * @list: list node for Xilinx CRTC device list
+ * @get_align: Get the alignment requirement of CRTC device
+ * @get_dma_mask: Get the dma mask of CRTC device
+ * @get_max_width: Get the maximum supported width
+ * @get_max_height: Get the maximum supported height
+ * @get_format: Get the current format of CRTC device
+ * @get_cursor_width: Get the cursor width
+ * @get_cursor_height: Get the cursor height
+ */
+struct xlnx_crtc {
+ struct drm_crtc crtc;
+ struct list_head list;
+ unsigned int (*get_align)(struct xlnx_crtc *crtc);
+ u64 (*get_dma_mask)(struct xlnx_crtc *crtc);
+ int (*get_max_width)(struct xlnx_crtc *crtc);
+ int (*get_max_height)(struct xlnx_crtc *crtc);
+ uint32_t (*get_format)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_width)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_height)(struct xlnx_crtc *crtc);
+};
+
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_crtc_helper;
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper);
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper);
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper);
+
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm);
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper);
+
+/*
+ * CRTC registration: used by other sub-driver modules
+ */
+
+static inline struct xlnx_crtc *to_xlnx_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct xlnx_crtc, crtc);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc);
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc);
+
+#endif /* _XLNX_CRTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_csc.c b/drivers/gpu/drm/xlnx/xlnx_csc.c
new file mode 100644
index 000000000000..1d4341dce570
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_csc.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS CSC DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from V4L2.
+ * TODO:
+ * Need to implement in a modular approach to share driver code between
+ * V4L2 and DRM frameworks.
+ * Should be integrated with the plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+/* Register offset */
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+#define XV_CSC_SCALE_FACTOR (4096)
+#define XV_CSC_DIVISOR (10000)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+#define XCSC_STREAM_OFF (0)
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+
+#define XCSC_MIN_WIDTH (64)
+#define XCSC_MAX_WIDTH (8192)
+#define XCSC_MIN_HEIGHT (64)
+#define XCSC_MAX_HEIGHT (4320)
+
+/* Media-bus formats handled by the CSC; reported for both input and output */
+static const u32 xilinx_csc_video_fmts[] = {
+	MEDIA_BUS_FMT_RGB888_1X24,
+	MEDIA_BUS_FMT_VUY8_1X24,
+	MEDIA_BUS_FMT_UYVY8_1X16,
+	MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/* vpss_csc_color_fmt - Color format type */
+enum vpss_csc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/**
+ * struct xilinx_csc - Core configuration of csc device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @cft_in: input color format
+ * @cft_out: output color format
+ * @color_depth: color depth
+ * @k_hw: array of hardware values
+ * @clip_max: clipping maximum value
+ * @width: width of the video
+ * @height: height of video
+ * @max_width: maximum number of pixels in a line
+ * @max_height: maximum number of lines per frame
+ * @rst_gpio: Handle to GPIO specifier to assert/de-assert the reset line
+ * @aclk: IP clock struct
+ */
+struct xilinx_csc {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ enum vpss_csc_color_fmt cft_in;
+ enum vpss_csc_color_fmt cft_out;
+ u32 color_depth;
+ s32 k_hw[3][4];
+ s32 clip_max;
+ u32 width;
+ u32 height;
+ u32 max_width;
+ u32 max_height;
+ struct gpio_desc *rst_gpio;
+ struct clk *aclk;
+};
+
+/* MMIO helper: write @val to the CSC register at @offset from @base */
+static inline void xilinx_csc_write(void __iomem *base, u32 offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/* MMIO helper: read the CSC register at @offset from @base */
+static inline u32 xilinx_csc_read(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the member.
+ *
+ * Return: parent structure pointer
+ */
+static inline struct xilinx_csc *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+	/* @bridge is embedded in struct xilinx_csc; recover the container */
+	return container_of(bridge, struct xilinx_csc, bridge);
+}
+
+/* Program the 3x3 coefficient matrix (K11..K33) into the hardware */
+static void xilinx_csc_write_rgb_3x3(struct xilinx_csc *csc)
+{
+	unsigned int row, col;
+
+	/*
+	 * The nine coefficient registers sit at consecutive 8-byte offsets
+	 * starting at XV_CSC_K11, in row-major order (K11, K12, ..., K33).
+	 */
+	for (row = 0; row < 3; row++)
+		for (col = 0; col < 3; col++)
+			xilinx_csc_write(csc->base,
+					 XV_CSC_K11 + 8 * (row * 3 + col),
+					 csc->k_hw[row][col]);
+}
+
+/* Program the per-channel offsets (R, G, B) into the hardware */
+static void xilinx_csc_write_rgb_offset(struct xilinx_csc *csc)
+{
+	unsigned int ch;
+
+	/*
+	 * ROFFSET/GOFFSET/BOFFSET are 8 bytes apart; the offset values live
+	 * in column 3 of the coefficient matrix.
+	 */
+	for (ch = 0; ch < 3; ch++)
+		xilinx_csc_write(csc->base, XV_CSC_ROFFSET + 8 * ch,
+				 csc->k_hw[ch][3]);
+}
+
+/* Write the full coefficient set: 3x3 matrix followed by the RGB offsets */
+static void xilinx_csc_write_coeff(struct xilinx_csc *csc)
+{
+	xilinx_csc_write_rgb_3x3(csc);
+	xilinx_csc_write_rgb_offset(csc);
+}
+
+/*
+ * Reset the CSC to its pass-through configuration: YUV 4:2:2 in and out,
+ * an identity coefficient matrix, zero offsets, and clip/clamp limits
+ * derived from the configured color depth.
+ */
+static void xcsc_set_default_state(struct xilinx_csc *csc)
+{
+	unsigned int i, j;
+
+	csc->cft_in = XVIDC_CSF_YCRCB_422;
+	csc->cft_out = XVIDC_CSF_YCRCB_422;
+
+	/* Identity matrix multiplied by 2^12; offset column (j == 3) is zero */
+	for (i = 0; i < 3; i++)
+		for (j = 0; j < 4; j++)
+			csc->k_hw[i][j] = (i == j) ? XV_CSC_SCALE_FACTOR : 0;
+
+	csc->clip_max = (1 << csc->color_depth) - 1;
+	xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+	xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+	xilinx_csc_write_coeff(csc);
+	xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+	xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+/* Load BT.709 YCrCb -> RGB coefficients; reports the clip max via @clip_max */
+static void xcsc_ycrcb_to_rgb(struct xilinx_csc *csc, s32 *clip_max)
+{
+	/* Offset scaling relative to 8 bpc (1x at 8-bit, 4x at 10-bit, ...) */
+	u16 bpc_scale = (1 << (csc->color_depth - 8));
+	/*
+	 * See http://graficaobscura.com/matrix/index.html for
+	 * how these numbers are derived. The VPSS CSC IP is
+	 * derived from this Matrix style algorithm. And the
+	 * 'magic' numbers here are derived from the algorithm.
+	 *
+	 * XV_CSC_DIVISOR is used to help with floating constants
+	 * while performing multiplicative operations.
+	 *
+	 * Coefficients valid only for BT 709
+	 */
+	csc->k_hw[0][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][1] = 0;
+	csc->k_hw[0][2] = 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][1] = -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][2] = -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][1] = 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][2] = 0;
+	csc->k_hw[0][3] = -248 * bpc_scale;
+	csc->k_hw[1][3] = 77 * bpc_scale;
+	csc->k_hw[2][3] = -289 * bpc_scale;
+	/* Tell the caller the clip ceiling for the configured depth */
+	*clip_max = ((1 << csc->color_depth) - 1);
+}
+
+/* Load BT.709 RGB -> YCrCb coefficients; reports the clip max via @clip_max */
+static void xcsc_rgb_to_ycrcb(struct xilinx_csc *csc, s32 *clip_max)
+{
+	/* Offset scaling relative to 8 bpc (1x at 8-bit, 4x at 10-bit, ...) */
+	u16 bpc_scale = (1 << (csc->color_depth - 8));
+	/*
+	 * See http://graficaobscura.com/matrix/index.html for
+	 * how these numbers are derived. The VPSS CSC
+	 * derived from this Matrix style algorithm. And the
+	 * 'magic' numbers here are derived from the algorithm.
+	 *
+	 * XV_CSC_DIVISOR is used to help with floating constants
+	 * while performing multiplicative operations.
+	 *
+	 * Coefficients valid only for BT 709
+	 */
+	csc->k_hw[0][0] = 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][1] = 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][2] = 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][0] = -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][1] = -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][2] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][0] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][1] = -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][2] = -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][3] = 16 * bpc_scale;
+	csc->k_hw[1][3] = 128 * bpc_scale;
+	csc->k_hw[2][3] = 128 * bpc_scale;
+	/* Tell the caller the clip ceiling for the configured depth */
+	*clip_max = ((1 << csc->color_depth) - 1);
+}
+
+/**
+ * xcsc_set_coeff- Sets the coefficients
+ * @csc: Pointer to csc device structure
+ *
+ * This function programs the in/out format registers, the coefficient
+ * matrix and offsets, and the clip/clamp limits from the cached state.
+ */
+static void xcsc_set_coeff(struct xilinx_csc *csc)
+{
+	xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+	xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+	xilinx_csc_write_coeff(csc);
+	xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+	xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+/**
+ * xilinx_csc_bridge_enable - enables csc core
+ * @bridge: bridge instance
+ *
+ * This function enables the csc core by starting it in auto-restart
+ * (free-running) mode.
+ *
+ * Return: 0 on success.
+ *
+ */
+static int xilinx_csc_bridge_enable(struct xlnx_bridge *bridge)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_disable - disables csc core
+ * @bridge: bridge instance
+ *
+ * This function stops streaming and then pulses the GPIO reset line to
+ * return the IP to a known state.
+ */
+static void xilinx_csc_bridge_disable(struct xlnx_bridge *bridge)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_OFF);
+	/* Reset the Global IP Reset through GPIO */
+	gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_ASSERT);
+	gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+}
+
+/**
+ * xilinx_csc_bridge_set_input - Sets the input parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the input parameters of csc. All parameters are
+ * validated before the core is reset to its default state, so a rejected
+ * request leaves the hardware configuration untouched (the original code
+ * reset the core and wrote registers before validating).
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_input(struct xlnx_bridge *bridge, u32 width,
+				       u32 height, u32 bus_fmt)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+	enum vpss_csc_color_fmt fmt;
+
+	if (height > csc->max_height || height < XCSC_MIN_HEIGHT)
+		return -EINVAL;
+
+	if (width > csc->max_width || width < XCSC_MIN_WIDTH)
+		return -EINVAL;
+
+	switch (bus_fmt) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		fmt = XVIDC_CSF_RGB;
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		fmt = XVIDC_CSF_YCRCB_444;
+		break;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		fmt = XVIDC_CSF_YCRCB_422;
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		fmt = XVIDC_CSF_YCRCB_420;
+		break;
+	default:
+		dev_dbg(csc->dev, "unsupported input video format\n");
+		return -EINVAL;
+	}
+
+	/* All parameters are sane; reset to defaults, then reprogram */
+	xcsc_set_default_state(csc);
+
+	csc->height = height;
+	csc->width = width;
+	csc->cft_in = fmt;
+
+	xilinx_csc_write(csc->base, XV_CSC_WIDTH, width);
+	xilinx_csc_write(csc->base, XV_CSC_HEIGHT, height);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_input_fmts - input formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * Report the list of media-bus formats accepted on the CSC input.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+					    const u32 **fmts, u32 *count)
+{
+	/* Every format the CSC understands is valid on the input side */
+	*count = ARRAY_SIZE(xilinx_csc_video_fmts);
+	*fmts = xilinx_csc_video_fmts;
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_set_output - Sets the output parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of csc and loads the matching
+ * conversion coefficients. Note: @csc->cft_in holds enum
+ * vpss_csc_color_fmt values (set by xilinx_csc_bridge_set_input()); the
+ * original code compared it against MEDIA_BUS_FMT_RBG888_1X24, a media-bus
+ * code that can never match, so the RGB conversion coefficients were
+ * loaded (or skipped) incorrectly. Compare against XVIDC_CSF_RGB instead.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_output(struct xlnx_bridge *bridge, u32 width,
+					u32 height, u32 bus_fmt)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	/* Output dimensions must match what was configured on the input */
+	if (width != csc->width || height != csc->height)
+		return -EINVAL;
+
+	switch (bus_fmt) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		csc->cft_out = XVIDC_CSF_RGB;
+		dev_dbg(csc->dev, "Media Format Out : RGB");
+		if (csc->cft_in != XVIDC_CSF_RGB)
+			xcsc_ycrcb_to_rgb(csc, &csc->clip_max);
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		csc->cft_out = XVIDC_CSF_YCRCB_444;
+		dev_dbg(csc->dev, "Media Format Out : YUV 444");
+		if (csc->cft_in == XVIDC_CSF_RGB)
+			xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+		break;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		csc->cft_out = XVIDC_CSF_YCRCB_422;
+		dev_dbg(csc->dev, "Media Format Out : YUV 422");
+		if (csc->cft_in == XVIDC_CSF_RGB)
+			xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		csc->cft_out = XVIDC_CSF_YCRCB_420;
+		dev_dbg(csc->dev, "Media Format Out : YUV 420");
+		if (csc->cft_in == XVIDC_CSF_RGB)
+			xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+		break;
+	default:
+		dev_info(csc->dev, "unsupported output video format\n");
+		return -EINVAL;
+	}
+	xcsc_set_coeff(csc);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_output_fmts - output formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * Report the list of media-bus formats producible on the CSC output.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+					     const u32 **fmts, u32 *count)
+{
+	/* The output side supports the same format list as the input side */
+	*count = ARRAY_SIZE(xilinx_csc_video_fmts);
+	*fmts = xilinx_csc_video_fmts;
+
+	return 0;
+}
+
+/*
+ * Parse the device-tree configuration: AXI clock, video depth
+ * (xlnx,video-width), reset GPIO, and maximum frame dimensions.
+ * Returns 0 on success or a negative error code.
+ */
+static int xcsc_parse_of(struct xilinx_csc *csc)
+{
+	int ret;
+	struct device_node *node = csc->dev->of_node;
+
+	csc->aclk = devm_clk_get(csc->dev, NULL);
+	if (IS_ERR(csc->aclk)) {
+		ret = PTR_ERR(csc->aclk);
+		dev_err(csc->dev, "failed to get aclk %d\n", ret);
+		return ret;
+	}
+
+	/* Color depth in bits per component; only 8/10/12/16 are valid */
+	ret = of_property_read_u32(node, "xlnx,video-width",
+				   &csc->color_depth);
+	if (ret < 0) {
+		dev_info(csc->dev, "video width not present in DT\n");
+		return ret;
+	}
+	if (csc->color_depth != 8 && csc->color_depth != 10 &&
+	    csc->color_depth != 12 && csc->color_depth != 16) {
+		dev_err(csc->dev, "Invalid video width in DT\n");
+		return -EINVAL;
+	}
+	/* Reset GPIO; requested asserted (OUT_HIGH), released in probe */
+	csc->rst_gpio = devm_gpiod_get(csc->dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(csc->rst_gpio)) {
+		if (PTR_ERR(csc->rst_gpio) != -EPROBE_DEFER)
+			dev_err(csc->dev, "Reset GPIO not setup in DT");
+		return PTR_ERR(csc->rst_gpio);
+	}
+
+	/* Frame-size ceilings, bounded by the IP limits */
+	ret = of_property_read_u32(node, "xlnx,max-height", &csc->max_height);
+	if (ret < 0) {
+		dev_err(csc->dev, "xlnx,max-height is missing!");
+		return -EINVAL;
+	} else if (csc->max_height > XCSC_MAX_HEIGHT ||
+		   csc->max_height < XCSC_MIN_HEIGHT) {
+		dev_err(csc->dev, "Invalid height in dt");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "xlnx,max-width", &csc->max_width);
+	if (ret < 0) {
+		dev_err(csc->dev, "xlnx,max-width is missing!");
+		return -EINVAL;
+	} else if (csc->max_width > XCSC_MAX_WIDTH ||
+		   csc->max_width < XCSC_MIN_WIDTH) {
+		dev_err(csc->dev, "Invalid width in dt");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_probe - Probe the VPSS CSC bridge device
+ * @pdev: platform device
+ *
+ * Map the registers, parse the DT configuration, enable the clock,
+ * release the IP from reset, and register the xlnx bridge.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xilinx_csc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct xilinx_csc *csc;
+	int ret;
+
+	csc = devm_kzalloc(dev, sizeof(*csc), GFP_KERNEL);
+	if (!csc)
+		return -ENOMEM;
+
+	csc->dev = dev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	csc->base = devm_ioremap_resource(dev, res);
+	/* Propagate the real error instead of masking it as -ENOMEM */
+	if (IS_ERR(csc->base))
+		return PTR_ERR(csc->base);
+
+	platform_set_drvdata(pdev, csc);
+	ret = xcsc_parse_of(csc);
+	if (ret < 0)
+		return ret;
+
+	ret = clk_prepare_enable(csc->aclk);
+	if (ret) {
+		dev_err(csc->dev, "failed to enable clock %d\n", ret);
+		return ret;
+	}
+
+	/* GPIO was requested asserted in xcsc_parse_of(); release reset */
+	gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+	csc->bridge.enable = &xilinx_csc_bridge_enable;
+	csc->bridge.disable = &xilinx_csc_bridge_disable;
+	csc->bridge.set_input = &xilinx_csc_bridge_set_input;
+	csc->bridge.get_input_fmts = &xilinx_csc_bridge_get_input_fmts;
+	csc->bridge.set_output = &xilinx_csc_bridge_set_output;
+	csc->bridge.get_output_fmts = &xilinx_csc_bridge_get_output_fmts;
+	csc->bridge.of_node = dev->of_node;
+
+	ret = xlnx_bridge_register(&csc->bridge);
+	if (ret) {
+		dev_info(csc->dev, "Bridge registration failed\n");
+		goto err_clk;
+	}
+
+	dev_info(csc->dev, "Xilinx VPSS CSC DRM experimental driver probed\n");
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(csc->aclk);
+	return ret;
+}
+
+/* Unregister the bridge and disable the clock on device removal */
+static int xilinx_csc_remove(struct platform_device *pdev)
+{
+	struct xilinx_csc *csc = platform_get_drvdata(pdev);
+
+	xlnx_bridge_unregister(&csc->bridge);
+	clk_disable_unprepare(csc->aclk);
+
+	return 0;
+}
+
+/* OF match table: binds against the VPSS CSC IP core node */
+static const struct of_device_id xilinx_csc_of_match[] = {
+	{ .compatible = "xlnx,vpss-csc"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xilinx_csc_of_match);
+
+static struct platform_driver csc_bridge_driver = {
+	.probe = xilinx_csc_probe,
+	.remove = xilinx_csc_remove,
+	.driver = {
+		.name = "xlnx,csc-bridge",
+		.of_match_table = xilinx_csc_of_match,
+	},
+};
+
+module_platform_driver(csc_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA CSC Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.c b/drivers/gpu/drm/xlnx/xlnx_drv.c
new file mode 100644
index 000000000000..445325407bb5
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Driver
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reservation.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+#include "xlnx_gem.h"
+
+#define DRIVER_NAME "xlnx"
+#define DRIVER_DESC "Xilinx DRM KMS Driver"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xlnx_fbdev_vres = 2;
+module_param_named(fbdev_vres, xlnx_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/**
+ * struct xlnx_drm - Xilinx DRM private data
+ * @drm: DRM core
+ * @crtc: Xilinx DRM CRTC helper
+ * @fb: DRM fb helper
+ * @master: logical master device for pipeline
+ * @suspend_state: atomic state for suspend / resume
+ * @is_master: A flag to indicate if this instance is fake master
+ */
+struct xlnx_drm {
+ struct drm_device *drm;
+ struct xlnx_crtc_helper *crtc;
+ struct drm_fb_helper *fb;
+ struct platform_device *master;
+ struct drm_atomic_state *suspend_state;
+ bool is_master;
+};
+
+/**
+ * xlnx_get_crtc_helper - Return the crtc helper instance
+ * @drm: DRM device
+ *
+ * Return: the crtc helper instance
+ */
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm)
+{
+	/* dev_private is set to the struct xlnx_drm instance in xlnx_bind() */
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	return xlnx_drm->crtc;
+}
+
+/**
+ * xlnx_get_align - Return the align requirement through CRTC helper
+ * @drm: DRM device
+ *
+ * Return: the alignment requirement
+ */
+unsigned int xlnx_get_align(struct drm_device *drm)
+{
+	/* dev_private is set to the struct xlnx_drm instance in xlnx_bind() */
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	return xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+}
+
+/**
+ * xlnx_get_format - Return the current format of CRTC
+ * @drm: DRM device
+ *
+ * Return: the current CRTC format
+ */
+uint32_t xlnx_get_format(struct drm_device *drm)
+{
+	/* dev_private is set to the struct xlnx_drm instance in xlnx_bind() */
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	return xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+}
+
+/* Forward hotplug events to fbdev; fb is NULL when fbdev emulation is off */
+static void xlnx_output_poll_changed(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	if (xlnx_drm->fb)
+		drm_fb_helper_hotplug_event(xlnx_drm->fb);
+}
+
+/* Mode-config ops: custom fb creation and hotplug; stock atomic helpers */
+static const struct drm_mode_config_funcs xlnx_mode_config_funcs = {
+	.fb_create = xlnx_fb_create,
+	.output_poll_changed = xlnx_output_poll_changed,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+/* Populate DRM mode-config limits from the bound CRTC(s) via the helper */
+static void xlnx_mode_config_init(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+	struct xlnx_crtc_helper *crtc = xlnx_drm->crtc;
+
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+	drm->mode_config.max_width = xlnx_crtc_helper_get_max_width(crtc);
+	drm->mode_config.max_height = xlnx_crtc_helper_get_max_height(crtc);
+	drm->mode_config.cursor_width =
+		xlnx_crtc_helper_get_cursor_width(crtc);
+	drm->mode_config.cursor_height =
+		xlnx_crtc_helper_get_cursor_height(crtc);
+}
+
+static int xlnx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct xlnx_drm *xlnx_drm = dev->dev_private;
+
+	/* This is a hacky way to allow the root user to run as a master */
+	if (!(drm_is_primary_client(file) && !dev->master) &&
+	    !file->is_master && capable(CAP_SYS_ADMIN)) {
+		/*
+		 * NOTE(review): master status is forced for any
+		 * CAP_SYS_ADMIN opener that the core did not already make
+		 * master, and the flag is tracked per device rather than
+		 * per file — confirm this is the intended scope.
+		 */
+		file->is_master = 1;
+		xlnx_drm->is_master = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo the forced-master hack from xlnx_drm_open() before delegating to
+ * the core drm_release(). NOTE(review): is_master is device-global, so
+ * any file release clears it regardless of which file was forced —
+ * confirm this matches the single-client use case.
+ */
+static int xlnx_drm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file = filp->private_data;
+	struct drm_minor *minor = file->minor;
+	struct drm_device *drm = minor->dev;
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	if (xlnx_drm->is_master) {
+		xlnx_drm->is_master = false;
+		file->is_master = 0;
+	}
+
+	return drm_release(inode, filp);
+}
+
+/* Restore fbdev mode when the last DRM client closes (if fbdev exists) */
+static void xlnx_lastclose(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	if (xlnx_drm->fb)
+		drm_fb_helper_restore_fbdev_mode_unlocked(xlnx_drm->fb);
+}
+
+/*
+ * File ops: stock DRM handlers except .release, which clears the
+ * forced-master state set up in xlnx_drm_open().
+ */
+static const struct file_operations xlnx_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= xlnx_drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+	.mmap		= drm_gem_cma_mmap,
+	.poll		= drm_poll,
+	.read		= drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.llseek		= noop_llseek,
+};
+
+/* DRM driver: atomic modesetting with CMA-backed GEM and PRIME support */
+static struct drm_driver xlnx_drm_driver = {
+	.driver_features		= DRIVER_MODESET | DRIVER_GEM |
+					  DRIVER_ATOMIC | DRIVER_PRIME,
+	.open				= xlnx_drm_open,
+	.lastclose			= xlnx_lastclose,
+
+	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
+	.gem_prime_export		= drm_gem_prime_export,
+	.gem_prime_import		= drm_gem_prime_import,
+	.gem_prime_get_sg_table		= drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table	= drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap			= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap		= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap			= drm_gem_cma_prime_mmap,
+	.gem_free_object		= drm_gem_cma_free_object,
+	.gem_vm_ops			= &drm_gem_cma_vm_ops,
+	.dumb_create			= xlnx_gem_cma_dumb_create,
+	.dumb_destroy			= drm_gem_dumb_destroy,
+
+	.fops				= &xlnx_fops,
+
+	.name				= DRIVER_NAME,
+	.desc				= DRIVER_DESC,
+	.date				= DRIVER_DATE,
+	.major				= DRIVER_MAJOR,
+	.minor				= DRIVER_MINOR,
+};
+
+/**
+ * xlnx_bind - Bind callback for the logical master device
+ * @dev: logical master device (child of the pipeline's platform device)
+ *
+ * Allocate the DRM device, initialize mode config and vblank, bind all
+ * component sub-drivers, optionally set up fbdev emulation, and register
+ * the DRM device. Components are bound against @dev (the master), so the
+ * error path must unbind against the same device — the original code
+ * passed drm->dev (the parent) to component_unbind_all() in the err_fb
+ * path, mismatching both component_bind_all() and xlnx_unbind().
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xlnx_bind(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm;
+	struct drm_device *drm;
+	const struct drm_format_info *info;
+	struct platform_device *master = to_platform_device(dev);
+	struct platform_device *pdev = to_platform_device(dev->parent);
+	int ret;
+	u32 format;
+
+	drm = drm_dev_alloc(&xlnx_drm_driver, &pdev->dev);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+	xlnx_drm = devm_kzalloc(drm->dev, sizeof(*xlnx_drm), GFP_KERNEL);
+	if (!xlnx_drm) {
+		ret = -ENOMEM;
+		goto err_drm;
+	}
+
+	drm_mode_config_init(drm);
+	drm->mode_config.funcs = &xlnx_mode_config_funcs;
+
+	ret = drm_vblank_init(drm, 1);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto err_xlnx_drm;
+	}
+
+	drm->irq_enabled = 1;
+	drm->dev_private = xlnx_drm;
+	xlnx_drm->drm = drm;
+	xlnx_drm->master = master;
+	drm_kms_helper_poll_init(drm);
+	platform_set_drvdata(master, xlnx_drm);
+
+	xlnx_drm->crtc = xlnx_crtc_helper_init(drm);
+	if (IS_ERR(xlnx_drm->crtc)) {
+		ret = PTR_ERR(xlnx_drm->crtc);
+		goto err_xlnx_drm;
+	}
+
+	ret = component_bind_all(&master->dev, drm);
+	if (ret)
+		goto err_crtc;
+
+	xlnx_mode_config_init(drm);
+	drm_mode_config_reset(drm);
+	dma_set_mask(drm->dev, xlnx_crtc_helper_get_dma_mask(xlnx_drm->crtc));
+
+	format = xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+	info = drm_format_info(format);
+	if (info && info->depth && info->cpp[0]) {
+		unsigned int align;
+
+		align = xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+		xlnx_drm->fb = xlnx_fb_init(drm, info->cpp[0] * 8, 1, align,
+					    xlnx_fbdev_vres);
+		if (IS_ERR(xlnx_drm->fb)) {
+			dev_err(&pdev->dev,
+				"failed to initialize drm fb\n");
+			xlnx_drm->fb = NULL;
+		}
+	} else {
+		/* fbdev emulation is optional */
+		dev_info(&pdev->dev, "fbdev is not initialized\n");
+	}
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto err_fb;
+
+	return 0;
+
+err_fb:
+	if (xlnx_drm->fb)
+		xlnx_fb_fini(xlnx_drm->fb);
+	/* Unbind against the master device that components were bound to */
+	component_unbind_all(&master->dev, drm);
+err_crtc:
+	xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+err_xlnx_drm:
+	drm_mode_config_cleanup(drm);
+err_drm:
+	drm_dev_put(drm);
+	return ret;
+}
+
+/* Tear down the pipeline in reverse order of xlnx_bind() */
+static void xlnx_unbind(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+	struct drm_device *drm = xlnx_drm->drm;
+
+	drm_dev_unregister(drm);
+	if (xlnx_drm->fb)
+		xlnx_fb_fini(xlnx_drm->fb);
+	component_unbind_all(&xlnx_drm->master->dev, drm);
+	xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+	drm_kms_helper_poll_fini(drm);
+	drm_mode_config_cleanup(drm);
+	drm_dev_put(drm);
+}
+
+/* Component master ops for the logical "xlnx-drm" pipeline device */
+static const struct component_master_ops xlnx_master_ops = {
+	.bind	= xlnx_bind,
+	.unbind	= xlnx_unbind,
+};
+
+/*
+ * Build the component match list for the pipeline from the device tree
+ * and register the component master.
+ *
+ * Two passes over the pipeline device's "ports" phandle list:
+ *   1) add each available port parent (the sub-device nodes) as a match;
+ *   2) for each of those nodes, follow its OF-graph endpoints and add
+ *      every available remote-port parent (encoders, bridges, panels).
+ */
+static int xlnx_of_component_probe(struct device *master_dev,
+				   int (*compare_of)(struct device *, void *),
+				   const struct component_master_ops *m_ops)
+{
+	struct device *dev = master_dev->parent;
+	struct device_node *ep, *port, *remote, *parent;
+	struct component_match *match = NULL;
+	int i;
+
+	if (!dev->of_node)
+		return -EINVAL;
+
+	/* The pipeline device itself is the first component */
+	component_match_add(master_dev, &match, compare_of, dev->of_node);
+
+	for (i = 0; ; i++) {
+		port = of_parse_phandle(dev->of_node, "ports", i);
+		if (!port)
+			break;
+
+		parent = port->parent;
+		if (!of_node_cmp(parent->name, "ports"))
+			parent = parent->parent;
+		parent = of_node_get(parent);
+
+		if (!of_device_is_available(parent)) {
+			of_node_put(parent);
+			of_node_put(port);
+			continue;
+		}
+
+		component_match_add(master_dev, &match, compare_of, parent);
+		of_node_put(parent);
+		of_node_put(port);
+	}
+
+	/* Second pass: walk endpoints of each port parent, starting with
+	 * the pipeline node itself.
+	 */
+	parent = dev->of_node;
+	for (i = 0; ; i++) {
+		parent = of_node_get(parent);
+		if (!of_device_is_available(parent)) {
+			/*
+			 * NOTE(review): continue leaves @parent unchanged,
+			 * so an unavailable node would retry forever —
+			 * confirm "ports" targets can never be unavailable
+			 * here (pass 1 skipped them already).
+			 */
+			of_node_put(parent);
+			continue;
+		}
+
+		for_each_endpoint_of_node(parent, ep) {
+			remote = of_graph_get_remote_port_parent(ep);
+			if (!remote || !of_device_is_available(remote) ||
+			    remote == dev->of_node) {
+				of_node_put(remote);
+				continue;
+			} else if (!of_device_is_available(remote->parent)) {
+				dev_warn(dev, "parent dev of %s unavailable\n",
+					 remote->full_name);
+				of_node_put(remote);
+				continue;
+			}
+			component_match_add(master_dev, &match, compare_of,
+					    remote);
+			of_node_put(remote);
+		}
+		of_node_put(parent);
+
+		port = of_parse_phandle(dev->of_node, "ports", i);
+		if (!port)
+			break;
+
+		parent = port->parent;
+		if (!of_node_cmp(parent->name, "ports"))
+			parent = parent->parent;
+		of_node_put(port);
+	}
+
+	return component_master_add_with_match(master_dev, m_ops, match);
+}
+
+/* Component match: a device matches when its OF node equals @data */
+static int xlnx_compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+/* Probe: gather pipeline components from DT and add the component master */
+static int xlnx_platform_probe(struct platform_device *pdev)
+{
+	return xlnx_of_component_probe(&pdev->dev, xlnx_compare_of,
+				       &xlnx_master_ops);
+}
+
+/* Remove: delete the component master (unbinds all components) */
+static int xlnx_platform_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &xlnx_master_ops);
+	return 0;
+}
+
+/* Shutdown: same teardown as remove, invoked at system shutdown */
+static void xlnx_platform_shutdown(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &xlnx_master_ops);
+}
+
+/* System suspend: stop polling and save atomic state for resume */
+static int __maybe_unused xlnx_pm_suspend(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+	struct drm_device *drm = xlnx_drm->drm;
+
+	drm_kms_helper_poll_disable(drm);
+
+	/* State is restored by xlnx_pm_resume() */
+	xlnx_drm->suspend_state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(xlnx_drm->suspend_state)) {
+		/* Suspend failed: re-enable polling and report the error */
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(xlnx_drm->suspend_state);
+	}
+
+	return 0;
+}
+
+/* System resume: restore the saved atomic state and restart polling */
+static int __maybe_unused xlnx_pm_resume(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+	struct drm_device *drm = xlnx_drm->drm;
+
+	drm_atomic_helper_resume(drm, xlnx_drm->suspend_state);
+	drm_kms_helper_poll_enable(drm);
+
+	return 0;
+}
+
+/* PM ops for the logical "xlnx-drm" device */
+static const struct dev_pm_ops xlnx_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(xlnx_pm_suspend, xlnx_pm_resume)
+};
+
+/* Platform driver for the logical pipeline devices created below */
+static struct platform_driver xlnx_driver = {
+	.probe			= xlnx_platform_probe,
+	.remove			= xlnx_platform_remove,
+	.shutdown		= xlnx_platform_shutdown,
+	.driver			= {
+		.name		= "xlnx-drm",
+		.pm		= &xlnx_pm_ops,
+	},
+};
+
+/* bitmap for master id */
+static u32 xlnx_master_ids = GENMASK(31, 0);
+
+/**
+ * xlnx_drm_pipeline_init - Initialize the drm pipeline for the device
+ * @pdev: The platform device to initialize the drm pipeline device
+ *
+ * This function initializes the drm pipeline device, struct drm_device,
+ * on @pdev by creating a logical master platform device. The logical platform
+ * device acts as a master device to bind slave devices and represents
+ * the entire pipeline.
+ * The logical master uses the port bindings of the calling device to
+ * figure out the pipeline topology.
+ *
+ * Return: the logical master platform device if the drm device is initialized
+ * on @pdev. Error code otherwise.
+ */
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *pdev)
+{
+	struct platform_device *master;
+	int id, ret;
+
+	/* Claim the lowest free id from the 32-entry bitmap */
+	id = ffs(xlnx_master_ids);
+	if (!id)
+		return ERR_PTR(-ENOSPC);
+
+	master = platform_device_alloc("xlnx-drm", id - 1);
+	if (!master)
+		return ERR_PTR(-ENOMEM);
+
+	master->dev.parent = &pdev->dev;
+	ret = platform_device_add(master);
+	if (ret)
+		goto err_out;
+
+	WARN_ON(master->id != id - 1);
+	xlnx_master_ids &= ~BIT(master->id);
+	return master;
+
+err_out:
+	/*
+	 * The device was never added; platform_device_put() drops the
+	 * allocation reference (platform_device_unregister() would also
+	 * try to delete a device that was never added).
+	 */
+	platform_device_put(master);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_init);
+
+/**
+ * xlnx_drm_pipeline_exit - Release the drm pipeline for the device
+ * @master: The master pipeline device to release
+ *
+ * Release the logical pipeline device returned by xlnx_drm_pipeline_init().
+ */
+void xlnx_drm_pipeline_exit(struct platform_device *master)
+{
+	/* Return the id to the bitmap before the device goes away */
+	xlnx_master_ids |= BIT(master->id);
+	platform_device_unregister(master);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_exit);
+
+/*
+ * Module init: set up the bridge helper, then register the platform
+ * driver. Propagate a registration failure (the original code always
+ * returned 0) and unwind the bridge helper in that case.
+ */
+static int __init xlnx_drm_drv_init(void)
+{
+	int ret;
+
+	xlnx_bridge_helper_init();
+	ret = platform_driver_register(&xlnx_driver);
+	if (ret)
+		xlnx_bridge_helper_fini();
+
+	return ret;
+}
+
+/* Module exit: unregister the driver, then tear down the bridge helper */
+static void __exit xlnx_drm_drv_exit(void)
+{
+	platform_driver_unregister(&xlnx_driver);
+	xlnx_bridge_helper_fini();
+}
+
+module_init(xlnx_drm_drv_init);
+module_exit(xlnx_drm_drv_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.h b/drivers/gpu/drm/xlnx/xlnx_drv.h
new file mode 100644
index 000000000000..0f6595f1bd85
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS Header for Xilinx
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_DRV_H_
+#define _XLNX_DRV_H_
+
+/* Forward declarations keep this header free of heavy includes */
+struct drm_device;
+struct platform_device;
+struct xlnx_bridge_helper;
+struct xlnx_crtc_helper;
+
+/* Create/destroy the logical master device for a display pipeline */
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *parent);
+void xlnx_drm_pipeline_exit(struct platform_device *pipeline);
+
+/* Accessors for per-device state owned by the xlnx DRM core */
+uint32_t xlnx_get_format(struct drm_device *drm);
+unsigned int xlnx_get_align(struct drm_device *drm);
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm);
+struct xlnx_bridge_helper *xlnx_get_bridge_helper(struct drm_device *drm);
+
+#endif /* _XLNX_DRV_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_dsi.c b/drivers/gpu/drm/xlnx/xlnx_dsi.c
new file mode 100644
index 000000000000..eae5b92b46c2
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_dsi.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "xlnx_bridge.h"
+
+/* DSI Tx IP registers */
+#define XDSI_CCR			0x00
+#define XDSI_CCR_COREENB		BIT(0)
+#define XDSI_CCR_CRREADY		BIT(2)
+#define XDSI_PCR			0x04
+#define XDSI_PCR_VIDEOMODE(x)		(((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK		(0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT	3
+#define XDSI_PCR_BLLPTYPE(x)		((x) << 5)
+#define XDSI_PCR_BLLPMODE(x)		((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x)		((x) << 13)
+#define XDSI_GIER			0x20
+#define XDSI_ISR			0x24
+#define XDSI_IER			0x28
+#define XDSI_CMD			0x30
+#define XDSI_CMD_QUEUE_PACKET(x)	((x) & GENMASK(23, 0))
+#define XDSI_TIME1			0x50
+#define XDSI_TIME1_BLLP_BURST(x)	((x) & GENMASK(15, 0))
+#define XDSI_TIME1_HSA(x)		(((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME2			0x54
+#define XDSI_TIME2_VACT(x)		((x) & GENMASK(15, 0))
+#define XDSI_TIME2_HACT(x)		(((x) & GENMASK(15, 0)) << 16)
+#define XDSI_HACT_MULTIPLIER		GENMASK(1, 0)
+#define XDSI_TIME3			0x58
+#define XDSI_TIME3_HFP(x)		((x) & GENMASK(15, 0))
+#define XDSI_TIME3_HBP(x)		(((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME4			0x5c
+#define XDSI_TIME4_VFP(x)		((x) & GENMASK(7, 0))
+#define XDSI_TIME4_VBP(x)		(((x) & GENMASK(7, 0)) << 8)
+#define XDSI_TIME4_VSA(x)		(((x) & GENMASK(7, 0)) << 16)
+#define XDSI_LTIME			0x60
+#define XDSI_BLLP_TIME			0x64
+/*
+ * XDSI_NUM_DATA_T represents number of data types in the
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of DRM framework.
+ */
+#define XDSI_NUM_DATA_T			4
+#define XDSI_VIDEO_MODE_SYNC_PULSE	0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT	0x1
+#define XDSI_VIDEO_MODE_BURST		0x2
+
+/*
+ * NOTE(review): interpreted as Hz these bounds are ~200 GHz, yet they are
+ * passed straight to clk_set_rate()/clk_get_rate() for the "200 MHz" D-PHY
+ * clock below. Presumably the clock provider expects this unit — confirm
+ * against the D-PHY clock driver before changing.
+ */
+#define XDSI_DPHY_CLK_MIN	197000000000UL
+#define XDSI_DPHY_CLK_MAX	203000000000UL
+#define XDSI_DPHY_CLK_REQ	200000000000UL
+
+/**
+ * struct xlnx_dsi - Core configuration DSI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock and AXI Lite clock
+ */
+struct xlnx_dsi {
+	struct drm_encoder encoder;
+	struct mipi_dsi_host dsi_host;
+	struct drm_connector connector;
+	struct device_node *panel_node;
+	struct drm_panel *panel;
+	struct device *dev;
+	void __iomem *iomem;
+	u32 lanes;
+	u32 mode_flags;
+	enum mipi_dsi_pixel_format format;
+	struct videomode vm;
+	/* 100x-scaled bytes-per-pixel factor, set in xlnx_dsi_parse_dt() */
+	u32 mul_factor;
+	struct drm_property *eotp_prop;
+	struct drm_property *bllp_mode_prop;
+	struct drm_property *bllp_type_prop;
+	struct drm_property *video_mode_prop;
+	struct drm_property *bllp_burst_time_prop;
+	struct drm_property *cmd_queue_prop;
+	bool eotp_prop_val;
+	bool bllp_mode_prop_val;
+	bool bllp_type_prop_val;
+	u32 video_mode_prop_val;
+	u32 bllp_burst_time_prop_val;
+	u32 cmd_queue_prop_val;
+	struct xlnx_bridge *bridge;
+	struct drm_property *height_out;
+	u32 height_out_prop_val;
+	struct drm_property *width_out;
+	u32 width_out_prop_val;
+	struct drm_property *in_fmt;
+	u32 in_fmt_prop_val;
+	struct drm_property *out_fmt;
+	u32 out_fmt_prop_val;
+	struct clk *video_aclk;
+	struct clk *dphy_clk_200M;
+};
+
+/* container_of helpers from the embedded host/connector/encoder members */
+#define host_to_dsi(host) container_of(host, struct xlnx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xlnx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xlnx_dsi, encoder)
+
+/* Thin MMIO accessors for the DSI Tx register space */
+static inline void xlnx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+static inline u32 xlnx_dsi_readl(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/**
+ * xlnx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * given from user application.
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure having drm_property parameters
+ * configured from user application and writes them into DSI IP registers.
+ */
+static void xlnx_dsi_set_config_parameters(struct xlnx_dsi *dsi)
+{
+	u32 reg;
+
+	/* Assemble the whole PCR value before a single register write */
+	reg = XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+	reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+	reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+	reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+	xlnx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+	/*
+	 * Configure the burst time if video mode is burst.
+	 * HSA of TIME1 register is ignored in this mode.
+	 */
+	if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+		reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+		xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+	}
+
+	reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+	xlnx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+	/* Read PCR back purely for debug visibility */
+	dev_dbg(dsi->dev, "PCR register value is = %x\n",
+		xlnx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xlnx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xlnx_dsi_set_display_mode(struct xlnx_dsi *dsi)
+{
+	struct videomode *vm = &dsi->vm;
+	u32 reg, video_mode;
+
+	reg = xlnx_dsi_readl(dsi->iomem, XDSI_PCR);
+	video_mode = (reg & XDSI_PCR_VIDEOMODE_MASK) >>
+		     XDSI_PCR_VIDEOMODE_SHIFT;
+
+	/* configure the HSA value only in non-burst sync pulse video mode */
+	if (!video_mode &&
+	    (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+		reg = XDSI_TIME1_HSA(vm->hsync_len);
+		xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+	}
+
+	reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+	      XDSI_TIME4_VBP(vm->vback_porch) |
+	      XDSI_TIME4_VSA(vm->vsync_len);
+	xlnx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+	reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+	      XDSI_TIME3_HBP(vm->hback_porch);
+	xlnx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+	dev_dbg(dsi->dev, "mul factor for parsed datatype is = %d\n",
+		(dsi->mul_factor) / 100);
+	/*
+	 * The HACT parameter received from panel timing values should be
+	 * divisible by 4. The reason for this is, the word count given as
+	 * input to DSI controller is HACT * mul_factor. The mul_factor is
+	 * 3, 2.25, 2.25, 2 respectively for RGB888, RGB666_L, RGB666_P and
+	 * RGB565.
+	 * e.g. for RGB666_L color format and 1080p, the word count is
+	 * 1920*2.25 = 4320 which is divisible by 4 and it is a valid input
+	 * to DSI controller. Based on this 2.25 mul factor, we come up with
+	 * the division factor of (XDSI_HACT_MULTIPLIER) as 4 for checking
+	 */
+	if ((vm->hactive & XDSI_HACT_MULTIPLIER) != 0)
+		dev_warn(dsi->dev, "Incorrect HACT will be programmed\n");
+
+	/* mul_factor is 100x-scaled bytes per pixel, hence the /100 */
+	reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+	      XDSI_TIME2_VACT(vm->vactive);
+	xlnx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+	dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xlnx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_enable(struct xlnx_dsi *dsi)
+{
+	u32 reg;
+
+	/* Read-modify-write so other CCR bits are preserved */
+	reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+	reg |= XDSI_CCR_COREENB;
+
+	xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+	dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xlnx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and disables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_disable(struct xlnx_dsi *dsi)
+{
+	u32 reg;
+
+	reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+	reg &= ~XDSI_CCR_COREENB;
+
+	xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+	dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+/**
+ * xlnx_dsi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer to Xilinx DSI connector
+ * @state: DRM connector state
+ * @prop: pointer to the drm_property structure
+ * @val: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from user application
+ * and update the DSI structure property variables with the values.
+ * These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int xlnx_dsi_atomic_set_property(struct drm_connector *connector,
+					struct drm_connector_state *state,
+					struct drm_property *prop, u64 val)
+{
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+	dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+		prop->name, val);
+
+	if (prop == dsi->eotp_prop)
+		dsi->eotp_prop_val = !!val;
+	else if (prop == dsi->bllp_mode_prop)
+		dsi->bllp_mode_prop_val = !!val;
+	else if (prop == dsi->bllp_type_prop)
+		dsi->bllp_type_prop_val = !!val;
+	else if (prop == dsi->video_mode_prop)
+		dsi->video_mode_prop_val = (unsigned int)val;
+	else if (prop == dsi->bllp_burst_time_prop)
+		dsi->bllp_burst_time_prop_val = (unsigned int)val;
+	else if (prop == dsi->cmd_queue_prop)
+		dsi->cmd_queue_prop_val = (unsigned int)val;
+	else if (prop == dsi->height_out)
+		dsi->height_out_prop_val = (u32)val;
+	else if (prop == dsi->width_out)
+		dsi->width_out_prop_val = (u32)val;
+	else if (prop == dsi->in_fmt)
+		dsi->in_fmt_prop_val = (u32)val;
+	else if (prop == dsi->out_fmt)
+		dsi->out_fmt_prop_val = (u32)val;
+	else
+		return -EINVAL;
+
+	/* Commit the updated values to the IP registers immediately */
+	xlnx_dsi_set_config_parameters(dsi);
+
+	return 0;
+}
+
+/**
+ * xlnx_dsi_atomic_get_property - read back a DSI connector property
+ * @connector: pointer to Xilinx DSI connector
+ * @state: DRM connector state (unused; values live in struct xlnx_dsi)
+ * @prop: property being queried
+ * @val: location to store the current value
+ *
+ * Return: 0 on success, -EINVAL if @prop is not a known DSI property
+ */
+static int
+xlnx_dsi_atomic_get_property(struct drm_connector *connector,
+			     const struct drm_connector_state *state,
+			     struct drm_property *prop, uint64_t *val)
+{
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+	if (prop == dsi->eotp_prop)
+		*val = dsi->eotp_prop_val;
+	else if (prop == dsi->bllp_mode_prop)
+		*val = dsi->bllp_mode_prop_val;
+	else if (prop == dsi->bllp_type_prop)
+		*val = dsi->bllp_type_prop_val;
+	else if (prop == dsi->video_mode_prop)
+		*val = dsi->video_mode_prop_val;
+	else if (prop == dsi->bllp_burst_time_prop)
+		*val = dsi->bllp_burst_time_prop_val;
+	else if (prop == dsi->cmd_queue_prop)
+		*val = dsi->cmd_queue_prop_val;
+	else if (prop == dsi->height_out)
+		*val = dsi->height_out_prop_val;
+	else if (prop == dsi->width_out)
+		*val = dsi->width_out_prop_val;
+	else if (prop == dsi->in_fmt)
+		*val = dsi->in_fmt_prop_val;
+	else if (prop == dsi->out_fmt)
+		*val = dsi->out_fmt_prop_val;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int xlnx_dsi_host_attach(struct mipi_dsi_host *host,
+				struct mipi_dsi_device *device)
+{
+	struct xlnx_dsi *dsi = host_to_dsi(host);
+
+	/*
+	 * Validate the peripheral before committing any of its parameters to
+	 * the host state, so a rejected attach leaves dsi unmodified.
+	 */
+	if (device->lanes != dsi->lanes) {
+		dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+			device->lanes, dsi->lanes);
+		return -EINVAL;
+	}
+
+	if (dsi->lanes > 4 || dsi->lanes < 1) {
+		dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+			dsi->lanes);
+		return -EINVAL;
+	}
+
+	if (device->format != dsi->format) {
+		dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+			device->format, dsi->format);
+		return -EINVAL;
+	}
+
+	dsi->mode_flags = device->mode_flags;
+	dsi->panel_node = device->dev.of_node;
+
+	/* Let userspace re-probe connector status for the new peripheral */
+	if (dsi->connector.dev)
+		drm_helper_hpd_irq_event(dsi->connector.dev);
+
+	return 0;
+}
+
+static int xlnx_dsi_host_detach(struct mipi_dsi_host *host,
+				struct mipi_dsi_device *device)
+{
+	struct xlnx_dsi *dsi = host_to_dsi(host);
+
+	/* Forget the panel and let userspace re-probe connector status */
+	dsi->panel = NULL;
+
+	if (dsi->connector.dev)
+		drm_helper_hpd_irq_event(dsi->connector.dev);
+
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops xlnx_dsi_ops = {
+	.attach = xlnx_dsi_host_attach,
+	.detach = xlnx_dsi_host_detach,
+};
+
+static int xlnx_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+	int ret;
+
+	dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		/* Power the panel up: prepare first, then enable */
+		ret = drm_panel_prepare(dsi->panel);
+		if (ret < 0) {
+			dev_err(dsi->dev, "DRM panel not found\n");
+			return ret;
+		}
+
+		ret = drm_panel_enable(dsi->panel);
+		if (ret < 0) {
+			/* Undo the prepare step if enable fails */
+			drm_panel_unprepare(dsi->panel);
+			dev_err(dsi->dev, "DRM panel not enabled\n");
+			return ret;
+		}
+		break;
+	default:
+		/* Every other DPMS state powers the panel down */
+		drm_panel_disable(dsi->panel);
+		drm_panel_unprepare(dsi->panel);
+		break;
+	}
+
+	return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xlnx_dsi_detect(struct drm_connector *connector, bool force)
+{
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+	if (!dsi->panel) {
+		/*
+		 * NOTE(review): this treats any non-NULL return as a valid
+		 * panel; recent kernels return ERR_PTR() from
+		 * of_drm_find_panel() — confirm against the target tree.
+		 */
+		dsi->panel = of_drm_find_panel(dsi->panel_node);
+		if (dsi->panel)
+			drm_panel_attach(dsi->panel, &dsi->connector);
+	} else if (!dsi->panel_node) {
+		/* Peripheral detached: power down and drop the panel */
+		xlnx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+		drm_panel_detach(dsi->panel);
+		dsi->panel = NULL;
+	}
+
+	if (dsi->panel)
+		return connector_status_connected;
+
+	return connector_status_disconnected;
+}
+
+static void xlnx_dsi_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_dsi_connector_funcs = {
+	.dpms = xlnx_dsi_connector_dpms,
+	.detect = xlnx_dsi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = xlnx_dsi_connector_destroy,
+	.atomic_set_property = xlnx_dsi_atomic_set_property,
+	.atomic_get_property = xlnx_dsi_atomic_get_property,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.reset = drm_atomic_helper_connector_reset,
+};
+
+static int xlnx_dsi_get_modes(struct drm_connector *connector)
+{
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+	/* The mode list comes entirely from the attached panel, if any */
+	if (dsi->panel)
+		return dsi->panel->funcs->get_modes(dsi->panel);
+
+	return 0;
+}
+
+static struct drm_encoder *
+xlnx_dsi_best_encoder(struct drm_connector *connector)
+{
+	/* 1:1 connector/encoder mapping: always our embedded encoder */
+	return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xlnx_dsi_connector_helper_funcs = {
+	.get_modes = xlnx_dsi_get_modes,
+	.best_encoder = xlnx_dsi_best_encoder,
+};
+
+/**
+ * xlnx_dsi_connector_create_property - create DSI connector properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the xilinx DSI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void xlnx_dsi_connector_create_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+	/*
+	 * drm_property_create_*() may return NULL on allocation failure;
+	 * xlnx_dsi_connector_attach_property() checks each pointer before use.
+	 */
+	dsi->eotp_prop = drm_property_create_bool(dev, 0, "eotp");
+	dsi->video_mode_prop = drm_property_create_range(dev, 0, "video_mode",
+							 0, 2);
+	dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+	dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+	dsi->bllp_burst_time_prop =
+		drm_property_create_range(dev, 0, "bllp_burst_time", 0, 0xFFFF);
+	dsi->cmd_queue_prop = drm_property_create_range(dev, 0, "cmd_queue", 0,
+							0xffffff);
+	dsi->height_out = drm_property_create_range(dev, 0, "height_out",
+						    2, 4096);
+	dsi->width_out = drm_property_create_range(dev, 0, "width_out",
+						   2, 4096);
+	dsi->in_fmt = drm_property_create_range(dev, 0, "in_fmt", 0, 16384);
+	dsi->out_fmt = drm_property_create_range(dev, 0, "out_fmt", 0, 16384);
+}
+
+/**
+ * xlnx_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ *
+ * Attach every property that was successfully created by
+ * xlnx_dsi_connector_create_property(); NULL pointers (failed creation)
+ * are skipped.
+ */
+static void xlnx_dsi_connector_attach_property(struct drm_connector *connector)
+{
+	struct xlnx_dsi *dsi = connector_to_dsi(connector);
+	struct drm_mode_object *obj = &connector->base;
+
+	/* Use the cached obj pointer consistently for every attachment */
+	if (dsi->eotp_prop)
+		drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+	if (dsi->video_mode_prop)
+		drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+	if (dsi->bllp_burst_time_prop)
+		drm_object_attach_property(obj, dsi->bllp_burst_time_prop, 0);
+
+	if (dsi->bllp_mode_prop)
+		drm_object_attach_property(obj, dsi->bllp_mode_prop, 0);
+
+	if (dsi->bllp_type_prop)
+		drm_object_attach_property(obj, dsi->bllp_type_prop, 0);
+
+	if (dsi->cmd_queue_prop)
+		drm_object_attach_property(obj, dsi->cmd_queue_prop, 0);
+
+	if (dsi->height_out)
+		drm_object_attach_property(obj, dsi->height_out, 0);
+
+	if (dsi->width_out)
+		drm_object_attach_property(obj, dsi->width_out, 0);
+
+	if (dsi->in_fmt)
+		drm_object_attach_property(obj, dsi->in_fmt, 0);
+
+	if (dsi->out_fmt)
+		drm_object_attach_property(obj, dsi->out_fmt, 0);
+}
+
+static int xlnx_dsi_create_connector(struct drm_encoder *encoder)
+{
+	struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+	struct drm_connector *connector = &dsi->connector;
+	int ret;
+
+	/* Hot-plug events are raised from the DSI host attach/detach hooks */
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	ret = drm_connector_init(encoder->dev, connector,
+				 &xlnx_dsi_connector_funcs,
+				 DRM_MODE_CONNECTOR_DSI);
+	if (ret) {
+		dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+		return ret;
+	}
+
+	drm_connector_helper_add(connector, &xlnx_dsi_connector_helper_funcs);
+	/*
+	 * NOTE(review): drm_connector_register() is usually deferred to
+	 * drm_dev_register() — confirm the explicit call is needed here.
+	 */
+	drm_connector_register(connector);
+	drm_connector_attach_encoder(connector, encoder);
+	xlnx_dsi_connector_create_property(connector);
+	xlnx_dsi_connector_attach_property(connector);
+
+	return 0;
+}
+
+/**
+ * xlnx_dsi_atomic_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: Pointer to drm core crtc state
+ * @connector_state: DSI connector drm state
+ *
+ * This function derives the DSI IP timing parameters from the timing
+ * values given in the attached panel driver.
+ */
+static void
+xlnx_dsi_atomic_mode_set(struct drm_encoder *encoder,
+			 struct drm_crtc_state *crtc_state,
+			 struct drm_connector_state *connector_state)
+{
+	struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+	struct videomode *vm = &dsi->vm;
+	struct drm_display_mode *m = &crtc_state->adjusted_mode;
+
+	/* Set bridge input and output parameters */
+	xlnx_bridge_set_input(dsi->bridge, m->hdisplay, m->vdisplay,
+			      dsi->in_fmt_prop_val);
+	xlnx_bridge_set_output(dsi->bridge, dsi->width_out_prop_val,
+			       dsi->height_out_prop_val,
+			       dsi->out_fmt_prop_val);
+	xlnx_bridge_enable(dsi->bridge);
+
+	/* Convert DRM mode timings into porch/sync-length videomode form */
+	vm->hactive = m->hdisplay;
+	vm->vactive = m->vdisplay;
+	vm->vfront_porch = m->vsync_start - m->vdisplay;
+	vm->vback_porch = m->vtotal - m->vsync_end;
+	vm->vsync_len = m->vsync_end - m->vsync_start;
+	vm->hfront_porch = m->hsync_start - m->hdisplay;
+	vm->hback_porch = m->htotal - m->hsync_end;
+	vm->hsync_len = m->hsync_end - m->hsync_start;
+	xlnx_dsi_set_display_mode(dsi);
+}
+
+static void xlnx_dsi_disable(struct drm_encoder *encoder)
+{
+	struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+	/* Quiesce the upstream bridge (if any) before stopping the core */
+	if (dsi->bridge)
+		xlnx_bridge_disable(dsi->bridge);
+
+	xlnx_dsi_set_display_disable(dsi);
+}
+
+static void xlnx_dsi_enable(struct drm_encoder *encoder)
+{
+	struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+	/* The bridge was already enabled in xlnx_dsi_atomic_mode_set() */
+	xlnx_dsi_set_display_enable(dsi);
+}
+
+static const struct drm_encoder_helper_funcs xlnx_dsi_encoder_helper_funcs = {
+	.atomic_mode_set = xlnx_dsi_atomic_mode_set,
+	.enable = xlnx_dsi_enable,
+	.disable = xlnx_dsi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_dsi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int xlnx_dsi_parse_dt(struct xlnx_dsi *dsi)
+{
+	struct device *dev = dsi->dev;
+	struct device_node *node = dev->of_node;
+	int ret;
+	u32 datatype;
+	/* 100x-scaled bytes-per-pixel for each enum mipi_dsi_pixel_format */
+	static const int xdsi_mul_fact[XDSI_NUM_DATA_T] = {300, 225, 225, 200};
+
+	dsi->dphy_clk_200M = devm_clk_get(dev, "dphy_clk_200M");
+	if (IS_ERR(dsi->dphy_clk_200M)) {
+		ret = PTR_ERR(dsi->dphy_clk_200M);
+		dev_err(dev, "failed to get dphy_clk_200M %d\n", ret);
+		return ret;
+	}
+
+	dsi->video_aclk = devm_clk_get(dev, "s_axis_aclk");
+	if (IS_ERR(dsi->video_aclk)) {
+		ret = PTR_ERR(dsi->video_aclk);
+		dev_err(dev, "failed to get video_clk %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Used as a multiplication factor for HACT based on used
+	 * DSI data type.
+	 *
+	 * e.g. for RGB666_L datatype and 1920x1080 resolution,
+	 * the Hact (WC) would be as follows -
+	 * 1920 pixels * 18 bits per pixel / 8 bits per byte
+	 * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+	 *
+	 * Data Type - Multiplication factor
+	 * RGB888    - 3
+	 * RGB666_L  - 2.25
+	 * RGB666_P  - 2.25
+	 * RGB565    - 2
+	 *
+	 * Since the multiplication factor maybe a floating number,
+	 * a 100x multiplication factor is used.
+	 */
+	ret = of_property_read_u32(node, "xlnx,dsi-num-lanes", &dsi->lanes);
+	if (ret < 0) {
+		dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+		return ret;
+	}
+	if (dsi->lanes > 4 || dsi->lanes < 1) {
+		dev_err(dsi->dev, "%d lanes : invalid lanes\n", dsi->lanes);
+		return -EINVAL;
+	}
+	ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+	if (ret < 0) {
+		dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+		return ret;
+	}
+	/* Validate the datatype before using it as format or array index */
+	if (datatype > MIPI_DSI_FMT_RGB565) {
+		dev_err(dsi->dev, "Invalid xlnx,dsi-data-type string\n");
+		return -EINVAL;
+	}
+	dsi->format = datatype;
+	dsi->mul_factor = xdsi_mul_fact[datatype];
+	dev_dbg(dsi->dev, "DSI controller num lanes = %d\n", dsi->lanes);
+	dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+
+	return 0;
+}
+
+static int xlnx_dsi_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &dsi->encoder;
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	/*
+	 * TODO: The possible CRTCs are 1 now as per current implementation of
+	 * DSI tx drivers. DRM framework can support more than one CRTCs and
+	 * DSI driver can be enhanced for that.
+	 */
+	encoder->possible_crtcs = 1;
+	/* drm_encoder_init() can fail; do not continue with a dead encoder */
+	ret = drm_encoder_init(drm_dev, encoder, &xlnx_dsi_encoder_funcs,
+			       DRM_MODE_ENCODER_DSI, NULL);
+	if (ret) {
+		dev_err(dsi->dev, "failed to init encoder, ret = %d\n", ret);
+		return ret;
+	}
+	drm_encoder_helper_add(encoder, &xlnx_dsi_encoder_helper_funcs);
+	ret = xlnx_dsi_create_connector(encoder);
+	if (ret) {
+		dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+	ret = mipi_dsi_host_register(&dsi->dsi_host);
+	if (ret) {
+		xlnx_dsi_connector_destroy(&dsi->connector);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+	return 0;
+}
+
+static void xlnx_dsi_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+
+	/*
+	 * NOTE(review): xlnx_dsi_disable() already disables the bridge when
+	 * present, so the explicit xlnx_bridge_disable() below is redundant —
+	 * confirm the bridge disable path is idempotent.
+	 */
+	xlnx_dsi_disable(&dsi->encoder);
+	mipi_dsi_host_unregister(&dsi->dsi_host);
+	xlnx_bridge_disable(dsi->bridge);
+}
+
+static const struct component_ops xlnx_dsi_component_ops = {
+	.bind	= xlnx_dsi_bind,
+	.unbind	= xlnx_dsi_unbind,
+};
+
+static int xlnx_dsi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct xlnx_dsi *dsi;
+	struct device_node *vpss_node;
+	int ret;
+	unsigned long rate;
+
+	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	dsi->dsi_host.ops = &xlnx_dsi_ops;
+	dsi->dsi_host.dev = dev;
+	dsi->dev = dev;
+
+	ret = xlnx_dsi_parse_dt(dsi);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->iomem = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dsi->iomem))
+		return PTR_ERR(dsi->iomem);
+
+	platform_set_drvdata(pdev, dsi);
+
+	/* Bridge support */
+	vpss_node = of_parse_phandle(dsi->dev->of_node, "xlnx,vpss", 0);
+	if (vpss_node) {
+		dsi->bridge = of_xlnx_bridge_get(vpss_node);
+		/* Drop the reference taken by of_parse_phandle() */
+		of_node_put(vpss_node);
+		if (!dsi->bridge) {
+			dev_info(dsi->dev, "Didn't get bridge instance\n");
+			return -EPROBE_DEFER;
+		}
+	}
+
+	ret = clk_set_rate(dsi->dphy_clk_200M, XDSI_DPHY_CLK_REQ);
+	if (ret) {
+		dev_err(dev, "failed to set dphy clk rate %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(dsi->dphy_clk_200M);
+	if (ret) {
+		dev_err(dev, "failed to enable dphy clk %d\n", ret);
+		return ret;
+	}
+
+	rate = clk_get_rate(dsi->dphy_clk_200M);
+	/*
+	 * Reject the clock when it falls outside [MIN, MAX]; the original
+	 * '&&' condition could never be true.
+	 */
+	if (rate < XDSI_DPHY_CLK_MIN || rate > XDSI_DPHY_CLK_MAX) {
+		dev_err(dev, "Error DPHY clock = %lu\n", rate);
+		ret = -EINVAL;
+		goto err_disable_dphy_clk;
+	}
+
+	ret = clk_prepare_enable(dsi->video_aclk);
+	if (ret) {
+		dev_err(dev, "failed to enable video clk %d\n", ret);
+		goto err_disable_dphy_clk;
+	}
+
+	ret = component_add(dev, &xlnx_dsi_component_ops);
+	if (ret < 0)
+		goto err_disable_video_clk;
+
+	return 0;
+
+err_disable_video_clk:
+	clk_disable_unprepare(dsi->video_aclk);
+err_disable_dphy_clk:
+	clk_disable_unprepare(dsi->dphy_clk_200M);
+	return ret;
+}
+
+static int xlnx_dsi_remove(struct platform_device *pdev)
+{
+	struct xlnx_dsi *dsi = platform_get_drvdata(pdev);
+
+	/* Tear down in the reverse order of probe */
+	component_del(&pdev->dev, &xlnx_dsi_component_ops);
+	clk_disable_unprepare(dsi->video_aclk);
+	clk_disable_unprepare(dsi->dphy_clk_200M);
+
+	return 0;
+}
+
+/* Device-tree match table for the "xlnx,dsi" compatible */
+static const struct of_device_id xlnx_dsi_of_match[] = {
+	{ .compatible = "xlnx,dsi"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xlnx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+	.probe = xlnx_dsi_probe,
+	.remove = xlnx_dsi_remove,
+	.driver = {
+		.name = "xlnx-dsi",
+		.of_match_table = xlnx_dsi_of_match,
+	},
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.c b/drivers/gpu/drm/xlnx/xlnx_fb.c
new file mode 100644
index 000000000000..4ef367e7ca4e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+
+#define XLNX_MAX_PLANES 4
+
+/**
+ * struct xlnx_fbdev - Xilinx fbdev emulation state
+ * @fb_helper: DRM fb helper
+ * @fb: framebuffer created for the fbdev
+ * @align: pitch alignment in bytes used when sizing the buffer
+ * @vres_mult: multiplier applied to the vertical resolution
+ */
+struct xlnx_fbdev {
+	struct drm_fb_helper fb_helper;
+	struct drm_framebuffer *fb;
+	unsigned int align;
+	unsigned int vres_mult;
+};
+
+/* Map an embedded drm_fb_helper back to its xlnx_fbdev container */
+static inline struct xlnx_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+	return container_of(fb_helper, struct xlnx_fbdev, fb_helper);
+}
+
+/*
+ * Framebuffer object functions: both callbacks are the generic GEM fb
+ * helpers. Every consumer (drm_gem_fb_create_with_funcs(),
+ * drm_gem_fbdev_fb_create()) takes a const pointer, so keep the table
+ * in read-only memory.
+ */
+static const struct drm_framebuffer_funcs xlnx_fb_funcs = {
+	.destroy	= drm_gem_fb_destroy,
+	.create_handle	= drm_gem_fb_create_handle,
+};
+
+/**
+ * xlnx_fb_ioctl - fbdev ioctl handler
+ * @info: fbdev info
+ * @cmd: ioctl command
+ * @arg: ioctl argument (unused)
+ *
+ * Implements FBIO_WAITFORVSYNC by waiting for one vblank on each CRTC
+ * bound to the fb helper. Any other command is rejected.
+ *
+ * Return: 0 on success, -ENOTTY for an unsupported command, or the
+ * drm_crtc_vblank_get() error from the last CRTC iterated.
+ */
+static int
+xlnx_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	unsigned int i;
+	int ret = 0;
+
+	switch (cmd) {
+	case FBIO_WAITFORVSYNC:
+		for (i = 0; i < fb_helper->crtc_count; i++) {
+			struct drm_mode_set *mode_set;
+			struct drm_crtc *crtc;
+
+			mode_set = &fb_helper->crtc_info[i].mode_set;
+			crtc = mode_set->crtc;
+			/* Only wait when the vblank reference was taken */
+			ret = drm_crtc_vblank_get(crtc);
+			if (!ret) {
+				drm_crtc_wait_one_vblank(crtc);
+				drm_crtc_vblank_put(crtc);
+			}
+		}
+		return ret;
+	default:
+		return -ENOTTY;
+	}
+	/* Unreachable 'return 0' after the switch removed: both branches return. */
+}
+
+/*
+ * fbdev operations: drawing ops use the generic sys_* helpers (the
+ * framebuffer is kernel virtual memory), modesetting ops defer to the
+ * DRM fb helper, and the ioctl hook adds FBIO_WAITFORVSYNC support.
+ */
+static struct fb_ops xlnx_fbdev_ops = {
+	.owner		= THIS_MODULE,
+	.fb_fillrect	= sys_fillrect,
+	.fb_copyarea	= sys_copyarea,
+	.fb_imageblit	= sys_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+	.fb_ioctl	= xlnx_fb_ioctl,
+};
+
+/**
+ * xlnx_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @size: framebuffer size info
+ *
+ * Allocates a CMA-backed GEM object sized for the (alignment-padded)
+ * surface, wraps it in a DRM framebuffer and hooks it up to an fbdev
+ * info. This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xlnx_fbdev_create(struct drm_fb_helper *fb_helper,
+			     struct drm_fb_helper_surface_size *size)
+{
+	struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+	struct drm_device *drm = fb_helper->dev;
+	struct drm_gem_cma_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned int bytes_per_pixel;
+	unsigned long offset;
+	struct fb_info *fbi;
+	u32 format;
+	const struct drm_format_info *info;
+	size_t bytes;
+	int ret;
+
+	dev_dbg(drm->dev, "surface width(%d), height(%d) and bpp(%d)\n",
+		size->surface_width, size->surface_height, size->surface_bpp);
+
+	/* Scale the height by the virtual-resolution multiplier */
+	size->surface_height *= fbdev->vres_mult;
+	bytes_per_pixel = DIV_ROUND_UP(size->surface_bpp, 8);
+	/* Pitch must honour the pipeline's alignment requirement */
+	bytes = ALIGN(size->surface_width * bytes_per_pixel, fbdev->align);
+	bytes *= size->surface_height;
+
+	obj = drm_gem_cma_create(drm, bytes);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	fbi = framebuffer_alloc(0, drm->dev);
+	if (!fbi) {
+		dev_err(drm->dev, "Failed to allocate framebuffer info.\n");
+		ret = -ENOMEM;
+		goto err_drm_gem_cma_free_object;
+	}
+
+	/* Override the depth given by fb helper with current format value */
+	format = xlnx_get_format(drm);
+	info = drm_format_info(format);
+	/* drm_format_info() returns NULL for an unknown fourcc - guard it */
+	if (info && size->surface_bpp == info->cpp[0] * 8)
+		size->surface_depth = info->depth;
+
+	fbdev->fb = drm_gem_fbdev_fb_create(drm, size, fbdev->align, &obj->base,
+					    &xlnx_fb_funcs);
+	if (IS_ERR(fbdev->fb)) {
+		dev_err(drm->dev, "Failed to allocate DRM framebuffer.\n");
+		ret = PTR_ERR(fbdev->fb);
+		goto err_framebuffer_release;
+	}
+
+	fb = fbdev->fb;
+	fb_helper->fb = fb;
+	fb_helper->fbdev = fbi;
+	fbi->par = fb_helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &xlnx_fbdev_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		dev_err(drm->dev, "Failed to allocate color map.\n");
+		goto err_fb_destroy;
+	}
+
+	drm_fb_helper_fill_info(fbi, fb_helper, size);
+	/* Report only the real (non-virtual) height to userspace */
+	fbi->var.yres = fb->height / fbdev->vres_mult;
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+
+	drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+	fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+	fbi->screen_size = bytes;
+	fbi->fix.smem_len = bytes;
+
+	return 0;
+
+err_fb_destroy:
+	drm_framebuffer_unregister_private(fb);
+	drm_gem_fb_destroy(fb);
+err_framebuffer_release:
+	framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+	drm_gem_cma_free_object(&obj->base);
+	return ret;
+}
+
+/*
+ * fb helper callbacks; drm_fb_helper_prepare() takes a const pointer,
+ * so the table can live in read-only memory.
+ */
+static const struct drm_fb_helper_funcs xlnx_fb_helper_funcs = {
+	.fb_probe = xlnx_fbdev_create,
+};
+
+/**
+ * xlnx_fb_init - Allocate and initializes the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or a ERR_PTR.
+ */
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+	     unsigned int max_conn_count, unsigned int align,
+	     unsigned int vres_mult)
+{
+	struct xlnx_fbdev *fbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev)
+		return ERR_PTR(-ENOMEM);
+
+	fbdev->vres_mult = vres_mult;
+	fbdev->align = align;
+	fb_helper = &fbdev->fb_helper;
+	drm_fb_helper_prepare(drm, fb_helper, &xlnx_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+	if (ret < 0) {
+		dev_err(drm->dev, "Failed to initialize drm fb helper.\n");
+		goto err_free;
+	}
+
+	/* Attach all currently registered connectors to the helper */
+	ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+	if (ret < 0) {
+		dev_err(drm->dev, "Failed to add connectors.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	/* Sets the initial mode; invokes xlnx_fbdev_create() via .fb_probe */
+	ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+	if (ret < 0) {
+		dev_err(drm->dev, "Failed to set initial hw configuration.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	return fb_helper;
+
+err_drm_fb_helper_fini:
+	drm_fb_helper_fini(fb_helper);
+err_free:
+	kfree(fbdev);
+	return ERR_PTR(ret);
+}
+
+/**
+ * xlnx_fbdev_defio_fini - Release deferred-io state of an fbdev
+ * @fbi: fb_info struct
+ *
+ * No-op unless deferred io was configured; otherwise tears it down and
+ * frees the associated fbdefio and fbops allocations.
+ * This function is based on drm_fbdev_cma_defio_fini().
+ */
+static void xlnx_fbdev_defio_fini(struct fb_info *fbi)
+{
+	struct fb_deferred_io *defio = fbi->fbdefio;
+
+	if (!defio)
+		return;
+
+	fb_deferred_io_cleanup(fbi);
+	kfree(defio);
+	kfree(fbi->fbops);
+}
+
+/**
+ * xlnx_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct returned by xlnx_fb_init()
+ *
+ * Unregisters the fbdev info, releases any deferred-io state, removes
+ * the framebuffer and frees the helper container.
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper)
+{
+	struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+	struct fb_info *fbi = fb_helper->fbdev;
+
+	drm_fb_helper_unregister_fbi(fb_helper);
+	if (fbi)
+		xlnx_fbdev_defio_fini(fbi);
+
+	if (fb_helper->fb)
+		drm_framebuffer_remove(fb_helper->fb);
+
+	drm_fb_helper_fini(fb_helper);
+	kfree(fbdev);
+}
+
+/**
+ * xlnx_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer with xlnx_fb_funcs for given mode
+ * @mode_cmd. This function is intended to be used for the fb_create callback
+ * function of drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or
+ * ERR_PTR from drm_gem_fb_create_with_funcs().
+ */
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+	       const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
+					    &xlnx_fb_funcs);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.h b/drivers/gpu/drm/xlnx/xlnx_fb.h
new file mode 100644
index 000000000000..6efc985f2fb3
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_FB_H_
+#define _XLNX_FB_H_
+
+struct drm_fb_helper;
+
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult);
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper);
+
+#endif /* _XLNX_FB_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.c b/drivers/gpu/drm/xlnx/xlnx_gem.c
new file mode 100644
index 000000000000..4a5d533ec72e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xlnx_drv.h"
+#include "xlnx_gem.h"
+
+/**
+ * xlnx_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function is for dumb_create callback of drm_driver struct. Simply
+ * it wraps around drm_gem_cma_dumb_create() and sets the pitch value
+ * by retrieving the value from the device.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create()
+ */
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
+			     struct drm_mode_create_dumb *args)
+{
+	int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	unsigned int align = xlnx_get_align(drm);
+
+	/* Honour a caller-supplied pitch only if it meets the alignment */
+	if (!args->pitch || !IS_ALIGNED(args->pitch, align))
+		args->pitch = ALIGN(pitch, align);
+
+	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.h b/drivers/gpu/drm/xlnx/xlnx_gem.h
new file mode 100644
index 000000000000..f380de916379
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_GEM_H_
+#define _XLNX_GEM_H_
+
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _XLNX_GEM_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_mixer.c b/drivers/gpu/drm/xlnx/xlnx_mixer.c
new file mode 100644
index 000000000000..2daa4fda078f
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_mixer.c
@@ -0,0 +1,2821 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx logicore video mixer driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ * : Jeffrey Mouroux <jmouroux@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/dmaengine.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/**************************** Register Data **********************************/
+#define XVMIX_AP_CTRL 0x00000
+#define XVMIX_GIE 0x00004
+#define XVMIX_IER 0x00008
+#define XVMIX_ISR 0x0000c
+#define XVMIX_WIDTH_DATA 0x00010
+#define XVMIX_HEIGHT_DATA 0x00018
+#define XVMIX_BACKGROUND_Y_R_DATA 0x00028
+#define XVMIX_BACKGROUND_U_G_DATA 0x00030
+#define XVMIX_BACKGROUND_V_B_DATA 0x00038
+#define XVMIX_LAYERENABLE_DATA 0x00040
+#define XVMIX_LAYERALPHA_0_DATA 0x00100
+#define XVMIX_LAYERSTARTX_0_DATA 0x00108
+#define XVMIX_LAYERSTARTY_0_DATA 0x00110
+#define XVMIX_LAYERWIDTH_0_DATA 0x00118
+#define XVMIX_LAYERSTRIDE_0_DATA 0x00120
+#define XVMIX_LAYERHEIGHT_0_DATA 0x00128
+#define XVMIX_LAYERSCALE_0_DATA 0x00130
+#define XVMIX_LAYERVIDEOFORMAT_0_DATA 0x00138
+#define XVMIX_LAYER1_BUF1_V_DATA 0x00240
+#define XVMIX_LAYER1_BUF2_V_DATA 0x0024c
+#define XVMIX_LOGOSTARTX_DATA 0x01000
+#define XVMIX_LOGOSTARTY_DATA 0x01008
+#define XVMIX_LOGOWIDTH_DATA 0x01010
+#define XVMIX_LOGOHEIGHT_DATA 0x01018
+#define XVMIX_LOGOSCALEFACTOR_DATA 0x01020
+#define XVMIX_LOGOALPHA_DATA 0x01028
+#define XVMIX_LOGOCLRKEYMIN_R_DATA 0x01030
+#define XVMIX_LOGOCLRKEYMIN_G_DATA 0x01038
+#define XVMIX_LOGOCLRKEYMIN_B_DATA 0x01040
+#define XVMIX_LOGOCLRKEYMAX_R_DATA 0x01048
+#define XVMIX_LOGOCLRKEYMAX_G_DATA 0x01050
+#define XVMIX_LOGOCLRKEYMAX_B_DATA 0x01058
+#define XVMIX_LOGOR_V_BASE 0x10000
+#define XVMIX_LOGOR_V_HIGH 0x10fff
+#define XVMIX_LOGOG_V_BASE 0x20000
+#define XVMIX_LOGOG_V_HIGH 0x20fff
+#define XVMIX_LOGOB_V_BASE 0x30000
+#define XVMIX_LOGOB_V_HIGH 0x30fff
+#define XVMIX_LOGOA_V_BASE 0x40000
+#define XVMIX_LOGOA_V_HIGH 0x40fff
+
+/************************** Constant Definitions *****************************/
+#define XVMIX_LOGO_OFFSET 0x1000
+#define XVMIX_MASK_DISABLE_ALL_LAYERS 0x0
+#define XVMIX_REG_OFFSET 0x100
+#define XVMIX_MASTER_LAYER_IDX 0x0
+#define XVMIX_LOGO_LAYER_IDX 0x1
+#define XVMIX_DISP_MAX_WIDTH 4096
+#define XVMIX_DISP_MAX_HEIGHT 2160
+#define XVMIX_MAX_OVERLAY_LAYERS 16
+#define XVMIX_MAX_BPC 16
+#define XVMIX_ALPHA_MIN 0
+#define XVMIX_ALPHA_MAX 256
+#define XVMIX_LAYER_WIDTH_MIN 64
+#define XVMIX_LAYER_HEIGHT_MIN 64
+#define XVMIX_LOGO_LAYER_WIDTH_MIN 32
+#define XVMIX_LOGO_LAYER_HEIGHT_MIN 32
+#define XVMIX_LOGO_LAYER_WIDTH_MAX 256
+#define XVMIX_LOGO_LAYER_HEIGHT_MAX 256
+#define XVMIX_IRQ_DONE_MASK BIT(0)
+#define XVMIX_GIE_EN_MASK BIT(0)
+#define XVMIX_AP_EN_MASK BIT(0)
+#define XVMIX_AP_RST_MASK BIT(7)
+#define XVMIX_MAX_NUM_SUB_PLANES 4
+#define XVMIX_SCALE_FACTOR_1X 0
+#define XVMIX_SCALE_FACTOR_2X 1
+#define XVMIX_SCALE_FACTOR_4X 2
+#define XVMIX_SCALE_FACTOR_INVALID 3
+#define XVMIX_BASE_ALIGN 8
+
+/*************************** STATIC DATA ************************************/
+/*
+ * DRM fourcc codes handled by the mixer driver. NOTE(review): the order
+ * presumably mirrors the IP's internal video-format enumeration - confirm
+ * before reordering.
+ */
+static const u32 color_table[] = {
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_AYUV,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV16,
+	DRM_FORMAT_Y8,
+	DRM_FORMAT_Y10,
+	DRM_FORMAT_XVUY2101010,
+	DRM_FORMAT_VUY888,
+	DRM_FORMAT_XVUY8888,
+	DRM_FORMAT_XV15,
+	DRM_FORMAT_XV20,
+};
+
+/*********************** Inline Functions/Macros *****************************/
+#define to_mixer_hw(p) (&((p)->mixer->mixer_hw))
+#define to_xlnx_crtc(x) container_of(x, struct xlnx_crtc, crtc)
+#define to_xlnx_plane(x) container_of(x, struct xlnx_mix_plane, base)
+#define to_xlnx_mixer(x) container_of(x, struct xlnx_mix, crtc)
+
+/**
+ * enum xlnx_mix_layer_id - Describes the layer by index to be acted upon
+ * @XVMIX_LAYER_MASTER: Master layer
+ * @XVMIX_LAYER_1: Layer 1
+ * @XVMIX_LAYER_2: Layer 2
+ * @XVMIX_LAYER_3: Layer 3
+ * @XVMIX_LAYER_4: Layer 4
+ * @XVMIX_LAYER_5: Layer 5
+ * @XVMIX_LAYER_6: Layer 6
+ * @XVMIX_LAYER_7: Layer 7
+ * @XVMIX_LAYER_8: Layer 8
+ * @XVMIX_LAYER_9: Layer 9
+ * @XVMIX_LAYER_10: Layer 10
+ * @XVMIX_LAYER_11: Layer 11
+ * @XVMIX_LAYER_12: Layer 12
+ * @XVMIX_LAYER_13: Layer 13
+ * @XVMIX_LAYER_14: Layer 14
+ * @XVMIX_LAYER_15: Layer 15
+ * @XVMIX_LAYER_16: Layer 16
+ */
+enum xlnx_mix_layer_id {
+ XVMIX_LAYER_MASTER = 0,
+ XVMIX_LAYER_1,
+ XVMIX_LAYER_2,
+ XVMIX_LAYER_3,
+ XVMIX_LAYER_4,
+ XVMIX_LAYER_5,
+ XVMIX_LAYER_6,
+ XVMIX_LAYER_7,
+ XVMIX_LAYER_8,
+ XVMIX_LAYER_9,
+ XVMIX_LAYER_10,
+ XVMIX_LAYER_11,
+ XVMIX_LAYER_12,
+ XVMIX_LAYER_13,
+ XVMIX_LAYER_14,
+ XVMIX_LAYER_15,
+ XVMIX_LAYER_16
+};
+
+/**
+ * struct xlnx_mix_layer_data - Describes the hardware configuration of a given
+ * mixer layer
+ * @hw_config: struct specifying the IP hardware constraints for this layer
+ * @vid_fmt: DRM format for this layer
+ * @can_alpha: Indicates that layer alpha is enabled for this layer
+ * @can_scale: Indicates that layer scaling is enabled for this layer
+ * @is_streaming: Indicates layer is not using mixer DMA but streaming from
+ *  external DMA
+ * @max_width: Max possible pixel width
+ * @max_height: Max possible pixel height
+ * @min_width: Min possible pixel width
+ * @min_height: Min possible pixel height
+ * @layer_regs: struct containing current cached register values
+ * @buff_addr1: Current physical address of image buffer 1
+ * @buff_addr2: Current physical address of image buffer 2
+ * @x_pos: Current CRTC x offset
+ * @y_pos: Current CRTC y offset
+ * @width: Current width in pixels
+ * @height: Current height in pixels
+ * @stride: Current stride (when Mixer is performing DMA)
+ * @alpha: Current alpha setting
+ * @is_active: Logical flag indicating layer in use. If false, calls to
+ * enable layer will be ignored.
+ * @scale_fact: Current scaling factor applied to layer
+ * @id: The logical layer id identifies which layer this struct describes
+ * (e.g. 0 = master, 1-15 = overlay).
+ *
+ * All mixer layers are represented by an instance of this struct:
+ * output streaming, overlay, logo.
+ * Current layer-specific register state is stored in the layer_regs struct.
+ * The hardware configuration is stored in struct hw_config.
+ *
+ * Note:
+ * Some properties of the logo layer are unique and not described in this
+ * struct. Those properties are part of the xlnx_mix struct as global
+ * properties.
+ */
+struct xlnx_mix_layer_data {
+	struct {
+		u32 vid_fmt;
+		bool can_alpha;
+		bool can_scale;
+		bool is_streaming;
+		u32 max_width;
+		u32 max_height;
+		u32 min_width;
+		u32 min_height;
+	} hw_config;
+
+	struct {
+		u64 buff_addr1;
+		u64 buff_addr2;
+		u32 x_pos;
+		u32 y_pos;
+		u32 width;
+		u32 height;
+		u32 stride;
+		u32 alpha;
+		bool is_active;
+		u32 scale_fact;
+	} layer_regs;
+
+	enum xlnx_mix_layer_id id;
+};
+
+/**
+ * struct xlnx_mix_hw - Describes a mixer IP block instance within the design
+ * @base: Base physical address of Mixer IP in memory map
+ * @logo_layer_en: Indicates logo layer is enabled in hardware
+ * @logo_pixel_alpha_enabled: Indicates that per-pixel alpha supported for logo
+ *  layer
+ * @max_layer_width: Max possible width for any layer on this Mixer
+ * @max_layer_height: Max possible height for any layer on this Mixer
+ * @max_logo_layer_width: Max possible width of the logo layer
+ * @max_logo_layer_height: Max possible height of the logo layer
+ * @num_layers: Max number of layers (excl: logo)
+ * @bg_layer_bpc: Bits per component for the background streaming layer
+ * @dma_addr_size: dma address size in bits
+ * @ppc: Pixels per component
+ * @irq: Interrupt request number assigned
+ * @bg_color: Current RGB color value for internal background color generator
+ * @layer_data: Array of layer data
+ * @layer_cnt: Layer data array count
+ * @max_layers: Maximum number of layers supported by hardware
+ * @logo_layer_id: Index of logo layer
+ * @logo_en_mask: Mask used to enable logo layer
+ * @enable_all_mask: Mask used to enable all layers
+ * @reset_gpio: GPIO line used to reset IP between modesetting operations
+ * @intrpt_handler_fn: Interrupt handler function called when frame is completed
+ * @intrpt_data: Data pointer passed to interrupt handler
+ *
+ * Used as the primary data structure for many L2 driver functions. Logo layer
+ * data, if enabled within the IP, is described in this structure. All other
+ * layers are described by an instance of xlnx_mix_layer_data referenced by this
+ * struct.
+ *
+ */
+struct xlnx_mix_hw {
+	void __iomem *base;
+	bool logo_layer_en;
+	bool logo_pixel_alpha_enabled;
+	u32 max_layer_width;
+	u32 max_layer_height;
+	u32 max_logo_layer_width;
+	u32 max_logo_layer_height;
+	u32 num_layers;
+	u32 bg_layer_bpc;
+	u32 dma_addr_size;
+	u32 ppc;
+	int irq;
+	u64 bg_color;
+	struct xlnx_mix_layer_data *layer_data;
+	u32 layer_cnt;
+	u32 max_layers;
+	u32 logo_layer_id;
+	u32 logo_en_mask;
+	u32 enable_all_mask;
+	struct gpio_desc *reset_gpio;
+	void (*intrpt_handler_fn)(void *);
+	void *intrpt_data;
+};
+
+/**
+ * struct xlnx_mix - Container for interfacing DRM driver to mixer
+ * @mixer_hw: Object representing actual hardware state of mixer
+ * @master: Logical master device from xlnx drm
+ * @crtc: Xilinx DRM driver crtc object
+ * @drm_primary_layer: Hardware layer serving as logical DRM primary layer
+ * @hw_master_layer: Base video streaming layer
+ * @hw_logo_layer: Hardware logo layer
+ * @planes: Mixer overlay layers
+ * @num_planes : number of planes
+ * @max_width : maximum width of plane
+ * @max_height : maximum height of plane
+ * @max_cursor_width : maximum cursor width
+ * @max_cursor_height: maximum cursor height
+ * @alpha_prop: Global layer alpha property
+ * @scale_prop: Layer scale property (1x, 2x or 4x)
+ * @bg_color: Background color property for primary layer
+ * @drm: core drm object
+ * @pixel_clock: pixel clock for mixer
+ * @pixel_clock_enabled: pixel clock status
+ * @dpms: mixer drm state
+ * @event: vblank pending event
+ * @vtc_bridge: vtc_bridge structure
+ *
+ * Contains pointers to logical constructions such as the DRM plane manager as
+ * well as pointers to distinguish the mixer layer serving as the DRM "primary"
+ * plane from the actual mixer layer which serves as the background layer in
+ * hardware.
+ *
+ */
+struct xlnx_mix {
+	struct xlnx_mix_hw mixer_hw;
+	struct platform_device *master;
+	struct xlnx_crtc crtc;
+	struct xlnx_mix_plane *drm_primary_layer;
+	struct xlnx_mix_plane *hw_master_layer;
+	struct xlnx_mix_plane *hw_logo_layer;
+	struct xlnx_mix_plane *planes;
+	u32 num_planes;
+	u32 max_width;
+	u32 max_height;
+	u32 max_cursor_width;
+	u32 max_cursor_height;
+	struct drm_property *alpha_prop;
+	struct drm_property *scale_prop;
+	struct drm_property *bg_color;
+	struct drm_device *drm;
+	struct clk *pixel_clock;
+	bool pixel_clock_enabled;
+	int dpms;
+	struct drm_pending_vblank_event *event;
+	struct xlnx_bridge *vtc_bridge;
+};
+
+/**
+ * struct xlnx_mix_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xlnx_mix_plane_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+ bool is_active;
+};
+
+/**
+ * struct xlnx_mix_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @mixer_layer: video mixer hardware layer data instance
+ * @mixer: mixer DRM object
+ * @dma: dma object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @format: pixel format
+ */
+struct xlnx_mix_plane {
+ struct drm_plane base;
+ struct xlnx_mix_layer_data *mixer_layer;
+ struct xlnx_mix *mixer;
+ struct xlnx_mix_plane_dma dma[XVMIX_MAX_NUM_SUB_PLANES];
+ int id;
+ int dpms;
+ u32 format;
+};
+
+/* Write a 32-bit value to the mixer register at @offset */
+static inline void reg_writel(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/*
+ * Write a 64-bit value as two consecutive 32-bit writes, low word first.
+ * NOTE(review): assumes the IP accepts the wide register as two 32-bit
+ * halves at @offset and @offset + 4 - confirm against the IP spec.
+ */
+static inline void reg_writeq(void __iomem *base, int offset, u64 val)
+{
+	writel(lower_32_bits(val), base + offset);
+	writel(upper_32_bits(val), base + offset + 4);
+}
+
+/* Read a 32-bit value from the mixer register at @offset */
+static inline u32 reg_readl(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/**
+ * xlnx_mix_intrpt_enable_done - Enables interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Unmasks the frame-done interrupt and turns on the global interrupt
+ * enable in the mixer core.
+ */
+static void xlnx_mix_intrpt_enable_done(struct xlnx_mix_hw *mixer)
+{
+	u32 ier = reg_readl(mixer->base, XVMIX_IER) | XVMIX_IRQ_DONE_MASK;
+
+	reg_writel(mixer->base, XVMIX_IER, ier);
+	reg_writel(mixer->base, XVMIX_GIE, XVMIX_GIE_EN_MASK);
+}
+
+/**
+ * xlnx_mix_intrpt_disable - Disable interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Masks the frame-done interrupt and clears the global interrupt enable
+ * in the mixer core.
+ */
+static void xlnx_mix_intrpt_disable(struct xlnx_mix_hw *mixer)
+{
+	u32 ier = reg_readl(mixer->base, XVMIX_IER) & ~XVMIX_IRQ_DONE_MASK;
+
+	reg_writel(mixer->base, XVMIX_IER, ier);
+	reg_writel(mixer->base, XVMIX_GIE, 0);
+}
+
+/**
+ * xlnx_mix_start - Start the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Sets the auto-restart and enable bits in the AP control register so
+ * the core starts generating video frames.
+ */
+static void xlnx_mix_start(struct xlnx_mix_hw *mixer)
+{
+	reg_writel(mixer->base, XVMIX_AP_CTRL,
+		   XVMIX_AP_RST_MASK | XVMIX_AP_EN_MASK);
+}
+
+/**
+ * xlnx_mix_stop - Stop the mixer core video generator
+ * @mixer: Mixer core instance for which to stop video output
+ *
+ * Stops the core by clearing all bits of the AP control register.
+ */
+static void xlnx_mix_stop(struct xlnx_mix_hw *mixer)
+{
+	reg_writel(mixer->base, XVMIX_AP_CTRL, 0);
+}
+
+/* Return the frame-done bit of the interrupt status register */
+static inline uint32_t xlnx_mix_get_intr_status(struct xlnx_mix_hw *mixer)
+{
+	return reg_readl(mixer->base, XVMIX_ISR) & XVMIX_IRQ_DONE_MASK;
+}
+
+/* Acknowledge (clear) the given bits in the interrupt status register */
+static inline void xlnx_mix_clear_intr_status(struct xlnx_mix_hw *mixer,
+					      uint32_t intr)
+{
+	reg_writel(mixer->base, XVMIX_ISR, intr);
+}
+
+/**
+ * xlnx_mix_get_layer_data - Retrieve current hardware and register
+ * values for a logical video layer
+ * @mixer: Mixer instance to interrogate
+ * @id: Id of layer for which data is requested
+ *
+ * Return:
+ * Structure containing layer-specific data; NULL upon failure
+ */
+static struct xlnx_mix_layer_data *
+xlnx_mix_get_layer_data(struct xlnx_mix_hw *mixer, enum xlnx_mix_layer_id id)
+{
+	u32 i;
+	struct xlnx_mix_layer_data *layer_data;
+
+	/*
+	 * 'i < layer_cnt' avoids the unsigned underflow the previous
+	 * 'i <= layer_cnt - 1' bound hit when layer_cnt is 0, which made
+	 * the loop read far past the end of layer_data[].
+	 */
+	for (i = 0; i < mixer->layer_cnt; i++) {
+		layer_data = &mixer->layer_data[i];
+		if (layer_data->id == id)
+			return layer_data;
+	}
+	return NULL;
+}
+
+/**
+ * xlnx_mix_set_active_area - Sets the number of active horizontal and
+ * vertical scan lines for the mixer background layer.
+ * @mixer: Mixer instance for which to set a new viewable area
+ * @hactive: Width of new background image dimension
+ * @vactive: Height of new background image dimension
+ *
+ * Minimum values are 64x64 with maximum values determined by the IP hardware
+ * design.
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_active_area(struct xlnx_mix_hw *mixer,
+				    u32 hactive, u32 vactive)
+{
+	struct xlnx_mix_layer_data *ld =
+		xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+
+	/* Lookup can fail; dereferencing a NULL layer would oops */
+	if (!ld) {
+		DRM_ERROR("Master layer data not found\n");
+		return -EINVAL;
+	}
+	if (hactive > ld->hw_config.max_width ||
+	    vactive > ld->hw_config.max_height) {
+		DRM_ERROR("Invalid layer dimension\n");
+		return -EINVAL;
+	}
+	/* set resolution */
+	reg_writel(mixer->base, XVMIX_HEIGHT_DATA, vactive);
+	reg_writel(mixer->base, XVMIX_WIDTH_DATA, hactive);
+	ld->layer_regs.width = hactive;
+	ld->layer_regs.height = vactive;
+
+	return 0;
+}
+
+/**
+ * is_window_valid - Validate requested plane dimensions
+ * @mixer: Mixer core instance for which to validate the window
+ * @x_pos: x position requested for start of plane
+ * @y_pos: y position requested for start of plane
+ * @width: width of plane
+ * @height: height of plane
+ * @scale: scale factor of plane
+ *
+ * Validates if the requested window is within the frame boundary
+ *
+ * Return:
+ * true on success, false on failure
+ */
+static bool is_window_valid(struct xlnx_mix_hw *mixer, u32 x_pos, u32 y_pos,
+			    u32 width, u32 height, u32 scale)
+{
+	/* const table: indexed by XVMIX_SCALE_FACTOR_{1X,2X,4X} */
+	static const u32 scale_factor[3] = {1, 2, 4};
+	struct xlnx_mix_layer_data *master_layer;
+
+	master_layer = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+	/* Lookup can fail; dereferencing a NULL layer would oops */
+	if (!master_layer) {
+		DRM_ERROR("Master layer data not found\n");
+		return false;
+	}
+
+	/* Check if window scale factor is set */
+	if (scale < XVMIX_SCALE_FACTOR_INVALID) {
+		width *= scale_factor[scale];
+		height *= scale_factor[scale];
+	}
+
+	/* verify overlay falls within currently active background area */
+	if (((x_pos + width) <= master_layer->layer_regs.width) &&
+	    ((y_pos + height) <= master_layer->layer_regs.height))
+		return true;
+
+	DRM_ERROR("Requested plane dimensions can't be set\n");
+	return false;
+}
+
+/**
+ * xlnx_mix_layer_enable - Enables the requested layers
+ * @mixer: Mixer instance in which to enable a video layer
+ * @id: Logical id of the layer to enable
+ *
+ * Enables (permits video output for) the layer denoted by @id in the
+ * IP core. Layer 0 is the background layer and mixer->logo_layer_id the
+ * logo layer. Passing the max layers value enables all layers at once.
+ */
+static void xlnx_mix_layer_enable(struct xlnx_mix_hw *mixer,
+				  enum xlnx_mix_layer_id id)
+{
+	struct xlnx_mix_layer_data *layer_data;
+	u32 curr_state;
+
+	/* Ensure layer is marked as 'active' by application before
+	 * turning on in hardware. In some cases, layer register data
+	 * may be written to otherwise inactive layers in lieu of, eventually,
+	 * turning them on.
+	 */
+	layer_data = xlnx_mix_get_layer_data(mixer, id);
+	if (!layer_data) {
+		DRM_ERROR("Invalid layer id %d\n", id);
+		return;
+	}
+	if (!layer_data->layer_regs.is_active)
+		return; /* for inactive layers silently return */
+
+	/* Check if request is to enable all layers or single layer */
+	if (id == mixer->max_layers) {
+		reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+			   mixer->enable_all_mask);
+
+	} else if ((id < mixer->layer_cnt) || ((id == mixer->logo_layer_id) &&
+					       mixer->logo_layer_en)) {
+		/* read-modify-write: set only this layer's enable bit */
+		curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+		if (id == mixer->logo_layer_id)
+			curr_state |= mixer->logo_en_mask;
+		else
+			curr_state |= BIT(id);
+		reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+	} else {
+		DRM_ERROR("Can't enable requested layer %d\n", id);
+	}
+}
+
+/**
+ * xlnx_mix_disp_layer_enable - Enables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to enable
+ *
+ */
+static void xlnx_mix_disp_layer_enable(struct xlnx_mix_plane *plane)
+{
+	struct xlnx_mix_hw *mixer_hw;
+	struct xlnx_mix_layer_data *l_data;
+	u32 id;
+
+	if (!plane)
+		return;
+	mixer_hw = to_mixer_hw(plane);
+	l_data = plane->mixer_layer;
+	id = l_data->id;
+	/*
+	 * 'id' is unsigned, so the old 'id < XVMIX_LAYER_MASTER' (id < 0)
+	 * half of this check could never be true; only the upper bound
+	 * needs checking.
+	 */
+	if (id > mixer_hw->logo_layer_id) {
+		DRM_DEBUG_KMS("Attempt to activate invalid layer: %d\n", id);
+		return;
+	}
+	/* Master layer is only enabled when it is fed by external DMA */
+	if (id == XVMIX_LAYER_MASTER && !l_data->hw_config.is_streaming)
+		return;
+
+	xlnx_mix_layer_enable(mixer_hw, id);
+}
+
+/**
+ * xlnx_mix_layer_disable - Disables the requested layer
+ * @mixer: Mixer for which the layer will be disabled
+ * @id: Logical id of the layer to be disabled (0-16)
+ *
+ * Disables the layer denoted by layer_id in the IP core.
+ * Layer 0 indicates the background layer and mixer->logo_layer_id the
+ * logo layer. Passing the value of max layers will disable all layers.
+ */
+static void xlnx_mix_layer_disable(struct xlnx_mix_hw *mixer,
+				   enum xlnx_mix_layer_id id)
+{
+	u32 enable_bits;
+
+	/* Special id: turn off every layer in one register write */
+	if (id == mixer->max_layers) {
+		reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+			   XVMIX_MASK_DISABLE_ALL_LAYERS);
+		return;
+	}
+
+	if (id >= mixer->layer_cnt &&
+	    !(id == mixer->logo_layer_id && mixer->logo_layer_en)) {
+		DRM_ERROR("Can't disable requested layer %d\n", id);
+		return;
+	}
+
+	/* Read-modify-write: clear only this layer's enable bit */
+	enable_bits = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+	if (id == mixer->logo_layer_id)
+		enable_bits &= ~(mixer->logo_en_mask);
+	else
+		enable_bits &= ~(BIT(id));
+	reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, enable_bits);
+}
+
+/**
+ * xlnx_mix_disp_layer_disable - Disables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to disable
+ *
+ */
+static void xlnx_mix_disp_layer_disable(struct xlnx_mix_plane *plane)
+{
+	struct xlnx_mix_hw *mix_hw;
+	u32 id;
+
+	if (!plane)
+		return;
+
+	mix_hw = to_mixer_hw(plane);
+	id = plane->mixer_layer->id;
+	/* Silently ignore ids outside the valid hardware layer range */
+	if (id < XVMIX_LAYER_MASTER || id > mix_hw->logo_layer_id)
+		return;
+
+	xlnx_mix_layer_disable(mix_hw, id);
+}
+
+/* Mark the layer backing @plane inactive so hardware enables skip it */
+static int xlnx_mix_mark_layer_inactive(struct xlnx_mix_plane *plane)
+{
+	struct xlnx_mix_layer_data *layer;
+
+	if (!plane)
+		return -ENODEV;
+
+	layer = plane->mixer_layer;
+	if (!layer)
+		return -ENODEV;
+
+	layer->layer_regs.is_active = false;
+	return 0;
+}
+
+/* apply mode to plane pipe: prepare and kick one interleaved DMA
+ * transfer per active sub-plane (up to XVMIX_MAX_NUM_SUB_PLANES)
+ */
+static void xlnx_mix_plane_commit(struct drm_plane *base_plane)
+{
+	struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+	struct dma_async_tx_descriptor *desc;
+	enum dma_ctrl_flags flags;
+	unsigned int i;
+
+	/* for xlnx video framebuffer dma, if used */
+	/* NOTE(review): dma[0].chan is passed without a NULL check here,
+	 * while the loop below does check; presumably
+	 * xilinx_xdma_drm_config() tolerates a NULL channel -- confirm.
+	 */
+	xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+	for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+		struct xlnx_mix_plane_dma *dma = &plane->dma[i];
+
+		if (dma->chan && dma->is_active) {
+			flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+			desc = dmaengine_prep_interleaved_dma(dma->chan,
+							      &dma->xt,
+							      flags);
+			if (!desc) {
+				/* abort: earlier sub-planes stay submitted */
+				DRM_ERROR("failed to prepare DMA descriptor\n");
+				return;
+			}
+			dmaengine_submit(desc);
+			dma_async_issue_pending(dma->chan);
+		}
+	}
+}
+
+/* Maximum width (pixels) supported by this plane's mixer */
+static int xlnx_mix_plane_get_max_width(struct drm_plane *base_plane)
+{
+	return to_xlnx_plane(base_plane)->mixer->max_width;
+}
+
+/* Maximum height (pixels) supported by this plane's mixer */
+static int xlnx_mix_plane_get_max_height(struct drm_plane *base_plane)
+{
+	return to_xlnx_plane(base_plane)->mixer->max_height;
+}
+
+/* Maximum cursor width (pixels) supported by this plane's mixer */
+static int xlnx_mix_plane_get_max_cursor_width(struct drm_plane *base_plane)
+{
+	return to_xlnx_plane(base_plane)->mixer->max_cursor_width;
+}
+
+/* Maximum cursor height (pixels) supported by this plane's mixer */
+static int xlnx_mix_plane_get_max_cursor_height(struct drm_plane *base_plane)
+{
+	return to_xlnx_plane(base_plane)->mixer->max_cursor_height;
+}
+
+/* xlnx_crtc callback: max active width, taken from the primary plane */
+static int xlnx_mix_crtc_get_max_width(struct xlnx_crtc *crtc)
+{
+	return xlnx_mix_plane_get_max_width(crtc->crtc.primary);
+}
+
+/* xlnx_crtc callback: max active height, taken from the primary plane */
+static int xlnx_mix_crtc_get_max_height(struct xlnx_crtc *crtc)
+{
+	return xlnx_mix_plane_get_max_height(crtc->crtc.primary);
+}
+
+/* xlnx_crtc callback: max cursor width, taken from the primary plane */
+static unsigned int xlnx_mix_crtc_get_max_cursor_width(struct xlnx_crtc *crtc)
+{
+	return xlnx_mix_plane_get_max_cursor_width(crtc->crtc.primary);
+}
+
+/* xlnx_crtc callback: max cursor height, taken from the primary plane */
+static unsigned int xlnx_mix_crtc_get_max_cursor_height(struct xlnx_crtc *crtc)
+{
+	return xlnx_mix_plane_get_max_cursor_height(crtc->crtc.primary);
+}
+
+/**
+ * xlnx_mix_crtc_get_format - Get the current device format
+ * @crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_mix_crtc_get_format(struct xlnx_crtc *crtc)
+{
+	return to_xlnx_plane(crtc->crtc.primary)->format;
+}
+
+/**
+ * xlnx_mix_crtc_get_align - Get the alignment value for pitch
+ * @crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+static unsigned int xlnx_mix_crtc_get_align(struct xlnx_crtc *crtc)
+{
+	struct xlnx_mix_plane *mix_plane = to_xlnx_plane(crtc->crtc.primary);
+
+	/* Base alignment scaled by the core's pixels-per-clock setting */
+	return XVMIX_BASE_ALIGN * mix_plane->mixer->mixer_hw.ppc;
+}
+
+/**
+ * xlnx_mix_attach_plane_prop - Attach mixer-specific drm property to
+ * the given plane
+ * @plane: Xilinx drm plane object to inspect and attach appropriate
+ * properties to
+ *
+ * The linked mixer layer will be inspected to see what capabilities it offers
+ * (e.g. global layer alpha; scaling) and drm property objects that indicate
+ * those capabilities will then be attached and initialized to default values.
+ */
+static void xlnx_mix_attach_plane_prop(struct xlnx_mix_plane *plane)
+{
+	struct xlnx_mix *mix = plane->mixer;
+	struct drm_mode_object *obj = &plane->base.base;
+
+	/* Scaling property, default 1x, only if the layer can scale */
+	if (plane->mixer_layer->hw_config.can_scale)
+		drm_object_attach_property(obj, mix->scale_prop,
+					   XVMIX_SCALE_FACTOR_1X);
+	/* Alpha property, default fully opaque, only if alpha-capable */
+	if (plane->mixer_layer->hw_config.can_alpha)
+		drm_object_attach_property(obj, mix->alpha_prop,
+					   XVMIX_ALPHA_MAX);
+}
+
+/*
+ * Mark the layer backing @plane active so hardware enables take effect.
+ *
+ * Return: 0 on success, -ENODEV if the plane or its layer is missing.
+ */
+static int xlnx_mix_mark_layer_active(struct xlnx_mix_plane *plane)
+{
+	/* Also guard @plane itself, matching xlnx_mix_mark_layer_inactive() */
+	if (!plane || !plane->mixer_layer)
+		return -ENODEV;
+	plane->mixer_layer->layer_regs.is_active = true;
+
+	return 0;
+}
+
+/* Return true when @format appears in the mixer's supported color table */
+static bool xlnx_mix_isfmt_support(u32 format)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(color_table); idx++) {
+		if (color_table[idx] == format)
+			return true;
+	}
+
+	return false;
+}
+
+/*************** DISPLAY ************/
+
+/**
+ * xlnx_mix_get_layer_scaling - Get layer scaling factor
+ * @mixer: Mixer instance to read the scaling factor from
+ * @id: Plane id
+ *
+ * Applicable only for overlay layers; reads the hardware register and
+ * caches the value in the layer's shadow register state.
+ *
+ * Return:
+ * scaling factor of the specified layer, or 0 when the layer is unknown
+ * or cannot scale
+ */
+static int xlnx_mix_get_layer_scaling(struct xlnx_mix_hw *mixer,
+				      enum xlnx_mix_layer_id id)
+{
+	int scale_factor = 0;
+	u32 reg;
+	struct xlnx_mix_layer_data *l_data = xlnx_mix_get_layer_data(mixer, id);
+
+	/* Fix: l_data was dereferenced without a NULL check; an unknown id
+	 * now yields the same "no scaling" result instead of an oops.
+	 */
+	if (!l_data)
+		return scale_factor;
+
+	if (id == mixer->logo_layer_id) {
+		if (mixer->logo_layer_en) {
+			/* Logo registers move when more overlays are present */
+			if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+				reg = XVMIX_LOGOSCALEFACTOR_DATA +
+					XVMIX_LOGO_OFFSET;
+			else
+				reg = XVMIX_LOGOSCALEFACTOR_DATA;
+			scale_factor = reg_readl(mixer->base, reg);
+			l_data->layer_regs.scale_fact = scale_factor;
+		}
+	} else {
+		/*Layer0-Layer15*/
+		if (id < mixer->logo_layer_id && l_data->hw_config.can_scale) {
+			reg = XVMIX_LAYERSCALE_0_DATA + (id * XVMIX_REG_OFFSET);
+			scale_factor = reg_readl(mixer->base, reg);
+			l_data->layer_regs.scale_fact = scale_factor;
+		}
+	}
+	return scale_factor;
+}
+
+/**
+ * xlnx_mix_set_layer_window - Sets the position of an overlay layer
+ * @mixer: Specific mixer object instance controlling the video
+ * @id: Logical layer id (1-15) to be positioned
+ * @x_pos: new: Column to start display of overlay layer
+ * @y_pos: new: Row to start display of overlay layer
+ * @width: Number of active columns to display for overlay layer
+ * @height: Number of active rows to display for overlay layer
+ * @stride: Width in bytes of overlay memory buffer (memory layer only)
+ *
+ * Sets the position of an overlay layer over the background layer (layer 0)
+ * Applicable only for layers 1-15 or the logo layer
+ *
+ * Return:
+ * Zero on success, -EINVAL if the window is invalid, the layer is
+ * unknown, or the dimensions fall outside the layer's hardware limits
+ */
+static int xlnx_mix_set_layer_window(struct xlnx_mix_hw *mixer,
+				     enum xlnx_mix_layer_id id, u32 x_pos,
+				     u32 y_pos, u32 width, u32 height,
+				     u32 stride)
+{
+	struct xlnx_mix_layer_data *l_data;
+	u32 scale = 0;
+	int status = -EINVAL;
+	u32 x_reg, y_reg, w_reg, h_reg, s_reg;
+	u32 off;
+
+	l_data = xlnx_mix_get_layer_data(mixer, id);
+	if (!l_data)
+		return status;
+
+	/* The scaled window must still fit in the active display area */
+	scale = xlnx_mix_get_layer_scaling(mixer, id);
+	if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+		return status;
+
+	if (id == mixer->logo_layer_id) {
+		if (!(mixer->logo_layer_en &&
+		      width <= l_data->hw_config.max_width &&
+		      height <= l_data->hw_config.max_height &&
+		      height >= l_data->hw_config.min_height &&
+		      width >= l_data->hw_config.min_width))
+			return status;
+
+		/* Logo registers move when more overlays are present */
+		if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS) {
+			x_reg = XVMIX_LOGOSTARTX_DATA + XVMIX_LOGO_OFFSET;
+			y_reg = XVMIX_LOGOSTARTY_DATA + XVMIX_LOGO_OFFSET;
+			w_reg = XVMIX_LOGOWIDTH_DATA + XVMIX_LOGO_OFFSET;
+			h_reg = XVMIX_LOGOHEIGHT_DATA + XVMIX_LOGO_OFFSET;
+		} else {
+			x_reg = XVMIX_LOGOSTARTX_DATA;
+			y_reg = XVMIX_LOGOSTARTY_DATA;
+			w_reg = XVMIX_LOGOWIDTH_DATA;
+			h_reg = XVMIX_LOGOHEIGHT_DATA;
+		}
+		reg_writel(mixer->base, x_reg, x_pos);
+		reg_writel(mixer->base, y_reg, y_pos);
+		reg_writel(mixer->base, w_reg, width);
+		reg_writel(mixer->base, h_reg, height);
+		/* Shadow the programmed window for later reads */
+		l_data->layer_regs.x_pos = x_pos;
+		l_data->layer_regs.y_pos = y_pos;
+		l_data->layer_regs.width = width;
+		l_data->layer_regs.height = height;
+		status = 0;
+	} else {
+		/*Layer1-Layer15*/
+
+		if (!(id < mixer->layer_cnt &&
+		      width <= l_data->hw_config.max_width &&
+		      width >= l_data->hw_config.min_width))
+			return status;
+		x_reg = XVMIX_LAYERSTARTX_0_DATA;
+		y_reg = XVMIX_LAYERSTARTY_0_DATA;
+		w_reg = XVMIX_LAYERWIDTH_0_DATA;
+		h_reg = XVMIX_LAYERHEIGHT_0_DATA;
+		s_reg = XVMIX_LAYERSTRIDE_0_DATA;
+
+		/* Per-layer register bank offset */
+		off = id * XVMIX_REG_OFFSET;
+		reg_writel(mixer->base, (x_reg + off), x_pos);
+		reg_writel(mixer->base, (y_reg + off), y_pos);
+		reg_writel(mixer->base, (w_reg + off), width);
+		reg_writel(mixer->base, (h_reg + off), height);
+		l_data->layer_regs.x_pos = x_pos;
+		l_data->layer_regs.y_pos = y_pos;
+		l_data->layer_regs.width = width;
+		l_data->layer_regs.height = height;
+
+		/* Stride is only meaningful for memory-backed layers */
+		if (!l_data->hw_config.is_streaming)
+			reg_writel(mixer->base, (s_reg + off), stride);
+		status = 0;
+	}
+	return status;
+}
+
+/**
+ * xlnx_mix_set_layer_dimensions - Set layer dimensions
+ * @plane: Drm plane object describing video layer to reposition
+ * @crtc_x: New horizontal anchor position from which to begin rendering
+ * @crtc_y: New vertical anchor position from which to begin rendering
+ * @width: Width, in pixels, to render from stream or memory buffer
+ * @height: Height, in pixels, to render from stream or memory buffer
+ * @stride: Width, in bytes, of a memory buffer. Used only for
+ * memory layers. Use 0 for streaming layers.
+ *
+ * Establishes new coordinates and dimensions for a video plane layer
+ * New size and coordinates of window must fit within the currently active
+ * area of the crtc (e.g. the background resolution)
+ *
+ * Return: 0 if successful; Either -EINVAL if coordinate data is invalid
+ * or -ENODEV if layer data not present
+ */
+static int xlnx_mix_set_layer_dimensions(struct xlnx_mix_plane *plane,
+					 u32 crtc_x, u32 crtc_y,
+					 u32 width, u32 height, u32 stride)
+{
+	struct xlnx_mix *mixer = plane->mixer;
+	struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+	struct xlnx_mix_layer_data *layer_data;
+	enum xlnx_mix_layer_id layer_id;
+	int ret = 0;
+
+	layer_data = plane->mixer_layer;
+	layer_id = layer_data->id;
+	/* Resizing requires the layer (and master, for the primary plane)
+	 * to be disabled before reprogramming the window registers.
+	 */
+	if (layer_data->layer_regs.height != height ||
+	    layer_data->layer_regs.width != width) {
+		if (mixer->drm_primary_layer == plane)
+			xlnx_mix_layer_disable(mixer_hw, XVMIX_LAYER_MASTER);
+
+		xlnx_mix_layer_disable(mixer_hw, layer_id);
+	}
+	/* The primary plane defines the active area and is anchored at 0,0 */
+	if (mixer->drm_primary_layer == plane) {
+		crtc_x = 0;
+		crtc_y = 0;
+		ret = xlnx_mix_set_active_area(mixer_hw, width, height);
+		if (ret)
+			return ret;
+		xlnx_mix_layer_enable(mixer_hw, XVMIX_LAYER_MASTER);
+	}
+	/* Overlay layers get an explicit window and are then re-enabled */
+	if (layer_id != XVMIX_LAYER_MASTER && layer_id < mixer_hw->max_layers) {
+		ret = xlnx_mix_set_layer_window(mixer_hw, layer_id, crtc_x,
+						crtc_y, width, height, stride);
+		if (ret)
+			return ret;
+		xlnx_mix_disp_layer_enable(plane);
+	}
+	return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_scaling - Sets scaling factor
+ * @mixer: Instance of mixer to be subject of scaling request
+ * @id: Logical id of video layer subject to new scale setting
+ * @scale: scale Factor (1x, 2x or 4x) for horiz. and vert. dimensions
+ *
+ * Sets the scaling factor for the specified video layer
+ * Not applicable to background stream layer (layer 0)
+ *
+ * Return:
+ * Zero on success, -ENODEV if the layer is unknown, -EINVAL on failure
+ * to set scale for layer (likely returned if resulting size of layer
+ * exceeds dimensions of active display area)
+ */
+static int xlnx_mix_set_layer_scaling(struct xlnx_mix_hw *mixer,
+				      enum xlnx_mix_layer_id id, u32 scale)
+{
+	void __iomem *reg = mixer->base;
+	struct xlnx_mix_layer_data *l_data;
+	int status = 0;
+	u32 x_pos, y_pos, width, height, offset;
+
+	l_data = xlnx_mix_get_layer_data(mixer, id);
+	/* Fix: l_data was dereferenced below without a NULL check */
+	if (!l_data)
+		return -ENODEV;
+
+	x_pos = l_data->layer_regs.x_pos;
+	y_pos = l_data->layer_regs.y_pos;
+	width = l_data->layer_regs.width;
+	height = l_data->layer_regs.height;
+
+	/* The scaled window must still fit in the active display area */
+	if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+		return -EINVAL;
+
+	if (id == mixer->logo_layer_id) {
+		if (mixer->logo_layer_en) {
+			/* Logo registers move when more overlays are present */
+			if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+				reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA +
+					   XVMIX_LOGO_OFFSET, scale);
+			else
+				reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA,
+					   scale);
+			l_data->layer_regs.scale_fact = scale;
+			status = 0;
+		}
+	} else {
+		/* Layer0-Layer15 */
+		if (id < mixer->layer_cnt && l_data->hw_config.can_scale) {
+			offset = id * XVMIX_REG_OFFSET;
+
+			reg_writel(reg, (XVMIX_LAYERSCALE_0_DATA + offset),
+				   scale);
+			l_data->layer_regs.scale_fact = scale;
+			status = 0;
+		}
+		/* NOTE(review): a known but non-scalable layer silently
+		 * returns 0 (status initialized to 0) -- preserved as-is
+		 * since callers gate on can_scale before calling.
+		 */
+	}
+	return status;
+}
+
+/**
+ * xlnx_mix_set_layer_scale - Change video scale factor for video plane
+ * @plane: Drm plane object describing layer to be modified
+ * @val: Index of scale factor to use:
+ *		0 = 1x
+ *		1 = 2x
+ *		2 = 4x
+ *
+ * Return:
+ * Zero on success, either -EINVAL if scale value is illegal or
+ * -ENODEV if layer does not exist (null)
+ */
+static int xlnx_mix_set_layer_scale(struct xlnx_mix_plane *plane,
+				    uint64_t val)
+{
+	struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+	struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+	int ret;
+
+	if (!layer || !layer->hw_config.can_scale)
+		return -ENODEV;
+	if (val > XVMIX_SCALE_FACTOR_4X || val < XVMIX_SCALE_FACTOR_1X) {
+		DRM_ERROR("Mixer layer scale value illegal.\n");
+		return -EINVAL;
+	}
+	/* The layer must be off while its scale register is reprogrammed.
+	 * NOTE(review): the 50 ms delay presumably lets the current frame
+	 * drain before reprogramming -- confirm against IP requirements.
+	 */
+	xlnx_mix_disp_layer_disable(plane);
+	msleep(50);
+	ret = xlnx_mix_set_layer_scaling(mixer_hw, layer->id, val);
+	xlnx_mix_disp_layer_enable(plane);
+
+	return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_alpha - Set the alpha value
+ * @mixer: Instance of mixer controlling layer to modify
+ * @layer_id: Logical id of video overlay to adjust alpha setting
+ * @alpha: Desired alpha setting (0-255) for layer specified
+ *            255 = completely opaque
+ *            0 = fully transparent
+ *
+ * Set the layer global transparency for a video overlay
+ * Not applicable to background streaming layer
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_alpha(struct xlnx_mix_hw *mixer,
+				    enum xlnx_mix_layer_id layer_id, u32 alpha)
+{
+	struct xlnx_mix_layer_data *layer_data;
+	u32 reg;
+	int status = -EINVAL;
+
+	layer_data = xlnx_mix_get_layer_data(mixer, layer_id);
+	/* Fix: the logo branch dereferenced layer_data without a NULL
+	 * check; an unknown id now returns -EINVAL instead of oopsing.
+	 */
+	if (!layer_data)
+		return status;
+
+	if (layer_id == mixer->logo_layer_id) {
+		if (mixer->logo_layer_en) {
+			/* Logo registers move when more overlays are present */
+			if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+				reg = XVMIX_LOGOALPHA_DATA + XVMIX_LOGO_OFFSET;
+			else
+				reg = XVMIX_LOGOALPHA_DATA;
+			reg_writel(mixer->base, reg, alpha);
+			layer_data->layer_regs.alpha = alpha;
+			status = 0;
+		}
+	} else {
+		/*Layer1-Layer15*/
+		if (layer_id < mixer->layer_cnt &&
+		    layer_data->hw_config.can_alpha) {
+			u32 offset = layer_id * XVMIX_REG_OFFSET;
+
+			reg = XVMIX_LAYERALPHA_0_DATA;
+			reg_writel(mixer->base, (reg + offset), alpha);
+			layer_data->layer_regs.alpha = alpha;
+			status = 0;
+		}
+	}
+	return status;
+}
+
+/**
+ * xlnx_mix_disp_set_layer_alpha - Change the transparency of an entire plane
+ * @plane: Video layer affected by new alpha setting
+ * @val: Value of transparency setting (0-255) with 255 being opaque
+ * 0 being fully transparent
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_disp_set_layer_alpha(struct xlnx_mix_plane *plane,
+					 uint64_t val)
+{
+	struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+
+	if (!layer || !layer->hw_config.can_alpha)
+		return -ENODEV;
+
+	if (val < XVMIX_ALPHA_MIN || val > XVMIX_ALPHA_MAX) {
+		DRM_ERROR("Mixer layer alpha dts value illegal.\n");
+		return -EINVAL;
+	}
+	return xlnx_mix_set_layer_alpha(to_mixer_hw(plane), layer->id, val);
+}
+
+/**
+ * xlnx_mix_set_layer_buff_addr - Set buff addr for layer
+ * @mixer: Instance of mixer controlling layer to modify
+ * @id: Logical id of video overlay to adjust alpha setting
+ * @luma_addr: Start address of plane 1 of frame buffer for layer 1
+ * @chroma_addr: Start address of plane 2 of frame buffer for layer 1
+ *
+ * Sets the buffer address of the specified layer
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_buff_addr(struct xlnx_mix_hw *mixer,
+					enum xlnx_mix_layer_id id,
+					dma_addr_t luma_addr,
+					dma_addr_t chroma_addr)
+{
+	struct xlnx_mix_layer_data *layer_data;
+	u32 align, offset;
+	u32 reg1, reg2;
+
+	if (id >= mixer->layer_cnt)
+		return -EINVAL;
+
+	/* Check if addr is aligned to aximm width (PPC * 64-bits) */
+	align = mixer->ppc * 8;
+	if ((luma_addr % align) != 0 || (chroma_addr % align) != 0)
+		return -EINVAL;
+
+	/* NOTE(review): the (id - 1) offset assumes callers never pass the
+	 * master layer (id 0); id 0 would wrap the offset -- confirm all
+	 * call sites gate on overlay layers only.
+	 */
+	offset = (id - 1) * XVMIX_REG_OFFSET;
+	reg1 = XVMIX_LAYER1_BUF1_V_DATA + offset;
+	reg2 = XVMIX_LAYER1_BUF2_V_DATA + offset;
+	layer_data = &mixer->layer_data[id];
+	/* 64-bit register writes only when both hw and kernel support it */
+	if (mixer->dma_addr_size == 64 && sizeof(dma_addr_t) == 8) {
+		reg_writeq(mixer->base, reg1, luma_addr);
+		reg_writeq(mixer->base, reg2, chroma_addr);
+	} else {
+		reg_writel(mixer->base, reg1, (u32)luma_addr);
+		reg_writel(mixer->base, reg2, (u32)chroma_addr);
+	}
+	layer_data->layer_regs.buff_addr1 = luma_addr;
+	layer_data->layer_regs.buff_addr2 = chroma_addr;
+
+	return 0;
+}
+
+/**
+ * xlnx_mix_hw_plane_dpms - Implementation of display power management
+ * system call (dpms).
+ * @plane: Plane/mixer layer to enable/disable (based on dpms value)
+ * @dpms: Display power management state to act upon
+ *
+ * Designed to disable and turn off a plane and restore all attached drm
+ * properties to their initial values. Alternatively, if dpms is "on", will
+ * enable a layer.
+ */
+
+static void
+xlnx_mix_hw_plane_dpms(struct xlnx_mix_plane *plane, int dpms)
+{
+	struct xlnx_mix *mixer;
+
+	if (!plane->mixer)
+		return;
+	mixer = plane->mixer;
+	plane->dpms = dpms;
+
+	switch (dpms) {
+	case DRM_MODE_DPMS_ON:
+		xlnx_mix_disp_layer_enable(plane);
+		break;
+	default:
+		/* Any non-ON state powers the layer down */
+		xlnx_mix_mark_layer_inactive(plane);
+		xlnx_mix_disp_layer_disable(plane);
+		/* restore to default property values */
+		if (mixer->alpha_prop)
+			xlnx_mix_disp_set_layer_alpha(plane, XVMIX_ALPHA_MAX);
+		if (mixer->scale_prop)
+			xlnx_mix_set_layer_scale(plane, XVMIX_SCALE_FACTOR_1X);
+	}
+}
+
+/* DPMS entry point for the drm plane: sequences the DMA engines around
+ * the hardware layer enable/disable so no transfer targets a dark layer.
+ */
+static void xlnx_mix_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+	struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+	unsigned int i;
+
+	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+	DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+	if (plane->dpms == dpms)
+		return;
+	plane->dpms = dpms;
+	switch (dpms) {
+	case DRM_MODE_DPMS_ON:
+		/* start dma engine */
+		for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+			if (plane->dma[i].chan && plane->dma[i].is_active)
+				dma_async_issue_pending(plane->dma[i].chan);
+		xlnx_mix_hw_plane_dpms(plane, dpms);
+		break;
+	default:
+		/* disable the hardware layer first, then quiesce DMA */
+		xlnx_mix_hw_plane_dpms(plane, dpms);
+		/* stop dma engine and release descriptors */
+		for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+			if (plane->dma[i].chan && plane->dma[i].is_active) {
+				dmaengine_terminate_sync(plane->dma[i].chan);
+				plane->dma[i].is_active = false;
+			}
+		}
+		break;
+	}
+}
+
+/* drm atomic_set_property callback: route the mixer's custom alpha and
+ * scale properties to their layer programming helpers.
+ *
+ * Return: result of the property handler, or -EINVAL for an unknown
+ * property. (Fix: removed the unreachable trailing "return 0".)
+ */
+static int
+xlnx_mix_disp_plane_atomic_set_property(struct drm_plane *base_plane,
+					struct drm_plane_state *state,
+					struct drm_property *property, u64 val)
+{
+	struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+	struct xlnx_mix *mixer = plane->mixer;
+
+	if (property == mixer->alpha_prop)
+		return xlnx_mix_disp_set_layer_alpha(plane, val);
+	if (property == mixer->scale_prop)
+		return xlnx_mix_set_layer_scale(plane, val);
+
+	return -EINVAL;
+}
+
+/* drm atomic_get_property callback: report the cached (shadow) alpha or
+ * scale value for the plane's mixer layer.
+ */
+static int
+xlnx_mix_disp_plane_atomic_get_property(struct drm_plane *base_plane,
+					const struct drm_plane_state *state,
+					struct drm_property *property,
+					uint64_t *val)
+{
+	struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+	struct xlnx_mix *mixer = plane->mixer;
+	struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+	u32 layer_id = plane->mixer_layer->id;
+
+	/* NOTE(review): indexes layer_data[] directly by logical id,
+	 * presumably valid for the logo id too -- confirm the array is
+	 * sized/ordered to match xlnx_mix_get_layer_data()'s mapping.
+	 */
+	if (property == mixer->alpha_prop)
+		*val = mixer_hw->layer_data[layer_id].layer_regs.alpha;
+	else if (property == mixer->scale_prop)
+		*val = mixer_hw->layer_data[layer_id].layer_regs.scale_fact;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * xlnx_mix_disp_plane_atomic_update_plane - plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int
+xlnx_mix_disp_plane_atomic_update_plane(struct drm_plane *plane,
+					struct drm_crtc *crtc,
+					struct drm_framebuffer *fb,
+					int crtc_x, int crtc_y,
+					unsigned int crtc_w,
+					unsigned int crtc_h,
+					uint32_t src_x, uint32_t src_y,
+					uint32_t src_w, uint32_t src_h,
+					struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_plane_state *plane_state;
+	int ret = 0;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	plane_state = drm_atomic_get_plane_state(state, plane);
+	if (IS_ERR(plane_state)) {
+		ret = PTR_ERR(plane_state);
+		goto fail;
+	}
+
+	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+	if (ret != 0)
+		goto fail;
+
+	/* Copy the requested crtc/src rectangles into the new state */
+	drm_atomic_set_fb_for_plane(plane_state, fb);
+	plane_state->crtc_x = crtc_x;
+	plane_state->crtc_y = crtc_y;
+	plane_state->crtc_w = crtc_w;
+	plane_state->crtc_h = crtc_h;
+	plane_state->src_x = src_x;
+	plane_state->src_y = src_y;
+	plane_state->src_w = src_w;
+	plane_state->src_h = src_h;
+
+	/* Cursor moves shouldn't stall on vblank */
+	if (plane == crtc->cursor)
+		state->legacy_cursor_update = true;
+
+	/* Do async-update if possible */
+	state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_state_put(state);
+	return ret;
+}
+
+/* drm plane vtable; const per kernel convention -- the drm core only
+ * takes a const pointer to it.
+ */
+static const struct drm_plane_funcs xlnx_mix_plane_funcs = {
+	.update_plane	= xlnx_mix_disp_plane_atomic_update_plane,
+	.disable_plane	= drm_atomic_helper_disable_plane,
+	.atomic_set_property = xlnx_mix_disp_plane_atomic_set_property,
+	.atomic_get_property = xlnx_mix_disp_plane_atomic_get_property,
+	.destroy	= drm_plane_cleanup,
+	.reset		= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * xlnx_mix_logo_load - Loads mixer's internal bram
+ * @mixer: Mixer instance to act upon
+ * @logo_w: Width of logo in pixels
+ * @logo_h: Height of logo in pixels
+ * @r_buf: Pointer to byte buffer array of R data values
+ * @g_buf: Pointer to byte buffer array of G data values
+ * @b_buf: Pointer to byte buffer array of B data values
+ * @a_buf: Pointer to byte buffer array of A data values
+ *
+ * Loads mixer's internal bram with planar R, G, B and A data
+ *
+ * Return:
+ * Zero on success, -ENODEV if logo layer not enabled; -EINVAL otherwise
+ */
+static int xlnx_mix_logo_load(struct xlnx_mix_hw *mixer, u32 logo_w, u32 logo_h,
+			      u8 *r_buf, u8 *g_buf, u8 *b_buf, u8 *a_buf)
+{
+	void __iomem *reg = mixer->base;
+	struct xlnx_mix_layer_data *layer_data;
+
+	int x;
+	u32 shift;
+	u32 rword, gword, bword, aword;
+	u32 pixel_cnt = logo_w * logo_h;
+	u32 unaligned_pix_cnt = pixel_cnt % 4;
+	u32 curr_x_pos, curr_y_pos;
+	u32 rbase_addr, gbase_addr, bbase_addr, abase_addr;
+
+	layer_data = xlnx_mix_get_layer_data(mixer, mixer->logo_layer_id);
+	rword = 0;
+	gword = 0;
+	bword = 0;
+	aword = 0;
+
+	if (!layer_data)
+		return -ENODEV;
+
+	/* RGBA data should be 32-bit word aligned */
+	if (unaligned_pix_cnt && mixer->logo_pixel_alpha_enabled)
+		return -EINVAL;
+
+	if (!(mixer->logo_layer_en &&
+	      logo_w <= layer_data->hw_config.max_width &&
+	      logo_h <= layer_data->hw_config.max_height))
+		return -EINVAL;
+
+	/* Fix: removed write-only locals width/height (assigned from
+	 * logo_w/logo_h but never read).
+	 */
+	rbase_addr = XVMIX_LOGOR_V_BASE;
+	gbase_addr = XVMIX_LOGOG_V_BASE;
+	bbase_addr = XVMIX_LOGOB_V_BASE;
+	abase_addr = XVMIX_LOGOA_V_BASE;
+
+	/* Pack four 8-bit samples per 32-bit BRAM word, little-endian,
+	 * flushing each completed word to the per-component base address.
+	 */
+	for (x = 0; x < pixel_cnt; x++) {
+		shift = (x % 4) * 8;
+		rword |= r_buf[x] << shift;
+		gword |= g_buf[x] << shift;
+		bword |= b_buf[x] << shift;
+		if (mixer->logo_pixel_alpha_enabled)
+			aword |= a_buf[x] << shift;
+
+		if (x % 4 == 3) {
+			reg_writel(reg, (rbase_addr + (x - 3)), rword);
+			reg_writel(reg, (gbase_addr + (x - 3)), gword);
+			reg_writel(reg, (bbase_addr + (x - 3)), bword);
+			if (mixer->logo_pixel_alpha_enabled)
+				reg_writel(reg, (abase_addr + (x - 3)), aword);
+		}
+	}
+
+	/* Reprogram the logo window at its current position with new size */
+	curr_x_pos = layer_data->layer_regs.x_pos;
+	curr_y_pos = layer_data->layer_regs.y_pos;
+	return xlnx_mix_set_layer_window(mixer, mixer->logo_layer_id,
+					 curr_x_pos, curr_y_pos,
+					 logo_w, logo_h, 0);
+}
+
+/* Deinterleave a CMA-backed logo/cursor image into planar R/G/B(/A)
+ * buffers and load them into the mixer's logo BRAM. A no-op (returns 0)
+ * when the buffer address and dimensions are unchanged, or when @plane
+ * is not the logo layer.
+ *
+ * Return: 0 on success or no-op, -EINVAL on bad dimensions/format,
+ * -ENOMEM on allocation failure, or the xlnx_mix_logo_load() result.
+ */
+static int xlnx_mix_update_logo_img(struct xlnx_mix_plane *plane,
+				    struct drm_gem_cma_object *buffer,
+				    u32 src_w, u32 src_h)
+{
+	struct xlnx_mix_layer_data *logo_layer = plane->mixer_layer;
+	struct xlnx_mix_hw *mixer = to_mixer_hw(plane);
+	size_t pixel_cnt = src_h * src_w;
+	/* color comp defaults to offset in RG24 buffer */
+	u32 pix_cmp_cnt;
+	u32 logo_cmp_cnt;
+	bool per_pixel_alpha = false;
+	u32 max_width = logo_layer->hw_config.max_width;
+	u32 max_height = logo_layer->hw_config.max_height;
+	u32 min_width = logo_layer->hw_config.min_width;
+	u32 min_height = logo_layer->hw_config.min_height;
+	u8 *r_data = NULL;
+	u8 *g_data = NULL;
+	u8 *b_data = NULL;
+	u8 *a_data = NULL;
+	size_t el_size = sizeof(u8);
+	u8 *pixel_mem_data;
+	int ret, i, j;
+
+	/* ensure valid conditions for update */
+	if (logo_layer->id != mixer->logo_layer_id)
+		return 0;
+
+	if (src_h > max_height || src_w > max_width ||
+	    src_h < min_height || src_w < min_width) {
+		DRM_ERROR("Mixer logo/cursor layer dimensions illegal.\n");
+		return -EINVAL;
+	}
+
+	if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+		DRM_ERROR("DRM color format not supported for logo layer\n");
+		return -EINVAL;
+	}
+	per_pixel_alpha = (logo_layer->hw_config.vid_fmt ==
+			   DRM_FORMAT_RGBA8888) ? true : false;
+	r_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+	g_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+	b_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+	if (per_pixel_alpha)
+		a_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+
+	if (!r_data || !g_data || !b_data || (per_pixel_alpha && !a_data)) {
+		DRM_ERROR("Unable to allocate memory for logo layer data\n");
+		ret = -ENOMEM;
+		goto free;
+	}
+	pix_cmp_cnt = per_pixel_alpha ? 4 : 3;
+	logo_cmp_cnt = pixel_cnt * pix_cmp_cnt;
+	/* ensure buffer attributes have changed to indicate new logo
+	 * has been created
+	 */
+	if ((phys_addr_t)buffer->vaddr == logo_layer->layer_regs.buff_addr1 &&
+	    src_w == logo_layer->layer_regs.width &&
+	    src_h == logo_layer->layer_regs.height) {
+		/* Fix: this early exit used to "return 0" directly,
+		 * leaking the r/g/b/a buffers allocated above.
+		 */
+		ret = 0;
+		goto free;
+	}
+
+	/* cache buffer address for future comparison */
+	logo_layer->layer_regs.buff_addr1 = (phys_addr_t)buffer->vaddr;
+	pixel_mem_data = (u8 *)(buffer->vaddr);
+	/* Deinterleave [A]BGR byte stream into planar component buffers */
+	for (i = 0, j = 0; j < pixel_cnt; j++) {
+		if (per_pixel_alpha && a_data)
+			a_data[j] = pixel_mem_data[i++];
+
+		b_data[j] = pixel_mem_data[i++];
+		g_data[j] = pixel_mem_data[i++];
+		r_data[j] = pixel_mem_data[i++];
+	}
+	ret = xlnx_mix_logo_load(to_mixer_hw(plane), src_w, src_h, r_data,
+				 g_data, b_data,
+				 per_pixel_alpha ? a_data : NULL);
+free:
+	kfree(r_data);
+	kfree(g_data);
+	kfree(b_data);
+	kfree(a_data);
+
+	return ret;
+}
+
+/**
+ * xlnx_mix_set_plane - Implementation of DRM plane_update callback
+ * @plane: xlnx_mix_plane object containing references to
+ *  the base plane and mixer
+ * @fb: Framebuffer descriptor
+ * @crtc_x: X position of layer on crtc.  Note, if the plane represents either
+ *  the master hardware layer (video0) or the layer representing the DRM primary
+ *  layer, the crtc x/y coordinates are either ignored and/or set to 0/0
+ *  respectively.
+ * @crtc_y: Y position of layer.  See description of crtc_x handling
+ *  for more inforation.
+ * @src_x: x-offset in memory buffer from which to start reading
+ * @src_y: y-offset in memory buffer from which to start reading
+ * @src_w: Number of horizontal pixels to read from memory per row
+ * @src_h: Number of rows of video data to read from memory
+ *
+ * Configures a mixer layer to comply with user space SET_PLANE ioctl
+ * call.
+ *
+ * Return:
+ * Zero on success, non-zero linux error code otherwise.
+ */
+static int xlnx_mix_set_plane(struct xlnx_mix_plane *plane,
+			      struct drm_framebuffer *fb,
+			      int crtc_x, int crtc_y,
+			      u32 src_x, u32 src_y,
+			      u32 src_w, u32 src_h)
+{
+	struct xlnx_mix_hw *mixer_hw;
+	struct xlnx_mix *mixer;
+	struct drm_gem_cma_object *luma_buffer;
+	u32 luma_stride = fb->pitches[0];
+	dma_addr_t luma_addr, chroma_addr = 0;
+	u32 active_area_width;
+	u32 active_area_height;
+	enum xlnx_mix_layer_id layer_id;
+	int ret;
+	const struct drm_format_info *info = fb->format;
+
+	mixer = plane->mixer;
+	mixer_hw = &mixer->mixer_hw;
+	layer_id = plane->mixer_layer->id;
+	/* Active area is defined by the DRM primary layer's window */
+	active_area_width =
+		mixer->drm_primary_layer->mixer_layer->layer_regs.width;
+	active_area_height =
+		mixer->drm_primary_layer->mixer_layer->layer_regs.height;
+	/* compute memory data */
+	luma_buffer = drm_fb_cma_get_gem_obj(fb, 0);
+	luma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 0);
+	if (!luma_addr) {
+		DRM_ERROR("%s failed to get luma paddr\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Second plane (chroma) only exists for multi-planar formats */
+	if (info->num_planes > 1) {
+		chroma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 1);
+		if (!chroma_addr) {
+			DRM_ERROR("failed to get chroma paddr\n");
+			return -EINVAL;
+		}
+	}
+	ret = xlnx_mix_mark_layer_active(plane);
+	if (ret)
+		return ret;
+
+	switch (layer_id) {
+	case XVMIX_LAYER_MASTER:
+		if (!plane->mixer_layer->hw_config.is_streaming)
+			xlnx_mix_mark_layer_inactive(plane);
+		/* When master is also the DRM primary it defines the
+		 * active area; otherwise its size must already match it.
+		 */
+		if (mixer->drm_primary_layer == mixer->hw_master_layer) {
+			xlnx_mix_layer_disable(mixer_hw, layer_id);
+			ret = xlnx_mix_set_active_area(mixer_hw, src_w, src_h);
+			if (ret)
+				return ret;
+			xlnx_mix_layer_enable(mixer_hw, layer_id);
+
+		} else if (src_w != active_area_width ||
+			   src_h != active_area_height) {
+			DRM_ERROR("Invalid dimensions for mixer layer 0.\n");
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		/* Overlay/logo layers: program window, then image data
+		 * (logo) or buffer address (non-streaming memory layers).
+		 */
+		ret = xlnx_mix_set_layer_dimensions(plane, crtc_x, crtc_y,
+						    src_w, src_h, luma_stride);
+		if (ret)
+			break;
+		if (layer_id == mixer_hw->logo_layer_id) {
+			ret = xlnx_mix_update_logo_img(plane, luma_buffer,
+						       src_w, src_h);
+		} else {
+			if (!plane->mixer_layer->hw_config.is_streaming)
+				ret = xlnx_mix_set_layer_buff_addr
+					(mixer_hw, plane->mixer_layer->id,
+					 luma_addr, chroma_addr);
+		}
+	}
+	return ret;
+}
+
+/*
+ * Mode-set a plane: fill in one interleaved DMA template per video
+ * sub-plane of @fb, then program the mixer layer via xlnx_mix_set_plane().
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, uint32_t src_y,
+ u32 src_w, uint32_t src_h)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ const struct drm_format_info *info = fb->format;
+ size_t i = 0;
+ dma_addr_t luma_paddr;
+ int ret;
+ u32 stride;
+
+ /* JPM TODO begin start of code to extract into prep-interleaved*/
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n", src_w, crtc_x, src_h, crtc_y);
+
+ /* We have multiple dma channels. Set each per video plane */
+ for (; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? info->hsub : 1);
+ unsigned int height = src_h / (i ? info->vsub : 1);
+
+ luma_paddr = drm_fb_cma_get_gem_addr(fb, base_plane->state, i);
+ if (!luma_paddr) {
+ DRM_ERROR("%s failed to get luma paddr\n", __func__);
+ return -EINVAL;
+ }
+
+ plane->dma[i].xt.numf = height;
+ /*
+ * Use the per-sub-plane index for both the line-size
+ * computation and the pitch: chroma sub-planes (i > 0) may
+ * have a different cpp and stride than the luma plane
+ * (e.g. NV12), so plane 0 values must not be reused here.
+ */
+ plane->dma[i].sgl[0].size =
+ drm_format_plane_width_bytes(info, i, width);
+ plane->dma[i].sgl[0].icg = fb->pitches[i] -
+ plane->dma[i].sgl[0].size;
+ plane->dma[i].xt.src_start = luma_paddr;
+ plane->dma[i].xt.frame_size = info->num_planes;
+ plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ plane->dma[i].xt.src_sgl = true;
+ plane->dma[i].xt.dst_sgl = false;
+ plane->dma[i].is_active = true;
+ }
+
+ /* Remaining channels are unused for this pixel format */
+ for (; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ plane->dma[i].is_active = false;
+ /* Do we have a video format aware dma channel?
+ * If so, modify descriptor accordingly
+ */
+ if (plane->dma[0].chan && !plane->dma[1].chan && info->num_planes > 1) {
+ stride = plane->dma[0].sgl[0].size + plane->dma[0].sgl[0].icg;
+ plane->dma[0].sgl[0].src_icg = plane->dma[1].xt.src_start -
+ plane->dma[0].xt.src_start -
+ (plane->dma[0].xt.numf * stride);
+ }
+
+ ret = xlnx_mix_set_plane(plane, fb, crtc_x, crtc_y, src_x, src_y,
+ src_w, src_h);
+ return ret;
+}
+
+/* No per-commit framebuffer preparation is needed for CMA-backed fbs */
+static int xlnx_mix_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ return 0;
+}
+
+/* Nothing was prepared in .prepare_fb, so nothing to clean up */
+static void xlnx_mix_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
+/*
+ * Validate the requested plane window against the mixer hardware limits.
+ * The drm primary plane is exempt as its size defines the active area.
+ */
+static int xlnx_mix_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct xlnx_mix_plane *mix_plane = to_xlnx_plane(plane);
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(mix_plane);
+ struct xlnx_mix *mix = container_of(mixer_hw, struct xlnx_mix,
+ mixer_hw);
+ int scale;
+
+ /* No check required for the drm_primary_plane */
+ if (mix_plane == mix->drm_primary_layer)
+ return 0;
+
+ scale = xlnx_mix_get_layer_scaling(mixer_hw,
+ mix_plane->mixer_layer->id);
+ if (!is_window_valid(mixer_hw, state->crtc_x, state->crtc_y,
+ state->src_w >> 16, state->src_h >> 16, scale))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Apply a new plane state: mode-set, commit DMA, then ensure DPMS on */
+static void xlnx_mix_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ /* A format change requires the layer to be shut down first */
+ if (old_state->fb &&
+ old_state->fb->format->format != plane->state->fb->format->format)
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+
+ /* src_* values are 16.16 fixed point; convert to pixels */
+ ret = xlnx_mix_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret) {
+ DRM_ERROR("failed to mode-set a plane\n");
+ return;
+ }
+ /* apply the new fb addr */
+ xlnx_mix_plane_commit(plane);
+ /* make sure a plane is on */
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_ON);
+}
+
+/* Disable the plane by driving its DPMS state off */
+static void xlnx_mix_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
+/* Async (e.g. cursor) updates are always accepted for mixer planes */
+static int xlnx_mix_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+/*
+ * Fast-path (async) update: copy the new state into the current plane
+ * state field by field, then run the normal atomic update.
+ */
+static void
+xlnx_mix_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ /* Update the current state with new configurations */
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ plane->state->crtc = new_state->crtc;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ /* NOTE(review): copying new_state->state aliases the global atomic
+ * state into plane->state — confirm this is intended.
+ */
+ plane->state->state = new_state->state;
+
+ xlnx_mix_plane_atomic_update(plane, old_state);
+}
+
+/* Atomic helper callbacks for mixer planes */
+static const struct drm_plane_helper_funcs xlnx_mix_plane_helper_funcs = {
+ .prepare_fb = xlnx_mix_plane_prepare_fb,
+ .cleanup_fb = xlnx_mix_plane_cleanup_fb,
+ .atomic_check = xlnx_mix_plane_atomic_check,
+ .atomic_update = xlnx_mix_plane_atomic_update,
+ .atomic_disable = xlnx_mix_plane_atomic_disable,
+ .atomic_async_check = xlnx_mix_plane_atomic_async_check,
+ .atomic_async_update = xlnx_mix_plane_atomic_async_update,
+};
+
+/*
+ * Request the layer's DMA channels, validate its format and register the
+ * drm plane. Consumes the @layer_node reference (of_node_put on all paths).
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_init_plane(struct xlnx_mix_plane *plane,
+ unsigned int poss_crtcs,
+ struct device_node *layer_node)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ char name[16];
+ enum drm_plane_type type;
+ int ret, i;
+
+ plane->dpms = DRM_MODE_DPMS_OFF;
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ snprintf(name, sizeof(name), "dma%d", i);
+ plane->dma[i].chan = of_dma_request_slave_channel(layer_node,
+ name);
+ /* A missing channel is allowed: streaming layers have none */
+ if (PTR_ERR(plane->dma[i].chan) == -ENODEV) {
+ plane->dma[i].chan = NULL;
+ continue;
+ }
+ if (IS_ERR(plane->dma[i].chan)) {
+ DRM_ERROR("failed to request dma channel\n");
+ ret = PTR_ERR(plane->dma[i].chan);
+ plane->dma[i].chan = NULL;
+ goto err_dma;
+ }
+ }
+ if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+ DRM_ERROR("DRM color format not supported by mixer\n");
+ ret = -ENODEV;
+ goto err_init;
+ }
+ plane->format = plane->mixer_layer->hw_config.vid_fmt;
+ /* Logo layer becomes the cursor, primary layer the drm primary */
+ if (plane == mixer->hw_logo_layer)
+ type = DRM_PLANE_TYPE_CURSOR;
+ if (plane == mixer->drm_primary_layer)
+ type = DRM_PLANE_TYPE_PRIMARY;
+
+ /* initialize drm plane */
+ ret = drm_universal_plane_init(mixer->drm, &plane->base,
+ poss_crtcs, &xlnx_mix_plane_funcs,
+ &plane->format,
+ 1, NULL, type, NULL);
+
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_init;
+ }
+ drm_plane_helper_add(&plane->base, &xlnx_mix_plane_helper_funcs);
+ of_node_put(layer_node);
+
+ return 0;
+
+err_init:
+ xlnx_mix_disp_layer_disable(plane);
+err_dma:
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+
+ of_node_put(layer_node);
+ return ret;
+}
+
+/*
+ * Parse the background (layer_0) video properties from the device tree
+ * into the master layer data.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_parse_dt_bg_video_fmt(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct device_node *layer_node;
+ struct xlnx_mix_layer_data *layer;
+ const char *vformat;
+ int ret = -EINVAL;
+
+ /* of_get_child_by_name() may fail; check it and drop the reference
+ * on every exit path (the original leaked it).
+ */
+ layer_node = of_get_child_by_name(node, "layer_0");
+ if (!layer_node) {
+ DRM_ERROR("No layer_0 node for mixer in dts\n");
+ return -EINVAL;
+ }
+ layer = &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+
+ if (of_property_read_string(layer_node, "xlnx,vformat",
+ &vformat)) {
+ DRM_ERROR("No xlnx,vformat value for layer 0 in dts\n");
+ goto out_put;
+ }
+ /* vid_fmt is a fourcc stored in a u32; NOTE(review): strcpy of a
+ * dts string longer than 3 chars writes past the u32 — confirm all
+ * supported format strings fit, or bound the copy.
+ */
+ strcpy((char *)&layer->hw_config.vid_fmt, vformat);
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_u32(node, "xlnx,bpc", &mixer_hw->bg_layer_bpc)) {
+ DRM_ERROR("Failed to get bits per component (bpc) prop\n");
+ goto out_put;
+ }
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width)) {
+ DRM_ERROR("Failed to get screen width prop\n");
+ goto out_put;
+ }
+ mixer_hw->max_layer_width = layer->hw_config.max_width;
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-height",
+ &layer->hw_config.max_height)) {
+ DRM_ERROR("Failed to get screen height prop\n");
+ goto out_put;
+ }
+ mixer_hw->max_layer_height = layer->hw_config.max_height;
+ layer->id = XVMIX_LAYER_MASTER;
+ ret = 0;
+
+out_put:
+ of_node_put(layer_node);
+ return ret;
+}
+
+/*
+ * Parse the logo layer properties from the device tree into the logo
+ * layer data.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_parse_dt_logo_data(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ struct device_node *logo_node;
+ u32 max_width, max_height;
+ int ret = -EINVAL;
+
+ logo_node = of_get_child_by_name(node, "logo");
+ if (!logo_node) {
+ DRM_ERROR("No logo node specified in device tree.\n");
+ return -EINVAL;
+ }
+
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+
+ /* set defaults for logo layer */
+ layer_data->hw_config.min_height = XVMIX_LOGO_LAYER_HEIGHT_MIN;
+ layer_data->hw_config.min_width = XVMIX_LOGO_LAYER_WIDTH_MIN;
+ layer_data->hw_config.is_streaming = false;
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGB888;
+ layer_data->hw_config.can_alpha = true;
+ layer_data->hw_config.can_scale = true;
+ layer_data->layer_regs.buff_addr1 = 0;
+ layer_data->layer_regs.buff_addr2 = 0;
+ layer_data->id = mixer_hw->logo_layer_id;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-width", &max_width)) {
+ DRM_ERROR("Failed to get logo width prop\n");
+ goto out_put;
+ }
+ if (max_width > XVMIX_LOGO_LAYER_WIDTH_MAX ||
+ max_width < XVMIX_LOGO_LAYER_WIDTH_MIN) {
+ DRM_ERROR("Illegal mixer logo layer width.\n");
+ goto out_put;
+ }
+ layer_data->hw_config.max_width = max_width;
+ mixer_hw->max_logo_layer_width = layer_data->hw_config.max_width;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-height", &max_height)) {
+ DRM_ERROR("Failed to get logo height prop\n");
+ goto out_put;
+ }
+ if (max_height > XVMIX_LOGO_LAYER_HEIGHT_MAX ||
+ max_height < XVMIX_LOGO_LAYER_HEIGHT_MIN) {
+ DRM_ERROR("Illegal mixer logo layer height.\n");
+ goto out_put;
+ }
+ layer_data->hw_config.max_height = max_height;
+ mixer_hw->max_logo_layer_height = layer_data->hw_config.max_height;
+ mixer_hw->logo_pixel_alpha_enabled =
+ of_property_read_bool(logo_node, "xlnx,logo-pixel-alpha");
+ if (mixer_hw->logo_pixel_alpha_enabled)
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGBA8888;
+ ret = 0;
+
+out_put:
+ /* Drop the child node reference on all paths (original leaked it) */
+ of_node_put(logo_node);
+ return ret;
+}
+
+/*
+ * Parse the mixer's global device-tree properties, map its registers,
+ * size the layer/plane arrays and parse the background and logo layers.
+ *
+ * Return: 0 on success, negative errno (including -EPROBE_DEFER)
+ * otherwise.
+ */
+static int xlnx_mix_dt_parse(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *planes;
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *vtc_node;
+ struct xlnx_mix_layer_data *l_data;
+ struct resource res;
+ int ret, l_cnt, i;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ mixer->dpms = DRM_MODE_DPMS_OFF;
+
+ mixer_hw->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mixer_hw->reset_gpio)) {
+ ret = PTR_ERR(mixer_hw->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(dev, "No gpio probed for mixer. Deferring\n");
+ else
+ dev_err(dev, "No reset gpio info from dts for mixer\n");
+ return ret;
+ }
+ /* Pulse the reset line low then high to bring the IP out of reset */
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Invalid memory address for mixer %d\n", ret);
+ return ret;
+ }
+ /* Read in mandatory global dts properties */
+ mixer_hw->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(mixer_hw->base)) {
+ dev_err(dev, "Failed to map io mem space for mixer\n");
+ return PTR_ERR(mixer_hw->base);
+ }
+ /*
+ * Mixer v4.0 moved the logo-enable bit and grew to 18 layers; the
+ * compatible string selects the matching register layout.
+ */
+ if (of_device_is_compatible(dev->of_node, "xlnx,mixer-4.0")) {
+ mixer_hw->max_layers = 18;
+ mixer_hw->logo_en_mask = BIT(23);
+ mixer_hw->enable_all_mask = (GENMASK(16, 0) |
+ mixer_hw->logo_en_mask);
+ } else {
+ mixer_hw->max_layers = 10;
+ mixer_hw->logo_en_mask = BIT(15);
+ mixer_hw->enable_all_mask = (GENMASK(8, 0) |
+ mixer_hw->logo_en_mask);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-layers",
+ &mixer_hw->num_layers);
+ if (ret) {
+ dev_err(dev, "No xlnx,num-layers dts prop for mixer node\n");
+ return ret;
+ }
+ /* The logo layer always occupies the last hw layer slot */
+ mixer_hw->logo_layer_id = mixer_hw->max_layers - 1;
+ if (mixer_hw->num_layers > mixer_hw->max_layers) {
+ dev_err(dev, "Num layer nodes in device tree > mixer max\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &mixer_hw->dma_addr_size);
+ if (ret) {
+ dev_err(dev, "missing addr-width dts prop\n");
+ return ret;
+ }
+ if (mixer_hw->dma_addr_size != 32 && mixer_hw->dma_addr_size != 64) {
+ dev_err(dev, "invalid addr-width dts prop\n");
+ return -EINVAL;
+ }
+
+ /* VTC Bridge support */
+ /* NOTE(review): the vtc_node reference from of_parse_phandle() is
+ * never of_node_put() — confirm whether of_xlnx_bridge_get() takes
+ * ownership of it.
+ */
+ vtc_node = of_parse_phandle(node, "xlnx,bridge", 0);
+ if (vtc_node) {
+ mixer->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+ if (!mixer->vtc_bridge) {
+ dev_info(dev, "Didn't get vtc bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ } else {
+ dev_info(dev, "vtc bridge property not present\n");
+ }
+
+ mixer_hw->logo_layer_en = of_property_read_bool(node,
+ "xlnx,logo-layer");
+ /* One extra layer slot when the optional logo layer is enabled */
+ l_cnt = mixer_hw->num_layers + (mixer_hw->logo_layer_en ? 1 : 0);
+ mixer_hw->layer_cnt = l_cnt;
+
+ l_data = devm_kzalloc(dev, sizeof(*l_data) * l_cnt, GFP_KERNEL);
+ if (!l_data)
+ return -ENOMEM;
+ mixer_hw->layer_data = l_data;
+ /* init DRM planes */
+ planes = devm_kzalloc(dev, sizeof(*planes) * l_cnt, GFP_KERNEL);
+ if (!planes)
+ return -ENOMEM;
+ mixer->planes = planes;
+ mixer->num_planes = l_cnt;
+ for (i = 0; i < mixer->num_planes; i++)
+ mixer->planes[i].mixer = mixer;
+
+ /* establish background layer video properties from dts */
+ ret = xlnx_mix_parse_dt_bg_video_fmt(node, mixer_hw);
+ if (ret)
+ return ret;
+ if (mixer_hw->logo_layer_en) {
+ /* read logo data from dts */
+ ret = xlnx_mix_parse_dt_logo_data(node, mixer_hw);
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Parse one overlay layer node (@name) into @layer and register its drm
+ * plane. On success the node reference is consumed by
+ * xlnx_mix_init_plane(); on early errors it is dropped here (the
+ * original leaked it on every early return).
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_of_init_layer(struct device *dev, struct device_node *node,
+ char *name, struct xlnx_mix_layer_data *layer,
+ u32 max_width, struct xlnx_mix *mixer, int id)
+{
+ struct device_node *layer_node;
+ const char *vformat;
+ int ret;
+
+ layer_node = of_get_child_by_name(node, name);
+ if (!layer_node)
+ return -EINVAL;
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.is_streaming = false;
+ layer->hw_config.max_width = max_width;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+ layer->hw_config.vid_fmt = 0;
+ layer->id = 0;
+ mixer->planes[id].mixer_layer = layer;
+
+ ret = of_property_read_u32(layer_node, "xlnx,layer-id", &layer->id);
+ if (ret) {
+ dev_err(dev, "xlnx,layer-id property not found\n");
+ goto err_put;
+ }
+ if (layer->id < 1 || layer->id >= mixer->mixer_hw.max_layers) {
+ dev_err(dev, "Mixer layer id %u in dts is out of legal range\n",
+ layer->id);
+ ret = -EINVAL;
+ goto err_put;
+ }
+ ret = of_property_read_string(layer_node, "xlnx,vformat", &vformat);
+ if (ret) {
+ dev_err(dev, "No mixer layer vformat in dts for layer id %d\n",
+ layer->id);
+ goto err_put;
+ }
+
+ /* vid_fmt is a fourcc held in a u32 */
+ strcpy((char *)&layer->hw_config.vid_fmt, vformat);
+ layer->hw_config.can_scale =
+ of_property_read_bool(layer_node, "xlnx,layer-scale");
+ if (layer->hw_config.can_scale) {
+ ret = of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width);
+ if (ret) {
+ dev_err(dev, "Mixer layer %d dts missing width prop.\n",
+ layer->id);
+ goto err_put;
+ }
+
+ if (layer->hw_config.max_width > max_width) {
+ dev_err(dev, "Illegal Mixer layer %d width %d\n",
+ layer->id, layer->hw_config.max_width);
+ ret = -EINVAL;
+ goto err_put;
+ }
+ }
+ layer->hw_config.can_alpha =
+ of_property_read_bool(layer_node, "xlnx,layer-alpha");
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_bool(layer_node, "xlnx,layer-primary")) {
+ if (mixer->drm_primary_layer) {
+ dev_err(dev,
+ "More than one primary layer in mixer dts\n");
+ ret = -EINVAL;
+ goto err_put;
+ }
+ mixer->drm_primary_layer = &mixer->planes[id];
+ }
+ /* init_plane consumes the layer_node reference on all its paths */
+ ret = xlnx_mix_init_plane(&mixer->planes[id], 1, layer_node);
+ if (ret)
+ dev_err(dev, "Unable to init drm mixer plane id = %u", id);
+
+ return ret;
+
+err_put:
+ of_node_put(layer_node);
+ return ret;
+}
+
+/* Shared irq handler: forward frame-done interrupts to the installed
+ * callback and acknowledge them in the IP.
+ */
+static irqreturn_t xlnx_mix_intr_handler(int irq, void *data)
+{
+ struct xlnx_mix_hw *mixer = data;
+ u32 status;
+
+ status = xlnx_mix_get_intr_status(mixer);
+ if (!status)
+ return IRQ_NONE;
+
+ if (mixer->intrpt_handler_fn)
+ mixer->intrpt_handler_fn(mixer->intrpt_data);
+
+ xlnx_mix_clear_intr_status(mixer, status);
+ return IRQ_HANDLED;
+}
+
+/* Create the "scale" and "alpha" range properties attached to planes */
+static void xlnx_mix_create_plane_properties(struct xlnx_mix *mixer)
+{
+ struct drm_device *drm = mixer->drm;
+
+ mixer->scale_prop = drm_property_create_range(drm, 0, "scale",
+ XVMIX_SCALE_FACTOR_1X,
+ XVMIX_SCALE_FACTOR_4X);
+ mixer->alpha_prop = drm_property_create_range(drm, 0, "alpha",
+ XVMIX_ALPHA_MIN,
+ XVMIX_ALPHA_MAX);
+}
+
+/*
+ * Create all drm planes for the mixer: master (layer_0), optional logo,
+ * and the overlay layers, then request the irq and read xlnx,ppc.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_plane_create(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *layer_node;
+ char name[20];
+ struct xlnx_mix_layer_data *layer_data;
+ int ret, i;
+ int layer_idx;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ xlnx_mix_create_plane_properties(mixer);
+
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].id = XVMIX_MASTER_LAYER_IDX;
+ mixer->hw_master_layer = &mixer->planes[XVMIX_MASTER_LAYER_IDX];
+
+ if (mixer_hw->logo_layer_en) {
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].id = XVMIX_LOGO_LAYER_IDX;
+ mixer->hw_logo_layer = &mixer->planes[XVMIX_LOGO_LAYER_IDX];
+ layer_node = of_get_child_by_name(node, "logo");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_LOGO_LAYER_IDX],
+ 1, layer_node);
+ if (ret)
+ return ret;
+ }
+ layer_idx = mixer_hw->logo_layer_en ? 2 : 1;
+ for (i = 1; i < mixer_hw->num_layers; i++, layer_idx++) {
+ snprintf(name, sizeof(name), "layer_%d", i);
+ ret = xlnx_mix_of_init_layer(dev, node, name,
+ &mixer_hw->layer_data[layer_idx],
+ mixer_hw->max_layer_width,
+ mixer, layer_idx);
+ if (ret)
+ return ret;
+ }
+ /* If none of the overlay layers were designated as the drm
+ * primary layer, default to the mixer's video0 layer as drm primary
+ */
+ if (!mixer->drm_primary_layer)
+ mixer->drm_primary_layer = mixer->hw_master_layer;
+ layer_node = of_get_child_by_name(node, "layer_0");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_MASTER_LAYER_IDX], 1,
+ layer_node);
+ /* Check the result: the original silently overwrote it below */
+ if (ret)
+ return ret;
+ /* request irq and obtain pixels-per-clock (ppc) property */
+ mixer_hw->irq = irq_of_parse_and_map(node, 0);
+ if (mixer_hw->irq > 0) {
+ ret = devm_request_irq(dev, mixer_hw->irq,
+ xlnx_mix_intr_handler,
+ IRQF_SHARED, "xlnx-mixer", mixer_hw);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+ }
+ ret = of_property_read_u32(node, "xlnx,ppc", &mixer_hw->ppc);
+ if (ret) {
+ dev_err(dev, "No xlnx,ppc property for mixer dts\n");
+ return ret;
+ }
+
+ mixer->max_width = XVMIX_DISP_MAX_WIDTH;
+ mixer->max_height = XVMIX_DISP_MAX_HEIGHT;
+ if (mixer->hw_logo_layer) {
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->max_cursor_width = layer_data->hw_config.max_width;
+ mixer->max_cursor_height = layer_data->hw_config.max_height;
+ }
+ return 0;
+}
+
+/**
+ * xlnx_mix_plane_restore - Restore the plane states
+ * @mixer: mixer device core structure
+ *
+ * Restore the plane states to the default ones. Any state that needs to be
+ * restored should be here. This improves consistency as applications see
+ * the same default values, and removes mismatch between software and hardware
+ * values as software values are updated as hardware values are reset.
+ */
+static void xlnx_mix_plane_restore(struct xlnx_mix *mixer)
+{
+ unsigned int i;
+
+ if (!mixer)
+ return;
+ /*
+ * Reinitialize property default values as they get reset by DPMS OFF
+ * operation. User will read the correct default values later, and
+ * planes will be initialized with default values.
+ *
+ * The address of an array element is never NULL, so no per-plane
+ * check is needed (the original had a dead "if (!plane)" test).
+ */
+ for (i = 0; i < mixer->num_planes; i++)
+ xlnx_mix_hw_plane_dpms(&mixer->planes[i], DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * xlnx_mix_set_bkg_col - Set background color
+ * @mixer: Mixer instance to program with new background color
+ * @rgb_value: RGB encoded as 32-bit integer in little-endian format
+ *
+ * Set the color to be output as background color when the background
+ * stream layer is disabled.
+ */
+static void xlnx_mix_set_bkg_col(struct xlnx_mix_hw *mixer, u64 rgb_value)
+{
+ u32 bg_bpc = mixer->bg_layer_bpc;
+ u32 bpc_mask_shift = XVMIX_MAX_BPC - bg_bpc;
+ u32 val_mask = (GENMASK(15, 0) >> bpc_mask_shift);
+ /* rgb_value packs B|G|R, each bg_bpc bits wide, R in the low bits */
+ u16 b_val = (rgb_value >> (bg_bpc * 2)) & val_mask;
+ u16 g_val = (rgb_value >> bg_bpc) & val_mask;
+ u16 r_val = (rgb_value >> 0) & val_mask;
+
+ /* Set Background Color */
+ reg_writel(mixer->base, XVMIX_BACKGROUND_Y_R_DATA, r_val);
+ reg_writel(mixer->base, XVMIX_BACKGROUND_U_G_DATA, g_val);
+ reg_writel(mixer->base, XVMIX_BACKGROUND_V_B_DATA, b_val);
+ /* Cache the value so it can be restored after a reset */
+ mixer->bg_color = rgb_value;
+}
+
+/**
+ * xlnx_mix_reset - Reset the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Toggle the reset gpio and restores the bg color, plane and interrupt mask.
+ */
+static void xlnx_mix_reset(struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw = &mixer->mixer_hw;
+
+ /* Pulse the reset line low then high */
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+ /* restore layer properties and bg color after reset */
+ xlnx_mix_set_bkg_col(mixer_hw, mixer_hw->bg_color);
+ xlnx_mix_plane_restore(mixer);
+ xlnx_mix_intrpt_enable_done(&mixer->mixer_hw);
+}
+
+/* Start or stop the mixer IP; a stop also resets and re-inits the core */
+static void xlnx_mix_dpms(struct xlnx_mix *mixer, int dpms)
+{
+ struct xlnx_mix_hw *mixer_hw = &mixer->mixer_hw;
+
+ if (dpms == DRM_MODE_DPMS_ON) {
+ xlnx_mix_start(mixer_hw);
+ return;
+ }
+
+ xlnx_mix_stop(mixer_hw);
+ mdelay(50); /* let IP shut down */
+ xlnx_mix_reset(mixer);
+}
+
+/*
+ * Set the crtc DPMS state: manage the pixel clock, the optional VTC
+ * bridge timing and the mixer/primary-plane power state.
+ */
+static void xlnx_mix_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ int ret;
+ struct videomode vm;
+ struct drm_display_mode *mode = &base_crtc->mode;
+
+ DRM_DEBUG_KMS("dpms: %d\n", dpms);
+ if (mixer->dpms == dpms)
+ return;
+ mixer->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (!mixer->pixel_clock_enabled) {
+ ret = clk_prepare_enable(mixer->pixel_clock);
+ /*
+ * Only record the clock as enabled on success; the
+ * original unconditionally set the flag to true even
+ * when clk_prepare_enable() failed.
+ */
+ if (ret)
+ DRM_ERROR("failed to enable a pixel clock\n");
+ else
+ mixer->pixel_clock_enabled = true;
+ }
+
+ if (mixer->vtc_bridge) {
+ drm_display_mode_to_videomode(mode, &vm);
+ xlnx_bridge_set_timing(mixer->vtc_bridge, &vm);
+ xlnx_bridge_enable(mixer->vtc_bridge);
+ }
+
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ break;
+ default:
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_bridge_disable(mixer->vtc_bridge);
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+/* Install the callback invoked from the mixer irq handler */
+static void xlnx_mix_set_intr_handler(struct xlnx_mix *mixer,
+ void (*intr_handler_fn)(void *),
+ void *data)
+{
+ struct xlnx_mix_hw *hw = &mixer->mixer_hw;
+
+ hw->intrpt_handler_fn = intr_handler_fn;
+ hw->intrpt_data = data;
+}
+
+/*
+ * Frame-done callback: signal vblank and complete any pending page flip.
+ * Runs from the mixer irq handler.
+ */
+static void xlnx_mix_crtc_vblank_handler(void *data)
+{
+ struct drm_crtc *base_crtc = data;
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(base_crtc);
+ /* Finish page flip */
+ /* event_lock protects mixer->event against atomic_begin() */
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = mixer->event;
+ mixer->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(base_crtc, event);
+ /* Release the reference taken in atomic_begin() */
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/* Route mixer frame-done interrupts to the crtc vblank handler */
+static int xlnx_mix_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xlnx_mix *mixer = to_xlnx_mixer(to_xlnx_crtc(base_crtc));
+
+ xlnx_mix_set_intr_handler(mixer, xlnx_mix_crtc_vblank_handler,
+ base_crtc);
+ return 0;
+}
+
+/* Detach the vblank callback from the mixer irq path */
+static void xlnx_mix_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xlnx_mix *mixer = to_xlnx_mixer(to_xlnx_crtc(base_crtc));
+ struct xlnx_mix_hw *hw = &mixer->mixer_hw;
+
+ hw->intrpt_handler_fn = NULL;
+ hw->intrpt_data = NULL;
+}
+
+/* Tear down the crtc: drop property references, power off, release clk */
+static void xlnx_mix_crtc_destroy(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ /* make sure crtc is off */
+ /* NOTE(review): these appear to be drm_property pointers on struct
+ * xlnx_mix (distinct from xlnx_mix_hw.bg_color, a u64) — confirm
+ * bg_color here is indeed a pointer, and whether the properties
+ * should be destroyed rather than just forgotten.
+ */
+ mixer->alpha_prop = NULL;
+ mixer->scale_prop = NULL;
+ mixer->bg_color = NULL;
+ xlnx_mix_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ drm_crtc_cleanup(base_crtc);
+}
+
+/* Accept any crtc property write as a no-op.
+ * NOTE(review): returning 0 for unknown properties silently discards
+ * them — confirm whether -EINVAL is more appropriate here.
+ */
+static int
+xlnx_mix_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+/* Crtc property read stub: *val is left untouched and 0 is returned */
+static int
+xlnx_mix_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ return 0;
+}
+
+/* drm_crtc_funcs for the mixer crtc, backed by the atomic helpers */
+static struct drm_crtc_funcs xlnx_mix_crtc_funcs = {
+ .destroy = xlnx_mix_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_set_property = xlnx_mix_disp_crtc_atomic_set_property,
+ .atomic_get_property = xlnx_mix_disp_crtc_atomic_get_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = xlnx_mix_crtc_enable_vblank,
+ .disable_vblank = xlnx_mix_crtc_disable_vblank,
+};
+
+/* Enable the crtc by driving its DPMS state on */
+static void
+xlnx_mix_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/**
+ * xlnx_mix_clear_event - Clear any event if pending
+ * @crtc: DRM crtc object
+ *
+ * Complete a pending commit event directly, without waiting for a
+ * vblank that will never arrive once the crtc is disabled.
+ */
+static void xlnx_mix_clear_event(struct drm_crtc *crtc)
+{
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+}
+
+/* Disable the crtc and flush any pending commit event */
+static void
+xlnx_mix_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ xlnx_mix_clear_event(crtc);
+}
+
+/* No crtc-level mode programming: timing is handled by the VTC bridge */
+static void xlnx_mix_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+}
+
+/* Pull all planes on this crtc into the atomic state for validation */
+static int xlnx_mix_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+/*
+ * Take over the pending commit event so the vblank handler can send it
+ * when the frame completes.
+ */
+static void
+xlnx_mix_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ /* Don't rely on vblank when disabling crtc */
+ if (crtc->state->event) {
+ struct xlnx_crtc *xcrtc = to_xlnx_crtc(crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(xcrtc);
+
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ /* Reference released by the vblank handler after sending */
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ mixer->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
+}
+
+/* Atomic helper callbacks for the mixer crtc */
+static struct drm_crtc_helper_funcs xlnx_mix_crtc_helper_funcs = {
+ .atomic_enable = xlnx_mix_crtc_atomic_enable,
+ .atomic_disable = xlnx_mix_crtc_atomic_disable,
+ .mode_set_nofb = xlnx_mix_crtc_mode_set_nofb,
+ .atomic_check = xlnx_mix_crtc_atomic_check,
+ .atomic_begin = xlnx_mix_crtc_atomic_begin,
+};
+
+/**
+ * xlnx_mix_crtc_create - create crtc for mixer
+ * @mixer: xilinx video mixer object
+ *
+ * Return:
+ * Zero on success, error on failure
+ *
+ */
+static int xlnx_mix_crtc_create(struct xlnx_mix *mixer)
+{
+ struct xlnx_crtc *crtc;
+ struct drm_plane *primary_plane;
+ struct drm_plane *cursor_plane;
+ int ret, i;
+
+ crtc = &mixer->crtc;
+ /* Use these locals in the init call below; the original computed
+ * them and then never used them (-Wunused-but-set-variable).
+ */
+ primary_plane = &mixer->drm_primary_layer->base;
+ cursor_plane = &mixer->hw_logo_layer->base;
+
+ for (i = 0; i < mixer->num_planes; i++)
+ xlnx_mix_attach_plane_prop(&mixer->planes[i]);
+ /* A missing pixel clock is tolerated: clk API accepts NULL */
+ mixer->pixel_clock = devm_clk_get(mixer->drm->dev, NULL);
+ if (IS_ERR(mixer->pixel_clock)) {
+ DRM_DEBUG_KMS("failed to get pixel clock\n");
+ mixer->pixel_clock = NULL;
+ }
+ ret = clk_prepare_enable(mixer->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ mixer->pixel_clock_enabled = false;
+ goto err_plane;
+ }
+ mixer->pixel_clock_enabled = true;
+ /* initialize drm crtc */
+ ret = drm_crtc_init_with_planes(mixer->drm, &crtc->crtc,
+ primary_plane, cursor_plane,
+ &xlnx_mix_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize mixer crtc\n");
+ goto err_pixel_clk;
+ }
+ drm_crtc_helper_add(&crtc->crtc, &xlnx_mix_crtc_helper_funcs);
+ crtc->get_max_width = &xlnx_mix_crtc_get_max_width;
+ crtc->get_max_height = &xlnx_mix_crtc_get_max_height;
+ crtc->get_align = &xlnx_mix_crtc_get_align;
+ crtc->get_format = &xlnx_mix_crtc_get_format;
+ crtc->get_cursor_height = &xlnx_mix_crtc_get_max_cursor_height;
+ crtc->get_cursor_width = &xlnx_mix_crtc_get_max_cursor_width;
+ xlnx_crtc_register(mixer->drm, crtc);
+
+ return 0;
+
+err_pixel_clk:
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+err_plane:
+ return ret;
+}
+
+/**
+ * xlnx_mix_init - Establishes a default power-on state for the mixer IP
+ * core
+ * @mixer: instance of IP core to initialize to a default state
+ *
+ * Background layer initialized to maximum height and width settings based on
+ * device tree properties and all overlay layers set to minimum height and width
+ * sizes and positioned to 0,0 in the crtc. All layers are inactive (resulting
+ * in video output being generated by the background color generator).
+ * Interrupts are disabled and the IP is started (with auto-restart enabled).
+ */
+static void xlnx_mix_init(struct xlnx_mix_hw *mixer)
+{
+ u32 i;
+ u32 bg_bpc = mixer->bg_layer_bpc;
+ /* Max-value component shifted into the blue position (bpc*2) */
+ u64 rgb_bg_clr = (0xFFFF >> (XVMIX_MAX_BPC - bg_bpc)) << (bg_bpc * 2);
+ enum xlnx_mix_layer_id layer_id;
+ struct xlnx_mix_layer_data *layer_data;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+ /* NOTE(review): max_layers here presumably acts as an "all layers"
+ * sentinel for xlnx_mix_layer_disable() — confirm.
+ */
+ xlnx_mix_layer_disable(mixer, mixer->max_layers);
+ xlnx_mix_set_active_area(mixer, layer_data->hw_config.max_width,
+ layer_data->hw_config.max_height);
+ /* default to blue */
+ xlnx_mix_set_bkg_col(mixer, rgb_bg_clr);
+
+ for (i = 0; i < mixer->layer_cnt; i++) {
+ layer_id = mixer->layer_data[i].id;
+ layer_data = &mixer->layer_data[i];
+ if (layer_id == XVMIX_LAYER_MASTER)
+ continue;
+ xlnx_mix_set_layer_window(mixer, layer_id, 0, 0,
+ XVMIX_LAYER_WIDTH_MIN,
+ XVMIX_LAYER_HEIGHT_MIN, 0);
+ if (layer_data->hw_config.can_scale)
+ xlnx_mix_set_layer_scaling(mixer, layer_id, 0);
+ if (layer_data->hw_config.can_alpha)
+ xlnx_mix_set_layer_alpha(mixer, layer_id,
+ XVMIX_ALPHA_MAX);
+ }
+ xlnx_mix_intrpt_enable_done(mixer);
+}
+
+/*
+ * Component bind: create planes and crtc on the drm device, then put
+ * the mixer hardware into its default power-on state.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_mix *mixer = dev_get_drvdata(dev);
+ struct drm_device *drm = data;
+ /* Must be a signed int: the helpers return negative errnos */
+ int ret;
+
+ mixer->drm = drm;
+ ret = xlnx_mix_plane_create(dev, mixer);
+ if (ret)
+ return ret;
+ ret = xlnx_mix_crtc_create(mixer);
+ if (ret)
+ return ret;
+ xlnx_mix_init(&mixer->mixer_hw);
+
+ return 0;
+}
+
+/* Component unbind: mask interrupts and drop the crtc registration */
+static void xlnx_mix_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_mix *mixer = dev_get_drvdata(dev);
+
+ dev_set_drvdata(dev, NULL);
+ xlnx_mix_intrpt_disable(&mixer->mixer_hw);
+ xlnx_crtc_unregister(mixer->drm, &mixer->crtc);
+}
+
+/* Component framework hooks used by the xlnx drm master driver */
+static const struct component_ops xlnx_mix_component_ops = {
+ .bind = xlnx_mix_bind,
+ .unbind = xlnx_mix_unbind,
+};
+
+/*
+ * Platform probe: parse the device tree, register the component and
+ * initialize the drm pipeline.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_probe(struct platform_device *pdev)
+{
+ struct xlnx_mix *mixer;
+ int ret;
+
+ mixer = devm_kzalloc(&pdev->dev, sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return -ENOMEM;
+
+ /* Sub-driver will access mixer from drvdata */
+ platform_set_drvdata(pdev, mixer);
+ ret = xlnx_mix_dt_parse(&pdev->dev, mixer);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to probe mixer\n");
+ return ret;
+ }
+
+ ret = component_add(&pdev->dev, &xlnx_mix_component_ops);
+ if (ret)
+ goto err;
+
+ mixer->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(mixer->master)) {
+ /*
+ * Propagate the failure: ret was still 0 here in the
+ * original, so probe wrongly reported success.
+ */
+ ret = PTR_ERR(mixer->master);
+ dev_err(&pdev->dev, "Failed to initialize the drm pipeline\n");
+ goto err_component;
+ }
+
+ dev_info(&pdev->dev, "Xilinx Mixer driver probed success\n");
+ return ret;
+
+err_component:
+ component_del(&pdev->dev, &xlnx_mix_component_ops);
+err:
+ return ret;
+}
+
+/* Platform remove: release bridge, pipeline and component in order */
+static int xlnx_mix_remove(struct platform_device *pdev)
+{
+ struct xlnx_mix *mixer = platform_get_drvdata(pdev);
+
+ of_xlnx_bridge_put(mixer->vtc_bridge);
+ xlnx_drm_pipeline_exit(mixer->master);
+ component_del(&pdev->dev, &xlnx_mix_component_ops);
+ return 0;
+}
+
+/*
+ * TODO:
+ * In Mixer IP core version 4.0, layer enable bits and logo layer offsets
+ * have been changed. To provide backward compatibility number of max layers
+ * field has been taken to differentiate IP versions.
+ * This logic will have to be changed properly using the IP core version.
+ */
+
+/* Supported IP versions; the compatible string selects the register map */
+static const struct of_device_id xlnx_mix_of_match[] = {
+ { .compatible = "xlnx,mixer-3.0", },
+ { .compatible = "xlnx,mixer-4.0", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xlnx_mix_of_match);
+
+/* Platform driver glue for the Xilinx video mixer */
+static struct platform_driver xlnx_mix_driver = {
+ .probe = xlnx_mix_probe,
+ .remove = xlnx_mix_remove,
+ .driver = {
+ .name = "xlnx-mixer",
+ .of_match_table = xlnx_mix_of_match,
+ },
+};
+
+module_platform_driver(xlnx_mix_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx Mixer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_pl_disp.c b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
new file mode 100644
index 000000000000..a4de9b31a717
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM CRTC DMA engine driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * This driver intends to support the display pipeline with DMA engine
+ * driver by initializing DRM crtc and plane objects. The driver makes
+ * an assumption that it's single plane pipeline, as multi-plane pipeline
+ * would require programming beyond the DMA engine interface.
+ */
+
+/**
+ * struct xlnx_dma_chan - struct for DMA engine
+ * @dma_chan: DMA channel handle requested in probe ("dma0")
+ * @xt: Interleaved desc config container, filled in plane mode_set
+ * @sgl: single data chunk (line size/icg/src_icg) for @xt
+ */
+struct xlnx_dma_chan {
+	struct dma_chan *dma_chan;
+	struct dma_interleaved_template xt;
+	struct data_chunk sgl[1];
+};
+
+/**
+ * struct xlnx_pl_disp - struct for display subsystem
+ * @dev: device structure
+ * @master: logical master device from xlnx drm
+ * @xlnx_crtc: Xilinx DRM driver crtc object
+ * @plane: base drm plane object (this driver exposes a single primary plane)
+ * @chan: struct for DMA engine
+ * @event: vblank pending event
+ * @callback: DMA completion callback; doubles as the vblank notification
+ * @callback_param: parameter for passing to DMA callback function
+ * @drm: core drm object
+ * @fmt: drm fourcc color format (parsed from the "xlnx,vformat" DT prop)
+ * @vtc_bridge: optional video timing controller bridge
+ * @fid: field id for interlaced scan-out (1 = top field, 0 = bottom field)
+ */
+struct xlnx_pl_disp {
+	struct device *dev;
+	struct platform_device *master;
+	struct xlnx_crtc xlnx_crtc;
+	struct drm_plane plane;
+	struct xlnx_dma_chan *chan;
+	struct drm_pending_vblank_event *event;
+	dma_async_tx_callback callback;
+	void *callback_param;
+	struct drm_device *drm;
+	u32 fmt;
+	struct xlnx_bridge *vtc_bridge;
+	u32 fid;
+};
+
+/*
+ * Xlnx crtc functions
+ */
+
+/* crtc_to_dma - map a struct xlnx_crtc back to its xlnx_pl_disp container */
+static inline struct xlnx_pl_disp *crtc_to_dma(struct xlnx_crtc *xlnx_crtc)
+{
+	return container_of(xlnx_crtc, struct xlnx_pl_disp, xlnx_crtc);
+}
+
+/**
+ * xlnx_pl_disp_complete - DMA completion callback used as vblank handler
+ * @param: driver context (a struct xlnx_pl_disp pointer)
+ *
+ * Forwards the DMA frame-done notification to the DRM core as a vblank
+ * event on crtc 0.
+ */
+static void xlnx_pl_disp_complete(void *param)
+{
+	struct xlnx_pl_disp *disp = param;
+
+	drm_handle_vblank(disp->drm, 0);
+}
+
+/**
+ * xlnx_pl_disp_get_format - Get the current display pipeline format
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Return: the DRM_FORMAT_XXX fourcc currently configured for the pipeline.
+ */
+static uint32_t xlnx_pl_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+	return crtc_to_dma(xlnx_crtc)->fmt;
+}
+
+/**
+ * xlnx_pl_disp_get_align - Get the alignment value for pitch
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Return: the pitch alignment in bytes, derived from the DMA device's
+ * copy_align (which is expressed as a power-of-two shift).
+ */
+static unsigned int xlnx_pl_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+	struct xlnx_pl_disp *disp = crtc_to_dma(xlnx_crtc);
+	struct dma_device *dma_dev = disp->chan->dma_chan->device;
+
+	return 1 << dma_dev->copy_align;
+}
+
+/*
+ * DRM plane functions
+ */
+
+/* plane_to_dma - map a struct drm_plane back to its xlnx_pl_disp container */
+static inline struct xlnx_pl_disp *plane_to_dma(struct drm_plane *plane)
+{
+	return container_of(plane, struct xlnx_pl_disp, plane);
+}
+
+/**
+ * xlnx_pl_disp_plane_disable - Disables DRM plane
+ * @plane: DRM plane object
+ *
+ * Disable the DRM plane by stopping the corresponding DMA channel and
+ * waiting for the termination to complete.
+ */
+static void xlnx_pl_disp_plane_disable(struct drm_plane *plane)
+{
+	struct xlnx_pl_disp *disp = plane_to_dma(plane);
+
+	dmaengine_terminate_sync(disp->chan->dma_chan);
+}
+
+/**
+ * xlnx_pl_disp_plane_enable - Enables DRM plane
+ * @plane: DRM plane object
+ *
+ * Prepares an interleaved DMA descriptor from the template filled in by
+ * xlnx_pl_disp_plane_mode_set(), attaches the vblank callback (if armed),
+ * programs the field id for interlaced framebuffers, then submits the
+ * descriptor and kicks the DMA engine.
+ */
+static void xlnx_pl_disp_plane_enable(struct drm_plane *plane)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+	struct dma_async_tx_descriptor *desc;
+	enum dma_ctrl_flags flags;
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+	struct dma_chan *dma_chan = xlnx_dma_chan->dma_chan;
+	struct dma_interleaved_template *xt = &xlnx_dma_chan->xt;
+
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	desc = dmaengine_prep_interleaved_dma(dma_chan, xt, flags);
+	if (!desc) {
+		dev_err(xlnx_pl_disp->dev,
+			"failed to prepare DMA descriptor\n");
+		return;
+	}
+	desc->callback = xlnx_pl_disp->callback;
+	desc->callback_param = xlnx_pl_disp->callback_param;
+	/* request early callback delivery from the Xilinx frmbuf DMA */
+	xilinx_xdma_set_earlycb(xlnx_dma_chan->dma_chan, desc, EARLY_CALLBACK);
+
+	/*
+	 * NOTE(review): compares the whole fb->flags word for equality
+	 * rather than masking the ALTERNATE_* bits; assumes no other fb
+	 * flag is ever set together with them — confirm.
+	 */
+	if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP ||
+	    plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_BOTTOM) {
+		/* field id: 1 for the top field, 0 for the bottom field */
+		if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP)
+			xlnx_pl_disp->fid = 1;
+		else
+			xlnx_pl_disp->fid = 0;
+
+		xilinx_xdma_set_fid(xlnx_dma_chan->dma_chan, desc,
+				    xlnx_pl_disp->fid);
+	}
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(xlnx_dma_chan->dma_chan);
+}
+
+/**
+ * xlnx_pl_disp_plane_atomic_disable - Atomic helper hook for plane disable
+ * @plane: DRM plane object
+ * @old_state: previous plane state (unused)
+ */
+static void xlnx_pl_disp_plane_atomic_disable(struct drm_plane *plane,
+					      struct drm_plane_state *old_state)
+{
+	xlnx_pl_disp_plane_disable(plane);
+}
+
+/**
+ * xlnx_pl_disp_plane_mode_set - Fill the DMA template for a new framebuffer
+ * @plane: DRM plane object
+ * @fb: framebuffer to scan out
+ * @crtc_x: unused by this driver
+ * @crtc_y: unused
+ * @crtc_w: unused
+ * @crtc_h: unused
+ * @src_x: unused
+ * @src_y: unused
+ * @src_w: source width in pixels
+ * @src_h: source height in lines
+ *
+ * Converts the framebuffer layout (at most two planes: luma + chroma)
+ * into the interleaved DMA template that xlnx_pl_disp_plane_enable()
+ * later turns into a descriptor.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported format or a missing
+ * GEM address.
+ */
+static int xlnx_pl_disp_plane_mode_set(struct drm_plane *plane,
+				       struct drm_framebuffer *fb,
+				       int crtc_x, int crtc_y,
+				       unsigned int crtc_w, unsigned int crtc_h,
+				       u32 src_x, uint32_t src_y,
+				       u32 src_w, uint32_t src_h)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+	const struct drm_format_info *info = fb->format;
+	dma_addr_t luma_paddr, chroma_paddr;
+	size_t stride;
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+	if (info->num_planes > 2) {
+		dev_err(xlnx_pl_disp->dev, "Color format not supported\n");
+		return -EINVAL;
+	}
+	luma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+	if (!luma_paddr) {
+		dev_err(xlnx_pl_disp->dev, "failed to get luma paddr\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(xlnx_pl_disp->dev, "num planes = %d\n", info->num_planes);
+	/* one frame = numf lines; icg skips the pitch padding per line */
+	xlnx_dma_chan->xt.numf = src_h;
+	xlnx_dma_chan->sgl[0].size = drm_format_plane_width_bytes(info,
+								  0, src_w);
+	xlnx_dma_chan->sgl[0].icg = fb->pitches[0] - xlnx_dma_chan->sgl[0].size;
+	xlnx_dma_chan->xt.src_start = luma_paddr;
+	xlnx_dma_chan->xt.frame_size = info->num_planes;
+	xlnx_dma_chan->xt.dir = DMA_MEM_TO_DEV;
+	xlnx_dma_chan->xt.src_sgl = true;
+	xlnx_dma_chan->xt.dst_sgl = false;
+
+	/* Do we have a video format aware dma channel?
+	 * so, modify descriptor accordingly. Heuristic test:
+	 * we have a multi-plane format but only one dma channel
+	 */
+	if (info->num_planes > 1) {
+		chroma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 1);
+		if (!chroma_paddr) {
+			dev_err(xlnx_pl_disp->dev,
+				"failed to get chroma paddr\n");
+			return -EINVAL;
+		}
+		/*
+		 * src_icg = gap between the end of the luma plane
+		 * (src_start + numf * stride) and the chroma plane start.
+		 */
+		stride = xlnx_dma_chan->sgl[0].size +
+			 xlnx_dma_chan->sgl[0].icg;
+		xlnx_dma_chan->sgl[0].src_icg = chroma_paddr -
+						xlnx_dma_chan->xt.src_start -
+						(xlnx_dma_chan->xt.numf * stride);
+	}
+
+	return 0;
+}
+
+/**
+ * xlnx_pl_disp_plane_atomic_update - Apply the new plane state
+ * @plane: DRM plane object
+ * @old_state: previous plane state (unused)
+ *
+ * Rebuilds the DMA template from the new framebuffer and source
+ * rectangle, pushes the new color format to the DMA channel, and
+ * re-submits the descriptor.
+ */
+static void xlnx_pl_disp_plane_atomic_update(struct drm_plane *plane,
+					     struct drm_plane_state *old_state)
+{
+	int ret;
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+
+	/* src_* coordinates are 16.16 fixed point; shift to integer pixels */
+	ret = xlnx_pl_disp_plane_mode_set(plane,
+					  plane->state->fb,
+					  plane->state->crtc_x,
+					  plane->state->crtc_y,
+					  plane->state->crtc_w,
+					  plane->state->crtc_h,
+					  plane->state->src_x >> 16,
+					  plane->state->src_y >> 16,
+					  plane->state->src_w >> 16,
+					  plane->state->src_h >> 16);
+	if (ret) {
+		dev_err(xlnx_pl_disp->dev, "failed to mode set a plane\n");
+		return;
+	}
+	/* in case frame buffer is used set the color format */
+	xilinx_xdma_drm_config(xlnx_pl_disp->chan->dma_chan,
+			       xlnx_pl_disp->plane.state->fb->format->format);
+	/* apply the new fb addr and enable */
+	xlnx_pl_disp_plane_enable(plane);
+}
+
+/**
+ * xlnx_pl_disp_plane_atomic_check - Validate new plane state
+ * @plane: DRM plane object
+ * @new_plane_state: plane state to validate
+ *
+ * Rejects an active CRTC without an attached plane and forces a full
+ * modeset when the framebuffer pixel format changes.
+ *
+ * Return: 0 on success, -EINVAL if the CRTC is active without a plane.
+ */
+static int
+xlnx_pl_disp_plane_atomic_check(struct drm_plane *plane,
+				struct drm_plane_state *new_plane_state)
+{
+	struct drm_atomic_state *state = new_plane_state->state;
+	const struct drm_plane_state *old_plane_state =
+		drm_atomic_get_old_plane_state(state, plane);
+	struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+	const struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *new_crtc_state;
+
+	/* nothing to validate when the plane is not (and was not) on a crtc */
+	if (!crtc)
+		return 0;
+
+	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+	/* plane must be enabled when state is active */
+	if (new_crtc_state->active && !new_plane_state->crtc)
+		return -EINVAL;
+
+	/*
+	 * This check is required to call modeset if there is a change in color
+	 * format
+	 */
+	if (new_plane_state->fb && old_plane_state->fb &&
+	    new_plane_state->fb->format->format !=
+	    old_plane_state->fb->format->format)
+		new_crtc_state->mode_changed = true;
+
+	return 0;
+}
+
+/* Atomic helper hooks for the primary plane */
+static const struct drm_plane_helper_funcs xlnx_pl_disp_plane_helper_funcs = {
+	.atomic_update = xlnx_pl_disp_plane_atomic_update,
+	.atomic_disable = xlnx_pl_disp_plane_atomic_disable,
+	.atomic_check = xlnx_pl_disp_plane_atomic_check,
+};
+
+/*
+ * Made const: drm_universal_plane_init() takes a const pointer and the
+ * table is never written at runtime, so it belongs in rodata.
+ */
+static const struct drm_plane_funcs xlnx_pl_disp_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = drm_plane_cleanup,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/* drm_crtc_to_dma - map a struct drm_crtc to its xlnx_pl_disp container */
+static inline struct xlnx_pl_disp *drm_crtc_to_dma(struct drm_crtc *crtc)
+{
+	return crtc_to_dma(to_xlnx_crtc(crtc));
+}
+
+/**
+ * xlnx_pl_disp_crtc_atomic_begin - Prepare the CRTC for an atomic commit
+ * @crtc: DRM crtc object
+ * @old_state: previous crtc state (unused)
+ *
+ * Enables vblank handling and arms the pending flip-done event so it is
+ * delivered on the next vblank.
+ */
+static void xlnx_pl_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+					   struct drm_crtc_state *old_state)
+{
+	drm_crtc_vblank_on(crtc);
+	/* the pending event must be armed under the device's event_lock */
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		/* Consume the flip_done event from atomic helper */
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+/* Complete and drop a pending event instead of delivering it as a vblank */
+static void xlnx_pl_disp_clear_event(struct drm_crtc *crtc)
+{
+	struct drm_pending_vblank_event *event = crtc->state->event;
+
+	if (!event)
+		return;
+
+	crtc->state->event = NULL;
+	complete_all(event->base.completion);
+}
+
+/**
+ * xlnx_pl_disp_crtc_atomic_enable - Enable the CRTC
+ * @crtc: DRM crtc object
+ * @old_state: previous crtc state (unused)
+ *
+ * Programs and enables the optional VTC bridge with the adjusted mode
+ * timings, starts the primary plane's DMA, and waits roughly one vblank
+ * interval so the timing generator can stabilize.
+ */
+static void xlnx_pl_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+					    struct drm_crtc_state *old_state)
+{
+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+	int vrefresh;
+	struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+	struct videomode vm;
+
+	if (xlnx_pl_disp->vtc_bridge) {
+		/* set video timing */
+		drm_display_mode_to_videomode(adjusted_mode, &vm);
+		xlnx_bridge_set_timing(xlnx_pl_disp->vtc_bridge, &vm);
+		xlnx_bridge_enable(xlnx_pl_disp->vtc_bridge);
+	}
+
+	xlnx_pl_disp_plane_enable(crtc->primary);
+
+	/*
+	 * Delay of 1 vblank interval for timing gen to be stable.
+	 * NOTE(review): assumes htotal/vtotal and the computed vrefresh are
+	 * non-zero — a zero would divide by zero here; confirm modes are
+	 * validated before this point.
+	 */
+	vrefresh = (adjusted_mode->clock * 1000) /
+		   (adjusted_mode->vtotal * adjusted_mode->htotal);
+	msleep(1 * 1000 / vrefresh);
+}
+
+/**
+ * xlnx_pl_disp_crtc_atomic_disable - Disable the CRTC
+ * @crtc: DRM crtc object
+ * @old_state: previous crtc state (unused)
+ *
+ * Stops the primary plane's DMA, releases any pending event, turns
+ * vblank handling off, and disables the optional VTC bridge.
+ */
+static void xlnx_pl_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+					     struct drm_crtc_state *old_state)
+{
+	struct xlnx_pl_disp *disp = drm_crtc_to_dma(crtc);
+
+	xlnx_pl_disp_plane_disable(crtc->primary);
+	xlnx_pl_disp_clear_event(crtc);
+	drm_crtc_vblank_off(crtc);
+	xlnx_bridge_disable(disp->vtc_bridge);
+}
+
+/**
+ * xlnx_pl_disp_crtc_atomic_check - Validate new CRTC state
+ * @crtc: DRM crtc object
+ * @state: new crtc state
+ *
+ * Pulls all planes on this CRTC into the atomic state so their checks run.
+ *
+ * Return: 0 on success or a negative errno from the helper.
+ */
+static int xlnx_pl_disp_crtc_atomic_check(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state)
+{
+	return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+/*
+ * Made const: drm_crtc_helper_add() accepts a const pointer; keep the
+ * never-written ops table in rodata.
+ */
+static const struct drm_crtc_helper_funcs xlnx_pl_disp_crtc_helper_funcs = {
+	.atomic_enable = xlnx_pl_disp_crtc_atomic_enable,
+	.atomic_disable = xlnx_pl_disp_crtc_atomic_disable,
+	.atomic_check = xlnx_pl_disp_crtc_atomic_check,
+	.atomic_begin = xlnx_pl_disp_crtc_atomic_begin,
+};
+
+/**
+ * xlnx_pl_disp_crtc_destroy - Destroy the CRTC
+ * @crtc: DRM crtc object
+ *
+ * Stops the primary plane's DMA before releasing the CRTC resources.
+ */
+static void xlnx_pl_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+	xlnx_pl_disp_plane_disable(crtc->primary);
+	drm_crtc_cleanup(crtc);
+}
+
+/**
+ * xlnx_pl_disp_crtc_enable_vblank - Start vblank event delivery
+ * @crtc: DRM crtc object
+ *
+ * Use the DMA completion callback for vblank events, assuming the DMA
+ * engine starts on the next descriptor upon this event. This may not be
+ * a safe assumption for some DMA engines.
+ *
+ * Return: 0 always
+ */
+static int xlnx_pl_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct xlnx_pl_disp *disp = drm_crtc_to_dma(crtc);
+
+	disp->callback = xlnx_pl_disp_complete;
+	disp->callback_param = disp;
+
+	return 0;
+}
+
+/**
+ * xlnx_pl_disp_crtc_disable_vblank - Stop vblank event delivery
+ * @crtc: DRM crtc object
+ *
+ * Detaches the vblank callback from future DMA descriptors.
+ */
+static void xlnx_pl_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct xlnx_pl_disp *disp = drm_crtc_to_dma(crtc);
+
+	disp->callback_param = NULL;
+	disp->callback = NULL;
+}
+
+/*
+ * Made const: drm_crtc_init_with_planes() accepts a const pointer; keep
+ * the never-written ops table in rodata.
+ */
+static const struct drm_crtc_funcs xlnx_pl_disp_crtc_funcs = {
+	.destroy = xlnx_pl_disp_crtc_destroy,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank = xlnx_pl_disp_crtc_enable_vblank,
+	.disable_vblank = xlnx_pl_disp_crtc_disable_vblank,
+};
+
+/**
+ * xlnx_pl_disp_bind - Bind the display to the master DRM device
+ * @dev: this platform device
+ * @master: master component device
+ * @data: struct drm_device of the master
+ *
+ * Initializes the primary plane (with the format list queried from the
+ * DMA when it is a video-format-aware frame-buffer channel, otherwise the
+ * single DT-provided format), creates the CRTC on top of it, and
+ * registers the CRTC with the Xilinx DRM core.
+ *
+ * Return: 0 on success or a negative errno from plane/crtc init.
+ */
+static int xlnx_pl_disp_bind(struct device *dev, struct device *master,
+			     void *data)
+{
+	struct drm_device *drm = data;
+	struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+	int ret;
+	u32 *fmts = NULL;
+	unsigned int num_fmts = 0;
+
+	/* in case of fb IP query the supported formats and their count */
+	xilinx_xdma_get_drm_vid_fmts(xlnx_pl_disp->chan->dma_chan,
+				     &num_fmts, &fmts);
+	ret = drm_universal_plane_init(drm, &xlnx_pl_disp->plane, 0,
+				       &xlnx_pl_disp_plane_funcs,
+				       fmts ? fmts : &xlnx_pl_disp->fmt,
+				       num_fmts ? num_fmts : 1,
+				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret)
+		return ret;
+
+	drm_plane_helper_add(&xlnx_pl_disp->plane,
+			     &xlnx_pl_disp_plane_helper_funcs);
+
+	ret = drm_crtc_init_with_planes(drm, &xlnx_pl_disp->xlnx_crtc.crtc,
+					&xlnx_pl_disp->plane, NULL,
+					&xlnx_pl_disp_crtc_funcs, NULL);
+	if (ret) {
+		drm_plane_cleanup(&xlnx_pl_disp->plane);
+		return ret;
+	}
+
+	drm_crtc_helper_add(&xlnx_pl_disp->xlnx_crtc.crtc,
+			    &xlnx_pl_disp_crtc_helper_funcs);
+	xlnx_pl_disp->xlnx_crtc.get_format = &xlnx_pl_disp_get_format;
+	xlnx_pl_disp->xlnx_crtc.get_align = &xlnx_pl_disp_get_align;
+	xlnx_pl_disp->drm = drm;
+	xlnx_crtc_register(xlnx_pl_disp->drm, &xlnx_pl_disp->xlnx_crtc);
+
+	return 0;
+}
+
+/**
+ * xlnx_pl_disp_unbind - Unbind the display from the master DRM device
+ * @dev: this platform device
+ * @master: master component device (unused)
+ * @data: drm device (unused)
+ *
+ * NOTE(review): the plane is cleaned up before the crtc, and there is no
+ * matching unregister for the xlnx_crtc_register() call made in bind —
+ * confirm the xlnx crtc core tolerates this.
+ */
+static void xlnx_pl_disp_unbind(struct device *dev, struct device *master,
+				void *data)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+
+	drm_plane_cleanup(&xlnx_pl_disp->plane);
+	drm_crtc_cleanup(&xlnx_pl_disp->xlnx_crtc.crtc);
+}
+
+/* component callbacks wired to the master DRM driver */
+static const struct component_ops xlnx_pl_disp_component_ops = {
+	.bind = xlnx_pl_disp_bind,
+	.unbind = xlnx_pl_disp_unbind,
+};
+
+/**
+ * xlnx_pl_disp_probe - Probe the PL display platform device
+ * @pdev: platform device
+ *
+ * Requests the "dma0" DMA channel, parses the "xlnx,vformat" fourcc and
+ * the optional "xlnx,bridge" VTC phandle from the device tree, then
+ * registers the component and initializes the DRM pipeline.
+ *
+ * Fixes over the original:
+ *  - a NULL channel from of_dma_request_slave_channel() no longer yields
+ *    PTR_ERR(NULL) == 0, i.e. a bogus "success" return;
+ *  - the fourcc is copied with a bounded memcpy instead of strcpy() into
+ *    a u32 (a 4-character format's terminating NUL overflowed the field);
+ *  - every failure path after the channel request releases the channel,
+ *    including allocation failure and the -EPROBE_DEFER bridge path;
+ *  - the vtc_node reference is dropped with of_node_put().
+ *
+ * Return: 0 on success, a negative errno otherwise.
+ */
+static int xlnx_pl_disp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *vtc_node;
+	struct xlnx_pl_disp *xlnx_pl_disp;
+	int ret;
+	const char *vformat;
+	struct dma_chan *dma_chan;
+	struct xlnx_dma_chan *xlnx_dma_chan;
+	size_t len;
+
+	xlnx_pl_disp = devm_kzalloc(dev, sizeof(*xlnx_pl_disp), GFP_KERNEL);
+	if (!xlnx_pl_disp)
+		return -ENOMEM;
+
+	dma_chan = of_dma_request_slave_channel(dev->of_node, "dma0");
+	if (IS_ERR_OR_NULL(dma_chan)) {
+		dev_err(dev, "failed to request dma channel\n");
+		/* PTR_ERR(NULL) is 0 (success); map NULL to -ENODEV */
+		return dma_chan ? PTR_ERR(dma_chan) : -ENODEV;
+	}
+
+	xlnx_dma_chan = devm_kzalloc(dev, sizeof(*xlnx_dma_chan), GFP_KERNEL);
+	if (!xlnx_dma_chan) {
+		ret = -ENOMEM;
+		goto err_dma;
+	}
+
+	xlnx_dma_chan->dma_chan = dma_chan;
+	xlnx_pl_disp->chan = xlnx_dma_chan;
+	ret = of_property_read_string(dev->of_node, "xlnx,vformat", &vformat);
+	if (ret) {
+		dev_err(dev, "No xlnx,vformat value in dts\n");
+		goto err_dma;
+	}
+
+	/*
+	 * @fmt holds a fourcc (at most 4 bytes); copy a bounded number of
+	 * bytes rather than strcpy()ing a NUL-terminated string into a u32.
+	 * Any remaining bytes stay zero from devm_kzalloc().
+	 */
+	len = strlen(vformat);
+	if (len > sizeof(xlnx_pl_disp->fmt))
+		len = sizeof(xlnx_pl_disp->fmt);
+	memcpy(&xlnx_pl_disp->fmt, vformat, len);
+
+	/* VTC Bridge support */
+	vtc_node = of_parse_phandle(dev->of_node, "xlnx,bridge", 0);
+	if (vtc_node) {
+		xlnx_pl_disp->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+		of_node_put(vtc_node);
+		if (!xlnx_pl_disp->vtc_bridge) {
+			dev_info(dev, "Didn't get vtc bridge instance\n");
+			/* release the DMA channel before deferring */
+			ret = -EPROBE_DEFER;
+			goto err_dma;
+		}
+	} else {
+		dev_info(dev, "vtc bridge property not present\n");
+	}
+
+	xlnx_pl_disp->dev = dev;
+	platform_set_drvdata(pdev, xlnx_pl_disp);
+
+	ret = component_add(dev, &xlnx_pl_disp_component_ops);
+	if (ret)
+		goto err_dma;
+
+	xlnx_pl_disp->master = xlnx_drm_pipeline_init(pdev);
+	if (IS_ERR(xlnx_pl_disp->master)) {
+		ret = PTR_ERR(xlnx_pl_disp->master);
+		dev_err(dev, "failed to initialize the drm pipeline\n");
+		goto err_component;
+	}
+
+	dev_info(&pdev->dev, "Xlnx PL display driver probed\n");
+
+	return 0;
+
+err_component:
+	component_del(dev, &xlnx_pl_disp_component_ops);
+err_dma:
+	dma_release_channel(dma_chan);
+
+	return ret;
+}
+
+/**
+ * xlnx_pl_disp_remove - Remove the PL display platform device
+ * @pdev: platform device
+ *
+ * Drops the VTC bridge reference, tears down the DRM pipeline, removes
+ * the component, then terminates and releases the DMA channel.
+ *
+ * Return: 0 always
+ */
+static int xlnx_pl_disp_remove(struct platform_device *pdev)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = platform_get_drvdata(pdev);
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+	of_xlnx_bridge_put(xlnx_pl_disp->vtc_bridge);
+	xlnx_drm_pipeline_exit(xlnx_pl_disp->master);
+	component_del(&pdev->dev, &xlnx_pl_disp_component_ops);
+
+	/* Make sure the channel is terminated before release */
+	dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+	dma_release_channel(xlnx_dma_chan->dma_chan);
+
+	return 0;
+}
+
+/* OF match table: generic programmable-logic display pipeline */
+static const struct of_device_id xlnx_pl_disp_of_match[] = {
+	{ .compatible = "xlnx,pl-disp"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xlnx_pl_disp_of_match);
+
+/* Platform glue; probe/remove are defined above */
+static struct platform_driver xlnx_pl_disp_driver = {
+	.probe = xlnx_pl_disp_probe,
+	.remove = xlnx_pl_disp_remove,
+	.driver = {
+		.name = "xlnx-pl-disp",
+		.of_match_table = xlnx_pl_disp_of_match,
+	},
+};
+
+module_platform_driver(xlnx_pl_disp_driver);
+
+MODULE_AUTHOR("Saurabh Sengar")
+MODULE_DESCRIPTION("Xilinx DRM Display Driver for PL IPs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_scaler.c b/drivers/gpu/drm/xlnx/xlnx_scaler.c
new file mode 100644
index 000000000000..9d20671c8c83
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_scaler.c
@@ -0,0 +1,1748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS SCALER DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ * Rohit Athavale <rathavale@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from V4L2.
+ * TODO:
+ * Need to implement in a modular approach to share driver code between
+ * V4L2 and DRM frameworks.
+ * Should be integrated with plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+#define XSCALER_MAX_WIDTH (3840)
+#define XSCALER_MAX_HEIGHT (2160)
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MIN_HEIGHT (64)
+
+/* Video subsystem block offsets */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+#define STEP_PRECISION (65536)
+
+/* SCALER POWER MACROS */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Video IP PPC */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* Scaler AP Control Registers */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
+
+/* H-scaler registers */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA (0X0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xhsc_coeff_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xhsc_coeff_taps8[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xhsc_coeff_taps10[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xhsc_coeff_taps12[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xvsc_coeff_taps6[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xvsc_coeff_taps8[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xvsc_coeff_taps10[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xvsc_coeff_taps12[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
/* Color-space codes written to the scaler COLORMODE register */
enum xilinx_scaler_vid_reg_fmts {
	XVIDC_CSF_RGB = 0,	/* RGB */
	XVIDC_CSF_YCRCB_444,	/* YCbCr 4:4:4 */
	XVIDC_CSF_YCRCB_422,	/* YCbCr 4:2:2 */
	XVIDC_CSF_YCRCB_420,	/* YCbCr 4:2:0 */
};
+
/*
 * Media bus formats supported by the scaler, listed in the same order as
 * enum xilinx_scaler_vid_reg_fmts (RGB, 4:4:4, 4:2:2, 4:2:0).
 */
static const u32 xilinx_scaler_video_fmts[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_VUY8_1X24,
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_VYYUYY8_1X24,
};
+
/**
 * struct xilinx_scaler - Core configuration of scaler device structure
 * @base: pointer to register base address
 * @dev: device structure
 * @bridge: xilinx bridge
 * @width_in: input width
 * @height_in: input height
 * @width_out: output width
 * @height_out: output height
 * @fmt_in: input format (media bus format code)
 * @fmt_out: output format (media bus format code)
 * @num_hori_taps: number of horizontal taps
 * @num_vert_taps: number of vertical taps
 * @max_num_phases: maximum number of phases
 * @pix_per_clk: Pixels per Clock cycle the IP operates upon
 * @max_pixels: The maximum number of pixels that the H-scaler examines
 * @max_lines: The maximum number of lines that the V-scaler examines
 * @H_phases: The phases needed to program the H-scaler for different taps;
 *	      one packed word per output clock cycle (filled by
 *	      xv_hscaler_calculate_phases())
 * @hscaler_coeff: The complete array of H-scaler coefficients
 * @vscaler_coeff: The complete array of V-scaler coefficients
 * @is_polyphase: Track if scaling algorithm is polyphase or not
 * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
 * @ctrl_clk: AXI Lite clock
 * @axis_clk: Video Clock
 */
struct xilinx_scaler {
	void __iomem *base;
	struct device *dev;
	struct xlnx_bridge bridge;
	u32 width_in;
	u32 height_in;
	u32 width_out;
	u32 height_out;
	u32 fmt_in;
	u32 fmt_out;
	u32 num_hori_taps;
	u32 num_vert_taps;
	u32 max_num_phases;
	u32 pix_per_clk;
	u32 max_pixels;
	u32 max_lines;
	u32 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
	short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
	short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
	bool is_polyphase;
	struct gpio_desc *rst_gpio;
	struct clk *ctrl_clk;
	struct clk *axis_clk;
};
+
/* Write 32-bit @val to the scaler register at @offset from @base */
static inline void xilinx_scaler_write(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}
+
/* Read the 32-bit scaler register at @offset from @base */
static inline u32 xilinx_scaler_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}
+
/* Clear the bits in @clr via read-modify-write (not atomic vs other CPUs) */
static inline void xilinx_scaler_clr(void __iomem *base, u32 offset, u32 clr)
{
	xilinx_scaler_write(base, offset,
			    xilinx_scaler_read(base, offset) & ~clr);
}
+
/* Set the bits in @set via read-modify-write (not atomic vs other CPUs) */
static inline void xilinx_scaler_set(void __iomem *base, u32 offset, u32 set)
{
	xilinx_scaler_write(base, offset,
			    xilinx_scaler_read(base, offset) | set);
}
+
/*
 * Clear the @ip_block bits in the reset GPIO data register of @channel,
 * which (per the XGPIO_*RESET* macro names) asserts reset for those blocks.
 */
static inline void
xilinx_scaler_disable_block(struct xilinx_scaler *scaler, u32 channel,
			    u32 ip_block)
{
	xilinx_scaler_clr(scaler->base, ((channel - 1) * XGPIO_CHAN_OFFSET) +
			  XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF, ip_block);
}
+
/*
 * Set the @ip_block bits in the reset GPIO data register of @channel,
 * releasing those blocks from reset (counterpart of
 * xilinx_scaler_disable_block()).
 */
static inline void
xilinx_scaler_enable_block(struct xilinx_scaler *scaler, u32 channel,
			   u32 ip_block)
{
	xilinx_scaler_set(scaler->base, ((channel - 1) * XGPIO_CHAN_OFFSET) +
			  XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF, ip_block);
}
+
/**
 * bridge_to_layer - Gets the parent structure
 * @bridge: pointer to the embedded &struct xlnx_bridge member.
 *
 * Return: pointer to the containing struct xilinx_scaler
 */
static inline struct xilinx_scaler *bridge_to_layer(struct xlnx_bridge *bridge)
{
	return container_of(bridge, struct xilinx_scaler, bridge);
}
+
/**
 * xilinx_scaler_reset - Resets scaler block
 * @scaler: Pointer to scaler device structure
 *
 * This function resets the scaler block: it first asserts reset on all IP
 * blocks (clears every reset line) and then releases only the AXIS IP
 * reset line, bringing the core out of reset.
 */
static void xilinx_scaler_reset(struct xilinx_scaler *scaler)
{
	xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
				    XGPIO_RESET_MASK_ALL_BLOCKS);
	xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
				   XGPIO_RESET_MASK_IP_AXIS);
}
+
+/**
+ * xv_hscaler_calculate_phases - Calculates h-scaler phases
+ * @scaler: Pointer to scaler registers base
+ * @width_in: input width
+ * @width_out: output width
+ * @pixel_rate: pixel rate
+ */
+static void
+xv_hscaler_calculate_phases(struct xilinx_scaler *scaler,
+ u32 width_in, u32 width_out, u32 pixel_rate)
+{
+ unsigned int loop_width;
+ unsigned int x, s;
+ int offset = 0;
+ int xwrite_pos = 0;
+ bool output_write_en;
+ bool get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+ int nr_rds;
+ int nr_rds_clck;
+ unsigned int nphases = scaler->max_num_phases;
+ unsigned int nppc = scaler->pix_per_clk;
+ unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+ get_new_pix = true;
+ offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+ xwrite_pos < width_out) {
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ scaler->H_phases[x] |= (phaseH <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ scaler->H_phases[x] |= (array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MULTIPLIER)));
+ if (output_write_en) {
+ scaler->H_phases[x] |=
+ (XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+/**
+ * xv_hscaler_load_ext_coeff - Loads external coefficients of h-scaler
+ * @scaler: Pointer to scaler registers base
+ * @coeff: Pointer to coeff array
+ * @ntaps: number of taps
+ *
+ * This function loads h-scaler coefficients.
+ */
+static void
+xv_hscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ u32 nphases = scaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_HSCALER_MAX_H_TAPS - ntaps;
+ offset = pad >> 1;
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ scaler->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) {
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < offset; j++)
+ scaler->hscaler_coeff[i][j] = 0;
+ j = ntaps + offset;
+ for (; j < XV_HSCALER_MAX_H_TAPS; j++)
+ scaler->hscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
/**
 * xv_hscaler_select_coeff - Selection of H-Scaler coefficients of operation
 * @scaler: Pointer to Scaler device structure
 * @width_in: Width of input video
 * @width_out: Width of desired output video
 *
 * There are instances when a N-tap filter might operate in an M-tap
 * configuration where N > M.
 *
 * For example :
 * Depending on the ratio of scaling (while downscaling), a 12-tap
 * filter may operate with 10 tap coefficients and zero-pads the remaining
 * coefficients.
 *
 * While upscaling the driver will program 6-tap filter coefficients
 * in any N-tap configurations (for N >= 6).
 *
 * This selection is adopted by the driver as it gives optimal
 * video output, determined by repeated testing of the IP.
 *
 * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
 * H-scaler number of taps.
 */
static int
xv_hscaler_select_coeff(struct xilinx_scaler *scaler,
			u32 width_in, u32 width_out)
{
	const short *coeff;
	u16 hscale_ratio;
	u32 ntaps = scaler->num_hori_taps;

	/*
	 * Scale Down Mode will use dynamic filter selection logic
	 * Scale Up Mode (including 1:1) will always use 6 tap filter
	 */
	if (width_out < width_in) {
		/* ratio scaled by 10: e.g. 25 means 2.5x downscale */
		hscale_ratio = ((width_in * 10) / width_out);

		switch (scaler->num_hori_taps) {
		case XV_HSCALER_TAPS_6:
			coeff = &xhsc_coeff_taps6[0][0];
			ntaps = XV_HSCALER_TAPS_6;
			break;
		case XV_HSCALER_TAPS_8:
			if (hscale_ratio > 15) {
				coeff = &xhsc_coeff_taps8[0][0];
				ntaps = XV_HSCALER_TAPS_8;
			} else {
				coeff = &xhsc_coeff_taps6[0][0];
				ntaps = XV_HSCALER_TAPS_6;
			}
			break;
		case XV_HSCALER_TAPS_10:
			if (hscale_ratio > 25) {
				coeff = &xhsc_coeff_taps10[0][0];
				ntaps = XV_HSCALER_TAPS_10;
			} else if (hscale_ratio > 15) {
				coeff = &xhsc_coeff_taps8[0][0];
				ntaps = XV_HSCALER_TAPS_8;
			} else {
				coeff = &xhsc_coeff_taps6[0][0];
				ntaps = XV_HSCALER_TAPS_6;
			}
			break;
		case XV_HSCALER_TAPS_12:
			if (hscale_ratio > 35) {
				coeff = &xhsc_coeff_taps12[0][0];
				ntaps = XV_HSCALER_TAPS_12;
			} else if (hscale_ratio > 25) {
				coeff = &xhsc_coeff_taps10[0][0];
				ntaps = XV_HSCALER_TAPS_10;
			} else if (hscale_ratio > 15) {
				coeff = &xhsc_coeff_taps8[0][0];
				ntaps = XV_HSCALER_TAPS_8;
			} else {
				coeff = &xhsc_coeff_taps6[0][0];
				ntaps = XV_HSCALER_TAPS_6;
			}
			break;
		default:
			dev_info(scaler->dev, "Unsupported H-scaler number of taps\n");
			return -EINVAL;
		}
	} else {
		coeff = &xhsc_coeff_taps6[0][0];
		ntaps = XV_HSCALER_TAPS_6;
	}
	xv_hscaler_load_ext_coeff(scaler, coeff, ntaps);
	return 0;
}
+
+/**
+ * xv_hscaler_set_coeff - Sets h-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets coefficients of h-scaler.
+ */
+static void xv_hscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+ int val, i, j, offset, rd_indx;
+ u32 ntaps = scaler->num_hori_taps;
+ u32 nphases = scaler->max_num_phases;
+ u32 base_addr;
+
+ offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+ base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (scaler->hscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (scaler->hscaler_coeff[i][rd_indx] &
+ XHSC_MASK_LOW_16BITS);
+ xilinx_scaler_write(scaler->base, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_load_ext_coeff - Loads external coefficients of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @coeff: Pointer to coeff array
+ * @ntaps: number of taps
+ *
+ * This function loads v-scaler coefficients.
+ */
+static void
+xv_vscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+ const short *coeff, u32 ntaps)
+{
+ int i, j, pad, offset;
+ u32 nphases = scaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+ offset = pad ? (pad >> 1) : 0;
+ /* Load User defined coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ scaler->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+ if (pad) {
+ /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ scaler->vscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_VSCALER_MAX_V_TAPS; j++)
+ scaler->vscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_vscaler_set_coeff - Sets v-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets coefficients of v-scaler.
+ */
+static void xv_vscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+ u32 nphases = scaler->max_num_phases;
+ u32 ntaps = scaler->num_vert_taps;
+ int val, i, j, offset, rd_indx;
+ u32 base_addr;
+
+ offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+ base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (scaler->vscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (scaler->vscaler_coeff[i][rd_indx] &
+ XVSC_MASK_LOW_16BITS);
+ xilinx_scaler_write(scaler->base, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_coeff_select - Selection of V-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 10-tap
+ * filter may operate with 6 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the as it gives optimal
+ * video output determined by repeated testing of the IP
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * V-scaler number of taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xilinx_scaler *scaler,
+			u32 height_in, u32 height_out)
+{
+	/* Default: 6-tap filter, used for upscale and 1:1 operation. */
+	const short *sel_coeff = &xvsc_coeff_taps6[0][0];
+	u32 sel_taps = XV_VSCALER_TAPS_6;
+
+	/*
+	 * Downscale uses dynamic filter selection based on the scaling
+	 * ratio (x10 fixed point); upscale always uses the 6-tap filter.
+	 */
+	if (height_out < height_in) {
+		u16 ratio = (height_in * 10) / height_out;
+
+		switch (scaler->num_vert_taps) {
+		case XV_VSCALER_TAPS_6:
+			/* Defaults already selected. */
+			break;
+		case XV_VSCALER_TAPS_8:
+			if (ratio > 15) {
+				sel_coeff = &xvsc_coeff_taps8[0][0];
+				sel_taps = XV_VSCALER_TAPS_8;
+			}
+			break;
+		case XV_VSCALER_TAPS_10:
+			if (ratio > 25) {
+				sel_coeff = &xvsc_coeff_taps10[0][0];
+				sel_taps = XV_VSCALER_TAPS_10;
+			} else if (ratio > 15) {
+				sel_coeff = &xvsc_coeff_taps8[0][0];
+				sel_taps = XV_VSCALER_TAPS_8;
+			}
+			break;
+		case XV_VSCALER_TAPS_12:
+			if (ratio > 35) {
+				sel_coeff = &xvsc_coeff_taps12[0][0];
+				sel_taps = XV_VSCALER_TAPS_12;
+			} else if (ratio > 25) {
+				sel_coeff = &xvsc_coeff_taps10[0][0];
+				sel_taps = XV_VSCALER_TAPS_10;
+			} else if (ratio > 15) {
+				sel_coeff = &xvsc_coeff_taps8[0][0];
+				sel_taps = XV_VSCALER_TAPS_8;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	xv_vscaler_load_ext_coeff(scaler, sel_coeff, sel_taps);
+	return 0;
+}
+
+/**
+ * xv_hscaler_set_phases - Program the h-scaler phase registers
+ * @scaler: Pointer to scaler device structure
+ *
+ * Writes the H_phases words into the h-scaler HWREG_PHASESH_V register
+ * bank: two 16-bit phase values packed per 32-bit register in 1 ppc
+ * mode, one 32-bit word per register in 2 ppc mode.
+ */
+static void
+xv_hscaler_set_phases(struct xilinx_scaler *scaler)
+{
+	u32 nwords = scaler->max_pixels / scaler->pix_per_clk;
+	u32 reg_base = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+	u32 word, pos, reg_idx;
+
+	switch (scaler->pix_per_clk) {
+	case XSCALER_PPC_1:
+		/* Two consecutive 16-bit phases share one register. */
+		reg_idx = 0;
+		for (pos = 0; pos < nwords; pos += 2) {
+			u32 lo = scaler->H_phases[pos] & XHSC_MASK_LOW_16BITS;
+			u32 hi = scaler->H_phases[pos + 1] &
+				 XHSC_MASK_LOW_16BITS;
+
+			word = (hi << 16 | lo);
+			xilinx_scaler_write(scaler->base,
+					    reg_base + (reg_idx * 4), word);
+			reg_idx++;
+		}
+		return;
+	case XSCALER_PPC_2:
+		/* One 32-bit phase word per register. */
+		for (pos = 0; pos < nwords; pos++) {
+			word = scaler->H_phases[pos] & XHSC_MASK_LOW_32BITS;
+			xilinx_scaler_write(scaler->base,
+					    reg_base + (pos * 4), word);
+		}
+		return;
+	}
+}
+
+/**
+ * xv_vscaler_setup_video_fmt - Program the v-scaler input color format
+ * @scaler: Pointer to scaler device structure
+ * @code_in: media bus format to be set
+ *
+ * Maps the media bus code to the IP color-space value and writes it to
+ * the v-scaler COLORMODE register.
+ *
+ * Return: programmed color format value on success, -EINVAL on an
+ * unsupported media bus format.
+ */
+static int
+xv_vscaler_setup_video_fmt(struct xilinx_scaler *scaler, u32 code_in)
+{
+	u32 csf;
+
+	switch (code_in) {
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		csf = XVIDC_CSF_YCRCB_422;
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		csf = XVIDC_CSF_YCRCB_444;
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		csf = XVIDC_CSF_RGB;
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		csf = XVIDC_CSF_YCRCB_420;
+		break;
+	default:
+		dev_info(scaler->dev, "Vscaler Unsupported media fmt\n");
+		return -EINVAL;
+	}
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+			    csf);
+	return csf;
+}
+
+/**
+ * xv_hscaler_setup_video_fmt - Program the h-scaler color formats
+ * @scaler: Pointer to scaler device structure
+ * @code_out: output media bus format to be set
+ * @vsc_out: color format value returned by the v-scaler setup
+ *
+ * Validates @vsc_out, programs it as the h-scaler input color format,
+ * then maps @code_out to the IP color-space value and programs it as
+ * the h-scaler output color format.
+ *
+ * Return: 0 on success. -EINVAL for an invalid format.
+ */
+static int xv_hscaler_setup_video_fmt(struct xilinx_scaler *scaler,
+				      u32 code_out, u32 vsc_out)
+{
+	u32 video_out;
+
+	/* The h-scaler input must be one of the formats the v-scaler emits. */
+	switch (vsc_out) {
+	case XVIDC_CSF_YCRCB_422:
+	case XVIDC_CSF_YCRCB_444:
+	case XVIDC_CSF_RGB:
+	case XVIDC_CSF_YCRCB_420:
+		break;
+	default:
+		dev_info(scaler->dev, "unsupported format from Vscaler\n");
+		return -EINVAL;
+	}
+
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+			    vsc_out);
+
+	switch (code_out) {
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		video_out = XVIDC_CSF_YCRCB_422;
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		video_out = XVIDC_CSF_YCRCB_444;
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		video_out = XVIDC_CSF_RGB;
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		video_out = XVIDC_CSF_YCRCB_420;
+		break;
+	default:
+		dev_info(scaler->dev, "Hscaler Unsupported Out media fmt\n");
+		return -EINVAL;
+	}
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+			    video_out);
+	return 0;
+}
+
+/**
+ * xilinx_scaler_parse_of - Parse device tree information
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function reads the device tree contents: the two clocks, the
+ * horizontal and vertical tap counts, samples per clock, the reset
+ * GPIO and the maximum supported resolution.
+ *
+ * Return: 0 on success. -EINVAL for invalid value.
+ */
+static int xilinx_scaler_parse_of(struct xilinx_scaler *scaler)
+{
+	int ret;
+	u32 dt_ppc;
+	struct device_node *node = scaler->dev->of_node;
+
+	scaler->ctrl_clk = devm_clk_get(scaler->dev, "aclk_ctrl");
+	if (IS_ERR(scaler->ctrl_clk)) {
+		ret = PTR_ERR(scaler->ctrl_clk);
+		dev_err(scaler->dev, "failed to get axi lite clk %d\n", ret);
+		return ret;
+	}
+
+	scaler->axis_clk = devm_clk_get(scaler->dev, "aclk_axis");
+	if (IS_ERR(scaler->axis_clk)) {
+		ret = PTR_ERR(scaler->axis_clk);
+		dev_err(scaler->dev, "failed to get video clk %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_read_u32(node, "xlnx,h-scaler-taps",
+				   &scaler->num_hori_taps);
+	if (ret < 0) {
+		dev_info(scaler->dev, "h-scaler-taps not present in DT\n");
+		return ret;
+	}
+	/* 2/4-tap filters are fixed; 6 taps and above are polyphase. */
+	switch (scaler->num_hori_taps) {
+	case XV_HSCALER_TAPS_2:
+	case XV_HSCALER_TAPS_4:
+		scaler->is_polyphase = false;
+		break;
+	case XV_HSCALER_TAPS_6:
+	case XV_HSCALER_TAPS_8:
+	case XV_HSCALER_TAPS_10:
+	case XV_HSCALER_TAPS_12:
+		scaler->is_polyphase = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "xlnx,v-scaler-taps",
+				   &scaler->num_vert_taps);
+	if (ret < 0) {
+		dev_info(scaler->dev, "v-scaler-taps not present in DT\n");
+		return ret;
+	}
+
+	/*
+	 * NOTE(review): XV_HSCALER_TAPS_2 is used in this v-scaler switch;
+	 * presumably its value matches the 2-tap v-scaler configuration --
+	 * confirm against the IP register definitions.
+	 */
+	switch (scaler->num_vert_taps) {
+	case XV_HSCALER_TAPS_2:
+	case XV_VSCALER_TAPS_4:
+		/* Short filters must match in both directions. */
+		if (scaler->num_vert_taps != scaler->num_hori_taps)
+			return -EINVAL;
+		break;
+	case XV_VSCALER_TAPS_6:
+	case XV_VSCALER_TAPS_8:
+	case XV_VSCALER_TAPS_10:
+	case XV_VSCALER_TAPS_12:
+		scaler->is_polyphase = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "xlnx,samples-per-clk", &dt_ppc);
+	if (ret < 0) {
+		dev_info(scaler->dev, "PPC is missing in DT\n");
+		return ret;
+	}
+	if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2) {
+		dev_info(scaler->dev, "Unsupported ppc: %d\n", dt_ppc);
+		return -EINVAL;
+	}
+	scaler->pix_per_clk = dt_ppc;
+
+	/* Reset GPIO */
+	scaler->rst_gpio = devm_gpiod_get(scaler->dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(scaler->rst_gpio)) {
+		if (PTR_ERR(scaler->rst_gpio) != -EPROBE_DEFER)
+			dev_err(scaler->dev, "Reset GPIO not setup in DT\n");
+		return PTR_ERR(scaler->rst_gpio);
+	}
+
+	ret = of_property_read_u32(node, "xlnx,max-height",
+				   &scaler->max_lines);
+	if (ret < 0) {
+		dev_err(scaler->dev, "xlnx,max-height is missing!\n");
+		return -EINVAL;
+	} else if (scaler->max_lines > XSCALER_MAX_HEIGHT ||
+		   scaler->max_lines < XSCALER_MIN_HEIGHT) {
+		dev_err(scaler->dev, "Invalid height in dt\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "xlnx,max-width",
+				   &scaler->max_pixels);
+	if (ret < 0) {
+		dev_err(scaler->dev, "xlnx,max-width is missing!\n");
+		return -EINVAL;
+	} else if (scaler->max_pixels > XSCALER_MAX_WIDTH ||
+		   scaler->max_pixels < XSCALER_MIN_WIDTH) {
+		dev_err(scaler->dev, "Invalid width in dt\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_scaler_stream - Set up v-scaler and h-scaler for streaming
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets up the required configuration of v-scaler and
+ * h-scaler: coefficient selection, line/pixel rates, color formats
+ * and the horizontal phase registers.
+ *
+ * Return: 0 on success. Returns a negative error code on failure.
+ */
+static int xilinx_scaler_stream(struct xilinx_scaler *scaler)
+{
+	u32 pixel_rate;
+	u32 line_rate;
+	int vsc_fmt;
+	int ret;
+
+	line_rate = (scaler->height_in * STEP_PRECISION) / scaler->height_out;
+
+	if (scaler->is_polyphase) {
+		ret = xv_vscaler_select_coeff(scaler, scaler->height_in,
+					      scaler->height_out);
+		if (ret < 0) {
+			dev_info(scaler->dev, "Failed: vscaler select coeff\n");
+			return ret;
+		}
+		xv_vscaler_set_coeff(scaler);
+	}
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA,
+			    line_rate);
+	/* On success this returns the programmed v-scaler color format. */
+	vsc_fmt = xv_vscaler_setup_video_fmt(scaler, scaler->fmt_in);
+	if (vsc_fmt < 0) {
+		dev_info(scaler->dev, "Failed: vscaler setup video format\n");
+		return vsc_fmt;
+	}
+	pixel_rate = (scaler->width_in * STEP_PRECISION) / scaler->width_out;
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA,
+			    pixel_rate);
+	/* The v-scaler output format becomes the h-scaler input format. */
+	ret = xv_hscaler_setup_video_fmt(scaler, scaler->fmt_out, vsc_fmt);
+	if (ret < 0) {
+		dev_info(scaler->dev, "Failed: hscaler setup video format\n");
+		return ret;
+	}
+	if (scaler->is_polyphase) {
+		ret = xv_hscaler_select_coeff(scaler, scaler->width_in,
+					      scaler->width_out);
+		if (ret < 0) {
+			dev_info(scaler->dev, "Failed: hscaler select coeff\n");
+			return ret;
+		}
+		xv_hscaler_set_coeff(scaler);
+	}
+	xv_hscaler_calculate_phases(scaler, scaler->width_in,
+				    scaler->width_out, pixel_rate);
+	xv_hscaler_set_phases(scaler);
+	return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_enable - enables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * Configures both scaler sub-cores for the current stream, starts them
+ * and releases the AXIS reset through the GPIO reset channel.
+ *
+ * Return: 0 on success. Negative error code on failure conditions.
+ */
+static int xilinx_scaler_bridge_enable(struct xlnx_bridge *bridge)
+{
+	struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+	int ret = xilinx_scaler_stream(scaler);
+
+	if (ret)
+		return ret;
+
+	/* Start the h-scaler and v-scaler, then de-assert the AXIS reset. */
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+	xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+				   XGPIO_RESET_MASK_IP_AXIS);
+	return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_disable - disables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * Holds all scaler sub-blocks in reset via the GPIO reset channel.
+ */
+static void xilinx_scaler_bridge_disable(struct xlnx_bridge *bridge)
+{
+	struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+	xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+				    XGPIO_RESET_MASK_ALL_BLOCKS);
+}
+
+/**
+ * xilinx_scaler_bridge_set_input - Sets the input parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * Records the input geometry/format, resets the IP and programs the
+ * input dimension registers of both sub-cores.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_input(struct xlnx_bridge *bridge,
+					  u32 width, u32 height, u32 bus_fmt)
+{
+	struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+	/* Reject anything beyond the resolution configured in the DT. */
+	if (width > scaler->max_pixels || height > scaler->max_lines)
+		return -EINVAL;
+
+	scaler->height_in = height;
+	scaler->width_in = width;
+	scaler->fmt_in = bus_fmt;
+
+	/* IP Reset through GPIO */
+	gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_ASSERT);
+	gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+	xilinx_scaler_reset(scaler);
+	/* Phases are recomputed per-stream; start from a clean slate. */
+	memset(scaler->H_phases, 0, sizeof(scaler->H_phases));
+
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height);
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width);
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width);
+
+	return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_input_fmts - input formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * Reports the static table of media bus formats the scaler accepts.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+					       const u32 **fmts, u32 *count)
+{
+	*count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+	*fmts = xilinx_scaler_video_fmts;
+	return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_set_output - Sets the output parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * Records the output geometry/format and programs the output dimension
+ * registers of both sub-cores.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_output(struct xlnx_bridge *bridge,
+					   u32 width, u32 height, u32 bus_fmt)
+{
+	struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+	/* Reject anything beyond the resolution configured in the DT. */
+	if (width > scaler->max_pixels || height > scaler->max_lines)
+		return -EINVAL;
+
+	scaler->height_out = height;
+	scaler->width_out = width;
+	scaler->fmt_out = bus_fmt;
+
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height);
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height);
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width);
+	return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_output_fmts - output formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * Reports the static table of media bus formats the scaler can emit;
+ * input and output share the same table.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+						const u32 **fmts, u32 *count)
+{
+	*count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+	*fmts = xilinx_scaler_video_fmts;
+	return 0;
+}
+
+/**
+ * xilinx_scaler_probe - Probe the scaler bridge device
+ * @pdev: Pointer to the platform device
+ *
+ * Maps registers, parses the DT, enables clocks, resets the IP and
+ * registers the xlnx bridge callbacks.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xilinx_scaler_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct xilinx_scaler *scaler;
+	int ret;
+
+	scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
+	if (!scaler)
+		return -ENOMEM;
+	scaler->dev = dev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	scaler->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(scaler->base)) {
+		dev_err(dev, "failed to remap io region\n");
+		/* Propagate the real error code instead of -ENOMEM. */
+		return PTR_ERR(scaler->base);
+	}
+	platform_set_drvdata(pdev, scaler);
+
+	ret = xilinx_scaler_parse_of(scaler);
+	if (ret < 0) {
+		dev_info(scaler->dev, "parse_of failed\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(scaler->ctrl_clk);
+	if (ret) {
+		dev_err(scaler->dev, "unable to enable axi lite clk %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(scaler->axis_clk);
+	if (ret) {
+		dev_err(scaler->dev, "unable to enable video clk %d\n", ret);
+		goto err_ctrl_clk;
+	}
+
+	scaler->max_num_phases = XSCALER_MAX_PHASES;
+
+	/* Reset the Global IP Reset through a GPIO */
+	gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+	xilinx_scaler_reset(scaler);
+
+	scaler->bridge.enable = &xilinx_scaler_bridge_enable;
+	scaler->bridge.disable = &xilinx_scaler_bridge_disable;
+	scaler->bridge.set_input = &xilinx_scaler_bridge_set_input;
+	scaler->bridge.get_input_fmts = &xilinx_scaler_bridge_get_input_fmts;
+	scaler->bridge.set_output = &xilinx_scaler_bridge_set_output;
+	scaler->bridge.get_output_fmts = &xilinx_scaler_bridge_get_output_fmts;
+	scaler->bridge.of_node = dev->of_node;
+
+	ret = xlnx_bridge_register(&scaler->bridge);
+	if (ret) {
+		dev_info(scaler->dev, "Bridge registration failed\n");
+		goto err_axis_clk;
+	}
+	dev_info(scaler->dev, "xlnx drm scaler experimental driver probed\n");
+
+	return 0;
+
+err_axis_clk:
+	clk_disable_unprepare(scaler->axis_clk);
+err_ctrl_clk:
+	clk_disable_unprepare(scaler->ctrl_clk);
+	return ret;
+}
+
+static int xilinx_scaler_remove(struct platform_device *pdev)
+{
+	struct xilinx_scaler *scaler = platform_get_drvdata(pdev);
+
+	/* Tear down in reverse probe order. */
+	xlnx_bridge_unregister(&scaler->bridge);
+	clk_disable_unprepare(scaler->axis_clk);
+	clk_disable_unprepare(scaler->ctrl_clk);
+	return 0;
+}
+
+static const struct of_device_id xilinx_scaler_of_match[] = {
+ { .compatible = "xlnx,vpss-scaler"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_scaler_of_match);
+
+static struct platform_driver scaler_bridge_driver = {
+ .probe = xilinx_scaler_probe,
+ .remove = xilinx_scaler_remove,
+ .driver = {
+ .name = "xlnx,scaler-bridge",
+ .of_match_table = xilinx_scaler_of_match,
+ },
+};
+
+module_platform_driver(scaler_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SCALER Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi.c b/drivers/gpu/drm/xlnx/xlnx_sdi.c
new file mode 100644
index 000000000000..9fb7b5db5589
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx Subsystem driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_modes.h"
+#include "xlnx_sdi_timing.h"
+
+#include "xlnx_bridge.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+#define XSDI_TX_ST352_DATA_DS2 0x70
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_M BIT(7)
+#define XSDI_TX_CTRL_INS_CRC BIT(12)
+#define XSDI_TX_CTRL_INS_ST352 BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352 BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT BIT(16)
+#define XSDI_TX_CTRL_USE_ANC_IN BIT(18)
+#define XSDI_TX_CTRL_INS_LN BIT(19)
+#define XSDI_TX_CTRL_INS_EDH BIT(20)
+#define XSDI_TX_CTRL_MODE 0x7
+#define XSDI_TX_CTRL_MUX 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_420_BIT BIT(21)
+#define XSDI_TX_CTRL_INS_ST352_CHROMA BIT(23)
+#define XSDI_TX_CTRL_USE_DS2_3GA BIT(24)
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR BIT(8)
+#define XSDI_OVERFLOW_INTR BIT(9)
+#define XSDI_UNDERFLOW_INTR BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR | \
+ XSDI_TX_CE_ALIGN_ERR_INTR | \
+ XSDI_OVERFLOW_INTR | \
+ XSDI_UNDERFLOW_INTR)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_CTRL_EN BIT(0)
+#define XSDI_TX_BRIDGE_CTRL_EN BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN BIT(9)
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_GT_RESETDONE BIT(2)
+
+#define XSDI_TX_MUX_SD_HD_3GA 0
+#define XSDI_TX_MUX_3GB 1
+#define XSDI_TX_MUX_8STREAM_6G_12G 2
+#define XSDI_TX_MUX_4STREAM_6G 3
+#define XSDI_TX_MUX_16STREAM_12G 4
+
+#define SDI_MAX_DATASTREAM 8
+#define PIXELS_PER_CLK 2
+#define XSDI_CH_SHIFT 29
+#define XST352_PROG_PIC BIT(6)
+#define XST352_PROG_TRANS BIT(7)
+#define XST352_2048_SHIFT BIT(6)
+#define XST352_YUV420_MASK 0x03
+#define ST352_BYTE3 0x00
+#define ST352_BYTE4 0x01
+#define GT_TIMEOUT 50
+/* SDI modes */
+#define XSDI_MODE_HD 0
+#define XSDI_MODE_SD 1
+#define XSDI_MODE_3GA 2
+#define XSDI_MODE_3GB 3
+#define XSDI_MODE_6G 4
+#define XSDI_MODE_12G 5
+
+#define SDI_TIMING_PARAMS_SIZE 48
+
+/**
+ * enum payload_line_1 - Payload Ids Line 1 number
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD,3G,6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ */
+enum payload_line_1 {
+ PAYLD_LN1_HD_3_6_12G = 10,
+ PAYLD_LN1_SDPAL = 9,
+ PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - Payload Ids Line 2 number
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD,3G,6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ */
+enum payload_line_2 {
+ PAYLD_LN2_HD_3_6_12G = 572,
+ PAYLD_LN2_SDPAL = 322,
+ PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * struct xlnx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @enable_st352_chroma: Able to send ST352 packets in Chroma stream.
+ * @enable_anc_data: Enable/Disable Ancillary Data insertion for Audio
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ * 0 - HD
+ * 1 - SD
+ * 2 - 3GA
+ * 3 - 3GB
+ * 4 - 6G
+ * 5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams
+ * value currently supported are 2, 4 and 8
+ * @sdi_420_in: Specifying input bus color format parameter to SDI
+ * @sdi_420_in_val: 1 for yuv420 and 0 for yuv422
+ * @sdi_420_out: configurable SDI out color format parameter
+ * @sdi_420_out_val: 1 for yuv420 and 0 for yuv422
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @en_st352_c_prop: configurable ST352 payload on Chroma stream parameter
+ * @en_st352_c_val: configurable ST352 payload on Chroma parameter value
+ * @use_ds2_3ga_prop: Use DS2 instead of DS3 in 3GA mode parameter
+ * @use_ds2_3ga_val: Use DS2 instead of DS3 in 3GA mode parameter value
+ * @video_mode: current display mode
+ * @axi_clk: AXI Lite interface clock
+ * @sditx_clk: SDI Tx Clock
+ * @vidin_clk: Video Clock
+ */
+struct xlnx_sdi {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct device *dev;
+ void __iomem *base;
+ u32 mode_flags;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ bool enable_st352_chroma;
+ bool enable_anc_data;
+ struct drm_property *sdi_mode;
+ u32 sdi_mod_prop_val;
+ struct drm_property *sdi_data_strm;
+ u32 sdi_data_strm_prop_val;
+ struct drm_property *sdi_420_in;
+ bool sdi_420_in_val;
+ struct drm_property *sdi_420_out;
+ bool sdi_420_out_val;
+ struct drm_property *is_frac_prop;
+ bool is_frac_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ struct drm_property *en_st352_c_prop;
+ bool en_st352_c_val;
+ struct drm_property *use_ds2_3ga_prop;
+ bool use_ds2_3ga_val;
+ struct drm_display_mode video_mode;
+ struct clk *axi_clk;
+ struct clk *sditx_clk;
+ struct clk *vidin_clk;
+};
+
+#define connector_to_sdi(c) container_of(c, struct xlnx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xlnx_sdi, encoder)
+
+/* Write a 32-bit value to an SDI Tx register at @offset from @base. */
+static inline void xlnx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/* Read a 32-bit SDI Tx register at @offset from @base. */
+static inline u32 xlnx_sdi_readl(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/**
+ * xlnx_sdi_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * Sets the AXI4S enable bit in the reset-control register,
+ * read-modify-write so other control bits are preserved.
+ */
+static void xlnx_sdi_en_axi4s(struct xlnx_sdi *sdi)
+{
+	u32 ctrl = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+
+	xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL,
+			ctrl | XSDI_TX_AXI4S_CTRL_EN);
+}
+
+/**
+ * xlnx_sdi_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * Sets the bridge enable bit in the reset-control register,
+ * read-modify-write so other control bits are preserved.
+ */
+static void xlnx_sdi_en_bridge(struct xlnx_sdi *sdi)
+{
+	u32 ctrl = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+
+	xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL,
+			ctrl | XSDI_TX_BRIDGE_CTRL_EN);
+}
+
+/**
+ * xlnx_sdi_irq_handler - SDI Tx interrupt
+ * @irq: irq number
+ * @data: irq data (the struct xlnx_sdi instance)
+ *
+ * Logs pending SDI Tx events, acknowledges the interrupt status and
+ * wakes the wait queue once the GT reset-done sideband bit is set.
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ * This is the compact GT ready interrupt.
+ */
+static irqreturn_t xlnx_sdi_irq_handler(int irq, void *data)
+{
+	struct xlnx_sdi *sdi = (struct xlnx_sdi *)data;
+	u32 reg;
+
+	reg = xlnx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+	if (reg & XSDI_GTTX_RSTDONE_INTR)
+		dev_dbg(sdi->dev, "GT reset interrupt received\n");
+	if (reg & XSDI_TX_CE_ALIGN_ERR_INTR)
+		dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+	if (reg & XSDI_OVERFLOW_INTR)
+		dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+	if (reg & XSDI_UNDERFLOW_INTR)
+		dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+	/*
+	 * Ack all pending bits except the video-lock interrupt, which is
+	 * deliberately left set here (presumably consumed elsewhere --
+	 * confirm against the IP interrupt handling).
+	 */
+	xlnx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+			reg & ~(XSDI_AXI4S_VID_LOCK_INTR));
+
+	/* GT reset completed: flag it and wake any waiter on wait_event. */
+	reg = xlnx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+	if (reg & XSDI_TX_TDATA_GT_RESETDONE) {
+		sdi->event_received = true;
+		wake_up_interruptible(&sdi->wait_event);
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * xlnx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * Programs the line numbers on which the ST352 payload packets are
+ * inserted and enables ST352 insertion for field 2.
+ */
+static void xlnx_sdi_set_payload_line(struct xlnx_sdi *sdi,
+				      u32 line_1, u32 line_2)
+{
+	u32 val;
+
+	/* Field-1 line in the low half, field-2 line in the high half. */
+	val = line_1 & XSDI_TX_ST352_LINE_MASK;
+	val |= (line_2 & XSDI_TX_ST352_LINE_MASK) <<
+	       XSDI_TX_ST352_LINE_F2_SHIFT;
+	xlnx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, val);
+
+	val = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+	val |= BIT(XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+	xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, val);
+}
+
+/**
+ * xlnx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * Writes the ST352 payload word for the given data stream, and mirrors
+ * it into the chroma (DS2) stream registers when chroma ST352 insertion
+ * is supported and enabled.
+ */
+static void xlnx_sdi_set_payload_data(struct xlnx_sdi *sdi,
+				      u32 data_strm, u32 payload)
+{
+	xlnx_sdi_writel(sdi->base,
+			(XSDI_TX_ST352_DATA_CH0 + (data_strm * 4)), payload);
+
+	dev_dbg(sdi->dev, "enable_st352_chroma = %d and en_st352_c_val = %d\n",
+		sdi->enable_st352_chroma, sdi->en_st352_c_val);
+	if (sdi->enable_st352_chroma && sdi->en_st352_c_val)
+		xlnx_sdi_writel(sdi->base,
+				(XSDI_TX_ST352_DATA_DS2 + (data_strm * 4)),
+				payload);
+}
+
+/**
+ * xlnx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * Clears every ST352 payload register, masks all interrupts and clears
+ * the whole reset-control register, disabling the core.
+ */
+static void xlnx_sdi_set_display_disable(struct xlnx_sdi *sdi)
+{
+	u32 strm;
+
+	for (strm = 0; strm < SDI_MAX_DATASTREAM; strm++)
+		xlnx_sdi_set_payload_data(sdi, strm, 0);
+
+	xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+	xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xlnx_sdi_payload_config - config the SDI payload parameters
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * Picks the ST352 insertion line numbers for the given SDI mode and
+ * programs them; unknown modes get line number zero.
+ */
+static void xlnx_sdi_payload_config(struct xlnx_sdi *sdi, u32 mode)
+{
+	u32 ln1 = 0, ln2 = 0;
+
+	if (mode == XSDI_MODE_SD) {
+		ln1 = PAYLD_LN1_SDPAL;
+		ln2 = PAYLD_LN2_SDPAL;
+	} else if (mode == XSDI_MODE_HD || mode == XSDI_MODE_3GA ||
+		   mode == XSDI_MODE_3GB || mode == XSDI_MODE_6G ||
+		   mode == XSDI_MODE_12G) {
+		ln1 = PAYLD_LN1_HD_3_6_12G;
+		ln2 = PAYLD_LN2_HD_3_6_12G;
+	}
+
+	xlnx_sdi_set_payload_line(sdi, ln1, ln2);
+}
+
+/**
+ * xlnx_sdi_set_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer 1 - fractional
+ * @mux_ptrn: specifiy the data stream interleaving pattern to be used
+ *
+ * Programs the ST352 payload line numbers for @mode, then updates the
+ * mode, fractional-rate, mux-pattern and YUV420 fields of the module
+ * control register with a read-modify-write.
+ */
+static void xlnx_sdi_set_mode(struct xlnx_sdi *sdi, u32 mode,
+			      bool is_frac, u32 mux_ptrn)
+{
+	u32 data;
+
+	xlnx_sdi_payload_config(sdi, mode);
+
+	data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+	/* Clear the fields being reprogrammed; leave all other bits alone. */
+	data &= ~(XSDI_TX_CTRL_MODE << XSDI_TX_CTRL_MODE_SHIFT);
+	data &= ~(XSDI_TX_CTRL_M);
+	data &= ~(XSDI_TX_CTRL_MUX << XSDI_TX_CTRL_MUX_SHIFT);
+	data &= ~XSDI_TX_CTRL_420_BIT;
+
+	data |= (((mode & XSDI_TX_CTRL_MODE) << XSDI_TX_CTRL_MODE_SHIFT) |
+		 (is_frac << XSDI_TX_CTRL_M_SHIFT) |
+		 ((mux_ptrn & XSDI_TX_CTRL_MUX) << XSDI_TX_CTRL_MUX_SHIFT));
+
+	/* YUV420 output selected via the drm property. */
+	if (sdi->sdi_420_out_val)
+		data |= XSDI_TX_CTRL_420_BIT;
+	xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_config_parameters - Configure SDI Tx registers with parameters
+ * given from user application.
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure having drm_property parameters
+ * configured from user application, derives the data-stream mux pattern
+ * for the selected mode, and writes them into SDI IP registers. An
+ * unsupported mode/stream-count combination is rejected with an error
+ * message and no register write.
+ */
+static void xlnx_sdi_set_config_parameters(struct xlnx_sdi *sdi)
+{
+	int mux_ptrn = -EINVAL;
+
+	switch (sdi->sdi_mod_prop_val) {
+	case XSDI_MODE_3GA:
+		mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+		break;
+	case XSDI_MODE_3GB:
+		mux_ptrn = XSDI_TX_MUX_3GB;
+		break;
+	case XSDI_MODE_6G:
+		/* 6G supports 4- or 8-stream interleaving only. */
+		if (sdi->sdi_data_strm_prop_val == 4)
+			mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+		else if (sdi->sdi_data_strm_prop_val == 8)
+			mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+		break;
+	case XSDI_MODE_12G:
+		/* 12G supports 8-stream interleaving only. */
+		if (sdi->sdi_data_strm_prop_val == 8)
+			mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+		break;
+	default:
+		mux_ptrn = 0;
+		break;
+	}
+	if (mux_ptrn == -EINVAL) {
+		dev_err(sdi->dev, "%d data stream not supported for %d mode\n",
+			sdi->sdi_data_strm_prop_val, sdi->sdi_mod_prop_val);
+		return;
+	}
+	xlnx_sdi_set_mode(sdi, sdi->sdi_mod_prop_val, sdi->is_frac_prop_val,
+			  mux_ptrn);
+}
+
+/**
+ * xlnx_sdi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer Xilinx SDI connector
+ * @state: DRM connector state
+ * @property: pointer to the drm_property structure
+ * @val: SDI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from user
+ * application and updates the SDI structure property variables with the
+ * values. These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xlnx_sdi_atomic_set_property(struct drm_connector *connector,
+			     struct drm_connector_state *state,
+			     struct drm_property *property, uint64_t val)
+{
+	struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+	if (property == sdi->sdi_mode)
+		sdi->sdi_mod_prop_val = (unsigned int)val;
+	else if (property == sdi->sdi_data_strm)
+		sdi->sdi_data_strm_prop_val = (unsigned int)val;
+	else if (property == sdi->sdi_420_in)
+		/* Normalize to bool, consistent with the other bool props. */
+		sdi->sdi_420_in_val = !!val;
+	else if (property == sdi->sdi_420_out)
+		sdi->sdi_420_out_val = !!val;
+	else if (property == sdi->is_frac_prop)
+		sdi->is_frac_prop_val = !!val;
+	else if (property == sdi->height_out)
+		sdi->height_out_prop_val = (unsigned int)val;
+	else if (property == sdi->width_out)
+		sdi->width_out_prop_val = (unsigned int)val;
+	else if (property == sdi->in_fmt)
+		sdi->in_fmt_prop_val = (unsigned int)val;
+	else if (property == sdi->out_fmt)
+		sdi->out_fmt_prop_val = (unsigned int)val;
+	else if (property == sdi->en_st352_c_prop)
+		sdi->en_st352_c_val = !!val;
+	else if (property == sdi->use_ds2_3ga_prop)
+		sdi->use_ds2_3ga_val = !!val;
+	else
+		return -EINVAL;
+	return 0;
+}
+
+/* Mirror of the set_property callback: report the cached property value. */
+static int
+xlnx_sdi_atomic_get_property(struct drm_connector *connector,
+			     const struct drm_connector_state *state,
+			     struct drm_property *property, uint64_t *val)
+{
+	struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+	if (property == sdi->sdi_mode) {
+		*val = sdi->sdi_mod_prop_val;
+		return 0;
+	}
+	if (property == sdi->sdi_data_strm) {
+		*val = sdi->sdi_data_strm_prop_val;
+		return 0;
+	}
+	if (property == sdi->sdi_420_in) {
+		*val = sdi->sdi_420_in_val;
+		return 0;
+	}
+	if (property == sdi->sdi_420_out) {
+		*val = sdi->sdi_420_out_val;
+		return 0;
+	}
+	if (property == sdi->is_frac_prop) {
+		*val = sdi->is_frac_prop_val;
+		return 0;
+	}
+	if (property == sdi->height_out) {
+		*val = sdi->height_out_prop_val;
+		return 0;
+	}
+	if (property == sdi->width_out) {
+		*val = sdi->width_out_prop_val;
+		return 0;
+	}
+	if (property == sdi->in_fmt) {
+		*val = sdi->in_fmt_prop_val;
+		return 0;
+	}
+	if (property == sdi->out_fmt) {
+		*val = sdi->out_fmt_prop_val;
+		return 0;
+	}
+	if (property == sdi->en_st352_c_prop) {
+		*val = sdi->en_st352_c_val;
+		return 0;
+	}
+	if (property == sdi->use_ds2_3ga_prop) {
+		*val = sdi->use_ds2_3ga_val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * xlnx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: mode id if mode is found OR -EINVAL otherwise
+ */
+static int xlnx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+ unsigned int i;
+
+ /* Entry 0 of xlnx_sdi_modes is an empty placeholder (VICs start at 1). */
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+ if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+ return i;
+ return -EINVAL;
+}
+
+/**
+ * xlnx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer to the Xilinx SDI connector
+ *
+ * Duplicate each entry of the static SDI mode table and register it on
+ * @connector's probed-mode list. Entries that fail to duplicate are
+ * silently skipped.
+ *
+ * Return: Count of modes added
+ */
+static int xlnx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+	struct drm_device *drm = connector->dev;
+	struct drm_display_mode *dup;
+	unsigned int idx;
+	int count = 0;
+
+	for (idx = 0; idx < ARRAY_SIZE(xlnx_sdi_modes); idx++) {
+		dup = drm_mode_duplicate(drm, &xlnx_sdi_modes[idx].mode);
+		if (!dup)
+			continue;
+
+		drm_mode_probed_add(connector, dup);
+		count++;
+	}
+
+	return count;
+}
+
+/* SDI Tx has no hotplug detection; the link is always reported connected. */
+static enum drm_connector_status
+xlnx_sdi_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/* Unregister and tear down the connector; clear ->dev to mark it dead. */
+static void xlnx_sdi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+/*
+ * Connector ops: standard atomic helpers plus the driver's own
+ * atomic_{set,get}_property hooks backing the custom SDI properties.
+ */
+static const struct drm_connector_funcs xlnx_sdi_connector_funcs = {
+ .detect = xlnx_sdi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_sdi_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = xlnx_sdi_atomic_set_property,
+ .atomic_get_property = xlnx_sdi_atomic_get_property,
+};
+
+/* One encoder per connector, embedded in struct xlnx_sdi. */
+static struct drm_encoder *
+xlnx_sdi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_sdi(connector)->encoder);
+}
+
+/* .get_modes hook: report the fixed table of SDI modes. */
+static int xlnx_sdi_get_modes(struct drm_connector *connector)
+{
+ return xlnx_sdi_drm_add_modes(connector);
+}
+
+/* Connector probe helpers (mode list and encoder selection). */
+static struct drm_connector_helper_funcs xlnx_sdi_connector_helper_funcs = {
+ .get_modes = xlnx_sdi_get_modes,
+ .best_encoder = xlnx_sdi_best_encoder,
+};
+
+/**
+ * xlnx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the xilinx SDI connector component and defines
+ * the drm_property variables with their default values.
+ *
+ * NOTE(review): drm_property_create_* return values are not checked
+ * here. A failed (NULL) property is tolerated by the NULL guards in
+ * attach/set/get, degrading to a missing property rather than an oops —
+ * confirm that this best-effort behavior is intended.
+ */
+static void
+xlnx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 0, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+ sdi->sdi_420_in = drm_property_create_bool(dev, 0, "sdi_420_in");
+ sdi->sdi_420_out = drm_property_create_bool(dev, 0, "sdi_420_out");
+ sdi->height_out = drm_property_create_range(dev, 0,
+ "height_out", 2, 4096);
+ sdi->width_out = drm_property_create_range(dev, 0,
+ "width_out", 2, 4096);
+ sdi->in_fmt = drm_property_create_range(dev, 0,
+ "in_fmt", 0, 16384);
+ sdi->out_fmt = drm_property_create_range(dev, 0,
+ "out_fmt", 0, 16384);
+ /* Chroma ST352 properties exist only when the IP was built with them. */
+ if (sdi->enable_st352_chroma) {
+ sdi->en_st352_c_prop = drm_property_create_bool(dev, 0,
+ "en_st352_c");
+ sdi->use_ds2_3ga_prop = drm_property_create_bool(dev, 0,
+ "use_ds2_3ga");
+ }
+}
+
+/**
+ * xlnx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * Attach every successfully created SDI property to the connector's
+ * mode object, with an initial value of 0. Properties whose creation
+ * failed (NULL pointers) are skipped.
+ */
+static void
+xlnx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+	struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+	struct drm_mode_object *obj = &base_connector->base;
+	struct drm_property *props[] = {
+		sdi->sdi_mode,
+		sdi->sdi_data_strm,
+		sdi->sdi_420_in,
+		sdi->sdi_420_out,
+		sdi->is_frac_prop,
+		sdi->height_out,
+		sdi->width_out,
+		sdi->in_fmt,
+		sdi->out_fmt,
+		sdi->en_st352_c_prop,
+		sdi->use_ds2_3ga_prop,
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(props); i++) {
+		if (props[i])
+			drm_object_attach_property(obj, props[i], 0);
+	}
+}
+
+/*
+ * Initialize and register the SDI connector, link it to @encoder and
+ * expose the SDI-specific DRM properties on it.
+ *
+ * Return: 0 on success, negative error code from drm_connector_init().
+ */
+static int xlnx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ /* the mode table contains interlaced and segmented-frame entries */
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_sdi_drm_connector_create_property(connector);
+ xlnx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_set_display_enable - Enables the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and enables the core enable bit
+ * of the core configuration register (read-modify-write, other bits of
+ * XSDI_TX_RST_CTRL are preserved).
+ */
+static void xlnx_sdi_set_display_enable(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the st352 payload to be configured from the
+ * mode timing and the cached connector properties.
+ * Please refer to SMPTE ST352 documents for it.
+ *
+ * Return: st352 payload, or 0 if @mode is not present in the supported
+ * mode table (no valid payload can be derived for an unknown mode)
+ */
+static u32 xlnx_sdi_calc_st352_payld(struct xlnx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+ int id;
+ u32 sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ id = xlnx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ if (id < 0) {
+ /*
+ * xlnx_sdi_get_mode_id() returns -EINVAL for unknown modes;
+ * using it unchecked as a table index would read out of
+ * bounds of xlnx_sdi_modes[].
+ */
+ dev_err(sdi->dev, "unsupported mode for st352 payload\n");
+ return 0;
+ }
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ if (sdi->sdi_420_in_val)
+ byt3 |= XST352_YUV420_MASK;
+
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if (sdi_mode == XSDI_MODE_3GB ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC;
+ if (is_p && mode->vtotal >= 1125)
+ byt2 |= XST352_PROG_TRANS;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
+
+/*
+ * Program the static Tx module control bits (CRC/ST352/EDH insertion,
+ * optional ancillary-data and chroma-ST352 handling), enable the core
+ * interrupts and reset the timing controller.
+ */
+static void xlnx_sdi_setup(struct xlnx_sdi *sdi)
+{
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC | XSDI_TX_CTRL_INS_ST352 |
+ XSDI_TX_CTRL_OVR_ST352 | XSDI_TX_CTRL_INS_SYNC_BIT |
+ XSDI_TX_CTRL_INS_EDH;
+
+ if (sdi->enable_anc_data)
+ reg |= XSDI_TX_CTRL_USE_ANC_IN;
+
+ /* chroma ST352 insertion only when the IP supports it */
+ if (sdi->enable_st352_chroma) {
+ if (sdi->en_st352_c_val) {
+ reg |= XSDI_TX_CTRL_INS_ST352_CHROMA;
+ if (sdi->use_ds2_3ga_val)
+ reg |= XSDI_TX_CTRL_USE_DS2_3GA;
+ else
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ } else {
+ reg &= ~XSDI_TX_CTRL_INS_ST352_CHROMA;
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ }
+ }
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xlnx_stc_reset(sdi->base);
+}
+
+/**
+ * xlnx_sdi_encoder_atomic_mode_set - drive the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: DRM crtc state
+ * @connector_state: DRM connector state
+ *
+ * This function derives the SDI IP timing parameters from the timing
+ * values given to the timing module, programs the core control bits and
+ * ST352 payloads, and hands the final video timings to the timing
+ * controller.
+ */
+static void xlnx_sdi_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct videomode vm;
+ u32 payload, i;
+ u32 sditx_blank, vtc_blank;
+
+ /* Set timing parameters as per bridge output parameters */
+ /*
+ * NOTE(review): these calls are made even when sdi->bridge is NULL;
+ * presumably the xlnx_bridge_* helpers tolerate a NULL bridge (the
+ * explicit NULL check below only guards the timing override) — confirm.
+ */
+ xlnx_bridge_set_input(sdi->bridge, adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay, sdi->in_fmt_prop_val);
+ xlnx_bridge_set_output(sdi->bridge, sdi->width_out_prop_val,
+ sdi->height_out_prop_val, sdi->out_fmt_prop_val);
+ xlnx_bridge_enable(sdi->bridge);
+
+ if (sdi->bridge) {
+ /*
+ * A bridge may rescale the stream: replace the timing part of
+ * adjusted_mode (from ->clock onwards) with the table entry
+ * matching the bridge output resolution and refresh rate.
+ */
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ if (xlnx_sdi_modes[i].mode.hdisplay ==
+ sdi->width_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vdisplay ==
+ sdi->height_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vrefresh ==
+ adjusted_mode->vrefresh) {
+ memcpy((char *)adjusted_mode +
+ offsetof(struct drm_display_mode,
+ clock),
+ &xlnx_sdi_modes[i].mode.clock,
+ SDI_TIMING_PARAMS_SIZE);
+ break;
+ }
+ }
+ }
+
+ xlnx_sdi_setup(sdi);
+ xlnx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xlnx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ /* one payload per data-stream pair; 3G-B encodes the pair index */
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ payload |= (i << 1) << XSDI_CH_SHIFT;
+ xlnx_sdi_set_payload_data(sdi, i, payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ /*
+ * The /2 divisions above truncate: widen the front porch one clock
+ * at a time until the VTC horizontal blanking (in pixels) matches
+ * the SDI Tx blanking exactly.
+ */
+ do {
+ sditx_blank = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) +
+ (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) +
+ (adjusted_mode->htotal -
+ adjusted_mode->hsync_end);
+
+ vtc_blank = (vm.hfront_porch + vm.hback_porch +
+ vm.hsync_len) * PIXELS_PER_CLK;
+
+ if (vtc_blank != sditx_blank)
+ vm.hfront_porch++;
+ } while (vtc_blank < sditx_blank);
+
+ vm.pixelclock = adjusted_mode->clock * 1000;
+
+ /* parameters for sdi audio */
+ sdi->video_mode.vdisplay = adjusted_mode->vdisplay;
+ sdi->video_mode.hdisplay = adjusted_mode->hdisplay;
+ sdi->video_mode.vrefresh = adjusted_mode->vrefresh;
+ sdi->video_mode.flags = adjusted_mode->flags;
+
+ xlnx_stc_sig(sdi->base, &vm);
+}
+
+/*
+ * Encoder .enable hook: turn on the Tx core, then wait (up to GT_TIMEOUT)
+ * for the GT reset-done interrupt before enabling the video path. If the
+ * interrupt never arrives, the video path is left disabled.
+ */
+static void xlnx_sdi_commit(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ long ret;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+ xlnx_sdi_set_display_enable(sdi);
+ ret = wait_event_interruptible_timeout(sdi->wait_event,
+ sdi->event_received,
+ usecs_to_jiffies(GT_TIMEOUT));
+ if (!ret) {
+ dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+ return;
+ }
+ sdi->event_received = false; /* re-arm for the next enable */
+ /* enable sdi bridge, timing controller and Axi4s_vid_out_ctrl */
+ xlnx_sdi_en_bridge(sdi);
+ xlnx_stc_enable(sdi->base);
+ xlnx_sdi_en_axi4s(sdi);
+}
+
+/* Encoder .disable hook: stop the bridge, the Tx core and the VTC. */
+static void xlnx_sdi_disable(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+
+ if (sdi->bridge)
+ xlnx_bridge_disable(sdi->bridge);
+
+ xlnx_sdi_set_display_disable(sdi);
+ xlnx_stc_disable(sdi->base);
+}
+
+/* Encoder helper ops: mode programming plus enable/disable sequencing. */
+static const struct drm_encoder_helper_funcs xlnx_sdi_encoder_helper_funcs = {
+ .atomic_mode_set = xlnx_sdi_encoder_atomic_mode_set,
+ .enable = xlnx_sdi_commit,
+ .disable = xlnx_sdi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_sdi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Component .bind hook: register the SDI encoder and its connector with
+ * the master DRM device.
+ *
+ * Return: 0 on success, negative error code from encoder/connector init.
+ */
+static int xlnx_sdi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &sdi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: The possible CRTCs are 1 now as per current implementation of
+ * SDI tx drivers. DRM framework can support more than one CRTCs and
+ * SDI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ /*
+ * drm_encoder_init() can fail; continuing with an uninitialized
+ * encoder would corrupt the DRM device state, so bail out here.
+ */
+ ret = drm_encoder_init(drm_dev, encoder, &xlnx_sdi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ dev_err(sdi->dev, "failed to init encoder, ret = %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &xlnx_sdi_encoder_helper_funcs);
+
+ ret = xlnx_sdi_create_connector(encoder);
+ if (ret) {
+ dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ }
+ return ret;
+}
+
+/* Component .unbind hook: quiesce the hardware and release DRM objects. */
+static void xlnx_sdi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+
+ xlnx_sdi_set_display_disable(sdi);
+ xlnx_stc_disable(sdi->base);
+ drm_encoder_cleanup(&sdi->encoder);
+ drm_connector_cleanup(&sdi->connector);
+ xlnx_bridge_disable(sdi->bridge);
+}
+
+/* Component ops wired to the master DRM device via component_add(). */
+static const struct component_ops xlnx_sdi_component_ops = {
+ .bind = xlnx_sdi_bind,
+ .unbind = xlnx_sdi_unbind,
+};
+
+/*
+ * Platform probe: map registers, acquire and enable the three clocks,
+ * parse the DT port topology (video + optional ancillary port), request
+ * the IRQ, optionally resolve the VPSS bridge, and register as a DRM
+ * component. Clocks are released in reverse order on any failure.
+ *
+ * Return: 0 on success, negative error code otherwise (-EPROBE_DEFER
+ * while the VPSS bridge is not yet available).
+ */
+static int xlnx_sdi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xlnx_sdi *sdi;
+ struct device_node *vpss_node;
+ int ret, irq;
+ struct device_node *ports, *port;
+ u32 nports = 0, portmask = 0;
+
+ sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
+ sdi->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sdi->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(sdi->base);
+ }
+ platform_set_drvdata(pdev, sdi);
+
+ sdi->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+ if (IS_ERR(sdi->axi_clk)) {
+ ret = PTR_ERR(sdi->axi_clk);
+ dev_err(dev, "failed to get s_axi_aclk %d\n", ret);
+ return ret;
+ }
+
+ sdi->sditx_clk = devm_clk_get(dev, "sdi_tx_clk");
+ if (IS_ERR(sdi->sditx_clk)) {
+ ret = PTR_ERR(sdi->sditx_clk);
+ dev_err(dev, "failed to get sdi_tx_clk %d\n", ret);
+ return ret;
+ }
+
+ sdi->vidin_clk = devm_clk_get(dev, "video_in_clk");
+ if (IS_ERR(sdi->vidin_clk)) {
+ ret = PTR_ERR(sdi->vidin_clk);
+ dev_err(dev, "failed to get video_in_clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdi->axi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable axi_clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdi->sditx_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable sditx_clk %d\n", ret);
+ goto err_disable_axi_clk;
+ }
+
+ ret = clk_prepare_enable(sdi->vidin_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable vidin_clk %d\n", ret);
+ goto err_disable_sditx_clk;
+ }
+
+ /* in case all "port" nodes are grouped under a "ports" node */
+ /*
+ * NOTE(review): when of_get_child_by_name() succeeds, the reference
+ * it takes on "ports" is never dropped with of_node_put() — leak.
+ */
+ ports = of_get_child_by_name(sdi->dev->of_node, "ports");
+ if (!ports) {
+ dev_dbg(dev, "Searching for port nodes in device node.\n");
+ ports = sdi->dev->of_node;
+ }
+
+ for_each_child_of_node(ports, port) {
+ struct device_node *endpoint;
+ u32 index;
+
+ if (!port->name || of_node_cmp(port->name, "port")) {
+ dev_dbg(dev, "port name is null or node name is not port!\n");
+ continue;
+ }
+
+ /* only require that an endpoint exists; its contents are unused */
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(dev, "No remote port at %s\n", port->name);
+ of_node_put(endpoint); /* no-op: endpoint is NULL here */
+ ret = -EINVAL;
+ goto err_disable_vidin_clk;
+ }
+
+ of_node_put(endpoint);
+
+ ret = of_property_read_u32(port, "reg", &index);
+ if (ret) {
+ dev_err(dev, "reg property not present - %d\n", ret);
+ goto err_disable_vidin_clk;
+ }
+
+ portmask |= (1 << index);
+
+ nports++;
+ }
+
+ /* port 0 = video; an additional port 1 carries ancillary data */
+ if (nports == 2 && portmask & 0x3) {
+ dev_dbg(dev, "enable ancillary port\n");
+ sdi->enable_anc_data = true;
+ } else if (nports == 1 && portmask & 0x1) {
+ dev_dbg(dev, "no ancillary port\n");
+ sdi->enable_anc_data = false;
+ } else {
+ dev_err(dev, "Incorrect dt node!\n");
+ ret = -EINVAL;
+ goto err_disable_vidin_clk;
+ }
+
+ sdi->enable_st352_chroma = of_property_read_bool(sdi->dev->of_node,
+ "xlnx,tx-insert-c-str-st352");
+
+ /* disable interrupt */
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_disable_vidin_clk;
+ }
+
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xlnx_sdi_irq_handler, IRQF_ONESHOT,
+ dev_name(sdi->dev), sdi);
+ if (ret < 0)
+ goto err_disable_vidin_clk;
+
+ /* initialize the wait queue for GT reset event */
+ init_waitqueue_head(&sdi->wait_event);
+
+ /* Bridge support */
+ /*
+ * NOTE(review): the reference from of_parse_phandle() on vpss_node
+ * is never released with of_node_put() on any path — leak.
+ */
+ vpss_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vpss", 0);
+ if (vpss_node) {
+ sdi->bridge = of_xlnx_bridge_get(vpss_node);
+ if (!sdi->bridge) {
+ dev_info(sdi->dev, "Didn't get bridge instance\n");
+ ret = -EPROBE_DEFER;
+ goto err_disable_vidin_clk;
+ }
+ }
+
+ /* video mode properties needed by audio driver are shared to audio
+ * driver through a pointer in platform data. This will be used in
+ * audio driver. The solution may be needed to modify/extend to avoid
+ * probable error scenarios
+ */
+ pdev->dev.platform_data = &sdi->video_mode;
+
+ ret = component_add(dev, &xlnx_sdi_component_ops);
+ if (ret < 0)
+ goto err_disable_vidin_clk;
+
+ return ret;
+
+err_disable_vidin_clk:
+ clk_disable_unprepare(sdi->vidin_clk);
+err_disable_sditx_clk:
+ clk_disable_unprepare(sdi->sditx_clk);
+err_disable_axi_clk:
+ clk_disable_unprepare(sdi->axi_clk);
+
+ return ret;
+}
+
+/* Platform remove: unregister the component, release clocks in reverse. */
+static int xlnx_sdi_remove(struct platform_device *pdev)
+{
+ struct xlnx_sdi *sdi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &xlnx_sdi_component_ops);
+ clk_disable_unprepare(sdi->vidin_clk);
+ clk_disable_unprepare(sdi->sditx_clk);
+ clk_disable_unprepare(sdi->axi_clk);
+
+ return 0;
+}
+
+/* Device-tree match table for the SDI Tx IP. */
+static const struct of_device_id xlnx_sdi_of_match[] = {
+ { .compatible = "xlnx,sdi-tx"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_of_match);
+
+static struct platform_driver sdi_tx_driver = {
+ .probe = xlnx_sdi_probe,
+ .remove = xlnx_sdi_remove,
+ .driver = {
+ .name = "xlnx-sdi-tx",
+ .of_match_table = xlnx_sdi_of_match,
+ },
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
new file mode 100644
index 000000000000..534f7d80f29c
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI modes timing values for various
+ * resolutions
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_MODES_H_
+#define _XLNX_SDI_MODES_H_
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ * index 0 : value for integral fps
+ * index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ * index 0 : value for HD mode
+ * index 1 : value for SD mode
+ * index 2 : value for 3GA
+ * index 3 : value for 3GB
+ * index 4 : value for 6G
+ * index 5 : value for 12G
+ *
+ * NOTE(review): st352_byt1 is indexed by sdi_mod_prop_val (the
+ * "sdi_mode" connector property, range 0-5) in
+ * xlnx_sdi_calc_st352_payld(); verify the HD/SD ordering above matches
+ * the XSDI_MODE_* enum values.
+ */
+struct xlnx_sdi_display_config {
+ struct drm_display_mode mode;
+ u8 st352_byt2[2];
+ u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - SDI DRM modes
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x486i@60Hz */
+ {{ DRM_MODE("720x486i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 243, 247, 250, 262, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+#endif /* _XLNX_SDI_MODES_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
new file mode 100644
index 000000000000..61ee98e87fdc
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <linux/device.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_timing.h"
+
+/* timing controller register offsets */
+#define XSTC_CTL 0x00
+#define XSTC_STATS 0x04
+#define XSTC_ERROR 0x08
+#define XSTC_GASIZE 0x60
+#define XSTC_GENC 0x68
+#define XSTC_GPOL 0x6c
+#define XSTC_GHSIZE 0x70
+#define XSTC_GVSIZE 0x74
+#define XSTC_GHSYNC 0x78
+#define XSTC_GVBH_F0 0x7c
+#define XSTC_GVSYNC_F0 0x80
+#define XSTC_GVSH_F0 0x84
+#define XSTC_GVBH_F1 0x88
+#define XSTC_GVSYNC_F1 0x8C
+#define XSTC_GVSH_F1 0x90
+#define XSTC_GASIZE_F1 0x94
+#define XSTC_OFFSET 0x10000
+
+/* timing controller register bit */
+#define XSTC_CTL_FIP BIT(6) /* field id polarity */
+#define XSTC_CTL_ACP BIT(5) /* active chroma polarity */
+#define XSTC_CTL_AVP BIT(4) /* active video polarity */
+#define XSTC_CTL_HSP BIT(3) /* hori sync polarity */
+#define XSTC_CTL_VSP BIT(2) /* vert sync polarity */
+#define XSTC_CTL_HBP BIT(1) /* hori blank polarity */
+#define XSTC_CTL_VBP BIT(0) /* vert blank polarity */
+#define XSTC_CTL_FIPSS BIT(26) /* field id polarity source */
+#define XSTC_CTL_ACPSS BIT(25) /* active chroma polarity src */
+#define XSTC_CTL_AVPSS BIT(24) /* active video polarity src */
+#define XSTC_CTL_HSPSS BIT(23) /* hori sync polarity src */
+#define XSTC_CTL_VSPSS BIT(22) /* vert sync polarity src */
+#define XSTC_CTL_HBPSS BIT(21) /* hori blank polarity src */
+#define XSTC_CTL_VBPSS BIT(20) /* vert blank polarity src */
+#define XSTC_CTL_VCSS BIT(18) /* chroma src */
+#define XSTC_CTL_VASS BIT(17) /* vertical offset src */
+#define XSTC_CTL_VBSS BIT(16) /* vertical sync end src */
+#define XSTC_CTL_VSSS BIT(15) /* vertical sync start src */
+#define XSTC_CTL_VFSS BIT(14) /* vertical active size src */
+#define XSTC_CTL_VTSS BIT(13) /* vertical frame size src */
+#define XSTC_CTL_HBSS BIT(11) /* horiz sync end src */
+#define XSTC_CTL_HSSS BIT(10) /* horiz sync start src */
+#define XSTC_CTL_HFSS BIT(9) /* horiz active size src */
+#define XSTC_CTL_HTSS BIT(8) /* horiz frame size src */
+#define XSTC_CTL_GE BIT(2) /* timing generator enable */
+#define XSTC_CTL_RU BIT(1) /* timing register update */
+
+/* timing generator horizontal 1 */
+#define XSTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GH1_BPSTART_SHIFT 16
+#define XSTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator vertical 1 (field 0) */
+#define XSTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GV1_BPSTART_SHIFT 16
+#define XSTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator/detector vblank/vsync horizontal offset registers */
+#define XSTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XSTC_XVXHOX_HEND_SHIFT 16
+#define XSTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XSTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XSTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XSTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+/* reset register bit definition */
+#define XSTC_RST BIT(31)
+/* Interlaced bit in XSTC_GENC */
+#define XSTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_stc_polarity - timing signal polarity
+ *
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ */
+struct xlnx_stc_polarity {
+ u8 field_id;
+ u8 vblank;
+ u8 vsync;
+ u8 hblank;
+ u8 hsync;
+};
+
+/**
+ * struct xlnx_stc_hori_off - timing signal horizontal offset
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ */
+struct xlnx_stc_hori_off {
+ u16 v0blank_hori_start;
+ u16 v0blank_hori_end;
+ u16 v0sync_hori_start;
+ u16 v0sync_hori_end;
+ u16 v1blank_hori_start;
+ u16 v1blank_hori_end;
+ u16 v1sync_hori_start;
+ u16 v1sync_hori_end;
+};
+
+/**
+ * xlnx_stc_writel - Memory mapped SDI Tx timing controller write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ * @val: value to be written
+ *
+ * This function writes @val to an SDI Tx timing controller register; the
+ * timing controller register bank lives at XSTC_OFFSET from @base
+ */
+static inline void xlnx_stc_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_readl - Memory mapped timing controller register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ *
+ * Return: The contents of the SDI Tx timing controller register
+ *
+ * This function returns the contents of the corresponding SDI Tx register.
+ */
+static inline u32 xlnx_stc_readl(void __iomem *base, int offset)
+{
+ return readl(base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_enable - Enable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function enables the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_enable(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_disable - Disable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function disables the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_disable(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_reset - Reset timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function resets the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_reset(void __iomem *base)
+{
+ u32 reg;
+
+ xlnx_stc_writel(base, XSTC_CTL, XSTC_RST);
+
+ /* enable register update */
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
+
+/**
+ * xlnx_stc_polarity - Configure timing signal polarity
+ * @base: Base address of SDI Tx subsystem
+ * @polarity: timing signal polarity data
+ *
+ * This function configures the timing signal polarities in XSTC_GPOL
+ */
+static void xlnx_stc_polarity(void __iomem *base,
+ struct xlnx_stc_polarity *polarity)
+{
+ u32 reg = 0;
+
+ reg = XSTC_CTL_ACP; /* active chroma polarity: always active high */
+ reg |= XSTC_CTL_AVP; /* active video polarity: always active high */
+ if (polarity->field_id)
+ reg |= XSTC_CTL_FIP;
+ if (polarity->vblank)
+ reg |= XSTC_CTL_VBP;
+ if (polarity->vsync)
+ reg |= XSTC_CTL_VSP;
+ if (polarity->hblank)
+ reg |= XSTC_CTL_HBP;
+ if (polarity->hsync)
+ reg |= XSTC_CTL_HSP;
+
+ xlnx_stc_writel(base, XSTC_GPOL, reg); /* XSTC_CTL_xxP bits presumably share the GPOL bit layout — confirm against TRM */
+}
+
+/**
+ * xlnx_stc_hori_off - Configure horizontal timing offset
+ * @base: Base address of SDI Tx subsystem
+ * @hori_off: horizontal offset configuration data
+ * @flags: Display flags
+ *
+ * This function configures the vblank/vsync horizontal offsets
+ */
+static void xlnx_stc_hori_off(void __iomem *base,
+ struct xlnx_stc_hori_off *hori_off,
+ enum display_flags flags)
+{
+ u32 reg;
+
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hori_off->v0blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v0blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVBH_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hori_off->v0sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v0sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVSH_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ if (flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hori_off->v1blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v1blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVBH_F1, reg);
+ }
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ if (flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hori_off->v1sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v1sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVSH_F1, reg);
+ }
+}
+
+/**
+ * xlnx_stc_src - Configure timing source
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function selects the generator registers as the source of all timings
+ */
+static void xlnx_stc_src(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ reg |= XSTC_CTL_VCSS;
+ reg |= XSTC_CTL_VASS;
+ reg |= XSTC_CTL_VBSS;
+ reg |= XSTC_CTL_VSSS;
+ reg |= XSTC_CTL_VFSS;
+ reg |= XSTC_CTL_VTSS;
+ reg |= XSTC_CTL_HBSS;
+ reg |= XSTC_CTL_HSSS;
+ reg |= XSTC_CTL_HFSS;
+ reg |= XSTC_CTL_HTSS;
+ xlnx_stc_writel(base, XSTC_CTL, reg);
+}
+
+/**
+ * xlnx_stc_sig - Generates timing signal
+ * @base: Base address of SDI Tx subsystem
+ * @vm: video mode
+ *
+ * This function generates the timing signals for the given video mode
+ */
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xlnx_stc_hori_off hori_off;
+ struct xlnx_stc_polarity polarity;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_RU); /* hold off register update while programming */
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+ DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+ DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+ DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+ reg = htotal & XSTC_GHFRAME_HSIZE;
+ xlnx_stc_writel(base, XSTC_GHSIZE, reg);
+ reg = vtotal & XSTC_GVFRAME_HSIZE_F1;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vm->pixelclock == 148500000) /* NOTE(review): 148.5 MHz presumably identifies 3GB interlaced modes — confirm */
+ reg |= (reg + 2) <<
+ XSTC_GV1_BPSTART_SHIFT;
+ else
+ reg |= (reg + 1) <<
+ XSTC_GV1_BPSTART_SHIFT;
+ } else {
+ reg |= reg << XSTC_GV1_BPSTART_SHIFT;
+ }
+ xlnx_stc_writel(base, XSTC_GVSIZE, reg);
+ reg = hactive & XSTC_GA_ACTSIZE_MASK;
+ reg |= (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_stc_writel(base, XSTC_GASIZE, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vactive == 243) /* NOTE(review): 243 looks like NTSC field height special case — confirm */
+ reg = ((vactive + 1) & XSTC_GA_ACTSIZE_MASK) << 16;
+ else
+ reg = (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_stc_writel(base, XSTC_GASIZE_F1, reg);
+ }
+
+ reg = hsync_start & XSTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << XSTC_GH1_BPSTART_SHIFT) &
+ XSTC_GH1_BPSTART_MASK;
+ xlnx_stc_writel(base, XSTC_GHSYNC, reg);
+ reg = vsync_start & XSTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << XSTC_GV1_BPSTART_SHIFT) &
+ XSTC_GV1_BPSTART_MASK;
+
+ /*
+ * Fix the Vsync_vstart and vsync_vend of Field 0
+ * for all interlaced modes including 3GB.
+ */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) - 1) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) - 1);
+
+ xlnx_stc_writel(base, XSTC_GVSYNC_F0, reg);
+
+ /*
+ * Fix the Vsync_vstart and vsync_vend of Field 1
+ * for interlaced and 3GB modes.
+ */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vm->pixelclock == 148500000)
+ /* Revert and increase by 1 for 3GB mode */
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) + 2) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) + 2);
+ else
+ /* Only revert the reduction */
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) + 1) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) + 1);
+ }
+
+ hori_off.v0blank_hori_start = hactive;
+ hori_off.v0blank_hori_end = hactive;
+ hori_off.v0sync_hori_start = hsync_start;
+ hori_off.v0sync_hori_end = hsync_start;
+ hori_off.v1blank_hori_start = hactive;
+ hori_off.v1blank_hori_end = hactive;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ hori_off.v1sync_hori_start = hsync_start - (htotal / 2);
+ hori_off.v1sync_hori_end = hsync_start - (htotal / 2);
+ xlnx_stc_writel(base, XSTC_GVSYNC_F1, reg);
+ reg = xlnx_stc_readl(base, XSTC_GENC)
+ | XSTC_GENC_INTERL;
+ xlnx_stc_writel(base, XSTC_GENC, reg);
+ } else {
+ hori_off.v1sync_hori_start = hsync_start;
+ hori_off.v1sync_hori_end = hsync_start;
+ reg = xlnx_stc_readl(base, XSTC_GENC)
+ & ~XSTC_GENC_INTERL;
+ xlnx_stc_writel(base, XSTC_GENC, reg);
+ }
+
+ xlnx_stc_hori_off(base, &hori_off, vm->flags);
+ /* set up polarity */
+ polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW); /* blank polarity follows sync — presumably intentional */
+ polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+ xlnx_stc_polarity(base, &polarity);
+
+ xlnx_stc_src(base);
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU); /* re-enable register update to latch the new timing */
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
new file mode 100644
index 000000000000..4ca9f8972e0a
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_TIMING_H_
+#define _XLNX_SDI_TIMING_H_
+
+struct videomode;
+
+void xlnx_stc_enable(void __iomem *base);
+void xlnx_stc_disable(void __iomem *base);
+void xlnx_stc_reset(void __iomem *base);
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm);
+
+#endif /* _XLNX_SDI_TIMING_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_vtc.c b/drivers/gpu/drm/xlnx/xlnx_vtc.c
new file mode 100644
index 000000000000..427b35b84e16
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_vtc.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ * Saurabh Sengar <saurabhs@xilinx.com>
+ * Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver adds support to control the Xilinx Video Timing
+ * Controller connected to the CRTC.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+
+/* register offsets */
+#define XVTC_CTL 0x000
+#define XVTC_VER 0x010
+#define XVTC_GASIZE 0x060
+#define XVTC_GENC 0x068
+#define XVTC_GPOL 0x06c
+#define XVTC_GHSIZE 0x070
+#define XVTC_GVSIZE 0x074
+#define XVTC_GHSYNC 0x078
+#define XVTC_GVBHOFF_F0 0x07c
+#define XVTC_GVSYNC_F0 0x080
+#define XVTC_GVSHOFF_F0 0x084
+#define XVTC_GVBHOFF_F1 0x088
+#define XVTC_GVSYNC_F1 0x08C
+#define XVTC_GVSHOFF_F1 0x090
+#define XVTC_GASIZE_F1 0x094
+
+/* vtc control register bits */
+#define XVTC_CTL_SWRESET BIT(31)
+#define XVTC_CTL_FIPSS BIT(26)
+#define XVTC_CTL_ACPSS BIT(25)
+#define XVTC_CTL_AVPSS BIT(24)
+#define XVTC_CTL_HSPSS BIT(23)
+#define XVTC_CTL_VSPSS BIT(22)
+#define XVTC_CTL_HBPSS BIT(21)
+#define XVTC_CTL_VBPSS BIT(20)
+#define XVTC_CTL_VCSS BIT(18)
+#define XVTC_CTL_VASS BIT(17)
+#define XVTC_CTL_VBSS BIT(16)
+#define XVTC_CTL_VSSS BIT(15)
+#define XVTC_CTL_VFSS BIT(14)
+#define XVTC_CTL_VTSS BIT(13)
+#define XVTC_CTL_HBSS BIT(11)
+#define XVTC_CTL_HSSS BIT(10)
+#define XVTC_CTL_HFSS BIT(9)
+#define XVTC_CTL_HTSS BIT(8)
+#define XVTC_CTL_GE BIT(2)
+#define XVTC_CTL_RU BIT(1)
+
+/* vtc generator polarity register bits */
+#define XVTC_GPOL_FIP BIT(6)
+#define XVTC_GPOL_ACP BIT(5)
+#define XVTC_GPOL_AVP BIT(4)
+#define XVTC_GPOL_HSP BIT(3)
+#define XVTC_GPOL_VSP BIT(2)
+#define XVTC_GPOL_HBP BIT(1)
+#define XVTC_GPOL_VBP BIT(0)
+
+/* vtc generator horizontal 1 */
+#define XVTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GH1_BPSTART_SHIFT 16
+#define XVTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator vertical 1 (field 0) */
+#define XVTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GV1_BPSTART_SHIFT 16
+#define XVTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define XVTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XVTC_XVXHOX_HEND_SHIFT 16
+#define XVTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XVTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XVTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XVTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+
+/* vtc generator encoding register bits */
+#define XVTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_vtc - Xilinx VTC object
+ *
+ * @bridge: xilinx bridge structure
+ * @dev: device structure
+ * @base: base addr
+ * @ppc: pixels per clock
+ * @axi_clk: AXI Lite clock
+ * @vid_clk: Video clock
+ */
+struct xlnx_vtc {
+ struct xlnx_bridge bridge;
+ struct device *dev;
+ void __iomem *base;
+ u32 ppc;
+ struct clk *axi_clk;
+ struct clk *vid_clk;
+};
+
+static inline void xlnx_vtc_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_vtc_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static inline struct xlnx_vtc *bridge_to_vtc(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xlnx_vtc, bridge);
+}
+
+static void xlnx_vtc_reset(struct xlnx_vtc *vtc)
+{
+ u32 reg;
+
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, XVTC_CTL_SWRESET);
+
+ /* enable register update */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU);
+}
+
+/**
+ * xlnx_vtc_enable - Enable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function enables the VTC
+ */
+static int xlnx_vtc_enable(struct xlnx_bridge *bridge)
+{
+ u32 reg;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ /* enable generator */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_GE);
+ dev_dbg(vtc->dev, "enabled\n");
+ return 0;
+}
+
+/**
+ * xlnx_vtc_disable - Disable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * This function disables and resets the VTC.
+ */
+static void xlnx_vtc_disable(struct xlnx_bridge *bridge)
+{
+ u32 reg;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ /* disable generator and reset */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_GE);
+ xlnx_vtc_reset(vtc);
+ dev_dbg(vtc->dev, "disabled\n");
+}
+
+/**
+ * xlnx_vtc_set_timing - Configures the VTC
+ * @bridge: xilinx bridge structure pointer
+ * @vm: video mode requested
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function calculates the timing values from the video mode
+ * structure passed from the CRTC and configures the VTC.
+ */
+static int xlnx_vtc_set_timing(struct xlnx_bridge *bridge,
+ struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_RU); /* hold off register update while programming */
+
+ vm->hactive /= vtc->ppc; /* NOTE(review): mutates caller's videomode — confirm callers pass a scratch copy */
+ vm->hfront_porch /= vtc->ppc;
+ vm->hback_porch /= vtc->ppc;
+ vm->hsync_len /= vtc->ppc;
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ dev_dbg(vtc->dev, "ha: %d, va: %d\n", hactive, vactive);
+ dev_dbg(vtc->dev, "ht: %d, vt: %d\n", htotal, vtotal);
+ dev_dbg(vtc->dev, "hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+ dev_dbg(vtc->dev, "vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+ reg = htotal & XVTC_GHFRAME_HSIZE;
+ xlnx_vtc_writel(vtc->base, XVTC_GHSIZE, reg);
+
+ reg = vtotal & XVTC_GVFRAME_HSIZE_F1;
+ reg |= reg << XVTC_GV1_BPSTART_SHIFT;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSIZE, reg);
+
+ reg = hactive & XVTC_GA_ACTSIZE_MASK;
+ reg |= (vactive & XVTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_vtc_writel(vtc->base, XVTC_GASIZE, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ xlnx_vtc_writel(vtc->base, XVTC_GASIZE_F1, reg);
+
+ reg = hsync_start & XVTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << XVTC_GH1_BPSTART_SHIFT) &
+ XVTC_GH1_BPSTART_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GHSYNC, reg);
+
+ reg = vsync_start & XVTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << XVTC_GV1_BPSTART_SHIFT) &
+ XVTC_GV1_BPSTART_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F0, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F1, reg);
+ reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) | XVTC_GENC_INTERL;
+ xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+ } else {
+ reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) & ~XVTC_GENC_INTERL;
+ xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+ }
+
+ /* configure horizontal offset */
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F1, reg);
+ }
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = (hsync_start - (htotal / 2)) & XVTC_XVXHOX_HSTART_MASK;
+ reg |= ((hsync_start - (htotal / 2)) <<
+ XVTC_XVXHOX_HEND_SHIFT) & XVTC_XVXHOX_HEND_MASK;
+ } else {
+ reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ }
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F1, reg);
+
+ /* configure polarity of signals */
+ reg = 0;
+ reg |= XVTC_GPOL_ACP; /* active chroma polarity: always active high */
+ reg |= XVTC_GPOL_AVP; /* active video polarity: always active high */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ reg |= XVTC_GPOL_FIP;
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH) {
+ reg |= XVTC_GPOL_VBP; /* vblank polarity follows vsync */
+ reg |= XVTC_GPOL_VSP;
+ }
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH) {
+ reg |= XVTC_GPOL_HBP; /* hblank polarity follows hsync */
+ reg |= XVTC_GPOL_HSP;
+ }
+ xlnx_vtc_writel(vtc->base, XVTC_GPOL, reg);
+
+ /* configure timing source */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ reg |= XVTC_CTL_VCSS;
+ reg |= XVTC_CTL_VASS;
+ reg |= XVTC_CTL_VBSS;
+ reg |= XVTC_CTL_VSSS;
+ reg |= XVTC_CTL_VFSS;
+ reg |= XVTC_CTL_VTSS;
+ reg |= XVTC_CTL_HBSS;
+ reg |= XVTC_CTL_HSSS;
+ reg |= XVTC_CTL_HFSS;
+ reg |= XVTC_CTL_HTSS;
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg);
+
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU); /* latch the new timing */
+ dev_dbg(vtc->dev, "set timing done\n");
+
+ return 0;
+}
+
+static int xlnx_vtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xlnx_vtc *vtc;
+ struct resource *res;
+ int ret;
+
+ vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+ if (!vtc)
+ return -ENOMEM;
+
+ vtc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get resource for device\n");
+ return -EFAULT;
+ }
+
+ vtc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vtc->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(vtc->base);
+ }
+
+ platform_set_drvdata(pdev, vtc);
+
+ ret = of_property_read_u32(dev->of_node, "xlnx,pixels-per-clock",
+ &vtc->ppc);
+ if (ret || (vtc->ppc != 1 && vtc->ppc != 2 && vtc->ppc != 4)) {
+ dev_err(dev, "failed to get ppc\n");
+ return ret ? ret : -EINVAL; /* don't return 0 when the property is present but unsupported */
+ }
+ dev_info(dev, "vtc ppc = %d\n", vtc->ppc);
+
+ vtc->axi_clk = devm_clk_get(vtc->dev, "s_axi_aclk");
+ if (IS_ERR(vtc->axi_clk)) {
+ ret = PTR_ERR(vtc->axi_clk);
+ dev_err(dev, "failed to get axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ vtc->vid_clk = devm_clk_get(vtc->dev, "clk");
+ if (IS_ERR(vtc->vid_clk)) {
+ ret = PTR_ERR(vtc->vid_clk);
+ dev_err(dev, "failed to get video clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vtc->axi_clk);
+ if (ret) {
+ dev_err(vtc->dev, "unable to enable axilite clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vtc->vid_clk);
+ if (ret) {
+ dev_err(vtc->dev, "unable to enable video clk %d\n", ret);
+ goto err_axi_clk;
+ }
+
+ xlnx_vtc_reset(vtc);
+
+ vtc->bridge.enable = &xlnx_vtc_enable;
+ vtc->bridge.disable = &xlnx_vtc_disable;
+ vtc->bridge.set_timing = &xlnx_vtc_set_timing;
+ vtc->bridge.of_node = dev->of_node;
+ ret = xlnx_bridge_register(&vtc->bridge);
+ if (ret) {
+ dev_err(dev, "Bridge registration failed\n");
+ goto err_vid_clk;
+ }
+
+ dev_info(dev, "Xilinx VTC IP version : 0x%08x\n",
+ xlnx_vtc_readl(vtc->base, XVTC_VER));
+ dev_info(dev, "Xilinx VTC DRM Bridge driver probed\n");
+ return 0;
+
+err_vid_clk:
+ clk_disable_unprepare(vtc->vid_clk);
+err_axi_clk:
+ clk_disable_unprepare(vtc->axi_clk);
+ return ret;
+}
+
+static int xlnx_vtc_remove(struct platform_device *pdev)
+{
+ struct xlnx_vtc *vtc = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&vtc->bridge);
+ clk_disable_unprepare(vtc->vid_clk);
+ clk_disable_unprepare(vtc->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_vtc_of_match[] = {
+ { .compatible = "xlnx,bridge-v-tc-6.1" },
+ { /* end of table */ },
+};
+
+MODULE_DEVICE_TABLE(of, xlnx_vtc_of_match);
+
+static struct platform_driver xlnx_vtc_bridge_driver = {
+ .probe = xlnx_vtc_probe,
+ .remove = xlnx_vtc_remove,
+ .driver = {
+ .name = "xlnx,bridge-vtc",
+ .of_match_table = xlnx_vtc_of_match,
+ },
+};
+
+module_platform_driver(xlnx_vtc_bridge_driver);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx VTC Bridge Driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
new file mode 100644
index 000000000000..1786a70897b5
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -0,0 +1,3333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Display Controller Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_fb.h"
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The display part of ZynqMP DP subsystem. Internally, the device
+ * is partitioned into 3 blocks: AV buffer manager, Blender, Audio.
+ * The driver creates the DRM crtc and plane objects and maps the DRM
+ * interface into those 3 blocks. In high level, the driver is layered
+ * in the following way:
+ *
+ * zynqmp_disp_crtc & zynqmp_disp_plane
+ * |->zynqmp_disp
+ * |->zynqmp_disp_aud
+ * |->zynqmp_disp_blend
+ * |->zynqmp_disp_av_buf
+ *
+ * The driver APIs are used externally by
+ * - zynqmp_dpsub: Top level ZynqMP DP subsystem driver
+ * - zynqmp_dp: ZynqMP DP driver
+ * - xlnx_crtc: Xilinx DRM specific crtc functions
+ */
+
+/* The default value is ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 */
+static uint zynqmp_disp_gfx_init_fmt;
+module_param_named(gfx_init_fmt, zynqmp_disp_gfx_init_fmt, uint, 0444);
+MODULE_PARM_DESC(gfx_init_fmt, "The initial format of the graphics layer\n"
+ "\t\t0 = rgb565 (default)\n"
+ "\t\t1 = rgb888\n"
+ "\t\t2 = argb8888\n");
+/* These values must match the indexes of av_buf_gfx_fmts[] */
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 10
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888 5
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888 1
+static const u32 zynqmp_disp_gfx_init_fmts[] = {
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565,
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888,
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888,
+};
+
+/* Blender registers */
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_0 0x0
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_1 0x4
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_2 0x8
+#define ZYNQMP_DISP_V_BLEND_BG_MAX 0xfff
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX 0xff
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT 0x14
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL 0x18
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define ZYNQMP_DISP_V_BLEND_NUM_COEFF 9
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0 0x44
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF1 0x48
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF2 0x4c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF3 0x50
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF4 0x54
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF5 0x58
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF6 0x5c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF7 0x60
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF8 0x64
+#define ZYNQMP_DISP_V_BLEND_NUM_OFFSET 3
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define ZYNQMP_DISP_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define ZYNQMP_DISP_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define ZYNQMP_DISP_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define ZYNQMP_DISP_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0 0x80
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF1 0x84
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF2 0x88
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF3 0x8c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF4 0x90
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF5 0x94
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF6 0x98
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF7 0x9c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF8 0xa0
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define ZYNQMP_DISP_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define ZYNQMP_DISP_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define ZYNQMP_DISP_AV_BUF_FMT 0x0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define ZYNQMP_DISP_AV_BUF_NON_LIVE_LATENCY 0x8
+#define ZYNQMP_DISP_AV_BUF_CHBUF 0x10
+#define ZYNQMP_DISP_AV_BUF_CHBUF_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define ZYNQMP_DISP_AV_BUF_STATUS 0x28
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL 0x2c
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE0 0x30
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE1 0x34
+#define ZYNQMP_DISP_AV_BUF_STC_ADJ 0x38
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT0 0x60
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT1 0x64
+#define ZYNQMP_DISP_AV_BUF_OUTPUT 0x70
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT0 0x74
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT1 0x78
+#define ZYNQMP_DISP_AV_BUF_PATTERN_GEN_SELECT 0x100
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC 0x120
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING BIT(2)
+#define ZYNQMP_DISP_AV_BUF_SRST_REG 0x124
+#define ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST BIT(1)
+#define ZYNQMP_DISP_AV_BUF_AUDIO_CH_CONFIG 0x12c
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF 0x200
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP1_SF 0x204
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP2_SF 0x208
+#define ZYNQMP_DISP_AV_BUF_VID_COMP0_SF 0x20c
+#define ZYNQMP_DISP_AV_BUF_VID_COMP1_SF 0x210
+#define ZYNQMP_DISP_AV_BUF_VID_COMP2_SF 0x214
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF 0x218
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP1_SF 0x21c
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP2_SF 0x220
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG 0x224
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF 0x228
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP1_SF 0x22c
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP2_SF 0x230
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG 0x234
+#define ZYNQMP_DISP_AV_BUF_4BIT_SF 0x11111
+#define ZYNQMP_DISP_AV_BUF_5BIT_SF 0x10842
+#define ZYNQMP_DISP_AV_BUF_6BIT_SF 0x10410
+#define ZYNQMP_DISP_AV_BUF_8BIT_SF 0x10101
+#define ZYNQMP_DISP_AV_BUF_10BIT_SF 0x10040
+#define ZYNQMP_DISP_AV_BUF_NULL_SF 0
+#define ZYNQMP_DISP_AV_BUF_NUM_SF 3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_12 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_MASK GENMASK(2, 0)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB 0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK GENMASK(5, 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_CB_FIRST BIT(8)
+#define ZYNQMP_DISP_AV_BUF_PALETTE_MEMORY 0x400
+
+/* Audio registers */
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME 0x0
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
+#define ZYNQMP_DISP_AUD_MIXER_META_DATA 0x4
+#define ZYNQMP_DISP_AUD_CH_STATUS0 0x8
+#define ZYNQMP_DISP_AUD_CH_STATUS1 0xc
+#define ZYNQMP_DISP_AUD_CH_STATUS2 0x10
+#define ZYNQMP_DISP_AUD_CH_STATUS3 0x14
+#define ZYNQMP_DISP_AUD_CH_STATUS4 0x18
+#define ZYNQMP_DISP_AUD_CH_STATUS5 0x1c
+#define ZYNQMP_DISP_AUD_CH_A_DATA0 0x20
+#define ZYNQMP_DISP_AUD_CH_A_DATA1 0x24
+#define ZYNQMP_DISP_AUD_CH_A_DATA2 0x28
+#define ZYNQMP_DISP_AUD_CH_A_DATA3 0x2c
+#define ZYNQMP_DISP_AUD_CH_A_DATA4 0x30
+#define ZYNQMP_DISP_AUD_CH_A_DATA5 0x34
+#define ZYNQMP_DISP_AUD_CH_B_DATA0 0x38
+#define ZYNQMP_DISP_AUD_CH_B_DATA1 0x3c
+#define ZYNQMP_DISP_AUD_CH_B_DATA2 0x40
+#define ZYNQMP_DISP_AUD_CH_B_DATA3 0x44
+#define ZYNQMP_DISP_AUD_CH_B_DATA4 0x48
+#define ZYNQMP_DISP_AUD_CH_B_DATA5 0x4c
+#define ZYNQMP_DISP_AUD_SOFT_RESET 0xc00
+#define ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST BIT(0)
+
+#define ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS 4
+#define ZYNQMP_DISP_AV_BUF_NUM_BUFFERS 6
+
+#define ZYNQMP_DISP_NUM_LAYERS 2
+#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
+/*
+ * 3840x2160 is advertised max resolution, but almost any resolutions under
+ * 300Mhz pixel rate would work. Thus put 4096 as maximum width and height.
+ */
+#define ZYNQMP_DISP_MAX_WIDTH 4096
+#define ZYNQMP_DISP_MAX_HEIGHT 4096
+/* 44-bit addressing. This is actually a DPDMA limitation */
+#define ZYNQMP_DISP_MAX_DMA_BIT 44
+
+/**
+ * enum zynqmp_disp_layer_type - Layer type (can be used for hw ID)
+ * @ZYNQMP_DISP_LAYER_VID: Video layer
+ * @ZYNQMP_DISP_LAYER_GFX: Graphics layer
+ */
+enum zynqmp_disp_layer_type {
+ ZYNQMP_DISP_LAYER_VID,
+ ZYNQMP_DISP_LAYER_GFX
+};
+
+/**
+ * enum zynqmp_disp_layer_mode - Layer mode
+ * @ZYNQMP_DISP_LAYER_NONLIVE: non-live (memory) mode
+ * @ZYNQMP_DISP_LAYER_LIVE: live (stream) mode
+ */
+enum zynqmp_disp_layer_mode {
+ ZYNQMP_DISP_LAYER_NONLIVE,
+ ZYNQMP_DISP_LAYER_LIVE
+};
+
+/**
+ * struct zynqmp_disp_layer_dma - struct for DMA engine
+ * @chan: DMA channel
+ * @is_active: flag if the DMA is active
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template
+ */
+struct zynqmp_disp_layer_dma {
+ struct dma_chan *chan;
+ bool is_active;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+/**
+ * struct zynqmp_disp_layer - Display subsystem layer
+ * @plane: DRM plane
+ * @bridge: Xlnx bridge
+ * @of_node: device node
+ * @dma: struct for DMA engine
+ * @num_chan: Number of DMA channel
+ * @id: Layer ID
+ * @offset: Layer offset in the register space
+ * @enabled: flag if enabled
+ * @fmt: Current format descriptor
+ * @drm_fmts: Array of supported DRM formats
+ * @num_fmts: Number of supported DRM formats
+ * @bus_fmts: Array of supported bus formats
+ * @num_bus_fmts: Number of supported bus formats
+ * @w: Width
+ * @h: Height
+ * @mode: the operation mode
+ * @other: other layer
+ * @disp: back pointer to struct zynqmp_disp
+ */
+struct zynqmp_disp_layer {
+ struct drm_plane plane;
+ struct xlnx_bridge bridge;
+ struct device_node *of_node;
+ struct zynqmp_disp_layer_dma dma[ZYNQMP_DISP_MAX_NUM_SUB_PLANES];
+ unsigned int num_chan;
+ enum zynqmp_disp_layer_type id;
+ u32 offset;
+ u8 enabled;
+ const struct zynqmp_disp_fmt *fmt;
+ u32 *drm_fmts;
+ unsigned int num_fmts;
+ u32 *bus_fmts;
+ unsigned int num_bus_fmts;
+ u32 w;
+ u32 h;
+ enum zynqmp_disp_layer_mode mode;
+ struct zynqmp_disp_layer *other;
+ struct zynqmp_disp *disp;
+};
+
+/**
+ * struct zynqmp_disp_blend - Blender
+ * @base: Base address offset
+ */
+struct zynqmp_disp_blend {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_av_buf - AV buffer manager
+ * @base: Base address offset
+ */
+struct zynqmp_disp_av_buf {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_aud - Audio
+ * @base: Base address offset
+ */
+struct zynqmp_disp_aud {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp - Display subsystem
+ * @xlnx_crtc: Xilinx DRM crtc
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @enabled: flag if enabled
+ * @blend: Blender block
+ * @av_buf: AV buffer manager block
+ * @aud: Audio block
+ * @layers: layers
+ * @g_alpha_prop: global alpha property
+ * @alpha: current global alpha value
+ * @g_alpha_en_prop: the global alpha enable property
+ * @alpha_en: flag if the global alpha is enabled
+ * @color_prop: output color format property
+ * @color: current output color value
+ * @bg_c0_prop: 1st component of background color property
+ * @bg_c0: current value of 1st background color component
+ * @bg_c1_prop: 2nd component of background color property
+ * @bg_c1: current value of 2nd background color component
+ * @bg_c2_prop: 3rd component of background color property
+ * @bg_c2: current value of 3rd background color component
+ * @tpg_prop: Test Pattern Generation mode property
+ * @tpg_on: current TPG mode state
+ * @event: pending vblank event request
+ * @_ps_pclk: Pixel clock from PS
+ * @_pl_pclk: Pixel clock from PL
+ * @pclk: Pixel clock
+ * @pclk_en: Flag if the pixel clock is enabled
+ * @_ps_audclk: Audio clock from PS
+ * @_pl_audclk: Audio clock from PL
+ * @audclk: Audio clock
+ * @audclk_en: Flag if the audio clock is enabled
+ * @aclk: APB clock
+ * @aclk_en: Flag if the APB clock is enabled
+ */
+struct zynqmp_disp {
+	struct xlnx_crtc xlnx_crtc;
+	struct device *dev;
+	struct zynqmp_dpsub *dpsub;
+	struct drm_device *drm;
+	bool enabled;
+	struct zynqmp_disp_blend blend;
+	struct zynqmp_disp_av_buf av_buf;
+	struct zynqmp_disp_aud aud;
+	struct zynqmp_disp_layer layers[ZYNQMP_DISP_NUM_LAYERS];
+	struct drm_property *g_alpha_prop;
+	u32 alpha;
+	struct drm_property *g_alpha_en_prop;
+	bool alpha_en;
+	struct drm_property *color_prop;
+	unsigned int color;
+	struct drm_property *bg_c0_prop;
+	u32 bg_c0;
+	struct drm_property *bg_c1_prop;
+	u32 bg_c1;
+	struct drm_property *bg_c2_prop;
+	u32 bg_c2;
+	struct drm_property *tpg_prop;
+	bool tpg_on;
+	struct drm_pending_vblank_event *event;
+	/* Don't operate directly on _ps_ / _pl_ clocks; use pclk / audclk */
+	struct clk *_ps_pclk;
+	struct clk *_pl_pclk;
+	struct clk *pclk;
+	bool pclk_en;
+	struct clk *_ps_audclk;
+	struct clk *_pl_audclk;
+	struct clk *audclk;
+	bool audclk_en;
+	struct clk *aclk;
+	bool aclk_en;
+};
+
+/**
+ * struct zynqmp_disp_fmt - Display subsystem format mapping
+ * @drm_fmt: drm format
+ * @disp_fmt: Display subsystem format
+ * @bus_fmt: Bus formats (live formats)
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for up to 3 color components
+ */
+struct zynqmp_disp_fmt {
+	u32 drm_fmt;
+	u32 disp_fmt;
+	u32 bus_fmt;
+	bool rgb;
+	bool swap;
+	bool chroma_sub;
+	u32 sf[3];
+};
+
+/* Write @val to the 32-bit register at @base + @offset */
+static void zynqmp_disp_write(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/* Read the 32-bit register at @base + @offset */
+static u32 zynqmp_disp_read(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/* Read-modify-write: clear the bits in @clr at @base + @offset */
+static void zynqmp_disp_clr(void __iomem *base, int offset, u32 clr)
+{
+	zynqmp_disp_write(base, offset, zynqmp_disp_read(base, offset) & ~clr);
+}
+
+/* Read-modify-write: set the bits in @set at @base + @offset */
+static void zynqmp_disp_set(void __iomem *base, int offset, u32 set)
+{
+	zynqmp_disp_write(base, offset, zynqmp_disp_read(base, offset) | set);
+}
+
+/*
+ * Clock functions
+ */
+
+/**
+ * zynqmp_disp_clk_enable - Enable the clock if needed
+ * @clk: clk device
+ * @flag: flag if the clock is enabled
+ *
+ * Prepare and enable @clk unless @flag indicates it is already enabled.
+ * On success @flag is updated so that repeated calls stay balanced.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable(struct clk *clk, bool *flag)
+{
+	int ret;
+
+	if (*flag)
+		return 0;
+
+	ret = clk_prepare_enable(clk);
+	if (!ret)
+		*flag = true;
+
+	return ret;
+}
+
+/**
+ * zynqmp_disp_clk_disable - Disable the clock if needed
+ * @clk: clk device
+ * @flag: flag if the clock is enabled
+ *
+ * Disable the clock only if it's enabled per @flag, and clear @flag so
+ * enable/disable calls stay balanced.
+ */
+static void zynqmp_disp_clk_disable(struct clk *clk, bool *flag)
+{
+	if (*flag) {
+		clk_disable_unprepare(clk);
+		*flag = false;
+	}
+}
+
+/**
+ * zynqmp_disp_clk_enable_disable - Enable and disable the clock
+ * @clk: clk device
+ * @flag: flag if the clock is enabled
+ *
+ * This is to ensure the clock is disabled. The initial hardware state is
+ * unknown, and this makes sure that the clock is disabled.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable_disable(struct clk *clk, bool *flag)
+{
+	int ret = 0;
+
+	if (!*flag) {
+		ret = clk_prepare_enable(clk);
+		/*
+		 * Only unwind on success: disabling a clock whose
+		 * prepare/enable failed would unbalance its refcounts.
+		 */
+		if (!ret)
+			clk_disable_unprepare(clk);
+	}
+
+	return ret;
+}
+
+/*
+ * Blender functions
+ */
+
+/**
+ * zynqmp_disp_blend_set_output_fmt - Set the output format of the blend
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt and program the matching RGB-to-YCbCr
+ * colorspace-conversion coefficients and offsets.
+ */
+static void
+zynqmp_disp_blend_set_output_fmt(struct zynqmp_disp_blend *blend, u32 fmt)
+{
+	/*
+	 * CSC coefficients appear to be fixed point with 0x1000 == 1.0;
+	 * values such as 0x7d4d presumably encode negative coefficients.
+	 * NOTE(review): confirm the exact fixed-point format against the
+	 * DP subsystem register reference.
+	 */
+	u16 reset_coeffs[] = { 0x1000, 0x0, 0x0,
+			       0x0, 0x1000, 0x0,
+			       0x0, 0x0, 0x1000 };
+	u32 reset_offsets[] = { 0x0, 0x0, 0x0 };
+	u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+			      0x7d4d, 0x7ab3, 0x800,
+			      0x800, 0x794d, 0x7eb3 };
+	u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+	u16 *coeffs;
+	u32 *offsets;
+	u32 offset, i;
+
+	zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
+	if (fmt == ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB) {
+		/* Identity matrix with zero offsets: pass RGB through */
+		coeffs = reset_coeffs;
+		offsets = reset_offsets;
+	} else {
+		/* Hardcode Full-range SDTV values. Can be runtime config */
+		coeffs = sdtv_coeffs;
+		offsets = full_range_offsets;
+	}
+
+	/* COEFF0..COEFF8 are consecutive 32-bit registers */
+	offset = ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0;
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+	/* LUMA/CR/CB output offsets are likewise consecutive */
+	offset = ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET;
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_coeff - Set the coefficients for @layer
+ * @blend: blend object
+ * @layer: layer to set the coefficients for
+ * @on: if layer is on / off
+ *
+ * Depending on the format (rgb / yuv and swap), and the status (on / off),
+ * this function sets the coefficients for the given layer @layer accordingly.
+ */
+static void zynqmp_disp_blend_layer_coeff(struct zynqmp_disp_blend *blend,
+					  struct zynqmp_disp_layer *layer,
+					  bool on)
+{
+	u32 offset, i, s0, s1;
+	/*
+	 * These tables live on the stack and are rebuilt on every call, so
+	 * the in-place column swap further down never corrupts state that
+	 * persists across calls.
+	 */
+	u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+			      0x1000, 0x7483, 0x7a7f,
+			      0x1000, 0x0, 0x1c5a };
+	u16 sdtv_coeffs_yonly[] = { 0x0, 0x0, 0x1000,
+				    0x0, 0x0, 0x1000,
+				    0x0, 0x0, 0x1000 };
+	u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+			      0x0, 0x1000, 0x0,
+			      0x0, 0x0, 0x1000 };
+	u16 null_coeffs[] = { 0x0, 0x0, 0x0,
+			      0x0, 0x0, 0x0,
+			      0x0, 0x0, 0x0 };
+	u16 *coeffs;
+	u32 sdtv_offsets[] = { 0x0, 0x1800, 0x1800 };
+	u32 sdtv_offsets_yonly[] = { 0x1800, 0x1800, 0x0 };
+	u32 null_offsets[] = { 0x0, 0x0, 0x0 };
+	u32 *offsets;
+
+	/* Each layer has its own input-CSC coefficient register bank */
+	if (layer->id == ZYNQMP_DISP_LAYER_VID)
+		offset = ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0;
+	else
+		offset = ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0;
+
+	if (!on) {
+		/* Layer off: all-zero matrix and offsets blank the input */
+		coeffs = null_coeffs;
+		offsets = null_offsets;
+	} else {
+		if (!layer->fmt->rgb) {
+			/*
+			 * In case of Y_ONLY formats, pixels are unpacked
+			 * differently compared to YCbCr
+			 */
+			if (layer->fmt->drm_fmt == DRM_FORMAT_Y8 ||
+			    layer->fmt->drm_fmt == DRM_FORMAT_Y10) {
+				coeffs = sdtv_coeffs_yonly;
+				offsets = sdtv_offsets_yonly;
+			} else {
+				coeffs = sdtv_coeffs;
+				offsets = sdtv_offsets;
+			}
+
+			/* For YUV formats, swap exchanges the Cb/Cr columns */
+			s0 = 1;
+			s1 = 2;
+		} else {
+			coeffs = swap_coeffs;
+			/* For RGB formats, swap exchanges the R/B columns */
+			s0 = 0;
+			s1 = 2;
+
+			/* No offset for RGB formats */
+			offsets = null_offsets;
+		}
+
+		if (layer->fmt->swap) {
+			/*
+			 * XOR-swap columns s0 and s1 of the 3x3 matrix;
+			 * valid because s0 != s1 on every path above
+			 * (an XOR self-swap would zero the entry).
+			 */
+			for (i = 0; i < 3; i++) {
+				coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+				coeffs[i * 3 + s1] ^= coeffs[i * 3 + s0];
+				coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+			}
+		}
+	}
+
+	/* Program coefficients. Can be runtime configurable */
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+	if (layer->id == ZYNQMP_DISP_LAYER_VID)
+		offset = ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET;
+	else
+		offset = ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+	/* Program offsets. Can be runtime configurable */
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Program the layer control register according to the current format
+ * (RGB flag, chroma upsampling) and load the input CSC coefficients.
+ */
+static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
+					   struct zynqmp_disp_layer *layer)
+{
+	u32 ctrl = 0;
+
+	if (layer->fmt->rgb)
+		ctrl |= ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB;
+	if (layer->fmt->chroma_sub)
+		ctrl |= ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US;
+
+	zynqmp_disp_write(blend->base,
+			  ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset,
+			  ctrl);
+
+	zynqmp_disp_blend_layer_coeff(blend, layer, true);
+}
+
+/**
+ * zynqmp_disp_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Disable a layer @layer by clearing its control register and zeroing
+ * its input CSC coefficients.
+ */
+static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp_blend *blend,
+					    struct zynqmp_disp_layer *layer)
+{
+	/* Clearing the control register drops RGB/upsample configuration */
+	zynqmp_disp_write(blend->base,
+			  ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset, 0);
+
+	zynqmp_disp_blend_layer_coeff(blend, layer, false);
+}
+
+/**
+ * zynqmp_disp_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Program the three background color component registers.
+ */
+static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp_blend *blend,
+					   u32 c0, u32 c1, u32 c2)
+{
+	u32 comps[] = { c0, c1, c2 };
+	u32 i;
+
+	/* BG_CLR_0/1/2 are consecutive 32-bit registers */
+	for (i = 0; i < 3; i++)
+		zynqmp_disp_write(blend->base,
+				  ZYNQMP_DISP_V_BLEND_BG_CLR_0 + i * 4,
+				  comps[i]);
+}
+
+/**
+ * zynqmp_disp_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Set the alpha for blending. The value occupies bits [8:1] of the
+ * register (mask 0x1fe); bit 0 is the enable flag handled separately by
+ * zynqmp_disp_blend_enable_alpha() and is preserved here.
+ */
+static void
+zynqmp_disp_blend_set_alpha(struct zynqmp_disp_blend *blend, u32 alpha)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(blend->base,
+			       ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA);
+	reg &= ~ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK;
+	reg |= alpha << 1;
+	zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
+			  reg);
+}
+
+/**
+ * zynqmp_disp_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable/disable the global alpha blending based on @enable by toggling
+ * bit 0 of the global alpha register; the alpha value itself (bits [8:1])
+ * is left untouched.
+ */
+static void
+zynqmp_disp_blend_enable_alpha(struct zynqmp_disp_blend *blend, bool enable)
+{
+	if (enable)
+		zynqmp_disp_set(blend->base,
+				ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+	else
+		zynqmp_disp_clr(blend->base,
+				ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+/* List of blend output formats */
+/* The id / order should be aligned with zynqmp_disp_color_enum */
+/* Only .disp_fmt is meaningful for output formats; other fields stay zero */
+static const struct zynqmp_disp_fmt blend_output_fmts[] = {
+	{
+		.disp_fmt	= ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB,
+	}, {
+		.disp_fmt	= ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+	}, {
+		.disp_fmt	= ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+	}, {
+		.disp_fmt	= ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY,
+	}
+};
+
+/*
+ * AV buffer manager functions
+ */
+
+/* List of video layer formats */
+#define ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV 2
+static const struct zynqmp_disp_fmt av_buf_vid_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_Y8,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_Y10,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB2101010,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XV15,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XV20,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }
+};
+
+/* List of graphics layer formats.
+ * Each entry maps a DRM fourcc to the matching AV_BUF non-live graphics
+ * hardware format, plus the per-component scaling factors and the flags
+ * (rgb / swap / chroma_sub) the layer code consults when programming DMA.
+ * Pairs of entries share one hardware format and differ only in .swap
+ * (component-order swap, e.g. ABGR vs ARGB).
+ */
+static const struct zynqmp_disp_fmt av_buf_gfx_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+ /* 5-6-5: the green component uses a 6-bit scaling factor */
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }
+};
+
+/* List of live formats */
+/* Format can be combination of color, bpc, and cb-cr order.
+ * - Color: RGB / YUV444 / YUV422 / Y only
+ * - BPC: 6, 8, 10, 12
+ * - Swap: Cb and Cr swap
+ * which can be 32 bus formats. Only list the subset of those for now.
+ *
+ * The BPC and color-format fields of the LIVE_CONFIG register must be
+ * combined with bitwise OR ('|'); the previous logical OR ('||') collapsed
+ * every .disp_fmt value to 1.
+ */
+static const struct zynqmp_disp_fmt av_buf_live_fmts[] = {
+	{
+		.bus_fmt = MEDIA_BUS_FMT_RGB666_1X18,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 |
+			    ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_RBG888_1X24,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+			    ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_UYVY8_1X16,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+			    ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_VUY8_1X24,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+			    ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_UYVY10_1X20,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 |
+			    ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}
+};
+
+/**
+ * zynqmp_disp_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: raw value to program into the AV_BUF_FMT register
+ *
+ * Set the av buffer manager format to @fmt. @fmt should have valid values
+ * for both video and graphics layer (callers combine the two fields before
+ * calling, see zynqmp_disp_layer_set_fmt()).
+ */
+static void
+zynqmp_disp_av_buf_set_fmt(struct zynqmp_disp_av_buf *av_buf, u32 fmt)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (which include video and graphics) of
+ * av buffer manager. Used for read-modify-write of a single layer's field.
+ *
+ * Return: value of ZYNQMP_DISP_AV_BUF_FMT register.
+ */
+static u32
+zynqmp_disp_av_buf_get_fmt(struct zynqmp_disp_av_buf *av_buf)
+{
+ return zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_live_fmt - Set the live input format
+ * @av_buf: av buffer manager
+ * @fmt: raw hardware value for the live config register
+ * @is_vid: true for the video layer, false for the graphics layer
+ *
+ * Program the live input format register of the layer selected by @is_vid
+ * with @fmt.
+ */
+static void zynqmp_disp_av_buf_set_live_fmt(struct zynqmp_disp_av_buf *av_buf,
+					    u32 fmt, bool is_vid)
+{
+	u32 reg = is_vid ? ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG :
+			   ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG;
+
+	zynqmp_disp_write(av_buf->base, reg, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock is from ps
+ *
+ * Select the video clock source: PS when @from_ps is true, otherwise PL.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_clock_src(struct zynqmp_disp_av_buf *av_buf,
+				     bool from_ps)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+	reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
+	if (from_ps)
+		reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_clock_src_is_ps - if ps clock is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if the video clock comes from PS, false if from PL.
+ */
+static bool
+zynqmp_disp_av_buf_vid_clock_src_is_ps(struct zynqmp_disp_av_buf *av_buf)
+{
+	return (zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC) &
+		ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS) != 0;
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Select internal timing generation when @internal is true, otherwise use
+ * externally provided timing.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_timing_src(struct zynqmp_disp_av_buf *av_buf,
+				      bool internal)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+	reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+	if (internal)
+		reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_timing_src_is_int - if internal timing is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if internal video timing generation is selected.
+ */
+static bool
+zynqmp_disp_av_buf_vid_timing_src_is_int(struct zynqmp_disp_av_buf *av_buf)
+{
+	return (zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC) &
+		ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING) != 0;
+}
+
+/**
+ * zynqmp_disp_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock is from ps
+ *
+ * Select the audio clock source: PS when @from_ps is true, otherwise PL.
+ */
+static void
+zynqmp_disp_av_buf_set_aud_clock_src(struct zynqmp_disp_av_buf *av_buf,
+				     bool from_ps)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+	reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
+	if (from_ps)
+		reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable every channel buffer: the video/graphics buffers get the maximum
+ * video burst length, the remaining (audio) buffers get the audio maximum.
+ */
+static void
+zynqmp_disp_av_buf_enable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+	unsigned int i;
+	u32 val;
+
+	val = ZYNQMP_DISP_AV_BUF_CHBUF_EN |
+	      (ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX <<
+	       ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT);
+	for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+		zynqmp_disp_write(av_buf->base,
+				  ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, val);
+
+	val = ZYNQMP_DISP_AV_BUF_CHBUF_EN |
+	      (ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+	       ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT);
+	for (; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+		zynqmp_disp_write(av_buf->base,
+				  ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, val);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Flush and disable all (video and audio) channel buffers.
+ */
+static void
+zynqmp_disp_av_buf_disable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+	u32 val = ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH &
+		  ~ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+		zynqmp_disp_write(av_buf->base,
+				  ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, val);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Route audio stream 1 from memory and enable audio stream 2.
+ */
+static void
+zynqmp_disp_av_buf_enable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+	reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+	reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM |
+	       ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset (writing 0 releases the soft reset).
+ */
+static void
+zynqmp_disp_av_buf_enable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset; the pipe stays in reset until
+ * zynqmp_disp_av_buf_enable() is called.
+ */
+static void
+zynqmp_disp_av_buf_disable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG,
+ ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable audio stream 1 and audio stream 2.
+ */
+static void
+zynqmp_disp_av_buf_disable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+	reg &= ~(ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK |
+		 ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN);
+	reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE;
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_tpg - Set TPG mode
+ * @av_buf: av buffer manager
+ * @tpg_on: if TPG should be on
+ *
+ * Route the video 1 output from the test pattern generator when @tpg_on is
+ * true; otherwise clear the pattern selection.
+ */
+static void zynqmp_disp_av_buf_set_tpg(struct zynqmp_disp_av_buf *av_buf,
+				       bool tpg_on)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+	reg &= ~(ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK |
+		 ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN);
+	if (tpg_on)
+		reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ * @mode: operation mode of layer
+ *
+ * Select the output source for @layer: memory (non-live) or the live input,
+ * mirroring the mask/select pattern used for both VID1 and VID2 fields.
+ */
+static void zynqmp_disp_av_buf_enable_vid(struct zynqmp_disp_av_buf *av_buf,
+					  struct zynqmp_disp_layer *layer,
+					  enum zynqmp_disp_layer_mode mode)
+{
+	u32 reg;
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+	if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+		reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+		if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+			reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
+		else
+			reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE;
+	} else {
+		reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+		/*
+		 * Fix: a stray unconditional 'reg |= ..._VID2_MEM' used to
+		 * precede this check, so the live path OR'd both the MEM and
+		 * LIVE selections into the field.
+		 */
+		if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+			reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
+		else
+			reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
+	}
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer output for @layer.
+ */
+static void
+zynqmp_disp_av_buf_disable_vid(struct zynqmp_disp_av_buf *av_buf,
+			       struct zynqmp_disp_layer *layer)
+{
+	u32 mask, val, reg;
+
+	if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+		mask = ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+		val = ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE;
+	} else {
+		mask = ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+		val = ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE;
+	}
+
+	reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT,
+			  (reg & ~mask) | val);
+}
+
+/**
+ * zynqmp_disp_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor, or NULL to skip the video layer
+ * @gfx_fmt: graphics format descriptor, or NULL to skip the graphics layer
+ *
+ * Program the per-component scaling factors for the video and/or graphics
+ * layers. A NULL descriptor leaves that layer untouched.
+ */
+static void zynqmp_disp_av_buf_init_sf(struct zynqmp_disp_av_buf *av_buf,
+				       const struct zynqmp_disp_fmt *vid_fmt,
+				       const struct zynqmp_disp_fmt *gfx_fmt)
+{
+	unsigned int i;
+
+	if (vid_fmt) {
+		for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+			zynqmp_disp_write(av_buf->base,
+					  ZYNQMP_DISP_AV_BUF_VID_COMP0_SF +
+					  i * 4, vid_fmt->sf[i]);
+	}
+
+	if (gfx_fmt) {
+		for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+			zynqmp_disp_write(av_buf->base,
+					  ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF +
+					  i * 4, gfx_fmt->sf[i]);
+	}
+}
+
+/**
+ * zynqmp_disp_av_buf_init_live_sf - Initialize scaling factors for live source
+ * @av_buf: av buffer manager
+ * @fmt: format descriptor
+ * @is_vid: true for the video layer, false for the graphics layer
+ *
+ * Program the per-component live-input scaling factors of the layer
+ * selected by @is_vid from @fmt.
+ */
+static void zynqmp_disp_av_buf_init_live_sf(struct zynqmp_disp_av_buf *av_buf,
+					    const struct zynqmp_disp_fmt *fmt,
+					    bool is_vid)
+{
+	u32 base = is_vid ? ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF :
+			    ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF;
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+		zynqmp_disp_write(av_buf->base, base + i * 4, fmt->sf[i]);
+}
+
+/*
+ * Audio functions
+ */
+
+/**
+ * zynqmp_disp_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with default mixer volume. The de-assertion will
+ * initialize the audio states.
+ */
+static void zynqmp_disp_aud_init(struct zynqmp_disp_aud *aud)
+{
+ /* Clear the audio soft reset register as it's an non-reset flop */
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+ /* No scaling: pass the mixer output through at unity volume */
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * zynqmp_disp_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio in reset; undone by zynqmp_disp_aud_init().
+ */
+static void zynqmp_disp_aud_deinit(struct zynqmp_disp_aud *aud)
+{
+ zynqmp_disp_set(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/*
+ * ZynqMP Display layer functions
+ */
+
+/**
+ * zynqmp_disp_layer_check_size - Verify width and height for the layer
+ * @disp: Display subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The Display subsystem requires both layers to have identical dimensions.
+ * Verify @width x @height against the peer layer (if that layer is enabled)
+ * and record the size on @layer.
+ *
+ * Return: 0 on success, or -EINVAL if width or/and height is invalid.
+ */
+static int zynqmp_disp_layer_check_size(struct zynqmp_disp *disp,
+					struct zynqmp_disp_layer *layer,
+					u32 width, u32 height)
+{
+	const struct zynqmp_disp_layer *peer = layer->other;
+
+	if (peer->enabled && (peer->w != width || peer->h != height)) {
+		dev_err(disp->dev, "Layer width:height must be %d:%d\n",
+			peer->w, peer->h);
+		return -EINVAL;
+	}
+
+	layer->w = width;
+	layer->h = height;
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_map_fmt - Find the Display subsystem format for given drm format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Linear search of @fmts for the entry whose drm_fmt matches @drm_fmt.
+ *
+ * Return: the matching format descriptor, or NULL if not found.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_fmt(const struct zynqmp_disp_fmt fmts[],
+		    unsigned int size, uint32_t drm_fmt)
+{
+	const struct zynqmp_disp_fmt *fmt;
+
+	for (fmt = fmts; fmt < fmts + size; fmt++) {
+		if (fmt->drm_fmt == drm_fmt)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_fmt - Set the format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Look up @drm_fmt in the table for @layer (video or graphics), update the
+ * layer's field in the AV_BUF_FMT register, and program the matching
+ * scaling factors.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_fmt(struct zynqmp_disp *disp,
+				     struct zynqmp_disp_layer *layer,
+				     uint32_t drm_fmt)
+{
+	const struct zynqmp_disp_fmt *vid_fmt = NULL;
+	const struct zynqmp_disp_fmt *gfx_fmt = NULL;
+	const struct zynqmp_disp_fmt *fmt;
+	u32 reg, mask;
+
+	if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+		mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK;
+		vid_fmt = zynqmp_disp_map_fmt(av_buf_vid_fmts,
+					      ARRAY_SIZE(av_buf_vid_fmts),
+					      drm_fmt);
+		fmt = vid_fmt;
+	} else {
+		mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
+		gfx_fmt = zynqmp_disp_map_fmt(av_buf_gfx_fmts,
+					      ARRAY_SIZE(av_buf_gfx_fmts),
+					      drm_fmt);
+		fmt = gfx_fmt;
+	}
+
+	if (!fmt)
+		return -EINVAL;
+
+	reg = zynqmp_disp_av_buf_get_fmt(&disp->av_buf);
+	reg = (reg & mask) | fmt->disp_fmt;
+	zynqmp_disp_av_buf_set_fmt(&disp->av_buf, reg);
+	zynqmp_disp_av_buf_init_sf(&disp->av_buf, vid_fmt, gfx_fmt);
+	layer->fmt = fmt;
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_map_live_fmt - Find the hardware format for given bus format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @bus_fmt: bus format to search
+ *
+ * Linear search of @fmts for the entry whose bus_fmt matches @bus_fmt.
+ *
+ * Return: the matching format descriptor, or NULL if not found.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_live_fmt(const struct zynqmp_disp_fmt fmts[],
+			 unsigned int size, uint32_t bus_fmt)
+{
+	const struct zynqmp_disp_fmt *fmt;
+
+	for (fmt = fmts; fmt < fmts + size; fmt++) {
+		if (fmt->bus_fmt == bus_fmt)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_live_fmt - Set the live format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @bus_fmt: bus format to set
+ *
+ * Look up @bus_fmt in the live format table, then program the live config
+ * register and scaling factors of @layer accordingly.
+ *
+ * Return: 0 on success. -EINVAL if @bus_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_live_fmt(struct zynqmp_disp *disp,
+					  struct zynqmp_disp_layer *layer,
+					  uint32_t bus_fmt)
+{
+	bool is_vid = layer->id == ZYNQMP_DISP_LAYER_VID;
+	const struct zynqmp_disp_fmt *fmt;
+
+	fmt = zynqmp_disp_map_live_fmt(av_buf_live_fmts,
+				       ARRAY_SIZE(av_buf_live_fmts), bus_fmt);
+	if (!fmt)
+		return -EINVAL;
+
+	zynqmp_disp_av_buf_set_live_fmt(&disp->av_buf, fmt->disp_fmt, is_vid);
+	zynqmp_disp_av_buf_init_live_sf(&disp->av_buf, fmt, is_vid);
+	layer->fmt = fmt;
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_layer_set_tpg - Enable or disable TPG
+ * @disp: Display subsystem
+ * @layer: Video layer
+ * @tpg_on: flag if TPG needs to be enabled or disabled
+ *
+ * Enable / disable the TPG mode on the video layer @layer depending on
+ * @tpg_on. The video layer must be disabled before the request.
+ *
+ * Return: 0 on success. -ENODEV if it's not video layer. -EIO if
+ * the video layer is enabled.
+ */
+static int zynqmp_disp_layer_set_tpg(struct zynqmp_disp *disp,
+				     struct zynqmp_disp_layer *layer,
+				     bool tpg_on)
+{
+	struct device *dev = disp->dev;
+
+	if (layer->id != ZYNQMP_DISP_LAYER_VID) {
+		dev_err(dev, "only the video layer has the tpg mode\n");
+		return -ENODEV;
+	}
+
+	if (layer->enabled) {
+		dev_err(dev,
+			"the video layer should be disabled for tpg mode\n");
+		return -EIO;
+	}
+
+	zynqmp_disp_blend_layer_coeff(&disp->blend, layer, tpg_on);
+	zynqmp_disp_av_buf_set_tpg(&disp->av_buf, tpg_on);
+	disp->tpg_on = tpg_on;
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_layer_get_tpg - Get the TPG mode status
+ * @disp: Display subsystem
+ * @layer: Video layer (unused; the TPG state is global to @disp)
+ *
+ * Return if the TPG is enabled or not.
+ *
+ * Return: true if TPG is on, otherwise false
+ */
+static bool zynqmp_disp_layer_get_tpg(struct zynqmp_disp *disp,
+				      struct zynqmp_disp_layer *layer)
+{
+ return disp->tpg_on;
+}
+
+/**
+ * zynqmp_disp_layer_get_fmts - Get the supported DRM formats of the layer
+ * @disp: Display subsystem
+ * @layer: layer to get the formats
+ * @drm_fmts: output pointer; set to the layer's DRM format array (not copied)
+ * @num_fmts: output pointer; set to the number of entries in @drm_fmts
+ *
+ * Get the supported DRM formats of the given layer. The returned array is
+ * owned by @layer and must not be freed by the caller.
+ */
+static void zynqmp_disp_layer_get_fmts(struct zynqmp_disp *disp,
+				       struct zynqmp_disp_layer *layer,
+				       u32 **drm_fmts, unsigned int *num_fmts)
+{
+ *drm_fmts = layer->drm_fmts;
+ *num_fmts = layer->num_fmts;
+}
+
+/**
+ * zynqmp_disp_layer_enable - Enable the layer
+ * @disp: Display subsystem
+ * @layer: layer to enable
+ * @mode: operation mode
+ *
+ * Enable @layer in @mode: route the layer output, enable blending, and,
+ * for non-live mode, submit and kick the active DMA channels.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+static int zynqmp_disp_layer_enable(struct zynqmp_disp *disp,
+				    struct zynqmp_disp_layer *layer,
+				    enum zynqmp_disp_layer_mode mode)
+{
+	struct device *dev = disp->dev;
+	unsigned int i;
+
+	if (layer->enabled && layer->mode != mode) {
+		dev_err(dev, "layer is already enabled in different mode\n");
+		return -EBUSY;
+	}
+
+	zynqmp_disp_av_buf_enable_vid(&disp->av_buf, layer, mode);
+	zynqmp_disp_blend_layer_enable(&disp->blend, layer);
+
+	layer->enabled = true;
+	layer->mode = mode;
+
+	/* Live mode takes its data from the PL; no DMA to start */
+	if (mode == ZYNQMP_DISP_LAYER_LIVE)
+		return 0;
+
+	for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++) {
+		struct zynqmp_disp_layer_dma *dma = &layer->dma[i];
+		struct dma_async_tx_descriptor *desc;
+
+		if (!dma->chan || !dma->is_active)
+			continue;
+
+		desc = dmaengine_prep_interleaved_dma(dma->chan, &dma->xt,
+						      DMA_CTRL_ACK |
+						      DMA_PREP_INTERRUPT);
+		if (!desc) {
+			dev_err(dev, "failed to prep DMA descriptor\n");
+			return -ENOMEM;
+		}
+
+		dmaengine_submit(desc);
+		dma_async_issue_pending(dma->chan);
+	}
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_layer_disable - Disable the layer
+ * @disp: Display subsystem
+ * @layer: layer to disable
+ * @mode: operation mode
+ *
+ * Disable @layer: terminate its active DMA channels, disable the buffer
+ * routing and blending.
+ *
+ * Return: 0 on success, or -EBUSY if the layer is in different mode.
+ */
+static int zynqmp_disp_layer_disable(struct zynqmp_disp *disp,
+				     struct zynqmp_disp_layer *layer,
+				     enum zynqmp_disp_layer_mode mode)
+{
+	unsigned int i;
+
+	if (layer->mode != mode) {
+		dev_err(disp->dev,
+			"the layer is operating in different mode\n");
+		return -EBUSY;
+	}
+
+	for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++) {
+		struct zynqmp_disp_layer_dma *dma = &layer->dma[i];
+
+		if (dma->chan && dma->is_active)
+			dmaengine_terminate_sync(dma->chan);
+	}
+
+	zynqmp_disp_av_buf_disable_vid(&disp->av_buf, layer);
+	zynqmp_disp_blend_layer_disable(&disp->blend, layer);
+	layer->enabled = false;
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_layer_request_dma - Request DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to request DMA channels
+ * @name: identifier string for layer type
+ *
+ * Request one DMA engine channel per sub-plane of @layer, named
+ * "<name>0", "<name>1", ... in the layer's DT node.
+ *
+ * Return: 0 on success, or err value from of_dma_request_slave_channel().
+ */
+static int
+zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
+			      struct zynqmp_disp_layer *layer, const char *name)
+{
+	unsigned int i;
+
+	for (i = 0; i < layer->num_chan; i++) {
+		struct zynqmp_disp_layer_dma *dma = &layer->dma[i];
+		char chan_name[16];
+
+		snprintf(chan_name, sizeof(chan_name), "%s%d", name, i);
+		dma->chan = of_dma_request_slave_channel(layer->of_node,
+							 chan_name);
+		if (IS_ERR(dma->chan)) {
+			int ret = PTR_ERR(dma->chan);
+
+			dev_err(disp->dev, "failed to request dma channel\n");
+			dma->chan = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * zynqmp_disp_layer_release_dma - Release DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to release DMA channels
+ *
+ * Terminate and release every DMA channel held by @layer.
+ */
+static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
+					  struct zynqmp_disp_layer *layer)
+{
+	unsigned int i;
+
+	for (i = 0; i < layer->num_chan; i++) {
+		struct dma_chan *chan = layer->dma[i].chan;
+
+		if (!chan)
+			continue;
+
+		/* Make sure the channel is terminated before release */
+		dmaengine_terminate_all(chan);
+		dma_release_channel(chan);
+	}
+}
+
+/**
+ * zynqmp_disp_layer_is_live - if any layer is live
+ * @disp: Display subsystem
+ *
+ * Return: true if any enabled layer is in live mode
+ */
+static bool zynqmp_disp_layer_is_live(struct zynqmp_disp *disp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+		const struct zynqmp_disp_layer *layer = &disp->layers[i];
+
+		if (layer->enabled && layer->mode == ZYNQMP_DISP_LAYER_LIVE)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * zynqmp_disp_layer_is_enabled - if any layer is enabled
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is enabled
+ */
+static bool zynqmp_disp_layer_is_enabled(struct zynqmp_disp *disp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+		if (disp->layers[i].enabled)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * zynqmp_disp_layer_destroy - Destroy all layers
+ * @disp: Display subsystem
+ *
+ * Release the DMA channels and DT node references of every layer.
+ * Safe to call on partially-created layers.
+ */
+static void zynqmp_disp_layer_destroy(struct zynqmp_disp *disp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+		struct zynqmp_disp_layer *layer = &disp->layers[i];
+
+		zynqmp_disp_layer_release_dma(disp, layer);
+		if (layer->of_node)
+			of_node_put(layer->of_node);
+	}
+}
+
+/**
+ * zynqmp_disp_layer_create - Create all layers
+ * @disp: Display subsystem
+ *
+ * Initialize both layers: ids, offsets, peer links, channel counts, the
+ * DT child node ("vid-layer" / "gfx-layer"), and the DMA channels.
+ * On any failure, everything created so far is torn down.
+ *
+ * Return: 0 on success, otherwise error code from failed function
+ */
+static int zynqmp_disp_layer_create(struct zynqmp_disp *disp)
+{
+	struct zynqmp_disp_layer *layer;
+	unsigned int i;
+	int num_chans[ZYNQMP_DISP_NUM_LAYERS] = { 3, 1 };
+	const char * const dma_name[] = { "vid", "gfx" };
+	int ret;
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+		char temp[16];
+
+		layer = &disp->layers[i];
+		layer->id = i;
+		layer->offset = i * 4;
+		layer->other = &disp->layers[!i];
+		layer->num_chan = num_chans[i];
+		snprintf(temp, sizeof(temp), "%s-layer", dma_name[i]);
+		layer->of_node = of_get_child_by_name(disp->dev->of_node, temp);
+		if (!layer->of_node) {
+			/*
+			 * Fix: 'ret' was previously uninitialized on this
+			 * path, returning a garbage error code.
+			 */
+			ret = -ENODEV;
+			goto err;
+		}
+		ret = zynqmp_disp_layer_request_dma(disp, layer, dma_name[i]);
+		if (ret)
+			goto err;
+		layer->disp = disp;
+	}
+
+	return 0;
+
+err:
+	zynqmp_disp_layer_destroy(disp);
+	return ret;
+}
+
+/*
+ * ZynqMP Display internal functions
+ */
+
+/*
+ * Output format enumeration.
+ * The ID should be aligned with blend_output_fmts.
+ * The string should be aligned with how zynqmp_dp_set_color() decodes.
+ * Both zynqmp_disp_set_output_fmt() and the DRM property setup index into
+ * this table by the same id.
+ */
+static struct drm_prop_enum_list zynqmp_disp_color_enum[] = {
+ { 0, "rgb" },
+ { 1, "ycrcb444" },
+ { 2, "ycrcb422" },
+ { 3, "yonly" },
+};
+
+/**
+ * zynqmp_disp_set_output_fmt - Set the output format
+ * @disp: Display subsystem
+ * @id: the format ID. Refer to zynqmp_disp_color_enum[].
+ *
+ * This function sets the output format of the display / blender as well as
+ * the format of DP controller. The @id should be aligned with
+ * zynqmp_disp_color_enum.
+ */
+static void
+zynqmp_disp_set_output_fmt(struct zynqmp_disp *disp, unsigned int id)
+{
+	const char *color = zynqmp_disp_color_enum[id].name;
+	u32 disp_fmt = blend_output_fmts[id].disp_fmt;
+
+	zynqmp_dp_set_color(disp->dpsub->dp, color);
+	zynqmp_disp_blend_set_output_fmt(&disp->blend, disp_fmt);
+}
+
+/**
+ * zynqmp_disp_set_bg_color - Set the background color
+ * @disp: Display subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Thin wrapper forwarding the three background color components to the
+ * blender block.
+ */
+static void zynqmp_disp_set_bg_color(struct zynqmp_disp *disp,
+                                     u32 c0, u32 c1, u32 c2)
+{
+    zynqmp_disp_blend_set_bg_color(&disp->blend, c0, c1, c2);
+}
+
+/**
+ * zynqmp_disp_set_alpha - Set the alpha value
+ * @disp: Display subsystem
+ * @alpha: alpha value to set
+ *
+ * Program the global alpha into the blender and cache it so it can be
+ * re-applied on the next enable and read back via the DRM property.
+ */
+static void zynqmp_disp_set_alpha(struct zynqmp_disp *disp, u32 alpha)
+{
+    zynqmp_disp_blend_set_alpha(&disp->blend, alpha);
+    disp->alpha = alpha;
+}
+
+/**
+ * zynqmp_disp_get_alpha - Get the alpha value
+ * @disp: Display subsystem
+ *
+ * Return: the cached global alpha value last set with
+ * zynqmp_disp_set_alpha().
+ */
+static u32 zynqmp_disp_get_alpha(struct zynqmp_disp *disp)
+{
+    return disp->alpha;
+}
+
+/**
+ * zynqmp_disp_set_g_alpha - Enable/disable the global alpha blending
+ * @disp: Display subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Toggle global alpha blending in the blender and cache the state for
+ * later re-application and property readback.
+ */
+static void zynqmp_disp_set_g_alpha(struct zynqmp_disp *disp, bool enable)
+{
+    zynqmp_disp_blend_enable_alpha(&disp->blend, enable);
+    disp->alpha_en = enable;
+}
+
+/**
+ * zynqmp_disp_get_g_alpha - Get the global alpha status
+ * @disp: Display subsystem
+ *
+ * Return: true when global alpha blending is currently enabled, false
+ * otherwise.
+ */
+static bool zynqmp_disp_get_g_alpha(struct zynqmp_disp *disp)
+{
+    return disp->alpha_en;
+}
+
+/**
+ * zynqmp_disp_enable - Enable the Display subsystem
+ * @disp: Display subsystem
+ *
+ * Enable the audio/video buffer manager and the audio block. Clock
+ * sources are selected from the DT clock handles acquired at probe
+ * (PS clock when the PS handle is valid, PL clock otherwise), and the
+ * internal timing generator is used only when no layer is live.
+ * No-op when the subsystem is already enabled.
+ */
+static void zynqmp_disp_enable(struct zynqmp_disp *disp)
+{
+    bool live;
+
+    if (disp->enabled)
+        return;
+
+    zynqmp_disp_av_buf_enable(&disp->av_buf);
+    /* Choose clock source based on the DT clock handle */
+    zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, !!disp->_ps_pclk);
+    zynqmp_disp_av_buf_set_aud_clock_src(&disp->av_buf, !!disp->_ps_audclk);
+    /* Internal timing generator only when no live layer is active */
+    live = zynqmp_disp_layer_is_live(disp);
+    zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, !live);
+    zynqmp_disp_av_buf_enable_buf(&disp->av_buf);
+    zynqmp_disp_av_buf_enable_aud(&disp->av_buf);
+    zynqmp_disp_aud_init(&disp->aud);
+    disp->enabled = true;
+}
+
+/**
+ * zynqmp_disp_disable - Disable the Display subsystem
+ * @disp: Display subsystem
+ * @force: flag to disable forcefully
+ *
+ * Disable the audio block and the audio/video buffer manager. Unless
+ * @force is set, the hardware is left untouched when the subsystem is
+ * already disabled or when any layer is still enabled.
+ */
+static void zynqmp_disp_disable(struct zynqmp_disp *disp, bool force)
+{
+    struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+    if (!force && (!disp->enabled || zynqmp_disp_layer_is_enabled(disp)))
+        return;
+
+    /* Teardown in reverse of the enable sequence */
+    zynqmp_disp_aud_deinit(&disp->aud);
+    zynqmp_disp_av_buf_disable_aud(&disp->av_buf);
+    zynqmp_disp_av_buf_disable_buf(&disp->av_buf);
+    zynqmp_disp_av_buf_disable(&disp->av_buf);
+
+    /* Mark the flip is done as crtc is disabled anyway */
+    /* NOTE(review): crtc->state->event is read without dev->event_lock
+     * here - presumably callers serialize this; confirm. */
+    if (crtc->state->event) {
+        complete_all(crtc->state->event->base.completion);
+        crtc->state->event = NULL;
+    }
+
+    disp->enabled = false;
+}
+
+/**
+ * zynqmp_disp_init - Initialize the Display subsystem states
+ * @disp: Display subsystem
+ *
+ * Some registers reset to undesired values; for example the output
+ * select register defaults to the live source. Put every layer's video
+ * input into the disabled (non-live) state.
+ */
+static void zynqmp_disp_init(struct zynqmp_disp *disp)
+{
+    unsigned int n;
+
+    for (n = 0; n < ZYNQMP_DISP_NUM_LAYERS; n++)
+        zynqmp_disp_av_buf_disable_vid(&disp->av_buf, &disp->layers[n]);
+}
+
+/*
+ * ZynqMP Display external functions for zynqmp_dp
+ */
+
+/**
+ * zynqmp_disp_handle_vblank - Handle the vblank event
+ * @disp: Display subsystem
+ *
+ * Forward a vblank notification from the DP interrupt handler to the
+ * DRM CRTC attached to this display.
+ */
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp)
+{
+    drm_crtc_handle_vblank(&disp->xlnx_crtc.crtc);
+}
+
+/**
+ * zynqmp_disp_get_apb_clk_rate - Get the current APB clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the rate of the APB clock (aclk), in Hz.
+ */
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp)
+{
+    return clk_get_rate(disp->aclk);
+}
+
+/**
+ * zynqmp_disp_aud_enabled - If the audio is enabled
+ * @disp: Display subsystem
+ *
+ * Audio is considered enabled when an audio clock was successfully
+ * acquired at probe time.
+ *
+ * Return: true if audio is enabled, or false.
+ */
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp)
+{
+    return disp->audclk != NULL;
+}
+
+/**
+ * zynqmp_disp_get_aud_clk_rate - Get the current audio clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current audio clock rate in Hz, or 0 when audio is
+ * disabled.
+ */
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp)
+{
+    /*
+     * Fix: the condition was inverted (returned 0 when audio was
+     * enabled) and the APB clock (aclk) was queried instead of the
+     * audio clock (audclk), contradicting the documented contract.
+     */
+    if (!zynqmp_disp_aud_enabled(disp))
+        return 0;
+    return clk_get_rate(disp->audclk);
+}
+
+/**
+ * zynqmp_disp_get_crtc_mask - Return the CRTC bit mask
+ * @disp: Display subsystem
+ *
+ * Return: the DRM crtc mask of the zynqmp_disp CRTC.
+ */
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp)
+{
+    struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+    return drm_crtc_mask(crtc);
+}
+
+/*
+ * Xlnx bridge functions
+ */
+
+/* Map an xlnx_bridge back to the layer that embeds it */
+static inline struct zynqmp_disp_layer
+*bridge_to_layer(struct xlnx_bridge *bridge)
+{
+    return container_of(bridge, struct zynqmp_disp_layer, bridge);
+}
+
+/*
+ * Enable a live input through the bridge: requires the PL pixel clock,
+ * re-applies the cached alpha config, enables the layer in live mode,
+ * and switches the pipeline over to the PL clock / external timing
+ * source (restarting it if it was using the PS clock or the internal
+ * timing generator).
+ */
+static int zynqmp_disp_bridge_enable(struct xlnx_bridge *bridge)
+{
+    struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+    struct zynqmp_disp *disp = layer->disp;
+    int ret;
+
+    /* Live input cannot work without the PL pixel clock */
+    if (!disp->_pl_pclk) {
+        dev_err(disp->dev, "PL clock is required for live\n");
+        return -ENODEV;
+    }
+
+    ret = zynqmp_disp_layer_check_size(disp, layer, layer->w, layer->h);
+    if (ret)
+        return ret;
+
+    /* Re-apply the cached global alpha configuration */
+    zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+    zynqmp_disp_set_alpha(disp, disp->alpha);
+    ret = zynqmp_disp_layer_enable(layer->disp, layer,
+                                   ZYNQMP_DISP_LAYER_LIVE);
+    if (ret)
+        return ret;
+
+    /* The TPG is programmed through the video layer */
+    if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+        layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+        zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+    }
+
+    /*
+     * Switching clock/timing source requires the pipeline to be down;
+     * force-disable it first, then re-enable with the PL sources.
+     */
+    if (zynqmp_disp_av_buf_vid_timing_src_is_int(&disp->av_buf) ||
+        zynqmp_disp_av_buf_vid_clock_src_is_ps(&disp->av_buf)) {
+        dev_info(disp->dev,
+                 "Disabling the pipeline to change the clk/timing src");
+        zynqmp_disp_disable(disp, true);
+        zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, false);
+        zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, false);
+    }
+
+    zynqmp_disp_enable(disp);
+
+    return 0;
+}
+
+/*
+ * Disable a live input: turn the layer off, and when no live layer
+ * remains, switch the pipeline back to the PS clock and the internal
+ * timing generator (forcing a disable/re-enable cycle to do so).
+ */
+static void zynqmp_disp_bridge_disable(struct xlnx_bridge *bridge)
+{
+    struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+    struct zynqmp_disp *disp = layer->disp;
+
+    zynqmp_disp_disable(disp, false);
+
+    zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_LIVE);
+    /* Refresh the TPG state, which is tied to the video layer */
+    if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+        zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+    if (!zynqmp_disp_layer_is_live(disp)) {
+        dev_info(disp->dev,
+                 "Disabling the pipeline to change the clk/timing src");
+        zynqmp_disp_disable(disp, true);
+        zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, true);
+        zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, true);
+        /* Restart only if some non-live layer is still enabled */
+        if (zynqmp_disp_layer_is_enabled(disp))
+            zynqmp_disp_enable(disp);
+    }
+}
+
+/*
+ * Validate the live input dimensions against the layer limits and
+ * program the live bus format.
+ */
+static int zynqmp_disp_bridge_set_input(struct xlnx_bridge *bridge,
+                                        u32 width, u32 height, u32 bus_fmt)
+{
+    struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+    struct zynqmp_disp *disp = layer->disp;
+    int err;
+
+    err = zynqmp_disp_layer_check_size(disp, layer, width, height);
+    if (err)
+        return err;
+
+    err = zynqmp_disp_layer_set_live_fmt(disp, layer, bus_fmt);
+    if (err)
+        dev_err(disp->dev, "failed to set live fmt\n");
+
+    return err;
+}
+
+/* Report the media bus formats enumerated for this layer at probe */
+static int zynqmp_disp_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+                                             const u32 **fmts, u32 *count)
+{
+    const struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+
+    *fmts = layer->bus_fmts;
+    *count = layer->num_bus_fmts;
+
+    return 0;
+}
+
+/*
+ * DRM plane functions
+ */
+
+/* Map a drm_plane back to the layer that embeds it */
+static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane)
+{
+    return container_of(plane, struct zynqmp_disp_layer, plane);
+}
+
+/*
+ * Enable the layer backing @plane in non-live mode, re-applying the
+ * cached global alpha configuration first. When the graphics plane
+ * comes up with the TPG active, refresh the TPG on the video layer.
+ */
+static int zynqmp_disp_plane_enable(struct drm_plane *plane)
+{
+    struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+    struct zynqmp_disp *disp = layer->disp;
+    int err;
+
+    zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+    zynqmp_disp_set_alpha(disp, disp->alpha);
+
+    err = zynqmp_disp_layer_enable(disp, layer, ZYNQMP_DISP_LAYER_NONLIVE);
+    if (err)
+        return err;
+
+    if (disp->tpg_on && layer->id == ZYNQMP_DISP_LAYER_GFX) {
+        struct zynqmp_disp_layer *vid =
+            &disp->layers[ZYNQMP_DISP_LAYER_VID];
+
+        zynqmp_disp_layer_set_tpg(disp, vid, disp->tpg_on);
+    }
+
+    return 0;
+}
+
+/*
+ * Disable the layer backing @plane (non-live mode). Refresh the TPG
+ * state when the video layer goes down while the TPG is active.
+ */
+static int zynqmp_disp_plane_disable(struct drm_plane *plane)
+{
+    struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+    struct zynqmp_disp *disp = layer->disp;
+
+    zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_NONLIVE);
+    if (disp->tpg_on && layer->id == ZYNQMP_DISP_LAYER_VID)
+        zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+    return 0;
+}
+
+/*
+ * Program the layer's DMA interleaved-transfer templates and pixel
+ * format from the framebuffer. One template per FB sub-plane; unused
+ * DMA slots are deactivated. The actual DMA submission happens in the
+ * layer enable path.
+ */
+static int zynqmp_disp_plane_mode_set(struct drm_plane *plane,
+                                      struct drm_framebuffer *fb,
+                                      int crtc_x, int crtc_y,
+                                      unsigned int crtc_w, unsigned int crtc_h,
+                                      u32 src_x, u32 src_y,
+                                      u32 src_w, u32 src_h)
+{
+    struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+    const struct drm_format_info *info = fb->format;
+    struct device *dev = layer->disp->dev;
+    dma_addr_t paddr;
+    unsigned int i;
+    int ret;
+
+    if (!info) {
+        dev_err(dev, "No format info found\n");
+        return -EINVAL;
+    }
+
+    ret = zynqmp_disp_layer_check_size(layer->disp, layer, src_w, src_h);
+    if (ret)
+        return ret;
+
+    for (i = 0; i < info->num_planes; i++) {
+        /* Chroma sub-planes are subsampled by hsub/vsub */
+        unsigned int width = src_w / (i ? info->hsub : 1);
+        unsigned int height = src_h / (i ? info->vsub : 1);
+        int width_bytes;
+
+        paddr = drm_fb_cma_get_gem_addr(fb, plane->state, i);
+        if (!paddr) {
+            dev_err(dev, "failed to get a paddr\n");
+            return -EINVAL;
+        }
+
+        /* One interleaved template: numf rows of size bytes, with
+         * icg as the inter-row gap derived from the FB pitch */
+        layer->dma[i].xt.numf = height;
+        width_bytes = drm_format_plane_width_bytes(info, i, width);
+        layer->dma[i].sgl[0].size = width_bytes;
+        layer->dma[i].sgl[0].icg = fb->pitches[i] -
+                                   layer->dma[i].sgl[0].size;
+        layer->dma[i].xt.src_start = paddr;
+        layer->dma[i].xt.frame_size = 1;
+        layer->dma[i].xt.dir = DMA_MEM_TO_DEV;
+        layer->dma[i].xt.src_sgl = true;
+        layer->dma[i].xt.dst_sgl = false;
+        layer->dma[i].is_active = true;
+    }
+
+    /* Deactivate any DMA slots not used by this format */
+    for (; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+        layer->dma[i].is_active = false;
+
+    ret = zynqmp_disp_layer_set_fmt(layer->disp, layer, info->format);
+    if (ret)
+        dev_err(dev, "failed to set dp_sub layer fmt\n");
+
+    return ret;
+}
+
+/* Unregister the layer's live-input bridge, then release the plane */
+static void zynqmp_disp_plane_destroy(struct drm_plane *plane)
+{
+    xlnx_bridge_unregister(&plane_to_layer(plane)->bridge);
+    drm_plane_cleanup(plane);
+}
+
+/*
+ * Apply a driver-specific plane property: the global alpha value and
+ * enable flag take effect immediately; the TPG toggle is forwarded to
+ * the video layer helper.
+ */
+static int
+zynqmp_disp_plane_atomic_set_property(struct drm_plane *plane,
+                                      struct drm_plane_state *state,
+                                      struct drm_property *property, u64 val)
+{
+    struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+    struct zynqmp_disp *disp = layer->disp;
+
+    if (property == disp->g_alpha_prop) {
+        zynqmp_disp_set_alpha(disp, val);
+        return 0;
+    }
+    if (property == disp->g_alpha_en_prop) {
+        zynqmp_disp_set_g_alpha(disp, val);
+        return 0;
+    }
+    if (property == disp->tpg_prop)
+        return zynqmp_disp_layer_set_tpg(disp, layer, val);
+
+    return -EINVAL;
+}
+
+/* Read back a driver-specific plane property value */
+static int
+zynqmp_disp_plane_atomic_get_property(struct drm_plane *plane,
+                                      const struct drm_plane_state *state,
+                                      struct drm_property *property,
+                                      uint64_t *val)
+{
+    struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+    struct zynqmp_disp *disp = layer->disp;
+
+    if (property == disp->g_alpha_prop) {
+        *val = zynqmp_disp_get_alpha(disp);
+        return 0;
+    }
+    if (property == disp->g_alpha_en_prop) {
+        *val = zynqmp_disp_get_g_alpha(disp);
+        return 0;
+    }
+    if (property == disp->tpg_prop) {
+        *val = zynqmp_disp_layer_get_tpg(disp, layer);
+        return 0;
+    }
+
+    return -EINVAL;
+}
+
+/*
+ * Custom .update_plane implementation: identical to the atomic helper,
+ * except that the commit is flagged as async when the async check
+ * passes, allowing flicker-free cursor/plane updates.
+ */
+static int
+zynqmp_disp_plane_atomic_update_plane(struct drm_plane *plane,
+                                      struct drm_crtc *crtc,
+                                      struct drm_framebuffer *fb,
+                                      int crtc_x, int crtc_y,
+                                      unsigned int crtc_w, unsigned int crtc_h,
+                                      u32 src_x, u32 src_y,
+                                      u32 src_w, u32 src_h,
+                                      struct drm_modeset_acquire_ctx *ctx)
+{
+    struct drm_atomic_state *state;
+    struct drm_plane_state *plane_state;
+    int ret;
+
+    state = drm_atomic_state_alloc(plane->dev);
+    if (!state)
+        return -ENOMEM;
+
+    state->acquire_ctx = ctx;
+    plane_state = drm_atomic_get_plane_state(state, plane);
+    if (IS_ERR(plane_state)) {
+        ret = PTR_ERR(plane_state);
+        goto fail;
+    }
+
+    ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+    if (ret)
+        goto fail;
+    drm_atomic_set_fb_for_plane(plane_state, fb);
+    /* src_* are 16.16 fixed point as passed by the DRM core */
+    plane_state->crtc_x = crtc_x;
+    plane_state->crtc_y = crtc_y;
+    plane_state->crtc_w = crtc_w;
+    plane_state->crtc_h = crtc_h;
+    plane_state->src_x = src_x;
+    plane_state->src_y = src_y;
+    plane_state->src_w = src_w;
+    plane_state->src_h = src_h;
+
+    if (plane == crtc->cursor)
+        state->legacy_cursor_update = true;
+
+    /* Do async-update if possible */
+    state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+    ret = drm_atomic_commit(state);
+fail:
+    /* drm_atomic_state_put() releases the state on both paths */
+    drm_atomic_state_put(state);
+    return ret;
+}
+
+/*
+ * Plane function table. Made const: the DRM core only reads it and
+ * drm_universal_plane_init() takes a const pointer.
+ */
+static const struct drm_plane_funcs zynqmp_disp_plane_funcs = {
+    .update_plane = zynqmp_disp_plane_atomic_update_plane,
+    .disable_plane = drm_atomic_helper_disable_plane,
+    .atomic_set_property = zynqmp_disp_plane_atomic_set_property,
+    .atomic_get_property = zynqmp_disp_plane_atomic_get_property,
+    .destroy = zynqmp_disp_plane_destroy,
+    .reset = drm_atomic_helper_plane_reset,
+    .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+    .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Synchronous plane update: skip when there is no CRTC/FB or the FB is
+ * unchanged; disable the layer first when the pixel format changed,
+ * then program the new mode and re-enable.
+ */
+static void
+zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
+                                struct drm_plane_state *old_state)
+{
+    int ret;
+
+    if (!plane->state->crtc || !plane->state->fb)
+        return;
+
+    if (plane->state->fb == old_state->fb)
+        return;
+
+    /* A format change requires a disable/re-enable cycle */
+    if (old_state->fb &&
+        old_state->fb->format->format != plane->state->fb->format->format)
+        zynqmp_disp_plane_disable(plane);
+
+    /* src_* are 16.16 fixed point; shift to integer pixels */
+    ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+                                     plane->state->crtc_x,
+                                     plane->state->crtc_y,
+                                     plane->state->crtc_w,
+                                     plane->state->crtc_h,
+                                     plane->state->src_x >> 16,
+                                     plane->state->src_y >> 16,
+                                     plane->state->src_w >> 16,
+                                     plane->state->src_h >> 16);
+    if (ret)
+        return;
+
+    zynqmp_disp_plane_enable(plane);
+}
+
+/* Atomic-disable helper callback: just turn the backing layer off */
+static void
+zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
+                                 struct drm_plane_state *old_state)
+{
+    zynqmp_disp_plane_disable(plane);
+}
+
+/* Async updates are unconditionally allowed for these planes */
+static int zynqmp_disp_plane_atomic_async_check(struct drm_plane *plane,
+                                                struct drm_plane_state *state)
+{
+    return 0;
+}
+
+/*
+ * Async plane update: copy the new configuration into the current
+ * plane state by hand (no state swap in the async path), then program
+ * and re-enable the layer. A pixel format change still forces a
+ * disable first.
+ */
+static void
+zynqmp_disp_plane_atomic_async_update(struct drm_plane *plane,
+                                      struct drm_plane_state *new_state)
+{
+    int ret;
+
+    if (plane->state->fb == new_state->fb)
+        return;
+
+    if (plane->state->fb &&
+        plane->state->fb->format->format != new_state->fb->format->format)
+        zynqmp_disp_plane_disable(plane);
+
+    /* Update the current state with new configurations */
+    drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+    plane->state->crtc = new_state->crtc;
+    plane->state->crtc_x = new_state->crtc_x;
+    plane->state->crtc_y = new_state->crtc_y;
+    plane->state->crtc_w = new_state->crtc_w;
+    plane->state->crtc_h = new_state->crtc_h;
+    plane->state->src_x = new_state->src_x;
+    plane->state->src_y = new_state->src_y;
+    plane->state->src_w = new_state->src_w;
+    plane->state->src_h = new_state->src_h;
+    plane->state->state = new_state->state;
+
+    /* src_* are 16.16 fixed point; shift to integer pixels */
+    ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+                                     plane->state->crtc_x,
+                                     plane->state->crtc_y,
+                                     plane->state->crtc_w,
+                                     plane->state->crtc_h,
+                                     plane->state->src_x >> 16,
+                                     plane->state->src_y >> 16,
+                                     plane->state->src_w >> 16,
+                                     plane->state->src_h >> 16);
+    if (ret)
+        return;
+
+    zynqmp_disp_plane_enable(plane);
+}
+
+/* Atomic helper callbacks shared by both planes (sync + async paths) */
+static const struct drm_plane_helper_funcs zynqmp_disp_plane_helper_funcs = {
+    .atomic_update = zynqmp_disp_plane_atomic_update,
+    .atomic_disable = zynqmp_disp_plane_atomic_disable,
+    .atomic_async_check = zynqmp_disp_plane_atomic_async_check,
+    .atomic_async_update = zynqmp_disp_plane_atomic_async_update,
+};
+
+/**
+ * zynqmp_disp_create_plane - Create DRM planes for the display layers
+ * @disp: Display subsystem
+ *
+ * Register one DRM plane per layer (graphics layer is primary, video
+ * layer is overlay), hook up the xlnx bridge callbacks, and attach the
+ * alpha / TPG properties.
+ *
+ * Return: 0 on success, or the error from drm_universal_plane_init()
+ */
+static int zynqmp_disp_create_plane(struct zynqmp_disp *disp)
+{
+    struct zynqmp_disp_layer *layer;
+    unsigned int i;
+    u32 *fmts = NULL;
+    unsigned int num_fmts = 0;
+    enum drm_plane_type type;
+    int ret;
+
+    /* graphics layer is primary, and video layer is overlay */
+    type = DRM_PLANE_TYPE_OVERLAY;
+    for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+        layer = &disp->layers[i];
+        zynqmp_disp_layer_get_fmts(disp, layer, &fmts, &num_fmts);
+        ret = drm_universal_plane_init(disp->drm, &layer->plane, 0,
+                                       &zynqmp_disp_plane_funcs, fmts,
+                                       num_fmts, NULL, type, NULL);
+        if (ret)
+            goto err_plane;
+        drm_plane_helper_add(&layer->plane,
+                             &zynqmp_disp_plane_helper_funcs);
+        type = DRM_PLANE_TYPE_PRIMARY;
+    }
+
+    for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+        layer = &disp->layers[i];
+        layer->bridge.enable = &zynqmp_disp_bridge_enable;
+        layer->bridge.disable = &zynqmp_disp_bridge_disable;
+        layer->bridge.set_input = &zynqmp_disp_bridge_set_input;
+        layer->bridge.get_input_fmts =
+            &zynqmp_disp_bridge_get_input_fmts;
+        layer->bridge.of_node = layer->of_node;
+        xlnx_bridge_register(&layer->bridge);
+    }
+
+    /* Attach the alpha properties to the last (graphics) plane */
+    drm_object_attach_property(&layer->plane.base, disp->g_alpha_prop,
+                               ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX);
+    disp->alpha = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+    /* Enable the global alpha as default */
+    drm_object_attach_property(&layer->plane.base, disp->g_alpha_en_prop,
+                               true);
+    disp->alpha_en = true;
+
+    /* The TPG property lives on the video layer's plane */
+    layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+    drm_object_attach_property(&layer->plane.base, disp->tpg_prop, false);
+
+    return 0;
+
+err_plane:
+    /* Fix: clean up every plane initialized so far, not just layer 0 */
+    while (i--)
+        drm_plane_cleanup(&disp->layers[i].plane);
+    return ret;
+}
+
+/* Destroy the DRM planes of all layers */
+static void zynqmp_disp_destroy_plane(struct zynqmp_disp *disp)
+{
+    unsigned int n;
+
+    for (n = 0; n < ZYNQMP_DISP_NUM_LAYERS; n++)
+        zynqmp_disp_plane_destroy(&disp->layers[n].plane);
+}
+
+/*
+ * Xlnx crtc functions
+ */
+
+/* Map an xlnx_crtc back to the display that embeds it */
+static inline struct zynqmp_disp *xlnx_crtc_to_disp(struct xlnx_crtc *xlnx_crtc)
+{
+    return container_of(xlnx_crtc, struct zynqmp_disp, xlnx_crtc);
+}
+
+/* Maximum horizontal resolution supported by the display pipeline */
+static int zynqmp_disp_get_max_width(struct xlnx_crtc *xlnx_crtc)
+{
+    return ZYNQMP_DISP_MAX_WIDTH;
+}
+
+/* Maximum vertical resolution supported by the display pipeline */
+static int zynqmp_disp_get_max_height(struct xlnx_crtc *xlnx_crtc)
+{
+    return ZYNQMP_DISP_MAX_HEIGHT;
+}
+
+/* Report the current DRM format of the graphics (primary) layer */
+static uint32_t zynqmp_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+    return xlnx_crtc_to_disp(xlnx_crtc)->
+           layers[ZYNQMP_DISP_LAYER_GFX].fmt->drm_fmt;
+}
+
+/* Buffer alignment requirement, derived from the video layer's DMA */
+static unsigned int zynqmp_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+    struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+    const struct zynqmp_disp_layer *vid =
+        &disp->layers[ZYNQMP_DISP_LAYER_VID];
+
+    return 1 << vid->dma->chan->device->copy_align;
+}
+
+/* DMA addressing capability of the display DMA engine */
+static u64 zynqmp_disp_get_dma_mask(struct xlnx_crtc *xlnx_crtc)
+{
+    return DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT);
+}
+
+/*
+ * DRM crtc functions
+ */
+
+/* Map a drm_crtc back to the display, via the embedded xlnx_crtc */
+static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
+{
+    return xlnx_crtc_to_disp(to_xlnx_crtc(crtc));
+}
+
+/*
+ * Program the pixel clock for @adjusted_mode (clock must be gated while
+ * changing rate) and set up the DP stream timing registers. Logs at
+ * info level when the achieved rate deviates more than 5% from the
+ * requested one, debug level otherwise.
+ */
+static int zynqmp_disp_crtc_mode_set(struct drm_crtc *crtc,
+                                     struct drm_display_mode *mode,
+                                     struct drm_display_mode *adjusted_mode,
+                                     int x, int y,
+                                     struct drm_framebuffer *old_fb)
+{
+    struct zynqmp_disp *disp = crtc_to_disp(crtc);
+    unsigned long rate;
+    long diff;
+    int ret;
+
+    /* Gate the clock before changing its rate */
+    zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+    /* adjusted_mode->clock is in kHz */
+    ret = clk_set_rate(disp->pclk, adjusted_mode->clock * 1000);
+    if (ret) {
+        dev_err(disp->dev, "failed to set a pixel clock\n");
+        return ret;
+    }
+
+    rate = clk_get_rate(disp->pclk);
+    diff = rate - adjusted_mode->clock * 1000;
+    /* 1/20 = 5% tolerance on the achieved pixel rate */
+    if (abs(diff) > (adjusted_mode->clock * 1000) / 20) {
+        dev_info(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+                 adjusted_mode->clock, rate);
+    } else {
+        dev_dbg(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+                adjusted_mode->clock, rate);
+    }
+
+    /* The timing register should be programmed always */
+    zynqmp_dp_encoder_mode_set_stream(disp->dpsub->dp, adjusted_mode);
+
+    return 0;
+}
+
+/*
+ * Enable the CRTC: program the mode, wake the device via runtime PM,
+ * ungate the pixel clock, apply the cached output format / background
+ * color, and enable the pipeline. Finally wait ~3 vblank periods for
+ * the timing generator to stabilize.
+ */
+static void
+zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+                               struct drm_crtc_state *old_crtc_state)
+{
+    struct zynqmp_disp *disp = crtc_to_disp(crtc);
+    struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+    int ret, vrefresh;
+
+    zynqmp_disp_crtc_mode_set(crtc, &crtc->state->mode,
+                              adjusted_mode, crtc->x, crtc->y, NULL);
+
+    pm_runtime_get_sync(disp->dev);
+    ret = zynqmp_disp_clk_enable(disp->pclk, &disp->pclk_en);
+    if (ret) {
+        /* NOTE(review): pm_runtime reference is not dropped on this
+         * error path - confirm whether that is intentional */
+        dev_err(disp->dev, "failed to enable a pixel clock\n");
+        return;
+    }
+    zynqmp_disp_set_output_fmt(disp, disp->color);
+    zynqmp_disp_set_bg_color(disp, disp->bg_c0, disp->bg_c1, disp->bg_c2);
+    zynqmp_disp_enable(disp);
+    /* Delay of 3 vblank intervals for timing gen to be stable */
+    vrefresh = (adjusted_mode->clock * 1000) /
+               (adjusted_mode->vtotal * adjusted_mode->htotal);
+    msleep(3 * 1000 / vrefresh);
+}
+
+/*
+ * Disable the CRTC: gate the pixel clock, turn off the primary plane,
+ * force the pipeline down, stop vblank handling, and drop the runtime
+ * PM reference taken in the enable path.
+ */
+static void
+zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+                                struct drm_crtc_state *old_crtc_state)
+{
+    struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+    zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+    zynqmp_disp_plane_disable(crtc->primary);
+    zynqmp_disp_disable(disp, true);
+    drm_crtc_vblank_off(crtc);
+    pm_runtime_put_sync(disp->dev);
+}
+
+/* Any CRTC state change must also pull its planes into the commit */
+static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
+                                         struct drm_crtc_state *state)
+{
+    return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+/*
+ * Begin an atomic commit on this CRTC: make sure vblank is on and arm
+ * any pending page-flip event so it is signalled on the next vblank
+ * (rather than relying on vblank while the CRTC may be disabling).
+ */
+static void
+zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+                              struct drm_crtc_state *old_crtc_state)
+{
+    drm_crtc_vblank_on(crtc);
+    /* Don't rely on vblank when disabling crtc */
+    spin_lock_irq(&crtc->dev->event_lock);
+    if (crtc->state->event) {
+        /* Consume the flip_done event from atomic helper */
+        crtc->state->event->pipe = drm_crtc_index(crtc);
+        WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+        drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+        crtc->state->event = NULL;
+    }
+    spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+/*
+ * CRTC helper callbacks. Made const: drm_crtc_helper_add() takes a
+ * const pointer and the table is never modified.
+ */
+static const struct drm_crtc_helper_funcs zynqmp_disp_crtc_helper_funcs = {
+    .atomic_enable = zynqmp_disp_crtc_atomic_enable,
+    .atomic_disable = zynqmp_disp_crtc_atomic_disable,
+    .atomic_check = zynqmp_disp_crtc_atomic_check,
+    .atomic_begin = zynqmp_disp_crtc_atomic_begin,
+};
+
+/* Force the pipeline off before releasing the CRTC resources */
+static void zynqmp_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+    zynqmp_disp_crtc_atomic_disable(crtc, NULL);
+    drm_crtc_cleanup(crtc);
+}
+
+/* Turn on the DP vblank interrupt for this CRTC */
+static int zynqmp_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+    zynqmp_dp_enable_vblank(crtc_to_disp(crtc)->dpsub->dp);
+    return 0;
+}
+
+/* Turn off the DP vblank interrupt for this CRTC */
+static void zynqmp_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+    zynqmp_dp_disable_vblank(crtc_to_disp(crtc)->dpsub->dp);
+}
+
+/*
+ * Cache a CRTC property value; the cached values are applied to the
+ * hardware when the CRTC is (re-)enabled.
+ */
+static int
+zynqmp_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+                                     struct drm_crtc_state *state,
+                                     struct drm_property *property,
+                                     uint64_t val)
+{
+    struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+    if (property == disp->color_prop) {
+        disp->color = val;
+        return 0;
+    }
+    if (property == disp->bg_c0_prop) {
+        disp->bg_c0 = val;
+        return 0;
+    }
+    if (property == disp->bg_c1_prop) {
+        disp->bg_c1 = val;
+        return 0;
+    }
+    if (property == disp->bg_c2_prop) {
+        disp->bg_c2 = val;
+        return 0;
+    }
+
+    return -EINVAL;
+}
+
+/* Read back a cached CRTC property value */
+static int
+zynqmp_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+                                     const struct drm_crtc_state *state,
+                                     struct drm_property *property,
+                                     uint64_t *val)
+{
+    struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+    if (property == disp->color_prop) {
+        *val = disp->color;
+        return 0;
+    }
+    if (property == disp->bg_c0_prop) {
+        *val = disp->bg_c0;
+        return 0;
+    }
+    if (property == disp->bg_c1_prop) {
+        *val = disp->bg_c1;
+        return 0;
+    }
+    if (property == disp->bg_c2_prop) {
+        *val = disp->bg_c2;
+        return 0;
+    }
+
+    return -EINVAL;
+}
+
+/*
+ * CRTC function table. Made const: drm_crtc_init_with_planes() takes a
+ * const pointer and the table is never modified.
+ */
+static const struct drm_crtc_funcs zynqmp_disp_crtc_funcs = {
+    .destroy = zynqmp_disp_crtc_destroy,
+    .set_config = drm_atomic_helper_set_config,
+    .page_flip = drm_atomic_helper_page_flip,
+    .atomic_set_property = zynqmp_disp_crtc_atomic_set_property,
+    .atomic_get_property = zynqmp_disp_crtc_atomic_get_property,
+    .reset = drm_atomic_helper_crtc_reset,
+    .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+    .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+    .enable_vblank = zynqmp_disp_crtc_enable_vblank,
+    .disable_vblank = zynqmp_disp_crtc_disable_vblank,
+};
+
+/**
+ * zynqmp_disp_create_crtc - Create and register the CRTC
+ * @disp: Display subsystem
+ *
+ * Initialize the DRM CRTC with the graphics plane as primary, attach
+ * the color/background properties, and register it as an xlnx_crtc.
+ */
+static void zynqmp_disp_create_crtc(struct zynqmp_disp *disp)
+{
+    struct drm_plane *plane = &disp->layers[ZYNQMP_DISP_LAYER_GFX].plane;
+    struct drm_mode_object *obj = &disp->xlnx_crtc.crtc.base;
+    int ret;
+
+    ret = drm_crtc_init_with_planes(disp->drm, &disp->xlnx_crtc.crtc, plane,
+                                    NULL, &zynqmp_disp_crtc_funcs, NULL);
+    /* Fix: the result was previously stored but never checked */
+    if (ret)
+        dev_err(disp->dev, "failed to init the crtc: %d\n", ret);
+    drm_crtc_helper_add(&disp->xlnx_crtc.crtc,
+                        &zynqmp_disp_crtc_helper_funcs);
+    drm_object_attach_property(obj, disp->color_prop, 0);
+    /* Default output color is entry 0 ("rgb") */
+    zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[0].name);
+    drm_object_attach_property(obj, disp->bg_c0_prop, 0);
+    drm_object_attach_property(obj, disp->bg_c1_prop, 0);
+    drm_object_attach_property(obj, disp->bg_c2_prop, 0);
+
+    disp->xlnx_crtc.get_max_width = &zynqmp_disp_get_max_width;
+    disp->xlnx_crtc.get_max_height = &zynqmp_disp_get_max_height;
+    disp->xlnx_crtc.get_format = &zynqmp_disp_get_format;
+    disp->xlnx_crtc.get_align = &zynqmp_disp_get_align;
+    disp->xlnx_crtc.get_dma_mask = &zynqmp_disp_get_dma_mask;
+    xlnx_crtc_register(disp->drm, &disp->xlnx_crtc);
+}
+
+/* Unregister the xlnx_crtc, then release the DRM CRTC */
+static void zynqmp_disp_destroy_crtc(struct zynqmp_disp *disp)
+{
+    xlnx_crtc_unregister(disp->drm, &disp->xlnx_crtc);
+    zynqmp_disp_crtc_destroy(&disp->xlnx_crtc.crtc);
+}
+
+/* Restrict every layer's plane to this display's single CRTC */
+static void zynqmp_disp_map_crtc_to_plane(struct zynqmp_disp *disp)
+{
+    unsigned int n;
+
+    for (n = 0; n < ZYNQMP_DISP_NUM_LAYERS; n++)
+        disp->layers[n].plane.possible_crtcs =
+            drm_crtc_mask(&disp->xlnx_crtc.crtc);
+}
+
+/*
+ * Component functions
+ */
+
+/*
+ * Bind the display to the DRM device: create the driver-specific DRM
+ * properties, then the planes and CRTC, and map them together.
+ *
+ * Return: 0 on success, or the error from zynqmp_disp_create_plane().
+ */
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data)
+{
+    struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+    struct zynqmp_disp *disp = dpsub->disp;
+    struct drm_device *drm = data;
+    int num;
+    u64 max;
+    int ret;
+
+    disp->drm = drm;
+
+    /* NOTE(review): drm_property_create_*() can return NULL on OOM;
+     * the results are not checked here - confirm this is acceptable */
+    max = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+    disp->g_alpha_prop = drm_property_create_range(drm, 0, "alpha", 0, max);
+    disp->g_alpha_en_prop = drm_property_create_bool(drm, 0,
+                                                     "g_alpha_en");
+    num = ARRAY_SIZE(zynqmp_disp_color_enum);
+    disp->color_prop = drm_property_create_enum(drm, 0,
+                                                "output_color",
+                                                zynqmp_disp_color_enum,
+                                                num);
+    max = ZYNQMP_DISP_V_BLEND_BG_MAX;
+    disp->bg_c0_prop = drm_property_create_range(drm, 0, "bg_c0", 0, max);
+    disp->bg_c1_prop = drm_property_create_range(drm, 0, "bg_c1", 0, max);
+    disp->bg_c2_prop = drm_property_create_range(drm, 0, "bg_c2", 0, max);
+    disp->tpg_prop = drm_property_create_bool(drm, 0, "tpg");
+
+    ret = zynqmp_disp_create_plane(disp);
+    if (ret)
+        return ret;
+    zynqmp_disp_create_crtc(disp);
+    zynqmp_disp_map_crtc_to_plane(disp);
+
+    return 0;
+}
+
+/*
+ * Unbind from the DRM device: destroy the CRTC and planes, then the
+ * driver-created properties in reverse order of creation.
+ */
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data)
+{
+    struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+    struct zynqmp_disp *disp = dpsub->disp;
+
+    zynqmp_disp_destroy_crtc(disp);
+    zynqmp_disp_destroy_plane(disp);
+    /* Fix: tpg_prop was created in _bind() but never destroyed */
+    drm_property_destroy(disp->drm, disp->tpg_prop);
+    drm_property_destroy(disp->drm, disp->bg_c2_prop);
+    drm_property_destroy(disp->drm, disp->bg_c1_prop);
+    drm_property_destroy(disp->drm, disp->bg_c0_prop);
+    drm_property_destroy(disp->drm, disp->color_prop);
+    drm_property_destroy(disp->drm, disp->g_alpha_en_prop);
+    drm_property_destroy(disp->drm, disp->g_alpha_prop);
+}
+
+/*
+ * Platform initialization functions
+ */
+
+/*
+ * Build the per-layer format tables: the shared live bus formats, the
+ * non-live DRM formats of each layer, and the default format of each
+ * layer (YUYV for video; RGB565 or the module-parameter override for
+ * graphics).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int zynqmp_disp_enumerate_fmts(struct zynqmp_disp *disp)
+{
+    struct zynqmp_disp_layer *layer;
+    u32 *bus_fmts;
+    u32 i, size, num_bus_fmts;
+    u32 gfx_fmt = ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565;
+
+    num_bus_fmts = ARRAY_SIZE(av_buf_live_fmts);
+    /* devm_kcalloc: overflow-safe array allocation (was kzalloc(n * sz)) */
+    bus_fmts = devm_kcalloc(disp->dev, num_bus_fmts, sizeof(*bus_fmts),
+                            GFP_KERNEL);
+    if (!bus_fmts)
+        return -ENOMEM;
+    for (i = 0; i < num_bus_fmts; i++)
+        bus_fmts[i] = av_buf_live_fmts[i].bus_fmt;
+
+    layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+    layer->num_bus_fmts = num_bus_fmts;
+    layer->bus_fmts = bus_fmts;
+    size = ARRAY_SIZE(av_buf_vid_fmts);
+    layer->num_fmts = size;
+    layer->drm_fmts = devm_kcalloc(disp->dev, size,
+                                   sizeof(*layer->drm_fmts),
+                                   GFP_KERNEL);
+    if (!layer->drm_fmts)
+        return -ENOMEM;
+    for (i = 0; i < layer->num_fmts; i++)
+        layer->drm_fmts[i] = av_buf_vid_fmts[i].drm_fmt;
+    layer->fmt = &av_buf_vid_fmts[ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV];
+
+    layer = &disp->layers[ZYNQMP_DISP_LAYER_GFX];
+    layer->num_bus_fmts = num_bus_fmts;
+    layer->bus_fmts = bus_fmts;
+    size = ARRAY_SIZE(av_buf_gfx_fmts);
+    layer->num_fmts = size;
+    layer->drm_fmts = devm_kcalloc(disp->dev, size,
+                                   sizeof(*layer->drm_fmts),
+                                   GFP_KERNEL);
+    if (!layer->drm_fmts)
+        return -ENOMEM;
+
+    for (i = 0; i < layer->num_fmts; i++)
+        layer->drm_fmts[i] = av_buf_gfx_fmts[i].drm_fmt;
+    if (zynqmp_disp_gfx_init_fmt < ARRAY_SIZE(zynqmp_disp_gfx_init_fmts))
+        gfx_fmt = zynqmp_disp_gfx_init_fmts[zynqmp_disp_gfx_init_fmt];
+    layer->fmt = &av_buf_gfx_fmts[gfx_fmt];
+
+    return 0;
+}
+
+/*
+ * Probe the display sub-device: map the blend/av_buf/aud register
+ * regions, enumerate formats, and acquire the clocks. The video and
+ * audio clocks each prefer the live PL clock and fall back to the PS
+ * clock; audio is optional (disabled with a warning when no clock is
+ * usable). Finally the layers and initial register state are set up.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int zynqmp_disp_probe(struct platform_device *pdev)
+{
+    struct zynqmp_dpsub *dpsub;
+    struct zynqmp_disp *disp;
+    struct resource *res;
+    int ret;
+
+    disp = devm_kzalloc(&pdev->dev, sizeof(*disp), GFP_KERNEL);
+    if (!disp)
+        return -ENOMEM;
+    disp->dev = &pdev->dev;
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+    disp->blend.base = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(disp->blend.base))
+        return PTR_ERR(disp->blend.base);
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+    disp->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(disp->av_buf.base))
+        return PTR_ERR(disp->av_buf.base);
+
+    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+    disp->aud.base = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(disp->aud.base))
+        return PTR_ERR(disp->aud.base);
+
+    dpsub = platform_get_drvdata(pdev);
+    dpsub->disp = disp;
+    disp->dpsub = dpsub;
+
+    ret = zynqmp_disp_enumerate_fmts(disp);
+    if (ret)
+        return ret;
+
+    /* Try the live PL video clock */
+    disp->_pl_pclk = devm_clk_get(disp->dev, "dp_live_video_in_clk");
+    if (!IS_ERR(disp->_pl_pclk)) {
+        disp->pclk = disp->_pl_pclk;
+        /* Sanity check: the clock must be enable-able */
+        ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+                                             &disp->pclk_en);
+        if (ret)
+            disp->pclk = NULL;
+    } else if (PTR_ERR(disp->_pl_pclk) == -EPROBE_DEFER) {
+        return PTR_ERR(disp->_pl_pclk);
+    }
+
+    /* If the live PL video clock is not valid, fall back to PS clock */
+    if (!disp->pclk) {
+        disp->_ps_pclk = devm_clk_get(disp->dev, "dp_vtc_pixel_clk_in");
+        if (IS_ERR(disp->_ps_pclk)) {
+            dev_err(disp->dev, "failed to init any video clock\n");
+            return PTR_ERR(disp->_ps_pclk);
+        }
+        disp->pclk = disp->_ps_pclk;
+        ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+                                             &disp->pclk_en);
+        if (ret) {
+            dev_err(disp->dev, "failed to init any video clock\n");
+            return ret;
+        }
+    }
+
+    disp->aclk = devm_clk_get(disp->dev, "dp_apb_clk");
+    if (IS_ERR(disp->aclk))
+        return PTR_ERR(disp->aclk);
+    /* APB clock stays on for the lifetime of the device */
+    ret = zynqmp_disp_clk_enable(disp->aclk, &disp->aclk_en);
+    if (ret) {
+        dev_err(disp->dev, "failed to enable the APB clk\n");
+        return ret;
+    }
+
+    /* Try the live PL audio clock */
+    disp->_pl_audclk = devm_clk_get(disp->dev, "dp_live_audio_aclk");
+    if (!IS_ERR(disp->_pl_audclk)) {
+        disp->audclk = disp->_pl_audclk;
+        ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+                                             &disp->audclk_en);
+        if (ret)
+            disp->audclk = NULL;
+    }
+
+    /* If the live PL audio clock is not valid, fall back to PS clock */
+    if (!disp->audclk) {
+        disp->_ps_audclk = devm_clk_get(disp->dev, "dp_aud_clk");
+        if (!IS_ERR(disp->_ps_audclk)) {
+            disp->audclk = disp->_ps_audclk;
+            ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+                                                 &disp->audclk_en);
+            if (ret)
+                disp->audclk = NULL;
+        }
+
+        /* Audio is optional: warn but continue without it */
+        if (!disp->audclk) {
+            dev_err(disp->dev,
+                    "audio is disabled due to clock failure\n");
+        }
+    }
+
+    ret = zynqmp_disp_layer_create(disp);
+    if (ret)
+        goto error_aclk;
+
+    zynqmp_disp_init(disp);
+
+    return 0;
+
+error_aclk:
+    zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+    return ret;
+}
+
+/*
+ * Remove the display sub-device: tear down the layers and gate the
+ * audio, APB, and pixel clocks enabled during probe.
+ *
+ * Return: always 0.
+ */
+int zynqmp_disp_remove(struct platform_device *pdev)
+{
+    struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+    struct zynqmp_disp *disp = dpsub->disp;
+
+    zynqmp_disp_layer_destroy(disp);
+    if (disp->audclk)
+        zynqmp_disp_clk_disable(disp->audclk, &disp->audclk_en);
+    zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+    zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+    dpsub->disp = NULL;
+
+    return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
new file mode 100644
index 000000000000..28d8188f8f5e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP Display Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DISP_H_
+#define _ZYNQMP_DISP_H_
+
+/* Forward declarations so the header stands alone without extra includes */
+struct device;
+struct platform_device;
+struct zynqmp_disp;
+
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp);
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp);
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp);
+
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_disp_probe(struct platform_device *pdev);
+int zynqmp_disp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DISP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
new file mode 100644
index 000000000000..c3c86dacac97
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -0,0 +1,1917 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dpsub.h"
+
+static uint zynqmp_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, zynqmp_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
+
+/*
+ * Some sinks require a delay after a power-on request before they respond.
+ */
+static uint zynqmp_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
+MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
+
+/* Link configuration registers */
+#define ZYNQMP_DP_TX_LINK_BW_SET 0x0
+#define ZYNQMP_DP_TX_LANE_CNT_SET 0x4
+#define ZYNQMP_DP_TX_ENHANCED_FRAME_EN 0x8
+#define ZYNQMP_DP_TX_TRAINING_PATTERN_SET 0xc
+#define ZYNQMP_DP_TX_SCRAMBLING_DISABLE 0x14
+#define ZYNQMP_DP_TX_DOWNSPREAD_CTL 0x18
+#define ZYNQMP_DP_TX_SW_RESET 0x1c
+#define ZYNQMP_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define ZYNQMP_DP_TX_SW_RESET_AUX BIT(7)
+#define ZYNQMP_DP_TX_SW_RESET_ALL (ZYNQMP_DP_TX_SW_RESET_STREAM1 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM2 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM3 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM4 | \
+ ZYNQMP_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define ZYNQMP_DP_TX_ENABLE 0x80
+#define ZYNQMP_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define ZYNQMP_DP_TX_VERSION 0xf8
+#define ZYNQMP_DP_TX_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_VERSION_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_VERSION_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_VERSION_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_VERSION_REVISION_MASK GENMASK(15, 12)
+#define ZYNQMP_DP_TX_VERSION_REVISION_SHIFT 12
+#define ZYNQMP_DP_TX_VERSION_PATCH_MASK GENMASK(11, 8)
+#define ZYNQMP_DP_TX_VERSION_PATCH_SHIFT 8
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_MASK GENMASK(7, 0)
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define ZYNQMP_DP_TX_CORE_ID 0xfc
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_MASK GENMASK(15, 8)
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_SHIFT 8
+#define ZYNQMP_DP_TX_CORE_ID_DIRECTION GENMASK(1) /* FIXME: GENMASK() takes (h, l); this will not compile if ever used — confirm intended bit (BIT(0)?) */
+
+/* AUX channel interface registers */
+#define ZYNQMP_DP_TX_AUX_COMMAND 0x100
+#define ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define ZYNQMP_DP_TX_AUX_WRITE_FIFO 0x104
+#define ZYNQMP_DP_TX_AUX_ADDRESS 0x108
+#define ZYNQMP_DP_TX_CLK_DIVIDER 0x10c
+#define ZYNQMP_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE 0x130
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_DATA 0x134
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE 0x138
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT 0x13c
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define ZYNQMP_DP_TX_INTR_STATUS 0x140
+#define ZYNQMP_DP_TX_INTR_MASK 0x144
+#define ZYNQMP_DP_TX_INTR_HPD_IRQ BIT(0)
+#define ZYNQMP_DP_TX_INTR_HPD_EVENT BIT(1)
+#define ZYNQMP_DP_TX_INTR_REPLY_RECV BIT(2)
+#define ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_INTR_HPD_PULSE BIT(4)
+#define ZYNQMP_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define ZYNQMP_DP_TX_INTR_VBLANK_START BIT(13)
+#define ZYNQMP_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define ZYNQMP_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define ZYNQMP_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define ZYNQMP_DP_TX_INTR_CUST_TS BIT(29)
+#define ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define ZYNQMP_DP_TX_INTR_VSYNC_TS BIT(31)
+#define ZYNQMP_DP_TX_INTR_ALL (ZYNQMP_DP_TX_INTR_HPD_IRQ | \
+ ZYNQMP_DP_TX_INTR_HPD_EVENT | \
+ ZYNQMP_DP_TX_INTR_REPLY_RECV | \
+ ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT | \
+ ZYNQMP_DP_TX_INTR_HPD_PULSE | \
+ ZYNQMP_DP_TX_INTR_EXT_PKT_TXD | \
+ ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define ZYNQMP_DP_TX_NO_INTR_ALL (ZYNQMP_DP_TX_INTR_PIXEL0_MATCH | \
+ ZYNQMP_DP_TX_INTR_PIXEL1_MATCH | \
+ ZYNQMP_DP_TX_INTR_CUST_TS_2 | \
+ ZYNQMP_DP_TX_INTR_CUST_TS | \
+ ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS | \
+ ZYNQMP_DP_TX_INTR_VSYNC_TS)
+#define ZYNQMP_DP_TX_REPLY_DATA_CNT 0x148
+#define ZYNQMP_DP_SUB_TX_INTR_STATUS 0x3a0
+#define ZYNQMP_DP_SUB_TX_INTR_MASK 0x3a4
+#define ZYNQMP_DP_SUB_TX_INTR_EN 0x3a8
+#define ZYNQMP_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define ZYNQMP_DP_TX_MAIN_STREAM_HRES 0x194
+#define ZYNQMP_DP_TX_MAIN_STREAM_VRES 0x198
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define ZYNQMP_DP_TX_M_VID 0x1ac
+#define ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define ZYNQMP_DP_TX_N_VID 0x1b4
+#define ZYNQMP_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define ZYNQMP_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define ZYNQMP_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define ZYNQMP_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define ZYNQMP_DP_TX_PHY_CONFIG 0x200
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET (ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN 0x238
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_STATUS 0x280
+#define ZYNQMP_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define ZYNQMP_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define ZYNQMP_DP_TX_AUDIO_CONTROL 0x300
+#define ZYNQMP_DP_TX_AUDIO_CHANNELS 0x304
+#define ZYNQMP_DP_TX_AUDIO_INFO_DATA 0x308
+#define ZYNQMP_DP_TX_AUDIO_M_AUD 0x328
+#define ZYNQMP_DP_TX_AUDIO_N_AUD 0x32c
+#define ZYNQMP_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define ZYNQMP_DP_MISC0_RGB (0)
+#define ZYNQMP_DP_MISC0_YCRCB_422 (5 << 1)
+#define ZYNQMP_DP_MISC0_YCRCB_444 (6 << 1)
+#define ZYNQMP_DP_MISC0_FORMAT_MASK 0xe
+#define ZYNQMP_DP_MISC0_BPC_6 (0 << 5)
+#define ZYNQMP_DP_MISC0_BPC_8 (1 << 5)
+#define ZYNQMP_DP_MISC0_BPC_10 (2 << 5)
+#define ZYNQMP_DP_MISC0_BPC_12 (3 << 5)
+#define ZYNQMP_DP_MISC0_BPC_16 (4 << 5)
+#define ZYNQMP_DP_MISC0_BPC_MASK 0xe0
+#define ZYNQMP_DP_MISC1_Y_ONLY (1 << 7)
+
+#define ZYNQMP_DP_MAX_LANES 2
+#define ZYNQMP_MAX_FREQ 3000000
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_V1_2 0x12
+
+/**
+ * struct zynqmp_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct zynqmp_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct zynqmp_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth(link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ * @fmt: format identifier string
+ */
+struct zynqmp_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+ const char *fmt;
+};
+
+/**
+ * struct zynqmp_dp_config - Configuration of DisplayPort from DTS
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ * @bpc: bits per component
+ * @num_colors: number of color components
+ */
+struct zynqmp_dp_config {
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+ u8 bpc;
+ u8 num_colors;
+};
+
+/**
+ * struct zynqmp_dp - Xilinx DisplayPort core
+ * @encoder: the drm encoder structure
+ * @connector: the drm connector structure
+ * @sync_prop: synchronous mode property
+ * @bpc_prop: bpc mode property
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @iomem: device I/O memory for register access
+ * @irq: irq
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @phy: PHY handles for DP lanes
+ * @num_lanes: number of enabled phy lanes
+ * @hpd_work: hot plug detection worker
+ * @status: connection status
+ * @enabled: flag to indicate if the device is enabled
+ * @dpms: current dpms state
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct zynqmp_dp {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct drm_property *sync_prop;
+ struct drm_property *bpc_prop;
+ struct device *dev;
+ struct zynqmp_dpsub *dpsub;
+ struct drm_device *drm;
+ void __iomem *iomem;
+ int irq;
+
+ struct zynqmp_dp_config config;
+ struct drm_dp_aux aux;
+ struct phy *phy[ZYNQMP_DP_MAX_LANES];
+ u8 num_lanes;
+ struct delayed_work hpd_work;
+ enum drm_connector_status status;
+ bool enabled;
+
+ int dpms;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct zynqmp_dp_link_config link_config;
+ struct zynqmp_dp_mode mode;
+ u8 train_set[ZYNQMP_DP_MAX_LANES];
+};
+
+static inline struct zynqmp_dp *encoder_to_dp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct zynqmp_dp, encoder);
+}
+
+static inline struct zynqmp_dp *connector_to_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct zynqmp_dp, connector);
+}
+
+static void zynqmp_dp_write(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 zynqmp_dp_read(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static void zynqmp_dp_clr(void __iomem *base, int offset, u32 clr)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) & ~clr);
+}
+
+static void zynqmp_dp_set(void __iomem *base, int offset, u32 set)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) | set);
+}
+
+/*
+ * Internal functions: used by zynqmp_disp.c
+ */
+
+/**
+ * zynqmp_dp_update_bpp - Update the current bpp config
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the current bpp based on the color format: bpc & num_colors.
+ * Any function that changes bpc or num_colors should call this
+ * to keep the bpp value in sync.
+ */
+static void zynqmp_dp_update_bpp(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->bpp = dp->config.bpc * dp->config.num_colors;
+}
+
+/**
+ * zynqmp_dp_set_color - Set the color
+ * @dp: DisplayPort IP core structure
+ * @color: color string, from zynqmp_disp_color_enum
+ *
+ * Update misc register values based on @color string.
+ *
+ * Return: 0 on success, or -EINVAL.
+ */
+int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~ZYNQMP_DP_MISC1_Y_ONLY;
+ if (strcmp(color, "rgb") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_RGB;
+ config->num_colors = 3;
+ } else if (strcmp(color, "ycrcb422") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_422;
+ config->num_colors = 2;
+ } else if (strcmp(color, "ycrcb444") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_444;
+ config->num_colors = 3;
+ } else if (strcmp(color, "yonly") == 0) {
+ config->misc1 |= ZYNQMP_DP_MISC1_Y_ONLY;
+ config->num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colorimetry in DT\n");
+ return -EINVAL;
+ }
+ /* bpp depends on num_colors; keep it in sync with the new format */
+ zynqmp_dp_update_bpp(dp);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_enable_vblank - Enable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Enable vblank interrupt
+ */
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/**
+ * zynqmp_dp_disable_vblank - Disable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Disable vblank interrupt
+ */
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/*
+ * DP PHY functions
+ */
+
+/**
+ * zynqmp_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned from the callee functions.
+ * Note: We can call this function without any phy lane assigned to DP.
+ */
+static int zynqmp_dp_init_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+ /* Wait for PLL to be locked for the primary (1st) lane */
+ if (dp->phy[0]) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_clr(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Exit the phy.
+ */
+static void zynqmp_dp_exit_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_exit(dp->phy[i]);
+ if (ret)
+ dev_err(dp->dev, "failed to exit phy(%d) %d\n", i, ret);
+ }
+}
+
+/**
+ * zynqmp_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if PHY is ready. If PHY is not ready, wait 1ms to check for 100 times.
+ * This amount of delay was suggested by IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int zynqmp_dp_phy_ready(struct zynqmp_dp *dp)
+{
+ u32 i, reg, ready;
+
+ /* One ready bit per enabled lane, starting at bit 0 of PHY_STATUS */
+ ready = (1 << dp->num_lanes) - 1;
+
+ /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ dev_err(dp->dev, "PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+ /* Unreachable: the loop only exits via one of the returns above */
+}
+
+/*
+ * Power Management functions
+ */
+/**
+ * zynqmp_dp_pm_resume - Resume DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Resume the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_resume(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_init_phy(dp);
+}
+
+/**
+ * zynqmp_dp_pm_suspend - Suspend DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Suspend the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_suspend(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_exit_phy(dp);
+}
+
+/*
+ * DP functions
+ */
+
+/**
+ * zynqmp_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (Kilo-bytes / sec)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
+ */
+static inline int zynqmp_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
+ return link_rate * lane_num * 8 / bpp;
+}
+
+/**
+ * zynqmp_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values, rate and lane count for requested pixel
+ * clock @pclock. The @pclock is stored in the mode to be used in other
+ * functions later. The returned rate is downshifted from the current rate
+ * @current_bw.
+ *
+ * Return: Current link rate code, or -EINVAL.
+ */
+static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ if (current_bw == DP_LINK_BW_1_62) {
+ dev_err(dp->dev, "can't downshift. already lowest link rate\n");
+ return -EINVAL;
+ }
+
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
+
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_err(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
+
+/**
+ * zynqmp_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 i;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ if (preemphasis >= DP_TRAIN_PRE_EMPH_LEVEL_2)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * zynqmp_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from sink. The mapped values
+ * are predefined, and values(vs, pe, pc) are from the device manual.
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
+{
+ u8 *train_set = dp->train_set;
+ u8 i, v_level, p_level;
+ int ret;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+ dp->mode.lane_cnt);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+ v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+ xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+ zynqmp_dp_write(dp->iomem, reg, 0x2);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 256 iterations is the theoretical maximum for 4 lanes and 4 voltage
+ * levels, so this loop should exit well before the 512-iteration cap.
+ */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+ if (i == lane_cnt)
+ break;
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization train is done successfully, or
+ * corresponding error code.
+ */
+static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ if (dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Configure the link (lane count, downspread, channel coding, bandwidth),
+ * then run clock-recovery and channel-equalization training.
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int zynqmp_dp_train(struct zynqmp_dp *dp)
+{
+ u32 reg;
+ u8 bw_code = dp->mode.bw_code;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 aux_lane_cnt = lane_cnt;
+ bool enhanced;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LANE_CNT_SET, lane_cnt);
+ enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+ if (enhanced) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENHANCED_FRAME_EN, 1);
+ aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+
+ /* Enable 0.5% downspread only if the sink advertises support for it */
+ if (dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 1);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ } else {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set lane count\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ DP_SET_ANSI_8B10B);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set ANSI 8B/10B encoding\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set DP bandwidth\n");
+ return ret;
+ }
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LINK_BW_SET, bw_code);
+ switch (bw_code) {
+ case DP_LINK_BW_1_62:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+ break;
+ case DP_LINK_BW_2_7:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+ break;
+ case DP_LINK_BW_5_4:
+ default:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+ break;
+ }
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+ reg);
+ ret = zynqmp_dp_phy_ready(dp);
+ if (ret < 0)
+ return ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 1);
+ /* Use sizeof: train_set is only ZYNQMP_DP_MAX_LANES (2) bytes long */
+ memset(dp->train_set, 0, sizeof(dp->train_set));
+ ret = zynqmp_dp_link_train_cr(dp);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_dp_link_train_ce(dp);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to disable training pattern\n");
+ return ret;
+ }
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 0);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void zynqmp_dp_train_loop(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
+ do {
+ if (dp->status == connector_status_disconnected ||
+ !dp->enabled)
+ return;
+
+ ret = zynqmp_dp_train(dp);
+ if (!ret)
+ return;
+
+ ret = zynqmp_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ goto err_out;
+
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+err_out:
+ dev_err(dp->dev, "failed to train the DP link\n");
+}
+
+/*
+ * DP Aux functions
+ */
+
+#define AUX_READ_BIT 0x1
+
+/**
+ * zynqmp_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux related commands, native or i2c aux
+ * read/write, are submitted through this function. The function is mapped to
+ * the transfer function of struct drm_dp_aux. This function involves in
+ * multiple register reads/writes, thus synchronization is needed, and it is
+ * done by drm_dp_helper using @hw_mutex. The calling thread goes into sleep
+ * if there's no immediate reply to the command submission. The reply code is
+ * returned at @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
				    u8 *buf, u8 bytes, u8 *reply)
{
	bool is_read = (cmd & AUX_READ_BIT) ? true : false;
	void __iomem *iomem = dp->iomem;
	u32 reg, i;

	/* Reject the submission if another request is still in flight */
	reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
	if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST)
		return -EBUSY;

	/* For writes, stage the payload in the write FIFO before the command */
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_ADDRESS, addr);
	if (!is_read)
		for (i = 0; i < bytes; i++)
			zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_WRITE_FIFO,
					buf[i]);

	/* A zero-length transfer is an address-only transaction */
	reg = cmd << ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT;
	if (!buf || !bytes)
		reg |= ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
	else
		reg |= (bytes - 1) << ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT;
	/* Writing the command register starts the transaction */
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_COMMAND, reg);

	/* Wait for reply to be delivered upto 2ms */
	for (i = 0; ; i++) {
		reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
		if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY)
			break;

		/* Two 1ms sleeps (i == 2) bound the poll to roughly 2ms */
		if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
		    i == 2)
			return -ETIMEDOUT;

		usleep_range(1000, 1100);
	}

	reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_AUX_REPLY_CODE);
	if (reply)
		*reply = reg;

	/* On an ACKed read, the returned byte count must match the request */
	if (is_read &&
	    (reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
	     reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
		reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_REPLY_DATA_CNT);
		if ((reg & ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
			return -EIO;

		for (i = 0; i < bytes; i++) {
			buf[i] = zynqmp_dp_read(iomem,
						ZYNQMP_DP_TX_AUX_REPLY_DATA);
		}
	}

	return 0;
}
+
+static ssize_t
+zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct zynqmp_dp *dp = container_of(aux, struct zynqmp_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = zynqmp_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
+
+ for (i = 0; i < iter; i++) {
+ ret = zynqmp_dp_aux_cmd_submit(dp, msg->request, msg->address,
+ msg->buffer, msg->size,
+ &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no connected aux device\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the axi clock, so
+ * this function gets the axi clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
+ *
+ * Return: 0 on success, error value otherwise
+ */
static int zynqmp_dp_init_aux(struct zynqmp_dp *dp)
{
	unsigned int rate;
	u32 reg, w;

	rate = zynqmp_disp_get_apb_clk_rate(dp->dpsub->disp);
	if (rate < ZYNQMP_DP_TX_CLK_DIVIDER_MHZ) {
		dev_err(dp->dev, "aclk should be higher than 1MHz\n");
		return -EINVAL;
	}

	/* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
	for (w = 8; w <= 48; w += 8) {
		/* AUX pulse width should be between 0.4 to 0.6 usec */
		if (w >= (4 * rate / 10000000) &&
		    w <= (6 * rate / 10000000))
			break;
	}

	/* Loop overran: no filter width yields a valid pulse at this aclk */
	if (w > 48) {
		dev_err(dp->dev, "aclk frequency too high\n");
		return -EINVAL;
	}
	/* Program the aux filter width and the aclk-to-1MHz divider */
	reg = w << ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
	reg |= rate / ZYNQMP_DP_TX_CLK_DIVIDER_MHZ;
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_CLK_DIVIDER, reg);
	/* Enable the interrupt set, then mask the 'no interrupt' subset */
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
			ZYNQMP_DP_TX_INTR_ALL);
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
			ZYNQMP_DP_TX_NO_INTR_ALL);
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 1);

	return 0;
}
+
+/**
+ * zynqmp_dp_exit_aux - De-initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * De-initialize the DP aux. Disable all interrupts which are enabled
+ * through aux initialization, as well as the transmitter.
+ */
+static void zynqmp_dp_exit_aux(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS, 0xffffffff);
+}
+
+/*
+ * Generic DP functions
+ */
+
+/**
+ * zynqmp_dp_update_misc - Write the misc registers
+ * @dp: DisplayPort IP core structure
+ *
+ * The misc register values are stored in the structure, and this
+ * function applies the values into the registers.
+ */
+static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+}
+
+/**
+ * zynqmp_dp_set_sync_mode - Set the sync mode bit in the software misc state
+ * @dp: DisplayPort IP core structure
+ * @mode: flag if the sync mode should be on or off
+ *
+ * Set the bit in software misc state. To apply to hardware,
+ * zynqmp_dp_update_misc() should be called.
+ */
+static void zynqmp_dp_set_sync_mode(struct zynqmp_dp *dp, bool mode)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ if (mode)
+ config->misc0 |= ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ else
+ config->misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+}
+
+/**
+ * zynqmp_dp_get_sync_mode - Get the sync mode state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: true if the sync mode is on, or false
+ */
+static bool zynqmp_dp_get_sync_mode(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ return !!(config->misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC);
+}
+
+/**
+ * zynqmp_dp_set_bpc - Set bpc value in software misc state
+ * @dp: DisplayPort IP core structure
+ * @bpc: bits per component
+ *
+ * Return: 0 on success, or the fallback bpc value
+ */
+static u8 zynqmp_dp_set_bpc(struct zynqmp_dp *dp, u8 bpc)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+ u8 ret = 0;
+
+ if (dp->connector.display_info.bpc &&
+ dp->connector.display_info.bpc != bpc) {
+ dev_err(dp->dev, "requested bpc (%u) != display info (%u)\n",
+ bpc, dp->connector.display_info.bpc);
+ bpc = dp->connector.display_info.bpc;
+ }
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_BPC_MASK;
+ switch (bpc) {
+ case 6:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_16;
+ break;
+ default:
+ dev_err(dp->dev, "Not supported bpc (%u). fall back to 8bpc\n",
+ bpc);
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ ret = 8;
+ break;
+ }
+ config->bpc = bpc;
+ zynqmp_dp_update_bpp(dp);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_get_bpc - Set bpc value from software state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: current bpc value
+ */
+static u8 zynqmp_dp_get_bpc(struct zynqmp_dp *dp)
+{
+ return dp->config.bpc;
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit, and caculate all transfer unit size related values.
+ * Calculation is based on DP and IP core specification.
+ */
static void
zynqmp_dp_encoder_mode_set_transfer_unit(struct zynqmp_dp *dp,
					 struct drm_display_mode *mode)
{
	u32 tu = ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE;
	u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;

	/* Use the max transfer unit size (default) */
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE, tu);

	/* Video bandwidth in kbytes/s: pixel clock (kHz) * bytes per pixel */
	vid_kbytes = mode->clock * (dp->config.bpp / 8);
	bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
	/*
	 * Average payload bytes per transfer unit, scaled by 1000 so the
	 * integer and fractional parts can be split below.
	 */
	avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MIN_BYTES_PER_TU,
			avg_bytes_per_tu / 1000);
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FRAC_BYTES_PER_TU,
			avg_bytes_per_tu % 1000);

	/* Configure the initial wait cycle based on transfer unit size */
	if (tu < (avg_bytes_per_tu / 1000))
		init_wait = 0;
	else if ((avg_bytes_per_tu / 1000) <= 4)
		init_wait = tu;
	else
		init_wait = tu - avg_bytes_per_tu / 1000;

	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_INIT_WAIT, init_wait);
}
+
+/**
+ * zynqmp_dp_encoder_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
				       struct drm_display_mode *mode)
{
	void __iomem *iomem = dp->iomem;
	u8 lane_cnt = dp->mode.lane_cnt;
	u32 reg, wpl;
	unsigned int rate;

	/* Program the raster timings straight from the DRM mode */
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL, mode->htotal);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL, mode->vtotal);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_POLARITY,
			(!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
			 ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
			(!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
			 ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH,
			mode->hsync_end - mode->hsync_start);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH,
			mode->vsync_end - mode->vsync_start);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HRES, mode->hdisplay);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VRES, mode->vdisplay);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSTART,
			mode->htotal - mode->hsync_start);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSTART,
			mode->vtotal - mode->vsync_start);

	/* In synchronous mode, set the dividers */
	if (dp->config.misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC) {
		reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
		zynqmp_dp_write(iomem, ZYNQMP_DP_TX_N_VID, reg);
		zynqmp_dp_write(iomem, ZYNQMP_DP_TX_M_VID, mode->clock);
		rate = zynqmp_disp_get_aud_clk_rate(dp->dpsub->disp);
		if (rate) {
			dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
			/*
			 * NOTE(review): 'reg' still holds the link rate here;
			 * confirm the N_AUD register is really meant to take
			 * the link rate rather than an audio-specific value.
			 */
			zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_N_AUD, reg);
			zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_M_AUD,
					rate / 1000);
		}
	}

	/* Only 2 channel audio is supported now */
	if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
		zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);

	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_PIXEL_WIDTH, 1);

	/* Translate to the native 16 bit datapath based on IP core spec */
	wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
	/* NOTE(review): formula taken as-is from the IP core spec — verify */
	reg = wpl + wpl % lane_cnt - lane_cnt;
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE, reg);
}
+
+/*
+ * DRM connector functions
+ */
+
static enum drm_connector_status
zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct zynqmp_dp *dp = connector_to_dp(connector);
	struct zynqmp_dp_link_config *link_config = &dp->link_config;
	u32 state, i;
	int ret;

	/*
	 * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to
	 * get the HPD signal with some monitors.
	 */
	for (i = 0; i < 10; i++) {
		state = zynqmp_dp_read(dp->iomem,
				       ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
		if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD)
			break;
		msleep(100);
	}

	if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD) {
		dp->status = connector_status_connected;
		/* Cache the sink's DPCD receiver capability block */
		ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
				       sizeof(dp->dpcd));
		if (ret < 0) {
			/* First read can fail on some sinks; retry once */
			dev_dbg(dp->dev, "DPCD read first try fails");
			ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
					       sizeof(dp->dpcd));
			if (ret < 0) {
				dev_dbg(dp->dev, "DPCD read retry fails");
				goto disconnected;
			}
		}

		/* Clamp link parameters to both sink and source limits */
		link_config->max_rate = min_t(int,
					      drm_dp_max_link_rate(dp->dpcd),
					      DP_HIGH_BIT_RATE2);
		link_config->max_lanes = min_t(u8,
					       drm_dp_max_lane_count(dp->dpcd),
					       dp->num_lanes);

		return connector_status_connected;
	}

disconnected:
	dp->status = connector_status_disconnected;
	return connector_status_disconnected;
}
+
+static int zynqmp_dp_connector_get_modes(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static struct drm_encoder *
+zynqmp_dp_connector_best_encoder(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ return &dp->encoder;
+}
+
+static int zynqmp_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ if (mode->clock > ZYNQMP_MAX_FREQ) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ /* Check with link rate and lane count */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
static void zynqmp_dp_connector_destroy(struct drm_connector *connector)
{
	/* Unregister from userspace before releasing the connector */
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
+
+static int
+zynqmp_dp_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ int ret;
+
+ if (property == dp->sync_prop) {
+ zynqmp_dp_set_sync_mode(dp, val);
+ } else if (property == dp->bpc_prop) {
+ u8 bpc;
+
+ bpc = zynqmp_dp_set_bpc(dp, val);
+ if (bpc) {
+ drm_object_property_set_value(&connector->base,
+ property, bpc);
+ ret = -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+zynqmp_dp_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ if (property == dp->sync_prop)
+ *val = zynqmp_dp_get_sync_mode(dp);
+ else if (property == dp->bpc_prop)
+ *val = zynqmp_dp_get_bpc(dp);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
/* Connector ops: HPD-based detect plus the custom sync/bpc properties */
static const struct drm_connector_funcs zynqmp_dp_connector_funcs = {
	.detect			= zynqmp_dp_connector_detect,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.destroy		= zynqmp_dp_connector_destroy,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_set_property	= zynqmp_dp_connector_atomic_set_property,
	.atomic_get_property	= zynqmp_dp_connector_atomic_get_property,
};
+
+static struct drm_connector_helper_funcs zynqmp_dp_connector_helper_funcs = {
+ .get_modes = zynqmp_dp_connector_get_modes,
+ .best_encoder = zynqmp_dp_connector_best_encoder,
+ .mode_valid = zynqmp_dp_connector_mode_valid,
+};
+
+/*
+ * DRM encoder functions
+ */
+
/* Power up the link and sink, train the link, then start the main stream */
static void zynqmp_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct zynqmp_dp *dp = encoder_to_dp(encoder);
	void __iomem *iomem = dp->iomem;
	unsigned int i;
	int ret = 0;

	pm_runtime_get_sync(dp->dev);
	dp->enabled = true;
	zynqmp_dp_init_aux(dp);
	zynqmp_dp_update_misc(dp);
	if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
		zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
	if (dp->status == connector_status_connected) {
		/* Wake the sink (DPCD power state D0); retry up to 3 times */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			usleep_range(300, 500);
		}
		/* Some monitors take time to wake up properly */
		msleep(zynqmp_dp_power_on_delay_ms);
	}
	/*
	 * NOTE(review): when disconnected, ret stays 0, so this branch also
	 * covers the no-sink case and skips link training — appears
	 * intentional, but the "DP aux failed" message is then misleading.
	 */
	if (ret != 1)
		dev_dbg(dp->dev, "DP aux failed\n");
	else
		zynqmp_dp_train_loop(dp);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_SW_RESET,
			ZYNQMP_DP_TX_SW_RESET_ALL);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 1);
}
+
/* Stop the stream, put the sink into D3, and power down the PHY */
static void zynqmp_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct zynqmp_dp *dp = encoder_to_dp(encoder);
	void __iomem *iomem = dp->iomem;

	dp->enabled = false;
	/* Stop HPD work before touching the hardware it would react to */
	cancel_delayed_work(&dp->hpd_work);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 0);
	drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
	zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
			ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
	if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
		zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
	pm_runtime_put_sync(dp->dev);
}
+
/* Validate the pixel rate and configure the link for the adjusted mode */
static void
zynqmp_dp_encoder_atomic_mode_set(struct drm_encoder *encoder,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *connector_state)
{
	struct zynqmp_dp *dp = encoder_to_dp(encoder);
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	u8 max_lanes = dp->link_config.max_lanes;
	u8 bpp = dp->config.bpp;
	int rate, max_rate = dp->link_config.max_rate;
	int ret;

	/* Check again as bpp or format might have been changed */
	rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
	if (mode->clock > rate) {
		/* Warn only; the commit proceeds with the adjusted mode */
		dev_err(dp->dev, "the mode, %s,has too high pixel rate\n",
			mode->name);
		drm_mode_debug_printmodeline(mode);
	}

	/* Pick link rate/lane count for the adjusted pixel clock */
	ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
	if (ret < 0)
		return;

	zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
}
+
+#define ZYNQMP_DP_MIN_H_BACKPORCH 20
+
+static int
+zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ int diff = mode->htotal - mode->hsync_end;
+
+ /*
+ * ZynqMP DP requires horizontal backporch to be greater than 12.
+ * This limitation may not be compatible with the sink device.
+ */
+ if (diff < ZYNQMP_DP_MIN_H_BACKPORCH) {
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
+ dev_dbg(encoder->dev->dev, "hbackporch adjusted: %d to %d",
+ diff, ZYNQMP_DP_MIN_H_BACKPORCH - diff);
+ diff = ZYNQMP_DP_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return 0;
+}
+
/* Encoder ops: only cleanup is needed; no custom destroy logic */
static const struct drm_encoder_funcs zynqmp_dp_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};
+
/* Encoder helpers: power/link management and mode fixup for the DP core */
static const struct drm_encoder_helper_funcs zynqmp_dp_encoder_helper_funcs = {
	.enable			= zynqmp_dp_encoder_enable,
	.disable		= zynqmp_dp_encoder_disable,
	.atomic_mode_set	= zynqmp_dp_encoder_atomic_mode_set,
	.atomic_check		= zynqmp_dp_encoder_atomic_check,
};
+
+/*
+ * Component functions
+ */
+
+static void zynqmp_dp_hpd_work_func(struct work_struct *work)
+{
+ struct zynqmp_dp *dp;
+
+ dp = container_of(work, struct zynqmp_dp, hpd_work.work);
+
+ if (dp->drm)
+ drm_helper_hpd_irq_event(dp->drm);
+}
+
+static struct drm_prop_enum_list zynqmp_dp_bpc_enum[] = {
+ { 6, "6BPC" },
+ { 8, "8BPC" },
+ { 10, "10BPC" },
+ { 12, "12BPC" },
+};
+
/*
 * Component bind: create the DRM encoder/connector pair for the DP Tx,
 * attach the custom properties, and enable the aux channel.
 *
 * Returns 0 on success or a negative errno; a zero num_lanes (no PHY
 * assigned at probe time) makes this a silent no-op.
 */
int zynqmp_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
	struct zynqmp_dp *dp = dpsub->dp;
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;
	struct drm_device *drm = data;
	struct device_node *port;
	int ret;

	if (!dp->num_lanes)
		return 0;

	/* Collect possible CRTCs from the local disp and any OF graph ports */
	encoder->possible_crtcs |= zynqmp_disp_get_crtc_mask(dpsub->disp);
	for_each_child_of_node(dev->of_node, port) {
		if (!port->name || of_node_cmp(port->name, "port"))
			continue;
		encoder->possible_crtcs |= drm_of_find_possible_crtcs(drm,
								      port);
	}
	/* NOTE(review): drm_encoder_init() return value is not checked */
	drm_encoder_init(drm, encoder, &zynqmp_dp_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS, NULL);
	drm_encoder_helper_add(encoder, &zynqmp_dp_encoder_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;
	ret = drm_connector_init(encoder->dev, connector,
				 &zynqmp_dp_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(dp->dev, "failed to initialize the drm connector");
		goto error_encoder;
	}

	drm_connector_helper_add(connector, &zynqmp_dp_connector_helper_funcs);
	drm_connector_register(connector);
	drm_connector_attach_encoder(connector, encoder);
	connector->dpms = DRM_MODE_DPMS_OFF;

	/*
	 * NOTE(review): drm_property_create_bool/enum() can return NULL on
	 * allocation failure; the results are used unchecked below.
	 */
	dp->drm = drm;
	dp->sync_prop = drm_property_create_bool(drm, 0, "sync");
	dp->bpc_prop = drm_property_create_enum(drm, 0, "bpc",
						zynqmp_dp_bpc_enum,
						ARRAY_SIZE(zynqmp_dp_bpc_enum));

	/* Defaults: async mode, 8 bpc (or the sink-forced fallback) */
	dp->config.misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
	drm_object_attach_property(&connector->base, dp->sync_prop, false);
	ret = zynqmp_dp_set_bpc(dp, 8);
	drm_object_attach_property(&connector->base, dp->bpc_prop,
				   ret ? ret : 8);
	zynqmp_dp_update_bpp(dp);

	INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);

	/* This enables interrupts, so should be called after DRM init */
	ret = zynqmp_dp_init_aux(dp);
	if (ret) {
		dev_err(dp->dev, "failed to initialize DP aux");
		goto error_prop;
	}

	return 0;

error_prop:
	drm_property_destroy(dp->drm, dp->bpc_prop);
	drm_property_destroy(dp->drm, dp->sync_prop);
	zynqmp_dp_connector_destroy(&dp->connector);
error_encoder:
	drm_encoder_cleanup(&dp->encoder);
	return ret;
}
+
/* Component unbind: tear down everything zynqmp_dp_bind() created */
void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
	struct zynqmp_dp *dp = dpsub->dp;

	/* Quiesce the IRQ even in the no-lane case (it was requested then) */
	disable_irq(dp->irq);
	if (!dp->num_lanes)
		return;

	cancel_delayed_work_sync(&dp->hpd_work);
	zynqmp_dp_exit_aux(dp);
	drm_property_destroy(dp->drm, dp->bpc_prop);
	drm_property_destroy(dp->drm, dp->sync_prop);
	zynqmp_dp_connector_destroy(&dp->connector);
	drm_encoder_cleanup(&dp->encoder);
}
+
+/*
+ * Platform functions
+ */
+
+static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
+ u32 status, mask;
+
+ status = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS);
+ mask = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_MASK);
+ if (!(status & ~mask))
+ return IRQ_NONE;
+
+ /* dbg for diagnostic, but not much that the driver can do */
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "underflow interrupt\n");
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS, status);
+
+ /* The DP vblank will not be enabled with remote crtc device */
+ if (status & ZYNQMP_DP_TX_INTR_VBLANK_START)
+ zynqmp_disp_handle_vblank(dp->dpsub->disp);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_EVENT)
+ schedule_delayed_work(&dp->hpd_work, 0);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_IRQ) {
+ int ret;
+ u8 status[DP_LINK_STATUS_SIZE + 2];
+
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (ret < 0)
+ goto handled;
+
+ if (status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
+ zynqmp_dp_train_loop(dp);
+ }
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
/*
 * Probe the DP Tx: map registers, quiesce the core, acquire PHY lanes,
 * register the aux channel, and request the shared dpsub interrupt.
 *
 * A missing lane-0 PHY (-ENODEV) disables the DP Tx (num_lanes = 0) but
 * lets the rest of the DP subsystem probe; a missing lane-1 PHY simply
 * limits the link to one lane.
 */
int zynqmp_dp_probe(struct platform_device *pdev)
{
	struct zynqmp_dpsub *dpsub;
	struct zynqmp_dp *dp;
	struct resource *res;
	unsigned int i;
	int irq, ret;

	dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;

	dp->dpms = DRM_MODE_DPMS_OFF;
	dp->status = connector_status_disconnected;
	dp->dev = &pdev->dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
	dp->iomem = devm_ioremap_resource(dp->dev, res);
	if (IS_ERR(dp->iomem))
		return PTR_ERR(dp->iomem);

	/* Put the core into a known quiesced state before anything else */
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
			ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
	zynqmp_dp_set(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
		      ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET, 1);
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);

	dp->num_lanes = 2;
	for (i = 0; i < ZYNQMP_DP_MAX_LANES; i++) {
		char phy_name[16];

		snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
		dp->phy[i] = devm_phy_get(dp->dev, phy_name);
		if (IS_ERR(dp->phy[i])) {
			ret = PTR_ERR(dp->phy[i]);
			dp->phy[i] = NULL;

			/* 2nd lane is optional */
			if (i == 1 && ret == -ENODEV) {
				dp->num_lanes = 1;
				break;
			}

			/*
			 * If no phy lane is assigned, the DP Tx gets disabled.
			 * The display part of the DP subsystem can still be
			 * used to drive the output to FPGA, thus let the DP
			 * subsystem driver to proceed without this DP Tx.
			 */
			if (i == 0 && ret == -ENODEV) {
				dp->num_lanes = 0;
				goto out;
			}

			if (ret != -EPROBE_DEFER)
				dev_err(dp->dev, "failed to get phy lane\n");

			return ret;
		}
	}

	ret = zynqmp_dp_init_phy(dp);
	if (ret)
		goto error_phy;

	dp->aux.name = "ZynqMP DP AUX";
	dp->aux.dev = dp->dev;
	dp->aux.transfer = zynqmp_dp_aux_transfer;
	ret = drm_dp_aux_register(&dp->aux);
	if (ret < 0) {
		dev_err(dp->dev, "failed to initialize DP aux\n");
		goto error;
	}

out:
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto error;
	}

	/* Threaded handler: HPD processing does sleeping aux transfers */
	ret = devm_request_threaded_irq(dp->dev, irq, NULL,
					zynqmp_dp_irq_handler, IRQF_ONESHOT,
					dev_name(dp->dev), dp);
	if (ret < 0)
		goto error;
	dp->irq = irq;

	dpsub = platform_get_drvdata(pdev);
	dpsub->dp = dp;
	dp->dpsub = dpsub;

	dev_dbg(dp->dev,
		"ZynqMP DisplayPort Tx driver probed with %u phy lanes\n",
		dp->num_lanes);

	return 0;

error:
	/*
	 * NOTE(review): failures reached through 'out:' (num_lanes == 0)
	 * unregister an aux channel that was never registered and exit a
	 * PHY that was never initialized — confirm both teardown helpers
	 * tolerate that before restructuring.
	 */
	drm_dp_aux_unregister(&dp->aux);
error_phy:
	zynqmp_dp_exit_phy(dp);
	return ret;
}
+
/* Platform remove: disable the transmitter and release aux/PHY resources */
int zynqmp_dp_remove(struct platform_device *pdev)
{
	struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
	struct zynqmp_dp *dp = dpsub->dp;

	/* Quiesce the transmitter before tearing down aux and PHYs */
	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
	drm_dp_aux_unregister(&dp->aux);
	zynqmp_dp_exit_phy(dp);
	dpsub->dp = NULL;

	return 0;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
new file mode 100644
index 000000000000..2f6ce3f3e8cf
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -0,0 +1,38 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * ZynqMP DisplayPort Driver
 *
 * Copyright (C) 2017 - 2018 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _ZYNQMP_DP_H_
#define _ZYNQMP_DP_H_

struct zynqmp_dp;
struct drm_display_mode;

/*
 * NOTE(review): 'const' on a by-value int return has no effect and
 * triggers -Wignored-qualifiers; confirm against the definition in
 * zynqmp_dp.c before dropping it here.
 */
const int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color);
/* vblank control for the DP core */
void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp);
void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
				       struct drm_display_mode *mode);
/* system-sleep hooks invoked from the dpsub PM ops */
void __maybe_unused zynqmp_dp_pm_suspend(struct zynqmp_dp *dp);
void __maybe_unused zynqmp_dp_pm_resume(struct zynqmp_dp *dp);
/* component-framework bind/unbind */
int zynqmp_dp_bind(struct device *dev, struct device *master, void *data);
void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data);

int zynqmp_dp_probe(struct platform_device *pdev);
int zynqmp_dp_remove(struct platform_device *pdev);

#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
new file mode 100644
index 000000000000..9b3545348f7b
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DP Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "xlnx_drv.h"
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
/*
 * Component master bind: bind the display engine first so the DP
 * encoder can find its CRTC, then bind the DP Tx.
 */
static int
zynqmp_dpsub_bind(struct device *dev, struct device *master, void *data)
{
	int ret;

	ret = zynqmp_disp_bind(dev, master, data);
	if (ret)
		return ret;

	/* zynqmp_disp should bind first, so zynqmp_dp encoder can find crtc */
	ret = zynqmp_dp_bind(dev, master, data);
	if (ret) {
		/*
		 * Roll back the disp binding: the component core does not
		 * call unbind after a failed bind, so the original leaked
		 * the bound display engine here.
		 */
		zynqmp_disp_unbind(dev, master, data);
		return ret;
	}

	return 0;
}
+
static void
zynqmp_dpsub_unbind(struct device *dev, struct device *master, void *data)
{
	/* Tear down in reverse bind order: DP encoder first, then disp */
	zynqmp_dp_unbind(dev, master, data);
	zynqmp_disp_unbind(dev, master, data);
}
+
/* Component glue joining the disp and dp sub-drivers into one device */
static const struct component_ops zynqmp_dpsub_component_ops = {
	.bind	= zynqmp_dpsub_bind,
	.unbind	= zynqmp_dpsub_unbind,
};
+
+static int zynqmp_dpsub_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ int ret;
+
+ dpsub = devm_kzalloc(&pdev->dev, sizeof(*dpsub), GFP_KERNEL);
+ if (!dpsub)
+ return -ENOMEM;
+
+ /* Sub-driver will access dpsub from drvdata */
+ platform_set_drvdata(pdev, dpsub);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * DP should be probed first so that the zynqmp_disp can set the output
+ * format accordingly.
+ */
+ ret = zynqmp_dp_probe(pdev);
+ if (ret)
+ goto err_pm;
+
+ ret = zynqmp_disp_probe(pdev);
+ if (ret)
+ goto err_dp;
+
+ ret = component_add(&pdev->dev, &zynqmp_dpsub_component_ops);
+ if (ret)
+ goto err_disp;
+
+ /* Try the reserved memory. Proceed if there's none */
+ of_reserved_mem_device_init(&pdev->dev);
+
+ /* Populate the sound child nodes */
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to populate child nodes\n");
+ goto err_rmem;
+ }
+
+ dpsub->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(dpsub->master)) {
+ dev_err(&pdev->dev, "failed to initialize the drm pipeline\n");
+ goto err_populate;
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
+
+ return 0;
+
+err_populate:
+ of_platform_depopulate(&pdev->dev);
+err_rmem:
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+err_disp:
+ zynqmp_disp_remove(pdev);
+err_dp:
+ zynqmp_dp_remove(pdev);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int zynqmp_dpsub_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ int err, ret = 0;
+
+ xlnx_drm_pipeline_exit(dpsub->master);
+ of_platform_depopulate(&pdev->dev);
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+
+ err = zynqmp_disp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ err = zynqmp_dp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ pm_runtime_disable(&pdev->dev);
+
+ return err;
+}
+
+static int __maybe_unused zynqmp_dpsub_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+
+ zynqmp_dp_pm_suspend(dpsub->dp);
+
+ return 0;
+}
+
+static int __maybe_unused zynqmp_dpsub_pm_resume(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+
+ zynqmp_dp_pm_resume(dpsub->dp);
+
+ return 0;
+}
+
/* System sleep only; runtime PM is handled inside the sub-drivers */
static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_pm_suspend,
				zynqmp_dpsub_pm_resume)
};
+
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id zynqmp_dpsub_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpsub-1.7", },
	{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
+
/* Platform driver glue; "zynqmp-display" binds to xlnx,zynqmp-dpsub-1.7 */
static struct platform_driver zynqmp_dpsub_driver = {
	.probe			= zynqmp_dpsub_probe,
	.remove			= zynqmp_dpsub_remove,
	.driver			= {
		.name		= "zynqmp-display",
		.of_match_table	= zynqmp_dpsub_of_match,
		.pm		= &zynqmp_dpsub_pm_ops,
	},
};

module_platform_driver(zynqmp_dpsub_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
new file mode 100644
index 000000000000..6606beffee15
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DPSUB Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DPSUB_H_
+#define _ZYNQMP_DPSUB_H_
+
+/*
+ * Shared subsystem state: the DP core, the display controller, and the
+ * master pipeline platform device (torn down via xlnx_drm_pipeline_exit()).
+ */
+struct zynqmp_dpsub {
+	struct zynqmp_dp *dp;
+	struct zynqmp_disp *disp;
+	struct platform_device *master;
+};
+
+#endif /* _ZYNQMP_DPSUB_H_ */
diff --git a/drivers/gpu/drm/zocl/Kconfig b/drivers/gpu/drm/zocl/Kconfig
new file mode 100644
index 000000000000..6a54d01cccd1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZOCL
+ tristate "Xilinx Zynq OpenCL"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx Zynq OpenCL Manager
diff --git a/drivers/gpu/drm/zocl/Makefile b/drivers/gpu/drm/zocl/Makefile
new file mode 100644
index 000000000000..da58e5084f9d
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+zocl-y := zocl_drv.o zocl_bo.o
+
+obj-$(CONFIG_DRM_ZOCL) += zocl.o
diff --git a/drivers/gpu/drm/zocl/zocl_bo.c b/drivers/gpu/drm/zocl/zocl_bo.c
new file mode 100644
index 000000000000..123a37842ad4
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_bo.c
@@ -0,0 +1,271 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+/* Convert a 64-bit ioctl field into a userspace pointer. */
+static inline void __user *to_user_ptr(u64 address)
+{
+	/* Narrow through uintptr_t so the cast is well-defined on 32-bit. */
+	uintptr_t ptr = (uintptr_t)address;
+
+	return (void __user *)ptr;
+}
+
+/* Log a BO's size (in KB) and its CMA physical address for debugging. */
+void zocl_describe(const struct drm_zocl_bo *obj)
+{
+	DRM_INFO("%p: H[0x%zxKB] D[0x%zx]\n",
+		 obj,
+		 obj->base.base.size / 1024,
+		 (size_t)obj->base.paddr);
+}
+
+/*
+ * Allocate a CMA-backed BO of at least @unaligned_size bytes (page aligned).
+ * Returns the new BO or an ERR_PTR on failure.
+ */
+static struct drm_zocl_bo *zocl_create_bo(struct drm_device *dev,
+					  uint64_t unaligned_size)
+{
+	size_t size = PAGE_ALIGN(unaligned_size);
+	struct drm_gem_cma_object *cma_obj;
+
+	DRM_DEBUG("%s:%s:%d: %zd\n", __FILE__, __func__, __LINE__, size);
+
+	if (!size)
+		return ERR_PTR(-EINVAL);
+
+	cma_obj = drm_gem_cma_create(dev, size);
+	if (IS_ERR(cma_obj))
+		return ERR_CAST(cma_obj);	/* propagate the real error */
+
+	return to_zocl_bo(&cma_obj->base);
+}
+
+/*
+ * DRM_IOCTL_ZOCL_CREATE_BO: create a coherent CMA BO and return a handle.
+ * Both DRM_ZOCL_BO_FLAGS_COHERENT and _CMA must be requested.
+ */
+int zocl_create_bo_ioctl(struct drm_device *dev,
+			 void *data,
+			 struct drm_file *filp)
+{
+	int ret;
+	struct drm_zocl_create_bo *args = data;
+	struct drm_zocl_bo *bo;
+
+	if (((args->flags & DRM_ZOCL_BO_FLAGS_COHERENT) == 0) ||
+	    ((args->flags & DRM_ZOCL_BO_FLAGS_CMA) == 0))
+		return -EINVAL;
+
+	bo = zocl_create_bo(dev, args->size);
+	if (IS_ERR(bo)) {
+		DRM_DEBUG("object creation failed\n");
+		return PTR_ERR(bo);
+	}
+	/* Only touch the BO after the IS_ERR() check above. */
+	bo->flags |= DRM_ZOCL_BO_FLAGS_COHERENT;
+	bo->flags |= DRM_ZOCL_BO_FLAGS_CMA;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, bo);
+
+	ret = drm_gem_handle_create(filp, &bo->base.base, &args->handle);
+	if (ret) {
+		drm_gem_cma_free_object(&bo->base.base);
+		DRM_DEBUG("handle creation failed\n");
+		return ret;
+	}
+
+	zocl_describe(bo);
+	/* Drop the creation reference; the handle keeps the BO alive. */
+	drm_gem_object_put_unlocked(&bo->base.base);
+
+	return ret;
+}
+
+/* DRM_IOCTL_ZOCL_MAP_BO: report the fake mmap offset for a BO handle. */
+int zocl_map_bo_ioctl(struct drm_device *dev,
+		      void *data,
+		      struct drm_file *filp)
+{
+	struct drm_zocl_map_bo *args = data;
+	struct drm_gem_object *obj;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+	obj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (!obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* The fake mmap offset was assigned when the BO was created. */
+	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+	zocl_describe(to_zocl_bo(obj));
+	drm_gem_object_put_unlocked(obj);
+
+	return 0;
+}
+
+/*
+ * DRM_IOCTL_ZOCL_SYNC_BO: flush (TO_DEVICE) or invalidate (FROM_DEVICE)
+ * a sub-range of a BO's kernel mapping for CPU/device coherence.
+ */
+int zocl_sync_bo_ioctl(struct drm_device *dev,
+		       void *data,
+		       struct drm_file *filp)
+{
+	const struct drm_zocl_sync_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+	void *kaddr;
+	int ret = 0;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* Overflow-safe range check: offset + size must not wrap. */
+	if ((args->offset > gem_obj->size) || (args->size > gem_obj->size) ||
+	    (args->offset > gem_obj->size - args->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	kaddr = drm_gem_cma_prime_vmap(gem_obj);
+	if (!kaddr) {
+		/* No kernel mapping (e.g. imported buffer) — cannot sync. */
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* only invalidate the range of addresses requested by the user */
+	kaddr += args->offset;
+
+	if (args->dir == DRM_ZOCL_SYNC_BO_TO_DEVICE)
+		flush_kernel_vmap_range(kaddr, args->size);
+	else if (args->dir == DRM_ZOCL_SYNC_BO_FROM_DEVICE)
+		invalidate_kernel_vmap_range(kaddr, args->size);
+	else
+		ret = -EINVAL;
+
+out:
+	drm_gem_object_put_unlocked(gem_obj);
+
+	return ret;
+}
+
+/* DRM_IOCTL_ZOCL_INFO_BO: report a BO's size and CMA physical address. */
+int zocl_info_bo_ioctl(struct drm_device *dev,
+		       void *data,
+		       struct drm_file *filp)
+{
+	struct drm_zocl_info_bo *args = data;
+	const struct drm_zocl_bo *bo;
+	struct drm_gem_object *obj;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	obj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (!obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	bo = to_zocl_bo(obj);
+	args->size = bo->base.base.size;
+	args->paddr = bo->base.paddr;
+	drm_gem_object_put_unlocked(obj);
+
+	return 0;
+}
+
+/*
+ * DRM_IOCTL_ZOCL_PWRITE_BO: copy @args->size bytes from userspace into the
+ * BO at @args->offset. Returns 0 or a negative errno.
+ */
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *filp)
+{
+	const struct drm_zocl_pwrite_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+	char __user *user_data = to_user_ptr(args->data_ptr);
+	int ret = 0;
+	void *kaddr;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* Overflow-safe range check: offset + size must not wrap. */
+	if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+	    || (args->offset > gem_obj->size - args->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!access_ok(user_data, args->size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	kaddr = drm_gem_cma_prime_vmap(gem_obj);
+	kaddr += args->offset;
+
+	/* copy_from_user() returns bytes NOT copied, not an errno. */
+	ret = copy_from_user(kaddr, user_data, args->size) ? -EFAULT : 0;
+out:
+	drm_gem_object_put_unlocked(gem_obj);
+
+	return ret;
+}
+
+/*
+ * DRM_IOCTL_ZOCL_PREAD_BO: copy @args->size bytes from the BO at
+ * @args->offset out to userspace. Returns 0 or a negative errno.
+ */
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp)
+{
+	const struct drm_zocl_pread_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+	char __user *user_data = to_user_ptr(args->data_ptr);
+	int ret = 0;
+	void *kaddr;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* Overflow-safe range check: offset + size must not wrap. */
+	if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+	    || (args->offset > gem_obj->size - args->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!access_ok(user_data, args->size)) {
+		ret = -EFAULT;	/* was positive EFAULT, not a valid errno */
+		goto out;
+	}
+
+	kaddr = drm_gem_cma_prime_vmap(gem_obj);
+	kaddr += args->offset;
+
+	/* copy_to_user() returns bytes NOT copied, not an errno. */
+	ret = copy_to_user(user_data, kaddr, args->size) ? -EFAULT : 0;
+
+out:
+	drm_gem_object_put_unlocked(gem_obj);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/zocl/zocl_drv.c b/drivers/gpu/drm/zocl/zocl_drv.c
new file mode 100644
index 000000000000..a97082ecc54e
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.c
@@ -0,0 +1,217 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+#define ZOCL_DRIVER_NAME "zocl"
+#define ZOCL_DRIVER_DESC "Zynq BO manager"
+#define ZOCL_DRIVER_DATE "20161024"
+#define ZOCL_DRIVER_MAJOR 2016
+#define ZOCL_DRIVER_MINOR 3
+#define ZOCL_DRIVER_PATCHLEVEL 1
+#define ZOCL_FILE_PAGE_OFFSET 0x00100000
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+static const struct vm_operations_struct reg_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+/*
+ * DRM .load hook: map the accelerator's register window and set up the
+ * per-device context. All resources are devm-managed.
+ */
+static int zocl_drm_load(struct drm_device *drm, unsigned long flags)
+{
+	struct platform_device *pdev;
+	struct resource *res;
+	struct drm_zocl_dev *zdev;
+	void __iomem *map;
+
+	/* First MEM resource is the register space described in the DT node. */
+	pdev = to_platform_device(drm->dev);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	map = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(map)) {
+		DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map));
+		return PTR_ERR(map);
+	}
+
+	zdev = devm_kzalloc(drm->dev, sizeof(*zdev), GFP_KERNEL);
+	if (!zdev)
+		return -ENOMEM;
+
+	/* Record resource span for the register-mmap path in zocl_mmap(). */
+	zdev->ddev = drm;
+	drm->dev_private = zdev;
+	zdev->regs = map;
+	zdev->res_start = res->start;
+	zdev->res_len = resource_size(res);
+	platform_set_drvdata(pdev, zdev);
+
+	return 0;
+}
+
+static int zocl_drm_unload(struct drm_device *drm)
+{
+	/* Everything acquired in zocl_drm_load() is devm-managed. */
+	return 0;
+}
+
+/* GEM free hook: log the BO being released, then free its CMA backing. */
+static void zocl_free_object(struct drm_gem_object *obj)
+{
+	DRM_INFO("Freeing BO\n");
+	zocl_describe(to_zocl_bo(obj));
+	drm_gem_cma_free_object(obj);
+}
+
+/*
+ * mmap hook: offsets at or above ZOCL_FILE_PAGE_OFFSET are GEM fake mmap
+ * offsets handled by the CMA helper; offset 0 maps the register window.
+ */
+static int zocl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_zocl_dev *zdev = dev->dev_private;
+	unsigned long vsize;
+	int rc;
+
+	/* Page offsets >= ZOCL_FILE_PAGE_OFFSET belong to GEM BOs; let the
+	 * CMA helper handle those. Only offset 0 is handled below.
+	 */
+	if (likely(vma->vm_pgoff >= ZOCL_FILE_PAGE_OFFSET))
+		return drm_gem_cma_mmap(filp, vma);
+
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	/* Register window cannot be mapped past its resource length. */
+	vsize = vma->vm_end - vma->vm_start;
+	if (vsize > zdev->res_len)
+		return -EINVAL;
+
+	/* MMIO: uncached, not expandable, excluded from core dumps. */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_IO;
+	vma->vm_flags |= VM_RESERVED;
+
+	vma->vm_ops = &reg_physical_vm_ops;
+	rc = io_remap_pfn_range(vma, vma->vm_start,
+				zdev->res_start >> PAGE_SHIFT,
+				vsize, vma->vm_page_prot);
+
+	return rc;
+}
+
+static const struct drm_ioctl_desc zocl_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ZOCL_CREATE_BO, zocl_create_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_MAP_BO, zocl_map_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_SYNC_BO, zocl_sync_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_INFO_BO, zocl_info_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PWRITE_BO, zocl_pwrite_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PREAD_BO, zocl_pread_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations zocl_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = zocl_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver zocl_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
+ .load = zocl_drm_load,
+ .unload = zocl_drm_unload,
+ .gem_free_object = zocl_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .ioctls = zocl_ioctls,
+ .num_ioctls = ARRAY_SIZE(zocl_ioctls),
+ .fops = &zocl_driver_fops,
+ .name = ZOCL_DRIVER_NAME,
+ .desc = ZOCL_DRIVER_DESC,
+ .date = ZOCL_DRIVER_DATE,
+ .major = ZOCL_DRIVER_MAJOR,
+ .minor = ZOCL_DRIVER_MINOR,
+ .patchlevel = ZOCL_DRIVER_PATCHLEVEL,
+};
+
+/* init xilinx opencl drm platform */
+static int zocl_drm_platform_probe(struct platform_device *pdev)
+{
+	/* Registers the DRM device; zocl_drm_load() runs as the .load hook. */
+	return drm_platform_init(&zocl_driver, pdev);
+}
+
+/* exit xilinx opencl drm platform */
+static int zocl_drm_platform_remove(struct platform_device *pdev)
+{
+	struct drm_zocl_dev *zdev = platform_get_drvdata(pdev);
+	struct drm_device *ddev = zdev->ddev;
+
+	/* Unregister and drop the DRM device if load ever completed. */
+	if (ddev) {
+		drm_dev_unregister(ddev);
+		drm_dev_put(ddev);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id zocl_drm_of_match[] = {
+ { .compatible = "xlnx,zocl", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zocl_drm_of_match);
+
+static struct platform_driver zocl_drm_private_driver = {
+ .probe = zocl_drm_platform_probe,
+ .remove = zocl_drm_platform_remove,
+ .driver = {
+ .name = "zocl-drm",
+ .of_match_table = zocl_drm_of_match,
+ },
+};
+
+module_platform_driver(zocl_drm_private_driver);
+
+MODULE_VERSION(__stringify(ZOCL_DRIVER_MAJOR) "."
+ __stringify(ZOCL_DRIVER_MINOR) "."
+ __stringify(ZOCL_DRIVER_PATCHLEVEL));
+
+MODULE_DESCRIPTION(ZOCL_DRIVER_DESC);
+MODULE_AUTHOR("Sonal Santan <sonal.santan@xilinx.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/zocl/zocl_drv.h b/drivers/gpu/drm/zocl/zocl_drv.h
new file mode 100644
index 000000000000..ef6a9acadfc1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.h
@@ -0,0 +1,59 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZOCL_DRV_H_
+#define _ZOCL_DRV_H_
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+/* A CMA-backed GEM BO plus the DRM_ZOCL_BO_FLAGS_* it was created with. */
+struct drm_zocl_bo {
+	struct drm_gem_cma_object base;
+	uint32_t flags;
+};
+
+/* Per-device state: DRM device plus the mapped register window. */
+struct drm_zocl_dev {
+	struct drm_device *ddev;
+	void __iomem *regs;		/* ioremapped register space */
+	phys_addr_t res_start;		/* physical base of the MEM resource */
+	resource_size_t res_len;	/* length of the MEM resource */
+	unsigned int irq;		/* NOTE(review): unused in this patch */
+};
+
+/* Recover the zocl BO from its embedded GEM object. */
+static inline struct drm_zocl_bo *to_zocl_bo(struct drm_gem_object *bo)
+{
+	/*
+	 * container_of() instead of a raw cast: correct even if base.base
+	 * ever stops being the first member of struct drm_zocl_bo.
+	 */
+	return container_of(bo, struct drm_zocl_bo, base.base);
+}
+
+int zocl_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_sync_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_map_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_info_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+void zocl_describe(const struct drm_zocl_bo *obj);
+
+#endif